[
  {
    "path": ".bootstrap.mk",
    "content": "export VERSION_TAG=$(shell git rev-parse --short HEAD)\nexport JOB_NAME=$(shell basename $PWD)\n\ndash-split = $(word $2,$(subst -, ,$1))\ndash-1 = $(call dash-split,$*,1)\ndash-2 = $(call dash-split,$*,2)\n\nhelp:##............Show this help.\n\t@echo \"\"\n\t@fgrep -h \"##\" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\\\$$//' | sed -e 's/##//' | sed 's/^/    /'\n\t@echo \"\"\n\t@echo \"\"\n"
  },
  {
    "path": ".gitattributes",
    "content": "*.job linguist-language=HCL\n"
  },
  {
    "path": ".github/workflows/build-gcp-dns-updater.yaml",
    "content": "# .github/workflows/build-gcp-dns-updater.yaml\nname: Build GCP DNS Updater Image\n\non:\n  push:\n    branches:\n      - main\n    paths:\n      - 'docker_images/gcp-dns-updater/**'\n  workflow_dispatch:\n\njobs:\n  build-and-push:\n    runs-on: ubuntu-latest\n    permissions:\n      contents: read\n      packages: write # Required for pushing to GitHub Packages if used, good practice anyway\n\n    steps:\n      - name: Checkout Code\n        uses: actions/checkout@v6\n\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@v4\n\n      - name: Login to Docker Registry\n        uses: docker/login-action@v4\n        with:\n          registry: docker.${{ secrets.NOMAD_VAR_tld }}\n          username: ${{ secrets.DOCKER_REGISTRY_USER }}\n          password: ${{ secrets.DOCKER_REGISTRY_PASSWORD }}\n\n      - name: Build Image using Makefile\n        env:\n          NOMAD_VAR_tld: ${{ secrets.NOMAD_VAR_tld }}\n        run: make build-gcp-dns-updater\n\n      - name: Push Image\n        run: docker push docker.${{ secrets.NOMAD_VAR_tld }}/gcp-dns-updater:latest\n"
  },
  {
    "path": ".github/workflows/nomad.yaml",
    "content": "on:\n  push:\n    branches:\n      - master\n\njobs:\n  # JOB to run change detection\n  changes:\n    runs-on: ubuntu-latest\n    permissions:\n      pull-requests: read\n    outputs:\n      jobs: ${{ steps.filter.outputs.nomadjobs_files }}\n      volumes: ${{ steps.filter_volumes.outputs.volumes_files }}\n    steps:\n    - name: 'Checkout'\n      uses: 'actions/checkout@v6'\n\n    - uses: dorny/paths-filter@v4\n      id: filter_volumes\n      with:\n        list-files: 'json'\n        filters: |\n          volumes:\n            - 'nomad_jobs/**/volume.hcl'\n            - 'nomad_jobs/**/*-volume.hcl'\n\n    - uses: dorny/paths-filter@v4\n      id: filter\n      with:\n        list-files: 'json'\n        filters: |\n          nomadjobs:\n            # Updated paths based on directory restructure\n            - 'nomad_jobs/media-stack/plex/*.job'\n            - 'nomad_jobs/media-stack/radarr/*.job'\n            - 'nomad_jobs/media-stack/lidarr/*.job'\n            - 'nomad_jobs/media-stack/overseerr/*.job'\n            - 'nomad_jobs/storage-backends/postgres/*.job'\n            - 'nomad_jobs/storage-backends/redis/*.job'\n            - 'nomad_jobs/storage-backends/pgvector/*.job'\n            - 'nomad_jobs/core-infra/coredns/*.job'\n            - 'nomad_jobs/storage-backends/iscsi-csi-plugin/*.job'\n            - 'nomad_jobs/media-stack/sabnzbd/*.job'\n            - 'nomad_jobs/media-stack/qbittorrent/*.job'\n            - 'nomad_jobs/media-stack/prowlarr/*.job'\n            - 'nomad_jobs/media-stack/tdarr/*.job'\n            - 'nomad_jobs/core-infra/smtp/*.job'\n            - 'nomad_jobs/ai-ml/ollama/*.job'\n            - 'nomad_jobs/ai-ml/open-webui/*.job'\n            - 'nomad_jobs/misc/gcp-dns-updater/*.job'\n            - 'nomad_jobs/core-infra/tailscale-este/*.job'\n            - 'nomad_jobs/core-infra/traefik/*.job'\n            - 'nomad_jobs/core-infra/iscsi-csi-plugin/*.job'\n            - 'nomad_jobs/observability/alertmanager/*.job'\n       
     - 'nomad_jobs/observability/prometheus/*.job'\n            - 'nomad_jobs/ai-ml/radbot/*.job'\n            - 'nomad_jobs/personal-cloud/ntfy/*.job'\n            - 'nomad_jobs/web-apps/homepage/*.job'\n            - 'nomad_jobs/media-stack/multi-scrobbler/*.job'\n            - 'nomad_jobs/media-stack/lidify/*.job'\n            - 'nomad_jobs/media-stack/mediasage/*.job'\n            - 'nomad_jobs/core-infra/netboot-xyz/*.job'\n            - 'nomad_jobs/web-apps/kideo/*.job'\n            - 'nomad_jobs/web-apps/minecraftmath/*.job'\n\n  add_volumes:\n    runs-on: ubuntu-latest\n    needs: changes\n    if: needs.changes.outputs.volumes != '[]'\n    continue-on-error: true\n    strategy:\n      matrix:\n        job: ${{ fromJSON(needs.changes.outputs.volumes ) }}\n\n    steps:\n    - name: 'Checkout'\n      uses: 'actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd' # v6\n\n    - name: Connect to Tailscale\n      uses: tailscale/github-action@v4\n      with:\n        oauth-client-id: ${{ secrets.TAILSCALE_OAUTH_CLIENT_ID }}\n        oauth-secret: ${{ secrets.TAILSCALE_OAUTH_SECRET }}\n        tags: tag:github-actions\n        args: --accept-dns=true\n\n    - name: Setup Nomad\n      uses: hashicorp/setup-nomad@v1.0.0\n      with:\n        version: \"1.10.4\"\n\n    - name: deploy\n      shell: bash\n      run: |\n        # Extract volume ID from the HCL file\n        VOLUME_ID=$(grep '^id' ${{ matrix.job }} | head -1 | sed 's/.*= *\"\\(.*\\)\"/\\1/')\n        # Skip if volume already exists\n        if nomad volume status \"$VOLUME_ID\" > /dev/null 2>&1; then\n          echo \"Volume '$VOLUME_ID' already exists, skipping creation\"\n        else\n          echo \"Creating volume '$VOLUME_ID'\"\n          nomad volume create ${{ matrix.job }}\n        fi\n      env:\n        NOMAD_ADDR: '${{ secrets.NOMAD_ADDR }}'\n\n  deploy_jobs:\n    runs-on: ubuntu-latest\n    needs: changes\n    if: needs.changes.outputs.jobs != '[]'\n    continue-on-error: true\n    
strategy:\n      matrix:\n        job: ${{ fromJSON(needs.changes.outputs.jobs ) }}\n\n    steps:\n    - name: 'Checkout'\n      uses: 'actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd' # v6\n\n    - name: Connect to Tailscale\n      uses: tailscale/github-action@v4\n      with:\n        oauth-client-id: ${{ secrets.TAILSCALE_OAUTH_CLIENT_ID }}\n        oauth-secret: ${{ secrets.TAILSCALE_OAUTH_SECRET }}\n        tags: tag:github-actions\n        args: --accept-dns=true\n\n    - name: Setup Nomad\n      uses: hashicorp/setup-nomad@v1.0.0\n      with:\n        version: \"1.10.4\"\n\n    - name: deploy\n      shell: bash\n      run: |\n        nomad job run ${{ matrix.job }} # Removed -var flags\n      env:\n        NOMAD_ADDR: '${{ secrets.NOMAD_ADDR }}'\n        NOMAD_VAR_region: 'home'\n        NOMAD_VAR_tld: '${{ secrets.NOMAD_VAR_tld }}' # Corrected case\n        NOMAD_VAR_shared_dir: '/home/shared/'\n        NOMAD_VAR_downloads_dir: '/home/sabnzbd/downloads'\n        NOMAD_VAR_music_dir: '/home/media/Music'\n        NOMAD_VAR_movies_dir: '/home/media/Movies'\n        NOMAD_VAR_books_dir: '/home/media/Books'\n        NOMAD_VAR_tv_dir: '/home/media/TV'\n        NOMAD_VAR_media_dir: '/home/media'\n        NOMAD_VAR_hass_key: '${{ secrets.NOMAD_VAR_hass_key }}' # Corrected case\n        NOMAD_VAR_hass_ip: '${{ secrets.NOMAD_VAR_hass_ip }}'\n        NOMAD_VAR_github_pat: ${{ secrets.NOMAD_VAR_github_pat }} # Corrected case\n        NOMAD_VAR_datacenters_all: '[\"dc1\", \"public\"]'\n        NOMAD_VAR_datacenters_dc1: '[\"dc1\"]'\n        NOMAD_VAR_datacenters_public: '[\"public\"]'\n        NOMAD_VAR_tailscale_auth: '${{ secrets.NOMAD_VAR_tailscale_auth }}' # Corrected case\n        NOMAD_VAR_tailscale_auth_este: '${{ secrets.NOMAD_VAR_tailscale_auth_este }}' # Corrected case\n        NOMAD_VAR_oauth_client_id: '${{ secrets.NOMAD_VAR_oauth_client_id }}' # Corrected case\n        NOMAD_VAR_oauth_client_secret: '${{ secrets.NOMAD_VAR_oauth_client_secret }}' # 
Corrected case\n        NOMAD_VAR_oauth_secret: '${{ secrets.NOMAD_VAR_oauth_secret }}' # Corrected case\n        NOMAD_VAR_oauth_emails: '${{ secrets.NOMAD_VAR_oauth_emails }}' # Corrected case\n        NOMAD_VAR_ssh_id: '${{ secrets.NOMAD_VAR_ssh_id }}' # Corrected case\n        NOMAD_VAR_truenas_api_key: '${{ secrets.NOMAD_VAR_truenas_api_key }}' # Corrected case\n        NOMAD_VAR_gh_access_token: '${{ secrets.NOMAD_VAR_gh_access_token }}' # Corrected case\n        NOMAD_VAR_ollama_data_dir: '/home/shared/ollama'\n        NOMAD_VAR_ollama_base_url: 'http://ollama.service.consul:11434'\n        NOMAD_VAR_webui_secret_key: '${{ secrets.NOMAD_VAR_webui_secret_key }}' # Corrected case\n        NOMAD_VAR_datacenter: 'dc1'\n        NOMAD_VAR_dns_server_ip: '192.168.50.2'\n        # Added missing variables\n        NOMAD_VAR_aws_access_key: ${{ secrets.NOMAD_VAR_aws_access_key }}\n        NOMAD_VAR_aws_secret_key: ${{ secrets.NOMAD_VAR_aws_secret_key }}\n        NOMAD_VAR_bedrock_aws_region: ${{ secrets.NOMAD_VAR_bedrock_aws_region }}\n        NOMAD_VAR_gcp_dns_admin: ${{ secrets.NOMAD_VAR_gcp_dns_admin }}\n        NOMAD_VAR_gemini_api_key: ${{ secrets.NOMAD_VAR_gemini_api_key }}\n        NOMAD_VAR_litellm_master_key: ${{ secrets.NOMAD_VAR_litellm_master_key }}\n        NOMAD_VAR_manyfold_secret_key: ${{ secrets.NOMAD_VAR_manyfold_secret_key }}\n        NOMAD_VAR_postgres_pass: ${{ secrets.NOMAD_VAR_postgres_pass }}\n        NOMAD_VAR_truenas_iscsi_pass: ${{ secrets.NOMAD_VAR_truenas_iscsi_pass }}\n        # Added gcp_project_id\n        NOMAD_VAR_gcp_project_id: ${{ secrets.NOMAD_VAR_gcp_project_id }}\n        # GitHub PAT is now stored securely in secrets\n        NOMAD_VAR_truenass_iscsi_pass: ${{ secrets.NOMAD_VAR_truenass_iscsi_pass }} # Note potential typo in name\n        NOMAD_VAR_dns_zone: ${{ secrets.NOMAD_VAR_dns_zone }}\n        NOMAD_VAR_ingress_ip: ${{ secrets.NOMAD_VAR_ingress_ip }}\n        NOMAD_VAR_radbot_credential_key: ${{ 
secrets.NOMAD_VAR_radbot_credential_key }}\n        NOMAD_VAR_radbot_admin_token: ${{ secrets.NOMAD_VAR_radbot_admin_token }}\n        NOMAD_VAR_radbot_mcp_token: ${{ secrets.NOMAD_VAR_radbot_mcp_token }}\n        NOMAD_VAR_mullvad_wireguard_key: ${{ secrets.NOMAD_VAR_mullvad_wireguard_key }}\n        NOMAD_VAR_mullvad_wireguard_addr: ${{ secrets.NOMAD_VAR_mullvad_wireguard_addr }}\n        NOMAD_VAR_sonarr_api_key: ${{ secrets.NOMAD_VAR_sonarr_api_key }}\n        NOMAD_VAR_radarr_api_key: ${{ secrets.NOMAD_VAR_radarr_api_key }}\n        NOMAD_VAR_curseforge_api_key: ${{ secrets.NOMAD_VAR_curseforge_api_key }}\n        NOMAD_VAR_pgvector_pass: ${{ secrets.NOMAD_VAR_pgvector_pass }}\n        NOMAD_VAR_pgvector_admin_password: ${{ secrets.NOMAD_VAR_pgvector_admin_password }}\n        NOMAD_VAR_postgres_admin_password: ${{ secrets.NOMAD_VAR_postgres_admin_password }}\n        NOMAD_VAR_litellm_crawl4ai_key: ${{ secrets.NOMAD_VAR_litellm_crawl4ai_key }}\n        NOMAD_VAR_litellm_salt_key: ${{ secrets.NOMAD_VAR_litellm_salt_key }}\n        NOMAD_VAR_wazuh_api_password: ${{ secrets.NOMAD_VAR_wazuh_api_password }}\n        NOMAD_VAR_wazuh_dashboard_password: ${{ secrets.NOMAD_VAR_wazuh_dashboard_password }}\n        NOMAD_VAR_wazuh_indexer_password: ${{ secrets.NOMAD_VAR_wazuh_indexer_password }}\n        NOMAD_VAR_otr_pass: ${{ secrets.NOMAD_VAR_otr_pass }}\n        NOMAD_VAR_plex_token: ${{ secrets.NOMAD_VAR_plex_token }}\n        NOMAD_VAR_listenbrainz_token: ${{ secrets.NOMAD_VAR_listenbrainz_token }}\n        NOMAD_VAR_listenbrainz_username: ${{ secrets.NOMAD_VAR_listenbrainz_username }}\n        NOMAD_VAR_lastfm_api_key: ${{ secrets.NOMAD_VAR_lastfm_api_key }}\n        NOMAD_VAR_lastfm_api_secret: ${{ secrets.NOMAD_VAR_lastfm_api_secret }}\n        NOMAD_VAR_lidarr_api_key: ${{ secrets.NOMAD_VAR_lidarr_api_key }}\n        NOMAD_VAR_kideo_jwt_secret: ${{ secrets.NOMAD_VAR_kideo_jwt_secret }}\n        NOMAD_VAR_kideo_youtube_cookies: ${{ 
secrets.NOMAD_VAR_kideo_youtube_cookies }}\n        NOMAD_VAR_kideo_curiositystream_user: ${{ secrets.NOMAD_VAR_kideo_curiositystream_user }}\n        NOMAD_VAR_kideo_curiositystream_pass: ${{ secrets.NOMAD_VAR_kideo_curiositystream_pass }}\n        NOMAD_VAR_minecraftmath_jwt_secret: ${{ secrets.NOMAD_VAR_minecraftmath_jwt_secret }}\n"
  },
  {
    "path": ".github/workflows/update-kideo.yaml",
    "content": "name: Update kideo image tag\n\non:\n  repository_dispatch:\n    types: [update-kideo]\n\njobs:\n  update-and-deploy:\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v6\n        with:\n          token: ${{ secrets.ACTIONS_PAT }}\n\n      - name: Update image tag in Nomad job\n        run: |\n          TAG=\"${{ github.event.client_payload.tag }}\"\n          sed -i \"s|ghcr.io/perrymanuk/kideo:[^ \\\"]*|ghcr.io/perrymanuk/kideo:${TAG}|\" \\\n            nomad_jobs/web-apps/kideo/nomad.job\n          echo \"Updated kideo image tag to ${TAG}\"\n\n      - name: Commit and push\n        run: |\n          git config user.name \"github-actions[bot]\"\n          git config user.email \"github-actions[bot]@users.noreply.github.com\"\n          TAG=\"${{ github.event.client_payload.tag }}\"\n          git add nomad_jobs/web-apps/kideo/nomad.job\n          git commit -m \"chore: bump kideo to ${TAG}\"\n          git push\n"
  },
  {
    "path": ".github/workflows/update-minecraftmath.yaml",
    "content": "name: Update minecraftmath image tag\n\non:\n  repository_dispatch:\n    types: [update-minecraftmath]\n\njobs:\n  update-and-deploy:\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v6\n        with:\n          token: ${{ secrets.ACTIONS_PAT }}\n\n      - name: Update image tag in Nomad job\n        run: |\n          TAG=\"${{ github.event.client_payload.tag }}\"\n          sed -i \"s|ghcr.io/perrymanuk/minecraftmath:[^ \\\"]*|ghcr.io/perrymanuk/minecraftmath:${TAG}|\" \\\n            nomad_jobs/web-apps/minecraftmath/nomad.job\n          echo \"Updated minecraftmath image tag to ${TAG}\"\n\n      - name: Commit and push\n        run: |\n          git config user.name \"github-actions[bot]\"\n          git config user.email \"github-actions[bot]@users.noreply.github.com\"\n          TAG=\"${{ github.event.client_payload.tag }}\"\n          git add nomad_jobs/web-apps/minecraftmath/nomad.job\n          git commit -m \"chore: bump minecraftmath to ${TAG}\"\n          git push\n"
  },
  {
    "path": ".github/workflows/update-radbot-dev.yaml",
    "content": "name: Update radbot-dev image tag\n\non:\n  repository_dispatch:\n    types: [update-radbot-dev]\n\njobs:\n  update-and-deploy:\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v6\n        with:\n          token: ${{ secrets.ACTIONS_PAT }}\n\n      - name: Update image tag in dev Nomad job\n        run: |\n          TAG=\"${{ github.event.client_payload.tag }}\"\n          sed -i \"s|ghcr.io/perrymanuk/radbot:[^ \\\"]*|ghcr.io/perrymanuk/radbot:${TAG}|\" \\\n            nomad_jobs/ai-ml/radbot/nomad-dev.job\n          echo \"Updated radbot-dev image tag to ${TAG}\"\n\n      - name: Commit and push\n        run: |\n          git config user.name \"github-actions[bot]\"\n          git config user.email \"github-actions[bot]@users.noreply.github.com\"\n          TAG=\"${{ github.event.client_payload.tag }}\"\n          git add nomad_jobs/ai-ml/radbot/nomad-dev.job\n          git commit -m \"chore: deploy radbot-dev with ${TAG}\"\n          git push\n"
  },
  {
    "path": ".github/workflows/update-radbot.yaml",
    "content": "name: Update radbot image tag\n\non:\n  repository_dispatch:\n    types: [update-radbot]\n\njobs:\n  update-and-deploy:\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v6\n        with:\n          token: ${{ secrets.ACTIONS_PAT }}\n\n      - name: Update image tag in Nomad job\n        run: |\n          TAG=\"${{ github.event.client_payload.tag }}\"\n          sed -i \"s|ghcr.io/perrymanuk/radbot:[^ \\\"]*|ghcr.io/perrymanuk/radbot:${TAG}|\" \\\n            nomad_jobs/ai-ml/radbot/nomad.job\n          echo \"Updated radbot image tag to ${TAG}\"\n\n      - name: Commit and push\n        run: |\n          git config user.name \"github-actions[bot]\"\n          git config user.email \"github-actions[bot]@users.noreply.github.com\"\n          TAG=\"${{ github.event.client_payload.tag }}\"\n          git add nomad_jobs/ai-ml/radbot/nomad.job\n          git commit -m \"chore: bump radbot to ${TAG}\"\n          git push\n"
  },
  {
    "path": ".gitignore",
    "content": ".envrc\n.env\n*-pub\n.passwords\n.envrc*\nvault/secrets.yaml\nvault/*.hcl\nwww/main.jpg\nssl\nlevant/*\n!levant/defaults.yml\nhosts\n*.swp\n.ra-aid\nCLAUDE.md\nscripts/*\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "Makefile",
    "content": "# Load .env files\n#include .envrc\n\ninclude ./.bootstrap.mk\n\n# Define base deployments using their service names\nbase_deployments = coredns docker-registry haproxy\n\n#help: # Placeholder for potential future help generation\n\n# Find the nomad job file for a given service name ($1) within nomad_jobs/ structure\n# Usage: $(call find_job_file, service_name)\n# Example: $(call find_job_file, coredns) -> nomad_jobs/core-infra/coredns/coredns.job (or .nomad)\nfind_job_file = $(shell find nomad_jobs/ -mindepth 2 -maxdepth 3 -type f \\( -name '$1.job' -o -name '$1.nomad' \\) -print -quit)\n\n.PHONY: dc1-%\ndc1-%: ## Deploy specific job to dc1 (searches within nomad_jobs/ structure)\n\t@JOB_FILE=$(call find_job_file,$*); \\\n\tif [ -z \"$$JOB_FILE\" ]; then \\\n\t\techo \"Error: Could not find nomad job file for '$*' in nomad_jobs/.\"; \\\n\t\texit 1; \\\n\tfi; \\\n\techo \"Found job file: $$JOB_FILE\"; \\\n\tnomad job run -var datacenters='[\"dc1\"]' $$JOB_FILE\n\n.PHONY: all-%\nall-%: ## Deploy specific job to all DCs (searches within nomad_jobs/ structure)\n\t@JOB_FILE=$(call find_job_file,$*); \\\n\tif [ -z \"$$JOB_FILE\" ]; then \\\n\t\techo \"Error: Could not find nomad job file for '$*' in nomad_jobs/.\"; \\\n\t\texit 1; \\\n\tfi; \\\n\techo \"Found job file: $$JOB_FILE\"; \\\n\tnomad job run -var datacenters='[\"dc1\", \"hetzner\"]' $$JOB_FILE\n\n.PHONY: deploy-%\ndeploy-%: ## Deploy specific job (searches within nomad_jobs/ structure)\n\t@JOB_FILE=$(call find_job_file,$*); \\\n\tif [ -z \"$$JOB_FILE\" ]; then \\\n\t\techo \"Error: Could not find nomad job file for '$*' in nomad_jobs/.\"; \\\n\t\texit 1; \\\n\tfi; \\\n\techo \"Found job file: $$JOB_FILE\"; \\\n\tnomad job run $$JOB_FILE\n\n.PHONY: deploy-base\ndeploy-base: ## Deploys base jobs (coredns, docker-registry, haproxy) to dc1\n\t@echo \"Deploying base services to dc1: $(base_deployments)\"\n\t$(foreach var,$(base_deployments), \\\n\t    @JOB_FILE=$$(call find_job_file,$(var)); \\\n\t 
   if [ -z \"$$JOB_FILE\" ]; then \\\n\t        echo \"Error: Could not find nomad job file for base deployment '$(var)' in nomad_jobs/.\"; \\\n\t        exit 1; \\\n\t    fi; \\\n\t    echo \"Deploying $(var) from $$JOB_FILE...\"; \\\n\t    nomad job run -var datacenters='[\"dc1\"]' $$JOB_FILE; \\\n\t)\n\n.PHONY: sslkeys\nsslkeys: ## Generate certs if you have SSL enabled\n\tconsul-template -config ssl/consul-template.hcl -once -vault-renew-token=false\n\n.PHONY: ssl-browser-cert\nssl-browser-cert: ## Generate browser cert if you have SSL enabled\n\tsudo openssl pkcs12 -export -out browser_cert.p12 -inkey ssl/hetzner/server-key.pem -in ssl/hetzner/server.pem -certfile ssl/hetzner/nomad-ca.pem\n\n.PHONY: sync-github-secrets\nsync-github-secrets: ## Sync NOMAD_VAR variables from .envrc to GitHub secrets using gh CLI\n\t@echo \"Syncing NOMAD_VAR variables from .envrc to GitHub secrets...\"\n\t@bash -c 'source .envrc && env | grep \"^NOMAD_VAR_\" | while read -r line; do \\\n\t\tname=\"$${line%%=*}\"; \\\n\t\tvalue=\"$${line#*=}\"; \\\n\t\techo \"Setting $$name\"; \\\n\t\tprintf \"%s\" \"$$value\" | gh secret set \"$$name\"; \\\n\tdone'\n\t@echo \"✅ All NOMAD_VAR secrets synced to GitHub\"\n\n.PHONY: build-update-metadata\nbuild-update-metadata: ## Build the update-metadata Docker image\n\t@echo \"Building update-metadata Docker image...\"\n\t# Assumes update-metadata is in docker_images/update-metadata/\n\tdocker build --platform linux/amd64 -t update-metadata:latest docker_images/update-metadata/\n\n.PHONY: build-gcp-dns-updater\nbuild-gcp-dns-updater: ## Build the gcp-dns-updater Docker image\n\t@echo \"Building gcp-dns-updater Docker image...\"\n\t# Assumes gcp-dns-updater is in docker_images/gcp-dns-updater/\n\tdocker build --platform linux/amd64 -t docker.$$NOMAD_VAR_tld/gcp-dns-updater:latest docker_images/gcp-dns-updater/\n\n# Example deployment target for gcp-dns-updater (if needed, uncomment and adjust)\n#.PHONY: 
deploy-gcp-dns-updater\n#deploy-gcp-dns-updater: ## Deploy gcp-dns-updater job\n# NOTE: an explicit target named 'deploy-gcp-dns-updater' shadows the generic\n# deploy-% pattern rule, so calling '$(MAKE) deploy-gcp-dns-updater' here would\n# recurse forever; run the resolved job file directly instead:\n#\tnomad job run $(call find_job_file,gcp-dns-updater)\n"
  },
  {
    "path": "README.md",
    "content": "# Hashi-Homelab\n<p align=\"center\">\n<img width=\"250\" src=\"homelab.png\" />\n</p>\n\n### UPDATE - September 2nd 2025\n\nThis repo has gone through some major changes since the last update. I've completely reorganized the job structure into 10 clean categories (77 services total now!), added a comprehensive AI/ML stack with Ollama and Open-WebUI, enhanced the monitoring with Loki and Vector for log aggregation, modernized the alertmanager with better persistence and pushover notifications, added weekly docker cleanup automation, redesigned CoreDNS and Traefik for proper HA deployment, and implemented comprehensive Nomad allocation monitoring. The GitHub Actions deployment has been refined with better change detection and the whole thing just runs much more smoothly now. Also added a bunch of new services like smart home integration, personal cloud apps, and storage backends including pgvector for AI workloads, plus a few other bits and bobs that make the whole setup more robust.\n\n### Background\n\nThe hashi-homelab was born of a desire to have a simple to maintain but very flexible homelab setup. While designed to work as a cohesive whole, each individual job can be taken and deployed on any Nomad cluster with minimal adjustments - they're built to be portable and self-contained.\n\nThe main goals were to keep the resources required to run the base lab setup small and to have all of the parts be easily exchangeable.  \n\n`make deploy-base` will deploy coredns, docker-registry and haproxy - these are needed for everything else to work but aside from these you can pick and choose what to deploy with `make deploy-SERVICE_NAME` to deploy any of the 77 services organized across 10 categories. `make deploy-prometheus` or `make deploy-ollama` for example. 
You can also target specific datacenters with `make dc1-traefik` or `make all-postgres`.\n\nThe whole thing is organized much better now with services grouped into logical categories like ai-ml, media-stack, smart-home, observability, etc. Makes it way easier to find what you're looking for and deploy related services together.\n\nIn the future I would like to provide a ready to boot image for a raspberry pi where you can run all of this as the resources needed are really minimal. With just the basics you can get away with one pi4 4gb model with plenty of room to spare.\n\n### Core Components:\n\n* **Scheduler**: Nomad *...with proper allocation monitoring now*\n* **Service Catalog/Registry**: Consul  \n* **Service Mesh**: Traefik *...redesigned for HA deployment, much more robust*\n* **VPN**: Tailscale *...can't say enough good things about tailscale, it's integral for my homelab now*\n* **DNS**: CoreDNS *...now with HA setup and proper failover*\n* **Keepalived**: Assign a floating IP for DNS to not lose it if a node goes down\n* **Monitoring**: Prometheus, Alertmanager, Telegraf, Blackbox-exporter, and Grafana *...plus Loki and Vector for log aggregation*  \n* **Container Registry**: Docker-Registry *...because sometimes you don't want to rely on Docker Hub being up*  \n* **AI/ML**: Ollama for local LLM serving, Open-WebUI for chat interface, LiteLLM for API compatibility\n* **Vector Database**: PostgreSQL with pgvector extension for AI/ML vector embeddings storage and similarity search\n* **Storage**: NFS and iSCSI CSI plugins for persistent storage across the cluster\n\n### Service Categories (77 total):\n\n* **ai-ml** (8): ollama, open-webui, litellm, cognee, crawl4ai, manyfold, paperless-ai, pgvector-client\n* **core-infra** (13): coredns, traefik, haproxy, keepalived, tailscale, github-runner, csi plugins, etc.\n* **media-stack** (16): plex, sonarr, radarr, lidarr, sabnzbd, qbittorrent, overseerr, navidrome, etc.\n* **personal-cloud** (4): nextcloud, 
bitwarden, paperless, radicale\n* **smart-home** (5): home-assistant, deconz, zigbee2mqtt, mqtt, owntracks-recorder  \n* **observability** (7): prometheus, grafana, alertmanager, loki, vector, telegraf, blackbox-exporter\n* **storage-backends** (9): postgres, pgvector, redis, mariadb, neo4j, qdrant, docker-registry, etc.\n* **web-apps** (5): heimdall, wordpress, firecrawl, alertmanager-dashboard, www\n* **misc** (7): gitea, uploader, murmur, octoprint, adb, linuxgsm, gcp-dns-updater\n* **system** (3): docker-cleanup, volumes\n\n### Setup\n\nYou need to have Nomad and Consul already running, a simple setup with the -dev flag will suffice for testing but you'll want a proper cluster for real usage. If you don't already have a Nomad and Consul cluster, there are some excellent guides here...  \nhttps://www.nomadproject.io/guides/install/production/deployment-guide.html  \nhttps://learn.hashicorp.com/consul/datacenter-deploy/deployment-guide  \n\nThere are also some files in the `config` folder to help you get started and also one with some services to announce so the Consul and Nomad UI are available over the service mesh.\n\nThis repo relies on a `.envrc` file and direnv installed or setting the environment variables manually.\nThere is an `envrc` example file located in the repo that you can fill in and move to `.envrc`\n\n\nThe secret values from the `.envrc` also need to be put into your github secrets if you plan on deploying via the automated workflow. You can use `make sync-github-secrets` to sync them all at once which is pretty handy.\n\nOnce this is done, you simply run a `make deploy-base` and point your DNS to resolve via one of the Nomad nodes' IP address.  \n\nOne of the more specific parts of the setup that you may need to adjust is I use several NFS mounts to provide persistent storage mounted on each client at `/home/shared` for configs and `/home/media` for images, video, audio, etc. 
Depending on which parts of this you are planning to deploy you will just need to adjust this persistent storage to meet the setup of your clients. The CSI plugins help make this more flexible now.\n\nServices are exposed by their task name in the nomad job and whatever you configure your TLD to be in the `.envrc`. The whole thing works really well with the automated GitHub Actions deployment now - just push changes and they get deployed automatically to your cluster. This requires tailscale for the GitHub Actions to connect to your cluster.\n"
  },
  {
    "path": "ansible/configs/consul.hcl.j2",
    "content": "#jinja2: trim_blocks:False\nserver = {% if \"lan-client-server\" in group_names %}true{% else %}false{% endif %}\nui = {% if \"lan-client-server\" in group_names %}true{% else %}false{% endif %}\n{% if \"wan-clients\" in group_names %}\n{% raw %}\nclient_addr = \"{{GetInterfaceIP \\\"tailscale0\\\"}}\"\nadvertise_addr = \"{{GetInterfaceIP \\\"tailscale0\\\"}}\"\nbind_addr = \"{{GetInterfaceIP \\\"tailscale0\\\"}}\"\n{% endraw %}\n{% else %}\n{% raw %}\nclient_addr = \"0.0.0.0\"\nadvertise_addr = \"{{ GetPrivateInterfaces | include \\\"network\\\" \\\"192.168.50.0/24\\\" | attr \\\"address\\\" }}\"\nbind_addr = \"0.0.0.0\"\n{% endraw %}\n{% endif %}\n{% raw %}\nadvertise_addr_wan = \"{{ GetPrivateInterfaces | include \\\"network\\\" \\\"192.168.50.0/24\\\" | attr \\\"address\\\" }}\"\n{% endraw %}\ntranslate_wan_addrs = true\ndata_dir = \"/var/lib/consul\"\ndatacenter = \"homelab\"\nenable_syslog = true\nleave_on_terminate = true\nlog_level = \"WARN\"\nretry_join = [\"192.168.50.39\", \"192.168.50.113\", \"192.168.50.85\"]\n{% if \"lan-client-server\" in group_names %}bootstrap_expect = 3{% else %}{% endif %}\ntelemetry {\n  prometheus_retention_time = \"60s\"\n}\n"
  },
  {
    "path": "ansible/configs/consul.service",
    "content": "[Unit]\nDescription=consul agent\nRequires=network-online.target tailscaled.service\nAfter=network-online.target tailscaled.service\n\n[Service]\nExecStartPre=/bin/sleep 30\nEnvironmentFile=-/etc/default/consul\nRestart=always\nExecStart=/usr/bin/consul agent -domain consul -ui -config-dir=/etc/consul.d\nExecReload=/bin/kill -HUP $MAINPID\nKillSignal=SIGINT\n[Install]\nWantedBy=multi-user.target\n"
  },
  {
    "path": "ansible/configs/docker-daemon.json.j2",
    "content": "{\n  \"dns\": [\"192.168.50.2\", \"192.168.50.1\", \"8.8.8.8\"]{% if 'cheese' in group_names %},\n  \"runtimes\": {\n    \"nvidia\": {\n      \"args\": [],\n      \"path\": \"nvidia-container-runtime\"\n    }\n  }\n{% endif %}\n}\n"
  },
  {
    "path": "ansible/configs/nomad.hcl.j2",
    "content": "#jinja2: trim_blocks:False\ndata_dir = \"/var/lib/nomad/\"\ndatacenter = {% if \"cheese\" in group_names %}\"cheese\"{% elif \"minecraft\" in group_names %}\"minecraft\"{% else %}\"dc1\"{% endif %}\nlog_level = \"warn\"\nbind_addr = \"0.0.0.0\"\nregion = \"home\"\n\nserver {\n  enabled          = {% if \"lan-client-server\" in group_names %}true{% else %}false{% endif %}\n  bootstrap_expect = 3\n  server_join {\n    retry_join     = [\"192.168.50.39\", \"192.168.50.113\", \"192.168.50.85\"]\n    retry_max      = 3\n    retry_interval = \"15s\"\n  }\n  authoritative_region  = \"home\"\n  heartbeat_grace = \"300s\"\n  min_heartbeat_ttl = \"20s\"\n}\n\nclient {\n  enabled = true\n{% raw %}\n  network_interface = \"{{ GetPrivateInterfaces | include \\\"network\\\" \\\"192.168.50.0/24\\\" | attr \\\"name\\\" }}\"\n{% endraw %}\n  options {\n    docker.auth.config = \"/root/.docker/config.json\"\n    docker.privileged.enabled = true\n    driver.raw_exec.enable = \"1\"\n    docker.volumes.enabled = true\n  }\n\n  meta {\n    shared_mount = {% if \"wan-clients\" in group_names %}\"false\"{% else %}\"true\"{% endif %}\n    dns = {% if \"wan-clients\" in group_names %}\"false\"{% else %}\"true\"{% endif %}\n    {%- if ansible_hostname == \"klo01\" %}\n    keepalived_priority = \"100\"\n    keepalived_priority_dns1 = \"100\"\n    keepalived_priority_dns2 = \"{{ 200 | random(start=101) }}\"\n    {%- else %}\n    keepalived_priority = \"{{ 200 | random(start=101) }}\"\n    keepalived_priority_dns1 = \"{{ 200 | random(start=101) }}\"\n    keepalived_priority_dns2 = \"{{ 200 | random(start=101) }}\"\n    {%- endif %}\n  }\n\n  host_network \"lan\" {\n    cidr = \"192.168.50.0/24\"\n    reserved_ports = \"22\"\n  }\n\n  host_network \"tailscale\" {\n    cidr = \"100.0.0.0/8\"\n    reserved_ports = \"22\"\n  }\n\n  {% if \"wan-clients\" in group_names %}\n  host_network \"public\" {\n    cidr = \"78.47.90.68/32\"\n    reserved_ports = \"22\"\n  }\n  {%- endif %}\n\n 
 {%- if ansible_hostname == \"klo01\" %}\n  reserved {\n    memory = 3072\n  }\n  {%- endif %}\n\n}\n\ntelemetry {\n  disable_hostname = true\n  prometheus_metrics = true\n  publish_allocation_metrics = true\n  publish_node_metrics = true\n  use_node_name = false\n}\n{% raw %}\nadvertise {\n  http = \"{{ GetPrivateInterfaces | include \\\"network\\\" \\\"192.168.50.0/24\\\" | attr \\\"address\\\" }}:4646\"\n  rpc = \"{{ GetPrivateInterfaces | include \\\"network\\\" \\\"192.168.50.0/24\\\" | attr \\\"address\\\" }}:4647\"\n  serf = \"{{ GetPrivateInterfaces | include \\\"network\\\" \\\"192.168.50.0/24\\\" | attr \\\"address\\\" }}:4648\"\n}\n{% endraw %}\nconsul {\n  # The address to the Consul agent.\n  {%- raw %}\n  address = \"127.0.0.1:8500\"\n  {%- endraw %}\n  # The service name to register the server and client with Consul.\n\n  client_service_name = \"nomad-client\"\n\n  # Enables automatically registering the services.\n  auto_advertise = true\n\n  # Enabling the server and client to bootstrap using Consul.\n  server_auto_join = true\n  client_auto_join = true\n}\n\n#vault {\n#  enabled = true\n#  address = \"http://vault.service.home:8200\"\n#  allow_unauthenticated = true\n#  create_from_role = \"nomad-cluster\"\n#}\n\nplugin \"docker\" {\n  config {\n    allow_caps = [\"CHOWN\",\"DAC_OVERRIDE\",\"FSETID\",\"FOWNER\",\"MKNOD\",\"NET_RAW\",\"SETGID\",\"SETUID\",\"SETFCAP\",\"SETPCAP\",\"NET_BIND_SERVICE\",\"SYS_CHROOT\",\"KILL\",\"AUDIT_WRITE\",\"NET_ADMIN\",\"NET_BROADCAST\",\"SYS_NICE\"]\n    # extra Docker labels to be set by Nomad on each Docker container with the appropriate value\n    extra_labels = [\"job_name\", \"task_group_name\", \"task_name\", \"namespace\", \"node_name\"]\n    allow_privileged = true\n    volumes {\n      enabled      = true\n      selinuxlabel = \"z\"\n    }\n  }\n}\n"
  },
  {
    "path": "ansible/configs/nomad.service",
    "content": "[Unit]\nDescription=nomad.agent\nRequires=network-online.target tailscaled.service\nAfter=network-online.target tailscaled.service remote-fs.target\n# Hard requirement: Nomad must not start until NFS mounts are ready\nRequiresMountsFor=/home/shared /home/media/TV /home/media/Music /home/media/Movies /home/media/Books\n\n[Service]\nEnvironmentFile=-/etc/default/nomad\nRestart=on-failure\nRestartSec=10\nExecStart=/usr/bin/nomad agent $OPTIONS -config=/etc/nomad.d/nomad.hcl\nExecReload=/bin/kill -HUP $MAINPID\nKillSignal=SIGINT\nKillMode=process\n\n[Install]\nWantedBy=multi-user.target\n"
  },
  {
    "path": "ansible/playbook.yml",
    "content": "---\n- name: network mounts\n  hosts:\n    - lan-client-server\n    - lan-client\n    - cheese\n    - minecraft\n  become: true\n  remote_user: root\n  tasks:\n    - name: Configure static IP via netplan\n      copy:\n        dest: /etc/netplan/00-installer-config.yaml\n        content: |\n          network:\n            version: 2\n            ethernets:\n              ens3:\n                addresses:\n                  - {{ inventory_hostname }}/24\n                routes:\n                  - to: default\n                    via: 192.168.50.1\n                nameservers:\n                  addresses:\n                    - 192.168.50.1\n      notify: Apply netplan\n\n    - name: Ensure directories exist\n      file:\n        path: \"{{ item }}\"\n        state: directory\n        mode: '0755'\n      with_items:\n        - /home/shared\n        - /home/media/TV\n        - /home/media/Music\n        - /home/media/Movies\n        - /home/media/Books\n\n    - name: makesure multipath.conf exists\n      copy:\n        content: \"\"\n        dest: /etc/multipath.conf\n        force: no\n        backup: yes\n      ignore_errors: yes\n\n    - name: Manage /etc/multipath.conf\n      blockinfile:\n        path: /etc/multipath.conf\n        block: |\n          defaults {\n              user_friendly_names yes\n              find_multipaths yes\n          }\n\n    - name: Install Apt packages\n      apt:\n        name:\n          - nfs-common\n          - avahi-daemon\n          - docker.io\n          - open-iscsi\n          - lsscsi\n          - sg3-utils\n          - multipath-tools\n          - scsitools\n\n    - name: Ensure /etc/docker directory exists\n      file:\n        path: /etc/docker\n        state: directory\n        mode: '0755'\n\n    - name: Add NVIDIA Container Toolkit GPG key\n      apt_key:\n        url: https://nvidia.github.io/libnvidia-container/gpgkey\n        state: present\n        keyring: 
/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg\n      when: \"'cheese' in group_names\"\n\n    - name: Add NVIDIA Container Toolkit repository\n      apt_repository:\n        repo: \"deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://nvidia.github.io/libnvidia-container/stable/deb/$(ARCH) /\"\n        state: present\n        filename: nvidia-container-toolkit\n      when: \"'cheese' in group_names\"\n\n    - name: Install NVIDIA Container Toolkit\n      apt:\n        name: nvidia-container-toolkit\n        state: present\n        update_cache: yes\n      when: \"'cheese' in group_names\"\n\n    - name: Configure Docker daemon with fallback DNS and nvidia runtime\n      template:\n        src: configs/docker-daemon.json.j2\n        dest: /etc/docker/daemon.json\n      notify: Restart Docker\n\n    - name: Remove old NFS fstab entries\n      lineinfile:\n        path: /etc/fstab\n        regexp: '^192\\.168\\.50\\.208:/mnt/.*'\n        state: absent\n\n    - name: Add NFS fstab entries with proper options\n      blockinfile:\n        path: /etc/fstab\n        marker: \"# {mark} ANSIBLE MANAGED NFS MOUNTS\"\n        block: |\n          192.168.50.208:/mnt/pool0/share              /home/shared         nfs4    _netdev,hard,timeo=600,retrans=5,x-systemd.mount-timeout=90,x-systemd.requires=network-online.target,x-systemd.after=network-online.target  0  0\n          192.168.50.208:/mnt/pool1/media/TV           /home/media/TV       nfs4    _netdev,hard,timeo=600,retrans=5,x-systemd.mount-timeout=90,x-systemd.requires=network-online.target,x-systemd.after=network-online.target  0  0\n          192.168.50.208:/mnt/pool0/media/music        /home/media/Music    nfs4    _netdev,hard,timeo=600,retrans=5,x-systemd.mount-timeout=90,x-systemd.requires=network-online.target,x-systemd.after=network-online.target  0  0\n          192.168.50.208:/mnt/pool1/media/Movies       /home/media/Movies   nfs4    
_netdev,hard,timeo=600,retrans=5,x-systemd.mount-timeout=90,x-systemd.requires=network-online.target,x-systemd.after=network-online.target  0  0\n          192.168.50.208:/mnt/pool0/media/audiobooks   /home/media/Books    nfs4    _netdev,hard,timeo=600,retrans=5,x-systemd.mount-timeout=90,x-systemd.requires=network-online.target,x-systemd.after=network-online.target  0  0\n      notify:\n        - Reload systemd fstab\n        - Mount Filesystems\n\n    - name: Enable services\n      systemd:\n        name: \"{{ item }}\"\n        enabled: yes\n        state: started\n      with_items:\n        - open-iscsi\n        - multipath-tools\n\n  handlers:\n    - name: Apply netplan\n      command: netplan apply\n\n    - name: Reload systemd fstab\n      systemd:\n        daemon_reload: yes\n\n    - name: Mount Filesystems\n      command: mount -a\n\n    - name: Restart Docker\n      service:\n        name: docker\n        state: restarted\n\n- name: Update configuration, execute command, and install packages\n  hosts:\n    - lan-client-server\n    - lan-client\n    - wan-clients\n    - cheese\n    - minecraft\n  remote_user: root\n  #roles:\n  #  - role: artis3n.tailscale\n  #    vars:\n  #      # Example pulling the API key from the env vars on the host running Ansible\n  #      tailscale_authkey: \"{{ lookup('env', 'NOMAD_VAR_tailscale_auth') }}\"\n  #      tailscale_args: \"{% if 'wan-clients' in group_names %}--accept-routes=true{% else %}--accept-routes=false{% endif %}\"\n  tasks:\n    - name: Ensure directories exist\n      file:\n        path: \"{{ item }}\"\n        state: directory\n        mode: '0755'\n      with_items:\n        - /var/lib/nomad\n        - /var/lib/consul\n        - /etc/nomad.d\n        - /etc/consul.d\n\n    - name: Manage systemd service file nomad\n      copy:\n        src: configs/nomad.service\n        dest: /lib/systemd/system/nomad.service\n      notify: Reload systemd\n\n    - name: Manage systemd service file consul\n      copy:\n    
    src: configs/consul.service\n        dest: /lib/systemd/system/consul.service\n      notify: Reload systemd\n\n    - name: manage nomad config\n      template:\n        src: configs/nomad.hcl.j2\n        dest: /etc/nomad.d/nomad.hcl\n      notify: Restart Service\n\n    - name: manage consul config\n      template:\n        src: configs/consul.hcl.j2\n        dest: /etc/consul.d/server.hcl\n\n    - name: Add HashiCorp APT repository key\n      apt_key:\n        url: https://apt.releases.hashicorp.com/gpg\n        state: present\n        validate_certs: no\n        keyring: /usr/share/keyrings/hashicorp-archive-keyring.gpg\n\n    - name: Configure HashiCorp APT repository\n      apt_repository:\n        repo: \"deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com {{ ansible_distribution_release }} main\"\n\n    - name: Install Apt packages\n      apt:\n        name:\n          - nomad=1.10.4-1\n          - consul=1.19.1-1\n        dpkg_options: 'force-confdef,force-confold'\n        update_cache: true\n        state: latest\n        allow_downgrade: true\n\n    - name: Modify sysctl entry for net.ipv4.ip_nonlocal_bind\n      sysctl:\n        name: \"{{ item.name }}\"\n        value: \"{{ item.value }}\"\n        state: present\n      with_items:\n        - { name: \"net.ipv4.ip_nonlocal_bind\", value: \"1\" }\n        - { name: \"net.ipv4.conf.all.forwarding\", value: \"1\" }\n      notify: Apply Sysctl Changes\n\n    - name: Enable services\n      systemd:\n        name: \"{{ item }}\"\n        enabled: yes\n        state: started\n      with_items:\n        - nomad\n        - consul\n        - tailscaled\n\n  handlers:\n    - name: Restart Service\n      service:\n        name: nomad\n        state: restarted\n\n    - name: Reload systemd\n      systemd:\n        daemon_reload: yes\n\n    - name: Mount Filesystems\n      command: mount -a\n\n    - name: Apply Sysctl Changes\n      command: sysctl -p 
/etc/sysctl.conf\n\n- name: Install and configure Tailscale\n  hosts:\n    - all\n  become: yes\n  remote_user: root\n  gather_facts: yes\n  tags: tailscale\n\n  vars:\n    # Read authkey from environment variable; default to 'MISSING' if not set\n    tailscale_auth_key: \"{{ lookup('env', 'NOMAD_VAR_tailscale_auth') | default('MISSING') }}\"\n    # Optionally customize your Tailscale hostname\n    tailscale_hostname: \"{{ inventory_hostname }}\"\n    # Tag to advertise (must match OAuth client tag)\n    tailscale_tags: \"tag:nomad\"\n\n  tasks:\n    - name: Download Tailscale GPG key via curl\n      shell: >\n        curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/noble.noarmor.gpg\n        | tee /usr/share/keyrings/tailscale-archive-keyring.gpg\n        >/dev/null\n      changed_when: true\n\n    - name: Update apt cache\n      apt:\n        update_cache: yes\n\n    - name: Configure Tailscale apt repository\n      copy:\n        dest: /etc/apt/sources.list.d/tailscale.list\n        content: |\n          deb [signed-by=/usr/share/keyrings/tailscale-archive-keyring.gpg arch=amd64] https://pkgs.tailscale.com/stable/ubuntu/ noble main\n\n    - name: Update apt cache (after adding Tailscale repo)\n      apt:\n        update_cache: yes\n\n    - name: Install Tailscale\n      apt:\n        name: tailscale\n        state: latest\n\n    - name: Enable and start tailscaled service\n      service:\n        name: tailscaled\n        state: started\n        enabled: yes\n\n    - name: Bring Tailscale interface up using authkey\n      # \"command\" used because there's no official Ansible module for \"tailscale up\".\n      # This is not strictly idempotent; see notes below for advanced usage.\n      command: >\n        tailscale up\n        --authkey={{ tailscale_auth_key }}\n        --hostname={{ tailscale_hostname }}\n        --advertise-tags={{ tailscale_tags }}\n        --accept-dns=false\n        --reset\n      register: tailscale_up\n      changed_when: \"'Success' 
in tailscale_up.stdout or 'Success' in tailscale_up.stderr or tailscale_up.rc == 0\"\n\n    - name: Show tailscale status\n      command: tailscale status\n      register: tailscale_status\n      changed_when: false\n\n    - debug:\n        var: tailscale_status.stdout\n\n- name: Install Zsh and Oh My Zsh with Agnoster theme\n  hosts: all\n  become: yes\n  remote_user: root\n  gather_facts: yes\n\n  vars:\n    my_zsh_user: \"root\"  # Change this to the desired user\n\n  tasks:\n    - name: Install zsh\n      apt:\n        name: zsh\n        state: present\n        update_cache: yes\n\n    - name: Ensure home directory path is known\n      user:\n        name: \"{{ my_zsh_user }}\"\n      register: user_info  # This captures the user details, including home directory.\n\n    - name: Check if Oh My Zsh is already installed\n      stat:\n        path: \"/root/.oh-my-zsh\"\n      register: oh_my_zsh_stat\n\n    - name: Check if zshrc exists\n      stat:\n        path: \"/root/.zshrc\"\n      register: zshrc_stat\n\n    - name: Clone Oh My Zsh\n      git:\n        repo: \"https://github.com/ohmyzsh/ohmyzsh.git\"\n        dest: \"/root/.oh-my-zsh\"\n      become_user: \"{{ my_zsh_user }}\"\n      when: not oh_my_zsh_stat.stat.exists\n\n    - name: Copy the default .zshrc template if not present\n      copy:\n        src: \"/root/.oh-my-zsh/templates/zshrc.zsh-template\"\n        dest: \"/root/.zshrc\"\n        remote_src: yes\n      become_user: \"{{ my_zsh_user }}\"\n      when: not zshrc_stat.stat.exists\n\n    - name: Set Oh My Zsh theme to agnoster\n      # Uses a regex replace to ensure 'ZSH_THEME=\"agnoster\"'\n      replace:\n        path: \"/root/.zshrc\"\n        regexp: '^ZSH_THEME=\"[^\"]+\"'\n        replace: 'ZSH_THEME=\"agnoster\"'\n      become_user: \"{{ my_zsh_user }}\"\n\n    - name: Change default shell to zsh for the user\n      user:\n        name: \"{{ my_zsh_user }}\"\n        shell: /usr/bin/zsh\n"
  },
  {
    "path": "ansible/zsh.yml",
    "content": "---\n- name: Install Zsh and Oh My Zsh with Agnoster theme\n  hosts: cheese\n  become: yes\n  remote_user: root\n  gather_facts: yes\n\n  vars:\n    my_zsh_user: \"root\"  # Change this to the desired user\n\n  tasks:\n    - name: Install zsh\n      apt:\n        name: zsh\n        state: present\n        update_cache: yes\n\n    - name: Ensure home directory path is known\n      user:\n        name: \"{{ my_zsh_user }}\"\n      register: user_info  # This captures the user details, including home directory.\n\n    - name: Check if Oh My Zsh is already installed\n      stat:\n        path: \"/root/.oh-my-zsh\"\n      register: oh_my_zsh_stat\n\n    - name: Check if zshrc exists\n      stat:\n        path: \"/root/.zshrc\"\n      register: zshrc_stat\n\n    - name: Clone Oh My Zsh \n      git:\n        repo: \"https://github.com/ohmyzsh/ohmyzsh.git\"\n        dest: \"/root/.oh-my-zsh\"\n      become_user: \"{{ my_zsh_user }}\"\n      when: not oh_my_zsh_stat.stat.exists\n\n    - name: Copy the default .zshrc template if not present\n      copy:\n        src: \"/root/.oh-my-zsh/templates/zshrc.zsh-template\"\n        dest: \"/root/.zshrc\"\n        remote_src: yes\n      become_user: \"{{ my_zsh_user }}\"\n      when: not zshrc_stat.stat.exists\n\n    - name: Set Oh My Zsh theme to agnoster\n      # Uses a regex replace to ensure 'ZSH_THEME=\"agnoster\"'\n      replace:\n        path: \"/root/.zshrc\"\n        regexp: '^ZSH_THEME=\"[^\"]+\"'\n        replace: 'ZSH_THEME=\"agnoster\"'\n      become_user: \"{{ my_zsh_user }}\"\n\n    - name: Change default shell to zsh for the user\n      user:\n        name: \"{{ my_zsh_user }}\"\n        shell: /usr/bin/zsh\n\n"
  },
  {
    "path": "docker_images/gcp-dns-updater/Dockerfile",
    "content": "FROM python:3.14-slim\n\n# Set the working directory in the container\nWORKDIR /app\n\n# Copy the requirements file into the container at /app\nCOPY requirements.txt .\n\n# Install any needed packages specified in requirements.txt\n# Using --no-cache-dir to reduce image size\nRUN pip install --no-cache-dir -r requirements.txt\n\n# Copy the current directory contents into the container at /app\nCOPY update_dns.py .\n\n# Define the command to run the application\nCMD [\"python\", \"update_dns.py\"]\n"
  },
  {
    "path": "docker_images/gcp-dns-updater/README.md",
    "content": "# GCP Dynamic DNS Updater Service\n\nThis service periodically checks the public IPv4 address of the node it's running on and updates a specified A record in a Google Cloud DNS managed zone. It's designed to run as a Nomad job within the Hashi-Homelab environment, utilizing a **pre-built Docker image**.\n\n## Features\n\n*   Fetches the current public IPv4 address from `https://v4.ifconfig.co/ip`.\n*   Uses the `google-cloud-dns` Python SDK to interact with Google Cloud DNS.\n*   Authenticates using a GCP Service Account key provided via an environment variable.\n*   Checks the specified DNS record:\n    *   If it's a CNAME, it deletes the CNAME record.\n    *   If it's an A record, it updates the IP address if it has changed.\n    *   If it doesn't exist (or after deleting a CNAME), it creates the A record with the specified TTL.\n*   Runs periodically via a Nomad job, executing the Python script within the pre-built Docker container.\n\n## Prerequisites\n\n1.  **Docker:** Docker must be installed locally to build the service image.\n2.  **GCP Service Account:** You need a Google Cloud Platform service account with the necessary permissions to manage DNS records.\n    *   Go to the GCP Console -> IAM & Admin -> Service Accounts.\n    *   Create a new service account (e.g., `gcp-dns-updater-sa`).\n    *   Grant this service account the `DNS Administrator` role (`roles/dns.admin`) on the project containing your managed zone.\n    *   Create a JSON key file for this service account and download it securely. You will need the *contents* of this file, not the file itself.\n3.  **Nomad Environment:** A running Nomad cluster where this job can be scheduled. 
The Nomad clients must have Docker installed and configured.\n\n## Configuration\n\nThe service is configured via environment variables passed to the Nomad task, which are then consumed by the `update_dns.py` script running inside the Docker container:\n\n*   `GCP_DNS_ZONE_NAME`: The name of the managed zone in GCP DNS (e.g., `demonsafe-com`). The script derives the Project ID from the credentials.\n*   `GCP_DNS_RECORD_NAME`: The DNS record name to update (e.g., `*.demonsafe.com`). **Note:** The script expects the base name; the trailing dot is handled internally if needed by the SDK.\n*   `RECORD_TTL`: (Optional) The Time-To-Live (in seconds) for the created/updated A record. Defaults to 300 if not set.\n*   `GCP_PROJECT_ID`: The Google Cloud Project ID containing the DNS zone.\n*   `GCP_SERVICE_ACCOUNT_KEY_B64`: **Required.** The base64-encoded *content* of the GCP service account JSON key file.\n\n**Generating the Base64 Key:**\n\nYou need to encode the *content* of your downloaded JSON key file into a single-line base64 string.\n\nOn Linux/macOS, you can use:\n```bash\nbase64 -w 0 < /path/to/your/gcp_key.json\n```\n*(Ensure you use `-w 0` or an equivalent flag for your `base64` command to prevent line wrapping)*\n\nCopy the resulting string.\n\n**Setting Environment Variables in Nomad:**\n\nThese variables are defined within the `env` block of the `nomad.job` file using Go templating to read runtime environment variables provided by the Nomad agent (which in turn are often sourced from the deployment mechanism, like GitHub Actions):\n\n```hcl\n# Example within nomad.job task config\nenv {\n  GCP_DNS_ZONE_NAME = <<EOH\n{{ env \"NOMAD_VAR_tld\" | replace \".\" \"-\" }}\nEOH\n  GCP_DNS_RECORD_NAME = <<EOH\n*.{{ env \"NOMAD_VAR_tld\" }}\nEOH\n  GCP_SERVICE_ACCOUNT_KEY_B64 = <<EOH\n{{ env \"NOMAD_VAR_gcp_dns_admin\" }}\nEOH\n  GCP_PROJECT_ID = <<EOH\n{{ env \"NOMAD_VAR_gcp_project_id\" }}\nEOH\n  # RECORD_TTL = \"300\" # Optional, defaults to 300 in the 
script\n}\n```\n\n**Important:** The actual values for `NOMAD_VAR_tld`, `NOMAD_VAR_gcp_dns_admin`, and `NOMAD_VAR_gcp_project_id` **must** be provided securely to the Nomad agent's environment during deployment (e.g., via GitHub Actions secrets mapped in the workflow, or using Vault integration), not hardcoded directly in the job file.\n\n## Deployment\n\n1.  **Ensure Prerequisites:** Verify the service account is created, you have the base64 encoded key, and Docker is running.\n2.  **Build the Docker Image:** From the root of the `hashi-homelab` repository, run the make target:\n    ```bash\n    make build-gcp-dns-updater\n    ```\n    This builds the required Docker image tagged `gcp-dns-updater:latest` using the `gcp-dns-updater/Dockerfile`.\n3.  **Deploy the Nomad Job:**\n    *   Ensure the required environment variables (`NOMAD_VAR_tld`, `NOMAD_VAR_gcp_dns_admin`, `NOMAD_VAR_gcp_project_id`) are available to the Nomad agent running the job. This is typically handled by the CI/CD pipeline (like the GitHub Actions workflow in this repo) or Vault integration.\n    *   Deploy using the Nomad CLI (ensure you are in the repository root or adjust paths). 
This job will use the `gcp-dns-updater:latest` image built in the previous step:\n        ```bash\n        # The job will read variables from its environment\n        nomad job run gcp-dns-updater/nomad.job\n        ```\n    *   Alternatively, if using the project's Makefile structure:\n        ```bash\n        # Assumes the Makefile's deploy target doesn't need extra vars\n        # and that required env vars are set in the deployment runner\n        make deploy-gcp-dns-updater\n        ```\n\n## Files\n\n*   `update_dns.py`: The core Python script for updating DNS (runs inside the container).\n*   `requirements.txt`: Python dependencies (installed during Docker build).\n*   `Dockerfile`: Defines how to build the service's Docker image.\n*   `nomad.job`: Nomad job definition for periodic execution using the `gcp-dns-updater:latest` Docker image.\n*   `README.md`: This documentation file.\n"
  },
  {
    "path": "docker_images/gcp-dns-updater/requirements.txt",
    "content": "google-cloud-dns\nrequests\ngoogle-auth\n"
  },
  {
    "path": "docker_images/gcp-dns-updater/update_dns.py",
    "content": "\nimport os\nimport requests\nimport logging\nimport sys\nimport base64\nimport json\nimport time\nimport socket # Added import\n\n# Import GCP specific libraries\nfrom google.cloud import dns\nfrom google.oauth2 import service_account\nfrom google.api_core.exceptions import GoogleAPIError\n\n# Setup logging\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')\n\ndef get_env_vars():\n    \"\"\"Reads required environment variables and returns them.\"\"\"\n    project_id = os.environ.get('GCP_PROJECT_ID')\n    zone_name = os.environ.get('GCP_DNS_ZONE_NAME') # This will be the TLD like \"demonsafe.com\"\n    record_name = os.environ.get('GCP_DNS_RECORD_NAME')\n    key_b64 = os.environ.get('GCP_SERVICE_ACCOUNT_KEY_B64') # Changed variable name\n\n    if not all([project_id, zone_name, record_name, key_b64]): # Check for key_b64\n        missing = [var for var, val in [\n            ('GCP_PROJECT_ID', project_id),\n            ('GCP_DNS_ZONE_NAME', zone_name),\n            ('GCP_DNS_RECORD_NAME', record_name),\n            ('GCP_SERVICE_ACCOUNT_KEY_B64', key_b64) # Updated missing check\n        ] if not val]\n        logging.error(f\"Missing required environment variables: {', '.join(missing)}\")\n        sys.exit(1)\n\n    return project_id, zone_name, record_name, key_b64 # Return key_b64\n\ndef get_public_ip():\n    \"\"\"Fetches the public IPv4 address.\"\"\"\n    try:\n        response = requests.get('https://v4.ifconfig.me/ip', timeout=10)\n        response.raise_for_status()  # Raise an exception for bad status codes\n        ip_address = response.text.strip()\n        logging.info(f\"Successfully fetched public IP: {ip_address}\")\n        return ip_address\n    except requests.exceptions.RequestException as e:\n        logging.error(f\"Error fetching public IP: {e}\")\n        sys.exit(1) # Exit if IP cannot be fetched\n\ndef get_dns_client(key_b64: str, project_id: str): # Changed key_path to key_b64 and 
added project_id\n    \"\"\"Creates and returns a DNS client authenticated with a base64 encoded service account key.\"\"\"\n    try:\n        # Decode the base64 string\n        logging.info(\"Decoding base64 service account key...\")\n        decoded_key = base64.b64decode(key_b64)\n        logging.info(\"Base64 key decoded successfully.\")\n\n        # Parse the decoded JSON key\n        logging.info(\"Parsing service account key JSON...\")\n        key_info = json.loads(decoded_key)\n        logging.info(\"Service account key JSON parsed successfully.\")\n\n        # Create credentials from the parsed key info\n        credentials = service_account.Credentials.from_service_account_info(key_info)\n\n        # Use the provided project_id, not the one from credentials, to ensure consistency\n        client = dns.Client(project=project_id, credentials=credentials)\n        logging.info(f\"Successfully created DNS client for project {project_id}\")\n        return client\n\n    except base64.binascii.Error as e:\n        logging.error(f\"Failed to decode base64 service account key: {e}\")\n        sys.exit(1)\n    except json.JSONDecodeError as e:\n        logging.error(f\"Failed to parse service account key JSON: {e}\")\n        sys.exit(1)\n    except Exception as e:\n        logging.error(f\"Failed to create DNS client from service account info: {e}\")\n        sys.exit(1)\n\ndef update_dns_record(client: dns.Client, project_id: str, zone_name: str, record_name: str, ip_address: str):\n    \"\"\"\n    Checks and updates/creates an A record for the given name in the specified zone,\n    replacing a CNAME if necessary.\n\n    Args:\n        client: Authenticated DNS client.\n        project_id: GCP project ID.\n        zone_name: The domain TLD (e.g., \"demonsafe.com\"). 
This will be converted\n                   to the GCP zone name format (e.g., \"demonsafe-com\").\n        record_name: The specific record to update (e.g., \"*.demonsafe.com\").\n        ip_address: The public IP address to set.\n    \"\"\"\n    try:\n        # Convert the TLD zone name (e.g., \"demonsafe.com\") to GCP zone name format (e.g., \"demonsafe-com\")\n        gcp_zone_name = zone_name.replace('.', '-')\n        logging.info(f\"Targeting GCP DNS Zone: {gcp_zone_name}\")\n\n        zone = client.zone(gcp_zone_name, project_id)\n        if not zone.exists():\n            logging.error(f\"DNS zone '{gcp_zone_name}' not found in project '{project_id}'.\")\n            return # Cannot proceed without the zone\n\n        # Ensure record_name ends with a dot for FQDN matching\n        fqdn = record_name if record_name.endswith('.') else f\"{record_name}.\"\n        logging.info(f\"Checking DNS records for: {fqdn} in zone {gcp_zone_name}\")\n\n        record_sets = list(zone.list_resource_record_sets(filter_=f\"name={fqdn}\"))\n\n        existing_a_record = None\n        existing_cname_record = None\n\n        for record_set in record_sets:\n            if record_set.record_type == 'A' and record_set.name == fqdn:\n                existing_a_record = record_set\n                logging.info(f\"Found existing A record: {existing_a_record.name} -> {existing_a_record.rrdatas}\")\n            elif record_set.record_type == 'CNAME' and record_set.name == fqdn:\n                existing_cname_record = record_set\n                logging.info(f\"Found existing CNAME record: {existing_cname_record.name} -> {existing_cname_record.rrdatas}\")\n\n        changes = zone.changes()\n        needs_update = False\n\n        # Handle existing CNAME (delete it to replace with A)\n        if existing_cname_record:\n            logging.warning(f\"Deleting existing CNAME record {fqdn} to replace with A record.\")\n            changes.delete_record_set(existing_cname_record)\n        
    needs_update = True\n            # Ensure we don't try to delete an A record if we just deleted a CNAME\n            existing_a_record = None\n\n        # Define the new A record we want\n        new_a_record = zone.resource_record_set(fqdn, \"A\", 300, [ip_address])\n\n        # Handle existing A record\n        if existing_a_record:\n            if existing_a_record.rrdatas == [ip_address]:\n                logging.info(f\"Existing A record {fqdn} already points to {ip_address}. No update needed.\")\n                return # Nothing to do\n            else:\n                logging.info(f\"Existing A record {fqdn} points to {existing_a_record.rrdatas}. Updating to {ip_address}.\")\n                changes.delete_record_set(existing_a_record)\n                changes.add_record_set(new_a_record)\n                needs_update = True\n        # Handle case where no A record (and no CNAME was found/deleted)\n        elif not existing_cname_record: # Only add if we didn't already decide to replace CNAME\n            logging.info(f\"No existing A or CNAME record found for {fqdn}. 
Creating new A record pointing to {ip_address}.\")\n            changes.add_record_set(new_a_record)\n            needs_update = True\n        # Handle case where CNAME was found and deleted - we still need to add the A record\n        elif existing_cname_record:\n             logging.info(f\"Adding A record for {fqdn} pointing to {ip_address} after CNAME deletion.\")\n             changes.add_record_set(new_a_record)\n             # needs_update should already be True\n\n        # Execute the changes if any were queued\n        if needs_update:\n            logging.info(f\"Executing DNS changes for {fqdn} in zone {gcp_zone_name}...\")\n            changes.create()\n            # Wait until the changes are finished.\n            while changes.status != 'done':\n                logging.info(f\"Waiting for DNS changes to complete (status: {changes.status})...\")\n                time.sleep(5) # Wait 5 seconds before checking again\n                changes.reload()\n            logging.info(f\"Successfully updated DNS record {fqdn} to {ip_address} in zone {gcp_zone_name}.\")\n        else:\n            # This case should only be hit if an A record existed and was correct\n            logging.info(\"No DNS changes were necessary.\")\n\n    except GoogleAPIError as e:\n        logging.error(f\"GCP API Error updating DNS record {fqdn} in zone {gcp_zone_name}: {e}\")\n    except Exception as e:\n        logging.error(f\"An unexpected error occurred during DNS update for {fqdn} in zone {gcp_zone_name}: {e}\")\n\n\ndef update_spf_record(client: dns.Client, project_id: str, zone_name: str, record_name: str, ip_address: str):\n    \"\"\"Updates the SPF TXT record on the bare domain with the current public IP.\"\"\"\n    try:\n        gcp_zone_name = zone_name.replace('.', '-')\n        logging.info(f\"Updating SPF record in zone: {gcp_zone_name}\")\n\n        zone = client.zone(gcp_zone_name, project_id)\n        if not zone.exists():\n            logging.error(f\"DNS zone 
'{gcp_zone_name}' not found in project '{project_id}'.\")\n            return\n\n        # Derive bare domain from record_name (e.g., \"*.demonsafe.com\" -> \"demonsafe.com.\")\n        domain = record_name.lstrip('*.') if record_name.startswith('*.') else record_name\n        fqdn = domain if domain.endswith('.') else f\"{domain}.\"\n        logging.info(f\"Checking TXT records for: {fqdn}\")\n\n        spf_value = f'\"v=spf1 ip4:{ip_address} ~all\"'\n\n        record_sets = list(zone.list_resource_record_sets(filter_=f\"name={fqdn}\"))\n        existing_txt = None\n        for rs in record_sets:\n            if rs.record_type == 'TXT' and rs.name == fqdn:\n                existing_txt = rs\n                logging.info(f\"Found existing TXT record: {rs.name} -> {rs.rrdatas}\")\n                break\n\n        changes = zone.changes()\n        needs_update = False\n\n        if existing_txt:\n            new_rrdatas = []\n            spf_found = False\n            for rd in existing_txt.rrdatas:\n                if 'v=spf1' in rd:\n                    spf_found = True\n                    if ip_address in rd:\n                        logging.info(f\"SPF record already contains {ip_address}. No update needed.\")\n                        return\n                    logging.info(f\"Replacing SPF entry: {rd} -> {spf_value}\")\n                    new_rrdatas.append(spf_value)\n                else:\n                    new_rrdatas.append(rd)\n            if not spf_found:\n                logging.info(f\"No existing SPF entry found. Adding: {spf_value}\")\n                new_rrdatas.append(spf_value)\n\n            changes.delete_record_set(existing_txt)\n            new_txt = zone.resource_record_set(fqdn, \"TXT\", 300, new_rrdatas)\n            changes.add_record_set(new_txt)\n            needs_update = True\n        else:\n            logging.info(f\"No TXT record found for {fqdn}. 
Creating with SPF: {spf_value}\")\n            new_txt = zone.resource_record_set(fqdn, \"TXT\", 300, [spf_value])\n            changes.add_record_set(new_txt)\n            needs_update = True\n\n        if needs_update:\n            logging.info(f\"Executing SPF TXT changes for {fqdn}...\")\n            changes.create()\n            while changes.status != 'done':\n                logging.info(f\"Waiting for SPF changes to complete (status: {changes.status})...\")\n                time.sleep(5)\n                changes.reload()\n            logging.info(f\"Successfully updated SPF record for {fqdn} with ip4:{ip_address}\")\n\n    except GoogleAPIError as e:\n        logging.error(f\"GCP API Error updating SPF record: {e}\")\n    except Exception as e:\n        logging.error(f\"Unexpected error updating SPF record: {e}\")\n\n\nif __name__ == \"__main__\":\n    logging.info(\"Starting DNS update script.\")\n    project_id, zone_name, record_name, key_b64 = get_env_vars()\n    public_ip = get_public_ip()\n\n    # DNS Pre-check logic\n    if public_ip:\n        hostname_to_check = 'asdf.demonsafe.com'\n        logging.info(f\"Performing pre-check for hostname: {hostname_to_check}\")\n        try:\n            resolved_ip = socket.gethostbyname(hostname_to_check)\n            logging.info(f\"Resolved IP for {hostname_to_check}: {resolved_ip}\")\n            if resolved_ip == public_ip:\n                logging.info(f'DNS record for {hostname_to_check} ({resolved_ip}) already matches public IP ({public_ip}). No update needed.')\n                sys.exit(0)\n            else:\n                logging.info(f'Resolved IP for {hostname_to_check} ({resolved_ip}) does not match public IP ({public_ip}). Proceeding with potential update.')\n        except socket.gaierror as e:\n            logging.warning(f'Could not resolve IP for {hostname_to_check}: {e}. 
Proceeding with potential update.')\n        except Exception as e:\n            logging.warning(f'An unexpected error occurred during DNS pre-check for {hostname_to_check}: {e}. Proceeding with potential update.')\n\n    if public_ip:\n        dns_client = get_dns_client(key_b64, project_id)\n        if dns_client:\n            update_dns_record(dns_client, project_id, zone_name, record_name, public_ip)\n            update_spf_record(dns_client, project_id, zone_name, record_name, public_ip)\n            logging.info(\"DNS update script finished.\")\n        else:\n            logging.error(\"Exiting due to DNS client initialization failure.\")\n            sys.exit(1)\n    else:\n        logging.error(\"Exiting due to inability to fetch public IP.\")\n        sys.exit(1)\n"
  },
  {
    "path": "docker_images/update-metadata/Dockerfile",
    "content": "FROM python:3.14-slim\n\nWORKDIR /app\n\nCOPY requirements.txt .\nRUN pip install --no-cache-dir -r requirements.txt\n\nCOPY sync_secrets.py .\n\nENTRYPOINT [\"python\", \"sync_secrets.py\"]\n"
  },
  {
    "path": "docker_images/update-metadata/README.md",
    "content": "# GitHub Secret Synchronization Script (Containerized)\n\n## Purpose\n\nThis script (`sync_secrets.py`), running inside a Docker container, reads environment variables defined in the project's root `.envrc` file and synchronizes them as GitHub secrets to the `perrymanuk/hashi-homelab` repository using the `PyGithub` library.\n\n## Requirements\n\n*   **Docker:** Docker must be installed and running to build and execute the container.\n*   **`NOMAD_VAR_github_pat` Environment Variable:** A GitHub Personal Access Token (PAT) with the `repo` scope must be available as an environment variable named `NOMAD_VAR_github_pat` in the **host shell** where you run the `make` command. The Makefile target (`sync-secrets`) will handle passing this token into the container under the name `GITHUB_TOKEN` for the script to use.\n*   **`.envrc` File:** An `.envrc` file must exist at the project root (`/Users/perry.manuk/git/perrymanuk/hashi-homelab/.envrc`) containing the secrets to sync.\n\n## Usage\n\n1.  **Ensure `NOMAD_VAR_github_pat` is set:** Export your GitHub PAT in your current host shell session:\n    ```bash\n    export NOMAD_VAR_github_pat=\"your_github_pat_here\"\n    ```\n2.  **Navigate to the project root directory:**\n    ```bash\n    cd /Users/perry.manuk/git/perrymanuk/hashi-homelab\n    ```\n3.  
**Run the Makefile target:**\n    ```bash\n    make sync-secrets\n    ```\n\nThis command will:\n    *   Build the Docker image defined in `scripts/Dockerfile`.\n    *   Run a container from the image.\n    *   Mount the host's `.envrc` file into the container.\n    *   Pass the **host's** `NOMAD_VAR_github_pat` environment variable into the container as `GITHUB_TOKEN`.\n    *   Execute the `sync_secrets.py` script within the container.\n\nThe script will output the status of each secret synchronization attempt (created, updated, or failed).\n\n**Important:** Running the script will overwrite any existing secrets in the GitHub repository that have the same name as variables found in the `.envrc` file.\n\n## `.envrc` Format\n\nThe script expects the `.envrc` file to follow this format:\n\n```bash\nexport VARIABLE_NAME=value\nexport ANOTHER_VARIABLE='value with spaces'\nexport YET_ANOTHER=\"double quoted value\"\n# This is a comment and will be ignored\n\n# Empty lines are also ignored\nexport SECRET_KEY=a_very_secret_value_here\n```\n\n*   Lines must start with `export`.\n*   Variable names and values are separated by `=`.\n*   Values can be unquoted, single-quoted (`'...'`), or double-quoted (`\"...\"`). Quotes are stripped before syncing.\n*   Lines starting with `#` (comments) and empty lines are ignored.\n"
  },
  {
    "path": "docker_images/update-metadata/requirements.txt",
    "content": "PyGithub\npython-hcl2\n"
  },
  {
    "path": "docker_images/update-metadata/update_job_metadata.py",
    "content": "\nimport argparse\nimport logging\nimport pathlib\nimport re\nimport sys\n\n# Configure logging\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')\n\n\ndef find_job_block(content):\n    \"\"\"Find the start and end indices of the main 'job' block.\"\"\"\n    job_match = re.search(r'^job\\s+\"[^\"]+\"\\s*\\{', content, re.MULTILINE)\n    if not job_match:\n        logging.warning(\"Could not find job block start.\")\n        return None, None\n\n    start_index = job_match.start()\n    # Find the matching closing brace\n    brace_level = 0\n    end_index = -1\n    in_string = False\n    escaped = False\n    for i, char in enumerate(content[start_index:]):\n        if escaped:\n            escaped = False\n            continue\n        if char == '\\\\':\n            escaped = True\n            continue\n        if char == '\"':\n            in_string = not in_string\n            continue\n        if not in_string:\n            if char == '{':\n                brace_level += 1\n            elif char == '}':\n                brace_level -= 1\n                if brace_level == 0:\n                    end_index = start_index + i\n                    break\n\n    if end_index == -1:\n        logging.warning(\"Could not find matching closing brace for job block.\")\n        return None, None\n\n    return start_index, end_index + 1\n\ndef find_meta_block(content):\n    \"\"\"Find the start and end indices of the 'meta' block within the given content.\"\"\"\n    meta_match = re.search(r'^\\s*meta\\s*\\{', content, re.MULTILINE)\n    if not meta_match:\n        return None, None\n\n    start_index = meta_match.start()\n    # Find the matching closing brace\n    brace_level = 0\n    end_index = -1\n    in_string = False\n    escaped = False\n    for i, char in enumerate(content[start_index:]):\n        if escaped:\n            escaped = False\n            continue\n        if char == '\\\\':\n            escaped = 
True\n            continue\n        if char == '\"':\n            in_string = not in_string\n            continue\n        if not in_string:\n            if char == '{':\n                brace_level += 1\n            elif char == '}':\n                brace_level -= 1\n                if brace_level == 0:\n                    end_index = start_index + i\n                    break\n\n    if end_index == -1:\n        logging.warning(\"Could not find matching closing brace for meta block.\")\n        return None, None\n\n    return start_index, end_index + 1\n\ndef update_job_metadata(repo_root):\n    \"\"\"Finds Nomad job files and updates their meta block with job_file path.\"\"\"\n    repo_path = pathlib.Path(repo_root).resolve()\n    nomad_jobs_path = repo_path / 'nomad_jobs'\n\n    if not nomad_jobs_path.is_dir():\n        logging.error(f\"'nomad_jobs' directory not found in {repo_path}\")\n        sys.exit(1)\n\n    logging.info(f\"Scanning for job files in {nomad_jobs_path}...\")\n\n    job_files = list(nomad_jobs_path.rglob('*.nomad')) + list(nomad_jobs_path.rglob('*.job'))\n\n    if not job_files:\n        logging.warning(\"No *.nomad or *.job files found.\")\n        return\n\n    modified_count = 0\n    for job_file in job_files:\n        try:\n            relative_path = job_file.relative_to(repo_path).as_posix()\n            logging.debug(f\"Processing file: {relative_path}\")\n            content = job_file.read_text()\n            original_content = content # Keep a copy for comparison\n\n            job_start, job_end = find_job_block(content)\n            if job_start is None or job_end is None:\n                logging.warning(f\"Skipping {relative_path}: Could not find main job block.\")\n                continue\n            job_block_content = content[job_start:job_end]\n            job_opening_line_match = re.match(r'^job\\s+\"[^\"]+\"\\s*\\{\\s*\\n?', job_block_content, re.MULTILINE)\n            if not job_opening_line_match:\n                 
logging.warning(f\"Skipping {relative_path}: Could not match job opening line format.\")\n                 continue\n            job_insert_pos = job_start + job_opening_line_match.end()\n\n            meta_start_rel, meta_end_rel = find_meta_block(job_block_content)\n            new_job_file_line = f'  job_file = \"{relative_path}\"'\n            modified = False\n\n            if meta_start_rel is not None and meta_end_rel is not None:\n                meta_start_abs = job_start + meta_start_rel\n                meta_end_abs = job_start + meta_end_rel\n                meta_block_content = content[meta_start_abs:meta_end_abs]\n                meta_opening_line_match = re.match(r'^\\s*meta\\s*\\{\\s*\\n?', meta_block_content, re.MULTILINE)\n                if not meta_opening_line_match:\n                    logging.warning(f\"Skipping {relative_path}: Could not match meta opening line format.\")\n                    continue\n                meta_insert_pos = meta_start_abs + meta_opening_line_match.end()\n\n                job_file_line_match = re.search(r'^(\\s*)job_file\\s*=\\s*\".*?\"$\\n?', meta_block_content, re.MULTILINE)\n\n                if job_file_line_match:\n                    existing_line = job_file_line_match.group(0)\n                    indent = job_file_line_match.group(1)\n                    new_line_with_indent = f'{indent}job_file = \"{relative_path}\"\\n' # Ensure newline\n                    if existing_line.strip() != new_line_with_indent.strip():\n                         # Replace existing line\n                        start = meta_start_abs + job_file_line_match.start()\n                        end = meta_start_abs + job_file_line_match.end()\n                        # Ensure we capture the trailing newline if present in match\n                        content = content[:start] + new_line_with_indent + content[end:]\n                        modified = True\n                else:\n                    # Insert new job_file line inside 
meta block\n                    content = content[:meta_insert_pos] + new_job_file_line + '\\n' + content[meta_insert_pos:]\n                    modified = True\n            else:\n                # Insert new meta block\n                new_meta_block = f'\\n  meta {{\\n{new_job_file_line}\\n  }}\\n'\n                content = content[:job_insert_pos] + new_meta_block + content[job_insert_pos:]\n                modified = True\n\n            if modified and content != original_content:\n                job_file.write_text(content)\n                logging.info(f\"Updated metadata in: {relative_path}\")\n                modified_count += 1\n            elif not modified:\n                 logging.debug(f\"No changes needed for: {relative_path}\")\n\n        except Exception as e:\n            logging.error(f\"Failed to process {relative_path}: {e}\")\n\n    logging.info(f\"Metadata update complete. {modified_count} files modified.\")\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"Update Nomad job files with job_file metadata.\")\n    # Default to the parent directory of the script's directory (../)\n    script_dir = pathlib.Path(__file__).parent.resolve()\n    default_repo_root = script_dir.parent\n    parser.add_argument(\n        \"--repo-root\",\n        type=str,\n        default=str(default_repo_root),\n        help=\"Path to the root of the repository.\"\n    )\n    args = parser.parse_args()\n\n    update_job_metadata(args.repo_root)\n\n"
  },
  {
    "path": "envrc",
    "content": "export CONSUL_HTTP_ADDR=http://FILL_IN_IP:8500\nexport CONSUL_CACERT=/etc/consul.d/ssl/ca.cert\nexport CONSUL_CLIENT_CERT=/etc/consul.d/ssl/consul.cert\nexport CONSUL_CLIENT_KEY=/etc/consul.d/ssl/consul.key\nexport VAULT_ADDR=http://FILL_IN_IP:8200\nexport VAULT_TOKEN=FILL_IN_TOKEN\nexport NOMAD_ADDR=http://FILL_IN_IP:4646\nexport NOMAD_VAR_region='home'\nexport NOMAD_VAR_tld='home'\nexport NOMAD_VAR_shared_dir='/home/shared/'\nexport NOMAD_VAR_downloads_dir='/home/sabnzbd/downloads'\nexport NOMAD_VAR_music_dir='/home/media/Music'\nexport NOMAD_VAR_movies_dir='/home/media/Movies'\nexport NOMAD_VAR_tv_dir='/home/media/TV'\nexport NOMAD_VAR_media_dir='/home/media'\n"
  },
  {
    "path": "nomad_jobs/TEMPLATE-volume.hcl",
    "content": "// =============================================================================\n// Nomad CSI Volume Template\n// =============================================================================\n//\n// Usage:\n//   1. Copy this file to nomad_jobs/<category>/<service-name>/volume.hcl\n//   2. Replace __VOL_NAME__ with the volume name (usually same as service name)\n//   3. Replace __SIZE__ with capacity (e.g. \"5GiB\", \"10GiB\", \"50GiB\")\n//   4. Set access_mode based on your needs (see below)\n//   5. Volume is auto-created by CI when pushed (if path is in workflow filter)\n//\n// Access modes:\n//   single-node-writer       : one node read/write (most services)\n//   single-node-reader-only  : one node read-only\n//   multi-node-single-writer : multiple nodes can mount, one writes (HA failover)\n//\n// Size guide:\n//   Config-only (app state):  1-5 GiB\n//   Small databases:          5-10 GiB\n//   Media metadata/indexes:   10-20 GiB\n//   Time-series / logs:       50-100 GiB\n//\n// =============================================================================\n\nid           = \"__VOL_NAME__\"\nexternal_id  = \"__VOL_NAME__\"\nname         = \"__VOL_NAME__\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"__SIZE__\"\ncapacity_max = \"__SIZE__\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"file-system\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\"]\n}\n"
  },
  {
    "path": "nomad_jobs/TEMPLATE.job",
    "content": "// =============================================================================\n// Nomad Job Template\n// =============================================================================\n//\n// Usage:\n//   1. Copy this file to nomad_jobs/<category>/<service-name>/nomad.job\n//   2. Find/replace the following placeholders:\n//      - __JOB_NAME__        : lowercase service name (e.g. \"sonarr\")\n//      - __GROUP_NAME__      : group name (e.g. \"downloaders\", \"monitoring\", \"ai\")\n//      - __CATEGORY__        : directory category (e.g. \"media-stack\", \"ai-ml\")\n//      - __IMAGE__           : docker image with tag (e.g. \"linuxserver/sonarr:4.0.16\")\n//      - __PORT__            : container port number (e.g. \"8989\")\n//      - __HEALTH_PATH__     : HTTP health check path (e.g. \"/ping\", \"/-/healthy\", \"/api/health\")\n//      - __CPU__             : CPU MHz allocation (see guide below)\n//      - __MEMORY__          : Memory MB allocation (see guide below)\n//   3. Remove any optional sections you don't need (marked with OPTIONAL)\n//   4. Update the variable declarations at the bottom\n//   5. Add any job-specific secrets to .envrc as NOMAD_VAR_<name>\n//   6. 
Add the job path to .github/workflows/nomad.yaml if it should auto-deploy\n//\n// Resource guide:\n//   Light services (static sites, proxies):     cpu = 100-200,  memory = 128-256\n//   Medium services (APIs, web apps):            cpu = 500-1000, memory = 512-1024\n//   Heavy services (.NET apps, databases, Java): cpu = 1000+,    memory = 1024-2048\n//   GPU / ML workloads:                          cpu = 200+,     memory = 4096-8192\n//\n// =============================================================================\n\njob \"__JOB_NAME__\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/__CATEGORY__/__JOB_NAME__/nomad.job\"\n    version  = \"1\"\n  }\n\n  // Ensures scheduling on nodes with NFS shared mount available.\n  // Remove if the service has no need for shared storage or config dirs.\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"__GROUP_NAME__\" {\n    count = 1\n\n    network {\n      port \"http\" {\n        host_network = \"lan\"\n        to           = \"__PORT__\"\n      }\n    }\n\n    // --- OPTIONAL: CSI Volume ------------------------------------------------\n    // Use for services that need persistent block storage (databases, stateful apps).\n    // Requires a matching volume.hcl deployed first.\n    // Remove this block and the prep-disk task + volume_mount if not needed.\n    //\n    // volume \"__JOB_NAME__\" {\n    //   type            = \"csi\"\n    //   read_only       = false\n    //   source          = \"__JOB_NAME__\"\n    //   access_mode     = \"single-node-writer\"\n    //   attachment_mode = \"file-system\"\n    // }\n    // -------------------------------------------------------------------------\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel      = 1\n      
min_healthy_time  = \"30s\"\n      healthy_deadline  = \"5m\"\n      progress_deadline = \"10m\"\n      auto_revert       = true\n    }\n\n    // --- OPTIONAL: Prep-disk task --------------------------------------------\n    // Required when using CSI volumes to fix ownership before the main task runs.\n    // Set UID:GID to match the user the main container runs as.\n    // Common values:\n    //   linuxserver images: 65534:65534 (nobody)\n    //   prometheus:         1000:2000\n    //   grafana:            472:472\n    //   loki:               10001:10001\n    //\n    // task \"prep-disk\" {\n    //   driver = \"docker\"\n    //\n    //   lifecycle {\n    //     hook    = \"prestart\"\n    //     sidecar = false\n    //   }\n    //\n    //   volume_mount {\n    //     volume      = \"__JOB_NAME__\"\n    //     destination = \"/volume/\"\n    //     read_only   = false\n    //   }\n    //\n    //   config {\n    //     image   = \"busybox:latest\"\n    //     command = \"sh\"\n    //     args    = [\"-c\", \"chown -R UID:GID /volume/\"]\n    //   }\n    //\n    //   resources {\n    //     cpu    = 200\n    //     memory = 128\n    //   }\n    // }\n    // -------------------------------------------------------------------------\n\n    task \"__JOB_NAME__\" {\n      driver = \"docker\"\n\n      config {\n        image = \"__IMAGE__\"\n        ports = [\"http\"]\n\n        // --- Bind mount pattern (shared NFS config dir) ---\n        // Use for services that store config on shared NFS.\n        // volumes = [\n        //   \"${var.shared_dir}__JOB_NAME__:/config\",\n        // ]\n\n        // --- Template mount pattern (config rendered by Nomad) ---\n        // Use when config is templated inline below.\n        // volumes = [\n        //   \"local/config.yaml:/app/config.yaml\",\n        // ]\n      }\n\n      // --- OPTIONAL: CSI volume mount ----------------------------------------\n      // volume_mount {\n      //   volume      = \"__JOB_NAME__\"\n      //   
destination = \"/data\"\n      //   read_only   = false\n      // }\n      // -----------------------------------------------------------------------\n\n      env {\n        TZ = \"Etc/UTC\"\n        // PUID = \"65534\"    // common for linuxserver images\n        // PGID = \"65534\"\n      }\n\n      // --- OPTIONAL: Config template -----------------------------------------\n      // Use for services that need a rendered config file.\n      // Reference secrets with ${var.secret_name} syntax.\n      //\n      // template {\n      //   data        = <<EOH\n      // your config here\n      // EOH\n      //   destination = \"local/config.yaml\"\n      //   change_mode = \"restart\"\n      //   // change_mode options:\n      //   //   \"restart\" - restart the task on config change (safest default)\n      //   //   \"signal\"  - send a signal: change_signal = \"SIGHUP\"\n      //   //   \"noop\"    - do nothing (use only for static configs)\n      // }\n      // -----------------------------------------------------------------------\n\n      service {\n        port = \"http\"\n        name = \"__JOB_NAME__\"\n        tags = [\n          \"traefik.enable=true\",\n        ]\n        check {\n          type     = \"http\"\n          path     = \"__HEALTH_PATH__\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = __CPU__\n        memory = __MEMORY__\n      }\n    }\n  }\n}\n\n// =============================================================================\n// Variables\n// =============================================================================\n// Common variables (always required - provided by .envrc / GitHub Actions):\n\nvariable \"region\" {\n  type        = string\n  description = \"Nomad region\"\n}\n\nvariable \"tld\" {\n  type        = string\n  
description = \"Top-level domain for service discovery\"\n}\n\nvariable \"shared_dir\" {\n  type        = string\n  description = \"Path to shared NFS config directory\"\n}\n\n// --- OPTIONAL: Add job-specific variables below ------------------------------\n// Follow this pattern:\n//\n// variable \"my_secret\" {\n//   type        = string\n//   description = \"Description of what this secret is for\"\n// }\n//\n// Then add to .envrc:  export NOMAD_VAR_my_secret='value'\n// And to GitHub Actions workflow env block if auto-deploying.\n// -----------------------------------------------------------------------------\n"
  },
  {
    "path": "nomad_jobs/ai-ml/cognee/nomad.job",
    "content": "job \"cognee\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/ai-ml/cognee/nomad.job\"\n    version = \"3\"\n  }\n\n  group \"cognee-ai\" {\n    count = 1\n\n    network {\n      port \"http\" { to = 8000 }\n#      port \"mcp\" { to = 3000 }\n    }\n\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"cognee-service\" {\n      driver = \"docker\"\n\n      config {\n        dns_servers = [\"192.168.50.2\"]\n        image = \"cognee/cognee:0.5.8\"\n        ports = [\"http\"]\n      }\n\n      env {\n        # --- LLM Configuration ---\n        LLM_PROVIDER            = \"openai\"\n        LLM_MODEL               = \"vertex_ai/gemini-1.5-pro-latest\"\n        LLM_API_KEY             = \"\"\n        LLM_ENDPOINT            = \"https://litellm.demonsafe.com\"\n\n        # --- Embedding Configuration ---\n        EMBEDDING_PROVIDER      = \"openai\"\n        EMBEDDING_MODEL         = \"text-embedding-ada-002\"\n        EMBEDDING_API_KEY       = \"\"\n\n        # --- Relational Database (PostgreSQL) ---\n        DB_PROVIDER             = \"postgres\"\n        DB_HOST                 = \"pgvector.service.consul\"\n        DB_PORT                 = \"5432\"\n        DB_USERNAME             = \"postgres\"\n        DB_PASSWORD             = \"ChAnGeMe\"\n        DB_NAME                 = \"cognee_metadata_db\"\n\n        # --- Vector Database (Qdrant) ---\n        #VECTOR_DB_PROVIDER      = \"qdrant\"\n        #VECTOR_DB_URL           = \"http://qdrant.service.consul:6333\"\n\n        # --- Graph Database (Neo4j) ---\n        GRAPH_DATABASE_PROVIDER = \"neo4j\"\n        GRAPH_DATABASE_URL      = \"bolt://neo4j.service.consul:7687\"\n        GRAPH_DATABASE_USERNAME = \"neo4j\"\n        
GRAPH_DATABASE_PASSWORD = \"ChAnGeMe\"\n\n        # --- General Settings ---\n        HOST                    = \"0.0.0.0\"\n        ENVIRONMENT             = \"production\"\n        DEBUG                   = \"false\"\n      }\n\n      resources {\n        cpu    = 100\n        memory = 2048\n      }\n\n      service {\n        name     = \"cognee\"\n        tags     = [\"traefik.enable=true\"]\n        port     = \"http\"\n\n        check {\n          type     = \"tcp\"\n          port     = \"http\"\n          interval = \"15s\"\n          timeout  = \"3s\"\n        }\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n  type = string\n  default = \"global\"\n}\n"
  },
  {
    "path": "nomad_jobs/ai-ml/crawl4ai/nomad.job",
    "content": "job \"crawl4ai\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/ai-ml/crawl4ai/nomad.job\"\n    version  = \"4\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"app\" {\n    count = 1\n\n    network {\n      port \"http\" {\n        to = 11235\n      }\n    }\n\n    volume \"crawl4ai\" {\n      type            = \"csi\"\n      read_only       = false\n      source          = \"crawl4ai-data\"\n      access_mode     = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"prep-disk\" {\n      driver = \"docker\"\n      \n      volume_mount {\n        volume      = \"crawl4ai\"\n        destination = \"/volume/\"\n        read_only   = false\n      }\n      \n      config {\n        image   = \"busybox:latest\"\n        command = \"sh\"\n        args    = [\"-c\", \"mkdir -p /volume/config && chmod -R 777 /volume/\"]\n      }\n      \n      resources {\n        cpu    = 200\n        memory = 128\n      }\n\n      lifecycle {\n        hook    = \"prestart\"\n        sidecar = false\n      }\n    }\n\n    task \"crawl4ai\" {\n      driver = \"docker\"\n\n      config {\n        image    = \"unclecode/crawl4ai:0.6.0-r2\"\n        ports    = [\"http\"]\n        shm_size = \"1000000000\"\n        dns_servers = [\"192.168.50.2\"]\n      }\n\n      volume_mount {\n        volume      = \"crawl4ai\"\n        destination = \"/app/data\"\n        read_only   = false\n      }\n\n      template {\n        data = <<EOH\n# Application Configuration\napp:\n  title: \"Crawl4AI API\"\n  version: \"0.6.0-r1\"\n  host: \"0.0.0.0\"\n  port: 11235\n
  reload: False\n  timeout_keep_alive: 300\n\n# Default LLM Configuration\nllm:\n  provider: \"gemini/gemini-2.5-flash-preview-04-17\"\n  api_key_env: \"${var.litellm_crawl4ai_key}\"\n  api_base: \"https://litellm.${var.tld}\"\n\n# Redis Configuration\nredis:\n  host: \"redis.service.consul\"\n  port: 6379\n  key_prefix: \"crawl4ai:\"\n\n# Rate Limiting Configuration\nrate_limit:\n  enabled: true\n  limits:\n    default: \"60/minute\"\n    html: \"120/minute\" \n    screenshot: \"30/minute\"\n    pdf: \"15/minute\"\n  storage_uri: \"redis://redis.service.consul:6379/2\"\n\n# Security Configuration\nsecurity:\n  enabled: false\n  jwt_enabled: false\n  https_redirect: false\n  trusted_hosts: [\"*\"]\n  headers:\n    x_content_type_options: \"nosniff\"\n    x_frame_options: \"DENY\"\n    content_security_policy: \"default-src 'self'\"\n    strict_transport_security: \"max-age=63072000; includeSubDomains\"\n\n# Crawler Configuration\ncrawler:\n  memory_threshold_percent: 95.0\n  rate_limiter:\n    base_delay: [1.0, 2.0]\n  timeouts:\n    stream_init: 30.0\n    batch_process: 300.0\n\n# Logging Configuration\nlogging:\n  level: \"INFO\"\n  format: \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n\n# Observability Configuration\nobservability:\n  prometheus:\n    enabled: True\n    endpoint: \"/metrics\"\n  health_check:\n    endpoint: \"/health\"\nEOH\n\n        destination   = \"/app/data/config/config.yml\"\n        change_mode   = \"restart\"\n      }\n\n      resources {\n        cpu    = 1000\n        memory = 1024\n      }\n\n      env {\n        PORT = \"11235\"\n        CONFIG_PATH = \"/app/data/config/config.yml\"\n        OPENAI_API_KEY = \"${var.litellm_crawl4ai_key}\"\n      }\n\n      service {\n        port = \"http\"\n        name = \"crawl4ai\"\n        tags = [\n          \"traefik.enable=true\",\n          \"metrics\"\n        ]\n\n        check {\n          type     = \"http\"\n          path     = \"/health\"\n          port     = \"http\"\n
          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n    }\n  }\n}\n\nvariable \"region\" {}\nvariable \"tld\" {}\nvariable \"shared_dir\" {}\nvariable \"litellm_crawl4ai_key\" {}"
  },
  {
    "path": "nomad_jobs/ai-ml/crawl4ai/volume.hcl",
    "content": "id           = \"crawl4ai-data\"\nexternal_id  = \"crawl4ai-data\"\nname         = \"crawl4ai-data\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"5GiB\"\ncapacity_max = \"5GiB\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"block-device\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\", \"nodiratime\", \"data=ordered\"]\n}"
  },
  {
    "path": "nomad_jobs/ai-ml/litellm/nomad.job",
    "content": "job \"litellm\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/ai-ml/litellm/nomad.job\"\n      version = \"6\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"ai\" {\n    count = 1 \n    network {\n      port \"http\" {\n        host_network = \"lan\"\n        to = \"4000\"  # LiteLLM default port is 8000\n      }\n    }\n\n    volume \"litellm\" {\n      type      = \"csi\"\n      read_only = false\n      source    = \"litellm\"\n      access_mode = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"litellm\" {\n      driver = \"docker\"\n      config {\n        image = \"ghcr.io/berriai/litellm:main-latest\"\n        ports = [\"http\"]\n        volumes = [\n          \"local/config.yaml:/app/config.yaml\",\n        ]\n      }\n\n      volume_mount {\n        volume      = \"litellm\"\n        destination = \"/data\"\n        read_only   = false\n      }\n\n      env {\n        PORT = \"${NOMAD_PORT_http}\"\n        HOST = \"0.0.0.0\"\n        LITELLM_CONFIG_PATH = \"/app/config.yaml\"\n        OLLAMA_BASE_URL = \"${var.ollama_base_url}\"\n        AWS_ACCESS_KEY_ID = \"${var.aws_access_key}\"\n        AWS_SECRET_ACCESS_KEY = \"${var.aws_secret_key}\"\n        AWS_REGION = \"${var.bedrock_aws_region}\"\n        GOOGLE_API_KEY = \"${var.gemini_api_key}\"\n        LITELLM_MASTER_KEY = \"${var.litellm_master_key}\"\n        #LITELLM_SALT_KEY = \"${var.litellm_salt_key}\"  # Added salt key for credential encryption\n        DATABASE_URL = \"postgresql://postgres:${var.postgres_pass}@postgres.service.consul:5432/litellm\"\n  
      STORE_MODEL_IN_DB = \"True\"\n      }\n\n      template {\n        data = <<EOH\nmodel_list:\n  # Ollama models\n  - model_name: ollama/llama2\n    litellm_params:\n      model: ollama/llama2\n      api_base: ${var.ollama_base_url}\n\n  - model_name: gpt-3.5-turbo\n    litellm_params:\n      model: ollama/llama2\n      api_base: ${var.ollama_base_url}\n  \n  # AWS Bedrock - Claude 3.7 Sonnet\n  - model_name: anthropic.claude-3-7-sonnet-20250219-v1:0\n    litellm_params:\n      model: bedrock/eu.anthropic.claude-3-7-sonnet-20250219-v1:0\n      aws_access_key_id: ${var.aws_access_key}\n      aws_secret_access_key: ${var.aws_secret_key}\n      bedrock_aws_region: ${var.bedrock_aws_region}\n  \n  # Google Gemini Pro 2.5\n  - model_name: gemini\n    litellm_params:\n      api_key: ${var.gemini_api_key}\n      vertex_project: \"htg-infra\"\n      vertex_location: \"us-central1\"\n\n\nlitellm_settings:\n  drop_params: True\n  cache: True\n  cache_params:\n    type: redis\n    host: redis.service.consul\n    port: 6379\n    password: \"\"\n    namespace: litellm\n  # Log and trace settings\n  streaming: True\n  logging: True\n  # Added user management settings\n  user_api_key_backend: \"postgres\"\n  use_queue: True\n  num_workers: 4\n\nenvironment_variables:\n  AWS_ACCESS_KEY_ID: ${var.aws_access_key}\n  AWS_SECRET_ACCESS_KEY: ${var.aws_secret_key}\n  AWS_REGION: ${var.bedrock_aws_region}\n  GOOGLE_API_KEY: ${var.gemini_api_key}\n  LITELLM_MASTER_KEY: ${var.litellm_master_key}\n  LITELLM_SALT_KEY: ${var.litellm_salt_key}\n  DATABASE_URL: postgresql://postgres:${var.postgres_pass}@postgres.service.consul:5432/litellm\nEOH\n        destination = \"local/config.yaml\"\n        env         = false\n      }\n\n      service {\n        port = \"http\"\n        name = \"litellm\"\n        tags = [\n          \"traefik.enable=true\"\n        ]\n        check {\n          type     = \"tcp\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n        }\n      }\n\n    
  resources {\n        cpu    = 800\n        memory = 1536\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\nvariable \"tld\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n\nvariable \"ollama_base_url\" {\n    type = string\n    description = \"Base URL for the Ollama service\"\n    default = \"http://ollama.service.consul:11434\"\n}\n\nvariable \"aws_access_key\" {\n    type = string\n    description = \"AWS Access Key ID for Bedrock access\"\n}\n\nvariable \"aws_secret_key\" {\n    type = string\n    description = \"AWS Secret Access Key for Bedrock access\"\n}\n\nvariable \"bedrock_aws_region\" {\n    type = string\n    description = \"AWS Region for Bedrock\"\n    default = \"eu-central-1\"\n}\n\nvariable \"gemini_api_key\" {\n    type = string\n    description = \"Google API Key for Gemini access\"\n}\n\nvariable \"litellm_master_key\" {\n    type = string\n    description = \"Master key for LiteLLM authentication\"\n}\n\nvariable \"litellm_salt_key\" {\n    type = string\n    description = \"Salt key for encrypting provider credentials\"\n}\n\nvariable \"postgres_pass\" {\n    type = string\n    description = \"Password for PostgreSQL database\"\n}"
  },
  {
    "path": "nomad_jobs/ai-ml/litellm/volume.hcl",
    "content": "id           = \"litellm\"\nexternal_id  = \"litellm\"\nname         = \"litellm\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"1GiB\"\ncapacity_max = \"1GiB\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"block-device\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\"]\n}"
  },
  {
    "path": "nomad_jobs/ai-ml/manyfold/3dprints-volume.hcl",
    "content": "id           = \"3dprints\"\nexternal_id  = \"3dprints\"\nname         = \"3dprints\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"40GiB\"\ncapacity_max = \"40GiB\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"block-device\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\"]\n}\n"
  },
  {
    "path": "nomad_jobs/ai-ml/manyfold/nomad.job",
    "content": "job \"manyfold\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/ai-ml/manyfold/nomad.job\"\nversion = \"4\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"downloaders\" {\n    count = 1 \n    network {\n      port \"http\" {\n        host_network = \"lan\"\n        to = \"3214\"\n      }\n    }\n\n    volume \"manyfold\" {\n      type      = \"csi\"\n      read_only = false\n      source    = \"manyfold\"\n      access_mode = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n    volume \"3dprints\" {\n      type      = \"csi\"\n      read_only = false\n      source    = \"3dprints\"\n      access_mode = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"manyfold\" {\n      driver = \"docker\"\n      config {\n        image = \"ghcr.io/manyfold3d/manyfold-solo:0.137.0\"\n        ports = [\"http\"]\n      }\n\n      volume_mount {\n        volume      = \"manyfold\"\n        destination = \"/config\"\n        read_only   = false\n      }\n\n      volume_mount {\n        volume      = \"3dprints\"\n        destination = \"/libraries\"\n        read_only   = false\n      }\n\n      env {\n        PUID = \"1000\"\n        PGID = \"1000\"\n        TZ = \"Etc/UTC\"\n\tSECRET_KEY_BASE = \"${var.manyfold_secret_key}\"\n      }\n\n      service {\n        port = \"http\"\n\tname = \"manyfold\"\n        tags = [\n          \"traefik.enable=true\",\n          \"traefik.http.middlewares.httpsRedirect.redirectscheme.scheme=https\",\n          
\"traefik.http.routers.${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_TASK_NAME}.${var.tld}\",\n          \"traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=forward-auth\"\n        ]\n        check {\n          type     = \"tcp\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      resources {\n        cpu    = 100\n        memory = 1024\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\nvariable \"tld\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n\nvariable \"downloads_dir\" {\n    type = string\n}\n\nvariable \"music_dir\" {\n    type = string\n}\n\nvariable \"manyfold_secret_key\" {\n    type = string\n}\n"
  },
  {
    "path": "nomad_jobs/ai-ml/manyfold/prints_volume.hcl",
    "content": "id           = \"3dprints\"\nexternal_id  = \"3dprints\"\nname         = \"3dprints\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"40GiB\"\ncapacity_max = \"40GiB\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"block-device\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\"]\n}\n\n"
  },
  {
    "path": "nomad_jobs/ai-ml/manyfold/volume.hcl",
    "content": "id           = \"manyfold\"\nexternal_id  = \"manyfold\"\nname         = \"manyfold\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"40GiB\"\ncapacity_max = \"40GiB\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"block-device\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\"]\n}\n\n"
  },
  {
    "path": "nomad_jobs/ai-ml/ollama/nomad.job",
    "content": "job \"ollama\" {\n  region = var.region\n  datacenters = [\"cheese\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/ai-ml/ollama/nomad.job\"\n    version = \"4\"\n  }\n\n  group \"web\" {\n    network {\n      mode = \"host\"\n      port \"web\" {\n        static = \"11434\"\n        host_network = \"lan\"\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"ollama\" {\n      driver = \"docker\"\n\n      config {\n        image = \"ollama/ollama\"\n        runtime = \"nvidia\"\n        dns_servers = [var.dns_server_ip]\n        volumes = [\n          \"${var.ollama_data_dir}:/root/.ollama\",\n        ]\n        ports = [\"web\"]\n      }\n\n      env {\n        # Make the GPU visible to this container.\n        NVIDIA_VISIBLE_DEVICES       = \"all\"\n        NVIDIA_DRIVER_CAPABILITIES   = \"compute,utility\"\n        # Pre-pull models on startup\n        OLLAMA_MODELS               = \"llama3.2:3b,codellama:7b\"\n      }\n\n      service {\n        name = \"${NOMAD_JOB_NAME}\"\n        tags = [\"traefik.enable=true\"]\n        port = \"web\"\n\n        check {\n          type     = \"tcp\"\n          port     = \"web\"\n          interval = \"30s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      resources {\n        cpu    = \"200\"\n        memory = \"7000\"\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n\nvariable \"ollama_data_dir\" {\n  type = string\n}\n\nvariable \"datacenter\" {\n  type = string\n}\n\nvariable \"dns_server_ip\" {\n  type = string\n}\n"
  },
  {
    "path": "nomad_jobs/ai-ml/open-webui/nomad.job",
    "content": "job \"open-webui\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/ai-ml/open-webui/nomad.job\"\n      version = \"3\"  // Right-size memory 1024MB -> 768MB\n  }\n\n  group \"web\" {\n    network {\n      mode = \"host\"\n      port \"web\" {\n        to = \"8080\"\n        host_network = \"lan\"\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    task \"open-webui\" {\n      driver = \"docker\"\n\n      config {\n        image = \"ghcr.io/open-webui/open-webui:v0.8.12\"\n        dns_servers = [var.dns_server_ip]\n        volumes = [\n          \"${var.shared_dir}open-webui:/app/backend/data\",\n        ]\n        ports = [\"web\"]\n      }\n\n     env {\n        OLLAMA_BASE_URL= var.ollama_base_url\n        WEBUI_SECRET_KEY = var.webui_secret_key\n     }\n      service {\n        name = \"${NOMAD_JOB_NAME}\"\n        tags = [\"traefik.enable=true\"]\n        port = \"web\"\n\n        check {\n          type     = \"tcp\"\n          port     = \"web\"\n          interval = \"30s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      resources {\n        cpu    = \"200\"\n        memory = \"768\"\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n\nvariable \"ollama_base_url\" {\n  type = string\n}\n\nvariable \"webui_secret_key\" {\n  type = string\n}\n\nvariable \"datacenter\" {\n  type = string\n}\n\nvariable \"dns_server_ip\" {\n  type = string\n}\n"
  },
  {
    "path": "nomad_jobs/ai-ml/paperless-ai/nomad.job",
    "content": "job \"paperless-ai\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/ai-ml/paperless-ai/nomad.job\"\nversion = \"2\"\n  }\n\n  group \"web\" {\n    network {\n      mode = \"host\"\n      port \"web\" {\n        to = \"3000\"\n        host_network = \"lan\"\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    task \"paperless-ai\" {\n      driver = \"docker\"\n\n      config {\n        image = \"clusterzx/paperless-ai\"\n        dns_servers = [\"192.168.50.2\"]\n        volumes = [\n          \"${var.shared_dir}paperless-ai:/app/data\",\n        ]\n        ports = [\"web\"]\n      }\n\n      service {\n        name = \"${NOMAD_JOB_NAME}\"\n        tags = [\"traefik.enable=true\"]\n        port = \"web\"\n\n        check {\n          type     = \"tcp\"\n          port     = \"web\"\n          interval = \"30s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      resources {\n        cpu    = \"200\"\n        memory = \"2048\"\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n"
  },
  {
    "path": "nomad_jobs/ai-ml/pgvector-client/nomad.job",
    "content": "job \"pgvector-client-example\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"batch\"\n\n  meta {\n    job_file = \"nomad_jobs/ai-ml/pgvector-client/nomad.job\"\n    version = \"1\"  // Initial version\n  }\n\n  group \"client\" {\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    task \"embedding-example\" {\n      driver = \"docker\"\n\n      config {\n        image = \"python:3.14-slim\"\n        command = \"python\"\n        args = [\n          \"/local/embedding-example.py\"\n        ]\n      }\n\n      env {\n        PGVECTOR_HOST     = \"pgvector.service.consul\"\n        PGVECTOR_PORT     = \"5433\"\n        PGVECTOR_USER     = \"postgres\"\n        PGVECTOR_PASSWORD = \"${var.pgvector_pass}\"\n        PGVECTOR_DB       = \"embeddings\"\n      }\n\n      template {\n        data = <<EOH\n#!/usr/bin/env python3\nimport os\nimport time\nimport psycopg2\nimport numpy as np\nfrom psycopg2.extras import execute_values\n\n# PostgreSQL connection parameters\npg_host = os.environ.get('PGVECTOR_HOST', 'pgvector.service.consul')\npg_port = os.environ.get('PGVECTOR_PORT', '5433')\npg_user = os.environ.get('PGVECTOR_USER', 'postgres')\npg_password = os.environ.get('PGVECTOR_PASSWORD', '')\npg_db = os.environ.get('PGVECTOR_DB', 'embeddings')\n\n# Function to create random embeddings for demo purposes\ndef create_random_embedding(dim=1536):\n    \"\"\"Create a random normalized embedding vector.\"\"\"\n    vec = np.random.randn(dim)\n    # Normalize to unit vector (common practice for embeddings)\n    vec = vec / np.linalg.norm(vec)\n    return vec.tolist()\n\n# Connect to PostgreSQL with pgvector\nprint(f\"Connecting to pgvector at {pg_host}:{pg_port}\")\nconn = psycopg2.connect(\n    host=pg_host,\n    port=pg_port,\n    user=pg_user,\n    password=pg_password,\n    dbname=pg_db\n)\n\ncursor = conn.cursor()\n\n# Ensure pgvector extension is 
enabled\nprint(\"Ensuring pgvector extension is enabled...\")\ncursor.execute(\"CREATE EXTENSION IF NOT EXISTS vector\")\n\n# Create a table for storing document embeddings\nprint(\"Creating documents table...\")\ncursor.execute(\"\"\"\n    CREATE TABLE IF NOT EXISTS documents (\n        id SERIAL PRIMARY KEY,\n        content TEXT NOT NULL,\n        embedding VECTOR(1536) NOT NULL,\n        metadata JSONB,\n        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP\n    )\n\"\"\")\n\n# Create an index for efficient similarity search\nprint(\"Creating vector index (this might take a while for large tables)...\")\ntry:\n    cursor.execute(\"\"\"\n        CREATE INDEX IF NOT EXISTS documents_embedding_idx\n        ON documents\n        USING ivfflat (embedding vector_cosine_ops)\n        WITH (lists = 100)\n    \"\"\")\nexcept Exception as e:\n    print(f\"Warning: Could not create index: {e}\")\n    print(\"Continuing without index...\")\n\n# Sample documents\nprint(\"Inserting sample documents...\")\ndocuments = [\n    \"The quick brown fox jumps over the lazy dog\",\n    \"Machine learning models can process vector embeddings efficiently\",\n    \"PostgreSQL with pgvector extension provides vector similarity search\",\n    \"Vector databases are essential for modern AI applications\",\n    \"Semantic search uses embeddings to find relevant results\"\n]\n\n# Generate random embeddings and insert documents\ndata = []\nfor doc in documents:\n    embedding = create_random_embedding()\n    data.append((doc, embedding, {\"source\": \"example\"}))\n\nexecute_values(\n    cursor,\n    \"\"\"\n    INSERT INTO documents (content, embedding, metadata)\n    VALUES %s\n    RETURNING id\n    \"\"\",\n    data,\n    template=\"(%s, %s::vector, %s)\"\n)\n\nprint(f\"Inserted {len(documents)} documents with embeddings\")\n\n# Perform a similarity search\nprint(\"\\nPerforming similarity search...\")\nquery_embedding = create_random_embedding()\n\ncursor.execute(\"\"\"\n    SELECT id, 
content, 1 - (embedding <=> %s) AS similarity\n    FROM documents\n    ORDER BY embedding <=> %s\n    LIMIT 3\n\"\"\", (query_embedding, query_embedding))\n\nresults = cursor.fetchall()\nprint(\"\\nTop 3 most similar documents:\")\nfor id, content, similarity in results:\n    print(f\"ID: {id}, Similarity: {similarity:.4f}\")\n    print(f\"Content: {content}\")\n    print(\"-\" * 50)\n\n# Commit and close\nconn.commit()\ncursor.close()\nconn.close()\nprint(\"Example completed successfully!\")\nEOH\n        destination = \"local/embedding-example.py\"\n      }\n\n      resources {\n        cpu    = 500\n        memory = 512\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\nvariable \"pgvector_pass\" {\n    type = string\n    description = \"Admin password for pgvector PostgreSQL server\"\n}\n"
  },
  {
    "path": "nomad_jobs/ai-ml/radbot/nomad-dev.job",
    "content": "job \"radbot-dev\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/ai-ml/radbot/nomad-dev.job\"\n    version  = \"1\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"web\" {\n    count = 1\n\n    network {\n      port \"http\" {\n        host_network = \"lan\"\n        to           = 8000\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"60s\"\n      healthy_deadline = \"5m\"\n      auto_revert      = true\n    }\n\n    task \"radbot-dev\" {\n      driver = \"docker\"\n\n      config {\n        image       = \"ghcr.io/perrymanuk/radbot:dev\"\n        dns_servers = [var.dns_server_ip]\n        ports       = [\"http\"]\n        volumes     = [\n          \"local/config.yaml:/app/config.yaml\",\n        ]\n      }\n\n      env {\n        RADBOT_CREDENTIAL_KEY = var.radbot_credential_key\n        RADBOT_ADMIN_TOKEN    = var.radbot_admin_token\n        RADBOT_CONFIG_FILE    = \"/app/config.yaml\"\n        RADBOT_ENV            = \"dev\"\n      }\n\n      template {\n        data = <<EOH\ndatabase:\n  host: postgres.service.consul\n  port: 5432\n  user: postgres\n  password: ${var.postgres_pass}\n  db_name: radbot_dev\nEOH\n        destination = \"local/config.yaml\"\n        env         = false\n      }\n\n      service {\n        port = \"http\"\n        name = \"radbot-dev\"\n        tags = [\n          \"traefik.enable=true\",\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/health\"\n          interval = \"30s\"\n          timeout  = \"5s\"\n          check_restart {\n            limit           = 3\n            grace           = \"120s\"\n            ignore_warnings = false\n          }\n        
}\n      }\n\n      resources {\n        cpu    = 1000\n        memory = 2048\n      }\n    }\n  }\n}\n\n# ------------------------------------------------------------------\n# Variables — only bootstrap secrets\n# ------------------------------------------------------------------\n\nvariable \"region\" {\n  type = string\n}\n\nvariable \"dns_server_ip\" {\n  type = string\n}\n\nvariable \"postgres_pass\" {\n  type        = string\n  description = \"PostgreSQL password (needed to connect to DB where all config lives)\"\n}\n\nvariable \"radbot_credential_key\" {\n  type        = string\n  description = \"Fernet master key for encrypting credentials/config in the DB\"\n}\n\nvariable \"radbot_admin_token\" {\n  type        = string\n  description = \"Bearer token for /admin/ — the only pre-shared secret\"\n}\n"
  },
  {
    "path": "nomad_jobs/ai-ml/radbot/nomad.job",
    "content": "job \"radbot\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/ai-ml/radbot/nomad.job\"\n    version  = \"2\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"web\" {\n    count = 1\n\n    network {\n      port \"http\" {\n        host_network = \"lan\"\n        to           = 8000\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"60s\"\n      healthy_deadline = \"5m\"\n      auto_revert      = true\n    }\n\n    task \"radbot\" {\n      driver = \"docker\"\n\n      config {\n        image       = \"ghcr.io/perrymanuk/radbot:v0.128\"\n        dns_servers = [var.dns_server_ip]\n        ports       = [\"http\"]\n        volumes     = [\n          \"local/config.yaml:/app/config.yaml\",\n          \"${var.shared_dir}ai-intel:/mnt/ai-intel\",\n        ]\n      }\n\n      # Bootstrap-only env vars:\n      # - RADBOT_CREDENTIAL_KEY: decrypt credentials/config stored in the DB\n      # - RADBOT_ADMIN_TOKEN:    access /admin/ to manage everything else\n      # - RADBOT_MCP_TOKEN:      bootstrap bearer for MCP bridge HTTP\n      #                          (credential-store `mcp_token` wins once set)\n      # - RADBOT_WIKI_PATH:      wiki root inside the container (matches the\n      #                          ai-intel bind mount above)\n      # All other config (API keys, models, integrations, endpoints) is stored\n      # encrypted in the radbot_credentials table and managed via /admin/ UI.\n      env {\n        RADBOT_CREDENTIAL_KEY = var.radbot_credential_key\n        RADBOT_ADMIN_TOKEN    = var.radbot_admin_token\n        RADBOT_MCP_TOKEN      = var.radbot_mcp_token\n        RADBOT_WIKI_PATH      = \"/mnt/ai-intel\"\n        
RADBOT_CONFIG_FILE    = \"/app/config.yaml\"\n      }\n\n      # Minimal bootstrap config — just enough to connect to the DB.\n      # Everything else is loaded from the DB credential store at startup.\n      template {\n        data = <<EOH\ndatabase:\n  host: postgres.service.consul\n  port: 5432\n  user: postgres\n  password: ${var.postgres_pass}\n  db_name: radbot_todos\nEOH\n        destination = \"local/config.yaml\"\n        env         = false\n      }\n\n      service {\n        port = \"http\"\n        name = \"radbot\"\n        tags = [\n          \"traefik.enable=true\",\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/health\"\n          interval = \"30s\"\n          timeout  = \"5s\"\n          check_restart {\n            limit           = 3\n            grace           = \"120s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 1000\n        memory = 2048\n      }\n    }\n  }\n}\n\n# ------------------------------------------------------------------\n# Variables — only bootstrap secrets\n# ------------------------------------------------------------------\n\nvariable \"region\" {\n  type = string\n}\n\nvariable \"dns_server_ip\" {\n  type = string\n}\n\nvariable \"postgres_pass\" {\n  type        = string\n  description = \"PostgreSQL password (needed to connect to DB where all config lives)\"\n}\n\nvariable \"radbot_credential_key\" {\n  type        = string\n  description = \"Fernet master key for encrypting credentials/config in the DB\"\n}\n\nvariable \"radbot_admin_token\" {\n  type        = string\n  description = \"Bearer token for /admin/ — the only pre-shared secret\"\n}\n\nvariable \"radbot_mcp_token\" {\n  type        = string\n  description = \"Bootstrap bearer token for the MCP bridge HTTP transport. 
The credential-store entry `mcp_token` takes priority once set, so this is only used before the first rotate.\"\n}\n\nvariable \"shared_dir\" {\n  type        = string\n  description = \"Base path on shared-mount nodes; jobs append their own subdirectory (this job mounts <shared_dir>/ai-intel at /mnt/ai-intel).\"\n}\n"
  },
  {
    "path": "nomad_jobs/core-infra/coredns/README.md",
    "content": "### Coredns\nyou can place extra configuration for coredns in the consul kv store at `apps/coredns/corefile` and it will be deployed with the job\n\n"
  },
  {
    "path": "nomad_jobs/core-infra/coredns/nomad.job",
    "content": "job \"coredns\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type = \"service\"\n  priority = 100\n\n  meta {\n      job_file = \"nomad_jobs/core-infra/coredns/nomad.job\"\n      version = \"10\"  // Write keepalived.conf directly instead of env.yaml\n  }\n\n  constraint {\n    attribute = \"${meta.dns}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"dns\" {\n    count = 2\n    \n    constraint {\n      operator = \"distinct_hosts\"\n      value    = \"true\"\n    }\n    \n    network {\n      mode = \"host\"\n      port \"dns\" {\n        static = \"53\"\n        host_network = \"lan\"\n      }\n      port \"metrics\" {\n        static = \"9153\"\n        host_network = \"lan\"\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"60s\"\n      auto_revert      = true\n      auto_promote     = true\n      canary           = 2\n    }\n\n    task \"keepalived-dns\" {\n      driver = \"docker\"\n      \n      lifecycle {\n        hook = \"prestart\"\n        sidecar = true\n      }\n      \n      config {\n        image = \"osixia/keepalived:2.3.4\"\n        network_mode = \"host\"\n        force_pull = false\n        volumes = [\n          \"local/keepalived.conf:/etc/keepalived/keepalived.conf\"\n        ]\n        cap_add = [\"NET_ADMIN\", \"NET_BROADCAST\", \"NET_RAW\"]\n      }\n\n      template {\n        destination = \"local/keepalived.conf\"\n        change_mode = \"restart\"\n        splay       = \"1m\"\n        data        = <<EOH\nvrrp_instance VI_1 {\n    state BACKUP\n    interface {{ sockaddr \"GetPrivateInterfaces | include \\\"network\\\" \\\"192.168.50.0/24\\\" | attr \\\"name\\\"\" }}\n    virtual_router_id 51\n    priority 100\n    advert_int 1\n\n    unicast_peer {\n{{- range service \"coredns\" }}\n        {{ .Address }}\n{{- end }}\n    }\n\n  
  virtual_ipaddress {\n        192.168.50.2/24\n        192.168.50.3/24\n    }\n}\nEOH\n      }\n      \n      resources {\n        cpu    = 100\n        memory = 64\n      }\n    }\n\n    task \"coredns\" {\n      driver = \"docker\"\n      \n      config {\n        image = \"coredns/coredns:1.14.2\"\n        network_mode = \"host\"\n        force_pull = false\n        ports = [\"dns\", \"metrics\"]\n        args = [\"-conf\", \"/local/coredns/corefile\"]\n      }\n\n      service {\n        port = \"dns\"\n        name = \"coredns\"\n        tags = [\"coredns\"]\n        check {\n          type     = \"tcp\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n        }\n      }\n      \n      service {\n        port = \"metrics\"\n        name = \"coredns\"\n        tags = [\"metrics\", \"coredns\"]\n      }\n\n      template {\n        data = <<EOH\n. {\n  bind {{ env \"NOMAD_IP_dns\" }} 192.168.50.2 192.168.50.3\n  forward . 8.8.8.8\n  log\n  errors\n  prometheus {{ env \"NOMAD_IP_metrics\" }}:9153\n}\nconsul.:53 {\n  bind {{ env \"NOMAD_IP_dns\" }} 192.168.50.2 192.168.50.3\n  forward . {{ env \"NOMAD_IP_dns\" }}:8600\n  log\n  prometheus {{ env \"NOMAD_IP_metrics\" }}:9153\n}\nfritz.box.:53 {\n  bind {{ env \"NOMAD_IP_dns\" }} 192.168.50.2 192.168.50.3\n  forward . 192.168.50.1:53\n  log\n  prometheus {{ env \"NOMAD_IP_metrics\" }}:9153\n}\n${var.tld}. {\n  bind {{ env \"NOMAD_IP_dns\" }} 192.168.50.2 192.168.50.3\n\n  file /local/coredns/zones/db.home.lab ${var.tld}\n\n}\nk8s. 
{\n  bind {{ env \"NOMAD_IP_dns\" }} 192.168.50.2 192.168.50.3\n\n  file /local/coredns/zones/db.k8s k8s\n\n}\nEOH\n        destination = \"local/coredns/corefile\"\n        env         = false\n        change_mode = \"signal\"\n        change_signal = \"SIGHUP\"\n        left_delimiter  = \"{{\"\n        right_delimiter = \"}}\"\n      }\n\n      template {\n        change_mode   = \"signal\"\n        change_signal = \"SIGUSR1\"\n        destination = \"local/coredns/zones/db.home.lab\"\n        data = <<EOH\n$ORIGIN ${var.tld}.\n$TTL    604800\n${var.tld}.         IN SOA\tns1.${var.tld}. admin.${var.tld}. (\n         {{ timestamp \"unix\" }}        ; Serial, current unix timestamp\n             604800        ; Refresh\n              86400        ; Retry\n            2419200        ; Expire\n             604800 )      ; Negative Cache TTL\n\n; name servers - NS records\n${var.tld}.         IN NS\t ns1.${var.tld}.\n${var.tld}.         IN NS\t ns2.${var.tld}.\n\n; name servers - A records\nns1                      IN A   192.168.50.1\nns2                      IN A   192.168.50.2\n\n{{- /*  Point domains to the floating IP from keepalived */}}\n; services - A records\nlab.${var.tld}.         IN A   192.168.50.20\n*                       IN A   192.168.50.20\n@                       IN A   192.168.50.20\n\nEOH\n      }\n\n      template {\n        change_mode   = \"signal\"\n        change_signal = \"SIGUSR1\"\n        destination = \"local/coredns/zones/db.k8s\"\n        data = <<EOH\n$ORIGIN ${var.tld}.\n$TTL    604800\nk8s.         IN SOA\tns1.k8s. admin.k8s. (\n         {{ timestamp \"unix\" }}        ; Serial, current unix timestamp\n             604800        ; Refresh\n              86400        ; Retry\n            2419200        ; Expire\n             604800 )      ; Negative Cache TTL\n\n; name servers - NS records\nk8s.         IN NS\t ns1.k8s.\nk8s.         
IN NS\t ns2.k8s.\n\n; name servers - A records\nns1                      IN A   192.168.50.1\nns2                      IN A   192.168.50.2\n\n{{- /*  Point domains to the floating IP from keepalived */}}\n; services - A records\nlab.k8s.                IN A   192.168.50.93\n*.k8s.                  IN A   192.168.50.93\n@                       IN A   192.168.50.93\n\nEOH\n      }\n      \n      resources {\n        cpu    = 100\n        memory = 128\n      }\n    }\n  }\n}\n\nvariable \"region\" {}\nvariable \"tld\" {}\n"
  },
  {
    "path": "nomad_jobs/core-infra/github-runner/nomad.job",
    "content": "job \"github-runner\" {\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/core-infra/github-runner/nomad.job\"\nversion = \"3\"\n  }\n\n  group \"runners\" {\n    count = 3\n    # Don't reschedule in case of failure or drain\n    reschedule {\n      attempts  = 0\n      unlimited = false\n    }\n   \n    restart {\n      attempts = 3\n      delay = \"30s\"\n      interval = \"3m\"\n      mode = \"delay\"\n    }\n\n    network {\n      port \"http\" { \n        host_network = \"lan\"\n        to = \"9252\" \n      }\n    }\n\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"runner\" {\n      driver = \"docker\"\n      kill_timeout = \"25s\"\n\n      template {\n        env         = true\n        destination = \"secrets/env\"\n        data        = <<-EOH\n        RUNNER_NAME_PREFIX = \"amd64\"\n        RUNNER_GROUP = \"Default\"\n        RUNNER_SCOPE = \"repo\"\n        REPO_URL = \"https://github.com/perrymanuk/hashi-homelab\"\n        ACCESS_TOKEN = \"${var.github_pat}\"\n        EPHEMERAL = \"0\"\n        DISABLE_AUTO_UPDATE = \"1\"\n        LABELS = \"${var.labels}\"\n        EOH\n      }\n\n      template {\n        data = <<-EOH\n        {\n        \t\"auths\": {\n        \t\t\"https://index.docker.io/v1/\": {\n              \"auth\": \"\"\n        \t\t}\n\t        }\n        }\n        EOH\n        destination = \"secrets/config.json\"\n      }\n\n      resources {\n        cpu    = 100\n        memory = 128\n      }\n\n      config {\n        image       = \"myoung34/github-runner:2.333.1\"\n        extra_hosts = [\"nomad.service.home:192.168.50.120\"]\n        ports       = [\"http\"]\n        userns_mode = \"host\"\n        privileged  = true\n        mounts = [\n          {\n            type     = \"bind\"\n            source   = \"/var/run/docker.sock\"\n            target   = \"/var/run/docker.sock\"\n         
   readonly = false\n            bind_options = {\n              propagation = \"rprivate\"\n            }\n          },\n          {\n            type     = \"bind\"\n            source   = \"secrets/config.json\"\n            target   = \"/root/.docker/config.json\"\n            readonly = false\n            bind_options = {\n              propagation = \"rprivate\"\n            }\n          }\n        ]\n      }\n    }\n  }\n}\n\n\nvariable \"labels\" {\n  type = string\n  default = \"self-hosted\"\n}\n\nvariable \"github_pat\" {}\n"
  },
  {
    "path": "nomad_jobs/core-infra/haproxy/nomad.job",
    "content": "job \"haproxy\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type = \"system\"\n\n  meta {\n      job_file = \"nomad_jobs/core-infra/haproxy/nomad.job\"\nversion = \"6\"\n  }\n\n  group \"lbs\" {\n    count = 1\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n    task \"haproxy\" {\n      driver = \"docker\"\n      service {\n        tags = [\"haproxy\"]\n        name = \"haproxy\"\n        port = \"http\"\n\n        check {\n          type     = \"tcp\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n        }\n\n      }\n      service {\n        tags = [\"metrics\", \"${NOMAD_ALLOC_ID}\"]\n        name = \"haproxy-metrics\"\n        port = \"metrics\"\n\n        check {\n          type     = \"tcp\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      service {\n        tags = [\"metrics\", \"${NOMAD_ALLOC_ID}\"]\n        name = \"service-mesh\"\n        port = \"http\"\n\n        check {\n          type     = \"tcp\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      config {\n        image = \"haproxy:3.3.6-alpine\"\n        args = [\"-W\", \"-f\", \"local/etc/haproxy.cfg\"]\n        network_mode = \"host\"\n      }\n\n      template {\ndata = <<EOH\nglobal\n  maxconn     20000\n  pidfile     /run/haproxy.pid\n  stats timeout 2m\n  daemon\n\ndefaults\n  retry-on all-retryable-errors\n  option http-use-htx\n  errorfile 503 local/etc/error503.http\n\nfrontend http\n  bind {{ env \"NOMAD_ADDR_http\" }}\n  # options\n  http-request add-header x-forwarded-proto http\n  maxconn 50000\n  mode http\n  timeout client 0s\n  timeout server 0s\n  # acls\n{{ range services }}{{ if .Tags | contains \"net-internal\" }}\n  acl {{ .Name }}_net-internal hdr_reg(Host) -i {{ .Name 
}}.homelab\n  acl {{ .Name }}_net-internal base_dom {{ .Name }}.homelab\n  use_backend {{ .Name }}_net-internal if {{ .Name }}_net-internal\n{{ end }}{{ end }}\n\n\n# services\n\n{{ range services -}}{{ if .Tags | contains \"net-internal\" -}}\nbackend {{ .Name }}_net-internal\n  mode http\n  option redispatch\n  retries 10\n\n  timeout connect 5s\n  timeout queue 30s\n  timeout server 30s\n\n  {{ range service .Name -}}\n  {{ if .Tags | contains \"net-internal\" -}}\n  server {{ .ID }} {{ .Address }}:{{ .Port }}\n  {{ end -}}\n  {{ end }}\n{{ end -}}\n{{ end -}}\n\nlisten {{ env \"NOMAD_IP_http\" }}\n  mode http\n\n  bind ${NOMAD_ADDR_metrics}\n\n  timeout client 30s\n  timeout connect 5s\n  timeout server  30s\n  timeout queue   30s\n\n  http-request use-service prometheus-exporter if { path /metrics }\n\n  stats enable\n  stats uri /\n  stats show-node\n  stats refresh 30s\n  stats show-legends\n\n\nEOH\n        destination = \"local/etc/haproxy.cfg\"\n        env         = false\n        change_mode = \"signal\"\n        change_signal = \"SIGUSR2\"\n      }\n\n      template {\ndata = <<EOH\nHTTP/1.0 503 Service Unavailable\nCache-Control: no-cache\nConnection: close\nContent-Type: text/plain\n\nError 503: The specified service was not found or has no allocations. Please check your service configuration and try again\n\nEOH\n        destination = \"local/etc/error503.http\"\n        env         = false\n        change_mode = \"signal\"\n        change_signal = \"SIGHUP\"\n      }\n\n      resources {\n        cpu = 100\n        memory = 64\n        network {\n          port \"http\" { \n            static = \"80\" \n          }\n          port \"metrics\" {}\n        }\n      }\n    }\n  }\n}\n\n\n"
  },
  {
    "path": "nomad_jobs/core-infra/iscsi-csi-plugin/controller.job",
    "content": "job \"democratic-csi-iscsi-controller\" {\n  \n  meta {\n  job_file = \"nomad_jobs/core-infra/iscsi-csi-plugin/controller.job\"\n  }\ndatacenters = [\"dc1\"]\n\n  group \"controller\" {\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    task \"plugin\" {\n      driver = \"docker\"\n\n      config {\n        image = \"docker.io/democraticcsi/democratic-csi:v1.9.5\"\n\n        args = [\n          \"--csi-version=1.5.0\",\n          # must match the csi_plugin.id attribute below\n          \"--csi-name=org.democratic-csi.iscsi\",\n          \"--driver-config-file=${NOMAD_TASK_DIR}/driver-config-file.yaml\",\n          \"--log-level=info\",\n          \"--csi-mode=controller\",\n          \"--server-socket=/csi/csi.sock\",\n        ]\n      }\n\n      template {\n        destination = \"${NOMAD_TASK_DIR}/driver-config-file.yaml\"\n\n        data = <<EOH\ndriver: freenas-iscsi\ninstance_id:\nhttpConnection:\n  protocol: https\n  host: 192.168.50.208\n  port: 443\n  # use only 1 of apiKey or username/password\n  # if both are present, apiKey is preferred\n  # apiKey is only available starting in TrueNAS-12\n  apiKey: ${var.truenas_api_key}\n  username: iscsi-no\n  password: \"${var.truenas_iscsi_pass}\"\n  allowInsecure: true\n  # use apiVersion 2 for TrueNAS-12 and up (will work on 11.x in some scenarios as well)\n  apiVersion: 2\nsshConnection:\n  host: 192.168.50.208\n  port: 22\n  username: root\n  # use either password or key\n  password: \"${var.truenas_iscsi_pass}\"\nzfs:\n  # TrueNAS SCALE 24.10+ (Electric Eel) paths differ from FreeBSD defaults\n  cli:\n    paths:\n      zfs: /usr/sbin/zfs\n      zpool: /usr/sbin/zpool\n      sudo: /usr/bin/sudo\n      chroot: /usr/sbin/chroot\n  \n  # can be used to set arbitrary values on the dataset/zvol\n  # can use handlebars templates with the parameters from the storage class/CO\n  # TODO: set up handlebars templates to make 
this far more awesome\n  #datasetProperties:\n  #   \"org.freenas:description\": \"created via democratic-csi\"\n\n  datasetParentName: ssd-vms0/nomad/vols\n  # do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap\n  # they may be siblings, but neither should be nested in the other\n  detachedSnapshotsDatasetParentName: ssd-vms0/nomad/snaps\n  zvolCompression: \"lz4\"\n  zvolDedupe: \"\"\n  zvolEnableReservation: False\n  zvolBlocksize: \"\"\niscsi:\n  targetPortal: \"192.168.50.208:3260\"\n  targetPortals: []\n  interface:\n\n  # MUST ensure uniqueness\n  # full iqn limit is 223 bytes, plan accordingly\n  # default is //template name isn't defined!\n  #nameTemplate: \"{ { parameters.[csi.storage.k8s.io/pvc/namespace] }}-{ { parameters.[csi.storage.k8s.io/pvc/name] }}\"\n  namePrefix: csi-\n  nameSuffix: \"-discovery\"\n  # add as many as needed\n  targetGroups:\n    # get the correct ID from the \"portal\" section in the UI\n    - targetGroupPortalGroup: 1\n      # get the correct ID from the \"initiators\" section in the UI\n      targetGroupInitiatorGroup: 1\n      # None, CHAP, or CHAP Mutual\n      targetGroupAuthType: None\n      # get the correct ID from the \"Authorized Access\" section of the UI\n      # only required if using Chap\n      #targetGroupAuthGroup:\n\n  extentInsecureTpc: true\n  extentXenCompat: false\n  extentDisablePhysicalBlocksize: true\n  # 512, 1024, 2048, or 4096,\n  extentBlocksize: 512\n  # \"\" (let FreeNAS decide, currently defaults to SSD), Unknown, SSD, 5400, 7200, 10000, 15000\n  extentRpm: \"SSD\"\n  # 0-100 (0 == ignore)\n  extentAvailThreshold: 0\nEOH\n      }\n\n      csi_plugin {\n        # must match --csi-name arg\n        id        = \"org.democratic-csi.iscsi\"\n        type      = \"controller\"\n        mount_dir = \"/csi\"\n      }\n\n      resources {\n        cpu    = 500\n        memory = 128\n      }\n    }\n  }\n}\n\nvariable \"truenas_api_key\" {}\nvariable \"truenas_iscsi_pass\" {}\n"
  },
  {
    "path": "nomad_jobs/core-infra/iscsi-csi-plugin/node.job",
    "content": "job \"democratic-csi-iscsi-node\" {\n  \n  meta {\n  job_file = \"nomad_jobs/core-infra/iscsi-csi-plugin/node.job\"\n  }\ndatacenters = [\"dc1\", \"cheese\"]\n  priority = 100\n  # you can run node plugins as service jobs as well, but this ensures\n  # that all nodes in the DC have a copy\n  type = \"system\"\n\n  group \"nodes\" {\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    task \"plugin\" {\n      driver = \"docker\"\n\n      env {\n        CSI_NODE_ID = \"${attr.unique.hostname}\"\n        \n        # if you run into a scenario where your iscsi volumes are zeroed each time they are mounted,\n        # you can configure the fs detection system used with the following envvar:\n        #FILESYSTEM_TYPE_DETECTION_STRATEGY = \"blkid\"\n      }\n\n      config {\n        image = \"docker.io/democraticcsi/democratic-csi:v1.9.5\"\n\n        args = [\n          \"--csi-version=1.5.0\",\n          # must match the csi_plugin.id attribute below\n          \"--csi-name=org.democratic-csi.iscsi\",\n          \"--driver-config-file=${NOMAD_TASK_DIR}/driver-config-file.yaml\",\n          \"--log-level=debug\",\n          \"--csi-mode=node\",\n          \"--server-socket=/csi/csi.sock\",\n        ]\n\n        # node plugins must run as privileged jobs because they\n        # mount disks to the host\n        privileged = true\n        ipc_mode = \"host\"\n        network_mode = \"host\"\n\n        mount {\n          type = \"bind\"\n          target = \"/host\"\n          source = \"/\"\n          readonly=false\n        }\n        \n        # if you run into a scenario where your iscsi volumes are zeroed each time they are mounted,\n        # you can try uncommenting the following additional mount block:\n        mount {\n          type     = \"bind\"\n          target   = \"/run/udev\"\n          source   = \"/run/udev\"\n          readonly = true\n        }\n      }\n\n      
template {\n        destination = \"${NOMAD_TASK_DIR}/driver-config-file.yaml\"\n\n        data = <<EOH\ndriver: freenas-iscsi\ninstance_id:\nhttpConnection:\n  protocol: https\n  host: 192.168.50.208\n  port: 443\n  # use only 1 of apiKey or username/password\n  # if both are present, apiKey is preferred\n  # apiKey is only available starting in TrueNAS-12\n  apiKey: ${var.truenas_api_key}\n  username: iscsi-no\n  password: \"${var.truenas_iscsi_pass}\"\n  allowInsecure: true\n  # use apiVersion 2 for TrueNAS-12 and up (will work on 11.x in some scenarios as well)\n  # leave unset for auto-detection\n  apiVersion: 2\nsshConnection:\n  host: 192.168.50.208\n  port: 22\n  username: root\n  # use either password or key\n  password: \"${var.truenas_iscsi_pass}\"\nzfs:\n  # can be used to override defaults if necessary\n  # the example below is useful for TrueNAS 12\n  #cli:\n  #  sudoEnabled: true\n  #\n  #  leave paths unset for auto-detection\n  #  paths:\n  #    zfs: /usr/local/sbin/zfs\n  #    zpool: /usr/local/sbin/zpool\n  #    sudo: /usr/local/bin/sudo\n  #    chroot: /usr/sbin/chroot\n  \n  # can be used to set arbitrary values on the dataset/zvol\n  # can use handlebars templates with the parameters from the storage class/CO\n  # TODO: set up handlebars templates to make this far more awesome\n  #datasetProperties:\n  #   \"org.freenas:description\": \"created via democratic-csi\"\n\n  datasetParentName: ssd-vms0/nomad/vols\n  # do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap\n  # they may be siblings, but neither should be nested in the other\n  detachedSnapshotsDatasetParentName: ssd-vms0/nomad/snaps\n  zvolCompression: \"\"\n  zvolDedupe: \"\"\n  zvolEnableReservation: False\n  zvolBlocksize: \"\"\niscsi:\n  targetPortal: \"192.168.50.208:3260\"\n  targetPortals: []\n  interface:\n\n  # MUST iensure uniqueness\n  # full iqn limit is 223 bytes, plan accordingly\n  # default is //template name isn't defined!\n  #nameTemplate: \"{ 
{ parameters.[csi.storage.k8s.io/pvc/namespace] }}-{ { parameters.[csi.storage.k8s.io/pvc/name] }}\"\n  namePrefix: csi-\n  nameSuffix: \"-discovery\"\n  # add as many as needed\n  targetGroups:\n    # get the correct ID from the \"portal\" section in the UI\n    - targetGroupPortalGroup: 1\n      # get the correct ID from the \"initiators\" section in the UI\n      targetGroupInitiatorGroup: 1\n      # None, CHAP, or CHAP Mutual\n      targetGroupAuthType: None\n      # get the correct ID from the \"Authorized Access\" section of the UI\n      # only required if using Chap\n      #targetGroupAuthGroup:\n\n  extentInsecureTpc: true\n  extentXenCompat: false\n  extentDisablePhysicalBlocksize: true\n  # 512, 1024, 2048, or 4096,\n  extentBlocksize: 512\n  # \"\" (let FreeNAS decide, currently defaults to SSD), Unknown, SSD, 5400, 7200, 10000, 15000\n  extentRpm: \"SSD\"\n  # 0-100 (0 == ignore)\n  extentAvailThreshold: 0\nEOH\n      }\n\n      csi_plugin {\n        # must match --csi-name arg\n        id        = \"org.democratic-csi.iscsi\"\n        type      = \"node\"\n        mount_dir = \"/csi\"\n      }\n\n      resources {\n        cpu    = 500\n        memory = 128\n      }\n    }\n  }\n}\n\nvariable \"ssh_id\" {}\nvariable \"truenas_api_key\" {}\nvariable \"truenas_iscsi_pass\" {}\n"
  },
  {
    "path": "nomad_jobs/core-infra/keepalived/TODO.md",
    "content": "# Keepalived Improvements TODO\n\n## Problem\nThe osixia/keepalived image uses environment variables (env.yaml) to generate keepalived.conf at startup. This doesn't support dynamic config reloads via SIGHUP because the conf isn't regenerated from env vars on signal.\n\nCombined with Nomad templates that use `change_mode = \"restart\"` and dynamic Consul service lookups, this causes restart loops.\n\n## Proposed Solution\nReplace osixia/keepalived with plain keepalived using a direct config template:\n\n```hcl\nconfig {\n  image = \"osixia/keepalived:2.0.20\"  # or alpine + keepalived\n  volumes = [\n    \"local/keepalived.conf:/etc/keepalived/keepalived.conf\"\n  ]\n}\n\ntemplate {\n  destination = \"local/keepalived.conf\"\n  change_mode = \"signal\"\n  change_signal = \"SIGHUP\"\n  data = <<EOH\nvrrp_instance VI_1 {\n  state BACKUP\n  interface {{ sockaddr \"GetPrivateInterfaces | include \\\"network\\\" \\\"192.168.50.0/24\\\" | attr \\\"name\\\"\" }}\n  virtual_router_id 51\n  priority 100\n  nopreempt\n  virtual_ipaddress {\n    192.168.50.50/24\n  }\n}\nEOH\n}\n```\n\n## Alternatives Considered\n- **vip-manager** - lightweight, single purpose\n- **kube-vip** - modern, supports ARP/BGP\n- **ucarp** - simple CARP implementation\n\n## Affected Jobs\n- `nomad_jobs/core-infra/coredns/nomad.job` (keepalived-dns sidecar)\n- `nomad_jobs/core-infra/traefik/nomad.job` (keepalived-traefik sidecar)\n"
  },
  {
    "path": "nomad_jobs/core-infra/keepalived/nomad.job",
    "content": "job \"keepalived\" {\n  datacenters = [\"dc1\"]\n  type        = \"system\"\n  priority    = 100\n\n  meta {\n      job_file = \"nomad_jobs/core-infra/keepalived/nomad.job\"\nversion = \"5\"\n  }\n\n  group \"keepalived\" {\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"keepalived\" {\n      driver = \"docker\"\n      config {\n        image = \"osixia/keepalived:2.3.4\"\n        network_mode = \"host\"\n        volumes = [\n            \"local/:/container/environment/01-custom\"\n        ]\n        cap_add = [\"NET_ADMIN\", \"NET_BROADCAST\", \"NET_RAW\"]\n      }\n      template {\n        destination = \"local/env.yaml\"\n        change_mode = \"restart\"\n        splay       = \"1m\"\n        data        = <<EOH\nKEEPALIVED_VIRTUAL_IPS:\n  - 192.168.50.2/24\n  - 192.168.50.3/24\nKEEPALIVED_UNICAST_PEERS:\n{{- with $node := node -}}\n{{ range nodes }}\n{{- if ne .Address $node.Node.Address }}\n  - {{ .Address }}\n{{- end -}}\n{{- end -}}\n{{- end }}\nKEEPALIVED_INTERFACE: {{ sockaddr \"GetPrivateInterfaces | include \\\"network\\\" \\\"192.168.50.0/24\\\" | attr \\\"name\\\"\" }}\nEOH\n      }\n      resources {\n        cpu    = 100\n        memory = 32\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "nomad_jobs/core-infra/nfs-csi-plugin/controller.job",
    "content": "job \"plugin-nfs-controller\" {\n  \n  meta {\n  job_file = \"nomad_jobs/core-infra/nfs-csi-plugin/controller.job\"\n  }\ndatacenters = [\"dc1\"]\n  group \"controller\" {\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    task \"plugin\" {\n      driver = \"docker\"\n      config {\n        image = \"registry.k8s.io/sig-storage/nfsplugin:v4.13.2\"\n        args = [\n          \"--v=5\",\n          \"--nodeid=${attr.unique.hostname}\",\n          \"--endpoint=unix:///csi/csi.sock\",\n          \"--drivername=nfs.csi.k8s.io\"\n        ]\n      }\n      csi_plugin {\n        id        = \"nfsofficial\"\n        type      = \"controller\"\n        mount_dir = \"/csi\"\n      }\n      resources {\n        memory = 128\n        cpu    = 100\n      }\n    }\n  }\n}\n\n"
  },
  {
    "path": "nomad_jobs/core-infra/nfs-csi-plugin/nodes.job",
    "content": "job \"plugin-nfs-nodes\" {\n  \n  meta {\n  job_file = \"nomad_jobs/core-infra/nfs-csi-plugin/nodes.job\"\n  }\ndatacenters = [\"dc1\"]\n  # you can run node plugins as service jobs as well, but this ensures\n  # that all nodes in the DC have a copy.\n  type = \"system\"\n  group \"nodes\" {\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    task \"plugin\" {\n      driver = \"docker\"\n      config {\n        image = \"registry.k8s.io/sig-storage/nfsplugin:v4.13.2\"\n        args = [\n          \"--v=5\",\n          \"--nodeid=${attr.unique.hostname}\",\n          \"--endpoint=unix:///csi/csi.sock\",\n          \"--drivername=nfs.csi.k8s.io\"\n        ]\n        # node plugins must run as privileged jobs because they\n        # mount disks to the host\n        privileged = true\n      }\n      csi_plugin {\n        id        = \"nfsofficial\"\n        type      = \"node\"\n        mount_dir = \"/csi\"\n      }\n      resources {\n        memory = 64\n        cpu = 100\n      }\n    }\n  }\n}\n\n"
  },
  {
    "path": "nomad_jobs/core-infra/pihole/nomad.job",
    "content": "job \"pihole\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n  priority    = 100\n\n  meta {\n      job_file = \"nomad_jobs/core-infra/pihole/nomad.job\"\nversion = \"3\"\n  }\n\n  constraint {\n    attribute = \"${meta.dns}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"infra\" {\n    count = 1 \n\n    network {\n      port \"dns\" { \n        host_network = \"lan\"\n        static = \"8053\" \n        to     = \"53\" \n      }\n      port \"web\" { \n        host_network = \"lan\"\n        to = \"80\" \n      }\n    }\n\n    volume \"pihole\" {\n      type      = \"csi\"\n      read_only = false\n      source    = \"pihole3\"\n      access_mode = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"pihole\" {\n      driver = \"docker\"\n      config {\n        image = \"pihole/pihole:2026.04.0\"\n        ports = [\"dns\", \"web\"]\n        volumes = [\n          \"${var.shared_dir}pihole-dnsmasq:/etc/dnsmasq.d/\",\n        ]\n      }\n\n      volume_mount {\n        volume      = \"pihole\"\n        destination = \"/etc/pihole\"\n        read_only   = false\n      }\n\n      service {\n         tags = [\n          \"traefik.enable=true\"\n         ]\n         name = \"pihole\"\n\t provider = \"consul\"\n         port = \"web\"\n         check {\n           type     = \"tcp\"\n           interval = \"10s\"\n           timeout  = \"2s\"\n         }\n      }\n\n      env {\n        TZ                             = \"Europe/Amsterdam\"\n        FTLCONF_webserver_api_password = \"\"\n        FTLCONF_dns_upstreams   
       = \"8.8.8.8;1.1.1.1\"\n        FTLCONF_dns_listeningMode      = \"ALL\"\n        FTLCONF_misc_etc_dnsmasq_d     = \"true\"\n      }\n\n      resources {\n        cpu    = 300\n        memory = 128\n      }\n    }\n  }\n}\n\nvariable \"region\" {}\n\n\nvariable \"shared_dir\" {}\n\n"
  },
  {
    "path": "nomad_jobs/core-infra/pihole/volume.hcl",
    "content": "id           = \"pihole3\"\nexternal_id  = \"pihole3\"\nname         = \"pihole3\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"1GiB\"\ncapacity_max = \"1GiB\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"block-device\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\"]\n}\n\n"
  },
  {
    "path": "nomad_jobs/core-infra/smtp/nomad.job",
    "content": "job \"smtp\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/core-infra/smtp/nomad.job\"\nversion = \"7\"\n  }\n\n  group \"mail\" {\n    count = 1 \n    network {\n      port \"smtp\" {\n        host_network = \"lan\"\n        static = \"25\"\n      }\n    }\n\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"smtp\" {\n      driver = \"docker\"\n      config {\n        image = \"ixdotai/smtp\"\n        network_mode = \"host\"\n        ports = [\"smtp\"]\n        force_pull = \"true\"\n      }\n\n      template {\ndata = <<EOH\nMAILNAME=${var.tld}\nRELAY_NETWORKS=:172.0.0.0/8:127.0.0.1/32:10.0.0.0/8:100.0.0.0/8:192.168.50.0/24\nNET_DEV=enp2s0\nDISABLE_IPV6=true\nEOH\n        destination = \"local/env\"\n        env         = true\n      }\n\n      service {\n        port = \"smtp\"\n\tname = \"smtp\"\n        check {\n          type     = \"tcp\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      resources {\n        cpu    = 100\n        memory = 32\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\nvariable \"tld\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n"
  },
  {
    "path": "nomad_jobs/core-infra/tailscale/nomad.job",
    "content": "job \"tailscale\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/core-infra/tailscale/nomad.job\"\n      version = \"6\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"networking\" {\n    count = 1 \n\n    volume \"tailscale\" {\n      type      = \"csi\"\n      read_only = false\n\n      source    = \"tailscale2\"\n      access_mode = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"tailscale\" {\n      driver = \"docker\"\n      config {\n        image = \"tailscale/tailscale:v1.96.5\"\n        network_mode = \"host\"\n        force_pull = \"true\"\n        privileged = true\n        cap_add = [\"NET_ADMIN\", \"NET_RAW\"]\n\tvolumes = [\n          \"/dev/net/tun:/dev/net/tun\",\n\t]\n      }\n\n      volume_mount {\n        volume      = \"tailscale\"\n        destination = \"/var/lib/tailscale\"\n        read_only   = false\n      }\n\n      template {\ndata = <<EOH\nTS_HOSTNAME=\"home-gateway\"\nTS_ROUTES=\"192.168.50.0/24\"\nTS_AUTHKEY=\"${var.tailscale_auth}\"\nTS_STATE_DIR=\"/var/lib/tailscale/tailscaled.state\"\nTS_USERSPACE=\"true\"\nTS_EXTRA_ARGS=\"--reset --advertise-tags=tag:nomad\"\nEOH\n      destination = \"local/env\"\n      env         = true\n      }\n\n      resources {\n        cpu    = 200\n        memory = 128\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\nvariable \"tld\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n\nvariable \"tailscale_auth\" {}\n"
  },
  {
    "path": "nomad_jobs/core-infra/tailscale/volume.hcl",
    "content": "id           = \"tailscale2\"\nexternal_id  = \"tailscale2\"\nname         = \"tailscale2\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"1GiB\"\ncapacity_max = \"1GiB\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"block-device\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\"]\n}\n\n"
  },
  {
    "path": "nomad_jobs/core-infra/tailscale-este/nomad.job",
    "content": "job \"tailscale-este\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/core-infra/tailscale-este/nomad.job\"\nversion = \"3\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"networking\" {\n    count = 1 \n\n    volume \"tailscale-este\" {\n      type      = \"csi\"\n      read_only = false\n\n      source    = \"tailscale-este\"\n      access_mode = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"tailscale\" {\n      driver = \"docker\"\n      config {\n        image = \"tailscale/tailscale:v1.96.5\"\n        entrypoint = [\"/local/start.sh\"]\n        network_mode = \"host\"\n        force_pull = \"true\"\n        privileged = true\n        cap_add = [\"NET_ADMIN\", \"NET_RAW\"]\n\tvolumes = [\n          \"/dev/net/tun:/dev/net/tun\",\n\t]\n      }\n\n      volume_mount {\n        volume      = \"tailscale-este\"\n        destination = \"/var/lib/tailscale\"\n        read_only   = false\n      }\n\n      template {\ndata = <<EOH\n#!/bin/sh\n\nfunction up() {\n    until /usr/local/bin/tailscale up --snat-subnet-routes=false --auth-key=\"${var.tailscale_auth_este}\" --advertise-routes=\"192.168.50.0/24\" --hostname=\"este-gateway\"\n    do\n        sleep 0.1\n    done\n\n}\n\n# send this function into the background\nup &\n\nexec tailscaled --tun=userspace-networking --statedir=\"/var/lib/tailscale/tailscaled.state\"\nEOH\n        destination = \"local/start.sh\"\n        env         = false\n        perms       = 755\n      }\n\n      resources {\n        cpu    = 200\n        memory = 128\n      }\n    }\n  
}\n}\n\nvariable \"region\" {\n    type = string\n}\n\n\n\nvariable \"tld\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n\nvariable \"tailscale_auth_este\" {}\n"
  },
  {
    "path": "nomad_jobs/core-infra/tailscale-este/volume.hcl",
    "content": "id           = \"tailscale-este\"\nexternal_id  = \"tailscale-este\"\nname         = \"tailscale-este\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"1GiB\"\ncapacity_max = \"1GiB\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"block-device\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\"]\n}\n\n"
  },
  {
    "path": "nomad_jobs/core-infra/traefik/config/consul-catalog.yml",
    "content": "# Enable Rancher Provider.\nproviders:\n  consulcatalog:\n\n    # Expose Consul Catalog services by default in Traefik.\n    exposedByDefault: true\n\n    # Defines the consul address endpoint.\n    address: 127.0.0.1:8500\n\n    # Defines the scheme used.\n    scheme: \"foobar\"\n\n    # Defines the DC.\n    datacenter: \"foobar\"\n\n    # Defines the token.\n    token: \"foobar\"\n\n    # Defines the expoint wait time.\n    endpointWaitTime: \"15s\"\n\n    # Defines Consul Catalog Provider TLS endpoint.\n    endpoint:\n      tls:\n\n        # Defines Consul Catalog Provider endpoint.\n        caOptional: true\n        cert: \"foobar\"\n        key: \"foobar\"\n        insecureSkipVerify: true\n"
  },
  {
    "path": "nomad_jobs/core-infra/traefik/config/consul.yml",
    "content": "# Enable Rancher Provider.\nproviders:\n  consulcatalog:\n\n    # Expose Consul Catalog services by default in Traefik.\n    exposedByDefault: true\n\n    # Defines the consul address endpoint.\n    address: 127.0.0.1:8500\n\n    # Defines the scheme used.\n    scheme: \"foobar\"\n\n    # Defines the DC.\n    datacenter: \"foobar\"\n\n    # Defines the token.\n    token: \"foobar\"\n\n    # Defines the expoint wait time.\n    endpointWaitTime: \"15s\"\n\n    # Defines Consul Catalog Provider TLS endpoint.\n    endpoint:\n      tls:\n\n        # Defines Consul Catalog Provider endpoint.\n        caOptional: true\n        cert: \"foobar\"\n        key: \"foobar\"\n        insecureSkipVerify: true\n"
  },
  {
    "path": "nomad_jobs/core-infra/traefik/config/traefik.toml",
    "content": "################################################################\n#\n# Configuration sample for Traefik v2.\n#\n# For Traefik v1: https://github.com/containous/traefik/blob/v1.7/traefik.sample.toml\n#\n################################################################\n\n################################################################\n# Global configuration\n################################################################\n[global]\n  checkNewVersion = false\n  sendAnonymousUsage = false\n\n################################################################\n# Entrypoints configuration\n################################################################\n\n# Entrypoints definition\n#\n# Optional\n# Default:\n[entryPoints]\n  [entryPoints.web]\n    address = \"0.0.0.0:80\"\n\n  [entryPoints.traefik]\n    address = \"0.0.0.0:9001\"\n\n  [entryPoints.websecure]\n    address = \"0.0.0.0:443\"\n\n[http.middlewares]\n  [http.middlewares.https-redirect.redirectscheme]\n    scheme = \"https\"\n\n[certificatesResolvers.sample.acme]\n  email = \"me@you.com\"\n  storage = \"acme.json\"\n  [certificatesResolvers.sample.acme.httpChallenge]\n    # used during the challenge\n    entryPoint = \"web\"\n\n################################################################\n# ServersTransports for HTTPS backends with self-signed certs\n################################################################\n[serversTransports.insecure-skip-verify]\n  insecureSkipVerify = true\n\n\n################################################################\n# Traefik logs configuration\n################################################################\n\n# Traefik logs\n# Enabled by default and log to stdout\n#\n# Optional\n#\n[log]\n\n  # Log level\n  #\n  # Optional\n  # Default: \"ERROR\"\n  #\n  # level = \"DEBUG\"\n\n  # Sets the filepath for the traefik log. 
If not specified, stdout will be used.\n  # Intermediate directories are created if necessary.\n  #\n  # Optional\n  # Default: os.Stdout\n  #\n  # filePath = \"log/traefik.log\"\n\n  # Format is either \"json\" or \"common\".\n  #\n  # Optional\n  # Default: \"common\"\n  #\n  # format = \"json\"\n\n################################################################\n# Access logs configuration\n################################################################\n\n# Enable access logs\n# By default it will write to stdout and produce logs in the textual\n# Common Log Format (CLF), extended with additional fields.\n#\n# Optional\n#\n# [accessLog]\n\n  # Sets the file path for the access log. If not specified, stdout will be used.\n  # Intermediate directories are created if necessary.\n  #\n  # Optional\n  # Default: os.Stdout\n  #\n  # filePath = \"/path/to/log/log.txt\"\n\n  # Format is either \"json\" or \"common\".\n  #\n  # Optional\n  # Default: \"common\"\n  #\n  # format = \"json\"\n\n################################################################\n# API and dashboard configuration\n################################################################\n\n# Enable API and dashboard\n[api]\n\n  # Name of the related entry point\n  #\n  # Optional\n  # Default: \"traefik\"\n  #\n  # entryPoint = \"traefik\"\n\n  # Enabled Dashboard\n  #\n  # Optional\n  # Default: true\n  #\n  dashboard = true\n  insecure = true\n################################################################\n# Ping configuration\n################################################################\n\n# Enable ping\n[ping]\n\n  # Name of the related entry point\n  #\n  # Optional\n  # Default: \"traefik\"\n  #\n  # entryPoint = \"traefik\"\n\n################################################################\n# Docker configuration backend\n################################################################\n\n# Enable Docker configuration backend\n#[providers.docker]\n\n  # Docker server endpoint. 
Can be a tcp or a unix socket endpoint.\n  #\n  # Required\n  # Default: \"unix:///var/run/docker.sock\"\n  #\n  # endpoint = \"tcp://10.10.10.10:2375\"\n\n  # Default host rule.\n  #\n  # Optional\n  # Default: \"Host(`{{ normalize .Name }}`)\"\n  #\n  # defaultRule = \"Host(`{{ normalize .Name }}.docker.localhost`)\"\n\n  # Expose containers by default in traefik\n  #\n  # Optional\n  # Default: true\n  #\n  # exposedByDefault = false\n\n# Enable Consul Catalog Provider.\n[providers.consulcatalog]\n\n  # Expose Consul Catalog services by default in Traefik.\n  exposedByDefault = false\n\n  # Prefix used for accessing the Consul service metadata.\n  prefix = \"traefik\"\n\n  # Defines the polling interval (in seconds).\n  #refreshSeconds = 15\n\n  # Defines default rule.\n  defaultRule = \"Host(`{{ .Name }}.stuck-in-blue.com`)\"\n\n  # Includes only containers having a label with key `a.label.name` and value `foo`\n  #constraints = \"Label(`a.label.name`, `foo`)\"\n  # Defines Consul Catalog Provider endpoint.\n  [providers.consulcatalog.endpoint]\n\n    # Defines the consul address endpoint.\n    address = \"127.0.0.1:8500\"\n\n    # Defines the scheme used.\n    scheme = \"https\"\n\n    # Defines the DC.\n    datacenter = \"home\"\n\n    # Defines the token.\n    #token = \"foobar\"\n\n    # Defines the expoint wait time.\n    endpointWaitTime = \"15s\"\n\n#    [providers.consulCatalog.endpoint.tls]\n#      ca = \"/etc/consul.d/homelab-agent-ca.pem\"\n#      cert = \"/etc/consul.d/hetzner-server-homelab-0.pem\"\n#      key = \"/etc/consul.d/hetzner-server-homelab-0-key.pem\"\n\n#[file]\n#\n## rules\n#[backends]\n#  [backends.sabnzbd]\n#    [backends.sabnzbd.servers.server1]\n#    url = \"http://127.0.0.1:8080\"\n#    weight = 10\n#    extractorfunc = \"request.host\"\n#\n#[frontends]\n#  [frontends.sabnzbd]\n#  backend = \"sabnzbd\"\n#    [frontends.sabnzbd.routes.sab]\n#    rule = \"Host:sab.nolab.xyz\"\n"
  },
  {
    "path": "nomad_jobs/core-infra/traefik/config/traefik.toml.new",
    "content": "################################################################\n#\n# Configuration sample for Traefik v2.\n#\n# For Traefik v1: https://github.com/containous/traefik/blob/v1.7/traefik.sample.toml\n#\n################################################################\n\n################################################################\n# Global configuration\n################################################################\n[global]\n  checkNewVersion = false\n  sendAnonymousUsage = false\n\n################################################################\n# Entrypoints configuration\n################################################################\n\n# Entrypoints definition\n#\n# Optional\n# Default:\n[entryPoints]\n  [entryPoints.grpc]\n    address = \":7576\"\n\n  [entryPoints.traefik]\n    address = \":9009\"\n\n################################################################\n# Traefik logs configuration\n################################################################\n\n# Traefik logs\n# Enabled by default and log to stdout\n#\n# Optional\n#\n[log]\n\n  # Log level\n  #\n  # Optional\n  # Default: \"ERROR\"\n  #\n  # level = \"DEBUG\"\n\n  # Sets the filepath for the traefik log. If not specified, stdout will be used.\n  # Intermediate directories are created if necessary.\n  #\n  # Optional\n  # Default: os.Stdout\n  #\n  # filePath = \"log/traefik.log\"\n\n  # Format is either \"json\" or \"common\".\n  #\n  # Optional\n  # Default: \"common\"\n  #\n  # format = \"json\"\n\n################################################################\n# Access logs configuration\n################################################################\n\n# Enable access logs\n# By default it will write to stdout and produce logs in the textual\n# Common Log Format (CLF), extended with additional fields.\n#\n# Optional\n#\n# [accessLog]\n\n  # Sets the file path for the access log. 
If not specified, stdout will be used.\n  # Intermediate directories are created if necessary.\n  #\n  # Optional\n  # Default: os.Stdout\n  #\n  # filePath = \"/path/to/log/log.txt\"\n\n  # Format is either \"json\" or \"common\".\n  #\n  # Optional\n  # Default: \"common\"\n  #\n  # format = \"json\"\n\n################################################################\n# API and dashboard configuration\n################################################################\n\n# Enable API and dashboard\n[api]\n\n  # Name of the related entry point\n  #\n  # Optional\n  # Default: \"traefik\"\n  #\n  # entryPoint = \"traefik\"\n\n  # Enabled Dashboard\n  #\n  # Optional\n  # Default: true\n  #\n  # dashboard = false\n  insecure = true\n################################################################\n# Ping configuration\n################################################################\n\n# Enable ping\n[ping]\n\n  # Name of the related entry point\n  #\n  # Optional\n  # Default: \"traefik\"\n  #\n  # entryPoint = \"traefik\"\n\n################################################################\n# Docker configuration backend\n################################################################\n\n# Enable Docker configuration backend\n#[providers.docker]\n\n  # Docker server endpoint. 
Can be a tcp or a unix socket endpoint.\n  #\n  # Required\n  # Default: \"unix:///var/run/docker.sock\"\n  #\n  # endpoint = \"tcp://10.10.10.10:2375\"\n\n  # Default host rule.\n  #\n  # Optional\n  # Default: \"Host(`{{ normalize .Name }}`)\"\n  #\n  # defaultRule = \"Host(`{{ normalize .Name }}.docker.localhost`)\"\n\n  # Expose containers by default in traefik\n  #\n  # Optional\n  # Default: true\n  #\n  # exposedByDefault = false\n\n# Enable Consul Catalog Provider.\n[providers.consulcatalog]\n\n  # Expose Consul Catalog services by default in Traefik.\n  exposedByDefault = false\n\n  # Prefix used for accessing the Consul service metadata.\n  prefix = \"traefik\"\n\n  # Defines the polling interval (in seconds).\n  #refreshSeconds = 15\n\n  # Defines default rule.\n  defaultRule = \"Host(`{{ .Name }}.stage.dus.tcs.trv.cloud`)\"\n\n  # Includes only containers having a label with key `a.label.name` and value `foo`\n  #constraints = \"Label(`a.label.name`, `foo`)\"\n  # Defines Consul Catalog Provider endpoint.\n  [providers.consulcatalog.endpoint]\n\n    # Defines the consul address endpoint.\n    address = \"127.0.0.1:8500\"\n\n    # Defines the scheme used.\n    scheme = \"http\"\n\n    # Defines the DC.\n    datacenter = \"dus\"\n\n    # Defines the token.\n    #token = \"foobar\"\n\n    # Defines the expoint wait time.\n    endpointWaitTime = \"15s\"\n\n"
  },
  {
    "path": "nomad_jobs/core-infra/traefik/config/traefik.toml.test",
    "content": "################################################################\n# Entrypoints configuration\n################################################################\n# Entrypoints definition\n#\ndefaultEntryPoints = [\"https\"]\n\n[entryPoints]\n  [entryPoints.admin]\n  address = \"10.90.80.120:6062\"\n  [entryPoints.http]\n  address = \"78.94.59.116:80\"\n    [entryPoints.http.redirect]\n    entryPoint = \"https\"\n  [entryPoints.https]\n  address = \":443\"\n  [entryPoints.https.tls]\n\n[acme]\nemail = \"perry@stuck-in-blue.com\"\nstorage = \"acme.json\"\n#caServer = \"https://acme-staging-v02.api.letsencrypt.org/directory\"\ncaServer = \"https://acme-v02.api.letsencrypt.org/directory\"\nentryPoint = \"https\"\n[acme.httpChallenge]\nentryPoint = \"http\"\n[acme.dnsChallenge]\n  provider = \"gcloud\"\n  delayBeforeCheck = 0\n\n[[acme.domains]]\n  main = \"*.nolab.xyz\"\n  sans = [\"nolab.xyz\"]\n\n################################################################\n# Traefik logs configuration\n################################################################\n# Enable logs\n# By default it will write to stdout\n[traefikLog]\n\n################################################################\n# Access logs configuration\n################################################################\n# Enable access logs\n# By default it will write to stdout and produce logs in the textual\n# Common Log Format (CLF), extended with additional fields.\n\n[accessLog]\n\n################################################################\n# Metrics configuration\n################################################################\n[metrics]\n  [metrics.prometheus]\n    entryPoint = \"admin\"\n\n################################################################\n# API and dashboard configuration\n################################################################\n# Enable API and dashboard\n[api]\n  entryPoint = \"admin\"\n  [api.statistics]\n    recentErrors = 
100\n################################################################\n# Ping configuration\n################################################################\n# Enable ping\n[ping]\n  entryPoint = \"admin\"\n################################################################\n# Consul Catalog Provider\n################################################################\n[consulCatalog]\nendpoint = \"{{ env \"NOMAD_IP_https\" }}:8500\"\nstale = true\nprefix = \"traefik\"\ndomain = \"holab.io\"\n#filename = \"/usr/local/etc/traefik/consul.toml\"\n#templateVersion = 2\n\n"
  },
  {
    "path": "nomad_jobs/core-infra/traefik/nomad.job",
    "content": "job \"traefik\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type = \"service\"\n  meta {\n      job_file = \"nomad_jobs/core-infra/traefik/nomad.job\"\n      version = \"12\"  // HA: run 2 instances with keepalived failover\n  }\n\n  group \"lbs\" {\n    count = 2\n\n    constraint {\n      operator = \"distinct_hosts\"\n      value    = \"true\"\n    }\n\n    network {\n      port \"http\" {\n        host_network = \"lan\"\n        static = \"80\"\n      }\n      port \"admin\" {\n        host_network = \"lan\"\n        static = \"9002\"\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n      auto_promote     = true\n      canary           = 2\n    }\n\n    task \"keepalived-traefik\" {\n      driver = \"docker\"\n\n      lifecycle {\n        hook = \"prestart\"\n        sidecar = true\n      }\n\n      config {\n        image = \"osixia/keepalived:2.3.4\"\n        network_mode = \"host\"\n        force_pull = false\n        volumes = [\n          \"local/keepalived.conf:/etc/keepalived/keepalived.conf\"\n        ]\n        cap_add = [\"NET_ADMIN\", \"NET_BROADCAST\", \"NET_RAW\"]\n      }\n\n      template {\n        destination = \"local/keepalived.conf\"\n        change_mode = \"restart\"\n        splay       = \"1m\"\n        data        = <<EOH\nvrrp_instance VI_1 {\n    state BACKUP\n    interface {{ sockaddr \"GetPrivateInterfaces | include \\\"network\\\" \\\"192.168.50.0/24\\\" | attr \\\"name\\\"\" }}\n    virtual_router_id 50\n    priority 100\n    advert_int 1\n\n    use_vmac vrrp.50\n    vmac_xmit_base\n\n    unicast_peer {\n{{- range service \"traefik-web\" }}\n        {{ .Address }}\n{{- end }}\n    }\n\n    virtual_ipaddress {\n        192.168.50.20/24\n    }\n}\nEOH\n      }\n\n      resources {\n        cpu    = 100\n       
 memory = 64\n      }\n    }\n\n    task \"traefik\" {\n      driver = \"docker\"\n      service {\n        name = \"traefik-web\"\n        port = \"http\"\n      }\n      service {\n        name = \"traefik\"\n        port = \"admin\"\n        tags = [\n          \"metrics\"\n        ]\n        check {\n          type     = \"tcp\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      config {\n        image = \"traefik:v3.6\"\n        ports = [\"http\", \"admin\"]\n        network_mode = \"host\"\n        volumes = [\n          \"local/traefik.toml:/etc/traefik/traefik.toml\",\n          \"local/dynamic-config.toml:/etc/traefik/dynamic/dynamic-config.toml\",\n          \"local/servers-transport.toml:/etc/traefik/dynamic/servers-transport.toml\",\n          \"${var.shared_dir}traefik-ingress/acme.json:/acme.json\",\n          \"${var.shared_dir}traefik-ingress/dynamic-whitelist.toml:/etc/traefik/dynamic/dynamic-whitelist.toml\",\n        ]\n      }\n\n      template {\ndata = <<EOH\n[global]\n  checkNewVersion = false\n  sendAnonymousUsage = false\n\n[metrics]\n  [metrics.prometheus]\n\n[entryPoints]\n  [entryPoints.web]\n    address = \":80\"\n    # No entrypoint middleware - applied at router level to allow ACME challenges\n\n  [entryPoints.websecure]\n    address = \":443\"\n    [entryPoints.websecure.http]\n        middlewares=[\"home-ip-whitelist@file\"]\n    [entryPoints.websecure.http.tls]\n      certResolver = \"letsencrypt\"\n\n  [entryPoints.traefik]\n    address = \":9002\"\n\n[tls.options]\n  [tls.options.TLSOptions]\n    minVersion = \"VersionTLS12\"\n    sniStrict = true\n\n[accessLog]\n  format = \"json\"\n\n[log]\n\n[api]\n  dashboard = true\n  insecure = true  # Enable direct access on :9002 (protected by local network)\n\n[ping]\n\n[providers.consulcatalog]\n  exposedByDefault = false\n  prefix = \"traefik\"\n  defaultRule = \"Host(`{{ .Name }}.${var.tld}`)\"\n\n  [providers.consulcatalog.endpoint]\n    address = 
\"{{{ env \"NOMAD_IP_http\" }}}:8500\"\n    scheme = \"http\"\n    datacenter = \"homelab\"\n    endpointWaitTime = \"15s\"\n\n[certificatesResolvers.letsencrypt.acme]\n  email = \"me@you.com\"\n  storage = \"acme.json\"\n  [certificatesResolvers.letsencrypt.acme.httpChallenge]\n    entryPoint = \"web\"\n\n[providers.file]\n  directory = \"/etc/traefik/dynamic\"\n  watch = true\nEOH\n        destination = \"local/traefik.toml\"\n        env         = false\n        change_mode = \"noop\"\n        left_delimiter = \"{{{\"\n        right_delimiter = \"}}}\"\n      }\n\n      template {\ndata = <<EOH\n[http.middlewares]\n  [http.middlewares.allow-local-network.ipWhiteList]\n    sourceRange = [\"192.168.50.0/24\", \"10.0.0.0/16\"]  # Local network only\n\n  [http.middlewares.basic-auth.basicAuth]\n    users = [\n      \"admin:$apr1$Ht8D2P1z$7QOq2s8xKUomI1cM.rFJX/\" # Replace with an htpasswd-generated hash\n    ]\n    realm = \"Restricted Area\"\n\n\n  [http.middlewares.ip-or-auth.chain]\n    middlewares = [\"home-ip-whitelist@file\", \"allow-local-network\", \"basic-auth\"]\n\n  [http.middlewares.redirect-to-https.redirectScheme]\n    scheme = \"https\"\n    permanent = true\n\n  [http.middlewares.whitelist-then-redirect.chain]\n    middlewares = [\"home-ip-whitelist@file\", \"redirect-to-https\"]\n\n[http.routers]\n  # Redirect HTTP to HTTPS except for ACME challenges - apply whitelist first\n  [http.routers.http-redirect]\n    entryPoints = [\"web\"]\n    rule = \"PathPrefix(`/`) && !PathPrefix(`/.well-known/acme-challenge/`)\"\n    middlewares = [\"whitelist-then-redirect\"]\n    service = \"noop@internal\"\n    priority = 1000\n\n  [http.routers.https-local]\n    entryPoints = [\"websecure\"]\n    rule = \"HostRegexp(`{any:.+}`)\"  # Matches any domain\n    middlewares = [\"home-ip-whitelist@file\"]\n    service = \"beefcake\"\n    [http.routers.https-local.tls]\n\n[http.services]\n  [http.services.beefcake.loadBalancer]\n    
[[http.services.beefcake.loadBalancer.servers]]\n      url = \"http://192.168.50.208:80\"\nEOH\n        destination = \"local/dynamic-config.toml\"\n        env         = false\n        change_mode = \"noop\"\n      }\n\n      template {\n        data = <<EOH\n# ServersTransport for HTTPS backends with self-signed certificates\n[http.serversTransports]\n  [http.serversTransports.insecure-skip-verify]\n    insecureSkipVerify = true\nEOH\n        destination = \"local/servers-transport.toml\"\n        env         = false\n        change_mode = \"noop\"\n      }\n\n      resources {\n        cpu = 100\n        memory = 256\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\nvariable \"tld\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n\nvariable \"ingress_ip\" {\n    type = string\n}\n"
  },
  {
    "path": "nomad_jobs/core-infra/traefik-forward-auth/nomad.job",
    "content": "job \"traefik-forward-auth\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/core-infra/traefik-forward-auth/nomad.job\"\nversion = \"4\"\n  }\n\n  group \"downloaders\" {\n    count = 1 \n    network {\n      port \"http\" {\n        host_network = \"tailscale\"\n        static = \"4181\"\n      }\n\n    }\n\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"auth\" {\n      driver = \"docker\"\n      config {\n        image = \"ghcr.io/jordemort/traefik-forward-auth:latest\"\n        ports = [\"http\"]\n      }\n\n      env {\n        PROVIDERS_GOOGLE_CLIENT_ID     = \"${var.oauth_client_id}\"\n        PROVIDERS_GOOGLE_CLIENT_SECRET = \"${var.oauth_client_secret}\"\n        SECRET                         = \"${var.oauth_secret}\"\n        AUTH_HOST                      = \"auth.${var.tld}\"\n        COOKIE_DOMAIN                  = \"${var.tld}\"\n        WHITELIST                      = \"${var.oauth_emails}\"\n        LOG_LEVEL                      = \"debug\"\n        URL_PATH                       = \"/_oauth\"\n        DEFAULT_ACTION                 = \"auth\"\n        INSECURE_COOKIE                = \"false\"\n        CONFIG                         = \"local/config\"\n\tTRUSTED_IP_ADDRESS\t       = \"86.111.155.199/32,86.111.152.230/32,89.246.171.154/32,86.111.155.93/32\"\n      }\n\n      service {\n        port = \"http\"\n\tname = \"traefik-forward-auth\"\n        tags = [\n          \"traefik.enable=true\",\n          \"traefik.http.routers.auth.rule=Host(`auth.${var.tld}`)\",\n          \"traefik.http.routers.auth.entrypoints=websecure\",\n          \"traefik.http.routers.auth.tls=true\",\n          
\"traefik.http.routers.${NOMAD_TASK_NAME}_insecure.rule=Host(`auth.${var.tld}`)\",\n          \"traefik.http.routers.auth.tls.domains[0].main=${var.tld}\",\n          \"traefik.http.routers.auth.tls.domains[0].sans=*.${var.tld}\",\n          \"traefik.http.routers.auth.rule=Host(`auth.${var.tld}`)\",\n          \"traefik.http.routers.auth.rule=Path(`/_oauth`)\",\n          \"traefik.http.middlewares.forward-auth.forwardauth.address=http://${NOMAD_IP_http}:${NOMAD_PORT_http}/\",\n          \"traefik.http.middlewares.forward-auth.forwardauth.trustForwardHeader=true\",\n          \"traefik.http.middlewares.forward-auth.forwardauth.authResponseHeaders=X-Forwarded-User\",\n          \"traefik.http.routers.auth.middlewares=forward-auth\"\n        ]\n        check {\n          type     = \"tcp\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      template {\ndata = <<EOH\nrule.sabnzbd.action = allow\nrule.sabnzbd.rule = PathPrefix(`/sabnzbd/api`)\nEOH\n        destination = \"local/config\"\n        env         = false\n      }\n\n      resources {\n        cpu    = 100\n        memory = 256\n      }\n    }\n  }\n}\n\nvariable \"region\" {}\nvariable \"tld\" {}\nvariable \"oauth_client_id\" {}\nvariable \"oauth_client_secret\" {}\nvariable \"oauth_secret\" {}\nvariable \"oauth_emails\" {}\n"
  },
  {
    "path": "nomad_jobs/core-infra/vault/secrets_template.yaml",
    "content": "backends:\n- type: generic\n  path: secret\n  description: secrets\n\nsecrets:\n  - path: /secret/homelab/couchpotato\n    values:\n      apikey: ${COUCHPOTATO_APIKEY}\n      username: ${COUCHPOTATO_USERNAME}\n      password: ${COUCHPOTATO_PASS}\n      host: ${COUCHPOTATO_HOST}\n\n  - path: /secret/homelab/plex\n    values:\n      auth_token: ${PLEX_AUTHTOKEN}\n      host: ${PLEX_HOST}\n\n  - path: /secret/external/pushover\n    values:\n      api_key: ${PUSHOVER_APIKEY}\n      user_key: ${PUSHOVER_USERKEY}\n\n  - path: /secret/external/newsnab\n    values:\n      api: ${NEWSNAB_API}\n      host: ${NEWSNAB_HOST}\n\n  - path: /secret/homelab/mqtt-pub\n    values:\n      remote_username: ${MQTTPUB_REMOTE_USERNAME}\n      remote_password: ${MQTTPUB_REMOTE_PASSWORD}\n      address: ${MQTTPUB_ADDRESS}\n\n  - path: /secret/homelab/sonarr\n    values:\n      api: ${SONAR_API}\n      host: ${SONAR_HOST}\n\n  - path: /secret/homelab/sabnzbd\n    values:\n      api: ${SABNZBD_API}\n      host: ${SABNZBD_HOST}\n\n  - path: /secret/homelab/asuswrt\n    values:\n      username: ${ASUSWRT_USER}\n      password: ${ASUSWRT_PASS}\n      host: ${ASUSWRT_HOST}\n\n  - path: /secret/external/github\n    values:\n      token: ${GITHUB_TOKEN}\n\n"
  },
  {
    "path": "nomad_jobs/gaming/minecraft-1.21/nomad.job",
    "content": "job \"minecraft-1-21\" {\n  region      = var.region\n  datacenters = [\"minecraft\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/gaming/minecraft-1.21/nomad.job\"\n    version  = \"2\"\n  }\n\n  group \"minecraft\" {\n    count = 1\n\n    network {\n      port \"minecraft\" {\n        host_network = \"lan\"\n        static       = 25568\n        to           = 25565\n      }\n      port \"query\" {\n        host_network = \"lan\"\n        static       = 25569\n        to           = 25565\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"minecraft-server\" {\n      driver = \"docker\"\n\n      config {\n        image        = \"itzg/minecraft-server:2026.4.1\"\n        force_pull   = false\n        network_mode = \"host\"\n\n        volumes = [\n          \"/root/minecraft-1.21.10/data:/data\",\n          \"/root/minecraft-1.21.10/config:/config\",\n        ]\n      }\n\n      env {\n        EULA                          = \"TRUE\"\n        VERSION                       = \"1.21.10\"\n        TYPE                          = \"FABRIC\"\n        INIT_MEMORY                   = \"2G\"\n        MAX_MEMORY                    = \"8G\"\n        MOTD                          = \"Minecraft 1.21.10 Fabric Server\"\n        DIFFICULTY                    = \"peaceful\"\n        MAX_PLAYERS                   = \"20\"\n        VIEW_DISTANCE                 = \"10\"\n        SIMULATION_DISTANCE           = \"8\"\n        SPAWN_PROTECTION              = \"16\"\n        ONLINE_MODE                   = \"false\"\n        ENABLE_WHITELIST              = \"false\"\n        SERVER_PORT                   = \"25568\"\n        ENABLE_QUERY                  = \"true\"\n        QUERY_PORT                    = \"25568\"\n        
NETWORK_COMPRESSION_THRESHOLD = \"512\"\n        ENABLE_AUTOPAUSE              = \"FALSE\"\n        PAUSE_WHEN_EMPTY_SECONDS      = \"0\"\n        REMOVE_OLD_MODS               = \"true\"\n        MODRINTH_PROJECTS             = \"axiom\\nfabric-api\\ncloth-config\\ndistanthorizons\\nbalm\\nferrite-core\\nwaystones\\ncarry-on\\nbeautify-refabricated\\nmacaws-furniture\\nmacaws-trapdoors\\nmore-decorative-blocks\\nblackwolf-library\"\n        MODRINTH_ALLOWED_VERSION_TYPE = \"beta\"\n        OPS                           = \"perry,hannah,Perry,Steve5leon\"\n        JVM_OPTS                      = \"-XX:+UseG1GC -XX:+ParallelRefProcEnabled -XX:MaxGCPauseMillis=100 -XX:+UnlockExperimentalVMOptions -XX:+DisableExplicitGC -XX:+AlwaysPreTouch -XX:+UseStringDeduplication -XX:G1NewSizePercent=30 -XX:G1MaxNewSizePercent=40 -XX:G1HeapRegionSize=8M -XX:G1ReservePercent=20 -XX:InitiatingHeapOccupancyPercent=20 -XX:G1MixedGCLiveThresholdPercent=90 -XX:SurvivorRatio=32 -XX:MaxTenuringThreshold=1\"\n      }\n\n      service {\n        name = \"minecraft-1-21\"\n        port = \"minecraft\"\n        tags = [\"minecraft\", \"gaming\", \"1.21\"]\n\n        check {\n          type     = \"tcp\"\n          interval = \"30s\"\n          timeout  = \"5s\"\n        }\n      }\n\n      resources {\n        cpu        = 4000\n        memory     = 3072\n        memory_max = 9728\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n  type = string\n}\n"
  },
  {
    "path": "nomad_jobs/gaming/minecraft-avaritia/nomad.job",
    "content": "job \"minecraft-avaritia\" {\n  region      = var.region\n  datacenters = [\"minecraft\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/gaming/minecraft-avaritia/nomad.job\"\n    version  = \"1\"\n  }\n\n  group \"minecraft\" {\n    count = 1\n\n    network {\n      port \"minecraft\" {\n        host_network = \"lan\"\n        static       = 25571\n        to           = 25565\n      }\n    }\n\n    restart {\n      attempts = 2\n      delay    = \"120s\"\n      interval = \"30m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"minecraft-server\" {\n      driver = \"docker\"\n\n      config {\n        image        = \"itzg/minecraft-server:2026.4.1\"\n        force_pull   = false\n        network_mode = \"host\"\n\n        volumes = [\n          \"/root/minecraft-avaritia/data:/data\",\n        ]\n      }\n\n      env {\n        EULA                          = \"TRUE\"\n        VERSION                       = \"1.20.1\"\n        TYPE                          = \"FORGE\"\n        INIT_MEMORY                   = \"2G\"\n        MAX_MEMORY                    = \"8G\"\n        MOTD                          = \"Avaritia Modded Server (1.20.1)\"\n        MODE                          = \"creative\"\n        DIFFICULTY                    = \"normal\"\n        MAX_PLAYERS                   = \"20\"\n        VIEW_DISTANCE                 = \"10\"\n        SIMULATION_DISTANCE           = \"8\"\n        SPAWN_PROTECTION              = \"16\"\n        ONLINE_MODE                   = \"false\"\n        ENABLE_WHITELIST              = \"false\"\n        SERVER_PORT                   = \"25571\"\n        ENABLE_QUERY                  = \"true\"\n        QUERY_PORT                    = \"25571\"\n        NETWORK_COMPRESSION_THRESHOLD = \"512\"\n        FORCE_GAMEMODE                = \"true\"\n        ENABLE_COMMAND_BLOCK      
    = \"true\"\n        OPS                           = \"perry,hannah,Perry\"\n        MAX_TICK_TIME                 = \"-1\"\n        ENABLE_RCON                   = \"true\"\n        RCON_PORT                     = \"25572\"\n        RCON_PASSWORD                 = \"minecraft\"\n        CREATE_CONSOLE_IN_PIPE        = \"true\"\n        JVM_OPTS                      = \"-XX:+UseG1GC -XX:+ParallelRefProcEnabled -XX:MaxGCPauseMillis=100 -XX:+UnlockExperimentalVMOptions -XX:+DisableExplicitGC -XX:+AlwaysPreTouch -XX:+UseStringDeduplication -XX:G1NewSizePercent=30 -XX:G1MaxNewSizePercent=40 -XX:G1HeapRegionSize=8M -XX:G1ReservePercent=20 -XX:InitiatingHeapOccupancyPercent=20 -XX:G1MixedGCLiveThresholdPercent=90 -XX:SurvivorRatio=32 -XX:MaxTenuringThreshold=1 -XX:G1PeriodicGCInterval=15000 -XX:G1PeriodicGCSystemLoadThreshold=0.0\"\n        MODRINTH_PROJECTS             = \"remorphed\\nwoodwalkers\\ncrafted-core\"\n        CURSEFORGE_FILES              = \"re-avaritia\\nluckytnt\\nlucky-tnt-lib\\npehkui\\nterrablender\\nbiomes-o-plenty\\noh-the-biomes-weve-gone\\nterralith\\ntectonic\\nlithostitched\\noh-the-trees-youll-grow\\njei\\njourneymap\\nwaystones\\nembeddium\\ngeckolib\\narchitectury-api\\nglitchcore\\ncorgilib\\nbalm\"\n        CF_API_KEY                    = \"${var.curseforge_api_key}\"\n      }\n\n      service {\n        name = \"minecraft-avaritia\"\n        port = \"minecraft\"\n        tags = [\"minecraft\", \"gaming\", \"avaritia\", \"modded\"]\n\n        check {\n          type     = \"tcp\"\n          interval = \"30s\"\n          timeout  = \"5s\"\n        }\n      }\n\n      resources {\n        cpu        = 4000\n        memory     = 3072\n        memory_max = 9728\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n  type = string\n}\n\nvariable \"curseforge_api_key\" {\n  type    = string\n  default = \"\"\n}\n"
  },
  {
    "path": "nomad_jobs/gaming/minecraft-axiom/nomad.job",
    "content": "job \"minecraft-axiom\" {\n  region      = var.region\n  datacenters = [\"minecraft\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/gaming/minecraft-axiom/nomad.job\"\n    version  = \"2\"  // Added forge-config-api-port dependency\n  }\n\n  group \"minecraft\" {\n    count = 1\n\n    network {\n      port \"minecraft\" {\n        host_network = \"lan\"\n        static       = 25566\n        to           = 25565\n      }\n      port \"query\" {\n        host_network = \"lan\"\n        static       = 25567\n        to           = 25565\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"minecraft-server\" {\n      driver = \"docker\"\n\n      config {\n        image        = \"itzg/minecraft-server:2026.4.1\"\n        force_pull   = false\n        network_mode = \"host\"\n\n        volumes = [\n          \"/root/minecraft_new/minecraft-fabric-server/data:/data\",\n          \"/root/minecraft_new/minecraft-fabric-server/config:/config\",\n          \"/root/minecraft_new/minecraft-fabric-server/mods:/mods\",\n        ]\n      }\n\n      env {\n        EULA                          = \"TRUE\"\n        VERSION                       = \"1.20.1\"\n        TYPE                          = \"FABRIC\"\n        FABRIC_LOADER_VERSION         = \"0.17.2\"\n        INIT_MEMORY                   = \"2G\"\n        MAX_MEMORY                    = \"8G\"\n        MOTD                          = \"Axiom Building Server (1.20.1)\"\n        MODE                          = \"creative\"\n        DIFFICULTY                    = \"easy\"\n        MAX_PLAYERS                   = \"20\"\n        VIEW_DISTANCE                 = \"10\"\n        SIMULATION_DISTANCE           = \"8\"\n        SPAWN_PROTECTION              = \"16\"\n        
ONLINE_MODE                   = \"false\"\n        ENABLE_WHITELIST              = \"false\"\n        SERVER_PORT                   = \"25566\"\n        ENABLE_QUERY                  = \"true\"\n        QUERY_PORT                    = \"25566\"\n        NETWORK_COMPRESSION_THRESHOLD = \"512\"\n        FORCE_GAMEMODE                = \"true\"\n        ENABLE_COMMAND_BLOCK          = \"true\"\n        OPS                           = \"perry,hannah,Perry\"\n        JVM_OPTS                      = \"-XX:+UseG1GC -XX:+ParallelRefProcEnabled -XX:MaxGCPauseMillis=100 -XX:+UnlockExperimentalVMOptions -XX:+DisableExplicitGC -XX:+AlwaysPreTouch -XX:+UseStringDeduplication -XX:G1NewSizePercent=30 -XX:G1MaxNewSizePercent=40 -XX:G1HeapRegionSize=8M -XX:G1ReservePercent=20 -XX:InitiatingHeapOccupancyPercent=20 -XX:G1MixedGCLiveThresholdPercent=90 -XX:SurvivorRatio=32 -XX:MaxTenuringThreshold=1 -Dfabric.networkMaxPacketSize=16777216 -Dnetty.maxDirectMemory=0\"\n        MODRINTH_PROJECTS             = \"axiom\\nbeautify-refabricated\\nchipped\\necologics\\ndecorative-blocks\\nresourceful-lib\\nathena-ctm\\nconnected-glass\\ndiagonal-fences\\ndiagonal-walls\\ndiagonal-windows\\ndouble-doors\\nsupplementaries\\nhandcrafted\\nmacaws-fences-and-walls\\nmacaws-windows\\nmacaws-bridges\\nmacaws-roofs\\nfairy-lights-fabric\\nimmediatelyfast\\nmacaws-doors\\nmacaws-furniture\\nmacaws-paths-and-pavings\\nmacaws-stairs\\nmacaws-paintings\\nmacaws-trapdoors\\nmacaws-lights-and-lamps\\nmoonlight\\nstarlight\\npuzzles-lib\\ncollective\\nregions-unexplored\\ntwigs\\nforge-config-api-port\"\n        CURSEFORGE_FILES              = 
\"grass-overhaul\\nfabric-api\\nfusion-connected-textures\\narchitectury-api\\nsodium\\nlithium\\ncloth-config\\nworldedit\\njourneymap\\nxaeros-minimap\\nwaystones\\nbalm-fabric\\nbiomes-o-plenty\\ntectonic\\nterralith\\nsupermartijn642s-core-lib\\noh-the-biomes-weve-gone\\nglitchcore\\nterrablender-fabric\\ncorgilib\\noh-the-trees-youll-grow\\ngeckolib\\nrefurbished-furniture\\nframework\"\n        CF_API_KEY                    = \"${var.curseforge_api_key}\"\n      }\n\n      service {\n        name = \"minecraft-axiom\"\n        port = \"minecraft\"\n        tags = [\"minecraft\", \"gaming\", \"axiom\"]\n\n        check {\n          type     = \"tcp\"\n          interval = \"30s\"\n          timeout  = \"5s\"\n        }\n      }\n\n      resources {\n        cpu        = 4000\n        memory     = 3072\n        memory_max = 9728\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n  type = string\n}\n\nvariable \"curseforge_api_key\" {\n  type    = string\n  default = \"\"\n}\n"
  },
  {
    "path": "nomad_jobs/gaming/minecraft-fiskheroes/nomad.job",
    "content": "job \"minecraft-fiskheroes\" {\n  region      = var.region\n  datacenters = [\"minecraft\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/gaming/minecraft-fiskheroes/nomad.job\"\n    version  = \"1\"\n  }\n\n  group \"minecraft\" {\n    count = 1\n\n    network {\n      port \"minecraft\" {\n        host_network = \"lan\"\n        static       = 25570\n        to           = 25565\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"minecraft-server\" {\n      driver = \"docker\"\n\n      config {\n        image        = \"itzg/minecraft-server:java8\"\n        force_pull   = false\n        network_mode = \"host\"\n\n        volumes = [\n          \"/root/minecraft-fiskheroes/data:/data\",\n        ]\n      }\n\n      env {\n        EULA                          = \"TRUE\"\n        VERSION                       = \"1.7.10\"\n        TYPE                          = \"FORGE\"\n        FORGE_VERSION                 = \"10.13.4.1614\"\n        MEMORY                        = \"10G\"\n        SERVER_PORT                   = \"25570\"\n        ENABLE_QUERY                  = \"true\"\n        QUERY_PORT                    = \"25570\"\n        MOTD                          = \"Fisk Superheroes - New York City\"\n        MODE                          = \"creative\"\n        DIFFICULTY                    = \"peaceful\"\n        FORCE_GAMEMODE                = \"true\"\n        MAX_PLAYERS                   = \"20\"\n        VIEW_DISTANCE                 = \"12\"\n        SIMULATION_DISTANCE           = \"8\"\n        SPAWN_PROTECTION              = \"0\"\n        ONLINE_MODE                   = \"false\"\n        ENABLE_WHITELIST              = \"false\"\n        ENABLE_COMMAND_BLOCK          = \"true\"\n        OPS       
                    = \"perry,hannah,Perry\"\n        JAVA_TOOL_OPTIONS             = \"-Dfml.queryResult=confirm\"\n        JVM_OPTS                      = \"-XX:+UseG1GC -XX:+ParallelRefProcEnabled -XX:MaxGCPauseMillis=100 -XX:+UnlockExperimentalVMOptions -XX:+DisableExplicitGC -XX:+AlwaysPreTouch -XX:+UseStringDeduplication -XX:G1NewSizePercent=30 -XX:G1MaxNewSizePercent=40 -XX:G1HeapRegionSize=8M -XX:G1ReservePercent=20 -XX:InitiatingHeapOccupancyPercent=20 -XX:G1MixedGCLiveThresholdPercent=90 -XX:SurvivorRatio=32 -XX:MaxTenuringThreshold=1 -XX:MetaspaceSize=256M -XX:MaxMetaspaceSize=512M -XX:ReservedCodeCacheSize=512M -XX:+UseCodeCacheFlushing\"\n      }\n\n      service {\n        name = \"minecraft-fiskheroes\"\n        port = \"minecraft\"\n        tags = [\"minecraft\", \"gaming\", \"forge\", \"fiskheroes\"]\n\n        check {\n          type     = \"tcp\"\n          interval = \"30s\"\n          timeout  = \"5s\"\n        }\n      }\n\n      resources {\n        cpu    = 4000\n        memory = 12288\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n  type = string\n}\n\n"
  },
  {
    "path": "nomad_jobs/gaming/minecraft-forge/nomad.job",
    "content": "job \"minecraft-forge\" {\n  region      = var.region\n  datacenters = [\"minecraft\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/gaming/minecraft-forge/nomad.job\"\n    version  = \"1\"\n  }\n\n  group \"minecraft\" {\n    count = 1\n\n    network {\n      port \"minecraft\" {\n        host_network = \"lan\"\n        static       = 25565\n        to           = 25565\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"minecraft-server\" {\n      driver = \"docker\"\n\n      config {\n        image        = \"itzg/minecraft-server:2026.4.1\"\n        force_pull   = false\n        network_mode = \"host\"\n\n        volumes = [\n          \"/root/minecraft/minecraft-server/data:/data\",\n          \"/root/minecraft/minecraft-server/mods:/mods:ro\",\n          \"/root/minecraft/minecraft-server/config:/config:ro\",\n        ]\n      }\n\n      env {\n        EULA                          = \"TRUE\"\n        VERSION                       = \"1.20.1\"\n        TYPE                          = \"FORGE\"\n        INIT_MEMORY                   = \"4G\"\n        MAX_MEMORY                    = \"12G\"\n        SERVER_PORT                   = \"25565\"\n        MOTD                          = \"Welcome to our modded Minecraft server!\"\n        MODE                          = \"creative\"\n        DIFFICULTY                    = \"easy\"\n        MAX_PLAYERS                   = \"20\"\n        VIEW_DISTANCE                 = \"12\"\n        SIMULATION_DISTANCE           = \"10\"\n        SPAWN_PROTECTION              = \"16\"\n        ONLINE_MODE                   = \"false\"\n        ENABLE_WHITELIST              = \"false\"\n        FORCE_GAMEMODE                = \"true\"\n        ENABLE_COMMAND_BLOCK          = 
\"true\"\n        ENABLE_RCON                   = \"true\"\n        RCON_PASSWORD                 = \"minecraft\"\n        RCON_PORT                     = \"25576\"\n        MAX_TICK_TIME                 = \"-1\"\n        OPS                           = \"perry,hannah,Perry\"\n        JVM_OPTS                      = \"-Dwatchdog.timeoutMillis=180000 -XX:+UseG1GC -XX:+ParallelRefProcEnabled -XX:MaxGCPauseMillis=100 -XX:+UnlockExperimentalVMOptions -XX:+DisableExplicitGC -XX:+AlwaysPreTouch -XX:+UseStringDeduplication -XX:G1NewSizePercent=30 -XX:G1MaxNewSizePercent=40 -XX:G1HeapRegionSize=8M -XX:G1ReservePercent=20 -XX:InitiatingHeapOccupancyPercent=20 -XX:G1MixedGCLiveThresholdPercent=90 -XX:SurvivorRatio=32 -XX:MaxTenuringThreshold=1 -XX:MetaspaceSize=256M -XX:MaxMetaspaceSize=512M -XX:ReservedCodeCacheSize=512M -XX:+UseCodeCacheFlushing\"\n      }\n\n      service {\n        name = \"minecraft-forge\"\n        port = \"minecraft\"\n        tags = [\"minecraft\", \"gaming\", \"forge\", \"1.20.1\"]\n\n        check {\n          type     = \"tcp\"\n          interval = \"30s\"\n          timeout  = \"5s\"\n        }\n      }\n\n      resources {\n        cpu        = 4000\n        memory     = 13312\n        memory_max = 13312\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n  type = string\n}\n"
  },
  {
    "path": "nomad_jobs/media-stack/audioserve/nomad.job",
    "content": "job \"audioserve\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/media-stack/audioserve/nomad.job\"\nversion = \"3\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"downloaders\" {\n    count = 1 \n    network {\n      port \"http\" {\n        host_network = \"tailscale\"\n        to = \"3000\"\n      }\n    }\n\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"audioserve\" {\n      driver = \"docker\"\n      config {\n        image = \"izderadicka/audioserve:v0.28.6\"\n        ports = [\"http\"]\n        args = [\"--tags\", \"/audiobooks\"]\n        volumes = [\n          \"${var.media_dir}/Books:/audiobooks\",\n        ]\n      }\n\n      env {\n        AUDIOSERVE_SHARED_SECRET = \"${var.web_pass}\"\n      }\n\n      service {\n        port = \"http\"\n\tname = \"audioserve\"\n        tags = [\n          \"traefik.enable=true\",\n          \"traefik.http.routers.${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_TASK_NAME}.${var.tld}\",\n          \"traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=forward-auth\"\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 100\n        memory = 256\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\n\n\nvariable \"tld\" {\n    type = string\n}\n\nvariable \"media_dir\" {\n    type = string\n}\n\nvariable \"web_pass\" {}\n"
  },
  {
    "path": "nomad_jobs/media-stack/flaresolverr/nomad.job",
    "content": "job \"flaresolverr\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/media-stack/flaresolverr/nomad.job\"\nversion = \"2\"\n  }\n\n  group \"downloaders\" {\n    count = 1\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    task \"flaresolverr\" {\n      driver = \"docker\"\n      config {\n        image = \"ghcr.io/flaresolverr/flaresolverr:v3.4.6\"\n        network_mode = \"host\"\n        volumes = [\n          \"${var.shared_dir}flaresolverr/config:/config\",\n          \"${var.shared_dir}flaresolverr/torrents:/torrents\",\n          \"${var.downloads_dir}:/downloads\",\n        ]\n      }\n\n      service {\n        port = \"http\"\n\tname = \"flaresolverr\"\n      }\n\n      resources {\n        cpu    = 500\n        memory = 256\n        network {\n          port \"http\" { static = \"8191\" }\n        }\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\n\n\nvariable \"shared_dir\" {\n    type = string\n}\n\nvariable \"downloads_dir\" {\n    type = string\n}\n"
  },
  {
    "path": "nomad_jobs/media-stack/jackett/nomad.job",
    "content": "job \"jackett\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/media-stack/jackett/nomad.job\"\nversion = \"4\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"downloaders\" {\n    count = 1 \n\n    network {\n      port \"http\" {\n        static = \"9117\"\n        host_network = \"tailscale\"\n      }\n    }\n\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"jackett\" {\n      driver = \"docker\"\n      config {\n        image = \"lscr.io/linuxserver/jackett:0.24.1234\"\n        network_mode = \"host\"\n        ports = [\"http\"]\n        volumes = [\n          \"${var.shared_dir}jackett:/config\",\n          \"/tmp:/downloads\",\n        ]\n      }\n\n      service {\n        port = \"http\"\n\tname = \"jackett\"\n        tags = [\n          \"traefik.enable=true\",\n          \"traefik.http.middlewares.httpsRedirect.redirectscheme.scheme=https\",\n\n\n          \"traefik.http.routers.${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_TASK_NAME}.${var.tld}\",\n          \"traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=forward-auth\"\n        ]\n      }\n\n      resources {\n        cpu    = 500\n        memory = 256\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\n\n\nvariable \"tld\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n"
  },
  {
    "path": "nomad_jobs/media-stack/lazylibrarian/nomad.job",
    "content": "job \"lazylibrarian\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/media-stack/lazylibrarian/nomad.job\"\nversion = \"4\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"downloaders\" {\n    count = 1 \n    network {\n      port \"http\" {\n        static = \"5299\"\n        host_network = \"lan\"\n      }\n    }\n\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"lazylibrarian\" {\n      driver = \"docker\"\n      config {\n        image = \"linuxserver/lazylibrarian\"\n        dns_servers = [\"192.168.50.2\"]\n        ports = [\"http\"]\n        volumes = [\n          \"${var.shared_dir}lazylibrarian:/config\",\n          \"${var.books_dir}:/books\",\n          \"${var.downloads_dir}:/downloads\",\n        ]\n      }\n\n      service {\n        port = \"http\"\n      \tname = \"lazylibrarian\"\n        tags = [\n          \"traefik.enable=true\"\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 100\n        memory = 128\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\nvariable \"tld\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n\nvariable \"downloads_dir\" {\n    type = string\n}\n\nvariable \"books_dir\" {\n    type = string\n}\n"
  },
  {
    "path": "nomad_jobs/media-stack/lidarr/nomad.job",
    "content": "job \"lidarr\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/media-stack/lidarr/nomad.job\"\nversion = \"10\"  // Full config.xml template with API key\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"downloaders\" {\n    count = 1 \n    network {\n      port \"http\" {\n        host_network = \"lan\"\n        to = \"8686\"\n      }\n    }\n\n    volume \"lidarr\" {\n      type      = \"csi\"\n      read_only = false\n      source    = \"lidarr2\"\n      access_mode = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"lidarr\" {\n      driver = \"docker\"\n      config {\n        image = \"linuxserver/lidarr:3.1.0\"\n        ports = [\"http\"]\n        dns_servers = [\"192.168.50.2\"]\n        volumes = [\n          \"${var.downloads_dir}:/downloads\",\n          \"${var.music_dir}:/music\",\n          \"${var.music_dir}:/media/Music\",\n          \"local/config.xml:/config/config.xml\",\n        ]\n      }\n\n      volume_mount {\n        volume      = \"lidarr\"\n        destination = \"/config\"\n        read_only   = false\n      }\n\n      env {\n        PUID = \"65534\"\n        PGID = \"65534\"\n        TZ = \"Etc/UTC\"\n      }\n\n      template {\n        data = <<EOH\n<Config>\n  <PostgresUser>postgres</PostgresUser>\n  <PostgresPassword>${var.postgres_pass}</PostgresPassword>\n  <PostgresPort>5432</PostgresPort>\n  <PostgresHost>postgres.service.consul</PostgresHost>\n  <PostgresMainDb>lidarr_main</PostgresMainDb>\n  <PostgresLogDb>lidarr_logs</PostgresLogDb>\n  <LogLevel>info</LogLevel>\n  
<UrlBase></UrlBase>\n  <BindAddress>*</BindAddress>\n  <Port>8686</Port>\n  <SslPort>6868</SslPort>\n  <EnableSsl>False</EnableSsl>\n  <LaunchBrowser>False</LaunchBrowser>\n  <ApiKey>${var.lidarr_api_key}</ApiKey>\n  <AuthenticationMethod>External</AuthenticationMethod>\n  <AuthenticationRequired>DisabledForLocalAddresses</AuthenticationRequired>\n  <TrustedProxies>100.64.0.0/10,192.168.50.0/24</TrustedProxies>\n  <Branch>master</Branch>\n  <InstanceName>Lidarr</InstanceName>\n  <UpdateMechanism>Docker</UpdateMechanism>\n</Config>\nEOH\n        destination = \"local/config.xml\"\n        perms = \"644\"\n      }\n\n      service {\n        port = \"http\"\n        name = \"lidarr\"\n        tags = [\n          \"traefik.enable=true\",\n          \"traefik.http.routers.${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_TASK_NAME}.${var.tld}\",\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/ping\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 100\n        memory = 256\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\nvariable \"tld\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n\nvariable \"downloads_dir\" {\n    type = string\n}\n\nvariable \"music_dir\" {\n    type = string\n}\n\nvariable \"postgres_pass\" {\n    type = string\n    description = \"Admin password for PostgreSQL\"\n}\n\nvariable \"lidarr_api_key\" {\n    type = string\n    description = \"API key for Lidarr\"\n}\n"
  },
  {
    "path": "nomad_jobs/media-stack/lidarr/volume.hcl",
    "content": "id           = \"lidarr2\"\nexternal_id  = \"lidarr2\"\nname         = \"lidarr2\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"10GiB\"\ncapacity_max = \"10GiB\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"file-system\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\"]\n}\n\n"
  },
  {
    "path": "nomad_jobs/media-stack/lidify/nomad.job",
    "content": "job \"lidify\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/media-stack/lidify/nomad.job\"\n    version  = \"1\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"discovery\" {\n    count = 1\n\n    network {\n      port \"http\" {\n        host_network = \"lan\"\n        to = \"5000\"\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"lidify\" {\n      driver = \"docker\"\n      config {\n        image = \"thewicklowwolf/lidify:latest\"\n        ports = [\"http\"]\n        dns_servers = [\"192.168.50.2\"]\n        volumes = [\n          \"${var.shared_dir}lidify:/lidify/config\",\n        ]\n      }\n\n      env {\n        lidarr_address      = \"http://lidarr.service.consul:8686\"\n        lidarr_api_key      = var.lidarr_api_key\n        lastfm_api_key      = var.lastfm_api_key\n        root_folder_path    = \"/music\"\n        quality_profile_id  = \"1\"\n        metadata_profile_id = \"1\"\n        sleep_interval      = \"3600\"\n      }\n\n      service {\n        port = \"http\"\n        name = \"lidify\"\n        tags = [\n          \"traefik.enable=true\",\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 100\n        memory = 256\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n  type = string\n}\n\nvariable \"tld\" {\n  type = string\n}\n\nvariable \"shared_dir\" {\n  
type = string\n}\n\nvariable \"lidarr_api_key\" {\n  type        = string\n  description = \"API key for Lidarr\"\n}\n\nvariable \"lastfm_api_key\" {\n  type        = string\n  description = \"Last.fm API key\"\n}\n"
  },
  {
    "path": "nomad_jobs/media-stack/maintainerr/nomad.job",
    "content": "job \"maintainerr\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/media-stack/maintainerr/nomad.job\"\n    version = \"2\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"media\" {\n    count = 1\n\n    network {\n      port \"http\" {\n        host_network = \"lan\"\n        to = 6246\n      }\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"maintainerr\" {\n      driver = \"docker\"\n\n      config {\n        image = \"ghcr.io/maintainerr/maintainerr:3.7.0\"\n        ports = [\"http\"]\n        volumes = [\n          \"${var.shared_dir}maintainerr:/opt/data\",\n        ]\n      }\n\n      env {\n        TZ = \"Etc/UTC\"\n      }\n\n      user = \"1000:1000\"\n\n      service {\n        port = \"http\"\n        name = \"maintainerr\"\n        tags = [\n          \"traefik.enable=true\"\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/\"\n          interval = \"30s\"\n          timeout  = \"5s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 200\n        memory = 512\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n  type = string\n}\n\nvariable \"tld\" {\n  type = string\n}\n\nvariable \"shared_dir\" {\n  type = string\n}\n"
  },
  {
    "path": "nomad_jobs/media-stack/mediasage/nomad.job",
    "content": "job \"mediasage\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/media-stack/mediasage/nomad.job\"\n    version  = \"1\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"playlists\" {\n    count = 1\n\n    network {\n      port \"http\" {\n        host_network = \"lan\"\n        to = \"5765\"\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"prep-disk\" {\n      driver = \"docker\"\n      lifecycle {\n        hook    = \"prestart\"\n        sidecar = false\n      }\n      config {\n        image   = \"busybox:latest\"\n        command = \"sh\"\n        args    = [\"-c\", \"mkdir -p /data && chmod 777 /data\"]\n        volumes = [\n          \"${var.shared_dir}mediasage:/data\",\n        ]\n      }\n      resources {\n        cpu    = 50\n        memory = 32\n      }\n    }\n\n    task \"mediasage\" {\n      driver = \"docker\"\n      config {\n        image = \"ghcr.io/ecwilsonaz/mediasage:latest\"\n        ports = [\"http\"]\n        dns_servers = [\"192.168.50.2\"]\n        volumes = [\n          \"${var.shared_dir}mediasage:/app/data\",\n        ]\n      }\n\n      env {\n        PLEX_URL     = \"http://plex.service.consul:32400\"\n        PLEX_TOKEN   = var.plex_token\n        AI_PROVIDER  = \"ollama\"\n        OLLAMA_URL   = \"http://ollama.service.consul:11434\"\n      }\n\n      service {\n        port = \"http\"\n        name = \"mediasage\"\n        tags = [\n          \"traefik.enable=true\",\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n      
      limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 200\n        memory = 512\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n  type = string\n}\n\nvariable \"tld\" {\n  type = string\n}\n\nvariable \"shared_dir\" {\n  type = string\n}\n\nvariable \"plex_token\" {\n  type        = string\n  description = \"Plex authentication token\"\n}\n"
  },
  {
    "path": "nomad_jobs/media-stack/multi-scrobbler/nomad.job",
    "content": "job \"multi-scrobbler\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/media-stack/multi-scrobbler/nomad.job\"\n    version  = \"1\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"scrobbler\" {\n    count = 1\n\n    network {\n      port \"http\" {\n        host_network = \"lan\"\n        to = \"9078\"\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"multi-scrobbler\" {\n      driver = \"docker\"\n      config {\n        image = \"foxxmd/multi-scrobbler:latest\"\n        ports = [\"http\"]\n        dns_servers = [\"192.168.50.2\"]\n        volumes = [\n          \"${var.shared_dir}multi-scrobbler:/config\",\n          \"local/config.json:/config/config.json\",\n        ]\n      }\n\n      env {\n        TZ = \"Etc/UTC\"\n      }\n\n      template {\n        data = <<EOH\n{\n  \"sources\": [\n    {\n      \"type\": \"plex\",\n      \"name\": \"Plex\",\n      \"data\": {\n        \"url\": \"http://plex.service.consul:32400\",\n        \"token\": \"${var.plex_token}\"\n      }\n    }\n  ],\n  \"clients\": [\n    {\n      \"type\": \"listenbrainz\",\n      \"name\": \"ListenBrainz\",\n      \"data\": {\n        \"token\": \"${var.listenbrainz_token}\",\n        \"url\": \"https://api.listenbrainz.org\",\n        \"username\": \"${var.listenbrainz_username}\"\n      }\n    },\n    {\n      \"type\": \"lastfm\",\n      \"name\": \"Last.fm\",\n      \"data\": {\n        \"apiKey\": \"${var.lastfm_api_key}\",\n        \"secret\": \"${var.lastfm_api_secret}\",\n        \"redirectUri\": \"https://multi-scrobbler.${var.tld}/lastfm/callback\"\n      }\n    }\n  ]\n}\nEOH\n        
destination = \"local/config.json\"\n        perms       = \"644\"\n      }\n\n      service {\n        port = \"http\"\n        name = \"multi-scrobbler\"\n        tags = [\n          \"traefik.enable=true\",\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 100\n        memory = 256\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n  type = string\n}\n\nvariable \"tld\" {\n  type = string\n}\n\nvariable \"shared_dir\" {\n  type = string\n}\n\nvariable \"plex_token\" {\n  type        = string\n  description = \"Plex authentication token for scrobbling\"\n}\n\nvariable \"listenbrainz_token\" {\n  type        = string\n  description = \"ListenBrainz user token\"\n}\n\nvariable \"listenbrainz_username\" {\n  type        = string\n  description = \"ListenBrainz/MusicBrainz username\"\n}\n\nvariable \"lastfm_api_key\" {\n  type        = string\n  description = \"Last.fm API key\"\n}\n\nvariable \"lastfm_api_secret\" {\n  type        = string\n  description = \"Last.fm API secret\"\n}\n"
  },
  {
    "path": "nomad_jobs/media-stack/navidrome/nomad.job",
    "content": "job \"navidrome\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/media-stack/navidrome/nomad.job\"\nversion = \"3\"\n  }\n\n  group \"downloaders\" {\n    count = 1 \n    network {\n      port \"http\" {\n        host_network = \"lan\"\n        to = \"4533\"\n      }\n    }\n\n    volume \"navidrome\" {\n      type      = \"csi\"\n      read_only = false\n      source    = \"navidrome\"\n      access_mode = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"navidrome\" {\n      driver = \"docker\"\n      config {\n        image = \"deluan/navidrome:0.61.2\"\n        dns_servers = [\"192.168.50.2\"]\n        ports = [\"http\"]\n        volumes = [\n          \"${var.music_dir}:/music\",\n        ]\n      }\n\n      volume_mount {\n        volume      = \"navidrome\"\n        destination = \"/data\"\n        read_only   = false\n      }\n\n      env {\n        UMASK_SET = \"022\"\n        TZ        = \"UTC\"\n        PUID      = \"1000\"\n        PGID      = \"1000\"\n      }\n\n      service {\n        port = \"http\"\n        name = \"navidrome\"\n        tags = [\n          \"traefik.enable=true\"\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 500\n        memory = 1024\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\nvariable \"tld\" {\n    type = string\n}\n\nvariable \"music_dir\" {\n    type = string\n}\n"
  },
  {
    "path": "nomad_jobs/media-stack/navidrome/volume.hcl",
    "content": "id           = \"navidrome\"\nexternal_id  = \"navidrome\"\nname         = \"navidrome\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"10GiB\"\ncapacity_max = \"10GiB\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"block-device\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\"]\n}\n\n"
  },
  {
    "path": "nomad_jobs/media-stack/ombi/nomad.job",
    "content": "job \"ombi\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/media-stack/ombi/nomad.job\"\nversion = \"4\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"downloaders\" {\n    count = 1 \n\n    network {\n      port \"http\" {\n        host_network = \"tailscale\"\n        to = \"3579\"\n      }\n    }\n\n    volume \"ombi\" {\n      type      = \"csi\"\n      read_only = false\n      \n      source    = \"ombi\"\n      access_mode = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"ombi\" {\n      driver = \"docker\"\n      config {\n        image = \"linuxserver/ombi:4.53.4\"\n        force_pull = \"true\"\n        ports = [\"http\"]\n      }\n\n      volume_mount {\n        volume      = \"ombi\"\n        destination = \"/config\"\n        read_only   = false\n      }\n\n      env {\n        PUID = \"65534\"\n        PGID = \"65534\"\n        TZ = \"Etc/UTC\"\n      }\n\n      service {\n        port = \"http\"\n        name = \"ombi\"\n        tags = [\n          \"traefik.enable=true\",\n          \"traefik.http.middlewares.httpsRedirect.redirectscheme.scheme=https\",\n          \"traefik.http.routers.${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_TASK_NAME}.${var.tld}\",\n          \"traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=forward-auth\"\n        ]\n\n        check {\n          type     = \"tcp\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      resources {\n        cpu    = 100\n        memory = 512\n      }\n    }\n  }\n}\n\nvariable \"region\" {}\nvariable 
\"tld\" {}\n"
  },
  {
    "path": "nomad_jobs/media-stack/ombi/volume.hcl",
    "content": "id           = \"ombi\"\nexternal_id  = \"ombi\"\nname         = \"ombi\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"1GiB\"\ncapacity_max = \"1GiB\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"block-device\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\"]\n}\n\n"
  },
  {
    "path": "nomad_jobs/media-stack/overseerr/nomad.job",
    "content": "job \"overseerr\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/media-stack/overseerr/nomad.job\"\n    version = \"7\"  // Migrate to Seerr + patch webhook bug\n  }\n\n  group \"media\" {\n    count = 1\n\n    network {\n      port \"http\" {\n        host_network = \"lan\"\n        to = 5055\n      }\n    }\n\n    volume \"overseerr\" {\n      type      = \"csi\"\n      read_only = false\n      source    = \"overseerr\"\n      access_mode = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"prep-disk\" {\n      driver = \"docker\"\n\n      volume_mount {\n        volume      = \"overseerr\"\n        destination = \"/volume/\"\n        read_only   = false\n      }\n\n      config {\n        image   = \"busybox:latest\"\n        command = \"sh\"\n        args    = [\"-c\", \"chown -R 1000:1000 /volume/\"]\n      }\n\n      resources {\n        cpu    = 200\n        memory = 128\n      }\n\n      lifecycle {\n        hook    = \"prestart\"\n        sidecar = false\n      }\n    }\n\n    task \"overseerr\" {\n      driver = \"docker\"\n      config {\n        image = \"seerr/seerr:v3.2.0\"\n        dns_servers = [\"192.168.50.2\"]\n        init = true\n        ports = [\"http\"]\n        entrypoint = [\"/bin/sh\", \"-c\", \"sed -i 's/Buffer.from(req.body.options.jsonPayload)/Buffer.from(JSON.stringify(req.body.options.jsonPayload))/g' /app/dist/routes/settings/notifications.js && exec docker-entrypoint.sh npm start\"]\n      }\n\n      volume_mount {\n        volume      = \"overseerr\"\n        destination = \"/app/config\"\n        read_only   = false\n      }\n\n      env {\n        TZ = \"Etc/UTC\"\n        LOG_LEVEL = \"info\"\n      }\n\n      service {\n        port = \"http\"\n        name = \"overseerr\"\n        tags = 
[\n          \"traefik.enable=true\"\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 500\n        memory = 384\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\nvariable \"tld\" {\n    type = string\n}\n"
  },
  {
    "path": "nomad_jobs/media-stack/overseerr/volume.hcl",
    "content": "id           = \"overseerr\"\nexternal_id  = \"overseerr\"\nname         = \"overseerr\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"2GiB\"\ncapacity_max = \"2GiB\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"block-device\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\"]\n}"
  },
  {
    "path": "nomad_jobs/media-stack/plex/nomad.job",
    "content": "job \"plex\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n  priority    = 80\n\n  meta {\n      job_file = \"nomad_jobs/media-stack/plex/nomad.job\"\nversion = \"7\"  // Pin to klo01 for QuickSync hardware transcoding\n  }\n\n  group \"downloaders\" {\n    count = 1\n\n    # Pin to klo01 for Intel QuickSync hardware transcoding\n    constraint {\n      attribute = \"${attr.unique.hostname}\"\n      value     = \"klo01\"\n    }\n\n    network {\n      port \"http\" {\n        host_network = \"lan\"\n        static = \"32400\"\n      }\n    }\n\n    // Use a CSI volume specifically optimized for databases\n    volume \"plex-db\" {\n      type      = \"csi\"\n      read_only = false\n      source    = \"plex-database\"\n      access_mode = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"plex\" {\n      driver = \"docker\"\n      config {\n        image = \"plexinc/pms-docker@sha256:4e704a2172129d8a6bc5b05ea201945b329bb6512f48a4e92ed4a4a787c35462\"\n        network_mode = \"host\"\n        privileged = \"true\"\n        force_pull = \"true\"\n        volumes = [\n          # Intel QuickSync hardware transcoding (privileged mode grants access)\n          \"/dev/dri:/dev/dri\",\n          \"/tmp:/transcode\",\n          \"${var.media_dir}:/data\",\n          \"${var.shared_dir}plex_new:/config\",\n          \"local/optimize-db.sh:/etc/cont-init.d/30-optimize-db\",\n        ]\n        // No custom command - let the container start normally\n      }\n\n      volume_mount {\n        volume      = \"plex-db\"\n        destination = \"/opt/plex-db\"\n        read_only   = false\n      }\n\n      // Setup script that runs as part of the container init 
system\n      template {\n        data = <<EOH\n#!/usr/bin/with-contenv bash\nset -euo pipefail\n\necho \"Setting up Plex database optimizations...\"\n\n# Create necessary directory structure\nmkdir -p /opt/plex-db\nDB_DIR=\"/config/Library/Application Support/Plex Media Server/Plug-in Support/Databases\"\nmkdir -p \"$DB_DIR\"\n\n# Check if we need to move existing databases\nif [ -z \"$(ls -A /opt/plex-db 2>/dev/null || true)\" ]; then\n  echo \"Database volume is empty, copying existing databases if any...\"\n  if [ -d \"$DB_DIR\" ] && [ -n \"$(ls -A \"$DB_DIR\" 2>/dev/null || true)\" ]; then\n    cp -a \"$DB_DIR\"/* /opt/plex-db/\n    echo \"Copied existing databases to persistent volume\"\n  fi\nfi\n\n# Set up link to optimized database storage (only if not already linked)\nif [ ! -L \"$DB_DIR\" ] || [ \"$(readlink \"$DB_DIR\")\" != \"/opt/plex-db\" ]; then\n  echo \"Setting up database symlink...\"\n  rm -rf \"$DB_DIR\"\n  ln -sf /opt/plex-db \"$DB_DIR\"\nfi\n\n# Install SQLite3 if needed\nif ! 
command -v sqlite3 &>/dev/null; then\n  echo \"Installing SQLite3...\"\n  apt-get update && apt-get install -y sqlite3\nfi\n\n# Set environment variables for SQLite\nexport SQLITE_TMPDIR=/tmp/plex_sqlite\nmkdir -p \"$SQLITE_TMPDIR\"\n\n# Apply optimizations to all databases\necho \"Applying SQLite optimizations to databases...\"\nfind /opt/plex-db -name \"*.db\" -type f 2>/dev/null | while read -r db; do\n  echo \"Optimizing $db\"\n  sqlite3 \"$db\" <<EOF\nPRAGMA journal_mode = WAL;\nPRAGMA synchronous = NORMAL;\nPRAGMA temp_store = MEMORY;\nPRAGMA mmap_size = 268435456;\nPRAGMA cache_size = -8000;\nPRAGMA busy_timeout = 5000;\nANALYZE;\nEOF\ndone\n\necho \"Database optimizations complete\"\nEOH\n        destination = \"local/optimize-db.sh\"\n        perms = \"755\"\n      }\n\n      env {\n        PLEX_CLAIM = \"\"  // Add your claim token if needed\n        ADVERTISE_IP = \"http://192.168.50.5:32400/\"  // Replace with your server IP\n        PLEX_UID = \"1000\"\n        PLEX_GID = \"1000\"\n        CHANGE_CONFIG_DIR_OWNERSHIP = \"false\"\n        // Skip first run setup if config exists\n      }\n\n      service {\n        port = \"http\"\n        name = \"plex\"\n        tags = [\n          \"traefik.enable=true\"\n        ]\n        check {\n          type     = \"tcp\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      resources {\n        cpu    = 1500  # QuickSync handles transcoding, minimal CPU needed\n        memory = 4096\n      }\n    }\n  }\n}\n\nvariable \"region\" {}\nvariable \"tld\" {}\nvariable \"shared_dir\" {}\nvariable \"media_dir\" {}\n"
  },
  {
    "path": "nomad_jobs/media-stack/plex/volume.hcl",
    "content": "id           = \"plex-database\"\nexternal_id  = \"plex-database\"\nname         = \"plex-database\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"20GiB\"\ncapacity_max = \"20GiB\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"block-device\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\", \"nodiratime\", \"discard\", \"data=ordered\"]\n}\n"
  },
  {
    "path": "nomad_jobs/media-stack/prowlarr/nomad.job",
    "content": "job \"prowlarr\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/media-stack/prowlarr/nomad.job\"\n    version  = \"3\"  // Right-size prowlarr memory 512 -> 256\n  }\n\n  group \"downloaders\" {\n    count = 1\n\n    network {\n      port \"http\" {\n        host_network = \"lan\"\n        static       = 9696\n      }\n      port \"flaresolverr\" {\n        host_network = \"lan\"\n        static       = 8191\n      }\n    }\n\n    volume \"prowlarr\" {\n      type            = \"csi\"\n      read_only       = false\n      source          = \"prowlarr\"\n      access_mode     = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"prowlarr\" {\n      driver = \"docker\"\n\n      config {\n        image       = \"linuxserver/prowlarr\"\n        dns_servers = [\"192.168.50.2\"]\n        ports       = [\"http\"]\n      }\n\n      volume_mount {\n        volume      = \"prowlarr\"\n        destination = \"/config\"\n        read_only   = false\n      }\n\n      env {\n        PUID = \"65534\"\n        PGID = \"65534\"\n        TZ   = \"Etc/UTC\"\n      }\n\n      service {\n        port = \"http\"\n        name = \"prowlarr\"\n        tags = [\n          \"traefik.enable=true\",\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/ping\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 500\n        memory = 256\n      }\n    }\n\n    task \"flaresolverr\" {\n      driver = \"docker\"\n\n      config {\n        image = \"ghcr.io/flaresolverr/flaresolverr:v3.4.6\"\n        ports = 
[\"flaresolverr\"]\n      }\n\n      env {\n        LOG_LEVEL = \"info\"\n        LOG_HTML  = \"false\"\n        TZ        = \"Etc/UTC\"\n      }\n\n      service {\n        port = \"flaresolverr\"\n        name = \"flaresolverr\"\n        check {\n          type     = \"http\"\n          path     = \"/\"\n          interval = \"30s\"\n          timeout  = \"5s\"\n        }\n      }\n\n      resources {\n        cpu    = 500\n        memory = 512\n      }\n\n      lifecycle {\n        hook    = \"prestart\"\n        sidecar = true\n      }\n    }\n  }\n}\n\nvariable \"region\" {}\n\nvariable \"tld\" {}\n"
  },
  {
    "path": "nomad_jobs/media-stack/prowlarr/volume.hcl",
    "content": "id           = \"prowlarr\"\nexternal_id  = \"prowlarr\"\nname         = \"prowlarr\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"5GiB\"\ncapacity_max = \"5GiB\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"block-device\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\"]\n}\n"
  },
  {
    "path": "nomad_jobs/media-stack/qbittorrent/nomad.job",
    "content": "job \"qbittorrent\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/media-stack/qbittorrent/nomad.job\"\n    version  = \"5\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"downloaders\" {\n    count = 1\n\n    network {\n      port \"http\" {\n        host_network = \"lan\"\n        static       = 8081\n      }\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"gluetun\" {\n      driver = \"docker\"\n\n      lifecycle {\n        hook    = \"prestart\"\n        sidecar = true\n      }\n\n      config {\n        image = \"qmcgaw/gluetun\"\n\n        cap_add = [\"NET_ADMIN\"]\n\n        ports = [\"http\"]\n\n        mounts = [\n          {\n            type     = \"tmpfs\"\n            target   = \"/tmp/gluetun\"\n            readonly = false\n          },\n        ]\n      }\n\n      env {\n        VPN_SERVICE_PROVIDER  = \"mullvad\"\n        VPN_TYPE              = \"wireguard\"\n        WIREGUARD_PRIVATE_KEY = var.mullvad_wireguard_key\n        WIREGUARD_ADDRESSES   = var.mullvad_wireguard_addr\n        SERVER_COUNTRIES      = \"Netherlands\"\n        FIREWALL_VPN_INPUT_PORTS = \"8081\"\n      }\n\n      resources {\n        cpu    = 500\n        memory = 512\n      }\n    }\n\n    task \"qbittorrent\" {\n      driver = \"docker\"\n\n      config {\n        image        = \"linuxserver/qbittorrent\"\n        network_mode = \"container:gluetun-${NOMAD_ALLOC_ID}\"\n\n        mounts = [\n          {\n            type     = \"bind\"\n            target   = \"/config\"\n            source   = \"${var.shared_dir}qbittorrent\"\n            readonly = false\n            bind_options = {\n              propagation = \"rshared\"\n            }\n          },\n          {\n            type     = \"bind\"\n          
  target   = \"/downloads\"\n            source   = \"${var.downloads_dir}\"\n            readonly = false\n            bind_options = {\n              propagation = \"rshared\"\n            }\n          },\n          {\n            type     = \"bind\"\n            target   = \"/media\"\n            source   = \"${var.media_dir}\"\n            readonly = false\n            bind_options = {\n              propagation = \"rshared\"\n            }\n          },\n        ]\n      }\n\n      env {\n        PUID            = \"65534\"\n        PGID            = \"65534\"\n        TZ              = \"Etc/UTC\"\n        WEBUI_PORT      = \"8081\"\n      }\n\n      service {\n        port = \"http\"\n        name = \"qbittorrent\"\n        tags = [\n          \"traefik.enable=true\",\n        ]\n        check {\n          type     = \"tcp\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"90s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 1000\n        memory = 1024\n      }\n    }\n  }\n}\n\nvariable \"region\" {}\n\nvariable \"tld\" {}\n\nvariable \"shared_dir\" {}\n\nvariable \"downloads_dir\" {}\n\nvariable \"media_dir\" {}\n\nvariable \"mullvad_wireguard_key\" {\n  type        = string\n  description = \"Mullvad WireGuard private key\"\n}\n\nvariable \"mullvad_wireguard_addr\" {\n  type        = string\n  description = \"Mullvad WireGuard interface address\"\n}\n"
  },
  {
    "path": "nomad_jobs/media-stack/radarr/nomad.job",
    "content": "job \"radarr\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/media-stack/radarr/nomad.job\"\nversion = \"10\"  // Full config.xml template with API key\n  }\n\n  group \"downloaders\" {\n    count = 1 \n    network {\n      port \"http\" {\n        host_network = \"lan\"\n        to = \"7878\"\n      }\n    }\n\n    volume \"radarr\" {\n      type      = \"csi\"\n      read_only = false\n      source    = \"radarr2\"\n      access_mode = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"radarr\" {\n      driver = \"docker\"\n      config {\n        image = \"linuxserver/radarr:6.1.1\"\n        dns_servers = [\"192.168.50.2\"]\n        ports = [\"http\"]\n        volumes = [\n          \"${var.downloads_dir}:/downloads\",\n          \"${var.movies_dir}:/media/Movies\",\n          \"local/config.xml:/config/config.xml\",\n        ]\n      }\n\n      volume_mount {\n        volume      = \"radarr\"\n        destination = \"/config\"\n        read_only   = false\n      }\n\n      env {\n        UMASK_SET = \"022\"\n        TZ        = \"UTC\"\n        PUID      = \"65534\"\n        PGID      = \"65534\"\n      }\n\n      template {\n        data = <<EOH\n<Config>\n  <PostgresUser>postgres</PostgresUser>\n  <PostgresPassword>${var.postgres_pass}</PostgresPassword>\n  <PostgresPort>5432</PostgresPort>\n  <PostgresHost>postgres.service.consul</PostgresHost>\n  <PostgresMainDb>radarr_main</PostgresMainDb>\n  <PostgresLogDb>radarr_logs</PostgresLogDb>\n  <LogLevel>info</LogLevel>\n  <UrlBase></UrlBase>\n  <BindAddress>*</BindAddress>\n  <Port>7878</Port>\n  <SslPort>9898</SslPort>\n  <EnableSsl>False</EnableSsl>\n  <LaunchBrowser>False</LaunchBrowser>\n  <ApiKey>${var.radarr_api_key}</ApiKey>\n  
<AuthenticationMethod>External</AuthenticationMethod>\n  <AuthenticationRequired>DisabledForLocalAddresses</AuthenticationRequired>\n  <TrustedProxies>100.64.0.0/10,192.168.50.0/24</TrustedProxies>\n  <Branch>master</Branch>\n  <InstanceName>Radarr</InstanceName>\n  <UpdateMechanism>Docker</UpdateMechanism>\n</Config>\nEOH\n        destination = \"local/config.xml\"\n        perms = \"644\"\n      }\n      \n      service {\n        port = \"http\"\n        name = \"radarr\"\n        tags = [\n          \"traefik.enable=true\"\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/ping\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 1000\n        memory = 512\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\nvariable \"tld\" {\n    type = string\n}\n\nvariable \"downloads_dir\" {\n    type = string\n}\n\nvariable \"tv_dir\" {\n    type = string\n}\n\nvariable \"movies_dir\" {\n    type = string\n}\n\nvariable \"postgres_pass\" {\n    type = string\n    description = \"Admin password for PostgreSQL\"\n}\n\nvariable \"radarr_api_key\" {\n    type = string\n    description = \"API key for Radarr\"\n}\n"
  },
  {
    "path": "nomad_jobs/media-stack/radarr/volume.hcl",
    "content": "id           = \"radarr2\"\nexternal_id  = \"radarr2\"\nname         = \"radarr2\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"10GiB\"\ncapacity_max = \"10GiB\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"block-device\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\"]\n}\n"
  },
  {
    "path": "nomad_jobs/media-stack/requestrr/nomad.job",
    "content": "job \"requestrr\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/media-stack/requestrr/nomad.job\"\n    version = \"1\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"media\" {\n    count = 1\n\n    network {\n      port \"http\" {\n        host_network = \"lan\"\n        to = 4545\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"requestrr\" {\n      driver = \"docker\"\n\n      config {\n        dns_servers = [\"192.168.50.2\"]\n        image = \"thomst08/requestrr:v2.1.9\"\n        ports = [\"http\"]\n        volumes = [\n          \"${var.shared_dir}requestrr:/root/config\",\n        ]\n      }\n\n      env {\n        TZ = \"Etc/UTC\"\n      }\n\n      service {\n        port = \"http\"\n        name = \"requestrr\"\n        tags = [\n          \"traefik.enable=true\"\n        ]\n        check {\n          type     = \"tcp\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      resources {\n        cpu    = 200\n        memory = 256\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n  type = string\n}\n\nvariable \"tld\" {\n  type = string\n}\n\nvariable \"shared_dir\" {\n  type = string\n}\n"
  },
  {
    "path": "nomad_jobs/media-stack/sabnzbd/nomad.job",
    "content": "job \"sabnzbd\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/media-stack/sabnzbd/nomad.job\"\nversion = \"6\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"downloaders\" {\n    count = 1 \n    network {\n      port \"http\" {\n        host_network = \"lan\"\n        static = \"8080\"\n      }\n    }\n\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"sabnzbd\" {\n      driver = \"docker\"\n      config {\n        image = \"linuxserver/sabnzbd\"\n        network_mode = \"host\"\n        ports = [\"http\"]\n        mounts = [\n          {\n            type = \"bind\"\n            target = \"/config\"\n            source = \"${var.shared_dir}sabnzbd\",\n            readonly = false\n            bind_options = {\n              propagation = \"rshared\"\n            }\n          },\n          {\n            type = \"bind\"\n            target = \"/downloads\"\n            source = \"/tmp\"\n            readonly = false\n            bind_options = {\n              propagation = \"rshared\"\n            }\n          },\n          {\n            type = \"bind\"\n            target = \"/media\"\n            source = \"${var.media_dir}\"\n            readonly = false\n            bind_options = {\n              propagation = \"rshared\"\n            }\n          }\n        ]\n      }\n  \n      env {\n        PUID = \"65534\"\n        PGID = \"65534\"\n        TZ = \"Etc/UTC\"\n      }\n\n      service {\n        port = \"http\"\n        name = \"${NOMAD_TASK_NAME}\"\n        tags = [\n          \"traefik.enable=true\"\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/api?mode=auth\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            
limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu        = 1000    # Match actual usage (952 MHz observed)\n        memory     = 3072    # Accommodate 2GB cache + 1GB overhead\n        memory_max = 4096    # Hard limit for burst usage\n      }\n    }\n  }\n}\n\nvariable \"region\" {}\n\nvariable \"tld\" {}\n\nvariable \"shared_dir\" {}\n\nvariable \"media_dir\" {}\n\nvariable \"downloads_dir\" {}\n"
  },
  {
    "path": "nomad_jobs/media-stack/sickchill/nomad.job",
    "content": "job \"sickchill\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/media-stack/sickchill/nomad.job\"\nversion = \"4\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"downloaders\" {\n    count = 1 \n    network {\n      port \"http\" {\n        host_network = \"lan\"\n        to = \"8081\"\n      }\n    }\n\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"sickchill\" {\n      driver = \"docker\"\n      config {\n        image = \"linuxserver/sickchill:2024.3.1\"\n        dns_servers = [\"192.168.50.2\"]\n        ports = [\"http\"]\n        volumes = [\n          \"${var.downloads_dir}:/downloads\",\n          \"${var.tv_dir}:/tv\",\n          \"${var.shared_dir}sickchill:/config\",\n        ]\n      }\n\n      env {\n        PUID = \"65534\"\n        PGID = \"65534\"\n        TZ = \"Etc/UTC\"\n      }\n\n      service {\n        port = \"http\"\n        name = \"sickchill\"\n        tags = [\n          \"traefik.enable=true\"\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 1000\n        memory = 256\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\nvariable \"tld\" {\n    type = string\n}\n\nvariable \"downloads_dir\" {\n    type = string\n}\n\nvariable \"tv_dir\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n"
  },
  {
    "path": "nomad_jobs/media-stack/sonarr/nomad.job",
    "content": "job \"sonarr\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/media-stack/sonarr/nomad.job\"\nversion = \"11\"  // Full config.xml template with API key\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"downloaders\" {\n    count = 1 \n    network {\n      port \"http\" {\n        host_network = \"lan\"\n        to = \"8989\"\n      }\n    }\n\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"sonarr\" {\n      driver = \"docker\"\n      config {\n        image = \"linuxserver/sonarr:4.0.17\"\n        dns_servers = [\"192.168.50.2\"]\n        ports = [\"http\"]\n        volumes = [\n          \"${var.shared_dir}sonarr:/config\",\n          \"${var.downloads_dir}:/downloads\",\n          \"${var.tv_dir}:/media/TV\",\n          \"local/config.xml:/config/config.xml\",\n        ]\n      }\n\n      env {\n        PUID = \"65534\"\n        PGID = \"65534\"\n        TZ = \"Etc/UTC\"\n      }\n\n      template {\n        data = <<EOH\n<Config>\n  <PostgresUser>postgres</PostgresUser>\n  <PostgresPassword>${var.postgres_pass}</PostgresPassword>\n  <PostgresPort>5432</PostgresPort>\n  <PostgresHost>postgres.service.consul</PostgresHost>\n  <PostgresMainDb>sonarr_main</PostgresMainDb>\n  <PostgresLogDb>sonarr_logs</PostgresLogDb>\n  <LogLevel>info</LogLevel>\n  <UrlBase></UrlBase>\n  <BindAddress>*</BindAddress>\n  <Port>8989</Port>\n  <SslPort>9898</SslPort>\n  <EnableSsl>False</EnableSsl>\n  <LaunchBrowser>False</LaunchBrowser>\n  <ApiKey>${var.sonarr_api_key}</ApiKey>\n  <AuthenticationMethod>External</AuthenticationMethod>\n  <AuthenticationRequired>DisabledForLocalAddresses</AuthenticationRequired>\n  <TrustedProxies>100.64.0.0/10,192.168.50.0/24</TrustedProxies>\n  <Branch>main</Branch>\n  
<InstanceName>Sonarr</InstanceName>\n  <UpdateMechanism>Docker</UpdateMechanism>\n</Config>\nEOH\n        destination = \"local/config.xml\"\n        perms = \"644\"\n      }\n\n      service {\n        port = \"http\"\n        name = \"sonarr\"\n        tags = [\n          \"traefik.enable=true\",\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/ping\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 1000\n        memory = 512\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\nvariable \"tld\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n\nvariable \"downloads_dir\" {\n    type = string\n}\n\nvariable \"tv_dir\" {\n    type = string\n}\n\nvariable \"postgres_pass\" {\n    type = string\n    description = \"Admin password for PostgreSQL\"\n}\n\nvariable \"sonarr_api_key\" {\n    type = string\n    description = \"API key for Sonarr\"\n}\n"
  },
  {
    "path": "nomad_jobs/media-stack/synclounge/nomad.job",
    "content": "job \"synclounge\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/media-stack/synclounge/nomad.job\"\nversion = \"4\"\n  }\n\n  group \"synclounge\" {\n    count = 1 \n    network {\n      port \"http\" {\n        host_network = \"tailscale\"\n        to = \"8088\"\n      }\n      port \"server\" {\n        host_network = \"tailscale\"\n        to = \"8089\"\n      }\n    }\n\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"plexlounge\" {\n      driver = \"docker\"\n      config {\n        image = \"starbix/synclounge\"\n        network_mode = \"host\"\n        force_pull = \"true\"\n        ports = [\"http\", \"server\"]\n      }\n\n      env {\n        DOMAIN = \"${NOMAD_TASK_NAME}.${var.tld}\"\n      }\n\n      service {\n        port = \"http\"\n\tname = \"plexlounge\"\n        tags = [\n          \"traefik.enable=true\",\n          \"traefik.http.middlewares.httpsRedirect.redirectscheme.scheme=https\",\n\n\n          \"traefik.http.routers.${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_TASK_NAME}.${var.tld}\",\n          \"traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=forward-auth\"\n        ]\n\n        check {\n          type     = \"tcp\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      service {\n        port = \"server\"\n\tname = \"syncserver\"\n        tags = [\n          \"traefik.enable=true\",\n          \"traefik.http.middlewares.httpsRedirect.redirectscheme.scheme=https\",\n\n\n          \"traefik.http.routers.syncserver.tls.domains[0].sans=syncserver.${var.tld}\",\n          \"traefik.http.routers.syncserver.middlewares=forward-auth\"\n        ]\n\n        check {\n          type     = \"tcp\"\n          
interval = \"10s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      resources {\n        cpu    = 3500\n        memory = 512\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\n\n\nvariable \"tld\" {\n    type = string\n}\n\n"
  },
  {
    "path": "nomad_jobs/media-stack/tautulli/nomad.job",
    "content": "job \"tautulli\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/media-stack/tautulli/nomad.job\"\nversion = \"3\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"metrics\" {\n    count = 1 \n    network {\n      port \"http\" {\n        host_network = \"tailscale\"\n        to = \"8181\"\n      }\n    }\n\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"tautulli\" {\n      driver = \"docker\"\n      config {\n        image = \"tautulli/tautulli\"\n        ports = [\"http\"]\n        volumes = [\n          \"${var.shared_dir}tautulli:/config\",\n          \"[[ .dirs.plexlogs ]]:/media/TV\",\n        ]\n      }\n\n      service {\n        port = \"http\"\n\tname = \"tautulli\"\n        tags = [\"net-internal\", \"net-external\", \"tautulli\", \"net.frontend.entryPoints=https\"]\n        check {\n          type     = \"http\"\n          path     = \"/\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 100\n        memory = 128\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\n\n\nvariable \"tld\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n"
  },
  {
    "path": "nomad_jobs/media-stack/tdarr/nomad.job",
    "content": "job \"tdarr\" {\n  region      = var.region\n  datacenters = [\"cheese\"]\n  type        = \"service\"\n  priority    = 50\n\n  meta {\n    job_file = \"nomad_jobs/media-stack/tdarr/nomad.job\"\n    version  = \"4\"  // Move to cheese01 for NVENC GPU transcoding\n  }\n\n  group \"tdarr\" {\n    count = 1\n\n    constraint {\n      attribute = \"${attr.unique.hostname}\"\n      value     = \"cheese01\"\n    }\n\n    network {\n      port \"http\" {\n        host_network = \"lan\"\n        static       = 8265\n      }\n      port \"server\" {\n        host_network = \"lan\"\n        static       = 8266\n      }\n    }\n\n    volume \"tdarr\" {\n      type            = \"csi\"\n      read_only       = false\n      source          = \"tdarr\"\n      access_mode     = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel      = 1\n      min_healthy_time  = \"30s\"\n      healthy_deadline  = \"9m\"\n      progress_deadline = \"15m\"\n      auto_revert       = true\n    }\n\n    task \"tdarr\" {\n      driver = \"docker\"\n\n      config {\n        image        = \"ghcr.io/haveagitgat/tdarr:latest\"\n        network_mode = \"host\"\n        privileged   = true\n        runtime      = \"nvidia\"\n        force_pull   = true\n        ports        = [\"http\", \"server\"]\n        volumes = [\n          \"/tmp/tdarr:/temp\",\n          \"${var.shared_dir}tdarr/configs:/app/configs\",\n          \"${var.shared_dir}tdarr/logs:/app/logs\",\n          \"${var.media_dir}:/media\",\n        ]\n      }\n\n      volume_mount {\n        volume      = \"tdarr\"\n        destination = \"/app/server\"\n        read_only   = false\n      }\n\n      env {\n        PUID                   = \"1000\"\n        PGID                   = \"1000\"\n        NVIDIA_VISIBLE_DEVICES = \"all\"\n        serverIP  
             = \"0.0.0.0\"\n        serverPort             = \"8266\"\n        webUIPort              = \"8265\"\n        internalNode           = \"true\"\n        nodeName               = \"cheese01\"\n      }\n\n      service {\n        port = \"http\"\n        name = \"tdarr\"\n        tags = [\n          \"traefik.enable=true\",\n        ]\n        check {\n          type     = \"tcp\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"120s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 2000\n        memory = 2048\n      }\n    }\n  }\n}\n\nvariable \"region\" {}\nvariable \"tld\" {}\nvariable \"shared_dir\" {}\nvariable \"media_dir\" {}\n"
  },
  {
    "path": "nomad_jobs/media-stack/tdarr/volume.hcl",
    "content": "id           = \"tdarr\"\nexternal_id  = \"tdarr\"\nname         = \"tdarr\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"10GiB\"\ncapacity_max = \"10GiB\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"block-device\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\"]\n}\n"
  },
  {
    "path": "nomad_jobs/misc/adb/nomad.job",
    "content": "job \"adb\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/misc/adb/nomad.job\"\nversion = \"4\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  constraint {\n    attribute = \"${meta.zigbee}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"downloaders\" {\n    count = 1 \n\n    network {\n      mode = \"host\"\n      port \"tcp\" {\n        static = \"5037\"\n        host_network = \"lan\"\n      }\n    }\n\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"adb\" {\n      driver = \"docker\"\n      config {\n        image = \"docker-registry.demonsafe.com/adb\"\n        entrypoint = [\"/local/start.sh\"]\n        network_mode = \"host\"\n        extra_hosts = [\"hassio:127.0.0.1\"]\n        args = [\"&\", \"adb\", \"-a\", \"-P\", \"5037\", \"server\", \"nodaemon\"]\n        volumes = [\n          \"${var.shared_dir}home-assistant/android:/root/.android\",\n        ]\n      }\n\n      env {\n        log_level = \"warning\"\n      }\n\n      service {\n        port = \"tcp\"\n\tname = \"adb\"\n        tags = [\"net-internal\", \"adb\"]\n        check {\n          type     = \"tcp\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      template {\ndata = <<EOH\n#!/bin/sh\necho \"Start the server in background mode...\"\nadb -a -P 5037 server nodaemon &\necho \"Sleep 10 seconds for the adb server to start\"\nsleep 10\nDEVICES=\"192.168.50.206\"\necho \"Connecting to devices.\"\nfor device in $DEVICES; do\necho \"Connect to device: $device\"\nadb connect $device\ndone   \necho \"Done.\"\necho \"Foreground the server again...\"\n       \nwait %1 
\n\nEOH\n        destination = \"local/start.sh\"\n        env         = false\n        perms       = 755\n        change_mode = \"signal\"\n        change_signal = \"SIGHUP\"\n        left_delimiter  = \"{{\"\n        right_delimiter = \"}}\"\n\n      }\n\n      resources {\n        cpu    = 100\n        memory = 10\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\nvariable \"shared_dir\" {}\n"
  },
  {
    "path": "nomad_jobs/misc/gcp-dns-updater/Dockerfile",
    "content": "# Use an official Python runtime as a parent image\nFROM python:3.14-slim\n\n# Set the working directory in the container\nWORKDIR /app\n\n# Copy the requirements file into the container at /app\nCOPY requirements.txt .\n\n# Install any needed packages specified in requirements.txt\n# Using --no-cache-dir to reduce image size\nRUN pip install --no-cache-dir -r requirements.txt\n\n# Copy the current directory contents into the container at /app\nCOPY update_dns.py .\n\n# Define the command to run the application\nCMD [\"python\", \"update_dns.py\"]\n"
  },
  {
    "path": "nomad_jobs/misc/gcp-dns-updater/README.md",
    "content": "# GCP Dynamic DNS Updater Service\n\nThis service periodically checks the public IPv4 address of the node it's running on and updates a specified A record in a Google Cloud DNS managed zone. It's designed to run as a Nomad job within the Hashi-Homelab environment, utilizing a **pre-built Docker image**.\n\n## Features\n\n*   Fetches the current public IPv4 address from `https://v4.ifconfig.co/ip`.\n*   Uses the `google-cloud-dns` Python SDK to interact with Google Cloud DNS.\n*   Authenticates using a GCP Service Account key provided via an environment variable.\n*   Checks the specified DNS record:\n    *   If it's a CNAME, it deletes the CNAME record.\n    *   If it's an A record, it updates the IP address if it has changed.\n    *   If it doesn't exist (or after deleting a CNAME), it creates the A record with the specified TTL.\n*   Runs periodically via a Nomad job, executing the Python script within the pre-built Docker container.\n\n## Prerequisites\n\n1.  **Docker:** Docker must be installed locally to build the service image.\n2.  **GCP Service Account:** You need a Google Cloud Platform service account with the necessary permissions to manage DNS records.\n    *   Go to the GCP Console -> IAM & Admin -> Service Accounts.\n    *   Create a new service account (e.g., `gcp-dns-updater-sa`).\n    *   Grant this service account the `DNS Administrator` role (`roles/dns.admin`) on the project containing your managed zone.\n    *   Create a JSON key file for this service account and download it securely. You will need the *contents* of this file, not the file itself.\n3.  **Nomad Environment:** A running Nomad cluster where this job can be scheduled. 
The Nomad clients must have Docker installed and configured.\n\n## Configuration\n\nThe service is configured via environment variables passed to the Nomad task, which are then consumed by the `update_dns.py` script running inside the Docker container:\n\n*   `GCP_DNS_ZONE_NAME`: The name of the managed zone in GCP DNS (e.g., `demonsafe-com`). The script derives the Project ID from the credentials.\n*   `GCP_DNS_RECORD_NAME`: The DNS record name to update (e.g., `*.demonsafe.com`). **Note:** The script expects the base name; the trailing dot is handled internally if needed by the SDK.\n*   `RECORD_TTL`: (Optional) The Time-To-Live (in seconds) for the created/updated A record. Defaults to 300 if not set.\n*   `GCP_PROJECT_ID`: The Google Cloud Project ID containing the DNS zone.\n*   `GCP_SERVICE_ACCOUNT_KEY_B64`: **Required.** The base64-encoded *content* of the GCP service account JSON key file.\n\n**Generating the Base64 Key:**\n\nYou need to encode the *content* of your downloaded JSON key file into a single-line base64 string.\n\nOn Linux/macOS, you can use:\n```bash\nbase64 -w 0 < /path/to/your/gcp_key.json\n```\n*(Ensure you use `-w 0` or an equivalent flag for your `base64` command to prevent line wrapping)*\n\nCopy the resulting string.\n\n**Setting Environment Variables in Nomad:**\n\nThese variables are defined within the `env` block of the `nomad.job` file using Go templating to read runtime environment variables provided by the Nomad agent (which in turn are often sourced from the deployment mechanism, like GitHub Actions):\n\n```hcl\n# Example within nomad.job task config\nenv {\n  GCP_DNS_ZONE_NAME = <<EOH\n{{ env \"NOMAD_VAR_tld\" | replace \".\" \"-\" }}\nEOH\n  GCP_DNS_RECORD_NAME = <<EOH\n*.{{ env \"NOMAD_VAR_tld\" }}\nEOH\n  GCP_SERVICE_ACCOUNT_KEY_B64 = <<EOH\n{{ env \"NOMAD_VAR_gcp_dns_admin\" }}\nEOH\n  GCP_PROJECT_ID = <<EOH\n{{ env \"NOMAD_VAR_gcp_project_id\" }}\nEOH\n  # RECORD_TTL = \"300\" # Optional, defaults to 300 in the 
script\n}\n```\n\n**Important:** The actual values for `NOMAD_VAR_tld`, `NOMAD_VAR_gcp_dns_admin`, and `NOMAD_VAR_gcp_project_id` **must** be provided securely to the Nomad agent's environment during deployment (e.g., via GitHub Actions secrets mapped in the workflow, or using Vault integration), not hardcoded directly in the job file.\n\n## Deployment\n\n1.  **Ensure Prerequisites:** Verify the service account is created, you have the base64 encoded key, and Docker is running.\n2.  **Build the Docker Image:** From the root of the `hashi-homelab` repository, run the make target:\n    ```bash\n    make build-gcp-dns-updater\n    ```\n    This builds the required Docker image tagged `gcp-dns-updater:latest` using the `gcp-dns-updater/Dockerfile`.\n3.  **Deploy the Nomad Job:**\n    *   Ensure the required environment variables (`NOMAD_VAR_tld`, `NOMAD_VAR_gcp_dns_admin`, `NOMAD_VAR_gcp_project_id`) are available to the Nomad agent running the job. This is typically handled by the CI/CD pipeline (like the GitHub Actions workflow in this repo) or Vault integration.\n    *   Deploy using the Nomad CLI (ensure you are in the repository root or adjust paths). 
This job will use the `gcp-dns-updater:latest` image built in the previous step:\n        ```bash\n        # The job will read variables from its environment\n        nomad job run gcp-dns-updater/nomad.job\n        ```\n    *   Alternatively, if using the project's Makefile structure:\n        ```bash\n        # Assumes the Makefile's deploy target doesn't need extra vars\n        # and that required env vars are set in the deployment runner\n        make deploy-gcp-dns-updater\n        ```\n\n## Files\n\n*   `update_dns.py`: The core Python script for updating DNS (runs inside the container).\n*   `requirements.txt`: Python dependencies (installed during Docker build).\n*   `Dockerfile`: Defines how to build the service's Docker image.\n*   `nomad.job`: Nomad job definition for periodic execution using the `gcp-dns-updater:latest` Docker image.\n*   `README.md`: This documentation file.\n"
  },
  {
    "path": "nomad_jobs/misc/gcp-dns-updater/nomad.job",
    "content": "job \"gcp-dns-updater\" {\n  \n  meta {\n    job_file = \"nomad_jobs/misc/gcp-dns-updater/nomad.job\"\n    version  = \"6\"  // Added SPF TXT record update\n  }\n  type        = \"batch\"\n  periodic {\n    crons = [\"*/15 * * * *\"]\n    prohibit_overlap = true \n  }\n\n  group \"updater\" {\n    count = 1\n    restart {\n      attempts = 2\n      interval = \"1m\"\n      delay = \"15s\"\n      mode = \"delay\" \n    }\n\n\n    task \"update-dns\" {\n      driver = \"docker\"\n\n      config {\n        image = \"python:3.14-slim\"\n        dns_servers = [\"192.168.50.2\"]\n        command = \"sh\"\n        args = [\"-c\", \"pip install --no-cache-dir --root-user-action=ignore google-cloud-dns requests pyyaml && python /local/update_dns.py\"]\n        volumes = [\n          \"${var.shared_dir}traefik-ingress:/shared/traefik-ingress\"\n        ]\n      }\n\n      template {\n        data = <<-EOH\nGCP_DNS_ZONE_NAME = ${var.dns_zone}\nGCP_DNS_RECORD_NAME = *.${var.tld}\nGCP_SERVICE_ACCOUNT_KEY_B64 = ${var.gcp_dns_admin}\nGCP_PROJECT_ID = ${var.gcp_project_id}\nEOH\n        env         = true\n        destination = \"secrets/file.env\" \n      }\n\n      template {\n        data = <<-EOF\nimport os\nimport requests\nimport logging\nimport sys\nimport base64\nimport binascii\nimport json\nimport time\n\nfrom google.cloud import dns\nfrom google.oauth2 import service_account\nfrom google.api_core.exceptions import GoogleAPIError\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')\n\ndef get_env_vars():\n    \"\"\"Reads required environment variables and returns them.\"\"\"\n    project_id = os.environ.get('GCP_PROJECT_ID')\n    zone_name = os.environ.get('GCP_DNS_ZONE_NAME')\n    record_name = os.environ.get('GCP_DNS_RECORD_NAME')\n    key_b64 = os.environ.get('GCP_SERVICE_ACCOUNT_KEY_B64')\n\n    if not all([project_id, zone_name, record_name, key_b64]):\n        missing = [var for var, val in [\n            
('GCP_PROJECT_ID', project_id),\n            ('GCP_DNS_ZONE_NAME', zone_name),\n            ('GCP_DNS_RECORD_NAME', record_name),\n            ('GCP_SERVICE_ACCOUNT_KEY_B64', key_b64)\n        ] if not val]\n        logging.error(f\"Missing required environment variables: {', '.join(missing)}\")\n        sys.exit(1)\n\n    return project_id, zone_name, record_name, key_b64\n\ndef get_public_ip():\n    \"\"\"Fetches the public IPv4 address.\"\"\"\n    try:\n        response = requests.get('https://v4.ifconfig.me/ip', timeout=10)\n        response.raise_for_status()\n        ip_address = response.text.strip()\n        logging.info(f\"Successfully fetched public IP: {ip_address}\")\n        return ip_address\n    except requests.exceptions.RequestException as e:\n        logging.error(f\"Error fetching public IP: {e}\")\n        sys.exit(1)\n\ndef get_dns_client(key_b64: str, project_id: str):\n    \"\"\"Creates and returns a DNS client authenticated with a base64 encoded service account key.\"\"\"\n    try:\n        logging.info(\"Decoding base64 service account key...\")\n        logging.info(f\"Original key length: {len(key_b64)}\")\n        logging.info(f\"Key content (first 50 chars): {key_b64[:50]}{'...' if len(key_b64) > 50 else ''}\")\n        \n        # Clean the base64 string - remove any whitespace/newlines\n        key_b64 = key_b64.strip().replace('\\n', '').replace('\\r', '').replace(' ', '')\n        logging.info(f\"Cleaned key length: {len(key_b64)}\")\n        logging.info(f\"Cleaned key content (first 50 chars): {key_b64[:50]}{'...' if len(key_b64) > 50 else ''}\")\n        \n        # Check if this looks like a valid base64 string\n        if len(key_b64) < 100:\n            logging.warning(f\"Service account key seems too short ({len(key_b64)} chars). 
Expected several thousand characters.\")\n            logging.warning(f\"Full key content: '{key_b64}'\")\n            logging.error(\"The GCP_SERVICE_ACCOUNT_KEY_B64 environment variable appears to contain invalid or incomplete data.\")\n            sys.exit(1)\n        \n        # Fix base64 padding if needed\n        missing_padding = len(key_b64) % 4\n        if missing_padding:\n            padding_needed = 4 - missing_padding\n            key_b64 += '=' * padding_needed\n            logging.info(f\"Added {padding_needed} padding characters\")\n        \n        logging.info(f\"Final key length: {len(key_b64)}\")\n        decoded_key = base64.b64decode(key_b64, validate=True)\n        logging.info(\"Base64 key decoded successfully.\")\n\n        logging.info(\"Parsing service account key JSON...\")\n        key_info = json.loads(decoded_key)\n        logging.info(\"Service account key JSON parsed successfully.\")\n\n        credentials = service_account.Credentials.from_service_account_info(key_info)\n        client = dns.Client(project=project_id, credentials=credentials)\n        logging.info(f\"Successfully created DNS client for project {project_id}\")\n        return client\n\n    except binascii.Error as e:\n        logging.error(f\"Failed to decode base64 service account key: {e}\")\n        sys.exit(1)\n    except json.JSONDecodeError as e:\n        logging.error(f\"Failed to parse service account key JSON: {e}\")\n        sys.exit(1)\n    except Exception as e:\n        logging.error(f\"Failed to create DNS client from service account info: {e}\")\n        sys.exit(1)\n\ndef update_traefik_whitelist(ip_address: str):\n    \"\"\"Updates Traefik IP whitelist configuration.\"\"\"\n    try:\n        logging.info(f\"Updating Traefik whitelist with IP: {ip_address}\")\n        \n        traefik_config = {\n            \"http\": {\n                \"middlewares\": {\n                    \"home-ip-whitelist\": {\n                        \"ipAllowList\": {\n   
                         \"sourceRange\": [\n                                f\"{ip_address}/32\",\n                                \"192.168.0.0/16\",\n                                \"10.0.0.0/8\",\n                                \"172.16.0.0/12\",\n                                \"100.64.0.0/10\"\n                            ]\n                        }\n                    }\n                }\n            }\n        }\n        \n        config_path = \"/shared/traefik-ingress/dynamic-whitelist.toml\"\n        \n        # Write as TOML format\n        toml_content = f\"\"\"[http.middlewares.home-ip-whitelist.ipAllowList]\nsourceRange = [\"{ip_address}/32\", \"192.168.0.0/16\", \"10.0.0.0/8\", \"172.16.0.0/12\", \"100.64.0.0/10\"]\n\"\"\"\n        \n        with open(config_path, 'w') as f:\n            f.write(toml_content)\n        \n        logging.info(f\"Successfully updated Traefik whitelist configuration at {config_path}\")\n        \n    except Exception as e:\n        logging.error(f\"Failed to update Traefik whitelist: {e}\")\n\ndef update_dns_record(client: dns.Client, project_id: str, zone_name: str, record_name: str, ip_address: str):\n    \"\"\"Updates DNS record.\"\"\"\n    try:\n        # Use zone_name directly as it should already be the correct GCP zone name\n        gcp_zone_name = zone_name\n        logging.info(f\"Targeting GCP DNS Zone: {gcp_zone_name}\")\n\n        zone = client.zone(gcp_zone_name, project_id)\n        if not zone.exists():\n            logging.error(f\"DNS zone '{gcp_zone_name}' not found in project '{project_id}'.\")\n            return\n\n        fqdn = record_name if record_name.endswith('.') else f\"{record_name}.\"\n        logging.info(f\"Checking DNS records for: {fqdn} in zone {gcp_zone_name}\")\n\n        record_sets = list(zone.list_resource_record_sets())\n\n        existing_a_record = None\n        existing_cname_record = None\n\n        for record_set in record_sets:\n            if record_set.record_type 
== 'A' and record_set.name == fqdn:\n                existing_a_record = record_set\n                logging.info(f\"Found existing A record: {existing_a_record.name} -> {existing_a_record.rrdatas}\")\n            elif record_set.record_type == 'CNAME' and record_set.name == fqdn:\n                existing_cname_record = record_set\n                logging.info(f\"Found existing CNAME record: {existing_cname_record.name} -> {existing_cname_record.rrdatas}\")\n\n        changes = zone.changes()\n        needs_update = False\n\n        if existing_cname_record:\n            logging.warning(f\"Deleting existing CNAME record {fqdn} to replace with A record.\")\n            changes.delete_record_set(existing_cname_record)\n            needs_update = True\n            existing_a_record = None\n\n        new_a_record = zone.resource_record_set(fqdn, \"A\", 300, [ip_address])\n\n        if existing_a_record:\n            if existing_a_record.rrdatas == [ip_address]:\n                logging.info(f\"Existing A record {fqdn} already points to {ip_address}. No update needed.\")\n                return\n            else:\n                logging.info(f\"Existing A record {fqdn} points to {existing_a_record.rrdatas}. Updating to {ip_address}.\")\n                changes.delete_record_set(existing_a_record)\n                changes.add_record_set(new_a_record)\n                needs_update = True\n        elif not existing_cname_record:\n            logging.info(f\"No existing A or CNAME record found for {fqdn}. 
Creating new A record pointing to {ip_address}.\")\n            changes.add_record_set(new_a_record)\n            needs_update = True\n        elif existing_cname_record:\n             logging.info(f\"Adding A record for {fqdn} pointing to {ip_address} after CNAME deletion.\")\n             changes.add_record_set(new_a_record)\n\n        if needs_update:\n            logging.info(f\"Executing DNS changes for {fqdn} in zone {gcp_zone_name}...\")\n            changes.create()\n            while changes.status != 'done':\n                logging.info(f\"Waiting for DNS changes to complete (status: {changes.status})...\")\n                time.sleep(5)\n                changes.reload()\n            logging.info(f\"Successfully updated DNS record {fqdn} to {ip_address} in zone {gcp_zone_name}.\")\n        else:\n            logging.info(\"No DNS changes were necessary.\")\n\n    except GoogleAPIError as e:\n        logging.error(f\"GCP API Error updating DNS record {fqdn} in zone {gcp_zone_name}: {e}\")\n    except Exception as e:\n        logging.error(f\"An unexpected error occurred during DNS update for {fqdn} in zone {gcp_zone_name}: {e}\")\n\ndef update_spf_record(client: dns.Client, project_id: str, zone_name: str, record_name: str, ip_address: str):\n    \"\"\"Updates the SPF TXT record on the bare domain with the current public IP.\"\"\"\n    try:\n        gcp_zone_name = zone_name\n        logging.info(f\"Updating SPF record in zone: {gcp_zone_name}\")\n\n        zone = client.zone(gcp_zone_name, project_id)\n        if not zone.exists():\n            logging.error(f\"DNS zone '{gcp_zone_name}' not found in project '{project_id}'.\")\n            return\n\n        # Derive bare domain from record_name (e.g., \"*.demonsafe.com\" -> \"demonsafe.com.\")\n        domain = record_name.lstrip('*.') if record_name.startswith('*.') else record_name\n        fqdn = domain if domain.endswith('.') else f\"{domain}.\"\n        logging.info(f\"Checking TXT records for: 
{fqdn}\")\n\n        spf_value = f'\"v=spf1 ip4:{ip_address} ~all\"'\n\n        record_sets = list(zone.list_resource_record_sets())\n        existing_txt = None\n        for rs in record_sets:\n            if rs.record_type == 'TXT' and rs.name == fqdn:\n                existing_txt = rs\n                logging.info(f\"Found existing TXT record: {rs.name} -> {rs.rrdatas}\")\n                break\n\n        changes = zone.changes()\n        needs_update = False\n\n        if existing_txt:\n            new_rrdatas = []\n            spf_found = False\n            for rd in existing_txt.rrdatas:\n                if 'v=spf1' in rd:\n                    spf_found = True\n                    if ip_address in rd:\n                        logging.info(f\"SPF record already contains {ip_address}. No update needed.\")\n                        return\n                    logging.info(f\"Replacing SPF entry: {rd} -> {spf_value}\")\n                    new_rrdatas.append(spf_value)\n                else:\n                    new_rrdatas.append(rd)\n            if not spf_found:\n                logging.info(f\"No existing SPF entry found. Adding: {spf_value}\")\n                new_rrdatas.append(spf_value)\n\n            changes.delete_record_set(existing_txt)\n            new_txt = zone.resource_record_set(fqdn, \"TXT\", 300, new_rrdatas)\n            changes.add_record_set(new_txt)\n            needs_update = True\n        else:\n            logging.info(f\"No TXT record found for {fqdn}. 
Creating with SPF: {spf_value}\")\n            new_txt = zone.resource_record_set(fqdn, \"TXT\", 300, [spf_value])\n            changes.add_record_set(new_txt)\n            needs_update = True\n\n        if needs_update:\n            logging.info(f\"Executing SPF TXT changes for {fqdn}...\")\n            changes.create()\n            while changes.status != 'done':\n                logging.info(f\"Waiting for SPF changes to complete (status: {changes.status})...\")\n                time.sleep(5)\n                changes.reload()\n            logging.info(f\"Successfully updated SPF record for {fqdn} with ip4:{ip_address}\")\n\n    except GoogleAPIError as e:\n        logging.error(f\"GCP API Error updating SPF record: {e}\")\n    except Exception as e:\n        logging.error(f\"Unexpected error updating SPF record: {e}\")\n\nif __name__ == \"__main__\":\n    logging.info(\"Starting DNS update script.\")\n    project_id, zone_name, record_name, key_b64 = get_env_vars()\n    logging.info(f\"Environment variables loaded - zone_name: '{zone_name}', record_name: '{record_name}'\")\n    public_ip = get_public_ip()\n\n    if public_ip:\n        dns_client = get_dns_client(key_b64, project_id)\n        if dns_client:\n            update_dns_record(dns_client, project_id, zone_name, record_name, public_ip)\n            update_spf_record(dns_client, project_id, zone_name, record_name, public_ip)\n            update_traefik_whitelist(public_ip)\n            logging.info(\"DNS, SPF, and Traefik whitelist update script finished.\")\n        else:\n            logging.error(\"Exiting due to DNS client initialization failure.\")\n            sys.exit(1)\n    else:\n        logging.error(\"Exiting due to inability to fetch public IP.\")\n        sys.exit(1)\n    \n    # Sleep to allow log viewing before container exits\n    logging.info(\"Sleeping for 10 seconds to allow log viewing...\")\n    time.sleep(10)\nEOF\n        destination = \"local/update_dns.py\"\n      }\n\n      
resources {\n        cpu    = 100 \n        memory = 128  \n      }\n    }\n  }\n}\n\nvariable \"gcp_project_id\" {}\nvariable \"dns_zone\" {}\nvariable \"tld\" {}\nvariable \"gcp_dns_admin\" {}\nvariable \"shared_dir\" {}\n"
  },
  {
    "path": "nomad_jobs/misc/gcp-dns-updater/requirements.txt",
    "content": "google-cloud-dns\nrequests\ngoogle-auth\n"
  },
  {
    "path": "nomad_jobs/misc/gcp-dns-updater/update_dns.py",
    "content": "\nimport os\nimport requests\nimport logging\nimport sys\nimport base64\nimport json\nimport time  # Moved import to top\n\n# Import GCP specific libraries\nfrom google.cloud import dns\nfrom google.oauth2 import service_account\nfrom google.api_core.exceptions import GoogleAPIError\n\n# Setup logging\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')\n\ndef get_env_vars():\n    \"\"\"Reads required environment variables and returns them.\"\"\"\n    project_id = os.environ.get('GCP_PROJECT_ID')\n    zone_name = os.environ.get('GCP_DNS_ZONE_NAME') # This will be the TLD like \"demonsafe.com\"\n    record_name = os.environ.get('GCP_DNS_RECORD_NAME')\n    key_b64 = os.environ.get('GCP_SERVICE_ACCOUNT_KEY_B64') # Changed variable name\n\n    if not all([project_id, zone_name, record_name, key_b64]): # Check for key_b64\n        missing = [var for var, val in [\n            ('GCP_PROJECT_ID', project_id),\n            ('GCP_DNS_ZONE_NAME', zone_name),\n            ('GCP_DNS_RECORD_NAME', record_name),\n            ('GCP_SERVICE_ACCOUNT_KEY_B64', key_b64) # Updated missing check\n        ] if not val]\n        logging.error(f\"Missing required environment variables: {', '.join(missing)}\")\n        sys.exit(1)\n\n    return project_id, zone_name, record_name, key_b64 # Return key_b64\n\ndef get_public_ip():\n    \"\"\"Fetches the public IPv4 address.\"\"\"\n    try:\n        response = requests.get('https://v4.ifconfig.me/ip', timeout=10)\n        response.raise_for_status()  # Raise an exception for bad status codes\n        ip_address = response.text.strip()\n        logging.info(f\"Successfully fetched public IP: {ip_address}\")\n        return ip_address\n    except requests.exceptions.RequestException as e:\n        logging.error(f\"Error fetching public IP: {e}\")\n        sys.exit(1) # Exit if IP cannot be fetched\n\ndef get_dns_client(key_b64: str, project_id: str): # Changed key_path to key_b64 and added 
project_id\n    \"\"\"Creates and returns a DNS client authenticated with a base64 encoded service account key.\"\"\"\n    try:\n        # Decode the base64 string\n        logging.info(\"Decoding base64 service account key...\")\n        decoded_key = base64.b64decode(key_b64)\n        logging.info(\"Base64 key decoded successfully.\")\n\n        # Parse the decoded JSON key\n        logging.info(\"Parsing service account key JSON...\")\n        key_info = json.loads(decoded_key)\n        logging.info(\"Service account key JSON parsed successfully.\")\n\n        # Create credentials from the parsed key info\n        credentials = service_account.Credentials.from_service_account_info(key_info)\n\n        # Use the provided project_id, not the one from credentials, to ensure consistency\n        client = dns.Client(project=project_id, credentials=credentials)\n        logging.info(f\"Successfully created DNS client for project {project_id}\")\n        return client\n\n    except base64.binascii.Error as e:\n        logging.error(f\"Failed to decode base64 service account key: {e}\")\n        sys.exit(1)\n    except json.JSONDecodeError as e:\n        logging.error(f\"Failed to parse service account key JSON: {e}\")\n        sys.exit(1)\n    except Exception as e:\n        logging.error(f\"Failed to create DNS client from service account info: {e}\")\n        sys.exit(1)\n\ndef update_dns_record(client: dns.Client, project_id: str, zone_name: str, record_name: str, ip_address: str):\n    \"\"\"\n    Checks and updates/creates an A record for the given name in the specified zone,\n    replacing a CNAME if necessary.\n\n    Args:\n        client: Authenticated DNS client.\n        project_id: GCP project ID.\n        zone_name: The domain TLD (e.g., \"demonsafe.com\"). 
This will be converted\n                   to the GCP zone name format (e.g., \"demonsafe-com\").\n        record_name: The specific record to update (e.g., \"*.demonsafe.com\").\n        ip_address: The public IP address to set.\n    \"\"\"\n    try:\n        # Convert the TLD zone name (e.g., \"demonsafe.com\") to GCP zone name format (e.g., \"demonsafe-com\")\n        gcp_zone_name = zone_name.replace('.', '-')\n        logging.info(f\"Targeting GCP DNS Zone: {gcp_zone_name}\")\n\n        zone = client.zone(gcp_zone_name, project_id)\n        if not zone.exists():\n            logging.error(f\"DNS zone '{gcp_zone_name}' not found in project '{project_id}'.\")\n            return # Cannot proceed without the zone\n\n        # Ensure record_name ends with a dot for FQDN matching\n        fqdn = record_name if record_name.endswith('.') else f\"{record_name}.\"\n        logging.info(f\"Checking DNS records for: {fqdn} in zone {gcp_zone_name}\")\n\n        record_sets = list(zone.list_resource_record_sets(filter_=f\"name={fqdn}\"))\n\n        existing_a_record = None\n        existing_cname_record = None\n\n        for record_set in record_sets:\n            if record_set.record_type == 'A' and record_set.name == fqdn:\n                existing_a_record = record_set\n                logging.info(f\"Found existing A record: {existing_a_record.name} -> {existing_a_record.rrdatas}\")\n            elif record_set.record_type == 'CNAME' and record_set.name == fqdn:\n                existing_cname_record = record_set\n                logging.info(f\"Found existing CNAME record: {existing_cname_record.name} -> {existing_cname_record.rrdatas}\")\n\n        changes = zone.changes()\n        needs_update = False\n\n        # Handle existing CNAME (delete it to replace with A)\n        if existing_cname_record:\n            logging.warning(f\"Deleting existing CNAME record {fqdn} to replace with A record.\")\n            changes.delete_record_set(existing_cname_record)\n        
    needs_update = True\n            # Ensure we don't try to delete an A record if we just deleted a CNAME\n            existing_a_record = None\n\n        # Define the new A record we want\n        new_a_record = zone.resource_record_set(fqdn, \"A\", 300, [ip_address])\n\n        # Handle existing A record\n        if existing_a_record:\n            if existing_a_record.rrdatas == [ip_address]:\n                logging.info(f\"Existing A record {fqdn} already points to {ip_address}. No update needed.\")\n                return # Nothing to do\n            else:\n                logging.info(f\"Existing A record {fqdn} points to {existing_a_record.rrdatas}. Updating to {ip_address}.\")\n                changes.delete_record_set(existing_a_record)\n                changes.add_record_set(new_a_record)\n                needs_update = True\n        # Handle case where no A record (and no CNAME was found/deleted)\n        elif not existing_cname_record: # Only add if we didn't already decide to replace CNAME\n            logging.info(f\"No existing A or CNAME record found for {fqdn}. 
Creating new A record pointing to {ip_address}.\")\n            changes.add_record_set(new_a_record)\n            needs_update = True\n        # Handle case where CNAME was found and deleted - we still need to add the A record\n        elif existing_cname_record:\n             logging.info(f\"Adding A record for {fqdn} pointing to {ip_address} after CNAME deletion.\")\n             changes.add_record_set(new_a_record)\n             # needs_update should already be True\n\n        # Execute the changes if any were queued\n        if needs_update:\n            logging.info(f\"Executing DNS changes for {fqdn} in zone {gcp_zone_name}...\")\n            changes.create()\n            # Wait until the changes are finished.\n            while changes.status != 'done':\n                logging.info(f\"Waiting for DNS changes to complete (status: {changes.status})...\")\n                time.sleep(5) # Wait 5 seconds before checking again\n                changes.reload()\n            logging.info(f\"Successfully updated DNS record {fqdn} to {ip_address} in zone {gcp_zone_name}.\")\n        else:\n            # This case should only be hit if an A record existed and was correct\n            logging.info(\"No DNS changes were necessary.\")\n\n    except GoogleAPIError as e:\n        logging.error(f\"GCP API Error updating DNS record {fqdn} in zone {gcp_zone_name}: {e}\")\n    except Exception as e:\n        logging.error(f\"An unexpected error occurred during DNS update for {fqdn} in zone {gcp_zone_name}: {e}\")\n\n\ndef update_spf_record(client: dns.Client, project_id: str, zone_name: str, record_name: str, ip_address: str):\n    \"\"\"Updates the SPF TXT record on the bare domain with the current public IP.\"\"\"\n    try:\n        gcp_zone_name = zone_name.replace('.', '-')\n        logging.info(f\"Updating SPF record in zone: {gcp_zone_name}\")\n\n        zone = client.zone(gcp_zone_name, project_id)\n        if not zone.exists():\n            logging.error(f\"DNS zone 
'{gcp_zone_name}' not found in project '{project_id}'.\")\n            return\n\n        # Derive bare domain from record_name (e.g., \"*.demonsafe.com\" -> \"demonsafe.com.\")\n        domain = record_name.lstrip('*.') if record_name.startswith('*.') else record_name\n        fqdn = domain if domain.endswith('.') else f\"{domain}.\"\n        logging.info(f\"Checking TXT records for: {fqdn}\")\n\n        spf_value = f'\"v=spf1 ip4:{ip_address} ~all\"'\n\n        record_sets = list(zone.list_resource_record_sets(filter_=f\"name={fqdn}\"))\n        existing_txt = None\n        for rs in record_sets:\n            if rs.record_type == 'TXT' and rs.name == fqdn:\n                existing_txt = rs\n                logging.info(f\"Found existing TXT record: {rs.name} -> {rs.rrdatas}\")\n                break\n\n        changes = zone.changes()\n        needs_update = False\n\n        if existing_txt:\n            new_rrdatas = []\n            spf_found = False\n            for rd in existing_txt.rrdatas:\n                if 'v=spf1' in rd:\n                    spf_found = True\n                    if ip_address in rd:\n                        logging.info(f\"SPF record already contains {ip_address}. No update needed.\")\n                        return\n                    logging.info(f\"Replacing SPF entry: {rd} -> {spf_value}\")\n                    new_rrdatas.append(spf_value)\n                else:\n                    new_rrdatas.append(rd)\n            if not spf_found:\n                logging.info(f\"No existing SPF entry found. Adding: {spf_value}\")\n                new_rrdatas.append(spf_value)\n\n            changes.delete_record_set(existing_txt)\n            new_txt = zone.resource_record_set(fqdn, \"TXT\", 300, new_rrdatas)\n            changes.add_record_set(new_txt)\n            needs_update = True\n        else:\n            logging.info(f\"No TXT record found for {fqdn}. 
Creating with SPF: {spf_value}\")\n            new_txt = zone.resource_record_set(fqdn, \"TXT\", 300, [spf_value])\n            changes.add_record_set(new_txt)\n            needs_update = True\n\n        if needs_update:\n            logging.info(f\"Executing SPF TXT changes for {fqdn}...\")\n            changes.create()\n            while changes.status != 'done':\n                logging.info(f\"Waiting for SPF changes to complete (status: {changes.status})...\")\n                time.sleep(5)\n                changes.reload()\n            logging.info(f\"Successfully updated SPF record for {fqdn} with ip4:{ip_address}\")\n\n    except GoogleAPIError as e:\n        logging.error(f\"GCP API Error updating SPF record: {e}\")\n    except Exception as e:\n        logging.error(f\"Unexpected error updating SPF record: {e}\")\n\n\nif __name__ == \"__main__\":\n    logging.info(\"Starting DNS update script.\")\n    project_id, zone_name, record_name, key_b64 = get_env_vars()\n    public_ip = get_public_ip()\n\n    if public_ip:\n        dns_client = get_dns_client(key_b64, project_id)\n        if dns_client:\n            update_dns_record(dns_client, project_id, zone_name, record_name, public_ip)\n            update_spf_record(dns_client, project_id, zone_name, record_name, public_ip)\n            logging.info(\"DNS update script finished.\")\n        else:\n            # Error handled in get_dns_client, it exits\n            logging.error(\"Exiting due to DNS client initialization failure.\")\n            sys.exit(1) # Explicit exit for clarity\n    else:\n        # Error handled in get_public_ip, it exits\n        logging.error(\"Exiting due to inability to fetch public IP.\")\n        sys.exit(1) # Explicit exit for clarity\n"
  },
  {
    "path": "nomad_jobs/misc/gitea/nomad.job",
    "content": "job \"gitea\" {\n  \n  meta {\n  job_file = \"nomad_jobs/misc/gitea/nomad.job\"\n  }\nregion = var.region\n  datacenters = [\"dc1\"]\n  type = \"service\"\n    group \"svc\" {\n      count = 1\n      volume \"gitea-data\" {\n        type      = \"host\"\n        source    = \"gitea-data\"\n        read_only = false\n      }\n      volume \"gitea-db\" {\n        type      = \"host\"\n        source    = \"gitea-db\"\n        read_only = false\n      }\n      restart {\n        attempts = 5\n        delay    = \"30s\"\n      }\n      task \"app\" {\n        driver = \"docker\"\n        volume_mount {\n          volume      = \"gitea-data\"\n          destination = \"/data\"\n          read_only   = false\n      }\n        config {\n          image = \"gitea/gitea\"\n          port_map {\n            http     = 3000\n            ssh_pass = 22\n          }\n        }\n        env = {\n          \"APP_NAME\"   = \"Gitea: Git with a cup of tea\"\n          \"RUN_MODE\"   = \"prod\"\n          \"SSH_DOMAIN\" = \"git.${var.tld}\"\n          \"SSH_PORT\"   = \"22\"\n          \"ROOT_URL\"   = \"http://git.${var.tld}/\"\n          \"USER_UID\"   = \"1002\"\n          \"USER_GID\"   = \"1002\"\n          \"DB_TYPE\"    = \"postgres\"\n          \"DB_NAME\"    = \"gitea\"\n          \"DB_USER\"    = \"gitea\"\n          \"DB_PASSWD\"  = \"gitea\"\n          \"SHOW_REGISTRATION_BUTTON\" = \"false\"\n        }\n      template {\ndata = <<EOH\nGITEA__database__HOST=\"{{ env \"NOMAD_ADDR_db_db\" }}\"\nEOH\n        destination = \"local/env\"\n        env         = true\n        }\n        resources {\n          cpu    = 200\n          memory = 256\n            network {\n              port \"http\" {}\n              port \"ssh_pass\" {\n                static = \"2222\"\n              }\n            }\n        }\n        service {\n          name = \"gitea\"\n          port = \"http\"\n          tags = [\n            \"traefik.enable=true\",\n            
\"traefik.http.middlewares.httpsRedirect.redirectscheme.scheme=https\",\n\n\n            \"traefik.http.routers.${NOMAD_TASK_NAME}.tls.domains[0].sans=git.${var.tld}\",\n            \"traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=forward-auth\"\n          ]\n        }\n      }\n      task \"db\" {\n        driver = \"docker\"\n          volume_mount {\n            volume      = \"gitea-db\"\n            destination = \"/var/lib/postgresql/data\"\n            read_only   = false\n          }\n        config {\n          image = \"postgres:17-alpine\"\n          port_map {\n            db = 5432\n          }\n        }\n        template {\ndata = <<EOH\nPOSTGRES_USER=\"gitea\"\nPOSTGRES_PASSWORD=\"gitea\"\nPOSTGRES_DB=\"gitea\"\nEOH\n        destination = \"local/env\"\n        env         = true\n        }\n        resources {\n          cpu    = 200\n          memory = 128\n          network {\n            port \"db\" {}\n          }\n        }\n      }\n    }\n}\nvariable \"region\" {\n    type = string\n}\n\n\n\nvariable \"tld\" {\n    type = string\n}\n\n"
  },
  {
    "path": "nomad_jobs/misc/linuxgsm/nomad.job",
    "content": "job \"linuxgsm\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/misc/linuxgsm/nomad.job\"\nversion = \"2\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"gaming\" {\n    count = 1\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    task \"linuxgsm\" {\n      driver = \"docker\"\n      config {\n        image = \"gameservermanagers/linuxgsm-docker\"\n        network_mode = \"host\"\n        command = \"/home/linuxgsm/rustserver\"\n        args = [\"start\"]\n        volumes = [\n          \"${var.shared_dir}linuxgsm:/home/linuxgsm\",\n        ]\n      }\n\n      service {\n        port = \"port0\"\n\tname = \"linuxgsm\"\n      }\n\n      env {\n        GAMESERVER = \"rustserver\"\n        LGSM_GITHUBUSER = \"GameServerManagers\"\n        LGSM_GITHUBREPO = \"LinuxGSM\"\n        LGSM_GITHUBBRANCH = \"master\"\n      }\n\n      resources {\n        cpu    = 100\n        memory = 8000\n        network {\n          port \"port0\" { static = \"27015\" }\n          port \"port1\" { static = \"27020\" }\n          port \"port2\" { static = \"27005\" }\n        }\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\n\n\nvariable \"tld\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n\n"
  },
  {
    "path": "nomad_jobs/misc/murmur/nomad.job",
    "content": "job \"murmur\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/misc/murmur/nomad.job\"\nservice_owner = \"@pmanuk\"\n    version = \"2\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"chat\" {\n    count = 1\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    task \"mumble-server\" {\n      driver = \"docker\"\n      config {\n        image = \"vimagick/murmur\"\n        force_pull = true\n        network_mode = \"host\"\n        volumes = [\n          \"${var.shared_dir}murmur-data:/opt/murmur/data\",\n          \"${var.shared_dir}murmur-cert:/opt/murmur/cert\",\n          \"local/murmur-config:/etc/murmur/murmur.ini\",\n        ]\n      }\n\n      service {\n         tags = [\n          \"traefik.enable=true\",\n          \"traefik.tcp.routers.${NOMAD_TASK_NAME}.rule=HostSNI(`mumble-server.${var.tld}`)\",\n          \"traefik.tcp.services.${NOMAD_TASK_NAME}.loadbalancer.server.port=${NOMAD_PORT_0}\",\n          \"traefik.tcp.routers.${NOMAD_TASK_NAME}.entrypoints=mumble\",\n          \"traefik.tcp.routers.${NOMAD_TASK_NAME}.tls.passthrough=true\",\n          \"traefik.udp.routers.${NOMAD_TASK_NAME}.service=mumble-server\",\n          \"traefik.udp.services.${NOMAD_TASK_NAME}.loadbalancer.server.port=${NOMAD_PORT_0}\",\n          \"traefik.udp.routers.${NOMAD_TASK_NAME}.entrypoints=mumble-udp\",\n         ]\n         name = \"mumble-server\"\n         port = \"0\"\n      }\n\n      env {\n        TZ = \"Europe/Amsterdam\"\n      }\n\n      template {\ndata = <<EOH\n; Murmur configuration file.\n;\n; General notes:\n; * Settings in this file are default settings and many of them can be overridden\n;   with virtual server specific configuration via the Ice or DBus interface.\n; * Due to the way this configuration 
file is read some rules have to be\n;   followed when specifying variable values (as in variable = value):\n;     * Make sure to quote the value when using commas in strings or passwords.\n;        NOT variable = super,secret BUT variable = \"super,secret\"\n;     * Make sure to escape special characters like '\\' or '\"' correctly\n;        NOT variable = \"\"\" BUT variable = \"\\\"\"\n;        NOT regex = \\w* BUT regex = \\\\w*\n\n; Path to database. If blank, will search for\n; murmur.sqlite in default locations or create it if not found.\ndatabase=\n\n; Murmur defaults to using SQLite with its default rollback journal.\n; In some situations, using SQLite's write-ahead log (WAL) can be\n; advantageous.\n; If you encounter slowdowns when moving between channels and similar\n; operations, enabling the SQLite write-ahead log might help.\n;\n; To use SQLite's write-ahead log, set sqlite_wal to one of the following\n; values:\n;\n; 0 - Use SQLite's default rollback journal.\n; 1 - Use write-ahead log with synchronous=NORMAL.\n;     If Murmur crashes, the database will be in a consistent state, but\n;     the most recent changes might be lost if the operating system did\n;     not write them to disk yet. This option can improve Murmur's\n;     interactivity on busy servers, or servers with slow storage.\n; 2 - Use write-ahead log with synchronous=FULL.\n;     All database writes are synchronized to disk when they are made.\n;     If Murmur crashes, the database will be include all completed writes.\n;sqlite_wal=0\n\n; If you wish to use something other than SQLite, you'll need to set the name\n; of the database above, and also uncomment the below.\n; Sticking with SQLite is strongly recommended, as it's the most well tested\n; and by far the fastest solution.\n;\n;dbDriver=QMYSQL\n;dbUsername=\n;dbPassword=\n;dbHost=\n;dbPort=\n;dbPrefix=murmur_\n;dbOpts=\n\n; Murmur defaults to not using D-Bus. 
If you wish to use dbus, which is one of the\n; RPC methods available in Murmur, please specify so here.\n;\n;dbus=session\n\n; Alternate D-Bus service name. Only use if you are running distinct\n; murmurd processes connected to the same D-Bus daemon.\n;dbusservice=net.sourceforge.mumble.murmur\n\n; If you want to use ZeroC Ice to communicate with Murmur, you need\n; to specify the endpoint to use. Since there is no authentication\n; with ICE, you should only use it if you trust all the users who have\n; shell access to your machine.\n; Please see the ICE documentation on how to specify endpoints.\nice=\"tcp -h 127.0.0.1 -p 6502\"\n\n; Ice primarily uses local sockets. This means anyone who has a\n; user account on your machine can connect to the Ice services.\n; You can set a plaintext \"secret\" on the Ice connection, and\n; any script attempting to access must then have this secret\n; (as context with name \"secret\").\n; Access is split in read (look only) and write (modify)\n; operations. Write access always includes read access,\n; unless read is explicitly denied (see note below).\n;\n; Note that if this is uncommented and with empty content,\n; access will be denied.\n\n;icesecretread=\nicesecretwrite=\n\n; If you want to expose Murmur's experimental gRPC API, you\n; need to specify an address to bind on.\n; Note: not all builds of Murmur support gRPC. If gRPC is not\n; available, Murmur will warn you in its log output.\n;grpc=\"127.0.0.1:50051\"\n; Specifying both a certificate and key file below will cause gRPC to use\n; secured, TLS connections.\n; When using secured connections you need to also set the list of authorized\n; clients. grpcauthorized is a space delimited list of SHA256 fingerprints\n; for authorized client certificates.\n; Get this from the command line:\n; openssl x509 -in cert.pem -SHA256 -noout -fingerprint\n;grpccert=\"\"\n;grpckey=\"\"\n;grpcauthorized=\"\"\n\n; Specifies the file Murmur should log to. 
By default, Murmur\n; logs to the file 'murmur.log'. If you leave this field blank\n; on Unix-like systems, Murmur will force itself into foreground\n; mode which logs to the console.\n;logfile=murmur.log\n\n; If set, Murmur will write its process ID to this file\n; when running in daemon mode (when the -fg flag is not\n; specified on the command line). Only available on\n; Unix-like systems.\n;pidfile=\n\n; The below will be used as defaults for new configured servers.\n; If you're just running one server (the default), it's easier to\n; configure it here than through D-Bus or Ice.\n;\n; Welcome message sent to clients when they connect.\n; If the welcome message is set to an empty string,\n; no welcome message will be sent to clients.\nwelcometext=\"<br />Welcome to this server running <b>Murmur</b>.<br />Enjoy your stay!<br />\"\n\n; The welcometext can also be read from an external file which might be useful\n; if you want to specify a rather lengthy text. If a value for welcometext is\n; set, the welcometextfile will not be read.\n;welcometextfile=\n\n; Port to bind TCP and UDP sockets to.\nport={{ env \"NOMAD_PORT_0\" }}\n\n; Specific IP or hostname to bind to.\n; If this is left blank (default), Murmur will bind to all available addresses.\n;host=\n\n; Password to join server.\nserverpassword=\n\n; Maximum bandwidth (in bits per second) clients are allowed\n; to send speech at.\nbandwidth=558000\n\n; Murmur and Mumble are usually pretty good about cleaning up hung clients, but\n; occasionally one will get stuck on the server. The timeout setting will cause\n; a periodic check of all clients who haven't communicated with the server in\n; this many seconds - causing zombie clients to be disconnected.\n;\n; Note that this has no effect on idle clients or people who are AFK. 
It will\n; only affect people who are already disconnected, and just haven't told the\n; server.\n;timeout=30\n\n; Maximum number of concurrent clients allowed.\nusers=100\n\n; Where users sets a blanket limit on the number of clients per virtual server,\n; usersperchannel sets a limit on the number per channel. The default is 0, for\n; no limit.\n;usersperchannel=0\n\n; Per-user rate limiting\n;\n; These two settings allow to configure the per-user rate limiter for some\n; command messages sent from the client to the server. The messageburst setting\n; specifies an amount of messages which are allowed in short bursts. The\n; messagelimit setting specifies the number of messages per second allowed over\n; a longer period. If a user hits the rate limit, his packages are then ignored\n; for some time. Both of these settings have a minimum of 1 as setting either to\n; 0 could render the server unusable.\nmessageburst=5\nmessagelimit=1\n\n; Respond to UDP ping packets.\n;\n; Setting to true exposes the current user count, the maximum user count, and\n; the server's maximum bandwidth per client to unauthenticated users. In the\n; Mumble client, this information is shown in the Connect dialog.\nallowping=true\n\n; Amount of users with Opus support needed to force Opus usage, in percent.\n; 0 = Always enable Opus, 100 = enable Opus if it's supported by all clients.\n;opusthreshold=100\n\n; Maximum depth of channel nesting. Note that some databases like MySQL using\n; InnoDB will fail when operating on deeply nested channels.\n;channelnestinglimit=10\n\n; Maximum number of channels per server. 0 for unlimited. 
Note that an\n; excessive number of channels will impact server performance\n;channelcountlimit=1000\n\n; Regular expression used to validate channel names.\n; (Note that you have to escape backslashes with \\ )\n;channelname=[ \\\\-=\\\\w\\\\#\\\\[\\\\]\\\\{\\\\}\\\\(\\\\)\\\\@\\\\|]+\n\n; Regular expression used to validate user names.\n; (Note that you have to escape backslashes with \\ )\n;username=[-=\\\\w\\\\[\\\\]\\\\{\\\\}\\\\(\\\\)\\\\@\\\\|\\\\.]+\n\n; If a user has no stored channel (they've never been connected to the server\n; before, or rememberchannel is set to false) and the client hasn't been given\n; a URL that includes a channel path, the default behavior is that they will\n; end up in the root channel.\n;\n; You can set this setting to a channel ID, and the user will automatically be\n; moved into that channel instead. Note that this is the numeric ID of the\n; channel, which can be a little tricky to get (you'll either need to use an\n; RPC mechanism, watch the console of a debug client, or root around through\n; the Murmur Database to get it).\n;\n;defaultchannel=0\n\n; When a user connects to a server they've already been on, by default the\n; server will remember the last channel they were in and move them to it\n; automatically. Toggling this setting to false will disable that feature.\n;\n;rememberchannel=true\n\n; How many seconds should the server remember the last channel of a user.\n; Set to 0 (default) to remember forever. This option has no effect if\n; rememberchannel is set to false.\n;rememberchannelduration=0\n\n; Maximum length of text messages in characters. 0 for no limit.\n;textmessagelength=5000\n\n; Maximum length of text messages in characters, with image data. 
0 for no limit.\n;imagemessagelength=131072\n\n; Allow clients to use HTML in messages, user comments and channel descriptions?\n;allowhtml=true\n\n; Murmur retains the per-server log entries in an internal database which\n; allows it to be accessed over D-Bus/ICE.\n; How many days should such entries be kept?\n; Set to 0 to keep forever, or -1 to disable logging to the DB.\n;logdays=31\n\n; To enable public server registration, the serverpassword must be blank, and\n; this must all be filled out.\n; The password here is used to create a registry for the server name; subsequent\n; updates will need the same password. Don't lose your password.\n; The URL is your own website, and only set the registerHostname for static IP\n; addresses.\n; Location is typically the country of typical users of the server, in\n; two-letter TLD style (ISO 3166-1 alpha-2 country code)\n;\n; If you only wish to give your \"Root\" channel a custom name, then only\n; uncomment the 'registerName' parameter.\n;\n;registerName=Mumble Server\n;registerPassword=secret\n;registerUrl=http://www.mumble.info/\n;registerHostname=\n;registerLocation=\n\n; If this option is enabled, the server will announce its presence via the\n; bonjour service discovery protocol. To change the name announced by bonjour\n; adjust the registerName variable.\n; See http://developer.apple.com/networking/bonjour/index.html for more information\n; about bonjour.\n;bonjour=True\n\n; If you have a proper SSL certificate, you can provide the filenames here.\n; Otherwise, Murmur will create its own certificate automatically.\n;sslCert=\n;sslKey=\n\n; If the keyfile specified above is encrypted with a passphrase, you can enter\n; it in this setting. 
It must be plaintext, so you may wish to adjust the\n; permissions on your murmur.ini file accordingly.\n;sslPassPhrase=\n\n; If your certificate is signed by an authority that uses a sub-signed or\n; \"intermediate\" certificate, you probably need to bundle it with your\n; certificate in order to get Murmur to accept it. You can either concatenate\n; the two certificates into one file, or you can put it in a file by itself and\n; put the path to that PEM-file in sslCA.\n;sslCA=\n\n; The sslDHParams option allows you to specify a PEM-encoded file with\n; Diffie-Hellman parameters, which will be used as the default Diffie-\n; Hellman parameters for all virtual servers.\n;\n; Instead of pointing sslDHParams to a file, you can also use the option\n; to specify a named set of Diffie-Hellman parameters for Murmur to use.\n; Murmur comes bundled with the Diffie-Hellman parameters from RFC 7919.\n; These parameters are available by using the following names:\n;\n; @ffdhe2048, @ffdhe3072, @ffdhe4096, @ffdhe6144, @ffdhe8192\n;\n; By default, Murmur uses @ffdhe2048.\n;sslDHParams=@ffdhe2048\n\n; The sslCiphers option chooses the cipher suites to make available for use\n; in SSL/TLS. 
This option is server-wide, and cannot be set on a\n; per-virtual-server basis.\n;\n; This option is specified using OpenSSL cipher list notation (see\n; https://www.openssl.org/docs/apps/ciphers.html#CIPHER-LIST-FORMAT).\n;\n; It is recommended that you try your cipher string using 'openssl ciphers <string>'\n; before setting it here, to get a feel for which cipher suites you will get.\n;\n; After setting this option, it is recommended that you inspect your Murmur log\n; to ensure that Murmur is using the cipher suites that you expected it to.\n;\n; Note: Changing this option may impact the backwards compatibility of your\n; Murmur server, and can remove the ability for older Mumble clients to be able\n; to connect to it.\n;sslCiphers=EECDH+AESGCM:EDH+aRSA+AESGCM:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA:AES256-SHA:AES128-SHA\n\n; If Murmur is started as root, which user should it switch to?\n; This option is ignored if Murmur isn't started with root privileges.\n;uname=\n\n; By default, in log files and in the user status window for privileged users,\n; Mumble will show IP addresses - in some situations you may find this unwanted\n; behavior. If obfuscate is set to true, Murmur will randomize the IP addresses\n; of connecting users.\n;\n; The obfuscate function only affects the log file and DOES NOT affect the user\n; information section in the client window.\n;obfuscate=false\n\n; If this option is enabled, only clients which have a certificate are allowed\n; to connect.\n;certrequired=False\n\n; If enabled, clients are sent information about the server's version and operating\n; system.\n;sendversion=True\n\n; You can set a recommended minimum version for your server, and clients will\n; be notified in their log when they connect if their client does not meet the\n; minimum requirements. 
suggestVersion expects the version in the format X.X.X.\n;\n; Note that the suggest* options appeared after 1.2.3 and will have no effect\n; on client versions 1.2.3 and earlier.\n;\n;suggestVersion=\n\n; Setting this to \"true\" will alert any user who does not have positional audio\n; enabled that the server administrators recommend enabling it. Setting it to\n; \"false\" will have the opposite effect - if you do not care whether the user\n; enables positional audio or not, set it to blank. The message will appear in\n; the log window upon connection, but only if the user's settings do not match\n; what the server requests.\n;\n; Note that the suggest* options appeared after 1.2.3 and will have no effect\n; on client versions 1.2.3 and earlier.\n;\n;suggestPositional=\n\n; Setting this to \"true\" will alert any user who does not have Push-To-Talk\n; enabled that the server administrators recommend enabling it. Setting it to\n; \"false\" will have the opposite effect - if you do not care whether the user\n; enables PTT or not, set it to blank. The message will appear in the log\n; window upon connection, but only if the user's settings do not match what the\n; server requests.\n;\n; Note that the suggest* options appeared after 1.2.3 and will have no effect\n; on client versions 1.2.3 and earlier.\n;\n;suggestPushToTalk=\n\n; This sets password hash storage to legacy mode (1.2.4 and before)\n; (Note that setting this to true is insecure and should not be used unless absolutely necessary)\n;legacyPasswordHash=false\n\n; By default a strong amount of PBKDF2 iterations are chosen automatically. If >0 this setting\n; overrides the automatic benchmark and forces a specific number of iterations.\n; (Note that you should only change this value if you know what you are doing)\n;kdfIterations=-1\n\n; In order to prevent misconfigured, impolite or malicious clients from\n; affecting the low-latency of other users, Murmur has a rudimentary global-ban\n; system. 
It's configured using the autobanAttempts, autobanTimeframe and\n; autobanTime settings.\n;\n; If a client attempts autobanAttempts connections in autobanTimeframe seconds,\n; they will be banned for autobanTime seconds. This is a global ban, from all\n; virtual servers on the Murmur process. It will not show up in any of the\n; ban-lists on the server, and they can't be removed without restarting the\n; Murmur process - just let them expire. A single, properly functioning client\n; should not trip these bans.\n;\n; To disable, set autobanAttempts or autobanTimeframe to 0. Commenting these\n; settings out will cause Murmur to use the defaults:\n;\n; To avoid autobanning successful connection attempts from the same IP address,\n; set autobanSuccessfulConnections=False.\n;\n;autobanAttempts=10\n;autobanTimeframe=120\n;autobanTime=300\n;autobanSuccessfulConnections=True\n\n; Enables logging of group changes. This means that every time a group in a\n; channel changes, the server will log all groups and their members from before\n; the change and after the change. Default is false. This option was introduced\n; with Murmur 1.4.0.\n;\n;loggroupchanges=false\n\n; Enables logging of ACL changes. This means that every time the ACL in a\n; channel changes, the server will log all ACLs from before the change and\n; after the change. Default is false. This option was introduced with Murmur\n; 1.4.0.\n;\n;logaclchanges=false\n\n; You can configure any of the configuration options for Ice here. 
We recommend\n; leaving the defaults as they are.\n; Please note that this section has to be last in the configuration file.\n;\n[Ice]\nIce.Warn.UnknownProperties=1\nIce.MessageSizeMax=65536\n\nEOH\n        destination = \"local/murmur-config\"\n        env         = false\n      }\n\n      resources {\n        cpu    = 100\n        memory = 128\n        network {\n          port \"0\" {}\n        }\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\n\n\nvariable \"tld\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n\nvariable \"auth\" {\n    type = string\n}\n"
  },
  {
    "path": "nomad_jobs/misc/octoprint/nomad.job",
    "content": "job \"octoprint\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/misc/octoprint/nomad.job\"\nversion = \"6\"\n  }\n\n  constraint {\n    attribute = \"${meta.3d_printer}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"3dprinter\" {\n    count = 1 \n\n    network {\n      port \"web\" {\n        host_network = \"tailscale\"\n        to = \"5000\"\n      }\n    }\n\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"octoprint\" {\n      driver = \"docker\"\n      config {\n        image = \"octoprint/octoprint\"\n        force_pull = true\n        #network_mode = \"host\"\n        privileged = true\n        ports = [\"web\"]\n        volumes = [\n          \"${var.shared_dir}octoprint:/home/octoprint/.octoprint\",\n          \"/dev/ttyUSB0:/dev/ttyUSB0\",\n        ]\n      }\n\n      service {\n        port = \"web\"\n\tname = \"octoprint\"\n        tags = [\n          \"traefik.enable=true\",\n          \"traefik.http.middlewares.cors.headers.accesscontrolallowmethods=GET,OPTIONS,PUT\",\n          \"traefik.http.middlewares.cors.headers.accesscontrolalloworigin=origin-list-or-null\",\n          \"traefik.http.middlewares.cors.headers.accesscontrolmaxage=100\",\n          \"traefik.http.middlewares.cors.headers.addvaryheader=true\",\n\n\n          \"traefik.http.middlewares.malpotAuth.basicauth.users=${var.auth}\",\n          \"traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=forward-auth\"\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = 
false\n          }\n        }\n      }\n\n      env {\n        TZ = \"Europe/Amsterdam\"\n      }\n\n      resources {\n        cpu    = 100\n        memory = 1024\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\n\n\nvariable \"tld\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n"
  },
  {
    "path": "nomad_jobs/misc/uploader/nomad.job",
    "content": "job \"uploader\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type = \"service\"\n  meta {\n      job_file = \"nomad_jobs/misc/uploader/nomad.job\"\nversion = \"5\"\n  }\n\n  group \"webserver\" {\n    count = 1\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n    task \"uploader\" {\n      driver = \"docker\"\n\n      service {\n        name = \"uploader\"\n        tags = [\n          \"traefik.enable=true\",\n          \"traefik.http.middlewares.httpsRedirect.redirectscheme.scheme=https\",\n\n\n          \"traefik.http.routers.${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_TASK_NAME}.${var.tld}\",\n          \"traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=forward-auth\"\n        ]\n        port = \"http\"\n\n        check {\n          type     = \"tcp\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      config {\n        image = \"docker-registry.${var.tld}/uploader:latest\"\n        network_mode = \"host\"\n        volumes = [\n          \"${var.shared_dir}uploader:/data\",\n        ]\n      }\n      template {\ndata = <<EOH\nUPLOADER_RS_FILES_DIR=/data\nUPLOADER_RS_BINDING={{env \"NOMAD_ADDR_http\" }}\nEOH\n        destination = \"local/env\"\n        env         = true\n      }\n\n      resources {\n        cpu = 100\n        memory = 16\n        network {\n          port \"http\" {} \n        }\n      }\n    }\n  }\n}\n\n\n"
  },
  {
    "path": "nomad_jobs/observability/alertmanager/nomad.job",
    "content": "job \"alertmanager\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/observability/alertmanager/nomad.job\"\n    version = \"13\"  // Switch from Pushover to ntfy\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  update {\n    max_parallel     = 1\n    min_healthy_time = \"30s\"\n    healthy_deadline = \"5m\"\n    progress_deadline = \"10m\"\n    auto_revert      = true\n  }\n\n  group \"alerting\" {\n    count = 1\n    \n    ephemeral_disk {\n      sticky = true\n    }\n\n    volume \"alertmanager\" {\n      type      = \"csi\"\n      read_only = false\n      source    = \"alertmanager\"\n      access_mode = \"multi-node-single-writer\"\n      attachment_mode = \"file-system\"\n    }\n    network {\n      port \"http\" {\n        static = \"9093\"\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    task \"prep-disk\" {\n      driver = \"docker\"\n      volume_mount {\n        volume      = \"alertmanager\"\n        destination = \"/volume/\"\n        read_only   = false\n      }\n      config {\n        image        = \"busybox:latest\"\n        command      = \"sh\"\n        args         = [\"-c\", \"chown -R 1000:2000 /volume/ && chmod -R 755 /volume/\"]\n      }\n      resources {\n        cpu    = 200\n        memory = 128\n      }\n\n      lifecycle {\n        hook    = \"prestart\"\n        sidecar = false\n      }\n    }\n\n    task \"alertmanager\" {\n      driver = \"docker\"\n      user = \"1000:2000\"\n\n      volume_mount {\n        volume      = \"alertmanager\"\n        destination = \"/alertmanager\"\n        read_only   = false\n      }\n      \n      service {\n        name = \"alertmanager\"\n        port = \"http\"\n        tags = [\n          \"traefik.enable=true\"\n        ]\n\n        check {\n 
         type     = \"tcp\"\n          port     = \"http\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      config {\n        image = \"prom/alertmanager:v0.32.0\"\n        network_mode = \"host\"\n        ports = [\"http\"]\n        force_pull = true\n        args = [\"--web.external-url\", \"https://alertmanager.${var.tld}\", \"--config.file\", \"/local/config.yml\", \"--storage.path\", \"/alertmanager\", \"--web.listen-address\", \"0.0.0.0:9093\"]\n      }\n\n      template {\n        left_delimiter = \"{{{\"\n        right_delimiter = \"}}}\"\n        data = <<EOH\nglobal:\n  smtp_smarthost: 'smtp.service.home:25'\n  smtp_from: 'alertmanager@example.org'\n\nroute:\n  group_by: ['alertname', 'cluster', 'service', 'service_name', 'service_id']\n  group_wait: 10s\n  group_interval: 10s\n  repeat_interval: 12h\n  receiver: 'ntfy'\n\n  routes:\n  - match:\n      severity: critical\n    receiver: 'ntfy'\n    group_wait: 5s\n    group_interval: 5s\n    repeat_interval: 15m\n  - match:\n      severity: warning\n    receiver: 'ntfy'\n    group_wait: 30s\n    group_interval: 30s\n    repeat_interval: 1h\n  - match:\n      severity: page\n    receiver: 'ntfy'\n    group_wait: 5s\n    group_interval: 5s\n    repeat_interval: 30m\n  - match_re:\n      alertname: \".*Down.*\"\n    receiver: 'ntfy'\n    group_wait: 5s\n    group_interval: 5s\n    repeat_interval: 15m\n\nreceivers:\n- name: 'email'\n  email_configs:\n  - to: 'pmanuk@perrymanuk.com'\n\n- name: 'ntfy'\n  webhook_configs:\n  - url: 'http://{{{ range service \"ntfy\" }}}{{{ .Address }}}:{{{ .Port }}}{{{ end }}}/homelab-alerts?template=alerts'\n    send_resolved: true\n\nEOH\n\n        destination = \"local/config.yml\"\n        env         = false\n      }\n\n      resources {\n        cpu    = 100\n        memory = 64\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n  type = string\n}\n\nvariable \"tld\" {\n  type = string\n}\n\n"
  },
  {
    "path": "nomad_jobs/observability/alertmanager/volume.hcl",
    "content": "id        = \"alertmanager\"\nname      = \"alertmanager\"\ntype      = \"csi\"\nplugin_id = \"org.democratic-csi.iscsi\"\n\ncapacity_max = \"1GB\"\ncapacity_min = \"100MB\"\n\ncapability {\n  access_mode     = \"multi-node-single-writer\"\n  attachment_mode = \"file-system\"\n}\n\nmount_options {\n  fs_type = \"ext4\"\n}\n\nparameters {\n  fsType = \"ext4\"\n}"
  },
  {
    "path": "nomad_jobs/observability/blackbox-exporter/nomad.job",
    "content": "job \"blackbox-exporter\" {\n  \n  meta {\n  job_file = \"nomad_jobs/observability/blackbox-exporter/nomad.job\"\n  }\nregion      = var.region\n  datacenters = [\"dc1\"]\n  type = \"system\"\n\n  group \"blackbox-exporter\" {\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      canary           = 1\n      min_healthy_time = \"30s\"\n      healthy_deadline = \"8m\"\n      auto_revert      = true\n    }\n\n    task \"blackbox_exporter\" {\n      driver = \"docker\"\n      service {\n        tags = [\"net-internal\", \"blackbox-exporter\", \"metrics\"]\n        name = \"blackbox-exporter\"\n        port = \"http\"\n\n        check {\n          type     = \"tcp\"\n          interval = \"5s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      config {\n        image = \"prom/blackbox-exporter:v0.28.0\"\n        network_mode = \"host\"\n        args = [\n          \"--config.file=/local/config.yaml\",\n          \"--web.listen-address=${NOMAD_ADDR_http}\",\n        ]\n\n      }\n      template {\ndata = <<EOH\nmodules:\n  http_2xx:\n    prober: http\n    timeout: 10s\n    http:\n      valid_http_versions: [\"HTTP/1.1\", \"HTTP/2\"]\n      valid_status_codes: []  # Defaults to 2xx\n      method: GET\n      no_follow_redirects: false\n      fail_if_ssl: false\n      fail_if_not_ssl: false\n      tls_config:\n        insecure_skip_verify: false\n      preferred_ip_protocol: \"ip4\" # defaults to \"ip6\"\n  tls_tcp:\n    prober: tcp\n    timeout: 10s\n    tcp:\n      tls: false\n      preferred_ip_protocol: ip4\n  dns_consul:\n    prober: dns\n    dns:\n      preferred_ip_protocol: \"ip4\"\n      query_name: \"consul.service.consul\"\n      query_type: \"MX\"\n  dns_google_com:\n    prober: dns\n    dns:\n      preferred_ip_protocol: \"ip4\"\n      query_name: \"www.google.com\"\n      query_type: \"MX\"\n  dns_vault:\n    prober: 
dns\n    dns:\n      preferred_ip_protocol: \"ip4\"\n      query_name: \"vault.service.consul\"\n      query_type: \"MX\"\n\nEOH\n        destination = \"local/config.yaml\"\n         env         = false\n      }\n      resources {\n        cpu    = 100\n        memory = 64\n\n        network {\n          port \"http\" { static = 9115 }\n        }\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\n\n"
  },
  {
    "path": "nomad_jobs/observability/grafana/nomad.job",
    "content": "job \"grafana\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/observability/grafana/nomad.job\"\nversion = \"4\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"monitoring\" {\n    count = 1 \n\n    network {\n      port \"http\" {\n        static = \"3000\"\n        host_network = \"tailscale\"\n      }\n    }\n\n    volume \"grafana\" {\n      type      = \"csi\"\n      read_only = false\n      source    = \"grafana\"\n      access_mode = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"prep-disk\" {\n      driver = \"docker\"\n      volume_mount {\n        volume      = \"grafana\"\n        destination = \"/volume/\"\n        read_only   = false\n      }\n      config {\n        image        = \"busybox:latest\"\n        command      = \"sh\"\n        args         = [\"-c\", \"chown -R 1000:1000 /volume/\"]\n      }\n      resources {\n        cpu    = 200\n        memory = 128\n      }\n\n      lifecycle {\n        hook    = \"prestart\"\n        sidecar = false\n      }\n    }\n\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"grafana\" {\n      driver = \"docker\"\n      user = \"1000:1000\"\n      config {\n        image = \"grafana/grafana:12.4.3\"\n        userns_mode = \"host\"\n        volumes = [\n          \"${var.shared_dir}grafana/config:/etc/grafana/\",\n        ]\n\n        network_mode = \"host\"\n      }\n      volume_mount {\n        volume      = \"grafana\"\n        destination = \"/var/lib/grafana\"\n        read_only   = false\n      }\n      env {\n        GF_PATHS_DATA = \"/var/lib/grafana\"\n        GF_AUTH_BASIC_ENABLED = 
\"false\"\n        GF_INSTALL_PLUGINS = \"grafana-piechart-panel\"\n      }\n      service {\n        port = \"http\"\n\tname = \"grafana\"\n        tags = [\n          \"traefik.enable=true\",\n          \"traefik.http.routers.${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_TASK_NAME}.${var.tld}\",\n          \"traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=forward-auth\"\n        ] \n        check {\n          type     = \"http\"\n          path     = \"/\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 100\n        memory = 128\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\n\n\nvariable \"tld\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n"
  },
  {
    "path": "nomad_jobs/observability/grafana/volume.hcl",
    "content": "id           = \"grafana\"\nexternal_id  = \"grafana\"\nname         = \"grafana\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"1GiB\"\ncapacity_max = \"1GiB\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"block-device\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\"]\n}\n\n"
  },
  {
    "path": "nomad_jobs/observability/loki/nomad.job",
    "content": "job \"loki\" {\n  \n  meta {\n  job_file = \"nomad_jobs/observability/loki/nomad.job\"\n  }\nregion      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"loki\" {\n    network {\n      port \"loki\" {\n        host_network = \"tailscale\"\n        static = 3100\n      }\n    }\n\n    volume \"loki\" {\n      type      = \"csi\"\n      read_only = false\n      source    = \"loki\"\n      access_mode = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"prep-disk\" {\n      driver = \"docker\"\n      volume_mount {\n        volume      = \"loki\"\n        destination = \"/volume/\"\n        read_only   = false\n      }\n      config {\n        image        = \"busybox:latest\"\n        command      = \"sh\"\n        args         = [\"-c\", \"chown -R 10001:10001 /volume/\"]\n      }\n      resources {\n        cpu    = 200\n        memory = 128\n      }\n\n      lifecycle {\n        hook    = \"prestart\"\n        sidecar = false\n      }\n    }\n\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"loki\" {\n      user = \"10001:10001\"\n      driver = \"docker\"\n      config {\n        image = \"grafana/loki:3.7.1\"\n        args = [\n          \"-config.file\",\n          \"local/loki/local-config.yaml\",\n        ]\n        ports = [\"loki\"]\n      }\n      volume_mount {\n        volume      = \"loki\"\n        destination = \"/loki\"\n        read_only   = false\n      }\n      service {\n        name = \"loki\"\n        port = \"loki\"\n        check {\n          name     = \"Loki healthcheck\"\n          port     = \"loki\"\n          type     = \"http\"\n          path     
= \"/ready\"\n          interval = \"20s\"\n          timeout  = \"5s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n        tags = []\n      }\n      template {\n        data = <<EOH\nauth_enabled: false\nserver:\n  http_listen_port: 3100\ningester:\n  lifecycler:\n    address: 127.0.0.1\n    ring:\n      kvstore:\n        store: inmemory\n      replication_factor: 1\n    final_sleep: 0s\n  # Any chunk not receiving new logs in this time will be flushed\n  chunk_idle_period: 1h\n  # All chunks will be flushed when they hit this age, default is 1h\n  max_chunk_age: 1h\n  # Loki will attempt to build chunks up to 1.5MB, flushing if chunk_idle_period or max_chunk_age is reached first\n  chunk_target_size: 1048576\n  # Must be greater than index read cache TTL if using an index cache (Default index read cache TTL is 5m)\n  chunk_retain_period: 30s\n  max_transfer_retries: 0     # Chunk transfers disabled\n  wal:\n    dir: \"/tmp/wal\"\nschema_config:\n  configs:\n    - from: 2020-10-24\n      store: boltdb-shipper\n      object_store: filesystem\n      schema: v11\n      index:\n        prefix: index_\n        period: 24h\nstorage_config:\n  boltdb_shipper:\n    active_index_directory: /loki/boltdb-shipper-active\n    cache_location: /loki/boltdb-shipper-cache\n    cache_ttl: 24h         # Can be increased for faster performance over longer query periods, uses more disk space\n    shared_store: filesystem\n  filesystem:\n    directory: /loki/chunks\ncompactor:\n  working_directory: /tmp/loki/boltdb-shipper-compactor\n  shared_store: filesystem\nlimits_config:\n  reject_old_samples: true\n  reject_old_samples_max_age: 168h\nchunk_store_config:\n  max_look_back_period: 0s\ntable_manager:\n  retention_deletes_enabled: false\n  retention_period: 0s\nEOH\n        destination = \"local/loki/local-config.yaml\"\n      }\n      resources {\n        cpu    = 
500\n        memory = 256\n      }\n    }\n  }\n}\n\nvariable \"datacenters_dc1\" {\n  type = list(string)\n}\nvariable \"region\" {}\nvariable \"shared_dir\" {}\n"
  },
  {
    "path": "nomad_jobs/observability/loki/volume.hcl",
    "content": "id           = \"loki\"\nexternal_id  = \"loki\"\nname         = \"loki\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"1GiB\"\ncapacity_max = \"1GiB\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"block-device\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\"]\n}\n\n"
  },
  {
    "path": "nomad_jobs/observability/oom-test/nomad.job",
    "content": "job \"oom-test\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/observability/oom-test/nomad.job\"\n    version  = \"3\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"oom-test\" {\n    count = 1\n\n    network {\n      port \"http\" {\n        host_network = \"lan\"\n        to           = \"8080\"\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel      = 1\n      min_healthy_time  = \"30s\"\n      healthy_deadline  = \"5m\"\n      progress_deadline = \"10m\"\n      auto_revert       = true\n    }\n\n    task \"oom-test\" {\n      driver = \"docker\"\n\n      config {\n        image   = \"python:3.14-slim\"\n        ports   = [\"http\"]\n        command = \"python3\"\n        args    = [\"-c\", <<EOH\nimport http.server\nimport threading\nimport time\n\nmemory_hog = []\n\ndef consume_memory():\n    \"\"\"Gradually consume memory until OOM.\"\"\"\n    time.sleep(30)  # Give the task time to become healthy\n    while True:\n        # Allocate ~1MB per iteration\n        memory_hog.append(b'x' * 1024 * 1024)\n        time.sleep(1)\n\n# Start memory consumer in background\nthreading.Thread(target=consume_memory, daemon=True).start()\n\n# Run a simple HTTP server for health checks\nserver = http.server.HTTPServer(('0.0.0.0', 8080), http.server.SimpleHTTPRequestHandler)\nserver.serve_forever()\nEOH\n        ]\n      }\n\n      service {\n        port = \"http\"\n        name = \"oom-test\"\n        tags = [\n          \"traefik.enable=true\",\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace        
   = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 100\n        memory = 64\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n  type        = string\n  description = \"Nomad region\"\n}\n\nvariable \"tld\" {\n  type        = string\n  description = \"Top-level domain for service discovery\"\n}\n\nvariable \"shared_dir\" {\n  type        = string\n  description = \"Path to shared NFS config directory\"\n}\n"
  },
  {
    "path": "nomad_jobs/observability/prometheus/README.md",
    "content": "### Prometheus\nThis prometheus is configured to scrape any service launched with the service tag `metrics` in addition to scraping all consul/nomad agents and itself.\n"
  },
  {
    "path": "nomad_jobs/observability/prometheus/nomad.job",
    "content": "job \"prometheus\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/observability/prometheus/nomad.job\"\n    version = \"10\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"monitoring\" {\n    count = 1\n\n    network {\n      port \"http\" {\n        static = \"9090\"\n      }\n    }\n\n    volume \"prometheus\" {\n      type      = \"csi\"\n      read_only = false\n      source    = \"prometheus\"\n      access_mode = \"multi-node-single-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n\n    task \"prep-disk\" {\n      driver = \"docker\"\n      volume_mount {\n        volume      = \"prometheus\"\n        destination = \"/volume/\"\n        read_only   = false\n      }\n      config {\n        image        = \"busybox:latest\"\n        command      = \"sh\"\n        args         = [\"-c\", \"chown -R 1000:2000 /volume/\"]\n      }\n      resources {\n        cpu    = 200\n        memory = 128\n      }\n\n      lifecycle {\n        hook    = \"prestart\"\n        sidecar = false\n      }\n    }\n\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      healthy_deadline = \"5m\"\n      progress_deadline = \"10m\"\n      auto_revert      = true\n    }\n\n    task \"prometheus\" {\n      driver = \"docker\"\n      user = \"1000:2000\"\n\n      volume_mount {\n        volume      = \"prometheus\"\n        destination = \"/opt/prometheus\"\n        read_only   = false\n      }\n\n      service {\n        name = \"prometheus\"\n        port = \"http\"\n        tags = [\n          \"traefik.enable=true\"\n        ]\n\n        check {\n          type     = \"http\"\n          path     = \"/-/healthy\"\n          name     = \"http\"\n          interval = \"5s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n        
    grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      # main configuration file\n      template {\n        left_delimiter = \"[[\"\n        right_delimiter = \"]]\"\n        data = <<EOH\nglobal:\n  scrape_interval:     60s # Set the scrape interval to every 15 seconds. Default is every 1 minute.\n  evaluation_interval: 60s # Evaluate rules every 15 seconds. The default is every 1 minute.\n  # scrape_timeout is set to the global default (10s).\n\n# Alertmanager configuration\nalerting:\n  alertmanagers:\n  - static_configs:\n    - targets:\n       - alertmanager.service.consul:9093\n\n# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.\nrule_files:\n  - \"alerts.yml\"\n  # - \"second_rules.yml\"\n\nscrape_configs:\n  - job_name: 'prometheus'\n    static_configs:\n      - targets: ['localhost:9090']\n\n  - job_name: 'metrics'\n    scrape_interval: 5s\n    metrics_path: /metrics\n    consul_sd_configs:\n      - server: '[[ env \"NOMAD_IP_http\" ]]:8500'\n        tags: ['metrics']\n        scheme: http\n    relabel_configs:\n      - source_labels: ['__meta_consul_dc']\n        target_label:  'dc'\n      - source_labels: [__meta_consul_service]\n        target_label:  'job'\n      - source_labels: ['__meta_consul_node']\n        target_label:  'host'\n      - source_labels: ['__meta_consul_tags']\n        target_label: 'tags'\n      - source_labels: ['__meta_consul_tags']\n        regex: '.*job-(.*?)(,.*)'\n        replacement: '${1}'\n        target_label: 'job_name'\n\n  - job_name: 'consul-server'\n    scrape_interval: 10s\n    metrics_path: /v1/agent/metrics\n    honor_labels: true\n    params:\n      format: ['prometheus']\n    consul_sd_configs:\n      - server: '[[ env \"NOMAD_IP_http\" ]]:8500'\n        services: ['nomad-client']\n        scheme: http\n    relabel_configs:\n      - source_labels: ['__meta_consul_dc']\n        target_label:  'dc'\n      - 
source_labels: ['__meta_consul_node']\n        target_label:  'host'\n      - source_labels: ['__meta_consul_tags']\n        target_label: 'tags'\n      - source_labels: [__address__]\n        action: replace\n        regex: ([^:]+):.*\n        replacement: $1:8500\n        target_label: __address__\n\n  - job_name: 'hass'\n    scrape_interval: 60s\n    metrics_path: /api/prometheus\n\n    # Long-Lived Access Token\n    authorization:\n      credentials: ${var.hass_key}\n\n    scheme: http\n    static_configs:\n      - targets: ['${var.hass_ip}:8123']\n\n  - job_name: 'nomad'\n    consul_sd_configs:\n    - server: '[[ env \"NOMAD_IP_http\" ]]:8500'\n      services: ['nomad-client']\n      tags: ['http']\n      scheme: http\n    scrape_interval: 10s\n    metrics_path: /v1/metrics\n    params:\n      format: ['prometheus']\n    relabel_configs:\n      - source_labels: ['__meta_consul_dc']\n        target_label:  'dc'\n      - source_labels: [__meta_consul_service]\n        target_label:  'job'\n      - source_labels: ['__meta_consul_node']\n        target_label:  'host'\n\n#  - job_name: 'blackbox_http_2xx'\n#    metrics_path: /probe\n#    scheme: http\n#    scrape_interval: 30s\n#    scrape_timeout: 10s\n#    params:\n#      module: [ http_2xx ]\n#    static_configs:\n#      - targets:\n#        - https://www.google.com/\n#        - http://prometheus.homelab/\n#    relabel_configs:\n#      - source_labels: ['__address__']\n#        regex: 'https?://(.+?)(/.*)'\n#        replacement: '${1}'\n#        target_label: 'url'\n#      - source_labels: ['__param_target']\n#        target_label: 'instance'\n#      - source_labels: [__address__]\n#        target_label: __param_target\n#      - target_label: __address__\n#        replacement: blackbox-exporter.service.[[ .region ]]:9115\n#      - source_labels: ['__param_target']\n#        target_label: 'endpoint'\n#\n#  - job_name: 'dns_google_com' \n#    metrics_path: /probe   \n#    params:                \n#      module: 
[dns_google_com]                                                                 \n#    static_configs:        \n#      - targets:           \n#        - 8.8.8.8\n#        - 1.1.1.1\n#        labels:                            \n#          dc: '[[ .datacenter ]]'        \n#          region: '[[ .region ]]'        \n#    relabel_configs:       \n#      - source_labels: [__address__] \n#        target_label: __param_target \n#      - source_labels: [__param_target] \n#        target_label: instance \n#      - target_label: __address__ \n#        replacement: blackbox-exporter.service.[[ .region ]]:9115\n\nEOH\n\n        destination   = \"local/prometheus.yml\"\n        change_mode   = \"signal\"\n        change_signal = \"SIGHUP\"\n        env           = false\n      }\n\n      template {\n        change_mode = \"noop\"\n        destination = \"local/alerts.yml\"\n        left_delimiter = \"[[\"\n        right_delimiter = \"]]\"\n        data = <<EOH\n---\ngroups:\n- name: basic_alerts\n  rules:\n  - alert: PrometheusDown\n    expr: absent(up{job=\"prometheus\"})\n    for: 2m\n    labels:\n      severity: page\n      alertname: \"PrometheusDown\"\n    annotations:\n      summary: \"Prometheus is down\"\n      description: \"Prometheus has been down for more than 2 minutes\"\n      service: \"prometheus\"\n      \n  - alert: NomadClusterDown\n    expr: absent(up{job=\"nomad-client\"})\n    for: 2m\n    labels:\n      severity: page\n      alertname: \"NomadClusterDown\"\n    annotations:\n      summary: \"Nomad cluster is unreachable\"\n      description: \"No Nomad metrics available - cluster may be down\"\n      service: \"nomad\"\n  # Alert for any instance that is unreachable for >5 minutes.\n  - alert: InstanceDown\n    expr: up{job!=\"hass\"} == 0\n    for: 5m\n    labels:\n      severity: page\n    annotations:\n      summary: \"Instance {{ $labels.instance }} down\"\n      description: \"{{ $labels.instance }} of job {{ $labels.job }} has been down for more 
than 5 minutes.\"\n  \n  - alert: HomeAssistantDown\n    expr: up{job=\"hass\"} == 0\n    for: 10m\n    labels:\n      severity: warning\n    annotations:\n      summary: \"Home Assistant is down\"  \n      description: \"Home Assistant at {{ $labels.instance }} has been down for more than 10 minutes.\"\n  # Alert for any device that is over 80% capacity  \n  - alert: DiskUsage\n    expr: avg(nomad_client_host_disk_used_percent) by (host, device) > 80\n    for: 5m\n    labels:\n      severity: page\n    annotations:\n      summary: \"Host {{ $labels.host }} disk {{ $labels.device }} usage alert\"\n      description: \"{{ $labels.host }} is using over 80% of its device: {{ $labels.device }}\"\n\n- name: nomad_allocation_alerts\n  rules:\n  - alert: NomadJobFailureRate\n    expr: rate(nomad_nomad_job_summary_failed[5m]) > 0\n    for: 2m\n    labels:\n      severity: critical\n      alertname: \"NomadJobFailureRate\"\n    annotations:\n      summary: \"Nomad job {{ $labels.exported_job }} is experiencing failures\"\n      description: \"Job {{ $labels.exported_job }} is failing allocations at a rate of {{ $value | printf \\\"%.2f\\\" }} per second\"\n      service: \"nomad\"\n      \n  - alert: NomadJobLostRate\n    expr: rate(nomad_nomad_job_summary_lost[5m]) > 0\n    for: 2m\n    labels:\n      severity: warning\n      alertname: \"NomadJobLostRate\"\n    annotations:\n      summary: \"Nomad job {{ $labels.exported_job }} is losing allocations\"\n      description: \"Job {{ $labels.exported_job }} is losing allocations at a rate of {{ $value | printf \\\"%.2f\\\" }} per second\"\n      service: \"nomad\"\n      \n  - alert: NomadJobQueued\n    expr: nomad_nomad_job_summary_queued > 0\n    for: 5m\n    labels:\n      severity: warning\n      alertname: \"NomadJobQueued\"\n    annotations:\n      summary: \"Nomad job {{ $labels.exported_job }} has queued allocations\"\n      description: \"Job {{ $labels.exported_job }} has {{ $value }} allocations queued for over 5 
minutes\"\n      service: \"nomad\"\n      \n  - alert: NomadAllocationsRestarting\n    expr: rate(nomad_client_allocs_restart[5m]) > 0.1\n    for: 2m\n    labels:\n      severity: warning\n      alertname: \"NomadAllocationsRestarting\"\n    annotations:\n      summary: \"High allocation restart rate on {{ $labels.host }}\"\n      description: \"Allocation restart rate is {{ $value }} per second on {{ $labels.host }}\"\n      service: \"nomad\"\n      \n  - alert: NomadAllocationsOOMKilled\n    expr: nomad_client_allocs_oom_killed > 0\n    for: 0s\n    labels:\n      severity: critical\n      alertname: \"NomadAllocationsOOMKilled\"\n    annotations:\n      summary: \"Allocation killed due to OOM on {{ $labels.host }}\"\n      description: \"{{ $value }} allocations were killed due to out-of-memory on {{ $labels.host }}\"\n      service: \"nomad\"\n      \nEOH\n      }\n\n\n      config {\n        image = \"prom/prometheus:v3.11.2\"\n        network_mode = \"host\"\n        args = [\"--storage.tsdb.path\", \"/opt/prometheus\", \"--web.listen-address\", \"0.0.0.0:9090\", \"--storage.tsdb.retention.time\", \"90d\"]\n        force_pull = true\n        ports = [\"http\"]\n        dns_servers = [\"192.168.50.2\"]\n        volumes = [\n          \"local/alerts.yml:/prometheus/alerts.yml\",\n          \"local/prometheus.yml:/prometheus/prometheus.yml\",\n        ]\n      }\n\n      resources {\n        cpu    = 1000\n        memory = 512\n      }\n    }\n  }\n}\n\n\n\nvariable \"region\" {}\nvariable \"tld\" {}\nvariable \"shared_dir\" {}\nvariable \"hass_key\" {}\nvariable \"hass_ip\" {}\n"
  },
  {
    "path": "nomad_jobs/observability/prometheus/volume.hcl",
    "content": "id           = \"prometheus\"\nexternal_id  = \"prometheus\"\nname         = \"prometheus\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"50GiB\"\ncapacity_max = \"50GiB\"\n\ncapability {\n  access_mode     = \"multi-node-single-writer\"\n  attachment_mode = \"file-system\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\"]\n}\n\n"
  },
  {
    "path": "nomad_jobs/observability/telegraf/nomad.job",
    "content": "job \"telegraf\" {\n  region = var.region\n  datacenters = [\"dc1\", \"public\", \"system\"]\n  type = \"system\"\n  priority = 100\n  meta {\n      job_file = \"nomad_jobs/observability/telegraf/nomad.job\"\nversion = \"4\"\n  }\n  group \"telegraf-exporter\" {\n\n    network {\n      port \"http\" {\n        host_network = \"tailscale\"\n        to = \"9273\"\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"telegraf\" {\n      driver = \"docker\"\n      service {\n        name = \"telegraf\"\n        port = \"http\"\n        tags = [\"metrics\"]\n        check {\n          type     = \"tcp\"\n          interval = \"5s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      config {\n        image = \"telegraf:1.38.2\"\n        privileged = \"true\"\n        ports = [\"http\"]\n        args = [\n          \"--config=/local/config.yaml\",\n        ]\n      }\n      template {\n      data = <<EOH\n[global_tags]\n  realm = '${var.region}'\n  role = 'nomad'\n[agent]\n[[outputs.prometheus_client]]\n  listen = ':9273'\n[[inputs.cpu]]\n  percpu = true\n  totalcpu = true\n[[inputs.disk]]\n  ignore_fs = ['tmpfs', 'devtmpfs']\n[[inputs.diskio]]\n[[inputs.kernel]]\n[[inputs.mem]]\n[[inputs.net]]\n[[inputs.ntpq]]\n[[inputs.processes]]\n[[inputs.swap]]\n[[inputs.system]]\n\nEOH\n        destination = \"local/config.yaml\"\n        env         = false\n      }\n      resources {\n        cpu    = 100\n        memory = 128\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\n\n\n"
  },
  {
    "path": "nomad_jobs/observability/truenas-graphite-exporter/nomad.job",
    "content": "job \"truenas-graphite-exporter\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/observability/truenas-graphite-exporter/nomad.job\"\n    version  = \"1\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"graphite-exporter\" {\n    count = 1\n\n    network {\n      port \"graphite\" {\n        static = 9109\n      }\n      port \"metrics\" {\n        static = 9108\n      }\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      healthy_deadline = \"5m\"\n      progress_deadline = \"10m\"\n      auto_revert      = true\n    }\n\n    task \"graphite-exporter\" {\n      driver = \"docker\"\n\n      config {\n        image      = \"ghcr.io/supporterino/truenas-graphite-to-prometheus:v2.2.1\"\n        force_pull = true\n        ports      = [\"graphite\", \"metrics\"]\n      }\n\n      service {\n        name = \"truenas-graphite-exporter\"\n        port = \"metrics\"\n        tags = [\n          \"metrics\",\n        ]\n\n        check {\n          type     = \"http\"\n          path     = \"/metrics\"\n          name     = \"http\"\n          interval = \"15s\"\n          timeout  = \"5s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 200\n        memory = 128\n      }\n    }\n  }\n}\n\nvariable \"region\" {}\nvariable \"shared_dir\" {}\n"
  },
  {
    "path": "nomad_jobs/observability/vector/nomad.job",
    "content": "job \"vector\" {\n  \n  meta {\n  job_file = \"nomad_jobs/observability/vector/nomad.job\"\n  }\ndatacenters = [\"dc1\"]\n  region = var.region\n  type = \"system\"\n  group \"vector\" {\n    network {\n      port \"api\" {\n        host_network = \"tailscale\"\n        to = 8686\n      }\n    }\n    ephemeral_disk {\n      size    = 500\n      sticky  = true\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"vector\" {\n      driver = \"docker\"\n      config {\n        image = \"timberio/vector:0.28.X-alpine\"\n        ports = [\"api\"]\n        volumes = [\"/var/run/docker.sock:/var/run/docker.sock\"]\n      }\n\n      env {\n        VECTOR_CONFIG = \"local/vector.toml\"\n        VECTOR_REQUIRE_HEALTHY = \"true\"\n      }\n\n      service {\n        check {\n          port     = \"api\"\n          type     = \"http\"\n          path     = \"/health\"\n          interval = \"30s\"\n          timeout  = \"5s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 500 # 500 MHz\n        memory = 256 # 256MB\n      }\n\n      template {\n        destination = \"local/vector.toml\"\n        change_mode   = \"signal\"\n        change_signal = \"SIGHUP\"\n        # overriding the delimiters to [[ ]] to avoid conflicts with Vector's native templating, which also uses {{ }}\n        left_delimiter = \"[[\"\n        right_delimiter = \"]]\"\n        data=<<EOH\n          data_dir = \"alloc/data/vector/\"\n          [api]\n            enabled = true\n            address = \"0.0.0.0:8686\"\n            playground = false\n          [sources.logs]\n            type = \"docker_logs\"\n          [transforms.transformed]\n            type = \"remap\"\n            inputs = [ \"logs\" ]\n            source = '''\n                
   .debug = parse_key_value!(.message)\n                   .job_name = split(get!(value: .label, path: [\"com.hashicorp.nomad.job_name\"]), \"/\")[0] ?? get!(value: .label, path: [\"com.hashicorp.nomad.job_name\"])\n            '''\n          [sinks.loki]\n            type = \"loki\"\n            inputs = [\"transformed\"]\n            endpoint = \"http://[[ range service \"loki\" ]][[ .Address ]]:[[ .Port ]][[ end ]]\"\n            encoding.codec = \"json\"\n            buffer.type = \"memory\"\n            out_of_order_action = \"accept\"\n            request.concurrency = \"adaptive\"\n            remove_label_fields = true\n            healthcheck.enabled = true\n              [sinks.loki.labels]\n              job = \"{{label.\\\"com.hashicorp.nomad.job_name\\\" }}\"\n              task = \"{{label.\\\"com.hashicorp.nomad.task_name\\\" }}\"\n              group = \"{{label.\\\"com.hashicorp.nomad.task_group_name\\\" }}\"\n              namespace = \"{{label.\\\"com.hashicorp.nomad.namespace\\\" }}\"\n              node = \"{{label.\\\"com.hashicorp.nomad.node_name\\\" }}\"\n              correlation_id = \"{{ message.requestId }}\"\n              stream = \"{{ stream }}\"\n        EOH\n      }\n      kill_timeout = \"30s\"\n    }\n  }\n}\n\nvariable \"datacenters_all\" {\n  type = list(string)\n}\nvariable \"region\" {}\n"
  },
  {
    "path": "nomad_jobs/personal-cloud/actualbudget/nomad.job",
    "content": "job \"actualbudget\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/personal-cloud/actualbudget/nomad.job\"\n      version = \"1\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"actualbudget\" {\n    count = 1\n\n    network {\n      mode = \"host\"\n      port \"http\" {\n        static = \"5006\"\n        host_network = \"lan\"\n      }\n    }\n\n    volume \"actualbudget\" {\n      type            = \"csi\"\n      read_only       = false\n      source          = \"actualbudget-data\"\n      access_mode     = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"prep-disk\" {\n      driver = \"docker\"\n      volume_mount {\n        volume      = \"actualbudget\"\n        destination = \"/volume/\"\n        read_only   = false\n      }\n      config {\n        image   = \"busybox:latest\"\n        command = \"sh\"\n        args    = [\"-c\", \"chmod 777 /volume/\"]\n      }\n      resources {\n        cpu    = 200\n        memory = 128\n      }\n\n      lifecycle {\n        hook    = \"prestart\"\n        sidecar = false\n      }\n    }\n\n    task \"actualbudget\" {\n      driver = \"docker\"\n      config {\n        image = \"actualbudget/actual-server:26.4.0\"\n        ports = [\"http\"]\n      }\n\n      volume_mount {\n        volume      = \"actualbudget\"\n        destination = \"/data\"\n        read_only   = false\n      }\n\n      env {\n        TZ = \"Etc/UTC\"\n      }\n\n      service {\n        port = \"http\"\n        name = \"actualbudget\"\n        tags = [\n          \"traefik.enable=true\"\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/\"\n          interval = \"10s\"\n          timeout  = 
\"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 200\n        memory = 512\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}"
  },
  {
    "path": "nomad_jobs/personal-cloud/actualbudget/volume.hcl",
    "content": "id           = \"actualbudget-data\"\nexternal_id  = \"actualbudget-data\"\nname         = \"actualbudget-data\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"5GiB\"\ncapacity_max = \"5GiB\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"block-device\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\", \"nodiratime\", \"data=ordered\"]\n}"
  },
  {
    "path": "nomad_jobs/personal-cloud/bitwarden/nomad.job",
    "content": "job \"bitwarden\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/personal-cloud/bitwarden/nomad.job\"\nversion = \"3\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"password\" {\n    count = 1\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    task \"bitwarden\" {\n      driver = \"docker\"\n      config {\n        image = \"vaultwarden/server:1.35.7\"\n        force_pull = \"true\"\n        port_map = {\n          http = 80 \n        }\n        volumes = [\n          \"${var.shared_dir}bitwarden:/data\",\n        ]\n      }\n\n      service {\n        port = \"http\"\n\tname = \"bitwarden\"\n        tags = [\n          \"traefik.enable=true\",\n        ]\n\n        check {\n          type     = \"tcp\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      template {\ndata = <<EOH\nADMIN_TOKEN=\"u3L9xN7vB2mK5zP8wA4qR1tY6cX0jH2nG7dS9fL4kM1vP3qW\"\nSMTP_HOST=\"192.168.50.120\"\nSMTP_PORT=\"25\"\nSMTP_SECURITY=\"off\"\nSMTP_FROM=\"vault@demonsafe.com\"\nEOH\n        destination = \"local/env\"\n        env         = true\n      }\n\n      resources {\n        cpu    = 1000\n        memory = 2048\n        network {\n          port \"http\" {}\n        }\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\n\n\nvariable \"tld\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n"
  },
  {
    "path": "nomad_jobs/personal-cloud/nextcloud/nomad.job",
    "content": "job \"nextcloud\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/personal-cloud/nextcloud/nomad.job\"\nversion = \"4\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  vault {\n    policies      = [\"admin\"]\n    change_mode   = \"signal\"\n    change_signal = \"SIGUSR1\"\n  }\n\n  group \"nextcloud\" {\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    task \"web\" {\n      driver = \"docker\"\n\n      config {\n        image = \"nextcloud\"\n        volumes = [\n          \"${var.shared_dir}nextcloud:/var/www/html\",\n          \"${var.shared_dir}nextcloud_data:/data\",\n          \"local/default:/config/nginx/site-confs/default\"\n        ]\n        port_map {\n          http = 80\n        }\n      }\n\n      service {\n        name = \"${NOMAD_JOB_NAME}\"\n        port = \"http\"\n        tags = [\n          \"traefik.enable=true\",\n          \"traefik.http.middlewares.httpsRedirect.redirectscheme.scheme=https\",\n\n\n          \"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.${var.tld}\",\n          \"traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=forward-auth\"\n        ]\n\n        check {\n          type     = \"tcp\"\n          port     = \"http\"\n          interval = \"30s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      template {\ndata = <<EOH\nPOSTGRES_DB=\"nextcloud\"\nPOSTGRES_USER=\"\"\nPOSTGRES_PASSWORD=\"\"\nNEXTCLOUD_ADMIN_USER=\"\"\nNEXTCLOUD_ADMIN_PASSWORD=\"\"\nNEXTCLOUD_TRUSTED_DOMAINS=\"\"\nPOSTGRES_HOST=\"{{ env \"NOMAD_ADDR_postgres_db\" }}\"\nEOH\n        destination = \"local/env\"\n        env         = true\n      }\n\n      template {\ndata = <<EOH\nupstream php-handler {\n    server 127.0.0.1:9000;\n}\n#server {\n#    listen 80;\n#    listen 
[::]:80;\n#    server_name _;\n#    return 301 https://$host$request_uri;\n#}\nserver {\n    listen 80;\n    listen [::]:80;\n    server_name _;\n    ssl_certificate /config/keys/cert.crt;\n    ssl_certificate_key /config/keys/cert.key;\n    add_header X-Content-Type-Options nosniff;\n    add_header X-XSS-Protection \"1; mode=block\";\n    add_header X-Robots-Tag none;\n    add_header X-Download-Options noopen;\n    add_header X-Permitted-Cross-Domain-Policies none;\n    add_header Referrer-Policy no-referrer;\n    fastcgi_hide_header X-Powered-By;\n    root /config/www/nextcloud/;\n    location = /robots.txt {\n        allow all;\n        log_not_found off;\n        access_log off;\n    }\n    location = /.well-known/carddav {\n      return 301 $scheme://$host/remote.php/dav;\n    }\n    location = /.well-known/caldav {\n      return 301 $scheme://$host/remote.php/dav;\n    }\n    client_max_body_size 10G;\n    fastcgi_buffers 64 4K;\n    gzip on;\n    gzip_vary on;\n    gzip_comp_level 4;\n    gzip_min_length 256;\n    gzip_proxied expired no-cache no-store private no_last_modified no_etag auth;\n    gzip_types application/atom+xml application/javascript application/json application/ld+json application/manifest+json application/rss+xml application/vnd.geo+json application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/bmp image/svg+xml image/x-icon text/cache-manifest text/css text/plain text/vcard text/vnd.rim.location.xloc text/vtt text/x-component text/x-cross-domain-policy;\n    location / {\n        rewrite ^ /index.php;\n    }\n    location ~ ^\\/(?:build|tests|config|lib|3rdparty|templates|data)\\/ {\n        deny all;\n    }\n    location ~ ^\\/(?:\\.|autotest|occ|issue|indie|db_|console) {\n        deny all;\n    }\n    location ~ ^\\/(?:index|remote|public|cron|core\\/ajax\\/update|status|ocs\\/v[12]|updater\\/.+|ocs-provider\\/.+|ocm-provider\\/.+)\\.php(?:$|\\/) 
{\n        fastcgi_split_path_info ^(.+?\\.php)(\\/.*|)$;\n        try_files $fastcgi_script_name =404;\n        include /etc/nginx/fastcgi_params;\n        fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;\n        fastcgi_param PATH_INFO $fastcgi_path_info;\n        fastcgi_param HTTPS on;\n        fastcgi_param modHeadersAvailable true;\n        fastcgi_param front_controller_active true;\n        fastcgi_pass php-handler;\n        fastcgi_intercept_errors on;\n        fastcgi_request_buffering off;\n    }\n\n    location ~ ^\\/(?:updater|ocs-provider|ocm-provider)(?:$|\\/) {\n        try_files $uri/ =404;\n        index index.php;\n    }\n    location ~ \\.(?:css|js|woff2?|svg|gif)$ {\n        try_files $uri /index.php$request_uri;\n        add_header Cache-Control \"public, max-age=15778463\";\n        add_header X-Content-Type-Options nosniff;\n        add_header X-XSS-Protection \"1; mode=block\";\n        add_header X-Robots-Tag none;\n        add_header X-Download-Options noopen;\n        add_header X-Permitted-Cross-Domain-Policies none;\n        add_header Referrer-Policy no-referrer;\n        access_log off;\n    }\n    location ~ \\.(?:png|html|ttf|ico|jpg|jpeg)$ {\n        try_files $uri /index.php$request_uri;\n        access_log off;\n    }\n}\nEOH\n        destination = \"local/default\"\n        env         = false\n      }\n\n      resources {\n        cpu = 500\n        memory = 2048\n        network {\n          port \"http\" {}\n        }\n      }\n    }\n    task \"postgres\" {\n      driver = \"docker\"\n\n      config {\n        image = \"postgres:9.6.24\"\n        volumes = [\n          \"${var.shared_dir}nextcloud-postgres:/appdata/postgres\",\n        ]\n        port_map {\n          db = 5432\n        }\n      }\n      template {\ndata = <<EOH\nPOSTGRES_DB=nextcloud\nPOSTGRES_USER=\"\"\nPOSTGRES_PASSWORD=\"\"\nPGDATA=\"/appdata/postgres\"\nEOH\n        destination = \"local/env\"\n        env         = true\n      }\n\n  
    service {\n        name = \"${NOMAD_JOB_NAME}-postgres\"\n        tags = [\"postgres\"]\n\n        port = \"db\"\n\n        check {\n          type     = \"tcp\"\n          port     = \"db\"\n          interval = \"30s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      resources {\n        cpu = 100\n        memory = 512\n        network {\n          port  \"db\"  {\n            static = \"25432\"\n          }\n        }\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\n\n\nvariable \"tld\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n"
  },
  {
    "path": "nomad_jobs/personal-cloud/ntfy/nomad.job",
    "content": "job \"ntfy\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/personal-cloud/ntfy/nomad.job\"\n    version  = \"4\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"ntfy\" {\n    count = 1\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    volume \"ntfy\" {\n      type            = \"csi\"\n      read_only       = false\n      source          = \"ntfy-data\"\n      access_mode     = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n    network {\n      port \"http\" {\n        static       = 8180\n        host_network = \"lan\"\n      }\n    }\n\n    task \"prep-disk\" {\n      driver = \"docker\"\n\n      volume_mount {\n        volume      = \"ntfy\"\n        destination = \"/volume/\"\n        read_only   = false\n      }\n\n      config {\n        image   = \"busybox:latest\"\n        command = \"sh\"\n        args    = [\"-c\", \"chmod 777 /volume/\"]\n      }\n\n      resources {\n        cpu    = 200\n        memory = 128\n      }\n\n      lifecycle {\n        hook    = \"prestart\"\n        sidecar = false\n      }\n    }\n\n    task \"ntfy\" {\n      driver = \"docker\"\n\n      config {\n        image      = \"binwiederhier/ntfy:v2.21.0\"\n        force_pull = true\n        ports      = [\"http\"]\n        args       = [\"serve\"]\n        volumes = [\n          \"local/server.yml:/etc/ntfy/server.yml:ro\",\n          \"local/templates/:/etc/ntfy/templates/:ro\",\n        ]\n      }\n\n      volume_mount {\n        volume      = \"ntfy\"\n        destination = \"/var/cache/ntfy\"\n        read_only   = false\n      }\n\n      env {\n        TZ = \"Europe/Berlin\"\n      }\n\n      template {\n        data = <<EOH\nbase-url: \"https://ntfy.${var.tld}\"\nlisten-http: \":{{ env 
\"NOMAD_PORT_http\" }}\"\ncache-file: \"/var/cache/ntfy/cache.db\"\ncache-duration: \"12h\"\nattachment-cache-dir: \"/var/cache/ntfy/attachments\"\nattachment-total-size-limit: \"1G\"\nattachment-file-size-limit: \"15M\"\ntemplate-dir: \"/etc/ntfy/templates\"\nEOH\n        destination = \"local/server.yml\"\n        change_mode = \"restart\"\n      }\n\n      template {\n        left_delimiter = \"(((\"\n        right_delimiter = \")))\"\n        data = <<EOH\ntitle: |-\n  {{- $status := .status -}}\n  {{- $first := index .alerts 0 -}}\n  {{- if eq $status \"firing\" -}}\n  🚨 FIRING: {{ $first.labels.alertname }}\n  {{- else if eq $status \"resolved\" -}}\n  ✅ RESOLVED: {{ $first.labels.alertname }}\n  {{- else -}}\n  {{ $status | title }}: {{ $first.labels.alertname }}\n  {{- end }}\nmessage: |-\n  {{- range .alerts }}\n  Status: {{ .status | title }}\n  {{- if .annotations.summary }}\n  Summary: {{ .annotations.summary }}\n  {{- end }}\n  {{- if .annotations.description }}\n  Description: {{ .annotations.description }}\n  {{- end }}\n  {{- if .labels.exported_job }}\n  Job: {{ .labels.exported_job }}\n  {{- end }}\n  {{- if .labels.task_group }}\n  Task Group: {{ .labels.task_group }}\n  {{- end }}\n  {{- if .labels.instance }}\n  Instance: {{ .labels.instance }}\n  {{- end }}\n  {{- if .labels.host }}\n  Host: {{ .labels.host }}\n  {{- end }}\n  {{- if .labels.severity }}\n  Severity: {{ .labels.severity }}\n  {{- end }}\n  {{ end }}\npriority: |-\n  {{- $first := index .alerts 0 -}}\n  {{- if eq .status \"resolved\" -}}\n  default\n  {{- else if eq $first.labels.severity \"critical\" -}}\n  urgent\n  {{- else if eq $first.labels.severity \"page\" -}}\n  high\n  {{- else if eq $first.labels.severity \"warning\" -}}\n  default\n  {{- else -}}\n  default\n  {{- end }}\nEOH\n        destination = \"local/templates/alerts.yml\"\n        change_mode = \"restart\"\n      }\n\n      template {\n        left_delimiter = \"(((\"\n        right_delimiter = \")))\"\n       
 data = <<EOH\ntitle: |-\n  {{- if eq .notification_type \"MEDIA_PENDING\" -}}\n  📥 Request Pending: {{ .subject }}\n  {{- else if eq .notification_type \"MEDIA_APPROVED\" -}}\n  ✅ Approved: {{ .subject }}\n  {{- else if eq .notification_type \"MEDIA_AUTO_APPROVED\" -}}\n  ✅ Auto-Approved: {{ .subject }}\n  {{- else if eq .notification_type \"MEDIA_AVAILABLE\" -}}\n  🎬 Now Available: {{ .subject }}\n  {{- else if eq .notification_type \"MEDIA_DECLINED\" -}}\n  ❌ Declined: {{ .subject }}\n  {{- else if eq .notification_type \"MEDIA_FAILED\" -}}\n  ⚠️ Failed: {{ .subject }}\n  {{- else if eq .notification_type \"MEDIA_AUTO_REQUESTED\" -}}\n  🤖 Auto-Requested: {{ .subject }}\n  {{- else if eq .notification_type \"ISSUE_CREATED\" -}}\n  🐛 Issue: {{ .subject }}\n  {{- else if eq .notification_type \"ISSUE_RESOLVED\" -}}\n  ✅ Issue Resolved: {{ .subject }}\n  {{- else if eq .notification_type \"ISSUE_COMMENT\" -}}\n  💬 Issue Comment: {{ .subject }}\n  {{- else if eq .notification_type \"TEST_NOTIFICATION\" -}}\n  🔔 Test: {{ .subject }}\n  {{- else -}}\n  {{ .subject }}\n  {{- end }}\nmessage: |-\n  {{- if .message }}\n  {{ .message }}\n  {{ end }}\n  {{- if .media_type }}\n  Type: {{ .media_type | title }}\n  {{- end }}\n  {{- if .media_status }}\n  Status: {{ .media_status }}\n  {{- end }}\n  {{- if .requestedBy_username }}\n  Requested by: {{ .requestedBy_username }}\n  {{- end }}\n  {{- if .event }}\n  Event: {{ .event }}\n  {{- end }}\npriority: |-\n  {{- if or (eq .notification_type \"MEDIA_FAILED\") (eq .notification_type \"ISSUE_CREATED\") -}}\n  high\n  {{- else if or (eq .notification_type \"MEDIA_AVAILABLE\") (eq .notification_type \"MEDIA_APPROVED\") -}}\n  default\n  {{- else if eq .notification_type \"MEDIA_PENDING\" -}}\n  default\n  {{- else -}}\n  low\n  {{- end }}\nEOH\n        destination = \"local/templates/overseerr.yml\"\n        change_mode = \"restart\"\n      }\n\n      service {\n        name = \"ntfy\"\n        port = \"http\"\n        tags = 
[\n          \"traefik.enable=true\",\n        ]\n\n        check {\n          type     = \"http\"\n          path     = \"/v1/health\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 200\n        memory = 256\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n  type = string\n}\n\nvariable \"tld\" {\n  type = string\n}\n\nvariable \"shared_dir\" {\n  type = string\n}\n"
  },
  {
    "path": "nomad_jobs/personal-cloud/ntfy/volume.hcl",
    "content": "id           = \"ntfy-data\"\nexternal_id  = \"ntfy-data\"\nname         = \"ntfy-data\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"1GiB\"\ncapacity_max = \"1GiB\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"block-device\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\"]\n}\n"
  },
  {
    "path": "nomad_jobs/personal-cloud/paperless/nomad.job",
    "content": "job \"paperless\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/personal-cloud/paperless/nomad.job\"\n    version = \"6\"  // Incremented version number\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"paperless\" {\n\n    network {\n      mode = \"host\"\n      port \"tika\" {\n        static = \"9998\"\n        host_network = \"lan\"\n      }\n      port \"gotenberg\" {\n        static = \"3000\"\n        host_network = \"lan\"\n      }\n      port \"paperless\" {\n        static = \"8000\"\n        host_network = \"lan\"\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"paperless\" {\n      driver = \"docker\"\n      config {\n        image = \"ghcr.io/paperless-ngx/paperless-ngx:2.20.14\"\n        dns_servers = [\"192.168.50.2\"]\n        network_mode = \"host\"\n        ports = [\"paperless\"]\n        volumes = [\n          \"${var.shared_dir}paperless/data:/usr/src/paperless/data\",\n          \"${var.shared_dir}paperless/consume:/usr/src/paperless/consume\",\n          \"${var.shared_dir}paperless/export:/usr/src/paperless/export\",\n          \"${var.shared_dir}paperless/media:/usr/src/paperless/media\",\n        ]\n      }\n\n      resources {\n        cpu    = 500\n        memory = 2048\n      }\n\n      service {\n        port = \"paperless\"\n        name = \"paperless\"\n        tags = [\"traefik.enable=true\"]\n      }\n\n      template {\n        data = 
<<EOH\nPAPERLESS_DBHOST=\"postgres.service.consul\"\nPAPERLESS_DBPASS=\"${var.postgres_admin_password}\"\nPAPERLESS_DBUSER=\"postgres\"\nPAPERLESS_DBENGINE=\"postgres\"\nPAPERLESS_REDIS=\"redis://redis.service.consul:6379/1\"\nPAPERLESS_REDIS_PREFIX=\"paperless\"\nPAPERLESS_TIKA_ENABLED=\"true\"\nPAPERLESS_TIKA_ENDPOINT=\"http://paperless-tika.service.consul:9998\"\nPAPERLESS_TIKA_GOTENBERG_ENDPOINT=\"http://paperless-gotenberg.service.consul:3000\"\nPAPERLESS_ADMIN_USER=\"admin\"\nPAPERLESS_ADMIN_PASSWORD=\"${var.paperless_admin_password}\"\nPAPERLESS_OCR_LANGUAGE=\"deu+eng\"\nEOH\n        destination = \"local/env\"\n        env         = true\n      }\n    }\n\n    task \"tika\" {\n      driver = \"docker\"\n\n      config {\n        image = \"apache/tika\"\n        ports = [\"tika\"]\n      }\n\n      service {\n        name = \"${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}\"\n        tags = [\"${NOMAD_TASK_NAME}\"]\n        port = \"tika\"\n\n        check {\n          type     = \"tcp\"\n          port     = \"tika\"\n          interval = \"30s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      resources {\n        cpu    = 500\n        memory = 512\n      }\n    }\n\n    task \"gotenberg\" {\n      driver = \"docker\"\n\n      config {\n        image = \"gotenberg/gotenberg:8\"\n        ports = [\"gotenberg\"]\n        command = \"gotenberg\"\n        args = [\"--chromium-disable-javascript=true\", \"--chromium-allow-list=file:///tmp/.*\"]\n      }\n\n      service {\n        name = \"${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}\"\n        tags = [\"${NOMAD_TASK_NAME}\"]\n        port = \"gotenberg\"\n\n        check {\n          type     = \"tcp\"\n          port     = \"gotenberg\"\n          interval = \"30s\"\n          timeout  = 
\"2s\"\n        }\n      }\n\n      resources {\n        cpu    = 500\n        memory = 512\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n\nvariable \"postgres_admin_password\" {\n    type = string\n    description = \"Admin password for PostgreSQL\"\n}\n\nvariable \"paperless_admin_password\" {\n    type = string\n    description = \"Admin password for Paperless web interface\"\n    default = \"admin\"  // You might want to override this with a more secure password\n}"
  },
  {
    "path": "nomad_jobs/personal-cloud/radicale/nomad.job",
    "content": "job \"radicale\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/personal-cloud/radicale/nomad.job\"\nversion = \"3\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"downloaders\" {\n    count = 1 \n    network {\n      port \"http\" {\n        host_network = \"lan\"\n        static = \"5232\"\n      }\n    }\n\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"radicale\" {\n      driver = \"docker\"\n      config {\n        image = \"tomsquest/docker-radicale\"\n        ports = [\"http\"]\n        mounts = [\n          {\n            type = \"bind\"\n            target = \"/data\"\n            source = \"${var.shared_dir}radicale\",\n            readonly = false\n            bind_options = {\n              propagation = \"rshared\"\n            }\n          }\n        ]\n      }\n  \n      env {\n        PUID = \"65534\"\n        PGID = \"65534\"\n        TZ = \"Etc/UTC\"\n      }\n\n      service {\n        port = \"http\"\n        name = \"${NOMAD_TASK_NAME}\"\n        tags = [\n          \"traefik.enable=true\",\n          \"traefik.http.middlewares.httpsRedirect.redirectscheme.scheme=https\",\n          \"traefik.http.routers.${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_TASK_NAME}.${var.tld}\",\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 100\n        memory = 512\n      }\n    }\n  }\n}\n\nvariable \"region\" {}\n\nvariable \"tld\" {}\n\nvariable \"shared_dir\" {}\n"
  },
  {
    "path": "nomad_jobs/security/suricata/nomad.job",
    "content": "job \"suricata\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"system\"\n  priority    = 100\n\n  meta {\n    job_file = \"nomad_jobs/security/suricata/nomad.job\"\n    version  = \"8\"  // Add hourly log rotation\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"ids\" {\n    network {\n      mode = \"host\"\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"suricata\" {\n      driver = \"docker\"\n\n      config {\n        image        = \"jasonish/suricata:8.0\"\n        network_mode = \"host\"\n        force_pull   = true\n        privileged   = true\n        cap_add      = [\"NET_ADMIN\", \"NET_RAW\", \"SYS_NICE\"]\n\n        # Run Suricata in af-packet mode using config file\n        args = [\"-c\", \"/etc/suricata/suricata.yaml\", \"--af-packet\"]\n\n        # Host mounts for logs (Wazuh reads these) and rules (shared NFS)\n        volumes = [\n          \"local/suricata.yaml:/etc/suricata/suricata.yaml:ro\",\n          \"/var/log/suricata:/var/log/suricata\",\n          \"${var.shared_dir}suricata/rules:/var/lib/suricata:ro\",\n        ]\n      }\n\n      # Suricata configuration\n      template {\n        data = <<EOH\n%YAML 1.1\n---\nvars:\n  address-groups:\n    HOME_NET: \"[192.168.50.0/24,10.0.0.0/8]\"\n    EXTERNAL_NET: \"!$HOME_NET\"\n    HTTP_SERVERS: \"$HOME_NET\"\n    SMTP_SERVERS: \"$HOME_NET\"\n    SQL_SERVERS: \"$HOME_NET\"\n    DNS_SERVERS: \"$HOME_NET\"\n    TELNET_SERVERS: \"$HOME_NET\"\n    AIM_SERVERS: \"$EXTERNAL_NET\"\n    DC_SERVERS: \"$HOME_NET\"\n    DNP3_SERVER: \"$HOME_NET\"\n    DNP3_CLIENT: \"$HOME_NET\"\n    MODBUS_CLIENT: \"$HOME_NET\"\n    MODBUS_SERVER: \"$HOME_NET\"\n    ENIP_CLIENT: \"$HOME_NET\"\n    ENIP_SERVER: 
\"$HOME_NET\"\n\n  port-groups:\n    HTTP_PORTS: \"80\"\n    SHELLCODE_PORTS: \"!80\"\n    ORACLE_PORTS: 1521\n    SSH_PORTS: 22\n    DNP3_PORTS: 20000\n    MODBUS_PORTS: 502\n    FILE_DATA_PORTS: \"[$HTTP_PORTS,110,143]\"\n    FTP_PORTS: 21\n    GENEVE_PORTS: 6081\n    VXLAN_PORTS: 4789\n    TEREDO_PORTS: 3544\n\n# Output configuration for JSON logs (Wazuh integration)\noutputs:\n  - eve-log:\n      enabled: yes\n      filetype: regular\n      filename: /var/log/suricata/eve.json\n      rotate: yes\n      rotate-interval: hour\n      rotate-count: 2\n      pcap-file: false\n      community-id: true\n      community-id-seed: 0\n      xff:\n        enabled: no\n        mode: extra-data\n        deployment: reverse\n        header: X-Forwarded-For\n\n      types:\n        - alert:\n            tagged-packets: yes\n            xff:\n              enabled: no\n              mode: extra-data\n              deployment: reverse\n              header: X-Forwarded-For\n        - anomaly:\n            enabled: yes\n            types:\n        - http:\n            extended: yes\n        - dns:\n            version: 2\n        - tls:\n            extended: yes\n        - files:\n            force-magic: no\n        - smtp:\n        - ssh\n        - stats:\n            totals: yes\n            threads: no\n            deltas: no\n        - flow\n        - netflow\n\n  # Stats output\n  - stats:\n      enabled: yes\n      filename: /var/log/suricata/stats.log\n      append: yes\n      totals: yes\n      threads: no\n\n# Logging configuration\nlogging:\n  default-log-level: notice\n  outputs:\n    - console:\n        enabled: yes\n    - file:\n        enabled: yes\n        level: info\n        filename: /var/log/suricata/suricata.log\n\n# Network interface configuration\naf-packet:\n  - interface: {{ sockaddr \"GetPrivateInterfaces | include \\\"network\\\" \\\"192.168.50.0/24\\\" | attr \\\"name\\\"\" }}\n    threads: auto\n    cluster-id: 99\n    cluster-type: cluster_flow\n    
defrag: yes\n    use-mmap: yes\n    tpacket-v3: yes\n    ring-size: 2048\n    block-size: 32768\n\n# Detect engine settings\ndetect-engine:\n  - profile: medium\n  - custom-values:\n      toclient-groups: 3\n      toserver-groups: 25\n  - sgh-mpm-context: auto\n  - inspection-recursion-limit: 3000\n\n# Threading\nthreading:\n  set-cpu-affinity: no\n  detect-thread-ratio: 1.0\n\n# Rules configuration\ndefault-rule-path: /var/lib/suricata/rules\nrule-files:\n  - suricata.rules\n\n# Live rule reload\nrule-reload: true\n\n# Application layer protocols\napp-layer:\n  protocols:\n    rfb:\n      enabled: yes\n    mqtt:\n      enabled: yes\n    krb5:\n      enabled: yes\n    snmp:\n      enabled: yes\n    ikev2:\n      enabled: yes\n    tls:\n      enabled: yes\n      detection-ports:\n        dp: 443\n    dcerpc:\n      enabled: yes\n    ftp:\n      enabled: yes\n    ssh:\n      enabled: yes\n    smtp:\n      enabled: yes\n    http:\n      enabled: yes\n    dns:\n      tcp:\n        enabled: yes\n      udp:\n        enabled: yes\n\n# Performance tuning\nstream:\n  memcap: 64mb\n  checksum-validation: yes\n  inline: auto\n  reassembly:\n    memcap: 256mb\n    depth: 1mb\n    toserver-chunk-size: 2560\n    toclient-chunk-size: 2560\n    randomize-chunk-size: yes\n\nhost:\n  hash-size: 4096\n  prealloc: 1000\n  memcap: 32mb\n\nflow:\n  memcap: 128mb\n  hash-size: 65536\n  prealloc: 10000\n  emergency-recovery: 30\n\n# Defragmentation settings\ndefrag:\n  memcap: 32mb\n  hash-size: 65536\n  trackers: 65535\n  max-frags: 65535\n  prealloc: yes\n  timeout: 60\nEOH\n        destination = \"local/suricata.yaml\"\n        change_mode = \"restart\"\n      }\n\n      resources {\n        cpu    = 500\n        memory = 768\n      }\n\n      service {\n        name = \"suricata\"\n        tags = [\"security\", \"ids\"]\n\n        check {\n          type     = \"script\"\n          name     = \"suricata-running\"\n          command  = \"/bin/sh\"\n          args     = [\"-c\", \"pgrep 
-x Suricata-Main > /dev/null || pgrep -x suricata > /dev/null\"]\n          interval = \"30s\"\n          timeout  = \"5s\"\n        }\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n  type = string\n}\n\nvariable \"shared_dir\" {\n  type = string\n}\n"
  },
  {
    "path": "nomad_jobs/security/suricata-update/nomad.job",
    "content": "job \"suricata-update\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"batch\"\n  priority    = 80\n\n  meta {\n    job_file = \"nomad_jobs/security/suricata-update/nomad.job\"\n    version  = \"3\"  // Single instance with shared NFS storage\n  }\n\n  # Run daily at 4am\n  periodic {\n    crons            = [\"0 4 * * *\"]\n    prohibit_overlap = true\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"update\" {\n    count = 1\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    task \"suricata-update\" {\n      driver = \"docker\"\n\n      config {\n        image   = \"jasonish/suricata:8.0\"\n        command = \"suricata-update\"\n        volumes = [\n          \"${var.shared_dir}suricata/rules:/var/lib/suricata\",\n        ]\n      }\n\n      resources {\n        cpu    = 500\n        memory = 1024\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n  type = string\n}\n\nvariable \"shared_dir\" {\n  type = string\n}\n"
  },
  {
    "path": "nomad_jobs/security/wazuh-agent/nomad.job",
    "content": "job \"wazuh-agent\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"system\"\n  priority    = 100\n\n  meta {\n    job_file = \"nomad_jobs/security/wazuh-agent/nomad.job\"\n    version  = \"6\"  // Fix client.keys file permissions for persistence\n  }\n\n  group \"agent\" {\n    network {\n      mode = \"host\"\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    # Ensure agent data directory exists on host\n    task \"prep-agent-dir\" {\n      driver = \"docker\"\n\n      config {\n        image   = \"busybox:latest\"\n        command = \"sh\"\n        args    = [\"-c\", \"mkdir -p /host/var/lib/wazuh-agent; test -f /host/var/lib/wazuh-agent/client.keys || touch /host/var/lib/wazuh-agent/client.keys; chmod 666 /host/var/lib/wazuh-agent/client.keys\"]\n        volumes = [\n          \"/var/lib:/host/var/lib\",\n        ]\n      }\n\n      resources {\n        cpu    = 100\n        memory = 32\n      }\n\n      lifecycle {\n        hook    = \"prestart\"\n        sidecar = false\n      }\n    }\n\n    task \"wazuh-agent\" {\n      driver = \"docker\"\n\n      config {\n        image        = \"wazuh/wazuh-agent:4.14.4\"\n        network_mode = \"host\"\n        force_pull   = true\n        privileged   = true\n\n        # Mount host directories for monitoring and config\n        volumes = [\n          \"/var/log:/host/var/log:ro\",\n          \"/var/run/docker.sock:/var/run/docker.sock:ro\",\n          \"/:/host:ro\",\n          \"/var/lib/wazuh-agent/client.keys:/var/ossec/etc/client.keys\",\n          \"local/ossec.conf:/var/ossec/etc/ossec.conf\",\n        ]\n      }\n\n      # Configuration template for the agent\n      # Uses Consul service discovery to automatically find Wazuh manager\n      template {\n        data = <<EOH\n{{- if service 
\"wazuh-agent-comm\" -}}\n{{- with index (service \"wazuh-agent-comm\") 0 -}}\nWAZUH_MANAGER={{ .Address }}\nWAZUH_MANAGER_PORT={{ .Port }}\nWAZUH_PROTOCOL=tcp\nWAZUH_REGISTRATION_SERVER={{ .Address }}\nWAZUH_REGISTRATION_PORT=1515\nWAZUH_AGENT_NAME={{ env \"node.unique.name\" }}\nWAZUH_AGENT_GROUP=nomad-cluster\n{{- end -}}\n{{- else }}\n# Waiting for wazuh-agent-comm service to be available in Consul\nWAZUH_MANAGER=127.0.0.1\nWAZUH_MANAGER_PORT=1514\nWAZUH_PROTOCOL=tcp\nWAZUH_REGISTRATION_SERVER=127.0.0.1\nWAZUH_REGISTRATION_PORT=1515\nWAZUH_AGENT_NAME={{ env \"node.unique.name\" }}\nWAZUH_AGENT_GROUP=nomad-cluster\n{{- end }}\nEOH\n        destination = \"local/agent.env\"\n        env         = true\n        change_mode = \"restart\"\n      }\n\n      # Custom ossec.conf for monitoring specific logs\n      # Uses Consul service discovery to automatically find Wazuh manager\n      # Uses SIGHUP for hot reload (requires Wazuh 4.14.0+)\n      template {\n        data = <<EOH\n<ossec_config>\n  <client>\n    <server>\n{{- if service \"wazuh-agent-comm\" -}}\n{{- with index (service \"wazuh-agent-comm\") 0 }}\n      <address>{{ .Address }}</address>\n      <port>{{ .Port }}</port>\n{{- end -}}\n{{- else }}\n      <address>127.0.0.1</address>\n      <port>1514</port>\n{{- end }}\n      <protocol>tcp</protocol>\n    </server>\n    <config-profile>ubuntu, ubuntu20, ubuntu20.04</config-profile>\n    <notify_time>10</notify_time>\n    <time-reconnect>60</time-reconnect>\n    <auto_restart>yes</auto_restart>\n  </client>\n\n  <client_buffer>\n    <disabled>no</disabled>\n    <queue_size>5000</queue_size>\n    <events_per_second>500</events_per_second>\n  </client_buffer>\n\n  <!-- ==================== LOG COLLECTION ==================== -->\n\n  <!-- System logs -->\n  <localfile>\n    <log_format>syslog</log_format>\n    <location>/host/var/log/syslog</location>\n  </localfile>\n\n  <localfile>\n    <log_format>syslog</log_format>\n    
<location>/host/var/log/auth.log</location>\n  </localfile>\n\n  <localfile>\n    <log_format>syslog</log_format>\n    <location>/host/var/log/kern.log</location>\n  </localfile>\n\n  <localfile>\n    <log_format>syslog</log_format>\n    <location>/host/var/log/dpkg.log</location>\n  </localfile>\n\n  <!-- HashiCorp stack logs -->\n  <localfile>\n    <log_format>json</log_format>\n    <location>/host/var/log/nomad/*.log</location>\n  </localfile>\n\n  <localfile>\n    <log_format>json</log_format>\n    <location>/host/var/log/consul/*.log</location>\n  </localfile>\n\n  <!-- Docker daemon logs -->\n  <localfile>\n    <log_format>syslog</log_format>\n    <location>/host/var/log/docker.log</location>\n  </localfile>\n\n  <!-- Audit logs if auditd is installed -->\n  <localfile>\n    <log_format>audit</log_format>\n    <location>/host/var/log/audit/audit.log</location>\n  </localfile>\n\n  <!-- SSH logs -->\n  <localfile>\n    <log_format>syslog</log_format>\n    <location>/host/var/log/secure</location>\n  </localfile>\n\n  <!-- Journal logs for systemd -->\n  <localfile>\n    <log_format>journald</log_format>\n    <location>journald</location>\n  </localfile>\n\n  <!-- ==================== SURICATA IDS INTEGRATION ==================== -->\n  <!-- Suricata eve.json - Wazuh has native decoder for this format -->\n  <localfile>\n    <log_format>json</log_format>\n    <location>/host/var/log/suricata/eve.json</location>\n  </localfile>\n\n  <!-- Suricata service log -->\n  <localfile>\n    <log_format>syslog</log_format>\n    <location>/host/var/log/suricata/suricata.log</location>\n  </localfile>\n\n  <!-- ==================== FILE INTEGRITY MONITORING ==================== -->\n  <syscheck>\n    <disabled>no</disabled>\n    <frequency>21600</frequency>\n    <scan_on_start>yes</scan_on_start>\n    <alert_new_files>yes</alert_new_files>\n\n    <!-- Critical system directories -->\n    <directories check_all=\"yes\" realtime=\"yes\">/host/etc</directories>\n    
<directories check_all=\"yes\" realtime=\"yes\">/host/usr/bin</directories>\n    <directories check_all=\"yes\" realtime=\"yes\">/host/usr/sbin</directories>\n    <directories check_all=\"yes\" realtime=\"yes\">/host/bin</directories>\n    <directories check_all=\"yes\" realtime=\"yes\">/host/sbin</directories>\n\n    <!-- HashiCorp config directories -->\n    <directories check_all=\"yes\">/host/etc/nomad.d</directories>\n    <directories check_all=\"yes\">/host/etc/consul.d</directories>\n\n    <!-- SSH keys and config -->\n    <directories check_all=\"yes\" realtime=\"yes\">/host/root/.ssh</directories>\n    <directories check_all=\"yes\">/host/home/*/.ssh</directories>\n\n    <!-- Ignore frequently changing files -->\n    <ignore>/host/etc/mtab</ignore>\n    <ignore>/host/etc/hosts.deny</ignore>\n    <ignore>/host/etc/mail/statistics</ignore>\n    <ignore>/host/etc/random-seed</ignore>\n    <ignore>/host/etc/adjtime</ignore>\n    <ignore>/host/etc/httpd/logs</ignore>\n    <ignore>/host/etc/resolv.conf</ignore>\n    <ignore type=\"sregex\">.log$|.tmp$|.swp$</ignore>\n  </syscheck>\n\n  <!-- ==================== ROOTCHECK ==================== -->\n  <rootcheck>\n    <disabled>no</disabled>\n    <check_files>yes</check_files>\n    <check_trojans>yes</check_trojans>\n    <check_dev>yes</check_dev>\n    <check_sys>yes</check_sys>\n    <check_pids>yes</check_pids>\n    <check_ports>yes</check_ports>\n    <check_if>yes</check_if>\n    <frequency>43200</frequency>\n    <rootkit_files>/var/ossec/etc/shared/rootkit_files.txt</rootkit_files>\n    <rootkit_trojans>/var/ossec/etc/shared/rootkit_trojans.txt</rootkit_trojans>\n  </rootcheck>\n\n  <!-- ==================== VULNERABILITY DETECTION ==================== -->\n  <wodle name=\"syscollector\">\n    <disabled>no</disabled>\n    <interval>1h</interval>\n    <scan_on_start>yes</scan_on_start>\n    <hardware>yes</hardware>\n    <os>yes</os>\n    <network>yes</network>\n    <packages>yes</packages>\n    <ports 
all=\"no\">yes</ports>\n    <processes>yes</processes>\n  </wodle>\n\n  <!-- ==================== SECURITY CONFIGURATION ASSESSMENT ==================== -->\n  <wodle name=\"sca\">\n    <enabled>yes</enabled>\n    <scan_on_start>yes</scan_on_start>\n    <interval>12h</interval>\n    <skip_nfs>yes</skip_nfs>\n  </wodle>\n\n  <!-- ==================== DOCKER MONITORING ==================== -->\n  <wodle name=\"docker-listener\">\n    <disabled>no</disabled>\n    <interval>10m</interval>\n    <attempts>5</attempts>\n    <run_on_start>yes</run_on_start>\n  </wodle>\n\n  <!-- ==================== ACTIVE RESPONSE ==================== -->\n  <!-- Disabled by default - enable with caution in production -->\n  <active-response>\n    <disabled>yes</disabled>\n  </active-response>\n\n</ossec_config>\nEOH\n        destination = \"local/ossec.conf\"\n        change_mode = \"restart\"\n      }\n\n      resources {\n        cpu    = 300\n        memory = 512\n      }\n\n      service {\n        name = \"wazuh-agent\"\n        tags = [\"security\", \"monitoring\"]\n\n        # Use a simple script check that runs inside the container\n        check {\n          type     = \"script\"\n          name     = \"agent-status\"\n          command  = \"/var/ossec/bin/wazuh-control\"\n          args     = [\"status\"]\n          interval = \"30s\"\n          timeout  = \"10s\"\n        }\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n  type = string\n}\n"
  },
  {
    "path": "nomad_jobs/security/wazuh-server/nomad.job",
    "content": "job \"wazuh-server\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"wazuh-stack\" {\n    count = 1\n\n    network {\n      port \"indexer\" {\n        host_network = \"lan\"\n        to = 9200\n      }\n      port \"manager\" {\n        static       = 1514\n        host_network = \"lan\"\n        to = 1514\n      }\n      port \"manager_reg\" {\n        static       = 1515\n        host_network = \"lan\"\n        to = 1515\n      }\n      port \"manager_api\" {\n        host_network = \"lan\"\n        to = 55000\n      }\n      port \"dashboard\" {\n        host_network = \"lan\"\n        to = 443\n      }\n    }\n\n    # Persistent volumes for Wazuh components\n    volume \"wazuh-indexer\" {\n      type            = \"csi\"\n      read_only       = false\n      source          = \"wazuh-indexer\"\n      access_mode     = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n    volume \"wazuh-manager\" {\n      type            = \"csi\"\n      read_only       = false\n      source          = \"wazuh-manager\"\n      access_mode     = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n    volume \"wazuh-dashboard\" {\n      type            = \"csi\"\n      read_only       = false\n      source          = \"wazuh-dashboard\"\n      access_mode     = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    # Prep disk task for indexer volume permissions\n    task \"prep-indexer-disk\" {\n      driver = \"docker\"\n\n      volume_mount {\n        volume      = \"wazuh-indexer\"\n        destination = \"/volume/\"\n        read_only   = false\n      }\n\n      config {\n        image   = 
\"busybox:latest\"\n        command = \"sh\"\n        args    = [\"-c\", \"chown -R 1000:1000 /volume/\"]\n      }\n\n      resources {\n        cpu    = 200\n        memory = 128\n      }\n\n      lifecycle {\n        hook    = \"prestart\"\n        sidecar = false\n      }\n    }\n\n    # Prep disk task for manager volume permissions\n    task \"prep-manager-disk\" {\n      driver = \"docker\"\n\n      volume_mount {\n        volume      = \"wazuh-manager\"\n        destination = \"/volume/\"\n        read_only   = false\n      }\n\n      config {\n        image   = \"busybox:latest\"\n        command = \"sh\"\n        args    = [\"-c\", \"chown -R 999:999 /volume/\"]\n      }\n\n      resources {\n        cpu    = 200\n        memory = 128\n      }\n\n      lifecycle {\n        hook    = \"prestart\"\n        sidecar = false\n      }\n    }\n\n\n    # Prep disk task for dashboard volume permissions\n    task \"prep-dashboard-disk\" {\n      driver = \"docker\"\n\n      volume_mount {\n        volume      = \"wazuh-dashboard\"\n        destination = \"/volume/\"\n        read_only   = false\n      }\n\n      config {\n        image   = \"busybox:latest\"\n        command = \"sh\"\n        args    = [\"-c\", \"rm -rf /volume/wazuh && mkdir -p /volume/wazuh/config && chown -R 1000:1000 /volume/\"]\n      }\n\n      resources {\n        cpu    = 200\n        memory = 128\n      }\n\n      lifecycle {\n        hook    = \"prestart\"\n        sidecar = false\n      }\n    }\n\n    # Wazuh Indexer (OpenSearch-based)\n    task \"wazuh-indexer\" {\n      driver = \"docker\"\n\n      volume_mount {\n        volume      = \"wazuh-indexer\"\n        destination = \"/var/lib/wazuh-indexer\"\n        read_only   = false\n      }\n\n      config {\n        image      = \"wazuh/wazuh-indexer:4.14.4\"\n        force_pull = true\n        ports      = [\"indexer\"]\n        volumes = [\n          \"local/opensearch.yml:/usr/share/wazuh-indexer/config/opensearch.yml\",\n        ]\n 
       ulimit {\n          nofile = \"65536:65536\"\n          memlock = \"-1:-1\"\n        }\n      }\n\n      env {\n        OPENSEARCH_JAVA_OPTS = \"-Xms1g -Xmx1g\"\n      }\n\n      template {\n        data = <<EOH\nnetwork.host: \"0.0.0.0\"\nnode.name: \"wazuh-indexer\"\ncluster.name: \"wazuh-cluster\"\npath.data: /var/lib/wazuh-indexer\npath.logs: /var/log/wazuh-indexer\ndiscovery.type: single-node\ncompatibility.override_main_response_version: true\n\n# Enable SSL with demo certificates\nplugins.security.ssl.http.enabled: true\nplugins.security.ssl.http.pemcert_filepath: certs/indexer.pem\nplugins.security.ssl.http.pemkey_filepath: certs/indexer-key.pem\nplugins.security.ssl.http.pemtrustedcas_filepath: certs/root-ca.pem\nplugins.security.ssl.transport.enabled: true\nplugins.security.ssl.transport.pemcert_filepath: certs/indexer.pem\nplugins.security.ssl.transport.pemkey_filepath: certs/indexer-key.pem\nplugins.security.ssl.transport.pemtrustedcas_filepath: certs/root-ca.pem\nplugins.security.ssl.transport.enforce_hostname_verification: false\nplugins.security.ssl.transport.resolve_hostname: false\nplugins.security.allow_unsafe_democertificates: true\nplugins.security.allow_default_init_securityindex: true\nplugins.security.authcz.admin_dn:\n  - \"CN=admin,OU=Wazuh,O=Wazuh,L=California,C=US\"\nplugins.security.nodes_dn:\n  - \"CN=wazuh-indexer,OU=Wazuh,O=Wazuh,L=California,C=US\"\nplugins.security.audit.type: internal_opensearch\nplugins.security.enable_snapshot_restore_privilege: true\nplugins.security.check_snapshot_restore_write_privileges: true\nplugins.security.restapi.roles_enabled: [\"all_access\", \"security_rest_api_access\"]\nEOH\n        destination = \"local/opensearch.yml\"\n        perms       = \"0644\"\n      }\n\n      resources {\n        cpu    = 1000\n        memory = 2048\n      }\n\n      service {\n        name = \"wazuh-indexer\"\n        port = \"indexer\"\n\n        check {\n          type     = \"tcp\"\n          port     = 
\"indexer\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n        }\n      }\n    }\n\n    # Wazuh Manager (Analysis Engine)\n    task \"wazuh-manager\" {\n      driver = \"docker\"\n\n      volume_mount {\n        volume      = \"wazuh-manager\"\n        destination = \"/var/ossec/data\"\n        read_only   = false\n      }\n\n      config {\n        image      = \"wazuh/wazuh-manager:4.14.4\"\n        force_pull = true\n        hostname   = \"wazuh-manager\"\n        ports      = [\"manager\", \"manager_reg\", \"manager_api\"]\n        volumes = [\n          \"local/local_rules.xml:/var/ossec/etc/rules/local_rules.xml:ro\",\n          \"local/ossec.conf:/var/ossec/etc/ossec.conf:ro\",\n        ]\n      }\n\n      template {\n        data = <<EOH\nINDEXER_URL=https://{{ env \"NOMAD_IP_indexer\" }}:{{ env \"NOMAD_HOST_PORT_indexer\" }}\nINDEXER_USERNAME=admin\nINDEXER_PASSWORD=admin\nFILEBEAT_SSL_VERIFICATION_MODE=none\nSSL_CERTIFICATE_AUTHORITIES=\"\"\nAPI_USERNAME=wazuh-wui\nAPI_PASSWORD=${var.wazuh_api_password}\nEOH\n        destination = \"secrets/manager.env\"\n        env         = true\n      }\n\n      # Custom rules to suppress noisy alerts\n      template {\n        data = <<EOH\n<!-- Local rules for Wazuh Manager -->\n<group name=\"local,syslog,suricata,\">\n\n  <!-- Suppress noisy Suricata STREAM alerts (rule 86601) -->\n  <!-- These \"invalid ack\" alerts are low-value network noise -->\n  <rule id=\"100001\" level=\"0\">\n    <if_sid>86601</if_sid>\n    <description>Suppressed: Suricata STREAM ESTABLISHED invalid ack</description>\n  </rule>\n\n</group>\nEOH\n        destination = \"local/local_rules.xml\"\n        perms       = \"0644\"\n      }\n\n      # Wazuh ossec.conf with log_alert_level=8 (only high/critical alerts)\n      # Wazuh levels: 0=ignored, 1-4=low, 5-7=medium, 8-10=high, 11-15=critical\n      template {\n        data = <<EOH\n<!--\n  Wazuh - Manager - Default configuration for amzn 2023\n  More info at: 
https://documentation.wazuh.com\n  Mailing list: https://groups.google.com/forum/#!forum/wazuh\n\n  CUSTOMIZED: log_alert_level set to 8 (high/critical alerts only)\n-->\n\n<ossec_config>\n  <global>\n    <jsonout_output>yes</jsonout_output>\n    <alerts_log>yes</alerts_log>\n    <logall>no</logall>\n    <logall_json>no</logall_json>\n    <email_notification>no</email_notification>\n    <smtp_server>smtp.example.wazuh.com</smtp_server>\n    <email_from>wazuh@example.wazuh.com</email_from>\n    <email_to>recipient@example.wazuh.com</email_to>\n    <email_maxperhour>12</email_maxperhour>\n    <email_log_source>alerts.log</email_log_source>\n    <agents_disconnection_time>15m</agents_disconnection_time>\n    <agents_disconnection_alert_time>0</agents_disconnection_alert_time>\n    <update_check>yes</update_check>\n  </global>\n\n  <alerts>\n    <!-- CUSTOMIZED: Changed from 3 to 8 to only log high/critical alerts -->\n    <log_alert_level>8</log_alert_level>\n    <email_alert_level>12</email_alert_level>\n  </alerts>\n\n  <!-- Choose between \"plain\", \"json\", or \"plain,json\" for the format of internal logs -->\n  <logging>\n    <log_format>plain</log_format>\n  </logging>\n\n  <remote>\n    <connection>secure</connection>\n    <port>1514</port>\n    <protocol>tcp</protocol>\n    <queue_size>131072</queue_size>\n  </remote>\n\n  <!-- Policy monitoring -->\n  <rootcheck>\n    <disabled>no</disabled>\n    <check_files>yes</check_files>\n    <check_trojans>yes</check_trojans>\n    <check_dev>yes</check_dev>\n    <check_sys>yes</check_sys>\n    <check_pids>yes</check_pids>\n    <check_ports>yes</check_ports>\n    <check_if>yes</check_if>\n\n    <!-- Frequency that rootcheck is executed - every 12 hours -->\n    <frequency>43200</frequency>\n\n    <rootkit_files>etc/rootcheck/rootkit_files.txt</rootkit_files>\n    <rootkit_trojans>etc/rootcheck/rootkit_trojans.txt</rootkit_trojans>\n\n    <skip_nfs>yes</skip_nfs>\n\n    <ignore>/var/lib/containerd</ignore>\n    
<ignore>/var/lib/docker/overlay2</ignore>\n  </rootcheck>\n\n  <wodle name=\"cis-cat\">\n    <disabled>yes</disabled>\n    <timeout>1800</timeout>\n    <interval>1d</interval>\n    <scan-on-start>yes</scan-on-start>\n\n    <java_path>wodles/java</java_path>\n    <ciscat_path>wodles/ciscat</ciscat_path>\n  </wodle>\n\n  <!-- Osquery integration -->\n  <wodle name=\"osquery\">\n    <disabled>yes</disabled>\n    <run_daemon>yes</run_daemon>\n    <log_path>/var/log/osquery/osqueryd.results.log</log_path>\n    <config_path>/etc/osquery/osquery.conf</config_path>\n    <add_labels>yes</add_labels>\n  </wodle>\n\n  <!-- System inventory -->\n  <wodle name=\"syscollector\">\n    <disabled>no</disabled>\n    <interval>1h</interval>\n    <scan_on_start>yes</scan_on_start>\n    <hardware>yes</hardware>\n    <os>yes</os>\n    <network>yes</network>\n    <packages>yes</packages>\n    <ports all=\"yes\">yes</ports>\n    <processes>yes</processes>\n    <users>yes</users>\n    <groups>yes</groups>\n    <services>yes</services>\n    <browser_extensions>yes</browser_extensions>\n\n    <!-- Database synchronization settings -->\n    <synchronization>\n      <max_eps>10</max_eps>\n    </synchronization>\n  </wodle>\n\n  <sca>\n    <enabled>yes</enabled>\n    <scan_on_start>yes</scan_on_start>\n    <interval>12h</interval>\n    <skip_nfs>yes</skip_nfs>\n  </sca>\n\n  <vulnerability-detection>\n    <enabled>yes</enabled>\n    <index-status>yes</index-status>\n    <feed-update-interval>60m</feed-update-interval>\n  </vulnerability-detection>\n\n  <indexer>\n    <enabled>yes</enabled>\n    <hosts>\n      <host>https://{{ env \"NOMAD_IP_indexer\" }}:{{ env \"NOMAD_HOST_PORT_indexer\" }}</host>\n    </hosts>\n    <ssl>\n      <certificate_authorities>\n        <ca>/etc/filebeat/certs/root-ca.pem</ca>\n      </certificate_authorities>\n      <certificate>/etc/filebeat/certs/filebeat.pem</certificate>\n      <key>/etc/filebeat/certs/filebeat-key.pem</key>\n    </ssl>\n  </indexer>\n\n  <!-- 
File integrity monitoring -->\n  <syscheck>\n    <disabled>no</disabled>\n\n    <!-- Frequency that syscheck is executed default every 12 hours -->\n    <frequency>43200</frequency>\n\n    <scan_on_start>yes</scan_on_start>\n\n    <!-- Generate alert when new file detected -->\n    <alert_new_files>yes</alert_new_files>\n\n    <!-- Don't ignore files that change more than 'frequency' times -->\n    <auto_ignore frequency=\"10\" timeframe=\"3600\">no</auto_ignore>\n\n    <!-- Directories to check  (perform all possible verifications) -->\n    <directories>/etc,/usr/bin,/usr/sbin</directories>\n    <directories>/bin,/sbin,/boot</directories>\n\n    <!-- Files/directories to ignore -->\n    <ignore>/etc/mtab</ignore>\n    <ignore>/etc/hosts.deny</ignore>\n    <ignore>/etc/mail/statistics</ignore>\n    <ignore>/etc/random-seed</ignore>\n    <ignore>/etc/random.seed</ignore>\n    <ignore>/etc/adjtime</ignore>\n    <ignore>/etc/httpd/logs</ignore>\n    <ignore>/etc/utmpx</ignore>\n    <ignore>/etc/wtmpx</ignore>\n    <ignore>/etc/cups/certs</ignore>\n    <ignore>/etc/dumpdates</ignore>\n    <ignore>/etc/svc/volatile</ignore>\n\n    <!-- File types to ignore -->\n    <ignore type=\"sregex\">.log$|.swp$</ignore>\n\n    <!-- Check the file, but never compute the diff -->\n    <nodiff>/etc/ssl/private.key</nodiff>\n\n    <skip_nfs>yes</skip_nfs>\n    <skip_dev>yes</skip_dev>\n    <skip_proc>yes</skip_proc>\n    <skip_sys>yes</skip_sys>\n\n    <!-- Nice value for Syscheck process -->\n    <process_priority>10</process_priority>\n\n    <!-- Maximum output throughput -->\n    <max_eps>50</max_eps>\n\n    <!-- Database synchronization settings -->\n    <synchronization>\n      <enabled>yes</enabled>\n      <interval>5m</interval>\n      <max_eps>10</max_eps>\n    </synchronization>\n  </syscheck>\n\n  <!-- Active response -->\n  <global>\n    <white_list>127.0.0.1</white_list>\n    <white_list>^localhost.localdomain$</white_list>\n    <white_list>168.63.129.16</white_list>\n  
</global>\n\n  <command>\n    <name>disable-account</name>\n    <executable>disable-account</executable>\n    <timeout_allowed>yes</timeout_allowed>\n  </command>\n\n  <command>\n    <name>restart-wazuh</name>\n    <executable>restart-wazuh</executable>\n  </command>\n\n  <command>\n    <name>firewall-drop</name>\n    <executable>firewall-drop</executable>\n    <timeout_allowed>yes</timeout_allowed>\n  </command>\n\n  <command>\n    <name>host-deny</name>\n    <executable>host-deny</executable>\n    <timeout_allowed>yes</timeout_allowed>\n  </command>\n\n  <command>\n    <name>route-null</name>\n    <executable>route-null</executable>\n    <timeout_allowed>yes</timeout_allowed>\n  </command>\n\n  <command>\n    <name>win_route-null</name>\n    <executable>route-null.exe</executable>\n    <timeout_allowed>yes</timeout_allowed>\n  </command>\n\n  <command>\n    <name>netsh</name>\n    <executable>netsh.exe</executable>\n    <timeout_allowed>yes</timeout_allowed>\n  </command>\n\n  <!--\n  <active-response>\n    active-response options here\n  </active-response>\n  -->\n\n  <!-- Log analysis -->\n  <localfile>\n    <log_format>command</log_format>\n    <command>df -P</command>\n    <frequency>360</frequency>\n  </localfile>\n\n  <localfile>\n    <log_format>full_command</log_format>\n    <command>netstat -tulpn | sed 's/\\([[:alnum:]]\\+\\)\\ \\+[[:digit:]]\\+\\ \\+[[:digit:]]\\+\\ \\+\\(.*\\):\\([[:digit:]]*\\)\\ \\+\\([0-9\\.\\:\\*]\\+\\).\\+\\ \\([[:digit:]]*\\/[[:alnum:]\\-]*\\).*/\\1 \\2 == \\3 == \\4 \\5/' | sort -k 4 -g | sed 's/ == \\(.*\\) ==/:\\1/' | sed 1,2d</command>\n    <alias>netstat listening ports</alias>\n    <frequency>360</frequency>\n  </localfile>\n\n  <localfile>\n    <log_format>full_command</log_format>\n    <command>last -n 20</command>\n    <frequency>360</frequency>\n  </localfile>\n\n  <ruleset>\n    <!-- Default ruleset -->\n    <decoder_dir>ruleset/decoders</decoder_dir>\n    <rule_dir>ruleset/rules</rule_dir>\n    
<rule_exclude>0215-policy_rules.xml</rule_exclude>\n    <list>etc/lists/audit-keys</list>\n    <list>etc/lists/amazon/aws-eventnames</list>\n    <list>etc/lists/security-eventchannel</list>\n    <list>etc/lists/malicious-ioc/malware-hashes</list>\n    <list>etc/lists/malicious-ioc/malicious-ip</list>\n    <list>etc/lists/malicious-ioc/malicious-domains</list>\n\n    <!-- User-defined ruleset -->\n    <decoder_dir>etc/decoders</decoder_dir>\n    <rule_dir>etc/rules</rule_dir>\n  </ruleset>\n\n  <rule_test>\n    <enabled>yes</enabled>\n    <threads>1</threads>\n    <max_sessions>64</max_sessions>\n    <session_timeout>15m</session_timeout>\n  </rule_test>\n\n  <!-- Configuration for wazuh-authd -->\n  <auth>\n    <disabled>no</disabled>\n    <port>1515</port>\n    <use_source_ip>no</use_source_ip>\n    <purge>yes</purge>\n    <use_password>no</use_password>\n    <ciphers>HIGH:!ADH:!EXP:!MD5:!RC4:!3DES:!CAMELLIA:@STRENGTH</ciphers>\n    <!-- <ssl_agent_ca></ssl_agent_ca> -->\n    <ssl_verify_host>no</ssl_verify_host>\n    <ssl_manager_cert>etc/sslmanager.cert</ssl_manager_cert>\n    <ssl_manager_key>etc/sslmanager.key</ssl_manager_key>\n    <ssl_auto_negotiate>no</ssl_auto_negotiate>\n  </auth>\n\n  <cluster>\n    <name>wazuh</name>\n    <node_name>node01</node_name>\n    <node_type>master</node_type>\n    <key></key>\n    <port>1516</port>\n    <bind_addr>0.0.0.0</bind_addr>\n    <nodes>\n        <node>NODE_IP</node>\n    </nodes>\n    <hidden>no</hidden>\n    <disabled>yes</disabled>\n  </cluster>\n\n</ossec_config>\n\n<ossec_config>\n  <localfile>\n    <log_format>syslog</log_format>\n    <location>/var/ossec/logs/active-responses.log</location>\n  </localfile>\n\n</ossec_config>\nEOH\n        destination = \"local/ossec.conf\"\n        perms       = \"0644\"\n      }\n\n      resources {\n        cpu    = 1000\n        memory = 1024\n      }\n\n      service {\n        name = \"wazuh-manager\"\n        port = \"manager_api\"\n        tags = [\"metrics\"]\n\n       
 meta {\n          api_port = \"${NOMAD_HOST_PORT_manager_api}\"\n        }\n\n        check {\n          type     = \"tcp\"\n          port     = \"manager_api\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      service {\n        name = \"wazuh-agent-comm\"\n        port = \"manager\"\n        tags = [\"agent-communication\"]\n\n        check {\n          type     = \"tcp\"\n          port     = \"manager\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      service {\n        name = \"wazuh-agent-reg\"\n        port = \"manager_reg\"\n        tags = [\"agent-registration\"]\n\n        check {\n          type     = \"tcp\"\n          port     = \"manager_reg\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n        }\n      }\n    }\n\n    # Wazuh Dashboard (Web UI)\n    task \"wazuh-dashboard\" {\n      driver = \"docker\"\n\n      volume_mount {\n        volume      = \"wazuh-dashboard\"\n        destination = \"/usr/share/wazuh-dashboard/data\"\n        read_only   = false\n      }\n\n      config {\n        image      = \"wazuh/wazuh-dashboard:4.14.4\"\n        force_pull = true\n        ports      = [\"dashboard\"]\n        volumes = [\n          \"local/opensearch_dashboards.yml:/usr/share/wazuh-dashboard/config/opensearch_dashboards.yml\",\n          \"local/wazuh.yml:/usr/share/wazuh-dashboard/data/wazuh/config/wazuh.yml:ro\",\n        ]\n      }\n\n      template {\n        data = <<EOH\nOPENSEARCH_HOSTS=https://{{ env \"NOMAD_IP_indexer\" }}:{{ env \"NOMAD_HOST_PORT_indexer\" }}\nOPENSEARCH_USERNAME=admin\nOPENSEARCH_PASSWORD=admin\nOPENSEARCH_SSL_VERIFICATIONMODE=none\nSERVER_HOST=0.0.0.0\nSERVER_PORT=443\nSERVER_SSL_ENABLED=true\nSERVER_SSL_CERTIFICATE=/usr/share/wazuh-dashboard/config/certs/dashboard.pem\nSERVER_SSL_KEY=/usr/share/wazuh-dashboard/config/certs/dashboard-key.pem\nWAZUH_API_URL=https://{{ env \"NOMAD_IP_manager_api\" }}:{{ env 
\"NOMAD_HOST_PORT_manager_api\" }}\nAPI_USERNAME=wazuh-wui\nAPI_PASSWORD=${var.wazuh_api_password}\nEOH\n        destination = \"secrets/dashboard.env\"\n        env         = true\n        change_mode = \"noop\"\n      }\n\n      template {\n        data = <<EOH\nserver.host: \"0.0.0.0\"\nserver.port: 443\nopensearch.hosts: [\"https://{{ env \"NOMAD_IP_indexer\" }}:{{ env \"NOMAD_HOST_PORT_indexer\" }}\"]\nopensearch.ssl.verificationMode: none\nopensearch.username: \"admin\"\nopensearch.password: \"admin\"\nopensearch.requestHeadersWhitelist: [\"securitytenant\",\"Authorization\"]\nopensearch_security.multitenancy.enabled: false\nopensearch_security.readonly_mode.roles: [\"kibana_read_only\"]\nserver.ssl.enabled: true\nserver.ssl.certificate: /usr/share/wazuh-dashboard/config/certs/dashboard.pem\nserver.ssl.key: /usr/share/wazuh-dashboard/config/certs/dashboard-key.pem\nuiSettings.overrides.defaultRoute: \"/app/wz-home\"\ncsp.warnLegacyBrowsers: false\nEOH\n        destination = \"local/opensearch_dashboards.yml\"\n        perms       = \"0644\"\n        change_mode = \"noop\"\n      }\n\n      template {\n        data = <<EOH\nhosts:\n  - default:\n      url: https://{{ env \"NOMAD_IP_manager_api\" }}\n      port: {{ env \"NOMAD_HOST_PORT_manager_api\" }}\n      username: wazuh-wui\n      password: ${var.wazuh_api_password}\n      run_as: false\nEOH\n        destination = \"local/wazuh.yml\"\n        perms       = \"0644\"\n        change_mode = \"noop\"\n      }\n\n      resources {\n        cpu    = 500\n        memory = 1024\n      }\n\n      service {\n        name = \"wazuh-dashboard\"\n        port = \"dashboard\"\n        tags = [\n          \"traefik.enable=true\",\n          \"traefik.http.routers.wazuh.rule=Host(`wazuh.${var.tld}`)\",\n          \"traefik.http.routers.wazuh.entrypoints=websecure\",\n          \"traefik.http.routers.wazuh.tls=true\",\n          \"traefik.http.routers.wazuh.tls.certresolver=letsencrypt\",\n          
\"traefik.http.services.wazuh.loadbalancer.server.scheme=https\",\n          \"traefik.http.services.wazuh.loadbalancer.serversTransport=insecure-skip-verify@file\",\n        ]\n\n        check {\n          type     = \"tcp\"\n          port     = \"dashboard\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n        }\n      }\n    }\n\n  }\n}\n\nvariable \"region\" {\n  type = string\n}\n\nvariable \"tld\" {\n  type = string\n}\n\nvariable \"shared_dir\" {\n  type = string\n}\n\nvariable \"wazuh_indexer_password\" {\n  type = string\n  description = \"Password for Wazuh indexer admin user\"\n}\n\nvariable \"wazuh_api_password\" {\n  type = string\n  description = \"Password for Wazuh API user\"\n}\n\nvariable \"wazuh_dashboard_password\" {\n  type = string\n  description = \"Password for Wazuh dashboard kibanaserver user\"\n}\n"
  },
  {
    "path": "nomad_jobs/security/wazuh-server/volume-dashboard.hcl",
    "content": "type = \"csi\"\nid = \"wazuh-dashboard\"\nname = \"wazuh-dashboard\"\nplugin_id = \"org.democratic-csi.iscsi\"\n\ncapability {\n  access_mode = \"single-node-writer\"\n  attachment_mode = \"file-system\"\n}\n\n# iSCSI volume will be created via democratic-csi\n# No additional context needed - managed by TrueNAS/democratic-csi\n"
  },
  {
    "path": "nomad_jobs/security/wazuh-server/volume-indexer.hcl",
    "content": "type = \"csi\"\nid = \"wazuh-indexer\"\nname = \"wazuh-indexer\"\nplugin_id = \"org.democratic-csi.iscsi\"\n\ncapability {\n  access_mode = \"single-node-writer\"\n  attachment_mode = \"file-system\"\n}\n\n# iSCSI volume will be created via democratic-csi\n# No additional context needed - managed by TrueNAS/democratic-csi\n"
  },
  {
    "path": "nomad_jobs/security/wazuh-server/volume-manager.hcl",
    "content": "type = \"csi\"\nid = \"wazuh-manager\"\nname = \"wazuh-manager\"\nplugin_id = \"org.democratic-csi.iscsi\"\n\ncapability {\n  access_mode = \"single-node-writer\"\n  attachment_mode = \"file-system\"\n}\n\n# iSCSI volume will be created via democratic-csi\n# No additional context needed - managed by TrueNAS/democratic-csi\n"
  },
  {
    "path": "nomad_jobs/smart-home/deconz/nomad.job",
    "content": "job \"deconz\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/smart-home/deconz/nomad.job\"\nservice_owner = \"@pmanuk\"\n    version = \"3\"\n  }\n\n  constraint {\n    attribute = \"${meta.zigbee}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"homeautomation\" {\n    count = 1 \n\n    network {\n      port \"web\" {\n        static = \"8182\"\n        host_network = \"lan\"\n      }\n      port \"ws\" {\n        host_network = \"lan\"\n      }\n    }\n\n    volume \"deconz\" {\n      type      = \"csi\"\n      read_only = false\n\n      source    = \"deconz\"\n      access_mode = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"deconz\" {\n      driver = \"docker\"\n      config {\n        image = \"deconzcommunity/deconz:2.33.0\"\n        force_pull = true\n        ports = [\"web\", \"ws\"]\n        privileged = true\n        volumes = [\n          \"/dev/ttyACM0:/dev/ttyACM0\",\n        ]\n      }\n\n      volume_mount {\n        volume      = \"deconz\"\n        destination = \"/opt/deCONZ\"\n        read_only   = false\n      }\n\n      service {\n        port = \"web\"\n\tname = \"deconz\"\n        tags = [\n          \"traefik.enable=true\",\n          \"traefik.http.middlewares.httpsRedirect.redirectscheme.scheme=https\",\n          \"traefik.http.middlewares.cors.headers.accesscontrolallowmethods=GET,OPTIONS,PUT\",\n          \"traefik.http.middlewares.cors.headers.accesscontrolmaxage=100\",\n          \"traefik.http.middlewares.cors.headers.addvaryheader=true\",\n\n\n          \"traefik.http.routers.${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_TASK_NAME}.${var.tld}\",\n        
  \"traefik.http.middlewares.malpotAuth.basicauth.users=${var.auth}\",\n          \"traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=forward-auth, malpotAuth\"\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      env {\n        TZ = \"Europe/Amsterdam\"\n        DECONZ_WEB_PORT = \"${NOMAD_PORT_web}\"\n        DECONZ_WS_PORT = \"${NOMAD_PORT_ws}\"\n        DECONZ_DEVICE = \"/dev/ttyACM0\"\n        DECONZ_VNC_MODE = \"0\"\n        DECONZ_VNC_PASSWORD = \"changeme\"\n        DECONZ_VNC_PORT = \"5901\"\n      }\n\n      resources {\n        cpu    = 100\n        memory = 256\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\n\n\nvariable \"tld\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n\nvariable \"auth\" {\n   type = string\n}\n"
  },
  {
    "path": "nomad_jobs/smart-home/deconz/volume.hcl",
    "content": "id           = \"deconz\"\nexternal_id  = \"deconz\"\nname         = \"deconz\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"1GiB\"\ncapacity_max = \"1GiB\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"block-device\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\"]\n}\n\n"
  },
  {
    "path": "nomad_jobs/smart-home/home-assistant/nomad.job",
    "content": "job \"home-assistant\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/smart-home/home-assistant/nomad.job\"\nservice_owner = \"@pmanuk\"\n    version = \"4\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  constraint {\n    attribute = \"${meta.zigbee}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"home-automation\" {\n    count = 1 \n\n    network {\n      port \"http\" {\n        static = \"8123\"\n        host_network = \"lan\"\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"hass\" {\n      driver = \"docker\"\n      config {\n        image = \"homeassistant/home-assistant:2026.4.3\"\n        network_mode = \"host\"\n        privileged = \"true\"\n        volumes = [\n          \"/run/dbus:/run/dbus\",\n          \"${var.shared_dir}home-assistant:/config\",\n        ]\n      }\n\n      service {\n        port = \"http\"\n        name = \"hass\"\n        tags = [\n          \"traefik.enable=true\",\n          \"traefik.http.middlewares.httpsRedirect.redirectscheme.scheme=https\",\n          \"traefik.http.routers.${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_TASK_NAME}.${var.tld}\",\n          \"traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=forward-auth\"\n        ]\n        check {\n          type     = \"tcp\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      resources {\n        cpu    = 100\n        memory = 512\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\n\n\nvariable \"tld\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n"
  },
  {
    "path": "nomad_jobs/smart-home/home-assistant/volume.hcl",
    "content": "id           = \"home-assistant\"\nexternal_id  = \"home-assistant\"\nname         = \"home-assistant\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"1GiB\"\ncapacity_max = \"1GiB\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"block-device\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\"]\n}\n\n"
  },
  {
    "path": "nomad_jobs/smart-home/mqtt/nomad.job",
    "content": "job \"mosquitto\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/smart-home/mqtt/nomad.job\"\nversion = \"4\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"homeautomation\" {\n    count = 1 \n\n    network {\n      port \"0\" {\n        host_network = \"tailscale\"\n        to = \"9001\"\n      }\n      port \"1\" {\n        host_network = \"tailscale\"\n        to = \"1883\"\n      }\n    }\n\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"mosquitto\" {\n      driver = \"docker\"\n      config {\n        image = \"docker-registry.${var.tld}/mosquitto:2\"\n        force_pull = true\n        network_mode = \"host\"\n        ports = [\"0\", \"1\"]\n        volumes = [\n          \"${var.shared_dir}mosquitto:/mosquitto/\",\n        ]\n      }\n\n      service {\n         tags = [\"mqtt\", \"net-internal\"]\n         name = \"mqtt\"\n         port = \"1\"\n      }\n\n      env {\n        TZ = \"Europe/Amsterdam\"\n      }\n\n      resources {\n        cpu    = 100\n        memory = 64\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\nvariable \"tld\" {}\n\nvariable \"shared_dir\" {\n    type = string\n}\n"
  },
  {
    "path": "nomad_jobs/smart-home/owntracks-recorder/nomad.job",
    "content": "job \"owntracks-recorder\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/smart-home/owntracks-recorder/nomad.job\"\nversion = \"4\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"tracking\" {\n    count = 1 \n\n    network {\n      port \"http\" {\n        host_network = \"tailscale\"\n        to = \"8083\"\n      }\n    }\n\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"owntracks\" {\n      driver = \"docker\"\n      config {\n        image = \"owntracks/recorder:1.0.1\"\n        args = [\n          \"--http-host\", \"${NOMAD_IP_http}\"\n        ]\n        force_pull = true\n        #network_mode = \"host\"\n        ports = [\"http\"]\n        volumes = [\n          \"${var.shared_dir}owntracks-recorder:/store/\",\n          \"${var.shared_dir}owntracks-recorder/views:/htdocs/views/\",\n        ]\n      }\n\n      service {\n        port = \"http\"\n        name = \"owntracks\"\n        tags = [\n          \"traefik.enable=true\",\n          \"traefik.http.middlewares.httpsRedirect.redirectscheme.scheme=https\",\n\n\n          \"traefik.http.routers.${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_TASK_NAME}.${var.tld}\",\n          \"traefik.http.middlewares.malpotAuthNew.basicauth.users=${var.auth}\",\n          \"traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=forward-auth, forward-auth\"\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/\"\n          interval = \"10s\"\n          timeout  = \"10s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      env {\n        TZ = \"Europe/Amsterdam\"\n        OTR_HOST = \"mqtt-pub.${var.tld}\"\n        
OTR_PORT = \"8883\"\n        OTR_USER = \"mqtt\"\n        OTR_PASS = var.otr_pass\n        OTR_STORAGEDIR = \"/store\"\n        OTR_VIEWSDIR = \"/store/views\"\n        #OTR_CAPATH = \"/etc/ssl/certs\"\n      }\n\n      resources {\n        cpu    = 100\n        memory = 256\n      }\n    }\n  }\n}\n\nvariable \"region\" {}\n\nvariable \"tld\" {}\n\nvariable \"shared_dir\" {}\n\nvariable \"auth\" {}\n\nvariable \"otr_pass\" {}\n"
  },
  {
    "path": "nomad_jobs/smart-home/zigbee2mqtt/nomad.job",
    "content": "job \"zigbee2mqtt\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  meta {\n      job_file = \"nomad_jobs/smart-home/zigbee2mqtt/nomad.job\"\nservice_owner = \"@pmanuk\"\n    version = \"15\"\n  }\n\n  group \"homeautomation\" {\n    count = 1 \n    network {\n      port \"tcp\" {\n        host_network = \"lan\"\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    task \"zigbee2mqtt\" {\n      driver = \"docker\"\n      config {\n        image = \"koenkk/zigbee2mqtt\"\n        force_pull = true\n        network_mode = \"host\"\n        ports = [\"tcp\"]\n        privileged = true\n        volumes = [\n          \"${var.shared_dir}zigbee2mqtt:/app/data\",\n          \"${var.shared_dir}zigbee2mqtt/zigbee-shepherd-converters:/app/node_modules/zigbee-shepherd-converters\",\n          \"/dev/ttyACM0:/dev/ttyACM0\",\n        ]\n      }\n\n      env {\n        TZ = \"Europe/Amsterdam\"\n      }\n\n      resources {\n        cpu    = 100\n        memory = 256\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\n\n\nvariable \"shared_dir\" {\n    type = string\n}\n"
  },
  {
    "path": "nomad_jobs/storage-backends/docker-registry/nomad.job",
    "content": "job \"docker-registry\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/storage-backends/docker-registry/nomad.job\"\nversion = \"4\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"infrastructure\" {\n    count = 1 \n\n    network {\n      port \"http\" {\n        static = \"5000\"\n        host_network = \"lan\"\n      }\n    }\n\n    volume \"docker-registry\" {\n      type      = \"csi\"\n      read_only = false\n      source    = \"docker-registry-data\"\n      access_mode = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"docker-registry\" {\n      driver = \"docker\"\n      config {\n        image = \"registry:3.1.0\"\n        ports = [\"http\"]\n        network_mode = \"host\"\n      }\n\n      volume_mount {\n        volume      = \"docker-registry\"\n        destination = \"/data\"\n        read_only   = false\n      }\n\n      env {\n        REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY = \"/data\"\n        REGISTRY_HTTP_ADDR = \"${NOMAD_ADDR_http}\"\n      }\n      service {\n        port = \"http\"\n\t      name = \"docker-registry\"\n        tags = [\n          \"traefik.enable=true\"\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 20\n        memory = 64\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\nvariable \"tld\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n"
  },
  {
    "path": "nomad_jobs/storage-backends/docker-registry/volume.hcl",
    "content": "id           = \"docker-registry-data\"\nexternal_id  = \"docker-registry-data\"\nname         = \"docker-registry-data\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"10GiB\"\ncapacity_max = \"10GiB\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"block-device\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\", \"nodiratime\", \"data=ordered\"]\n}\n\n"
  },
  {
    "path": "nomad_jobs/storage-backends/mariadb/nomad.job",
    "content": "job \"mariadb\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/storage-backends/mariadb/nomad.job\"\nversion = \"2\"  // Incremented version number\n  }\n\n  group \"db\" {\n    network {\n      mode = \"host\"\n      port \"mariadb\" {\n        static = \"3306\"\n        host_network = \"lan\"\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    task \"mariadb\" {\n      driver = \"docker\"\n\n      config {\n        image = \"mariadb:11\"\n        volumes = [\n          \"${var.shared_dir}shared-mariadb:/var/lib/mysql\",\n        ]\n        ports = [\"mariadb\"]\n      }\n\n      env {\n        MARIADB_ROOT_PASSWORD = \"${var.db_password}\"\n      }\n\n      service {\n        name = \"${NOMAD_JOB_NAME}\"\n        tags = [\"mariadb\"]\n        port = \"mariadb\"\n\n        check {\n          type     = \"tcp\"\n          port     = \"mariadb\"\n          interval = \"30s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      resources {\n        cpu    = \"200\"\n        memory = \"512\"\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n\nvariable \"db_password\" {\n    type = string\n    description = \"Password for MariaDB root user\"\n}\n"
  },
  {
    "path": "nomad_jobs/storage-backends/neo4j/nomad.job",
    "content": "job \"neo4j\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/storage-backends/neo4j/nomad.job\"\n    version = \"3\"\n  }\n\n  group \"neo4j\" {\n    count = 1\n\n    network {\n      mode = \"host\"\n      port \"http\" {\n        static = 7474\n        host_network = \"lan\"\n      }\n      port \"https\" {\n        static = 7473\n        host_network = \"lan\"\n      }\n      port \"bolt\" {\n        static = 7687\n        host_network = \"lan\"\n      }\n    }\n\n    volume \"neo4j-data\" {\n      type            = \"csi\"\n      read_only       = false\n      source          = \"neo4j-data\"\n      access_mode     = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"neo4j\" {\n      driver = \"docker\"\n\n      config {\n        image = \"neo4j:5.26\"\n        ports = [\"http\", \"https\", \"bolt\"]\n      }\n\n      volume_mount {\n        volume      = \"neo4j-data\"\n        destination = \"/data\"\n        read_only   = false\n      }\n\n      env {\n        NEO4J_AUTH = \"neo4j/ChAnGeMe\"\n        NEO4J_PLUGINS = \"[\\\"apoc\\\"]\"\n        NEO4J_apoc_export_file_enabled = \"true\"\n        NEO4J_apoc_import_file_enabled = \"true\"\n        NEO4J_apoc_import_file_use__neo4j__config = \"true\"\n      }\n\n      resources {\n        cpu    = 100\n        memory = 1600\n      }\n\n      service {\n        name = \"neo4j\"\n        tags = [\"graph-db\", \"ai\"]\n        port = \"bolt\"\n\n        check {\n          type     = \"tcp\"\n          port     = \"bolt\"\n          interval = \"30s\"\n          timeout  = \"2s\"\n        }\n      }\n    }\n  }\n\n}\nvariable \"region\" {\n  type = string\n  
default = \"global\"\n}\n"
  },
  {
    "path": "nomad_jobs/storage-backends/neo4j/setup.job",
    "content": "job \"neo4j-setup\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type = \"batch\"\n\n  meta {\n    job_file = \"nomad_jobs/storage-backends/neo4j/setup.job\"\n    version = \"2\"\n  }\n\n  group \"setup\" {\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    task \"neo4j-init\" {\n      driver = \"docker\"\n\n      config {\n        image = \"neo4j:5.26\"\n        command = \"cypher-shell\"\n        args = [\n          \"-a\", \"neo4j.service.consul:7687\",\n          \"-u\", \"neo4j\", \n          \"-p\", \"ChAnGeMe\",\n          \"CREATE CONSTRAINT cognee_node_id IF NOT EXISTS FOR (n:CogneeNode) REQUIRE n.id IS UNIQUE;\"\n        ]\n      }\n\n      template {\n        data = <<EOH\nNEO4J_PASSWORD=\"changeme\"\nEOH\n        destination = \"secrets/neo4j-config.env\"\n        env = true\n      }\n\n      resources {\n        cpu    = 200\n        memory = 256\n      }\n    }\n  }\n\n}\nvariable \"region\" {\n  type = string\n  default = \"global\"\n}\n"
  },
  {
    "path": "nomad_jobs/storage-backends/neo4j/volume.hcl",
    "content": "# Neo4j graph database storage volume\nid           = \"neo4j-data\"\nname         = \"neo4j-data\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"8GiB\"\ncapacity_max = \"8GiB\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"block-device\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\"]\n}\n"
  },
  {
    "path": "nomad_jobs/storage-backends/pgvector/nomad.job",
    "content": "job \"pgvector\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/storage-backends/pgvector/nomad.job\"\n    version = \"6\"  // Fixed password variable reference (postgres_pass -> pgvector_admin_password)\n  }\n\n  group \"db\" {\n    network {\n      mode = \"host\"\n      port \"pgvector\" {\n        static = \"5432\"  \n        host_network = \"lan\"\n      }\n    }\n    \n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"pgvector\" {\n      driver = \"docker\"\n      config {\n        image = \"pgvector/pgvector:pg16\"\n        volumes = [\n          \"${var.shared_dir}pgvector-data:/var/lib/postgresql/data\",\n        ]\n        ports = [\"pgvector\"]\n      }\n\n      env {\n        POSTGRES_DB       = \"vectordb\"\n        POSTGRES_USER     = \"postgres\"\n        POSTGRES_PASSWORD = \"${var.pgvector_admin_password}\"\n        PGDATA            = \"/var/lib/postgresql/data\"\n      }\n\n      service {\n        name = \"${NOMAD_JOB_NAME}\"\n        tags = [\"pgvector\", \"database\", \"vector-database\"]\n        port = \"pgvector\"\n\n        check {\n          type     = \"tcp\"\n          port     = \"pgvector\"\n          interval = \"30s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      resources {\n        cpu    = \"100\"\n        memory = \"128\"\n      }\n    }\n  }\n}\n\nvariable \"region\" {}\nvariable \"shared_dir\" {}\nvariable \"pgvector_admin_password\" {}\n"
  },
  {
    "path": "nomad_jobs/storage-backends/pgvector/pgvector-setup.job",
    "content": "job \"pgvector-setup\" {\n  type = \"batch\"\n  datacenters = [\"dc1\"]\n  \n  meta {\n    job_file = \"nomad_jobs/storage-backends/pgvector/pgvector-setup.job\"\n    version = \"1\"\n  }\n  \n  group \"setup\" {\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    task \"initialize-pgvector\" {\n      driver = \"docker\"\n      \n      config {\n        image = \"pgvector/pgvector:pg16\"\n        command = \"sh\"\n        args = [\n          \"-c\",\n          \"PGPASSWORD=$PGVECTOR_PASSWORD psql -h 192.168.50.120 -p 5432 -U postgres -d cognee_db -c \\\"CREATE EXTENSION IF NOT EXISTS vector;\\\" && PGPASSWORD=$PGVECTOR_PASSWORD psql -h 192.168.50.120 -p 5432 -U postgres -c \\\"DO \\\\$\\\\$ BEGIN CREATE DATABASE embeddings; EXCEPTION WHEN duplicate_database THEN RAISE NOTICE 'embeddings database exists'; END \\\\$\\\\$;\\\" && PGPASSWORD=$PGVECTOR_PASSWORD psql -h 192.168.50.120 -p 5432 -U postgres -d embeddings -c \\\"CREATE EXTENSION IF NOT EXISTS vector;\\\"\"\n        ]\n      }\n      \n      env {\n        PGVECTOR_PASSWORD = \"${var.pgvector_pass}\"\n      }\n      \n      resources {\n        cpu    = 200\n        memory = 256\n      }\n    }\n  }\n}\n\nvariable \"pgvector_pass\" {\n  type = string\n  description = \"Admin password for the pgvector PostgreSQL server\"\n}\n"
  },
  {
    "path": "nomad_jobs/storage-backends/postgres/nomad.job",
    "content": "job \"postgres\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/storage-backends/postgres/nomad.job\"\nversion = \"5\"  // Fixed postgres password variable\n  }\n\n  group \"db\" {\n    network {\n      mode = \"host\"\n      port \"postgres\" {\n        static = \"5432\"\n        host_network = \"lan\"\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"postgres\" {\n      driver = \"docker\"\n\n      config {\n        image = \"postgres:15.17\"\n        volumes = [\n          \"${var.shared_dir}paperless-postgres:/appdata/postgres\",\n        ]\n        ports = [\"postgres\"]\n      }\n\n      env {\n        POSTGRES_DB       = \"paperless\"\n        POSTGRES_USER     = \"postgres\"\n        POSTGRES_PASSWORD = \"${var.postgres_pass}\"\n        PGDATA            = \"/appdata/postgres\"\n      }\n\n      service {\n        name = \"${NOMAD_JOB_NAME}\"\n        tags = [\"postgres\"]\n        port = \"postgres\"\n\n        check {\n          type     = \"tcp\"\n          port     = \"postgres\"\n          interval = \"30s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      resources {\n        cpu    = \"200\"\n        memory = \"512\"\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n\nvariable \"postgres_pass\" {\n    type = string\n    description = \"Admin password for PostgreSQL\"\n}\n"
  },
  {
    "path": "nomad_jobs/storage-backends/postgres/postgres-setup.job",
    "content": "job \"postgres-setup\" {\n  type = \"batch\"\n  datacenters = [\"dc1\"]\n  \n  meta {\n      job_file = \"nomad_jobs/storage-backends/postgres/postgres-setup.job\"\nversion = \"3\"  // CREATE DATABASE cannot run inside a DO block; use existence check + plain CREATE\n  }\n  \n  group \"setup\" {\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    task \"create-dbs\" {\n      driver = \"docker\"\n      \n      config {\n        image = \"postgres:15\"\n        command = \"sh\"\n        args = [\n          \"-c\",\n          \"for db in sonarr_main sonarr_logs radarr_main radarr_logs lidarr_main lidarr_logs litellm nextcloud paperless; do PGPASSWORD=$POSTGRES_PASSWORD psql -h postgres.service.consul -U postgres -tAc \\\"SELECT 1 FROM pg_database WHERE datname = '$db'\\\" | grep -q 1 || PGPASSWORD=$POSTGRES_PASSWORD psql -h postgres.service.consul -U postgres -c \\\"CREATE DATABASE $db\\\"; done\"\n        ]\n      }\n      \n      env {\n        POSTGRES_PASSWORD = \"${var.postgres_pass}\"\n      }\n      \n      resources {\n        cpu    = 200\n        memory = 256\n      }\n    }\n  }\n}\n\nvariable \"postgres_pass\" {\n  type = string\n  description = \"Admin password for the PostgreSQL server\"\n}\n"
  },
  {
    "path": "nomad_jobs/storage-backends/qdrant/nomad.job",
    "content": "job \"qdrant\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/storage-backends/qdrant/nomad.job\"\n    version = \"3\"\n  }\n\n  group \"qdrant\" {\n    count = 1\n\n    network {\n      mode = \"host\"\n      port \"http\" {\n        static = 6333\n        to = 6333\n        host_network = \"lan\"\n      }\n      port \"grpc\" {\n        static = 6334\n        to = 6334\n        host_network = \"lan\"\n      }\n    }\n\n    volume \"qdrant-data\" {\n      type = \"csi\"\n      read_only = false\n      source = \"qdrant-data\"\n      access_mode = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"qdrant\" {\n      driver = \"docker\"\n\n      config {\n        image = \"qdrant/qdrant:v1.17\"\n        ports = [\"http\", \"grpc\"]\n      }\n\n      volume_mount {\n        volume      = \"qdrant-data\"\n        destination = \"/qdrant/storage\"\n        read_only   = false\n      }\n\n      resources {\n        cpu    = 500\n        memory = 128\n      }\n\n      service {\n        name = \"qdrant\"\n        tags = [\"vector-db\", \"ai\", \"http\"]\n        port = \"http\"\n\n        check {\n          type     = \"tcp\"\n          port     = \"http\"\n          interval = \"30s\"\n          timeout  = \"2s\"\n        }\n      }\n    }\n  }\n\n}\nvariable \"region\" {\n  type = string\n  default = \"global\"\n}\n"
  },
  {
    "path": "nomad_jobs/storage-backends/qdrant/volume.hcl",
    "content": "# Qdrant vector database storage volume\nid           = \"qdrant-data\"\nname         = \"qdrant-data\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"10GiB\"\ncapacity_max = \"10GiB\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"block-device\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\"]\n}\n"
  },
  {
    "path": "nomad_jobs/storage-backends/redis/nomad.job",
    "content": "job \"redis\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/storage-backends/redis/nomad.job\"\n    version  = \"4\"  // Reduced memory 512MB -> 128MB\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"db\" {\n    count = 1\n\n    network {\n      mode = \"host\"\n      port \"redis\" {\n        static       = 6379\n        host_network = \"lan\"\n      }\n    }\n\n    volume \"redis\" {\n      type            = \"csi\"\n      read_only       = false\n      source          = \"redis-data\"\n      access_mode     = \"single-node-writer\"\n      attachment_mode = \"file-system\"\n    }\n\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"prep-disk\" {\n      driver = \"docker\"\n      volume_mount {\n        volume      = \"redis\"\n        destination = \"/volume/\"\n        read_only   = false\n      }\n      config {\n        image   = \"busybox:latest\"\n        command = \"sh\"\n        args    = [\"-c\", \"chmod 777 /volume/\"]\n      }\n      resources {\n        cpu    = 200\n        memory = 128\n      }\n\n      lifecycle {\n        hook    = \"prestart\"\n        sidecar = false\n      }\n    }\n\n    task \"redis\" {\n      driver = \"docker\"\n\n      config {\n        image = \"redis:8.6.2-alpine\"\n        ports = [\"redis\"]\n      }\n\n      volume_mount {\n        volume      = \"redis\"\n        destination = \"/data\"\n        read_only   = false\n      }\n\n      env {\n        # Save settings - save to disk every 60 seconds if at least 1 change\n        REDIS_SAVE_TO_DISK = \"60 1\"\n        # Set appendonly for durability\n        REDIS_APPENDONLY = \"yes\"\n      }\n\n  
    service {\n        name = \"redis\"\n        port = \"redis\"\n\n        check {\n          type     = \"tcp\"\n          port     = \"redis\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n        }\n      }\n\n      resources {\n        cpu    = 300\n        memory = 128\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n  type = string\n}\n\nvariable \"shared_dir\" {\n  type = string\n}"
  },
  {
    "path": "nomad_jobs/storage-backends/redis/volume.hcl",
    "content": "id           = \"redis-data\"\nexternal_id  = \"redis-data\"\nname         = \"redis-data\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-csi.iscsi\"\ncapacity_min = \"5GiB\"\ncapacity_max = \"5GiB\"\n\ncapability {\n  access_mode     = \"single-node-writer\"\n  attachment_mode = \"block-device\"\n}\n\nmount_options {\n  fs_type     = \"ext4\"\n  mount_flags = [\"noatime\", \"nodiratime\", \"data=ordered\"]\n}"
  },
  {
    "path": "nomad_jobs/storage-backends/volumes/nfs-example.hcl",
    "content": "type = \"csi\"\nid = \"example\"\nname = \"example\"\nplugin_id = \"nfsofficial\"\nexternal_id = \"example\"\ncapability {\n  access_mode = \"multi-node-multi-writer\"\n  attachment_mode = \"file-system\"\n}\ncontext {\n  server = \"192.168.50.208\"\n  share = \"/mnt/pool0/share/example\"\n  mountPermissions = \"0\"  \n}\nmount_options {\n  fs_type = \"nfs\"\n  mount_flags = [ \"timeo=30\", \"intr\", \"vers=3\", \"_netdev\" , \"nolock\" ]\n}\n"
  },
  {
    "path": "nomad_jobs/system/docker-cleanup/nomad.job",
    "content": "job \"docker-cleanup\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type = \"sysbatch\"\n\n  meta {\n    job_file = \"nomad_jobs/system/docker-cleanup/nomad.job\"\n    version = \"1\"\n  }\n\n  # Run weekly on Sundays at 2 AM\n  periodic {\n    crons            = [\"0 2 * * 0\"]\n    prohibit_overlap = true\n    time_zone        = \"UTC\"\n  }\n\n  group \"cleanup\" {\n    # sysbatch will automatically run on all eligible nodes\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    task \"docker-prune\" {\n      driver = \"raw_exec\"\n      \n      config {\n        command = \"/bin/bash\"\n        args    = [\"-c\", <<EOF\n          echo \"Starting Docker cleanup on node: ${node.unique.name}\"\n          echo \"Current disk usage:\"\n          df -h /\n          \n          echo \"Docker system info before cleanup:\"\n          docker system df\n          \n          echo \"Running docker system prune...\"\n          # Prune unused containers, networks, images (dangling), and build cache\n          docker system prune -f\n          \n          # Prune unused images (including non-dangling)\n          docker image prune -a -f --filter \"until=168h\"  # Remove images older than 1 week\n          \n          echo \"Docker system info after cleanup:\"\n          docker system df\n          \n          echo \"Final disk usage:\"\n          df -h /\n          \n          echo \"Docker cleanup completed on node: ${node.unique.name}\"\n        EOF\n        ]\n      }\n\n      resources {\n        cpu    = 100\n        memory = 128\n      }\n\n      # Only run on nodes that have docker command available\n      constraint {\n        attribute = \"${attr.kernel.name}\"\n        value     = \"linux\"\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n  type = string\n}"
  },
  {
    "path": "nomad_jobs/web-apps/alertmanager-dashboard/nomad.job",
    "content": "job \"alertmanager-dashboard\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/web-apps/alertmanager-dashboard/nomad.job\"\nversion = \"2\"  // Moved deprecated task-level network stanza to group level\n  }\n\n  group \"ui\" {\n    count = 1 \n    network {\n      port \"http\" {}\n    }\n    task \"alertmanager-dashboard\" {\n      driver = \"docker\"\n      config {\n        image = \"lmierzwa/karma:v0.129\"\n        network_mode = \"host\"\n      }\n      env {\n        ALERTMANAGER_URI = \"http://alertmanager.service.consul:9093\"\n        HOST = \"${NOMAD_IP_http}\"\n        PORT = \"${NOMAD_PORT_http}\"\n        ALERTMANAGER_PROXY = \"true\"\n      }\n      service {\n        port = \"http\"\n\t      name = \"alerts\"\n        tags = [\"net-internal\"]\n        check {\n          type     = \"http\"\n          path     = \"/\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 20\n        memory = 24\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\n\n"
  },
  {
    "path": "nomad_jobs/web-apps/firecrawl/nomad.job",
    "content": "job \"firecrawl\" {\n  region      = \"global\"\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/web-apps/firecrawl/nomad.job\" \n    version  = \"0.1.2\" \n  }\n\n  group \"firecrawl\" {\n    count = 1\n\n    network {\n      mode = \"host\"\n      port \"http\" {\n        to     = 3002\n      }\n    }\n\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"firecrawl\" {\n      driver = \"docker\"\n\n      config {\n        image = \"mendable/firecrawl:0.1.23\" \n        ports = [\"http\"]\n      }\n\n      resources {\n        cpu    = 500 # MHz\n        memory = 512 # MiB\n      }\n\n      env {\n        PORT = \"3002\"\n      }\n\n      service {\n        name = \"firecrawl\"\n        port = \"http\"\n        tags = [\"traefik.enable=true\"]\n\n        check {\n          type     = \"http\" # Changed from tcp\n          path     = \"/\"    # Added health check path\n          port     = \"http\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "nomad_jobs/web-apps/heimdall/nomad.job",
    "content": "job \"heimdall\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/web-apps/heimdall/nomad.job\"\nversion = \"5\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"downloaders\" {\n    count = 1 \n    network {\n      port \"http\" {\n        host_network = \"tailscale\"\n        to = \"80\"\n      }\n    }\n\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"heimdall\" {\n      driver = \"docker\"\n      config {\n        image = \"linuxserver/heimdall:2021.11.28\"\n        ports = [\"http\"]\n        volumes = [\n          \"${var.shared_dir}heimdall:/config\",\n        ]\n      }\n\n      env {\n        TZ = \"Etc/UTC\"\n        PUID = \"1000\"\n        PGID = \"1000\"\n      }\n\n      service {\n        port = \"http\"\n\tname = \"heimdall\"\n        tags = [\n          \"traefik.enable=true\",\n          \"traefik.http.middlewares.httpsRedirect.redirectscheme.scheme=https\",\n\n\n          \"traefik.http.routers.${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_TASK_NAME}.${var.tld}\",\n          \"traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=forward-auth\"\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 100\n        memory = 512\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\n\n\nvariable \"tld\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n"
  },
  {
    "path": "nomad_jobs/web-apps/homepage/nomad.job",
    "content": "job \"homepage\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/web-apps/homepage/nomad.job\"\n    version  = \"1\"\n  }\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"homepage\" {\n    count = 1\n\n    network {\n      port \"http\" {\n        host_network = \"tailscale\"\n        to           = 3000\n      }\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"homepage\" {\n      driver = \"docker\"\n      config {\n        image = \"ghcr.io/gethomepage/homepage:v1.12.3\"\n        ports = [\"http\"]\n        volumes = [\n          \"local/config:/app/config\",\n        ]\n      }\n\n      env {\n        TZ                     = \"Etc/UTC\"\n        HOMEPAGE_ALLOWED_HOSTS = \"homepage.${var.tld}\"\n      }\n\n      template {\n        data        = <<EOH\n---\nEOH\n        destination = \"local/config/settings.yaml\"\n      }\n\n      template {\n        data        = <<EOH\n---\n- Media:\n    - Plex:\n        icon: plex.svg\n        href: \"https://plex.${var.tld}\"\n        description: Media Server\n    - Sonarr:\n        icon: sonarr.svg\n        href: \"https://sonarr.${var.tld}\"\n        description: TV Shows\n    - Radarr:\n        icon: radarr.svg\n        href: \"https://radarr.${var.tld}\"\n        description: Movies\n    - Lidarr:\n        icon: lidarr.svg\n        href: \"https://lidarr.${var.tld}\"\n        description: Music\n    - Overseerr:\n        icon: overseerr.svg\n        href: \"https://overseerr.${var.tld}\"\n        description: Media Requests\n    - Tautulli:\n        icon: tautulli.svg\n        href: \"https://tautulli.${var.tld}\"\n        description: Plex Statistics\n\n- Downloads:\n    - SABnzbd:\n        icon: sabnzbd.svg\n        href: \"https://sabnzbd.${var.tld}\"\n        
description: Usenet\n    - qBittorrent:\n        icon: qbittorrent.svg\n        href: \"https://qbittorrent.${var.tld}\"\n        description: Torrents\n    - Prowlarr:\n        icon: prowlarr.svg\n        href: \"https://prowlarr.${var.tld}\"\n        description: Indexer Manager\n    - Jackett:\n        icon: jackett.svg\n        href: \"https://jackett.${var.tld}\"\n        description: Indexer Proxy\n    - Tdarr:\n        icon: tdarr.svg\n        href: \"https://tdarr.${var.tld}\"\n        description: Media Transcoding\n    - Maintainerr:\n        icon: maintainerr.svg\n        href: \"https://maintainerr.${var.tld}\"\n        description: Media Maintenance\n\n- Personal Cloud:\n    - Nextcloud:\n        icon: nextcloud.svg\n        href: \"https://nextcloud.${var.tld}\"\n        description: Cloud Storage\n    - Bitwarden:\n        icon: bitwarden.svg\n        href: \"https://bitwarden.${var.tld}\"\n        description: Password Manager\n    - Actual Budget:\n        icon: actual-budget.svg\n        href: \"https://actualbudget.${var.tld}\"\n        description: Budget Manager\n    - Paperless:\n        icon: paperless-ngx.svg\n        href: \"https://paperless.${var.tld}\"\n        description: Document Management\n    - Ntfy:\n        icon: ntfy.svg\n        href: \"https://ntfy.${var.tld}\"\n        description: Push Notifications\n    - Navidrome:\n        icon: navidrome.svg\n        href: \"https://navidrome.${var.tld}\"\n        description: Music Streaming\n\n- Music Discovery:\n    - Multi-Scrobbler:\n        icon: si-lastdotfm\n        href: \"https://multi-scrobbler.${var.tld}\"\n        description: Scrobbling Hub\n    - Lidify:\n        icon: lidarr.svg\n        href: \"https://lidify.${var.tld}\"\n        description: Artist Discovery\n    - MediaSage:\n        icon: si-openai\n        href: \"https://mediasage.${var.tld}\"\n        description: AI Playlist Generator\n\n- AI & ML:\n    - Open WebUI:\n        icon: open-webui.svg\n        href: 
\"https://open-webui.${var.tld}\"\n        description: Chat Interface\n    - LiteLLM:\n        icon: si-openai\n        href: \"https://litellm.${var.tld}\"\n        description: LLM Proxy\n    - Manyfold:\n        icon: si-threedotjs\n        href: \"https://manyfold.${var.tld}\"\n        description: 3D Model Library\n\n- Observability:\n    - Grafana:\n        icon: grafana.svg\n        href: \"https://grafana.${var.tld}\"\n        description: Dashboards\n    - Prometheus:\n        icon: prometheus.svg\n        href: \"https://prometheus.${var.tld}\"\n        description: Metrics\n    - Alertmanager:\n        icon: alertmanager.svg\n        href: \"https://alertmanager.${var.tld}\"\n        description: Alert Routing\n\n- Infrastructure:\n    - Traefik:\n        icon: traefik.svg\n        href: \"https://traefik.${var.tld}\"\n        description: Reverse Proxy\n    - Nomad:\n        icon: nomad.svg\n        href: \"http://192.168.50.113:4646\"\n        description: Workload Orchestrator\n    - Consul:\n        icon: consul.svg\n        href: \"http://192.168.50.113:8500\"\n        description: Service Discovery\n    - TrueNAS:\n        icon: truenas.svg\n        href: \"https://192.168.50.208\"\n        description: Storage\n\n- Smart Home:\n    - Home Assistant:\n        icon: home-assistant.svg\n        href: \"https://home-assistant.${var.tld}\"\n        description: Home Automation\n    - Zigbee2MQTT:\n        icon: zigbee2mqtt.svg\n        href: \"https://zigbee2mqtt.${var.tld}\"\n        description: Zigbee Bridge\nEOH\n        destination = \"local/config/services.yaml\"\n      }\n\n      template {\n        data        = <<EOH\n---\n- Developer:\n    - GitHub:\n        icon: github.svg\n        href: \"https://github.com\"\n    - ChatGPT:\n        icon: openai.svg\n        href: \"https://chat.openai.com\"\n    - Claude:\n        icon: si-anthropic\n        href: \"https://claude.ai\"\nEOH\n        destination = \"local/config/bookmarks.yaml\"\n      
}\n\n      template {\n        data        = <<EOH\n---\nEOH\n        destination = \"local/config/widgets.yaml\"\n      }\n\n      template {\n        data        = <<EOH\n---\nEOH\n        destination = \"local/config/docker.yaml\"\n      }\n\n      template {\n        data        = <<EOH\n---\nEOH\n        destination = \"local/config/kubernetes.yaml\"\n      }\n\n      service {\n        port = \"http\"\n        name = \"homepage\"\n        tags = [\n          \"traefik.enable=true\",\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 200\n        memory = 256\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n  type = string\n}\n\nvariable \"shared_dir\" {\n  type = string\n}\n\nvariable \"tld\" {\n  type = string\n}\n"
  },
  {
    "path": "nomad_jobs/web-apps/kideo/nomad.job",
    "content": "job \"kideo\" {\n  region      = var.region\n  datacenters = [\"cheese\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/web-apps/kideo/nomad.job\"\n    version  = \"1\"\n  }\n\n  group \"kideo\" {\n    count = 1\n\n    constraint {\n      attribute = \"${attr.unique.hostname}\"\n      value     = \"cheese01\"\n    }\n\n    network {\n      port \"http\" {\n        host_network = \"lan\"\n        to           = \"8000\"\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel      = 1\n      min_healthy_time  = \"30s\"\n      healthy_deadline  = \"5m\"\n      progress_deadline = \"10m\"\n      auto_revert       = true\n    }\n\n    task \"kideo\" {\n      driver = \"docker\"\n\n      config {\n        image      = \"ghcr.io/perrymanuk/kideo:v0.37\"\n        ports      = [\"http\"]\n        runtime    = \"nvidia\"\n        privileged = true\n\n        volumes = [\n          \"${var.shared_dir}kideo:/data\",\n        ]\n      }\n\n      env {\n        TZ                      = \"Etc/UTC\"\n        NVIDIA_VISIBLE_DEVICES  = \"all\"\n        KIDEO_POSTGRES_HOST     = \"postgres.service.consul\"\n        KIDEO_POSTGRES_PORT     = \"5432\"\n        KIDEO_POSTGRES_USER     = \"postgres\"\n        KIDEO_POSTGRES_PASSWORD = var.postgres_pass\n        KIDEO_POSTGRES_DB       = \"kideo\"\n        KIDEO_JWT_SECRET        = var.kideo_jwt_secret\n        KIDEO_VIDEO_DIR         = \"/data/videos\"\n        KIDEO_THUMBNAIL_DIR     = \"/data/thumbnails\"\n        KIDEO_DEFAULT_QUALITY   = \"720\"\n        KIDEO_COOKIES_FILE            = \"/data/cookies.txt\"\n        KIDEO_CURIOSITYSTREAM_USER    = var.kideo_curiositystream_user\n        KIDEO_CURIOSITYSTREAM_PASS    = var.kideo_curiositystream_pass\n      }\n\n      service {\n        port = \"http\"\n        name = \"kideo\"\n        tags = [\n          
\"traefik.enable=true\",\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/api/health\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 500\n        memory = 1024\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n  type = string\n}\n\nvariable \"shared_dir\" {\n  type = string\n}\n\nvariable \"postgres_pass\" {\n  type        = string\n  description = \"Shared PostgreSQL password\"\n}\n\nvariable \"kideo_jwt_secret\" {\n  type        = string\n  description = \"JWT signing secret for admin auth\"\n}\n\nvariable \"kideo_curiositystream_user\" {\n  type        = string\n  description = \"CuriosityStream email\"\n  default     = \"\"\n}\n\nvariable \"kideo_curiositystream_pass\" {\n  type        = string\n  description = \"CuriosityStream password\"\n  default     = \"\"\n}\n"
  },
  {
    "path": "nomad_jobs/web-apps/minecraftmath/nomad.job",
    "content": "job \"minecraftmath\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/web-apps/minecraftmath/nomad.job\"\n    version  = \"1\"\n  }\n\n  group \"minecraftmath\" {\n    count = 1\n\n    network {\n      port \"http\" {\n        host_network = \"lan\"\n        to           = \"8000\"\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel      = 1\n      min_healthy_time  = \"30s\"\n      healthy_deadline  = \"5m\"\n      progress_deadline = \"10m\"\n      auto_revert       = true\n    }\n\n    task \"minecraftmath\" {\n      driver = \"docker\"\n\n      config {\n        image = \"ghcr.io/perrymanuk/minecraftmath:v0.7\"\n        ports = [\"http\"]\n      }\n\n      env {\n        TZ                       = \"Etc/UTC\"\n        MCMATH_POSTGRES_HOST     = \"postgres.service.consul\"\n        MCMATH_POSTGRES_PORT     = \"5432\"\n        MCMATH_POSTGRES_USER     = \"postgres\"\n        MCMATH_POSTGRES_PASSWORD = var.postgres_pass\n        MCMATH_POSTGRES_DB       = \"minecraftmath\"\n        MCMATH_JWT_SECRET        = var.minecraftmath_jwt_secret\n      }\n\n      service {\n        port = \"http\"\n        name = \"minecraftmath\"\n        tags = [\n          \"traefik.enable=true\",\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/api/health\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n\n      resources {\n        cpu    = 300\n        memory = 256\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n  type = string\n}\n\nvariable \"postgres_pass\" {\n  type        = string\n  description = \"Shared PostgreSQL password\"\n}\n\nvariable 
\"minecraftmath_jwt_secret\" {\n  type        = string\n  description = \"JWT signing secret for parent auth\"\n}\n"
  },
  {
    "path": "nomad_jobs/web-apps/wordpress/nomad.job",
    "content": "job \"wordpress\" {\n  meta {\n    job_file = \"nomad_jobs/web-apps/wordpress/nomad.job\"\n  }\n\n  region      = var.region\n  datacenters = [\"dc1\"]\n\n  constraint {\n    attribute = \"${meta.shared_mount}\"\n    operator  = \"=\"\n    value     = \"true\"\n  }\n\n  group \"database\" {\n    network {\n      port \"db\" {\n        to = 3306\n      }\n    }\n\n    service {\n      name = \"wordpress-db\"\n      port = \"db\"\n\n      check {\n        type     = \"tcp\"\n        port     = \"db\"\n        interval = \"10s\"\n        timeout  = \"2s\"\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task \"mysql\" {\n      driver = \"docker\"\n\n      env {\n        MYSQL_ROOT_PASSWORD = \"somewordpress\"\n        MYSQL_DATABASE      = \"wordpress\"\n        MYSQL_USER          = \"wordpress\"\n        MYSQL_PASSWORD      = \"wordpress\"\n      }\n\n      config {\n        image = \"mysql:9.6\"\n        volumes = [\n          \"${var.shared_dir}wordpress-db:/var/lib/mysql\",\n        ]\n      }\n\n      resources {\n        cpu    = 500\n        memory = 256\n      }\n    }\n  }\n\n  group \"wordpress\" {\n    network {\n      port \"http\" {\n        to = 80\n      }\n    }\n\n    restart {\n      attempts = 3\n      delay    = \"15s\"\n      interval = \"10m\"\n      mode     = \"delay\"\n    }\n\n    service {\n      name = \"wordpress\"\n      tags = [\"www\"]\n      port = \"http\"\n\n      check {\n        type     = \"tcp\"\n        port     = \"http\"\n        interval = \"10s\"\n        timeout  = \"2s\"\n      }\n    }\n\n    update {\n      max_parallel     = 1\n      min_healthy_time = \"30s\"\n      auto_revert      = true\n    }\n\n    task 
\"await-wordpress\" {\n      driver = \"docker\"\n\n      config {\n        image        = \"alpine:3.23.4\"\n        command      = \"sh\"\n        args         = [\"-c\", \"echo -n 'Waiting for service'; until nslookup -port=8600 wordpress-db.service.consul ${NOMAD_IP_http} 2>&1 >/dev/null; do echo '.'; sleep 2; done\"]\n        network_mode = \"host\"\n      }\n\n      resources {\n        cpu    = 200\n        memory = 128\n      }\n\n      lifecycle {\n        hook    = \"prestart\"\n        sidecar = false\n      }\n    }\n\n    task \"wordpress\" {\n      driver = \"docker\"\n\n      template {\n        data = <<EOH\n{{- if service \"wordpress-db\" -}}\n{{- with index (service \"wordpress-db\") 0 -}}\nWORDPRESS_DB_HOST={{ .Address }}:{{ .Port }}\n{{- end -}}\n{{- end }}\nWORDPRESS_DB_USER=wordpress\nWORDPRESS_DB_PASSWORD=wordpress\nWORDPRESS_DB_NAME=wordpress\nEOH\n\n        destination = \"local/envvars.txt\"\n        env = true\n      }\n\n      config {\n        image = \"wordpress:6.9.4\"\n        ports = [\"http\"]\n      }\n\n      resources {\n        cpu    = 500\n        memory = 256\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n  type = string\n}\n\nvariable \"shared_dir\" {\n  type = string\n}\n"
  },
  {
    "path": "nomad_jobs/web-apps/www/Dockerfile",
    "content": "FROM nginx\nADD main.jpg /usr/local/www/nginx/images/main.jpg\nCMD [\"nginx\", \"-g\", \"daemon off;\"]\n"
  },
  {
    "path": "nomad_jobs/web-apps/www/nomad.job",
    "content": "job \"www\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/web-apps/www/nomad.job\"\nversion = \"2\"\n  }\n\n  group \"nginx\" {\n    count = 1 \n    network {\n      port \"http\" {\n        host_network = \"tailscale\"\n      }\n    }\n    task \"www\" {\n      driver = \"docker\"\n      config {\n        image = \"docker-registry.${var.tld}/www:2\"\n        network_mode = \"host\"\n        ports = [\"http\"]\n        command = \"nginx\"\n        args = [\"-c\", \"/usr/local/etc/nginx/nginx.conf\", \"-g\", \"daemon off;\"]\n        volumes = [\n          \"local/index.html:/usr/local/www/nginx/index.html\",\n          \"local/nginx.conf:/usr/local/etc/nginx/nginx.conf\",\n        ]\n      }\n\n      service {\n        port = \"http\"\n\tname = \"www\"\n        tags = [\n          \"traefik.enable=true\",\n          \"traefik.http.middlewares.httpsRedirect.redirectscheme.scheme=https\",\n          \"traefik.http.routers.${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_TASK_NAME}.${var.tld}\",\n          \"traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=forward-auth\"\n        ]\n        check {\n          type     = \"http\"\n          path     = \"/\"\n          interval = \"10s\"\n          timeout  = \"2s\"\n          check_restart {\n            limit           = 3\n            grace           = \"60s\"\n            ignore_warnings = false\n          }\n        }\n      }\n      template {\ndata = <<EOH\nworker_processes  1;\n\n#error_log  /var/log/nginx/error.log;\n\nevents {\n    worker_connections  1024;\n}\n\n\nhttp {\n    access_log /dev/stdout;\n    error_log /dev/stderr;\n    #include       mime.types;\n    default_type  application/octet-stream;\n\n    sendfile        on;\n    keepalive_timeout  65;\n\n    server {\n        listen       {{ env \"NOMAD_PORT_http\" }};\n        server_name  localhost;\n\n        location / {\n            root   
/usr/local/www/nginx;\n            index  index.html index.htm;\n        }\n\n        error_page   500 502 503 504  /50x.html;\n        location = /50x.html {\n            root   /usr/local/www/nginx-dist;\n        }\n    }\n}\nEOH\n        destination = \"local/nginx.conf\"\n        env         = false\n        perms       = 755\n        change_mode = \"signal\"\n        change_signal = \"SIGHUP\"\n        left_delimiter  = \"{{\"\n        right_delimiter = \"}}\"\n\n      }\n      template {\ndata = <<EOH\n<html>\n<body bgcolor=\"#303030\">\n    <p align=center><img width=\"50%\" src=images/main.jpg></p>\n</body>\n</html>\nEOH\n        destination = \"local/index.html\"\n        env         = false\n        perms       = 755\n        change_mode = \"signal\"\n        change_signal = \"SIGHUP\"\n        left_delimiter  = \"{{\"\n        right_delimiter = \"}}\"\n\n      }\n\n\n      resources {\n        cpu    = 100\n        memory = 128\n      }\n    }\n  }\n}\n\nvariable \"region\" {\n    type = string\n}\n\n\n\nvariable \"tld\" {\n    type = string\n}\n\nvariable \"shared_dir\" {\n    type = string\n}\n"
  },
  {
    "path": "renovate.json",
    "content": "{\n  \"$schema\": \"https://docs.renovatebot.com/renovate-schema.json\",\n  \"extends\": [\n    \"config:recommended\"\n  ],\n  \"dependencyDashboard\": true,\n  \"dependencyDashboardTitle\": \"🔄 Dependency Dashboard - Homelab Updates\",\n  \"assignees\": [\n    \"perrymanuk\"\n  ],\n  \"reviewers\": [\n    \"perrymanuk\"\n  ],\n  \"packageRules\": [\n    {\n      \"matchPackageNames\": [\n        \"prom/prometheus\"\n      ],\n      \"allowedVersions\": \"/^v[0-9]+\\\\.[0-9]+\\\\.[2-9]+$/\",\n      \"automerge\": true,\n      \"ignoreTests\": true\n    },\n    {\n      \"matchUpdateTypes\": [\n        \"minor\",\n        \"patch\",\n        \"pin\",\n        \"digest\"\n      ],\n      \"automerge\": true,\n      \"ignoreTests\": true\n    },\n    {\n      \"matchPackageNames\": [\n        \"postgres\",\n        \"mariadb\",\n        \"mysql\"\n      ],\n      \"matchUpdateTypes\": [\n        \"major\"\n      ],\n      \"enabled\": false,\n      \"description\": \"Block major database version updates - require manual approval\",\n      \"dependencyDashboardApproval\": true,\n      \"prCreation\": \"approval\"\n    },\n    {\n      \"versioning\": \"regex:^(?<compatibility>.*?)-(?<major>\\\\d+)\\\\.(?<minor>\\\\d+)\\\\.(?<patch>\\\\d+)$\",\n      \"matchPackageNames\": [\n        \"/^lscr\\\\.io\\\\/linuxserver\\\\//\"\n      ]\n    },\n    {\n      \"matchPackageNames\": [\n        \"ghcr.io/perrymanuk/radbot\"\n      ],\n      \"enabled\": false,\n      \"description\": \"Managed by radbot CI dispatch workflow\"\n    }\n  ],\n  \"customManagers\": [\n    {\n      \"customType\": \"regex\",\n      \"managerFilePatterns\": [\n        \"/(^|/).*\\\\.job$/\"\n      ],\n      \"matchStrings\": [\n        \"\\\\s*image\\\\s*=\\\\s*\\\"(?<depName>.*?)[@:](?<currentValue>.*?)\\\"\\\\n\"\n      ],\n      \"datasourceTemplate\": \"docker\"\n    }\n  ]\n}\n"
  },
  {
    "path": "services/beefcake.json",
    "content": "{\n  \"Service\": {\n    \"Name\": \"beefcake\",\n    \"ID\": \"beefcake-instance-1\",\n    \"Address\": \"192.168.50.208\",\n    \"Port\": 80,\n    \"Check\": {\n      \"HTTP\": \"http://192.168.50.208:80\",\n      \"Interval\": \"10s\",\n      \"Timeout\": \"5s\"\n    },\n    \"Tags\": [\"traefik.enable=true\"]\n  }\n}\n"
  }
]