Full Code of perrymanuk/hashi-homelab for AI

master c1705ab65354 cached
180 files
450.4 KB
128.3k tokens
13 symbols
1 requests
Download .txt
Showing preview only (499K chars total). Download the full file or copy to clipboard to get everything.
Repository: perrymanuk/hashi-homelab
Branch: master
Commit: c1705ab65354
Files: 180
Total size: 450.4 KB

Directory structure:
gitextract_ikark14b/

├── .bootstrap.mk
├── .gitattributes
├── .github/
│   └── workflows/
│       ├── build-gcp-dns-updater.yaml
│       ├── nomad.yaml
│       ├── update-kideo.yaml
│       ├── update-minecraftmath.yaml
│       ├── update-radbot-dev.yaml
│       └── update-radbot.yaml
├── .gitignore
├── LICENSE
├── Makefile
├── README.md
├── ansible/
│   ├── configs/
│   │   ├── consul.hcl.j2
│   │   ├── consul.service
│   │   ├── docker-daemon.json.j2
│   │   ├── nomad.hcl.j2
│   │   └── nomad.service
│   ├── playbook.yml
│   └── zsh.yml
├── docker_images/
│   ├── gcp-dns-updater/
│   │   ├── Dockerfile
│   │   ├── README.md
│   │   ├── requirements.txt
│   │   └── update_dns.py
│   └── update-metadata/
│       ├── Dockerfile
│       ├── README.md
│       ├── requirements.txt
│       └── update_job_metadata.py
├── envrc
├── nomad_jobs/
│   ├── TEMPLATE-volume.hcl
│   ├── TEMPLATE.job
│   ├── ai-ml/
│   │   ├── cognee/
│   │   │   └── nomad.job
│   │   ├── crawl4ai/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── litellm/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── manyfold/
│   │   │   ├── 3dprints-volume.hcl
│   │   │   ├── nomad.job
│   │   │   ├── prints_volume.hcl
│   │   │   └── volume.hcl
│   │   ├── ollama/
│   │   │   └── nomad.job
│   │   ├── open-webui/
│   │   │   └── nomad.job
│   │   ├── paperless-ai/
│   │   │   └── nomad.job
│   │   ├── pgvector-client/
│   │   │   └── nomad.job
│   │   └── radbot/
│   │       ├── nomad-dev.job
│   │       └── nomad.job
│   ├── core-infra/
│   │   ├── coredns/
│   │   │   ├── README.md
│   │   │   └── nomad.job
│   │   ├── github-runner/
│   │   │   └── nomad.job
│   │   ├── haproxy/
│   │   │   └── nomad.job
│   │   ├── iscsi-csi-plugin/
│   │   │   ├── controller.job
│   │   │   └── node.job
│   │   ├── keepalived/
│   │   │   ├── TODO.md
│   │   │   └── nomad.job
│   │   ├── nfs-csi-plugin/
│   │   │   ├── controller.job
│   │   │   └── nodes.job
│   │   ├── pihole/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── smtp/
│   │   │   └── nomad.job
│   │   ├── tailscale/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── tailscale-este/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── traefik/
│   │   │   ├── config/
│   │   │   │   ├── consul-catalog.yml
│   │   │   │   ├── consul.yml
│   │   │   │   ├── traefik.toml
│   │   │   │   ├── traefik.toml.new
│   │   │   │   └── traefik.toml.test
│   │   │   └── nomad.job
│   │   ├── traefik-forward-auth/
│   │   │   └── nomad.job
│   │   └── vault/
│   │       └── secrets_template.yaml
│   ├── gaming/
│   │   ├── minecraft-1.21/
│   │   │   └── nomad.job
│   │   ├── minecraft-avaritia/
│   │   │   └── nomad.job
│   │   ├── minecraft-axiom/
│   │   │   └── nomad.job
│   │   ├── minecraft-fiskheroes/
│   │   │   └── nomad.job
│   │   └── minecraft-forge/
│   │       └── nomad.job
│   ├── media-stack/
│   │   ├── audioserve/
│   │   │   └── nomad.job
│   │   ├── flaresolverr/
│   │   │   └── nomad.job
│   │   ├── jackett/
│   │   │   └── nomad.job
│   │   ├── lazylibrarian/
│   │   │   └── nomad.job
│   │   ├── lidarr/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── lidify/
│   │   │   └── nomad.job
│   │   ├── maintainerr/
│   │   │   └── nomad.job
│   │   ├── mediasage/
│   │   │   └── nomad.job
│   │   ├── multi-scrobbler/
│   │   │   └── nomad.job
│   │   ├── navidrome/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── ombi/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── overseerr/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── plex/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── prowlarr/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── qbittorrent/
│   │   │   └── nomad.job
│   │   ├── radarr/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── requestrr/
│   │   │   └── nomad.job
│   │   ├── sabnzbd/
│   │   │   └── nomad.job
│   │   ├── sickchill/
│   │   │   └── nomad.job
│   │   ├── sonarr/
│   │   │   └── nomad.job
│   │   ├── synclounge/
│   │   │   └── nomad.job
│   │   ├── tautulli/
│   │   │   └── nomad.job
│   │   └── tdarr/
│   │       ├── nomad.job
│   │       └── volume.hcl
│   ├── misc/
│   │   ├── adb/
│   │   │   └── nomad.job
│   │   ├── gcp-dns-updater/
│   │   │   ├── Dockerfile
│   │   │   ├── README.md
│   │   │   ├── nomad.job
│   │   │   ├── requirements.txt
│   │   │   └── update_dns.py
│   │   ├── gitea/
│   │   │   └── nomad.job
│   │   ├── linuxgsm/
│   │   │   └── nomad.job
│   │   ├── murmur/
│   │   │   └── nomad.job
│   │   ├── octoprint/
│   │   │   └── nomad.job
│   │   └── uploader/
│   │       └── nomad.job
│   ├── observability/
│   │   ├── alertmanager/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── blackbox-exporter/
│   │   │   └── nomad.job
│   │   ├── grafana/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── loki/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── oom-test/
│   │   │   └── nomad.job
│   │   ├── prometheus/
│   │   │   ├── README.md
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── telegraf/
│   │   │   └── nomad.job
│   │   ├── truenas-graphite-exporter/
│   │   │   └── nomad.job
│   │   └── vector/
│   │       └── nomad.job
│   ├── personal-cloud/
│   │   ├── actualbudget/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── bitwarden/
│   │   │   └── nomad.job
│   │   ├── nextcloud/
│   │   │   └── nomad.job
│   │   ├── ntfy/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── paperless/
│   │   │   └── nomad.job
│   │   └── radicale/
│   │       └── nomad.job
│   ├── security/
│   │   ├── suricata/
│   │   │   └── nomad.job
│   │   ├── suricata-update/
│   │   │   └── nomad.job
│   │   ├── wazuh-agent/
│   │   │   └── nomad.job
│   │   └── wazuh-server/
│   │       ├── nomad.job
│   │       ├── volume-dashboard.hcl
│   │       ├── volume-indexer.hcl
│   │       └── volume-manager.hcl
│   ├── smart-home/
│   │   ├── deconz/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── home-assistant/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── mqtt/
│   │   │   └── nomad.job
│   │   ├── owntracks-recorder/
│   │   │   └── nomad.job
│   │   └── zigbee2mqtt/
│   │       └── nomad.job
│   ├── storage-backends/
│   │   ├── docker-registry/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── mariadb/
│   │   │   └── nomad.job
│   │   ├── neo4j/
│   │   │   ├── nomad.job
│   │   │   ├── setup.job
│   │   │   └── volume.hcl
│   │   ├── pgvector/
│   │   │   ├── nomad.job
│   │   │   └── pgvector-setup.job
│   │   ├── postgres/
│   │   │   ├── nomad.job
│   │   │   └── postgres-setup.job
│   │   ├── qdrant/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── redis/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   └── volumes/
│   │       └── nfs-example.hcl
│   ├── system/
│   │   └── docker-cleanup/
│   │       └── nomad.job
│   └── web-apps/
│       ├── alertmanager-dashboard/
│       │   └── nomad.job
│       ├── firecrawl/
│       │   └── nomad.job
│       ├── heimdall/
│       │   └── nomad.job
│       ├── homepage/
│       │   └── nomad.job
│       ├── kideo/
│       │   └── nomad.job
│       ├── minecraftmath/
│       │   └── nomad.job
│       ├── wordpress/
│       │   └── nomad.job
│       └── www/
│           ├── Dockerfile
│           └── nomad.job
├── renovate.json
└── services/
    └── beefcake.json

================================================
FILE CONTENTS
================================================

================================================
FILE: .bootstrap.mk
================================================
# Short git SHA of HEAD, used to tag built images.
export VERSION_TAG=$(shell git rev-parse --short HEAD)
# Nomad job name derived from the current directory name.
# NOTE: $$PWD (escaped) is required so the shell expands PWD. The original
# single `$PWD` was expanded by make as `$(P)WD`; with P undefined it
# collapsed to the literal string "WD", so JOB_NAME was always "WD".
export JOB_NAME=$(shell basename $$PWD)

# dash-split,<string>,<n>: return the <n>th dash-separated word of <string>.
# dash-1 / dash-2 extract the first/second component of a pattern stem ($*).
dash-split = $(word $2,$(subst -, ,$1))
dash-1 = $(call dash-split,$*,1)
dash-2 = $(call dash-split,$*,2)

# Print every `##`-annotated target in the including Makefiles.
help:##............Show this help.
	@echo ""
	@fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/##//' | sed 's/^/    /'
	@echo ""
	@echo ""


================================================
FILE: .gitattributes
================================================
*.job linguist-language=HCL


================================================
FILE: .github/workflows/build-gcp-dns-updater.yaml
================================================
# .github/workflows/build-gcp-dns-updater.yaml
# Builds and pushes the gcp-dns-updater image when its sources change.
name: Build GCP DNS Updater Image

on:
  push:
    branches:
      # Fixed: this repo's default branch is `master` (see the deploy
      # workflow); the previous value `main` meant this trigger never fired.
      - master
    paths:
      - 'docker_images/gcp-dns-updater/**'
  workflow_dispatch:

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write # Required for pushing to GitHub Packages if used, good practice anyway

    steps:
      - name: Checkout Code
        uses: actions/checkout@v6

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v4

      - name: Login to Docker Registry
        uses: docker/login-action@v4
        with:
          registry: docker.${{ secrets.NOMAD_VAR_tld }}
          username: ${{ secrets.DOCKER_REGISTRY_USER }}
          password: ${{ secrets.DOCKER_REGISTRY_PASSWORD }}

      - name: Build Image using Makefile
        env:
          NOMAD_VAR_tld: ${{ secrets.NOMAD_VAR_tld }}
        run: make build-gcp-dns-updater

      - name: Push Image
        run: docker push docker.${{ secrets.NOMAD_VAR_tld }}/gcp-dns-updater:latest


================================================
FILE: .github/workflows/nomad.yaml
================================================
# Detects changed Nomad job/volume files on master and deploys them
# over Tailscale to the homelab cluster.
on:
  push:
    branches:
      - master

jobs:
  # JOB to run change detection
  changes:
    runs-on: ubuntu-latest
    permissions:
      pull-requests: read
    outputs:
      # JSON lists of changed files, consumed as build matrices below.
      jobs: ${{ steps.filter.outputs.nomadjobs_files }}
      volumes: ${{ steps.filter_volumes.outputs.volumes_files }}
    steps:
    - name: 'Checkout'
      uses: 'actions/checkout@v6'

    - uses: dorny/paths-filter@v4
      id: filter_volumes
      with:
        list-files: 'json'
        filters: |
          volumes:
            - 'nomad_jobs/**/volume.hcl'
            - 'nomad_jobs/**/*-volume.hcl'

    - uses: dorny/paths-filter@v4
      id: filter
      with:
        list-files: 'json'
        filters: |
          nomadjobs:
            # Updated paths based on directory restructure
            - 'nomad_jobs/media-stack/plex/*.job'
            - 'nomad_jobs/media-stack/radarr/*.job'
            - 'nomad_jobs/media-stack/lidarr/*.job'
            - 'nomad_jobs/media-stack/overseerr/*.job'
            - 'nomad_jobs/storage-backends/postgres/*.job'
            - 'nomad_jobs/storage-backends/redis/*.job'
            - 'nomad_jobs/storage-backends/pgvector/*.job'
            - 'nomad_jobs/core-infra/coredns/*.job'
            - 'nomad_jobs/media-stack/sabnzbd/*.job'
            - 'nomad_jobs/media-stack/qbittorrent/*.job'
            - 'nomad_jobs/media-stack/prowlarr/*.job'
            - 'nomad_jobs/media-stack/tdarr/*.job'
            - 'nomad_jobs/core-infra/smtp/*.job'
            - 'nomad_jobs/ai-ml/ollama/*.job'
            - 'nomad_jobs/ai-ml/open-webui/*.job'
            - 'nomad_jobs/misc/gcp-dns-updater/*.job'
            - 'nomad_jobs/core-infra/tailscale-este/*.job'
            - 'nomad_jobs/core-infra/traefik/*.job'
            # Removed stale duplicate 'nomad_jobs/storage-backends/iscsi-csi-plugin/*.job';
            # the plugin lives under core-infra (matched by the entry below).
            - 'nomad_jobs/core-infra/iscsi-csi-plugin/*.job'
            - 'nomad_jobs/observability/alertmanager/*.job'
            - 'nomad_jobs/observability/prometheus/*.job'
            - 'nomad_jobs/ai-ml/radbot/*.job'
            - 'nomad_jobs/personal-cloud/ntfy/*.job'
            - 'nomad_jobs/web-apps/homepage/*.job'
            - 'nomad_jobs/media-stack/multi-scrobbler/*.job'
            - 'nomad_jobs/media-stack/lidify/*.job'
            - 'nomad_jobs/media-stack/mediasage/*.job'
            # NOTE(review): nomad_jobs/core-infra/netboot-xyz/ is not in the
            # repo tree — confirm this path is intentional (future job?).
            - 'nomad_jobs/core-infra/netboot-xyz/*.job'
            - 'nomad_jobs/web-apps/kideo/*.job'
            - 'nomad_jobs/web-apps/minecraftmath/*.job'

  # Registers any changed CSI volumes before jobs are deployed.
  add_volumes:
    runs-on: ubuntu-latest
    needs: changes
    if: needs.changes.outputs.volumes != '[]'
    continue-on-error: true
    strategy:
      matrix:
        job: ${{ fromJSON(needs.changes.outputs.volumes ) }}

    steps:
    - name: 'Checkout'
      uses: 'actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd' # v6

    - name: Connect to Tailscale
      uses: tailscale/github-action@v4
      with:
        oauth-client-id: ${{ secrets.TAILSCALE_OAUTH_CLIENT_ID }}
        oauth-secret: ${{ secrets.TAILSCALE_OAUTH_SECRET }}
        tags: tag:github-actions
        args: --accept-dns=true

    - name: Setup Nomad
      uses: hashicorp/setup-nomad@v1.0.0
      with:
        version: "1.10.4"

    - name: deploy
      shell: bash
      run: |
        # Extract volume ID from the HCL file
        VOLUME_ID=$(grep '^id' ${{ matrix.job }} | head -1 | sed 's/.*= *"\(.*\)"/\1/')
        # Skip if volume already exists
        if nomad volume status "$VOLUME_ID" > /dev/null 2>&1; then
          echo "Volume '$VOLUME_ID' already exists, skipping creation"
        else
          echo "Creating volume '$VOLUME_ID'"
          nomad volume create ${{ matrix.job }}
        fi
      env:
        NOMAD_ADDR: '${{ secrets.NOMAD_ADDR }}'

  # Runs `nomad job run` for every changed job file; Nomad variables are
  # supplied via NOMAD_VAR_* environment variables.
  deploy_jobs:
    runs-on: ubuntu-latest
    needs: changes
    if: needs.changes.outputs.jobs != '[]'
    continue-on-error: true
    strategy:
      matrix:
        job: ${{ fromJSON(needs.changes.outputs.jobs ) }}

    steps:
    - name: 'Checkout'
      uses: 'actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd' # v6

    - name: Connect to Tailscale
      uses: tailscale/github-action@v4
      with:
        oauth-client-id: ${{ secrets.TAILSCALE_OAUTH_CLIENT_ID }}
        oauth-secret: ${{ secrets.TAILSCALE_OAUTH_SECRET }}
        tags: tag:github-actions
        args: --accept-dns=true

    - name: Setup Nomad
      uses: hashicorp/setup-nomad@v1.0.0
      with:
        version: "1.10.4"

    - name: deploy
      shell: bash
      run: |
        nomad job run ${{ matrix.job }} # Removed -var flags
      env:
        NOMAD_ADDR: '${{ secrets.NOMAD_ADDR }}'
        NOMAD_VAR_region: 'home'
        NOMAD_VAR_tld: '${{ secrets.NOMAD_VAR_tld }}' # Corrected case
        NOMAD_VAR_shared_dir: '/home/shared/'
        NOMAD_VAR_downloads_dir: '/home/sabnzbd/downloads'
        NOMAD_VAR_music_dir: '/home/media/Music'
        NOMAD_VAR_movies_dir: '/home/media/Movies'
        NOMAD_VAR_books_dir: '/home/media/Books'
        NOMAD_VAR_tv_dir: '/home/media/TV'
        NOMAD_VAR_media_dir: '/home/media'
        NOMAD_VAR_hass_key: '${{ secrets.NOMAD_VAR_hass_key }}' # Corrected case
        NOMAD_VAR_hass_ip: '${{ secrets.NOMAD_VAR_hass_ip }}'
        NOMAD_VAR_github_pat: ${{ secrets.NOMAD_VAR_github_pat }} # Corrected case
        NOMAD_VAR_datacenters_all: '["dc1", "public"]'
        NOMAD_VAR_datacenters_dc1: '["dc1"]'
        NOMAD_VAR_datacenters_public: '["public"]'
        NOMAD_VAR_tailscale_auth: '${{ secrets.NOMAD_VAR_tailscale_auth }}' # Corrected case
        NOMAD_VAR_tailscale_auth_este: '${{ secrets.NOMAD_VAR_tailscale_auth_este }}' # Corrected case
        NOMAD_VAR_oauth_client_id: '${{ secrets.NOMAD_VAR_oauth_client_id }}' # Corrected case
        NOMAD_VAR_oauth_client_secret: '${{ secrets.NOMAD_VAR_oauth_client_secret }}' # Corrected case
        NOMAD_VAR_oauth_secret: '${{ secrets.NOMAD_VAR_oauth_secret }}' # Corrected case
        NOMAD_VAR_oauth_emails: '${{ secrets.NOMAD_VAR_oauth_emails }}' # Corrected case
        NOMAD_VAR_ssh_id: '${{ secrets.NOMAD_VAR_ssh_id }}' # Corrected case
        NOMAD_VAR_truenas_api_key: '${{ secrets.NOMAD_VAR_truenas_api_key }}' # Corrected case
        NOMAD_VAR_gh_access_token: '${{ secrets.NOMAD_VAR_gh_access_token }}' # Corrected case
        NOMAD_VAR_ollama_data_dir: '/home/shared/ollama'
        NOMAD_VAR_ollama_base_url: 'http://ollama.service.consul:11434'
        NOMAD_VAR_webui_secret_key: '${{ secrets.NOMAD_VAR_webui_secret_key }}' # Corrected case
        NOMAD_VAR_datacenter: 'dc1'
        NOMAD_VAR_dns_server_ip: '192.168.50.2'
        # Added missing variables
        NOMAD_VAR_aws_access_key: ${{ secrets.NOMAD_VAR_aws_access_key }}
        NOMAD_VAR_aws_secret_key: ${{ secrets.NOMAD_VAR_aws_secret_key }}
        NOMAD_VAR_bedrock_aws_region: ${{ secrets.NOMAD_VAR_bedrock_aws_region }}
        NOMAD_VAR_gcp_dns_admin: ${{ secrets.NOMAD_VAR_gcp_dns_admin }}
        NOMAD_VAR_gemini_api_key: ${{ secrets.NOMAD_VAR_gemini_api_key }}
        NOMAD_VAR_litellm_master_key: ${{ secrets.NOMAD_VAR_litellm_master_key }}
        NOMAD_VAR_manyfold_secret_key: ${{ secrets.NOMAD_VAR_manyfold_secret_key }}
        NOMAD_VAR_postgres_pass: ${{ secrets.NOMAD_VAR_postgres_pass }}
        NOMAD_VAR_truenas_iscsi_pass: ${{ secrets.NOMAD_VAR_truenas_iscsi_pass }}
        # Added gcp_project_id
        NOMAD_VAR_gcp_project_id: ${{ secrets.NOMAD_VAR_gcp_project_id }}
        # GitHub PAT is now stored securely in secrets
        NOMAD_VAR_truenass_iscsi_pass: ${{ secrets.NOMAD_VAR_truenass_iscsi_pass }} # Note potential typo in name
        NOMAD_VAR_dns_zone: ${{ secrets.NOMAD_VAR_dns_zone }}
        NOMAD_VAR_ingress_ip: ${{ secrets.NOMAD_VAR_ingress_ip }}
        NOMAD_VAR_radbot_credential_key: ${{ secrets.NOMAD_VAR_radbot_credential_key }}
        NOMAD_VAR_radbot_admin_token: ${{ secrets.NOMAD_VAR_radbot_admin_token }}
        NOMAD_VAR_radbot_mcp_token: ${{ secrets.NOMAD_VAR_radbot_mcp_token }}
        NOMAD_VAR_mullvad_wireguard_key: ${{ secrets.NOMAD_VAR_mullvad_wireguard_key }}
        NOMAD_VAR_mullvad_wireguard_addr: ${{ secrets.NOMAD_VAR_mullvad_wireguard_addr }}
        NOMAD_VAR_sonarr_api_key: ${{ secrets.NOMAD_VAR_sonarr_api_key }}
        NOMAD_VAR_radarr_api_key: ${{ secrets.NOMAD_VAR_radarr_api_key }}
        NOMAD_VAR_curseforge_api_key: ${{ secrets.NOMAD_VAR_curseforge_api_key }}
        NOMAD_VAR_pgvector_pass: ${{ secrets.NOMAD_VAR_pgvector_pass }}
        NOMAD_VAR_pgvector_admin_password: ${{ secrets.NOMAD_VAR_pgvector_admin_password }}
        NOMAD_VAR_postgres_admin_password: ${{ secrets.NOMAD_VAR_postgres_admin_password }}
        NOMAD_VAR_litellm_crawl4ai_key: ${{ secrets.NOMAD_VAR_litellm_crawl4ai_key }}
        NOMAD_VAR_litellm_salt_key: ${{ secrets.NOMAD_VAR_litellm_salt_key }}
        NOMAD_VAR_wazuh_api_password: ${{ secrets.NOMAD_VAR_wazuh_api_password }}
        NOMAD_VAR_wazuh_dashboard_password: ${{ secrets.NOMAD_VAR_wazuh_dashboard_password }}
        NOMAD_VAR_wazuh_indexer_password: ${{ secrets.NOMAD_VAR_wazuh_indexer_password }}
        NOMAD_VAR_otr_pass: ${{ secrets.NOMAD_VAR_otr_pass }}
        NOMAD_VAR_plex_token: ${{ secrets.NOMAD_VAR_plex_token }}
        NOMAD_VAR_listenbrainz_token: ${{ secrets.NOMAD_VAR_listenbrainz_token }}
        NOMAD_VAR_listenbrainz_username: ${{ secrets.NOMAD_VAR_listenbrainz_username }}
        NOMAD_VAR_lastfm_api_key: ${{ secrets.NOMAD_VAR_lastfm_api_key }}
        NOMAD_VAR_lastfm_api_secret: ${{ secrets.NOMAD_VAR_lastfm_api_secret }}
        NOMAD_VAR_lidarr_api_key: ${{ secrets.NOMAD_VAR_lidarr_api_key }}
        NOMAD_VAR_kideo_jwt_secret: ${{ secrets.NOMAD_VAR_kideo_jwt_secret }}
        NOMAD_VAR_kideo_youtube_cookies: ${{ secrets.NOMAD_VAR_kideo_youtube_cookies }}
        NOMAD_VAR_kideo_curiositystream_user: ${{ secrets.NOMAD_VAR_kideo_curiositystream_user }}
        NOMAD_VAR_kideo_curiositystream_pass: ${{ secrets.NOMAD_VAR_kideo_curiositystream_pass }}
        NOMAD_VAR_minecraftmath_jwt_secret: ${{ secrets.NOMAD_VAR_minecraftmath_jwt_secret }}


================================================
FILE: .github/workflows/update-kideo.yaml
================================================
# Bumps the kideo image tag in its Nomad job file when the kideo repo
# dispatches a release event; the push then triggers the deploy workflow.
name: Update kideo image tag

on:
  repository_dispatch:
    types: [update-kideo]

jobs:
  update-and-deploy:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v6
        with:
          # PAT (not GITHUB_TOKEN) so the push triggers downstream workflows.
          token: ${{ secrets.ACTIONS_PAT }}

      - name: Update image tag in Nomad job
        run: |
          TAG="${{ github.event.client_payload.tag }}"
          sed -i "s|ghcr.io/perrymanuk/kideo:[^ \"]*|ghcr.io/perrymanuk/kideo:${TAG}|" \
            nomad_jobs/web-apps/kideo/nomad.job
          echo "Updated kideo image tag to ${TAG}"

      - name: Commit and push
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"
          TAG="${{ github.event.client_payload.tag }}"
          git add nomad_jobs/web-apps/kideo/nomad.job
          # A bare `git commit` exits non-zero when the tag is unchanged
          # (e.g. a re-dispatched event) and would fail the whole workflow.
          if git diff --cached --quiet; then
            echo "Image tag already ${TAG}; nothing to commit"
          else
            git commit -m "chore: bump kideo to ${TAG}"
            git push
          fi


================================================
FILE: .github/workflows/update-minecraftmath.yaml
================================================
# Bumps the minecraftmath image tag in its Nomad job file on a release
# dispatch from the app repo; the push then triggers the deploy workflow.
name: Update minecraftmath image tag

on:
  repository_dispatch:
    types: [update-minecraftmath]

jobs:
  update-and-deploy:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v6
        with:
          # PAT (not GITHUB_TOKEN) so the push triggers downstream workflows.
          token: ${{ secrets.ACTIONS_PAT }}

      - name: Update image tag in Nomad job
        run: |
          TAG="${{ github.event.client_payload.tag }}"
          sed -i "s|ghcr.io/perrymanuk/minecraftmath:[^ \"]*|ghcr.io/perrymanuk/minecraftmath:${TAG}|" \
            nomad_jobs/web-apps/minecraftmath/nomad.job
          echo "Updated minecraftmath image tag to ${TAG}"

      - name: Commit and push
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"
          TAG="${{ github.event.client_payload.tag }}"
          git add nomad_jobs/web-apps/minecraftmath/nomad.job
          # A bare `git commit` exits non-zero when the tag is unchanged
          # (e.g. a re-dispatched event) and would fail the whole workflow.
          if git diff --cached --quiet; then
            echo "Image tag already ${TAG}; nothing to commit"
          else
            git commit -m "chore: bump minecraftmath to ${TAG}"
            git push
          fi


================================================
FILE: .github/workflows/update-radbot-dev.yaml
================================================
# Bumps the radbot image tag in the DEV Nomad job file on a release
# dispatch from the radbot repo; the push then triggers the deploy workflow.
name: Update radbot-dev image tag

on:
  repository_dispatch:
    types: [update-radbot-dev]

jobs:
  update-and-deploy:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v6
        with:
          # PAT (not GITHUB_TOKEN) so the push triggers downstream workflows.
          token: ${{ secrets.ACTIONS_PAT }}

      - name: Update image tag in dev Nomad job
        run: |
          TAG="${{ github.event.client_payload.tag }}"
          sed -i "s|ghcr.io/perrymanuk/radbot:[^ \"]*|ghcr.io/perrymanuk/radbot:${TAG}|" \
            nomad_jobs/ai-ml/radbot/nomad-dev.job
          echo "Updated radbot-dev image tag to ${TAG}"

      - name: Commit and push
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"
          TAG="${{ github.event.client_payload.tag }}"
          git add nomad_jobs/ai-ml/radbot/nomad-dev.job
          # A bare `git commit` exits non-zero when the tag is unchanged
          # (e.g. a re-dispatched event) and would fail the whole workflow.
          if git diff --cached --quiet; then
            echo "Image tag already ${TAG}; nothing to commit"
          else
            git commit -m "chore: deploy radbot-dev with ${TAG}"
            git push
          fi


================================================
FILE: .github/workflows/update-radbot.yaml
================================================
# Bumps the radbot image tag in the production Nomad job file on a release
# dispatch from the radbot repo; the push then triggers the deploy workflow.
name: Update radbot image tag

on:
  repository_dispatch:
    types: [update-radbot]

jobs:
  update-and-deploy:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v6
        with:
          # PAT (not GITHUB_TOKEN) so the push triggers downstream workflows.
          token: ${{ secrets.ACTIONS_PAT }}

      - name: Update image tag in Nomad job
        run: |
          TAG="${{ github.event.client_payload.tag }}"
          sed -i "s|ghcr.io/perrymanuk/radbot:[^ \"]*|ghcr.io/perrymanuk/radbot:${TAG}|" \
            nomad_jobs/ai-ml/radbot/nomad.job
          echo "Updated radbot image tag to ${TAG}"

      - name: Commit and push
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"
          TAG="${{ github.event.client_payload.tag }}"
          git add nomad_jobs/ai-ml/radbot/nomad.job
          # A bare `git commit` exits non-zero when the tag is unchanged
          # (e.g. a re-dispatched event) and would fail the whole workflow.
          if git diff --cached --quiet; then
            echo "Image tag already ${TAG}; nothing to commit"
          else
            git commit -m "chore: bump radbot to ${TAG}"
            git push
          fi


================================================
FILE: .gitignore
================================================
.envrc
.env
*-pub
.passwords
.envrc*
vault/secrets.yaml
vault/*.hcl
www/main.jpg
ssl
levant/*
!levant/defaults.yml
hosts
*.swp
.ra-aid
CLAUDE.md
scripts/*


================================================
FILE: LICENSE
================================================
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!)  The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.


================================================
FILE: Makefile
================================================
# Load .env files
#include .envrc

include ./.bootstrap.mk

# Define base deployments using their service names
base_deployments = coredns docker-registry haproxy

#help: # Placeholder for potential future help generation

# Find the nomad job file for a given service name ($1) within nomad_jobs/ structure.
# Matches either a file named after the service ('<svc>.job' / '<svc>.nomad') or the
# layout actually used in this repo: 'nomad_jobs/<category>/<svc>/nomad.job'.
# (The previous version only matched '<svc>.job'/'<svc>.nomad', which never matches
# the conventional 'nomad.job' files living under per-service directories.)
# Usage: $(call find_job_file,service_name)
# Example: $(call find_job_file,coredns) -> nomad_jobs/core-infra/coredns/nomad.job
find_job_file = $(shell find nomad_jobs/ -mindepth 2 -maxdepth 3 -type f \( -name '$1.job' -o -name '$1.nomad' -o -path '*/$1/nomad.job' -o -path '*/$1/nomad.nomad' \) -print -quit)

.PHONY: dc1-%
dc1-%: ## Deploy specific job to dc1 (searches within nomad_jobs/ structure)
	@job_file="$(call find_job_file,$*)"; \
	if [ -n "$$job_file" ]; then \
		echo "Found job file: $$job_file"; \
		nomad job run -var datacenters='["dc1"]' $$job_file; \
	else \
		echo "Error: Could not find nomad job file for '$*' in nomad_jobs/."; \
		exit 1; \
	fi

.PHONY: all-%
all-%: ## Deploy specific job to all DCs (searches within nomad_jobs/ structure)
	@job_file="$(call find_job_file,$*)"; \
	if [ -n "$$job_file" ]; then \
		echo "Found job file: $$job_file"; \
		nomad job run -var datacenters='["dc1", "hetzner"]' $$job_file; \
	else \
		echo "Error: Could not find nomad job file for '$*' in nomad_jobs/."; \
		exit 1; \
	fi

.PHONY: deploy-%
deploy-%: ## Deploy specific job (searches within nomad_jobs/ structure)
	@job_file="$(call find_job_file,$*)"; \
	if [ -n "$$job_file" ]; then \
		echo "Found job file: $$job_file"; \
		nomad job run $$job_file; \
	else \
		echo "Error: Could not find nomad job file for '$*' in nomad_jobs/."; \
		exit 1; \
	fi

# NOTE: the previous implementation used $(foreach ...) with "@JOB_FILE=$$(call ...)"
# inside the recipe. $(foreach) expands every iteration onto a single shell command
# line, where the embedded "@" is not a recipe prefix and "$(call ...)" is passed to
# the shell verbatim (the shell has no 'call'), so the target failed at run time.
# Delegate to the existing dc1-% pattern rule instead, one service at a time; that
# rule already handles job-file lookup and the error path.
.PHONY: deploy-base
deploy-base: ## Deploys base jobs (coredns, docker-registry, haproxy) to dc1
	@echo "Deploying base services to dc1: $(base_deployments)"
	@set -e; for svc in $(base_deployments); do \
		echo "Deploying $$svc..."; \
		$(MAKE) --no-print-directory dc1-$$svc; \
	done

.PHONY: sslkeys
# One-shot consul-template run that renders TLS material from Vault using the
# config at ssl/consul-template.hcl; token renewal is disabled for the single run.
sslkeys: ## Generate certs if you have SSL enabled
	consul-template -config ssl/consul-template.hcl -once -vault-renew-token=false

.PHONY: ssl-browser-cert
# Bundles the hetzner server cert + key and the nomad CA into a PKCS#12 file
# (browser_cert.p12) suitable for importing into a browser/OS keychain.
ssl-browser-cert: ## Generate browser cert if you have SSL enabled
	sudo openssl pkcs12 -export -out browser_cert.p12 -inkey ssl/hetzner/server-key.pem -in ssl/hetzner/server.pem -certfile ssl/hetzner/nomad-ca.pem

.PHONY: sync-github-secrets
# Sources .envrc in a subshell, then pushes every NOMAD_VAR_* environment
# variable to this repository's GitHub Actions secrets via the gh CLI.
# The name is everything before the first '=', the value everything after it.
# NOTE(review): 'read -r line' consumes one line at a time, so multi-line
# secret values would be truncated — assumes single-line values; confirm.
sync-github-secrets: ## Sync NOMAD_VAR variables from .envrc to GitHub secrets using gh CLI
	@echo "Syncing NOMAD_VAR variables from .envrc to GitHub secrets..."
	@bash -c 'source .envrc && env | grep "^NOMAD_VAR_" | while read -r line; do \
		name="$${line%%=*}"; \
		value="$${line#*=}"; \
		echo "Setting $$name"; \
		printf "%s" "$$value" | gh secret set "$$name"; \
	done'
	@echo "✅ All NOMAD_VAR secrets synced to GitHub"

.PHONY: build-update-metadata
# Builds the metadata-updater image locally (amd64, tag: update-metadata:latest).
build-update-metadata: ## Build the update-metadata Docker image
	@echo "Building update-metadata Docker image..."
	# Assumes update-metadata is in docker_images/update-metadata/
	docker build --platform linux/amd64 -t update-metadata:latest docker_images/update-metadata/

.PHONY: build-gcp-dns-updater
# Builds the DNS-updater image tagged for the private registry at
# docker.$NOMAD_VAR_tld (the TLD comes from the environment, e.g. via .envrc).
build-gcp-dns-updater: ## Build the gcp-dns-updater Docker image
	@echo "Building gcp-dns-updater Docker image..."
	# Assumes gcp-dns-updater is in docker_images/gcp-dns-updater/
	docker build --platform linux/amd64 -t docker.$$NOMAD_VAR_tld/gcp-dns-updater:latest docker_images/gcp-dns-updater/

# Example deployment target for gcp-dns-updater (if needed, uncomment and adjust)
#.PHONY: deploy-gcp-dns-updater
#deploy-gcp-dns-updater: ## Deploy gcp-dns-updater job using generic target
#	$(MAKE) deploy-gcp-dns-updater


================================================
FILE: README.md
================================================
# Hashi-Homelab
<p align="center">
<img width="250" src="homelab.png" />
</p>

### UPDATE - September 2nd 2025

This repo has gone through some major changes since the last update. I've completely reorganized the job structure into 10 clean categories (77 services total now!), added a comprehensive AI/ML stack with Ollama and Open-WebUI, enhanced the monitoring with Loki and Vector for log aggregation, modernized the alertmanager with better persistence and pushover notifications, added weekly docker cleanup automation, redesigned CoreDNS and Traefik for proper HA deployment, and implemented comprehensive Nomad allocation monitoring. The GitHub Actions deployment has been refined with better change detection and the whole thing just runs much more smoothly now. Also added a bunch of new services like smart home integration, personal cloud apps, and storage backends including pgvector for AI workloads, plus a few other bits and bobs that make the whole setup more robust.

### Background

The hashi-homelab was born of a desire to have a simple to maintain but very flexible homelab setup. While designed to work as a cohesive whole, each individual job can be taken and deployed on any Nomad cluster with minimal adjustments - they're built to be portable and self-contained.

The main goals were to keep the resources required to run the base lab setup small and to have all of the parts be easily exchangeable.  

`make deploy-base` will deploy coredns, docker-registry and haproxy - these are needed for everything else to work but aside from these you can pick and choose what to deploy with `make deploy-SERVICE_NAME` to deploy any of the 77 services organized across 10 categories. `make deploy-prometheus` or `make deploy-ollama` for example. You can also target specific datacenters with `make dc1-traefik` or `make all-postgres`.

The whole thing is organized much better now with services grouped into logical categories like ai-ml, media-stack, smart-home, observability, etc. Makes it way easier to find what you're looking for and deploy related services together.

In the future I would like to provide a ready to boot image for a raspberry pi where you can run all of this as the resources needed are really minimal. With just the basics you can get away with one pi4 4gb model with plenty of room to spare.

### Core Components:

* **Scheduler**: Nomad *...with proper allocation monitoring now*
* **Service Catalog/Registry**: Consul  
* **Service Mesh**: Traefik *...redesigned for HA deployment, much more robust*
* **VPN**: Tailscale *...can't say enough good things about tailscale, its integral for my homelab now*
* **DNS**: CoreDNS *...now with HA setup and proper failover*
* **Keepalived**: Assign a floating IP for DNS to not lose it if a node goes down
* **Monitoring**: Prometheus, Alertmanager, Telegraf, Blackbox-exporter, and Grafana *...plus Loki and Vector for log aggregation*  
* **Container Registry**: Docker-Registry *...because sometimes you don't want to rely on Docker Hub being up*  
* **AI/ML**: Ollama for local LLM serving, Open-WebUI for chat interface, LiteLLM for API compatibility
* **Vector Database**: PostgreSQL with pgvector extension for AI/ML vector embeddings storage and similarity search
* **Storage**: NFS and iSCSI CSI plugins for persistent storage across the cluster

### Service Categories (77 total):

* **ai-ml** (8): ollama, open-webui, litellm, cognee, crawl4ai, manyfold, paperless-ai, pgvector-client
* **core-infra** (13): coredns, traefik, haproxy, keepalived, tailscale, github-runner, csi plugins, etc.
* **media-stack** (16): plex, sonarr, radarr, lidarr, sabnzbd, qbittorrent, overseerr, navidrome, etc.
* **personal-cloud** (4): nextcloud, bitwarden, paperless, radicale
* **smart-home** (5): home-assistant, deconz, zigbee2mqtt, mqtt, owntracks-recorder  
* **observability** (7): prometheus, grafana, alertmanager, loki, vector, telegraf, blackbox-exporter
* **storage-backends** (9): postgres, pgvector, redis, mariadb, neo4j, qdrant, docker-registry, etc.
* **web-apps** (5): heimdall, wordpress, firecrawl, alertmanager-dashboard, www
* **misc** (7): gitea, uploader, murmur, octoprint, adb, linuxgsm, gcp-dns-updater
* **system** (3): docker-cleanup, volumes

### Setup

You need to have Nomad and Consul already running; a simple setup with the -dev flag will suffice for testing, but you'll want a proper cluster for real usage. If you don't already have a Nomad and Consul cluster, there are some excellent guides here...  
https://www.nomadproject.io/guides/install/production/deployment-guide.html  
https://learn.hashicorp.com/consul/datacenter-deploy/deployment-guide  

There are also some files in the `config` folder to help you get started, as well as one with some services to announce so that the Consul and Nomad UIs are available over the service mesh.

This repo relies on a `.envrc` file and direnv installed or setting the environment variables manually.
There is an `envrc` example file located in the repo that you can fill in and move to `.envrc`


The secret values from the `.envrc` also need to be put into your github secrets if you plan on deploying via the automated workflow. You can use `make sync-github-secrets` to sync them all at once which is pretty handy.

Once this is done, you simply run a `make deploy-base` and point your DNS to resolve via one of the Nomad nodes' IP address.  

One of the more specific parts of the setup that you may need to adjust is persistent storage: I use several NFS mounts, mounted on each client at `/home/shared` for configs and `/home/media` for images, video, audio, etc. Depending on which parts of this you are planning to deploy, you will just need to adjust this persistent storage to match the setup of your clients. The CSI plugins help make this more flexible now.

Services are exposed by their task name in the nomad job and whatever you configure your TLD to be in the `.envrc`. The whole thing works really well with the automated GitHub Actions deployment now - just push changes and they get deployed automatically to your cluster. This requires tailscale for the GitHub Actions to connect to your cluster.


================================================
FILE: ansible/configs/consul.hcl.j2
================================================
#jinja2: trim_blocks:False
# Consul agent configuration template.
# Hosts in the "lan-client-server" inventory group run as servers with the UI
# enabled; every other host renders as a client agent.
server = {% if "lan-client-server" in group_names %}true{% else %}false{% endif %}
ui = {% if "lan-client-server" in group_names %}true{% else %}false{% endif %}
{% if "wan-clients" in group_names %}
{% raw %}
# WAN clients bind/advertise on the tailscale0 interface. These are go-sockaddr
# templates evaluated by Consul at startup, hence the Jinja raw block.
client_addr = "{{GetInterfaceIP \"tailscale0\"}}"
advertise_addr = "{{GetInterfaceIP \"tailscale0\"}}"
bind_addr = "{{GetInterfaceIP \"tailscale0\"}}"
{% endraw %}
{% else %}
{% raw %}
# LAN nodes listen on all interfaces but advertise their 192.168.50.0/24 address.
client_addr = "0.0.0.0"
advertise_addr = "{{ GetPrivateInterfaces | include \"network\" \"192.168.50.0/24\" | attr \"address\" }}"
bind_addr = "0.0.0.0"
{% endraw %}
{% endif %}
{% raw %}
# Address advertised to nodes reached across the WAN.
advertise_addr_wan = "{{ GetPrivateInterfaces | include \"network\" \"192.168.50.0/24\" | attr \"address\" }}"
{% endraw %}
translate_wan_addrs = true
data_dir = "/var/lib/consul"
datacenter = "homelab"
enable_syslog = true
# Leave the cluster gracefully on SIGINT (paired with KillSignal=SIGINT in the unit).
leave_on_terminate = true
log_level = "WARN"
# Static addresses of the three server nodes to join on start.
retry_join = ["192.168.50.39", "192.168.50.113", "192.168.50.85"]
{% if "lan-client-server" in group_names %}bootstrap_expect = 3{% else %}{% endif %}
telemetry {
  prometheus_retention_time = "60s"
}


================================================
FILE: ansible/configs/consul.service
================================================
[Unit]
Description=consul agent
# Consul binds to the tailscale interface on WAN clients, so tailscaled must be up first.
Requires=network-online.target tailscaled.service
After=network-online.target tailscaled.service

[Service]
# Crude wait for interfaces (e.g. tailscale0) to be fully up before binding.
# NOTE(review): a fixed 30s sleep is fragile — consider an actual readiness check.
ExecStartPre=/bin/sleep 30
# '-' prefix: a missing environment file is not an error.
EnvironmentFile=-/etc/default/consul
Restart=always
ExecStart=/usr/bin/consul agent -domain consul -ui -config-dir=/etc/consul.d
ExecReload=/bin/kill -HUP $MAINPID
# SIGINT lets the agent leave gracefully (see leave_on_terminate in the config).
KillSignal=SIGINT
[Install]
WantedBy=multi-user.target


================================================
FILE: ansible/configs/docker-daemon.json.j2
================================================
{
  "dns": ["192.168.50.2", "192.168.50.1", "8.8.8.8"]{% if 'cheese' in group_names %},
  "runtimes": {
    "nvidia": {
      "args": [],
      "path": "nvidia-container-runtime"
    }
  }
{% endif %}
}


================================================
FILE: ansible/configs/nomad.hcl.j2
================================================
#jinja2: trim_blocks:False
# Nomad agent configuration template, shared by servers and clients; the server
# stanza and per-node metadata are toggled by inventory group / hostname.
data_dir = "/var/lib/nomad/"
datacenter = {% if "cheese" in group_names %}"cheese"{% elif "minecraft" in group_names %}"minecraft"{% else %}"dc1"{% endif %}
log_level = "warn"
bind_addr = "0.0.0.0"
region = "home"

server {
  enabled          = {% if "lan-client-server" in group_names %}true{% else %}false{% endif %}
  bootstrap_expect = 3
  server_join {
    retry_join     = ["192.168.50.39", "192.168.50.113", "192.168.50.85"]
    retry_max      = 3
    retry_interval = "15s"
  }
  authoritative_region  = "home"
  # Generous heartbeat window so brief connectivity blips don't mark nodes down.
  heartbeat_grace = "300s"
  min_heartbeat_ttl = "20s"
}

client {
  enabled = true
{% raw %}
  network_interface = "{{ GetPrivateInterfaces | include \"network\" \"192.168.50.0/24\" | attr \"name\" }}"
{% endraw %}
  options {
    docker.auth.config = "/root/.docker/config.json"
    docker.privileged.enabled = true
    driver.raw_exec.enable = "1"
    docker.volumes.enabled = true
  }

  # Node metadata consumed by job constraints: wan-clients have no shared NFS
  # mount and no local DNS. klo01 is pinned to keepalived priority 100, below
  # the 101-200 random range used on all other nodes.
  meta {
    shared_mount = {% if "wan-clients" in group_names %}"false"{% else %}"true"{% endif %}
    dns = {% if "wan-clients" in group_names %}"false"{% else %}"true"{% endif %}
    {%- if ansible_hostname == "klo01" %}
    keepalived_priority = "100"
    keepalived_priority_dns1 = "100"
    keepalived_priority_dns2 = "{{ 200 | random(start=101) }}"
    {%- else %}
    keepalived_priority = "{{ 200 | random(start=101) }}"
    keepalived_priority_dns1 = "{{ 200 | random(start=101) }}"
    keepalived_priority_dns2 = "{{ 200 | random(start=101) }}"
    {%- endif %}
  }

  host_network "lan" {
    cidr = "192.168.50.0/24"
    reserved_ports = "22"
  }

  # Tailscale CGNAT range (100.x.y.z addresses).
  host_network "tailscale" {
    cidr = "100.0.0.0/8"
    reserved_ports = "22"
  }

  {% if "wan-clients" in group_names %}
  host_network "public" {
    cidr = "78.47.90.68/32"
    reserved_ports = "22"
  }
  {%- endif %}

  {%- if ansible_hostname == "klo01" %}
  # Keep 3 GiB of memory off-limits to allocations on klo01.
  reserved {
    memory = 3072
  }
  {%- endif %}

}

telemetry {
  disable_hostname = true
  prometheus_metrics = true
  publish_allocation_metrics = true
  publish_node_metrics = true
  use_node_name = false
}
{% raw %}
advertise {
  http = "{{ GetPrivateInterfaces | include \"network\" \"192.168.50.0/24\" | attr \"address\" }}:4646"
  rpc = "{{ GetPrivateInterfaces | include \"network\" \"192.168.50.0/24\" | attr \"address\" }}:4647"
  serf = "{{ GetPrivateInterfaces | include \"network\" \"192.168.50.0/24\" | attr \"address\" }}:4648"
}
{% endraw %}
consul {
  # The address to the Consul agent.
  {%- raw %}
  address = "127.0.0.1:8500"
  {%- endraw %}
  # The service name to register the server and client with Consul.

  client_service_name = "nomad-client"

  # Enables automatically registering the services.
  auto_advertise = true

  # Enabling the server and client to bootstrap using Consul.
  server_auto_join = true
  client_auto_join = true
}

#vault {
#  enabled = true
#  address = "http://vault.service.home:8200"
#  allow_unauthenticated = true
#  create_from_role = "nomad-cluster"
#}

plugin "docker" {
  config {
    allow_caps = ["CHOWN","DAC_OVERRIDE","FSETID","FOWNER","MKNOD","NET_RAW","SETGID","SETUID","SETFCAP","SETPCAP","NET_BIND_SERVICE","SYS_CHROOT","KILL","AUDIT_WRITE","NET_ADMIN","NET_BROADCAST","SYS_NICE"]
    # extra Docker labels to be set by Nomad on each Docker container with the appropriate value
    extra_labels = ["job_name", "task_group_name", "task_name", "namespace", "node_name"]
    allow_privileged = true
    volumes {
      enabled      = true
      selinuxlabel = "z"
    }
  }
}


================================================
FILE: ansible/configs/nomad.service
================================================
[Unit]
Description=nomad.agent
Requires=network-online.target tailscaled.service
After=network-online.target tailscaled.service remote-fs.target
# Hard requirement: Nomad must not start until NFS mounts are ready
RequiresMountsFor=/home/shared /home/media/TV /home/media/Music /home/media/Movies /home/media/Books

[Service]
# '-' prefix: a missing environment file is not an error.
EnvironmentFile=-/etc/default/nomad
Restart=on-failure
RestartSec=10
ExecStart=/usr/bin/nomad agent $OPTIONS -config=/etc/nomad.d/nomad.hcl
ExecReload=/bin/kill -HUP $MAINPID
# SIGINT gives the agent a graceful shutdown; KillMode=process signals only the
# main agent so task child processes are left to Nomad's own handling.
KillSignal=SIGINT
KillMode=process

[Install]
WantedBy=multi-user.target


================================================
FILE: ansible/playbook.yml
================================================
---
# Play: base OS preparation for LAN nodes (plus the 'cheese' GPU host and
# 'minecraft') — static IP, NFS mounts, base packages, Docker, iSCSI/multipath
# tooling, and the NVIDIA container runtime on 'cheese'.
- name: network mounts
  hosts:
    - lan-client-server
    - lan-client
    - cheese
    - minecraft
  become: true
  remote_user: root
  tasks:
    # NOTE(review): the inventory hostname is used verbatim as the node's IPv4
    # address — assumes hosts are inventoried by IP; confirm.
    - name: Configure static IP via netplan
      copy:
        dest: /etc/netplan/00-installer-config.yaml
        content: |
          network:
            version: 2
            ethernets:
              ens3:
                addresses:
                  - {{ inventory_hostname }}/24
                routes:
                  - to: default
                    via: 192.168.50.1
                nameservers:
                  addresses:
                    - 192.168.50.1
      notify: Apply netplan

    # Mount points for the NFS entries added to /etc/fstab further down.
    - name: Ensure directories exist
      file:
        path: "{{ item }}"
        state: directory
        mode: '0755'
      with_items:
        - /home/shared
        - /home/media/TV
        - /home/media/Music
        - /home/media/Movies
        - /home/media/Books

    # Create an empty multipath.conf only if absent (force: no), so the
    # blockinfile task below always has a file to edit.
    - name: makesure multipath.conf exists
      copy:
        content: ""
        dest: /etc/multipath.conf
        force: no
        backup: yes
      ignore_errors: yes

    - name: Manage /etc/multipath.conf
      blockinfile:
        path: /etc/multipath.conf
        block: |
          defaults {
              user_friendly_names yes
              find_multipaths yes
          }

    # NFS client, mDNS, Docker, and iSCSI/multipath storage tooling.
    - name: Install Apt packages
      apt:
        name:
          - nfs-common
          - avahi-daemon
          - docker.io
          - open-iscsi
          - lsscsi
          - sg3-utils
          - multipath-tools
          - scsitools

    - name: Ensure /etc/docker directory exists
      file:
        path: /etc/docker
        state: directory
        mode: '0755'

    - name: Add NVIDIA Container Toolkit GPG key
      apt_key:
        url: https://nvidia.github.io/libnvidia-container/gpgkey
        state: present
        keyring: /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg
      when: "'cheese' in group_names"

    # NOTE(review): '$(ARCH)' is written literally into the apt source and is
    # expected to be expanded by apt itself, not by Ansible/shell — confirm.
    - name: Add NVIDIA Container Toolkit repository
      apt_repository:
        repo: "deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://nvidia.github.io/libnvidia-container/stable/deb/$(ARCH) /"
        state: present
        filename: nvidia-container-toolkit
      when: "'cheese' in group_names"

    - name: Install NVIDIA Container Toolkit
      apt:
        name: nvidia-container-toolkit
        state: present
        update_cache: yes
      when: "'cheese' in group_names"

    - name: Configure Docker daemon with fallback DNS and nvidia runtime
      template:
        src: configs/docker-daemon.json.j2
        dest: /etc/docker/daemon.json
      notify: Restart Docker

    # Drop any previously hand-managed mounts for the NAS before re-adding the
    # managed block below.
    - name: Remove old NFS fstab entries
      lineinfile:
        path: /etc/fstab
        regexp: '^192\.168\.50\.208:/mnt/.*'
        state: absent

    - name: Add NFS fstab entries with proper options
      blockinfile:
        path: /etc/fstab
        marker: "# {mark} ANSIBLE MANAGED NFS MOUNTS"
        block: |
          192.168.50.208:/mnt/pool0/share              /home/shared         nfs4    _netdev,hard,timeo=600,retrans=5,x-systemd.mount-timeout=90,x-systemd.requires=network-online.target,x-systemd.after=network-online.target  0  0
          192.168.50.208:/mnt/pool1/media/TV           /home/media/TV       nfs4    _netdev,hard,timeo=600,retrans=5,x-systemd.mount-timeout=90,x-systemd.requires=network-online.target,x-systemd.after=network-online.target  0  0
          192.168.50.208:/mnt/pool0/media/music        /home/media/Music    nfs4    _netdev,hard,timeo=600,retrans=5,x-systemd.mount-timeout=90,x-systemd.requires=network-online.target,x-systemd.after=network-online.target  0  0
          192.168.50.208:/mnt/pool1/media/Movies       /home/media/Movies   nfs4    _netdev,hard,timeo=600,retrans=5,x-systemd.mount-timeout=90,x-systemd.requires=network-online.target,x-systemd.after=network-online.target  0  0
          192.168.50.208:/mnt/pool0/media/audiobooks   /home/media/Books    nfs4    _netdev,hard,timeo=600,retrans=5,x-systemd.mount-timeout=90,x-systemd.requires=network-online.target,x-systemd.after=network-online.target  0  0
      notify:
        - Reload systemd fstab
        - Mount Filesystems

    - name: Enable services
      systemd:
        name: "{{ item }}"
        enabled: yes
        state: started
      with_items:
        - open-iscsi
        - multipath-tools

  handlers:
    - name: Apply netplan
      command: netplan apply

    - name: Reload systemd fstab
      systemd:
        daemon_reload: yes

    - name: Mount Filesystems
      command: mount -a

    - name: Restart Docker
      service:
        name: docker
        state: restarted

# Play: install and configure Nomad + Consul on every cluster node (systemd
# units, rendered configs, pinned package versions, sysctl for keepalived).
- name: Update configuration, execute command, and install packages
  hosts:
    - lan-client-server
    - lan-client
    - wan-clients
    - cheese
    - minecraft
  remote_user: root
  #roles:
  #  - role: artis3n.tailscale
  #    vars:
  #      # Example pulling the API key from the env vars on the host running Ansible
  #      tailscale_authkey: "{{ lookup('env', 'NOMAD_VAR_tailscale_auth') }}"
  #      tailscale_args: "{% if 'wan-clients' in group_names %}--accept-routes=true{% else %}--accept-routes=false{% endif %}"
  tasks:
    - name: Ensure directories exist
      file:
        path: "{{ item }}"
        state: directory
        mode: '0755'
      with_items:
        - /var/lib/nomad
        - /var/lib/consul
        - /etc/nomad.d
        - /etc/consul.d

    - name: Manage systemd service file nomad
      copy:
        src: configs/nomad.service
        dest: /lib/systemd/system/nomad.service
      notify: Reload systemd

    - name: Manage systemd service file consul
      copy:
        src: configs/consul.service
        dest: /lib/systemd/system/consul.service
      notify: Reload systemd

    - name: manage nomad config
      template:
        src: configs/nomad.hcl.j2
        dest: /etc/nomad.d/nomad.hcl
      notify: Restart Service

    # NOTE(review): unlike the nomad config above, no restart is notified here,
    # so a changed consul config only applies on the next consul restart — confirm
    # this is intentional.
    - name: manage consul config
      template:
        src: configs/consul.hcl.j2
        dest: /etc/consul.d/server.hcl

    - name: Add HashiCorp APT repository key
      apt_key:
        url: https://apt.releases.hashicorp.com/gpg
        state: present
        validate_certs: no
        keyring: /usr/share/keyrings/hashicorp-archive-keyring.gpg

    - name: Configure HashiCorp APT repository
      apt_repository:
        repo: "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com {{ ansible_distribution_release }} main"

    # Pin exact versions; allow_downgrade lets the pin win over a newer
    # installed build.
    # NOTE(review): 'state: latest' combined with 'name=version' pins is
    # contradictory — the pins are what matter; 'present' is likely intended.
    - name: Install Apt packages
      apt:
        name:
          - nomad=1.10.4-1
          - consul=1.19.1-1
        dpkg_options: 'force-confdef,force-confold'
        update_cache: true
        state: latest
        allow_downgrade: true

    # nonlocal_bind presumably allows binding the keepalived floating IP before
    # it is assigned to this node (see README); forwarding enables routing.
    # TODO(review): confirm.
    - name: Modify sysctl entry for net.ipv4.ip_nonlocal_bind
      sysctl:
        name: "{{ item.name }}"
        value: "{{ item.value }}"
        state: present
      with_items:
        - { name: "net.ipv4.ip_nonlocal_bind", value: "1" }
        - { name: "net.ipv4.conf.all.forwarding", value: "1" }
      notify: Apply Sysctl Changes

    - name: Enable services
      systemd:
        name: "{{ item }}"
        enabled: yes
        state: started
      with_items:
        - nomad
        - consul
        - tailscaled

  handlers:
    - name: Restart Service
      service:
        name: nomad
        state: restarted

    - name: Reload systemd
      systemd:
        daemon_reload: yes

    - name: Mount Filesystems
      command: mount -a

    - name: Apply Sysctl Changes
      command: sysctl -p /etc/sysctl.conf

# Play: install Tailscale from its apt repository on all hosts and bring the
# interface up using an auth key taken from the controller's environment.
- name: Install and configure Tailscale
  hosts:
    - all
  become: yes
  remote_user: root
  gather_facts: yes
  tags: tailscale

  vars:
    # Read authkey from environment variable; default to 'MISSING' if not set
    tailscale_auth_key: "{{ lookup('env', 'NOMAD_VAR_tailscale_auth') | default('MISSING') }}"
    # Optionally customize your Tailscale hostname
    tailscale_hostname: "{{ inventory_hostname }}"
    # Tag to advertise (must match OAuth client tag)
    tailscale_tags: "tag:nomad"

  tasks:
    # NOTE(review): hardcodes the Ubuntu 'noble' release for every host and
    # always reports changed (changed_when: true) — not idempotent; confirm.
    - name: Download Tailscale GPG key via curl
      shell: >
        curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/noble.noarmor.gpg
        | tee /usr/share/keyrings/tailscale-archive-keyring.gpg
        >/dev/null
      changed_when: true

    - name: Update apt cache
      apt:
        update_cache: yes

    - name: Configure Tailscale apt repository
      copy:
        dest: /etc/apt/sources.list.d/tailscale.list
        content: |
          deb [signed-by=/usr/share/keyrings/tailscale-archive-keyring.gpg arch=amd64] https://pkgs.tailscale.com/stable/ubuntu/ noble main

    - name: Update apt cache (after adding Tailscale repo)
      apt:
        update_cache: yes

    - name: Install Tailscale
      apt:
        name: tailscale
        state: latest

    - name: Enable and start tailscaled service
      service:
        name: tailscaled
        state: started
        enabled: yes

    - name: Bring Tailscale interface up using authkey
      # "command" used because there's no official Ansible module for "tailscale up".
      # This is not strictly idempotent; see notes below for advanced usage.
      # --reset clears any prior settings; --accept-dns=false keeps local DNS intact.
      command: >
        tailscale up
        --authkey={{ tailscale_auth_key }}
        --hostname={{ tailscale_hostname }}
        --advertise-tags={{ tailscale_tags }}
        --accept-dns=false
        --reset
      register: tailscale_up
      # rc == 0 makes this effectively always-changed when the command succeeds.
      changed_when: "'Success' in tailscale_up.stdout or 'Success' in tailscale_up.stderr or tailscale_up.rc == 0"

    - name: Show tailscale status
      command: tailscale status
      register: tailscale_status
      changed_when: false

    - debug:
        var: tailscale_status.stdout

# Play: install zsh + Oh My Zsh for root and set the agnoster theme.
# NOTE(review): paths are hardcoded to /root while my_zsh_user is a variable —
# changing the user would leave the paths wrong; confirm before reusing.
- name: Install Zsh and Oh My Zsh with Agnoster theme
  hosts: all
  become: yes
  remote_user: root
  gather_facts: yes

  vars:
    my_zsh_user: "root"  # Change this to the desired user

  tasks:
    - name: Install zsh
      apt:
        name: zsh
        state: present
        update_cache: yes

    - name: Ensure home directory path is known
      user:
        name: "{{ my_zsh_user }}"
      register: user_info  # This captures the user details, including home directory.

    - name: Check if Oh My Zsh is already installed
      stat:
        path: "/root/.oh-my-zsh"
      register: oh_my_zsh_stat

    - name: Check if zshrc exists
      stat:
        path: "/root/.zshrc"
      register: zshrc_stat

    - name: Clone Oh My Zsh
      git:
        repo: "https://github.com/ohmyzsh/ohmyzsh.git"
        dest: "/root/.oh-my-zsh"
      become_user: "{{ my_zsh_user }}"
      when: not oh_my_zsh_stat.stat.exists

    - name: Copy the default .zshrc template if not present
      copy:
        src: "/root/.oh-my-zsh/templates/zshrc.zsh-template"
        dest: "/root/.zshrc"
        remote_src: yes
      become_user: "{{ my_zsh_user }}"
      when: not zshrc_stat.stat.exists

    - name: Set Oh My Zsh theme to agnoster
      # Uses a regex replace to ensure 'ZSH_THEME="agnoster"'
      replace:
        path: "/root/.zshrc"
        regexp: '^ZSH_THEME="[^"]+"'
        replace: 'ZSH_THEME="agnoster"'
      become_user: "{{ my_zsh_user }}"

    - name: Change default shell to zsh for the user
      user:
        name: "{{ my_zsh_user }}"
        shell: /usr/bin/zsh

================================================
FILE: ansible/zsh.yml
================================================
---
- name: Install Zsh and Oh My Zsh with Agnoster theme
  hosts: cheese
  become: yes
  remote_user: root
  gather_facts: yes

  vars:
    my_zsh_user: "root"  # Change this to the desired user

  tasks:
    - name: Install zsh
      apt:
        name: zsh
        state: present
        update_cache: yes

    # Registering the user exposes user_info.home, so the tasks below work for
    # any my_zsh_user instead of being hard-coded to /root.
    - name: Ensure home directory path is known
      user:
        name: "{{ my_zsh_user }}"
      register: user_info

    - name: Check if Oh My Zsh is already installed
      stat:
        path: "{{ user_info.home }}/.oh-my-zsh"
      register: oh_my_zsh_stat

    - name: Check if zshrc exists
      stat:
        path: "{{ user_info.home }}/.zshrc"
      register: zshrc_stat

    - name: Clone Oh My Zsh
      git:
        repo: "https://github.com/ohmyzsh/ohmyzsh.git"
        dest: "{{ user_info.home }}/.oh-my-zsh"
      become_user: "{{ my_zsh_user }}"
      when: not oh_my_zsh_stat.stat.exists

    - name: Copy the default .zshrc template if not present
      copy:
        src: "{{ user_info.home }}/.oh-my-zsh/templates/zshrc.zsh-template"
        dest: "{{ user_info.home }}/.zshrc"
        remote_src: yes
      become_user: "{{ my_zsh_user }}"
      when: not zshrc_stat.stat.exists

    - name: Set Oh My Zsh theme to agnoster
      # Uses a regex replace to ensure 'ZSH_THEME="agnoster"'
      replace:
        path: "{{ user_info.home }}/.zshrc"
        regexp: '^ZSH_THEME="[^"]+"'
        replace: 'ZSH_THEME="agnoster"'
      become_user: "{{ my_zsh_user }}"

    - name: Change default shell to zsh for the user
      user:
        name: "{{ my_zsh_user }}"
        shell: /usr/bin/zsh



================================================
FILE: docker_images/gcp-dns-updater/Dockerfile
================================================
FROM python:3.14-slim

# Stream log output immediately instead of buffering it; update_dns.py relies
# on `logging` to stdout, and buffered output can be delayed or lost when the
# container is stopped.
ENV PYTHONUNBUFFERED=1

# Set the working directory in the container
WORKDIR /app

# Copy the requirements file into the container at /app
# (copied before the source so dependency install stays cached across code edits)
COPY requirements.txt .

# Install any needed packages specified in requirements.txt
# Using --no-cache-dir to reduce image size
RUN pip install --no-cache-dir -r requirements.txt

# Copy the current directory contents into the container at /app
COPY update_dns.py .

# Define the command to run the application
CMD ["python", "update_dns.py"]


================================================
FILE: docker_images/gcp-dns-updater/README.md
================================================
# GCP Dynamic DNS Updater Service

This service periodically checks the public IPv4 address of the node it's running on and updates a specified A record in a Google Cloud DNS managed zone. It's designed to run as a Nomad job within the Hashi-Homelab environment, utilizing a **pre-built Docker image**.

## Features

*   Fetches the current public IPv4 address from `https://v4.ifconfig.me/ip`.
*   Uses the `google-cloud-dns` Python SDK to interact with Google Cloud DNS.
*   Authenticates using a GCP Service Account key provided via an environment variable.
*   Checks the specified DNS record:
    *   If it's a CNAME, it deletes the CNAME record.
    *   If it's an A record, it updates the IP address if it has changed.
    *   If it doesn't exist (or after deleting a CNAME), it creates the A record with the specified TTL.
*   Runs periodically via a Nomad job, executing the Python script within the pre-built Docker container.

## Prerequisites

1.  **Docker:** Docker must be installed locally to build the service image.
2.  **GCP Service Account:** You need a Google Cloud Platform service account with the necessary permissions to manage DNS records.
    *   Go to the GCP Console -> IAM & Admin -> Service Accounts.
    *   Create a new service account (e.g., `gcp-dns-updater-sa`).
    *   Grant this service account the `DNS Administrator` role (`roles/dns.admin`) on the project containing your managed zone.
    *   Create a JSON key file for this service account and download it securely. You will need the *contents* of this file, not the file itself.
3.  **Nomad Environment:** A running Nomad cluster where this job can be scheduled. The Nomad clients must have Docker installed and configured.

## Configuration

The service is configured via environment variables passed to the Nomad task, which are then consumed by the `update_dns.py` script running inside the Docker container:

*   `GCP_DNS_ZONE_NAME`: The name of the managed zone in GCP DNS (e.g., `demonsafe-com`). The script derives the Project ID from the credentials.
*   `GCP_DNS_RECORD_NAME`: The DNS record name to update (e.g., `*.demonsafe.com`). **Note:** The script expects the base name; the trailing dot is handled internally if needed by the SDK.
*   `RECORD_TTL`: (Optional) The Time-To-Live (in seconds) for the created/updated A record. Defaults to 300 if not set.
*   `GCP_PROJECT_ID`: The Google Cloud Project ID containing the DNS zone.
*   `GCP_SERVICE_ACCOUNT_KEY_B64`: **Required.** The base64-encoded *content* of the GCP service account JSON key file.

**Generating the Base64 Key:**

You need to encode the *content* of your downloaded JSON key file into a single-line base64 string.

On Linux/macOS, you can use:
```bash
base64 -w 0 < /path/to/your/gcp_key.json
```
*(Ensure you use `-w 0` or an equivalent flag for your `base64` command to prevent line wrapping)*

Copy the resulting string.

**Setting Environment Variables in Nomad:**

These variables are defined within the `env` block of the `nomad.job` file using Go templating to read runtime environment variables provided by the Nomad agent (which in turn are often sourced from the deployment mechanism, like GitHub Actions):

```hcl
# Example within nomad.job task config
env {
  GCP_DNS_ZONE_NAME = <<EOH
{{ env "NOMAD_VAR_tld" | replace "." "-" }}
EOH
  GCP_DNS_RECORD_NAME = <<EOH
*.{{ env "NOMAD_VAR_tld" }}
EOH
  GCP_SERVICE_ACCOUNT_KEY_B64 = <<EOH
{{ env "NOMAD_VAR_gcp_dns_admin" }}
EOH
  GCP_PROJECT_ID = <<EOH
{{ env "NOMAD_VAR_gcp_project_id" }}
EOH
  # RECORD_TTL = "300" # Optional, defaults to 300 in the script
}
```

**Important:** The actual values for `NOMAD_VAR_tld`, `NOMAD_VAR_gcp_dns_admin`, and `NOMAD_VAR_gcp_project_id` **must** be provided securely to the Nomad agent's environment during deployment (e.g., via GitHub Actions secrets mapped in the workflow, or using Vault integration), not hardcoded directly in the job file.

## Deployment

1.  **Ensure Prerequisites:** Verify the service account is created, you have the base64 encoded key, and Docker is running.
2.  **Build the Docker Image:** From the root of the `hashi-homelab` repository, run the make target:
    ```bash
    make build-gcp-dns-updater
    ```
    This builds the required Docker image tagged `gcp-dns-updater:latest` using the `gcp-dns-updater/Dockerfile`.
3.  **Deploy the Nomad Job:**
    *   Ensure the required environment variables (`NOMAD_VAR_tld`, `NOMAD_VAR_gcp_dns_admin`, `NOMAD_VAR_gcp_project_id`) are available to the Nomad agent running the job. This is typically handled by the CI/CD pipeline (like the GitHub Actions workflow in this repo) or Vault integration.
    *   Deploy using the Nomad CLI (ensure you are in the repository root or adjust paths). This job will use the `gcp-dns-updater:latest` image built in the previous step:
        ```bash
        # The job will read variables from its environment
        nomad job run gcp-dns-updater/nomad.job
        ```
    *   Alternatively, if using the project's Makefile structure:
        ```bash
        # Assumes the Makefile's deploy target doesn't need extra vars
        # and that required env vars are set in the deployment runner
        make deploy-gcp-dns-updater
        ```

## Files

*   `update_dns.py`: The core Python script for updating DNS (runs inside the container).
*   `requirements.txt`: Python dependencies (installed during Docker build).
*   `Dockerfile`: Defines how to build the service's Docker image.
*   `nomad.job`: Nomad job definition for periodic execution using the `gcp-dns-updater:latest` Docker image.
*   `README.md`: This documentation file.


================================================
FILE: docker_images/gcp-dns-updater/requirements.txt
================================================
google-cloud-dns
requests
google-auth

================================================
FILE: docker_images/gcp-dns-updater/update_dns.py
================================================

import base64
import binascii
import json
import logging
import os
import socket
import sys
import time

import requests

# Import GCP specific libraries
from google.api_core.exceptions import GoogleAPIError
from google.cloud import dns
from google.oauth2 import service_account

# Setup logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

def get_env_vars():
    """Read the required configuration from the environment.

    Returns:
        Tuple of (project_id, zone_name, record_name, key_b64).
        Exits the process with status 1 if any variable is unset or empty.
    """
    required = (
        'GCP_PROJECT_ID',
        'GCP_DNS_ZONE_NAME',            # TLD-style zone, e.g. "demonsafe.com"
        'GCP_DNS_RECORD_NAME',
        'GCP_SERVICE_ACCOUNT_KEY_B64',  # base64-encoded service-account JSON
    )
    values = {name: os.environ.get(name) for name in required}

    missing = [name for name in required if not values[name]]
    if missing:
        logging.error(f"Missing required environment variables: {', '.join(missing)}")
        sys.exit(1)

    return tuple(values[name] for name in required)

def get_public_ip():
    """Return this host's public IPv4 address as a string.

    Queries https://v4.ifconfig.me/ip with a 10s timeout and exits the
    process with status 1 on any request failure, since the rest of the
    script cannot proceed without an IP.
    """
    try:
        response = requests.get('https://v4.ifconfig.me/ip', timeout=10)
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        logging.error(f"Error fetching public IP: {e}")
        sys.exit(1) # Exit if IP cannot be fetched

    ip_address = response.text.strip()
    logging.info(f"Successfully fetched public IP: {ip_address}")
    return ip_address

def get_dns_client(key_b64: str, project_id: str):
    """Create a google-cloud-dns client from a base64-encoded service-account key.

    Args:
        key_b64: Base64-encoded content of a GCP service-account JSON key file.
        project_id: GCP project that owns the managed zones.

    Returns:
        An authenticated dns.Client. Exits the process with status 1 on any
        decode, parse, or credential failure.
    """
    try:
        # Decode the base64 string
        logging.info("Decoding base64 service account key...")
        decoded_key = base64.b64decode(key_b64)
        logging.info("Base64 key decoded successfully.")

        # Parse the decoded JSON key
        logging.info("Parsing service account key JSON...")
        key_info = json.loads(decoded_key)
        logging.info("Service account key JSON parsed successfully.")

        # Create credentials from the parsed key info
        credentials = service_account.Credentials.from_service_account_info(key_info)

        # Use the provided project_id, not the one from credentials, to ensure consistency
        client = dns.Client(project=project_id, credentials=credentials)
        logging.info(f"Successfully created DNS client for project {project_id}")
        return client

    except binascii.Error as e:
        # binascii.Error is what b64decode raises; the previous
        # `base64.binascii.Error` spelling relied on base64's internal import.
        logging.error(f"Failed to decode base64 service account key: {e}")
        sys.exit(1)
    except json.JSONDecodeError as e:
        logging.error(f"Failed to parse service account key JSON: {e}")
        sys.exit(1)
    except Exception as e:
        logging.error(f"Failed to create DNS client from service account info: {e}")
        sys.exit(1)

def update_dns_record(client: dns.Client, project_id: str, zone_name: str, record_name: str, ip_address: str):
    """
    Checks and updates/creates an A record for the given name in the specified zone,
    replacing a CNAME if necessary.

    Args:
        client: Authenticated DNS client.
        project_id: GCP project ID.
        zone_name: The domain TLD (e.g., "demonsafe.com"). This will be converted
                   to the GCP zone name format (e.g., "demonsafe-com").
        record_name: The specific record to update (e.g., "*.demonsafe.com").
        ip_address: The public IP address to set.
    """
    # Computed before the try block: the except handlers below interpolate both
    # names into their log messages, so assigning them inside the try could
    # raise NameError in the handler if an early API call failed.
    gcp_zone_name = zone_name.replace('.', '-')
    # Ensure record_name ends with a dot for FQDN matching
    fqdn = record_name if record_name.endswith('.') else f"{record_name}."
    # TTL for the A record; configurable via RECORD_TTL (see README), default 300s.
    record_ttl = int(os.environ.get('RECORD_TTL', '300'))

    try:
        logging.info(f"Targeting GCP DNS Zone: {gcp_zone_name}")

        zone = client.zone(gcp_zone_name, project_id)
        if not zone.exists():
            logging.error(f"DNS zone '{gcp_zone_name}' not found in project '{project_id}'.")
            return # Cannot proceed without the zone

        logging.info(f"Checking DNS records for: {fqdn} in zone {gcp_zone_name}")

        record_sets = list(zone.list_resource_record_sets(filter_=f"name={fqdn}"))

        existing_a_record = None
        existing_cname_record = None

        for record_set in record_sets:
            if record_set.record_type == 'A' and record_set.name == fqdn:
                existing_a_record = record_set
                logging.info(f"Found existing A record: {existing_a_record.name} -> {existing_a_record.rrdatas}")
            elif record_set.record_type == 'CNAME' and record_set.name == fqdn:
                existing_cname_record = record_set
                logging.info(f"Found existing CNAME record: {existing_cname_record.name} -> {existing_cname_record.rrdatas}")

        changes = zone.changes()
        needs_update = False

        # Handle existing CNAME (delete it to replace with A)
        if existing_cname_record:
            logging.warning(f"Deleting existing CNAME record {fqdn} to replace with A record.")
            changes.delete_record_set(existing_cname_record)
            needs_update = True
            # Ensure we don't try to delete an A record if we just deleted a CNAME
            existing_a_record = None

        # Define the new A record we want
        new_a_record = zone.resource_record_set(fqdn, "A", record_ttl, [ip_address])

        # Handle existing A record
        if existing_a_record:
            if existing_a_record.rrdatas == [ip_address]:
                logging.info(f"Existing A record {fqdn} already points to {ip_address}. No update needed.")
                return # Nothing to do
            else:
                logging.info(f"Existing A record {fqdn} points to {existing_a_record.rrdatas}. Updating to {ip_address}.")
                changes.delete_record_set(existing_a_record)
                changes.add_record_set(new_a_record)
                needs_update = True
        # Handle case where no A record (and no CNAME was found/deleted)
        elif not existing_cname_record: # Only add if we didn't already decide to replace CNAME
            logging.info(f"No existing A or CNAME record found for {fqdn}. Creating new A record pointing to {ip_address}.")
            changes.add_record_set(new_a_record)
            needs_update = True
        # Remaining case: a CNAME was found and queued for deletion above, so
        # the replacement A record still has to be added.
        else:
             logging.info(f"Adding A record for {fqdn} pointing to {ip_address} after CNAME deletion.")
             changes.add_record_set(new_a_record)
             # needs_update is already True from the CNAME deletion above

        # Execute the changes if any were queued
        if needs_update:
            logging.info(f"Executing DNS changes for {fqdn} in zone {gcp_zone_name}...")
            changes.create()
            # Wait until the changes are finished.
            while changes.status != 'done':
                logging.info(f"Waiting for DNS changes to complete (status: {changes.status})...")
                time.sleep(5) # Wait 5 seconds before checking again
                changes.reload()
            logging.info(f"Successfully updated DNS record {fqdn} to {ip_address} in zone {gcp_zone_name}.")
        else:
            # This case should only be hit if an A record existed and was correct
            logging.info("No DNS changes were necessary.")

    except GoogleAPIError as e:
        logging.error(f"GCP API Error updating DNS record {fqdn} in zone {gcp_zone_name}: {e}")
    except Exception as e:
        logging.error(f"An unexpected error occurred during DNS update for {fqdn} in zone {gcp_zone_name}: {e}")


def update_spf_record(client: dns.Client, project_id: str, zone_name: str, record_name: str, ip_address: str):
    """Updates the SPF TXT record on the bare domain with the current public IP.

    Args:
        client: Authenticated DNS client.
        project_id: GCP project ID.
        zone_name: The domain TLD (e.g., "demonsafe.com").
        record_name: The record the A-update targets (e.g., "*.demonsafe.com");
            the bare domain is derived from it by stripping a leading "*.".
        ip_address: Public IPv4 address to embed as "ip4:<addr>" in the SPF entry.

    Any existing non-SPF TXT rrdatas on the bare domain are preserved; only the
    "v=spf1" entry is replaced (or appended if absent).
    """
    try:
        gcp_zone_name = zone_name.replace('.', '-')
        logging.info(f"Updating SPF record in zone: {gcp_zone_name}")

        zone = client.zone(gcp_zone_name, project_id)
        if not zone.exists():
            logging.error(f"DNS zone '{gcp_zone_name}' not found in project '{project_id}'.")
            return

        # Derive bare domain from record_name (e.g., "*.demonsafe.com" -> "demonsafe.com.")
        # Slice off exactly the "*." prefix: str.lstrip('*.') would strip a
        # *character set* and could eat leading characters of the domain itself.
        domain = record_name[2:] if record_name.startswith('*.') else record_name
        fqdn = domain if domain.endswith('.') else f"{domain}."
        logging.info(f"Checking TXT records for: {fqdn}")

        spf_value = f'"v=spf1 ip4:{ip_address} ~all"'

        record_sets = list(zone.list_resource_record_sets(filter_=f"name={fqdn}"))
        existing_txt = None
        for rs in record_sets:
            if rs.record_type == 'TXT' and rs.name == fqdn:
                existing_txt = rs
                logging.info(f"Found existing TXT record: {rs.name} -> {rs.rrdatas}")
                break

        changes = zone.changes()
        needs_update = False

        if existing_txt:
            new_rrdatas = []
            spf_found = False
            for rd in existing_txt.rrdatas:
                if 'v=spf1' in rd:
                    spf_found = True
                    if ip_address in rd:
                        logging.info(f"SPF record already contains {ip_address}. No update needed.")
                        return
                    logging.info(f"Replacing SPF entry: {rd} -> {spf_value}")
                    new_rrdatas.append(spf_value)
                else:
                    new_rrdatas.append(rd)
            if not spf_found:
                logging.info(f"No existing SPF entry found. Adding: {spf_value}")
                new_rrdatas.append(spf_value)

            # TXT rrdatas can only be replaced wholesale: delete + re-add.
            changes.delete_record_set(existing_txt)
            new_txt = zone.resource_record_set(fqdn, "TXT", 300, new_rrdatas)
            changes.add_record_set(new_txt)
            needs_update = True
        else:
            logging.info(f"No TXT record found for {fqdn}. Creating with SPF: {spf_value}")
            new_txt = zone.resource_record_set(fqdn, "TXT", 300, [spf_value])
            changes.add_record_set(new_txt)
            needs_update = True

        if needs_update:
            logging.info(f"Executing SPF TXT changes for {fqdn}...")
            changes.create()
            while changes.status != 'done':
                logging.info(f"Waiting for SPF changes to complete (status: {changes.status})...")
                time.sleep(5)
                changes.reload()
            logging.info(f"Successfully updated SPF record for {fqdn} with ip4:{ip_address}")

    except GoogleAPIError as e:
        logging.error(f"GCP API Error updating SPF record: {e}")
    except Exception as e:
        logging.error(f"Unexpected error updating SPF record: {e}")


if __name__ == "__main__":
    logging.info("Starting DNS update script.")
    project_id, zone_name, record_name, key_b64 = get_env_vars()
    public_ip = get_public_ip()

    # DNS pre-check: resolve a hostname in the managed zone and skip the whole
    # update if it already points at our public IP. The hostname is derived
    # from the configured zone (override with DNS_PRECHECK_HOSTNAME) instead of
    # being hard-coded, so the pre-check works for zones other than the
    # original demonsafe.com deployment.
    if public_ip:
        hostname_to_check = os.environ.get('DNS_PRECHECK_HOSTNAME', f"asdf.{zone_name}")
        logging.info(f"Performing pre-check for hostname: {hostname_to_check}")
        try:
            resolved_ip = socket.gethostbyname(hostname_to_check)
            logging.info(f"Resolved IP for {hostname_to_check}: {resolved_ip}")
            if resolved_ip == public_ip:
                logging.info(f'DNS record for {hostname_to_check} ({resolved_ip}) already matches public IP ({public_ip}). No update needed.')
                sys.exit(0)
            else:
                logging.info(f'Resolved IP for {hostname_to_check} ({resolved_ip}) does not match public IP ({public_ip}). Proceeding with potential update.')
        except socket.gaierror as e:
            logging.warning(f'Could not resolve IP for {hostname_to_check}: {e}. Proceeding with potential update.')
        except Exception as e:
            logging.warning(f'An unexpected error occurred during DNS pre-check for {hostname_to_check}: {e}. Proceeding with potential update.')

    if public_ip:
        dns_client = get_dns_client(key_b64, project_id)
        if dns_client:
            update_dns_record(dns_client, project_id, zone_name, record_name, public_ip)
            update_spf_record(dns_client, project_id, zone_name, record_name, public_ip)
            logging.info("DNS update script finished.")
        else:
            logging.error("Exiting due to DNS client initialization failure.")
            sys.exit(1)
    else:
        logging.error("Exiting due to inability to fetch public IP.")
        sys.exit(1)


================================================
FILE: docker_images/update-metadata/Dockerfile
================================================
# Image for the GitHub secret-sync helper (see README.md in this directory).
FROM python:3.14-slim

WORKDIR /app

# Install dependencies first so this layer is cached across source changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# NOTE(review): the repo tree shows update_job_metadata.py in this directory,
# not sync_secrets.py — confirm sync_secrets.py exists in the build context,
# otherwise this COPY (and the ENTRYPOINT below) will fail at build/run time.
COPY sync_secrets.py .

ENTRYPOINT ["python", "sync_secrets.py"]


================================================
FILE: docker_images/update-metadata/README.md
================================================
# GitHub Secret Synchronization Script (Containerized)

## Purpose

This script (`sync_secrets.py`), running inside a Docker container, reads environment variables defined in the project's root `.envrc` file and synchronizes them as GitHub secrets to the `perrymanuk/hashi-homelab` repository using the `PyGithub` library.

## Requirements

*   **Docker:** Docker must be installed and running to build and execute the container.
*   **`NOMAD_VAR_github_pat` Environment Variable:** A GitHub Personal Access Token (PAT) with the `repo` scope must be available as an environment variable named `NOMAD_VAR_github_pat` in the **host shell** where you run the `make` command. The Makefile target (`sync-secrets`) will handle passing this token into the container under the name `GITHUB_TOKEN` for the script to use.
*   **`.envrc` File:** An `.envrc` file must exist at the project root (`/Users/perry.manuk/git/perrymanuk/hashi-homelab/.envrc`) containing the secrets to sync.

## Usage

1.  **Ensure `NOMAD_VAR_github_pat` is set:** Export your GitHub PAT in your current host shell session:
    ```bash
    export NOMAD_VAR_github_pat="your_github_pat_here"
    ```
2.  **Navigate to the project root directory:**
    ```bash
    cd /Users/perry.manuk/git/perrymanuk/hashi-homelab
    ```
3.  **Run the Makefile target:**
    ```bash
    make sync-secrets
    ```

This command will:
    *   Build the Docker image defined in `docker_images/update-metadata/Dockerfile`.
    *   Run a container from the image.
    *   Mount the host's `.envrc` file into the container.
    *   Pass the **host's** `NOMAD_VAR_github_pat` environment variable into the container as `GITHUB_TOKEN`.
    *   Execute the `sync_secrets.py` script within the container.

The script will output the status of each secret synchronization attempt (created, updated, or failed).

**Important:** Running the script will overwrite any existing secrets in the GitHub repository that have the same name as variables found in the `.envrc` file.

## `.envrc` Format

The script expects the `.envrc` file to follow this format:

```bash
export VARIABLE_NAME=value
export ANOTHER_VARIABLE='value with spaces'
export YET_ANOTHER="double quoted value"
# This is a comment and will be ignored

# Empty lines are also ignored
export SECRET_KEY=a_very_secret_value_here
```

*   Lines must start with `export`.
*   Variable names and values are separated by `=`.
*   Values can be unquoted, single-quoted (`'...'`), or double-quoted (`"..."`). Quotes are stripped before syncing.
*   Lines starting with `#` (comments) and empty lines are ignored.


================================================
FILE: docker_images/update-metadata/requirements.txt
================================================
PyGithub
hcl2


================================================
FILE: docker_images/update-metadata/update_job_metadata.py
================================================

import argparse
import logging
import pathlib
import re
import sys

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


def _find_balanced_block(content, open_regex, label, warn_on_no_start):
    """Locate a '{...}' block whose opening line matches `open_regex`.

    Brace matching is string-aware: braces inside double-quoted strings
    (including backslash-escaped characters) are ignored.

    Args:
        content: Text to search.
        open_regex: Multiline regex matching the block's opening line
            (must include the opening '{').
        label: Block name used in warning messages ('job' or 'meta').
        warn_on_no_start: Whether to log a warning when the opening line
            is not found (preserves the original functions' behavior).

    Returns:
        (start, end) indices into `content` with `end` exclusive, or
        (None, None) if the block start or its closing brace is missing.
    """
    block_match = re.search(open_regex, content, re.MULTILINE)
    if not block_match:
        if warn_on_no_start:
            logging.warning(f"Could not find {label} block start.")
        return None, None

    start_index = block_match.start()
    # Walk forward counting braces until the opener's matching '}' closes it.
    brace_level = 0
    in_string = False
    escaped = False
    for i, char in enumerate(content[start_index:]):
        if escaped:
            escaped = False
            continue
        if char == '\\':
            escaped = True
            continue
        if char == '"':
            in_string = not in_string
            continue
        if not in_string:
            if char == '{':
                brace_level += 1
            elif char == '}':
                brace_level -= 1
                if brace_level == 0:
                    return start_index, start_index + i + 1

    logging.warning(f"Could not find matching closing brace for {label} block.")
    return None, None


def find_job_block(content):
    """Find the start and end indices of the main 'job' block."""
    return _find_balanced_block(content, r'^job\s+"[^"]+"\s*\{', 'job', True)


def find_meta_block(content):
    """Find the start and end indices of the 'meta' block within the given content."""
    return _find_balanced_block(content, r'^\s*meta\s*\{', 'meta', False)

def update_job_metadata(repo_root):
    """Finds Nomad job files and updates their meta block with job_file path.

    Scans <repo_root>/nomad_jobs recursively for *.nomad and *.job files and
    ensures each file's top-level job block carries a meta block containing
    `job_file = "<path relative to repo root>"`:

      * correct entry already present -> file untouched
      * stale entry                   -> line replaced in place
      * meta block without the entry  -> line inserted at top of meta block
      * no meta block                 -> new meta block inserted right after
                                         the job's opening line

    Exits the process with status 1 if the nomad_jobs directory is missing.
    """
    repo_path = pathlib.Path(repo_root).resolve()
    nomad_jobs_path = repo_path / 'nomad_jobs'

    if not nomad_jobs_path.is_dir():
        logging.error(f"'nomad_jobs' directory not found in {repo_path}")
        sys.exit(1)

    logging.info(f"Scanning for job files in {nomad_jobs_path}...")

    job_files = list(nomad_jobs_path.rglob('*.nomad')) + list(nomad_jobs_path.rglob('*.job'))

    if not job_files:
        logging.warning("No *.nomad or *.job files found.")
        return

    modified_count = 0
    for job_file in job_files:
        try:
            # POSIX-style relative path so the metadata is OS-independent.
            relative_path = job_file.relative_to(repo_path).as_posix()
            logging.debug(f"Processing file: {relative_path}")
            content = job_file.read_text()
            original_content = content # Keep a copy for comparison

            # Locate the top-level job block; everything below edits `content`
            # via absolute character offsets into this block.
            job_start, job_end = find_job_block(content)
            if job_start is None or job_end is None:
                logging.warning(f"Skipping {relative_path}: Could not find main job block.")
                continue
            job_block_content = content[job_start:job_end]
            # Insertion point for a brand-new meta block: immediately after the
            # job's opening line (including its trailing newline, if any).
            job_opening_line_match = re.match(r'^job\s+"[^"]+"\s*\{\s*\n?', job_block_content, re.MULTILINE)
            if not job_opening_line_match:
                 logging.warning(f"Skipping {relative_path}: Could not match job opening line format.")
                 continue
            job_insert_pos = job_start + job_opening_line_match.end()

            # meta offsets are relative to the job block; converted to absolute
            # offsets below before any slicing of `content`.
            meta_start_rel, meta_end_rel = find_meta_block(job_block_content)
            new_job_file_line = f'  job_file = "{relative_path}"'
            modified = False

            if meta_start_rel is not None and meta_end_rel is not None:
                # A meta block exists: either replace a stale job_file line or
                # insert a new one right after the meta opening line.
                meta_start_abs = job_start + meta_start_rel
                meta_end_abs = job_start + meta_end_rel
                meta_block_content = content[meta_start_abs:meta_end_abs]
                meta_opening_line_match = re.match(r'^\s*meta\s*\{\s*\n?', meta_block_content, re.MULTILINE)
                if not meta_opening_line_match:
                    logging.warning(f"Skipping {relative_path}: Could not match meta opening line format.")
                    continue
                meta_insert_pos = meta_start_abs + meta_opening_line_match.end()

                job_file_line_match = re.search(r'^(\s*)job_file\s*=\s*".*?"$\n?', meta_block_content, re.MULTILINE)

                if job_file_line_match:
                    existing_line = job_file_line_match.group(0)
                    # Preserve the file's existing indentation for the line.
                    indent = job_file_line_match.group(1)
                    new_line_with_indent = f'{indent}job_file = "{relative_path}"\n' # Ensure newline
                    if existing_line.strip() != new_line_with_indent.strip():
                         # Replace existing line
                        start = meta_start_abs + job_file_line_match.start()
                        end = meta_start_abs + job_file_line_match.end()
                        # Ensure we capture the trailing newline if present in match
                        content = content[:start] + new_line_with_indent + content[end:]
                        modified = True
                else:
                    # Insert new job_file line inside meta block
                    content = content[:meta_insert_pos] + new_job_file_line + '\n' + content[meta_insert_pos:]
                    modified = True
            else:
                # Insert new meta block
                new_meta_block = f'\n  meta {{\n{new_job_file_line}\n  }}\n'
                content = content[:job_insert_pos] + new_meta_block + content[job_insert_pos:]
                modified = True

            if modified and content != original_content:
                job_file.write_text(content)
                logging.info(f"Updated metadata in: {relative_path}")
                modified_count += 1
            elif not modified:
                 logging.debug(f"No changes needed for: {relative_path}")

        except Exception as e:
            # NOTE(review): relative_path is assigned inside this try; if
            # relative_to() itself raised, this message would hit an unbound
            # name — confirm and consider logging job_file instead.
            logging.error(f"Failed to process {relative_path}: {e}")

    logging.info(f"Metadata update complete. {modified_count} files modified.")


if __name__ == "__main__":
    # Default --repo-root to the parent of the directory this script lives in.
    default_repo_root = pathlib.Path(__file__).parent.resolve().parent

    parser = argparse.ArgumentParser(description="Update Nomad job files with job_file metadata.")
    parser.add_argument(
        "--repo-root",
        type=str,
        default=str(default_repo_root),
        help="Path to the root of the repository."
    )

    update_job_metadata(parser.parse_args().repo_root)



================================================
FILE: envrc
================================================
# direnv template for the homelab HashiCorp stack. Replace the FILL_IN_*
# placeholders before use. NOMAD_VAR_* entries become Nomad job variables
# (consumed by the `variable` blocks in nomad_jobs/*/nomad.job).

# --- Consul API endpoint and TLS material ---
export CONSUL_HTTP_ADDR=http://FILL_IN_IP:8500
export CONSUL_CACERT=/etc/consul.d/ssl/ca.cert
export CONSUL_CLIENT_CERT=/etc/consul.d/ssl/consul.cert
export CONSUL_CLIENT_KEY=/etc/consul.d/ssl/consul.key

# --- Vault endpoint and token ---
export VAULT_ADDR=http://FILL_IN_IP:8200
export VAULT_TOKEN=FILL_IN_TOKEN

# --- Nomad endpoint ---
export NOMAD_ADDR=http://FILL_IN_IP:4646

# --- Shared Nomad job variables (region/TLD and host directory layout) ---
export NOMAD_VAR_region='home'
export NOMAD_VAR_tld='home'
export NOMAD_VAR_shared_dir='/home/shared/'
export NOMAD_VAR_downloads_dir='/home/sabnzbd/downloads'
export NOMAD_VAR_music_dir='/home/media/Music'
export NOMAD_VAR_movies_dir='/home/media/Movies'
export NOMAD_VAR_tv_dir='/home/media/TV'
export NOMAD_VAR_media_dir='/home/media'


================================================
FILE: nomad_jobs/TEMPLATE-volume.hcl
================================================
// =============================================================================
// Nomad CSI Volume Template
// =============================================================================
//
// Usage:
//   1. Copy this file to nomad_jobs/<category>/<service-name>/volume.hcl
//   2. Replace __VOL_NAME__ with the volume name (usually same as service name)
//   3. Replace __SIZE__ with capacity (e.g. "5GiB", "10GiB", "50GiB")
//   4. Set access_mode based on your needs (see below)
//   5. Volume is auto-created by CI when pushed (if path is in workflow filter)
//
// Access modes:
//   single-node-writer       : one node read/write (most services)
//   single-node-reader-only  : one node read-only
//   multi-node-single-writer : multiple nodes can mount, one writes (HA failover)
//
// Size guide:
//   Config-only (app state):  1-5 GiB
//   Small databases:          5-10 GiB
//   Media metadata/indexes:   10-20 GiB
//   Time-series / logs:       50-100 GiB
//
// =============================================================================

// Volume identity: id/external_id/name conventionally all equal the service name.
id           = "__VOL_NAME__"
external_id  = "__VOL_NAME__"
name         = "__VOL_NAME__"
type         = "csi"
plugin_id    = "org.democratic-csi.iscsi"
// min == max pins the volume to an exact size (no resize range).
capacity_min = "__SIZE__"
capacity_max = "__SIZE__"

capability {
  access_mode     = "single-node-writer"
  // NOTE(review): the existing volume.hcl files in this repo all use
  // "block-device" here, not "file-system" — confirm which attachment
  // mode the democratic-csi iSCSI plugin expects before using this template.
  attachment_mode = "file-system"
}

mount_options {
  fs_type     = "ext4"
  mount_flags = ["noatime"]
}


================================================
FILE: nomad_jobs/TEMPLATE.job
================================================
// =============================================================================
// Nomad Job Template
// =============================================================================
//
// Usage:
//   1. Copy this file to nomad_jobs/<category>/<service-name>/nomad.job
//   2. Find/replace the following placeholders:
//      - __JOB_NAME__        : lowercase service name (e.g. "sonarr")
//      - __GROUP_NAME__      : group name (e.g. "downloaders", "monitoring", "ai")
//      - __CATEGORY__        : directory category (e.g. "media-stack", "ai-ml")
//      - __IMAGE__           : docker image with tag (e.g. "linuxserver/sonarr:4.0.16")
//      - __PORT__            : container port number (e.g. "8989")
//      - __HEALTH_PATH__     : HTTP health check path (e.g. "/ping", "/-/healthy", "/api/health")
//      - __CPU__             : CPU MHz allocation (see guide below)
//      - __MEMORY__          : Memory MB allocation (see guide below)
//   3. Remove any optional sections you don't need (marked with OPTIONAL)
//   4. Update the variable declarations at the bottom
//   5. Add any job-specific secrets to .envrc as NOMAD_VAR_<name>
//   6. Add the job path to .github/workflows/nomad.yaml if it should auto-deploy
//
// Resource guide:
//   Light services (static sites, proxies):     cpu = 100-200,  memory = 128-256
//   Medium services (APIs, web apps):            cpu = 500-1000, memory = 512-1024
//   Heavy services (.NET apps, databases, Java): cpu = 1000+,    memory = 1024-2048
//   GPU / ML workloads:                          cpu = 200+,     memory = 4096-8192
//
// =============================================================================

job "__JOB_NAME__" {
  region      = var.region
  datacenters = ["dc1"]
  type        = "service"

  meta {
    job_file = "nomad_jobs/__CATEGORY__/__JOB_NAME__/nomad.job"
    // Bump version on every change to force a new deployment even when the
    // rest of the jobspec is unchanged (repo convention).
    version  = "1"
  }

  // Ensures scheduling on nodes with NFS shared mount available.
  // Remove if the service has no need for shared storage or config dirs.
  constraint {
    attribute = "${meta.shared_mount}"
    operator  = "="
    value     = "true"
  }

  group "__GROUP_NAME__" {
    count = 1

    network {
      // Dynamic host port on the "lan" host network, mapped to the
      // container's __PORT__.
      port "http" {
        host_network = "lan"
        to           = "__PORT__"
      }
    }

    // --- OPTIONAL: CSI Volume ------------------------------------------------
    // Use for services that need persistent block storage (databases, stateful apps).
    // Requires a matching volume.hcl deployed first.
    // Remove this block and the prep-disk task + volume_mount if not needed.
    //
    // volume "__JOB_NAME__" {
    //   type            = "csi"
    //   read_only       = false
    //   source          = "__JOB_NAME__"
    //   access_mode     = "single-node-writer"
    //   attachment_mode = "file-system"
    // }
    // -------------------------------------------------------------------------

    restart {
      attempts = 3
      delay    = "15s"
      interval = "10m"
      mode     = "delay"
    }

    update {
      max_parallel      = 1
      min_healthy_time  = "30s"
      healthy_deadline  = "5m"
      progress_deadline = "10m"
      auto_revert       = true
    }

    // --- OPTIONAL: Prep-disk task --------------------------------------------
    // Required when using CSI volumes to fix ownership before the main task runs.
    // Set UID:GID to match the user the main container runs as.
    // Common values:
    //   linuxserver images: 65534:65534 (nobody)
    //   prometheus:         1000:2000
    //   grafana:            472:472
    //   loki:               10001:10001
    //
    // task "prep-disk" {
    //   driver = "docker"
    //
    //   lifecycle {
    //     hook    = "prestart"
    //     sidecar = false
    //   }
    //
    //   volume_mount {
    //     volume      = "__JOB_NAME__"
    //     destination = "/volume/"
    //     read_only   = false
    //   }
    //
    //   config {
    //     image   = "busybox:latest"
    //     command = "sh"
    //     args    = ["-c", "chown -R UID:GID /volume/"]
    //   }
    //
    //   resources {
    //     cpu    = 200
    //     memory = 128
    //   }
    // }
    // -------------------------------------------------------------------------

    task "__JOB_NAME__" {
      driver = "docker"

      config {
        image = "__IMAGE__"
        ports = ["http"]

        // --- Bind mount pattern (shared NFS config dir) ---
        // Use for services that store config on shared NFS.
        // volumes = [
        //   "${var.shared_dir}__JOB_NAME__:/config",
        // ]

        // --- Template mount pattern (config rendered by Nomad) ---
        // Use when config is templated inline below.
        // volumes = [
        //   "local/config.yaml:/app/config.yaml",
        // ]
      }

      // --- OPTIONAL: CSI volume mount ----------------------------------------
      // volume_mount {
      //   volume      = "__JOB_NAME__"
      //   destination = "/data"
      //   read_only   = false
      // }
      // -----------------------------------------------------------------------

      env {
        TZ = "Etc/UTC"
        // PUID = "65534"    // common for linuxserver images
        // PGID = "65534"
      }

      // --- OPTIONAL: Config template -----------------------------------------
      // Use for services that need a rendered config file.
      // Reference secrets with ${var.secret_name} syntax.
      //
      // template {
      //   data        = <<EOH
      // your config here
      // EOH
      //   destination = "local/config.yaml"
      //   change_mode = "restart"
      //   // change_mode options:
      //   //   "restart" - restart the task on config change (safest default)
      //   //   "signal"  - send a signal: change_signal = "SIGHUP"
      //   //   "noop"    - do nothing (use only for static configs)
      // }
      // -----------------------------------------------------------------------

      // Registers the service in Consul; the traefik.enable tag exposes it
      // via the reverse proxy. The HTTP check restarts the task after
      // repeated failures (check_restart).
      service {
        port = "http"
        name = "__JOB_NAME__"
        tags = [
          "traefik.enable=true",
        ]
        check {
          type     = "http"
          path     = "__HEALTH_PATH__"
          interval = "10s"
          timeout  = "2s"
          check_restart {
            limit           = 3
            grace           = "60s"
            ignore_warnings = false
          }
        }
      }

      resources {
        cpu    = __CPU__
        memory = __MEMORY__
      }
    }
  }
}

// =============================================================================
// Variables
// =============================================================================
// Common variables (always required - provided by .envrc / GitHub Actions):

// Supplied via NOMAD_VAR_region (see envrc / CI workflow env).
variable "region" {
  type        = string
  description = "Nomad region"
}

// Supplied via NOMAD_VAR_tld.
variable "tld" {
  type        = string
  description = "Top-level domain for service discovery"
}

// Supplied via NOMAD_VAR_shared_dir.
variable "shared_dir" {
  type        = string
  description = "Path to shared NFS config directory"
}

// --- OPTIONAL: Add job-specific variables below ------------------------------
// Follow this pattern:
//
// variable "my_secret" {
//   type        = string
//   description = "Description of what this secret is for"
// }
//
// Then add to .envrc:  export NOMAD_VAR_my_secret='value'
// And to GitHub Actions workflow env block if auto-deploying.
// -----------------------------------------------------------------------------


================================================
FILE: nomad_jobs/ai-ml/cognee/nomad.job
================================================
# Cognee AI memory/knowledge-graph service. Talks to LiteLLM for LLM calls,
# Postgres (pgvector) for metadata and Neo4j for the graph store.
job "cognee" {
  region = var.region
  datacenters = ["dc1"]
  type = "service"

  meta {
    job_file = "nomad_jobs/ai-ml/cognee/nomad.job"
    version = "3"
  }

  group "cognee-ai" {
    count = 1

    network {
      port "http" { to = 8000 }
#      port "mcp" { to = 3000 }
    }


    restart {
      attempts = 3
      delay    = "15s"
      interval = "10m"
      mode     = "delay"
    }

    update {
      max_parallel     = 1
      min_healthy_time = "30s"
      auto_revert      = true
    }

    task "cognee-service" {
      driver = "docker"

      config {
        # NOTE(review): DNS server hard-coded; other jobs take this from
        # var.dns_server_ip — consider parameterizing for consistency.
        dns_servers = ["192.168.50.2"]
        image = "cognee/cognee:0.5.8"
        ports = ["http"]
      }

      env {
        # --- LLM Configuration ---
        # "openai" provider pointed at the LiteLLM endpoint — presumably uses
        # LiteLLM's OpenAI-compatible API to reach the vertex_ai model; the
        # empty API keys suggest LiteLLM auth is not enforced here (verify).
        LLM_PROVIDER            = "openai"
        LLM_MODEL               = "vertex_ai/gemini-1.5-pro-latest"
        LLM_API_KEY             = ""
        LLM_ENDPOINT            = "https://litellm.demonsafe.com"

        # --- Embedding Configuration ---
        EMBEDDING_PROVIDER      = "openai"
        EMBEDDING_MODEL         = "text-embedding-ada-002"
        EMBEDDING_API_KEY       = ""

        # --- Relational Database (PostgreSQL) ---
        # NOTE(review): placeholder credentials ("ChAnGeMe") committed in
        # plaintext — these should come through NOMAD_VAR_* variables like
        # other jobs in this repo.
        DB_PROVIDER             = "postgres"
        DB_HOST                 = "pgvector.service.consul"
        DB_PORT                 = "5432"
        DB_USERNAME             = "postgres"
        DB_PASSWORD             = "ChAnGeMe"
        DB_NAME                 = "cognee_metadata_db"

        # --- Vector Database (Qdrant) ---
        #VECTOR_DB_PROVIDER      = "qdrant"
        #VECTOR_DB_URL           = "http://qdrant.service.consul:6333"

        # --- Graph Database (Neo4j) ---
        GRAPH_DATABASE_PROVIDER = "neo4j"
        GRAPH_DATABASE_URL      = "bolt://neo4j.service.consul:7687"
        GRAPH_DATABASE_USERNAME = "neo4j"
        GRAPH_DATABASE_PASSWORD = "ChAnGeMe"

        # --- General Settings ---
        HOST                    = "0.0.0.0"
        ENVIRONMENT             = "production"
        DEBUG                   = "false"
      }

      resources {
        cpu    = 100
        memory = 2048
      }

      service {
        name     = "cognee"
        tags     = ["traefik.enable=true"]
        port     = "http"

        check {
          type     = "tcp"
          port     = "http"
          interval = "15s"
          timeout  = "3s"
        }
      }
    }
  }
}

# NOTE(review): default "global" differs from the "home" region used by
# envrc — confirm which is intended.
variable "region" {
  type = string
  default = "global"
}


================================================
FILE: nomad_jobs/ai-ml/crawl4ai/nomad.job
================================================
# Crawl4AI web-crawling API. Config is rendered onto a CSI volume and the
# container reads it via CONFIG_PATH; Redis backs caching and rate limits.
job "crawl4ai" {
  region      = var.region
  datacenters = ["dc1"]
  type        = "service"

  meta {
    job_file = "nomad_jobs/ai-ml/crawl4ai/nomad.job"
    version  = "5" # De-duplicated the group update stanza
  }

  # Schedule only on nodes that expose the shared NFS mount.
  constraint {
    attribute = "${meta.shared_mount}"
    operator  = "="
    value     = "true"
  }

  group "app" {
    count = 1

    network {
      port "http" {
        to = 11235
      }
    }

    # Persistent CSI volume holding crawl data and the rendered config file.
    volume "crawl4ai" {
      type            = "csi"
      read_only       = false
      source          = "crawl4ai-data"
      access_mode     = "single-node-writer"
      attachment_mode = "file-system"
    }

    # Fix: the previous revision declared this stanza twice in the same
    # group; a group takes exactly one update stanza.
    update {
      max_parallel     = 1
      min_healthy_time = "30s"
      auto_revert      = true
    }

    # Prestart task: ensure the config directory exists on the volume and is
    # writable before the main container starts.
    task "prep-disk" {
      driver = "docker"

      lifecycle {
        hook    = "prestart"
        sidecar = false
      }

      volume_mount {
        volume      = "crawl4ai"
        destination = "/volume/"
        read_only   = false
      }

      config {
        image   = "busybox:latest"
        command = "sh"
        args    = ["-c", "mkdir -p /volume/config && chmod -R 777 /volume/"]
      }

      resources {
        cpu    = 200
        memory = 128
      }
    }

    task "crawl4ai" {
      driver = "docker"

      config {
        image    = "unclecode/crawl4ai:0.6.0-r2"
        ports    = ["http"]
        # Large /dev/shm (~1GB) for the headless browser.
        shm_size = "1000000000"
        dns_servers = ["192.168.50.2"]
      }

      volume_mount {
        volume      = "crawl4ai"
        destination = "/app/data"
        read_only   = false
      }

      # Rendered to the CSI volume; CONFIG_PATH below points the app at it.
      template {
        data = <<EOH
# Application Configuration
app:
  title: "Crawl4AI API"
  version: "0.6.0-r1"
  host: "0.0.0.0"
  port: 11235
  reload: False
  timeout_keep_alive: 300

# Default LLM Configuration
llm:
  provider: "gemini/gemini-2.5-flash-preview-04-17"
  api_key_env: "${var.litellm_crawl4ai_key}"
  api_base: "https://litellm.${var.tld}"

# Redis Configuration
redis:
  host: "redis.service.consul"
  port: 6379
  key_prefix: "crawl4ai:"

# Rate Limiting Configuration
rate_limit:
  enabled: true
  limits:
    default: "60/minute"
    html: "120/minute" 
    screenshot: "30/minute"
    pdf: "15/minute"
  storage_uri: "redis://redis.service.consul:6379/2"

# Security Configuration
security:
  enabled: false
  jwt_enabled: false
  https_redirect: false
  trusted_hosts: ["*"]
  headers:
    x_content_type_options: "nosniff"
    x_frame_options: "DENY"
    content_security_policy: "default-src 'self'"
    strict_transport_security: "max-age=63072000; includeSubDomains"

# Crawler Configuration
crawler:
  memory_threshold_percent: 95.0
  rate_limiter:
    base_delay: [1.0, 2.0]
  timeouts:
    stream_init: 30.0
    batch_process: 300.0

# Logging Configuration
logging:
  level: "INFO"
  format: "%(asctime)s - %(name)s - %(levelname)s - %(message)s"

# Observability Configuration
observability:
  prometheus:
    enabled: True
    endpoint: "/metrics"
  health_check:
    endpoint: "/health"
EOH

        destination   = "/app/data/config/config.yml"
        change_mode   = "restart"
      }

      resources {
        cpu    = 1000
        memory = 1024
      }

      env {
        PORT = "11235"
        CONFIG_PATH = "/app/data/config/config.yml"
        OPENAI_API_KEY = "${var.litellm_crawl4ai_key}"
      }

      service {
        port = "http"
        name = "crawl4ai"
        tags = [
          "traefik.enable=true",
          "metrics"
        ]

        check {
          type     = "http"
          path     = "/health"
          port     = "http"
          interval = "10s"
          timeout  = "2s"
          check_restart {
            limit           = 3
            grace           = "60s"
            ignore_warnings = false
          }
        }
      }
    }
  }
}

# Supplied via NOMAD_VAR_* environment variables (envrc / CI).
variable "region" {}
variable "tld" {}
variable "shared_dir" {}
variable "litellm_crawl4ai_key" {}

================================================
FILE: nomad_jobs/ai-ml/crawl4ai/volume.hcl
================================================
# CSI volume definition for the crawl4ai job (referenced as source
# "crawl4ai-data" in its jobspec).
id           = "crawl4ai-data"
external_id  = "crawl4ai-data"
name         = "crawl4ai-data"
type         = "csi"
plugin_id    = "org.democratic-csi.iscsi"
# min == max pins the volume to exactly 5GiB.
capacity_min = "5GiB"
capacity_max = "5GiB"

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "block-device"
}

mount_options {
  fs_type     = "ext4"
  mount_flags = ["noatime", "nodiratime", "data=ordered"]
}

================================================
FILE: nomad_jobs/ai-ml/litellm/nomad.job
================================================
# LiteLLM proxy: single OpenAI-compatible gateway in front of Ollama,
# AWS Bedrock and Google Gemini, with Redis caching and Postgres-backed
# key/model storage.
job "litellm" {
  region = var.region
  datacenters = ["dc1"]
  type        = "service"

  meta {
      job_file = "nomad_jobs/ai-ml/litellm/nomad.job"
      version = "6"
  }

  constraint {
    attribute = "${meta.shared_mount}"
    operator  = "="
    value     = "true"
  }

  group "ai" {
    count = 1 
    network {
      port "http" {
        host_network = "lan"
        to = "4000"  # LiteLLM listens on 4000 inside the container (its default proxy port)
      }
    }

    volume "litellm" {
      type      = "csi"
      read_only = false
      source    = "litellm"
      access_mode = "single-node-writer"
      attachment_mode = "file-system"
    }

    restart {
      attempts = 3
      delay    = "15s"
      interval = "10m"
      mode     = "delay"
    }


    update {
      max_parallel     = 1
      min_healthy_time = "30s"
      auto_revert      = true
    }

    task "litellm" {
      driver = "docker"
      config {
        # NOTE(review): "main-latest" is an unpinned moving tag — consider
        # pinning a release like the other jobs in this repo.
        image = "ghcr.io/berriai/litellm:main-latest"
        ports = ["http"]
        volumes = [
          "local/config.yaml:/app/config.yaml",
        ]
      }

      volume_mount {
        volume      = "litellm"
        destination = "/data"
        read_only   = false
      }

      env {
        PORT = "${NOMAD_PORT_http}"
        HOST = "0.0.0.0"
        LITELLM_CONFIG_PATH = "/app/config.yaml"
        OLLAMA_BASE_URL = "${var.ollama_base_url}"
        AWS_ACCESS_KEY_ID = "${var.aws_access_key}"
        AWS_SECRET_ACCESS_KEY = "${var.aws_secret_key}"
        AWS_REGION = "${var.bedrock_aws_region}"
        GOOGLE_API_KEY = "${var.gemini_api_key}"
        LITELLM_MASTER_KEY = "${var.litellm_master_key}"
        #LITELLM_SALT_KEY = "${var.litellm_salt_key}"  # Added salt key for credential encryption
        # NOTE(review): DB password is interpolated into the URL in plaintext;
        # it ultimately comes from NOMAD_VAR_postgres_pass.
        DATABASE_URL = "postgresql://postgres:${var.postgres_pass}@postgres.service.consul:5432/litellm"
        STORE_MODEL_IN_DB = "True"
      }

      # Rendered LiteLLM config, bind-mounted to /app/config.yaml above.
      template {
        data = <<EOH
model_list:
  # Ollama models
  - model_name: ollama/llama2
    litellm_params:
      model: ollama/llama2
      api_base: ${var.ollama_base_url}

  - model_name: gpt-3.5-turbo
    litellm_params:
      model: ollama/llama2
      api_base: ${var.ollama_base_url}
  
  # AWS Bedrock - Claude 3.7 Sonnet
  - model_name: anthropic.claude-3-7-sonnet-20250219-v1:0
    litellm_params:
      model: bedrock/eu.anthropic.claude-3-7-sonnet-20250219-v1:0
      aws_access_key_id: ${var.aws_access_key}
      aws_secret_access_key: ${var.aws_secret_key}
      bedrock_aws_region: ${var.bedrock_aws_region}
  
  # Google Gemini Pro 2.5
  - model_name: gemini
    litellm_params:
      api_key: ${var.gemini_api_key}
      vertex_project: "htg-infra"
      vertex_location: "us-central1"


litellm_settings:
  drop_params: True
  cache: True
  cache_params:
    type: redis
    host: redis.service.consul
    port: 6379
    password: ""
    namespace: litellm
  # Log and trace settings
  streaming: True
  logging: True
  # Added user management settings
  user_api_key_backend: "postgres"
  use_queue: True
  num_workers: 4

environment_variables:
  AWS_ACCESS_KEY_ID: ${var.aws_access_key}
  AWS_SECRET_ACCESS_KEY: ${var.aws_secret_key}
  AWS_REGION: ${var.bedrock_aws_region}
  GOOGLE_API_KEY: ${var.gemini_api_key}
  LITELLM_MASTER_KEY: ${var.litellm_master_key}
  LITELLM_SALT_KEY: ${var.litellm_salt_key}
  DATABASE_URL: postgresql://postgres:${var.postgres_pass}@postgres.service.consul:5432/litellm
EOH
        destination = "local/config.yaml"
        env         = false
      }

      service {
        port = "http"
        name = "litellm"
        tags = [
          "traefik.enable=true"
        ]
        check {
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }

      resources {
        cpu    = 800
        memory = 1536
      }
    }
  }
}

# All variables below are supplied via NOMAD_VAR_* (envrc / CI secrets).
variable "region" {
    type = string
}

variable "tld" {
    type = string
}

variable "shared_dir" {
    type = string
}

variable "ollama_base_url" {
    type = string
    description = "Base URL for the Ollama service"
    default = "http://ollama.service.consul:11434"
}

variable "aws_access_key" {
    type = string
    description = "AWS Access Key ID for Bedrock access"
}

variable "aws_secret_key" {
    type = string
    description = "AWS Secret Access Key for Bedrock access"
}

variable "bedrock_aws_region" {
    type = string
    description = "AWS Region for Bedrock"
    default = "eu-central-1"
}

variable "gemini_api_key" {
    type = string
    description = "Google API Key for Gemini access"
}

variable "litellm_master_key" {
    type = string
    description = "Master key for LiteLLM authentication"
}

variable "litellm_salt_key" {
    type = string
    description = "Salt key for encrypting provider credentials"
}

variable "postgres_pass" {
    type = string
    description = "Password for PostgreSQL database"
}

================================================
FILE: nomad_jobs/ai-ml/litellm/volume.hcl
================================================
# CSI volume for the litellm job (mounted at /data in its jobspec).
id           = "litellm"
external_id  = "litellm"
name         = "litellm"
type         = "csi"
plugin_id    = "org.democratic-csi.iscsi"
capacity_min = "1GiB"
capacity_max = "1GiB"

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "block-device"
}

mount_options {
  fs_type     = "ext4"
  mount_flags = ["noatime"]
}

================================================
FILE: nomad_jobs/ai-ml/manyfold/3dprints-volume.hcl
================================================
# CSI volume for the manyfold job's 3D-print library (mounted at /libraries).
id           = "3dprints"
external_id  = "3dprints"
name         = "3dprints"
type         = "csi"
plugin_id    = "org.democratic-csi.iscsi"
capacity_min = "40GiB"
capacity_max = "40GiB"

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "block-device"
}

mount_options {
  fs_type     = "ext4"
  mount_flags = ["noatime"]
}


================================================
FILE: nomad_jobs/ai-ml/manyfold/nomad.job
================================================
# Manyfold 3D-model library. Two CSI volumes: "manyfold" for app config,
# "3dprints" for the model library itself.
# NOTE(review): this file mixes tabs and spaces and the meta "version" line
# is mis-indented — worth a formatting pass (`nomad fmt`).
job "manyfold" {
  region = var.region
  datacenters = ["dc1"]
  type        = "service"

  meta {
      job_file = "nomad_jobs/ai-ml/manyfold/nomad.job"
version = "4"
  }

  constraint {
    attribute = "${meta.shared_mount}"
    operator  = "="
    value     = "true"
  }

  group "downloaders" {
    count = 1 
    network {
      port "http" {
        host_network = "lan"
        to = "3214"
      }
    }

    volume "manyfold" {
      type      = "csi"
      read_only = false
      source    = "manyfold"
      access_mode = "single-node-writer"
      attachment_mode = "file-system"
    }

    volume "3dprints" {
      type      = "csi"
      read_only = false
      source    = "3dprints"
      access_mode = "single-node-writer"
      attachment_mode = "file-system"
    }


    restart {
      attempts = 3
      delay    = "15s"
      interval = "10m"
      mode     = "delay"
    }

    update {
      max_parallel     = 1
      min_healthy_time = "30s"
      auto_revert      = true
    }

    task "manyfold" {
      driver = "docker"
      config {
        image = "ghcr.io/manyfold3d/manyfold-solo:0.137.0"
        ports = ["http"]
      }

      volume_mount {
        volume      = "manyfold"
        destination = "/config"
        read_only   = false
      }

      volume_mount {
        volume      = "3dprints"
        destination = "/libraries"
        read_only   = false
      }

      env {
        PUID = "1000"
        PGID = "1000"
        TZ = "Etc/UTC"
	SECRET_KEY_BASE = "${var.manyfold_secret_key}"
      }

      # forward-auth middleware gates access via the external auth service.
      service {
        port = "http"
	name = "manyfold"
        tags = [
          "traefik.enable=true",
          "traefik.http.middlewares.httpsRedirect.redirectscheme.scheme=https",
          "traefik.http.routers.${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_TASK_NAME}.${var.tld}",
          "traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=forward-auth"
        ]
        check {
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }

      resources {
        cpu    = 100
        memory = 1024
      }
    }
  }
}

variable "region" {
    type = string
}

variable "tld" {
    type = string
}

variable "shared_dir" {
    type = string
}

# NOTE(review): downloads_dir and music_dir are declared but not referenced
# anywhere in this jobspec — likely copy-paste leftovers; confirm and remove.
variable "downloads_dir" {
    type = string
}

variable "music_dir" {
    type = string
}

variable "manyfold_secret_key" {
    type = string
}


================================================
FILE: nomad_jobs/ai-ml/manyfold/prints_volume.hcl
================================================
# NOTE(review): this file is a byte-for-byte duplicate of
# 3dprints-volume.hcl in the same directory (same volume id "3dprints") —
# one of the two should be removed to avoid conflicting registrations.
id           = "3dprints"
external_id  = "3dprints"
name         = "3dprints"
type         = "csi"
plugin_id    = "org.democratic-csi.iscsi"
capacity_min = "40GiB"
capacity_max = "40GiB"

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "block-device"
}

mount_options {
  fs_type     = "ext4"
  mount_flags = ["noatime"]
}



================================================
FILE: nomad_jobs/ai-ml/manyfold/volume.hcl
================================================
# CSI volume for the manyfold job's app config (mounted at /config).
id           = "manyfold"
external_id  = "manyfold"
name         = "manyfold"
type         = "csi"
plugin_id    = "org.democratic-csi.iscsi"
capacity_min = "40GiB"
capacity_max = "40GiB"

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "block-device"
}

mount_options {
  fs_type     = "ext4"
  mount_flags = ["noatime"]
}



================================================
FILE: nomad_jobs/ai-ml/ollama/nomad.job
================================================
# Ollama API server on the GPU node (datacenter "cheese"), with model data
# persisted on the host at var.ollama_data_dir (mounted over /root/.ollama).
job "ollama" {
  region = var.region
  datacenters = ["cheese"]
  type        = "service"

  meta {
    job_file = "nomad_jobs/ai-ml/ollama/nomad.job"
    version = "5" // Remove misused OLLAMA_MODELS env var
  }

  group "web" {
    network {
      mode = "host"
      port "web" {
        static = "11434"
        host_network = "lan"
      }
    }

    restart {
      attempts = 3
      delay    = "15s"
      interval = "10m"
      mode     = "delay"
    }

    update {
      max_parallel     = 1
      min_healthy_time = "30s"
      auto_revert      = true
    }

    task "ollama" {
      driver = "docker"

      config {
        # NOTE(review): image tag is unpinned ("latest" implied) — consider
        # pinning a release like other jobs in this repo.
        image = "ollama/ollama"
        runtime = "nvidia"
        dns_servers = [var.dns_server_ip]
        volumes = [
          "${var.ollama_data_dir}:/root/.ollama",
        ]
        ports = ["web"]
      }

      env {
        # Make the GPU visible to this container.
        NVIDIA_VISIBLE_DEVICES       = "all"
        NVIDIA_DRIVER_CAPABILITIES   = "compute,utility"
        # Fix: the previous revision set OLLAMA_MODELS to
        # "llama3.2:3b,codellama:7b" intending a pre-pull list, but
        # OLLAMA_MODELS actually configures the model STORAGE DIRECTORY —
        # it redirected model storage away from the mounted /root/.ollama
        # volume, breaking persistence. Pre-pull models with `ollama pull`
        # against the running service instead.
      }

      service {
        name = "${NOMAD_JOB_NAME}"
        tags = ["traefik.enable=true"]
        port = "web"

        check {
          type     = "tcp"
          port     = "web"
          interval = "30s"
          timeout  = "2s"
        }
      }

      resources {
        cpu    = "200"
        memory = "7000"
      }
    }
  }
}

variable "region" {
    type = string
}

variable "shared_dir" {
    type = string
}

variable "ollama_data_dir" {
  type = string
}

// NOTE(review): declared but unused in this jobspec.
variable "datacenter" {
  type = string
}

variable "dns_server_ip" {
  type = string
}


================================================
FILE: nomad_jobs/ai-ml/open-webui/nomad.job
================================================
# Open WebUI front-end for Ollama; app data lives on the shared NFS dir.
job "open-webui" {
  region = var.region
  datacenters = ["dc1"]
  type        = "service"

  meta {
      job_file = "nomad_jobs/ai-ml/open-webui/nomad.job"
      version = "3"  // Right-size memory 1024MB -> 768MB
  }

  group "web" {
    network {
      mode = "host"
      port "web" {
        to = "8080"
        host_network = "lan"
      }
    }

    restart {
      attempts = 3
      delay    = "15s"
      interval = "10m"
      mode     = "delay"
    }

    task "open-webui" {
      driver = "docker"

      config {
        image = "ghcr.io/open-webui/open-webui:v0.8.12"
        dns_servers = [var.dns_server_ip]
        volumes = [
          "${var.shared_dir}open-webui:/app/backend/data",
        ]
        ports = ["web"]
      }

     # NOTE(review): this env block is indented one space off — cosmetic only.
     env {
        OLLAMA_BASE_URL= var.ollama_base_url
        WEBUI_SECRET_KEY = var.webui_secret_key
     }
      service {
        name = "${NOMAD_JOB_NAME}"
        tags = ["traefik.enable=true"]
        port = "web"

        check {
          type     = "tcp"
          port     = "web"
          interval = "30s"
          timeout  = "2s"
        }
      }

      resources {
        cpu    = "200"
        memory = "768"
      }
    }
  }
}

variable "region" {
    type = string
}

variable "shared_dir" {
    type = string
}

variable "ollama_base_url" {
  type = string
}

variable "webui_secret_key" {
  type = string
}

# NOTE(review): declared but unused in this jobspec.
variable "datacenter" {
  type = string
}

variable "dns_server_ip" {
  type = string
}


================================================
FILE: nomad_jobs/ai-ml/paperless-ai/nomad.job
================================================
# Paperless-AI companion service; app data on the shared NFS dir.
job "paperless-ai" {
  region = var.region
  datacenters = ["dc1"]
  type        = "service"

  meta {
      job_file = "nomad_jobs/ai-ml/paperless-ai/nomad.job"
version = "2"
  }

  group "web" {
    network {
      mode = "host"
      port "web" {
        to = "3000"
        host_network = "lan"
      }
    }

    restart {
      attempts = 3
      delay    = "15s"
      interval = "10m"
      mode     = "delay"
    }

    task "paperless-ai" {
      driver = "docker"

      config {
        # NOTE(review): image tag is unpinned and the DNS server is
        # hard-coded (other jobs use var.dns_server_ip) — confirm intent.
        image = "clusterzx/paperless-ai"
        dns_servers = ["192.168.50.2"]
        volumes = [
          "${var.shared_dir}paperless-ai:/app/data",
        ]
        ports = ["web"]
      }

      service {
        name = "${NOMAD_JOB_NAME}"
        tags = ["traefik.enable=true"]
        port = "web"

        check {
          type     = "tcp"
          port     = "web"
          interval = "30s"
          timeout  = "2s"
        }
      }

      resources {
        cpu    = "200"
        memory = "2048"
      }
    }
  }
}

variable "region" {
    type = string
}

variable "shared_dir" {
    type = string
}


================================================
FILE: nomad_jobs/ai-ml/pgvector-client/nomad.job
================================================
# Batch example job: inserts randomly generated embeddings into a pgvector
# PostgreSQL instance and runs a cosine-similarity search against them.
job "pgvector-client-example" {
  region = var.region
  datacenters = ["dc1"]
  type        = "batch"

  meta {
    job_file = "nomad_jobs/ai-ml/pgvector-client/nomad.job"
    version = "2"  // Install Python deps at runtime; fix JSONB and vector param adaptation
  }

  group "client" {

    restart {
      attempts = 3
      delay    = "15s"
      interval = "10m"
      mode     = "delay"
    }

    task "embedding-example" {
      driver = "docker"

      config {
        image = "python:3.14-slim"
        # The slim image ships without psycopg2/numpy, so install them before
        # running the script (previously the task crashed on ImportError).
        command = "sh"
        args = [
          "-c",
          "pip install --no-cache-dir psycopg2-binary numpy && exec python /local/embedding-example.py",
        ]
      }

      # Connection settings consumed by the script below via os.environ.
      env {
        PGVECTOR_HOST     = "pgvector.service.consul"
        PGVECTOR_PORT     = "5433"
        PGVECTOR_USER     = "postgres"
        PGVECTOR_PASSWORD = "${var.pgvector_pass}"
        PGVECTOR_DB       = "embeddings"
      }

      template {
        data = <<EOH
#!/usr/bin/env python3
import os
import psycopg2
import numpy as np
from psycopg2.extras import execute_values, Json

# PostgreSQL connection parameters (injected through the task env block)
pg_host = os.environ.get('PGVECTOR_HOST', 'pgvector.service.consul')
pg_port = os.environ.get('PGVECTOR_PORT', '5433')
pg_user = os.environ.get('PGVECTOR_USER', 'postgres')
pg_password = os.environ.get('PGVECTOR_PASSWORD', '')
pg_db = os.environ.get('PGVECTOR_DB', 'embeddings')

# Function to create random embeddings for demo purposes
def create_random_embedding(dim=1536):
    """Create a random normalized embedding vector."""
    vec = np.random.randn(dim)
    # Normalize to unit vector (common practice for embeddings)
    vec = vec / np.linalg.norm(vec)
    return vec.tolist()

# Connect to PostgreSQL with pgvector
print(f"Connecting to pgvector at {pg_host}:{pg_port}")
conn = psycopg2.connect(
    host=pg_host,
    port=pg_port,
    user=pg_user,
    password=pg_password,
    dbname=pg_db
)

cursor = conn.cursor()

# Ensure pgvector extension is enabled
print("Ensuring pgvector extension is enabled...")
cursor.execute("CREATE EXTENSION IF NOT EXISTS vector")

# Create a table for storing document embeddings
print("Creating documents table...")
cursor.execute("""
    CREATE TABLE IF NOT EXISTS documents (
        id SERIAL PRIMARY KEY,
        content TEXT NOT NULL,
        embedding VECTOR(1536) NOT NULL,
        metadata JSONB,
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    )
""")

# Create an index for efficient similarity search
print("Creating vector index (this might take a while for large tables)...")
try:
    cursor.execute("""
        CREATE INDEX IF NOT EXISTS documents_embedding_idx
        ON documents
        USING ivfflat (embedding vector_cosine_ops)
        WITH (lists = 100)
    """)
except Exception as e:
    print(f"Warning: Could not create index: {e}")
    print("Continuing without index...")

# Sample documents
print("Inserting sample documents...")
documents = [
    "The quick brown fox jumps over the lazy dog",
    "Machine learning models can process vector embeddings efficiently",
    "PostgreSQL with pgvector extension provides vector similarity search",
    "Vector databases are essential for modern AI applications",
    "Semantic search uses embeddings to find relevant results"
]

# Generate random embeddings and insert documents.
# Json() wraps the metadata dict so psycopg2 can adapt it to JSONB
# (a bare dict is not adaptable and raised ProgrammingError before).
data = []
for doc in documents:
    embedding = create_random_embedding()
    data.append((doc, embedding, Json({"source": "example"})))

execute_values(
    cursor,
    """
    INSERT INTO documents (content, embedding, metadata)
    VALUES %s
    RETURNING id
    """,
    data,
    template="(%s, %s::vector, %s)"
)

print(f"Inserted {len(documents)} documents with embeddings")

# Perform a similarity search; the parameter is sent as a float array,
# so cast it explicitly to vector for the <=> (cosine distance) operator.
print("\nPerforming similarity search...")
query_embedding = create_random_embedding()

cursor.execute("""
    SELECT id, content, 1 - (embedding <=> %s::vector) AS similarity
    FROM documents
    ORDER BY embedding <=> %s::vector
    LIMIT 3
""", (query_embedding, query_embedding))

results = cursor.fetchall()
print("\nTop 3 most similar documents:")
for id, content, similarity in results:
    print(f"ID: {id}, Similarity: {similarity:.4f}")
    print(f"Content: {content}")
    print("-" * 50)

# Commit and close
conn.commit()
cursor.close()
conn.close()
print("Example completed successfully!")
EOH
        destination = "local/embedding-example.py"
      }

      resources {
        cpu    = 500
        memory = 512
      }
    }
  }
}

# Nomad region this job is submitted to.
variable "region" {
    type = string
}

variable "pgvector_pass" {
    type = string
    description = "Admin password for pgvector PostgreSQL server"
}


================================================
FILE: nomad_jobs/ai-ml/radbot/nomad-dev.job
================================================
# Development instance of radbot, tracking the :dev image tag and using a
# separate database (radbot_dev) from the production job.
job "radbot-dev" {
  region      = var.region
  datacenters = ["dc1"]
  type        = "service"

  meta {
    job_file = "nomad_jobs/ai-ml/radbot/nomad-dev.job"
    version  = "1"
  }

  # Only schedule on nodes that expose the shared mount.
  constraint {
    attribute = "${meta.shared_mount}"
    operator  = "="
    value     = "true"
  }

  group "web" {
    count = 1

    network {
      # Dynamic host port on the "lan" network, mapped to 8000 in-container.
      port "http" {
        host_network = "lan"
        to           = 8000
      }
    }

    restart {
      attempts = 3
      delay    = "15s"
      interval = "10m"
      mode     = "delay"
    }

    update {
      max_parallel     = 1
      min_healthy_time = "60s"
      healthy_deadline = "5m"
      auto_revert      = true
    }

    task "radbot-dev" {
      driver = "docker"

      config {
        image       = "ghcr.io/perrymanuk/radbot:dev"
        dns_servers = [var.dns_server_ip]
        ports       = ["http"]
        volumes     = [
          # Rendered bootstrap config (template below) mounted into the app.
          "local/config.yaml:/app/config.yaml",
        ]
      }

      # Bootstrap secrets only; everything else comes from the database.
      env {
        RADBOT_CREDENTIAL_KEY = var.radbot_credential_key
        RADBOT_ADMIN_TOKEN    = var.radbot_admin_token
        RADBOT_CONFIG_FILE    = "/app/config.yaml"
        RADBOT_ENV            = "dev"
      }

      # Minimal config: just the database connection for the dev DB.
      template {
        data = <<EOH
database:
  host: postgres.service.consul
  port: 5432
  user: postgres
  password: ${var.postgres_pass}
  db_name: radbot_dev
EOH
        destination = "local/config.yaml"
        env         = false
      }

      service {
        port = "http"
        name = "radbot-dev"
        tags = [
          "traefik.enable=true",
        ]
        check {
          type     = "http"
          path     = "/health"
          interval = "30s"
          timeout  = "5s"
          # Restart the task after 3 consecutive failures, with a 120s grace
          # period after task start before failures count.
          check_restart {
            limit           = 3
            grace           = "120s"
            ignore_warnings = false
          }
        }
      }

      resources {
        cpu    = 1000
        memory = 2048
      }
    }
  }
}

# ------------------------------------------------------------------
# Variables — only bootstrap secrets
# ------------------------------------------------------------------

variable "region" {
  type = string
}

variable "dns_server_ip" {
  type = string
}

variable "postgres_pass" {
  type        = string
  description = "PostgreSQL password (needed to connect to DB where all config lives)"
}

variable "radbot_credential_key" {
  type        = string
  description = "Fernet master key for encrypting credentials/config in the DB"
}

variable "radbot_admin_token" {
  type        = string
  description = "Bearer token for /admin/ — the only pre-shared secret"
}


================================================
FILE: nomad_jobs/ai-ml/radbot/nomad.job
================================================
# Production radbot deployment: pinned image tag, production database
# (radbot_todos), and the ai-intel wiki mounted from the shared directory.
job "radbot" {
  region      = var.region
  datacenters = ["dc1"]
  type        = "service"

  meta {
    job_file = "nomad_jobs/ai-ml/radbot/nomad.job"
    version  = "2"
  }

  # Only schedule on nodes that expose the shared mount (needed for ai-intel).
  constraint {
    attribute = "${meta.shared_mount}"
    operator  = "="
    value     = "true"
  }

  group "web" {
    count = 1

    network {
      # Dynamic host port on the "lan" network, mapped to 8000 in-container.
      port "http" {
        host_network = "lan"
        to           = 8000
      }
    }

    restart {
      attempts = 3
      delay    = "15s"
      interval = "10m"
      mode     = "delay"
    }

    update {
      max_parallel     = 1
      min_healthy_time = "60s"
      healthy_deadline = "5m"
      auto_revert      = true
    }

    task "radbot" {
      driver = "docker"

      config {
        image       = "ghcr.io/perrymanuk/radbot:v0.128"
        dns_servers = [var.dns_server_ip]
        ports       = ["http"]
        volumes     = [
          "local/config.yaml:/app/config.yaml",
          "${var.shared_dir}ai-intel:/mnt/ai-intel",
        ]
      }

      # Bootstrap-only env vars:
      # - RADBOT_CREDENTIAL_KEY: decrypt credentials/config stored in the DB
      # - RADBOT_ADMIN_TOKEN:    access /admin/ to manage everything else
      # - RADBOT_MCP_TOKEN:      bootstrap bearer for MCP bridge HTTP
      #                          (credential-store `mcp_token` wins once set)
      # - RADBOT_WIKI_PATH:      wiki root inside the container (matches the
      #                          ai-intel bind mount above)
      # All other config (API keys, models, integrations, endpoints) is stored
      # encrypted in the radbot_credentials table and managed via /admin/ UI.
      env {
        RADBOT_CREDENTIAL_KEY = var.radbot_credential_key
        RADBOT_ADMIN_TOKEN    = var.radbot_admin_token
        RADBOT_MCP_TOKEN      = var.radbot_mcp_token
        RADBOT_WIKI_PATH      = "/mnt/ai-intel"
        RADBOT_CONFIG_FILE    = "/app/config.yaml"
      }

      # Minimal bootstrap config — just enough to connect to the DB.
      # Everything else is loaded from the DB credential store at startup.
      template {
        data = <<EOH
database:
  host: postgres.service.consul
  port: 5432
  user: postgres
  password: ${var.postgres_pass}
  db_name: radbot_todos
EOH
        destination = "local/config.yaml"
        env         = false
      }

      service {
        port = "http"
        name = "radbot"
        tags = [
          "traefik.enable=true",
        ]
        check {
          type     = "http"
          path     = "/health"
          interval = "30s"
          timeout  = "5s"
          # Restart the task after 3 consecutive failures, with a 120s grace
          # period after task start before failures count.
          check_restart {
            limit           = 3
            grace           = "120s"
            ignore_warnings = false
          }
        }
      }

      resources {
        cpu    = 1000
        memory = 2048
      }
    }
  }
}

# ------------------------------------------------------------------
# Variables — only bootstrap secrets
# ------------------------------------------------------------------

variable "region" {
  type = string
}

variable "dns_server_ip" {
  type = string
}

variable "postgres_pass" {
  type        = string
  description = "PostgreSQL password (needed to connect to DB where all config lives)"
}

variable "radbot_credential_key" {
  type        = string
  description = "Fernet master key for encrypting credentials/config in the DB"
}

variable "radbot_admin_token" {
  type        = string
  description = "Bearer token for /admin/ — the only pre-shared secret"
}

variable "radbot_mcp_token" {
  type        = string
  description = "Bootstrap bearer token for the MCP bridge HTTP transport. The credential-store entry `mcp_token` takes priority once set, so this is only used before the first rotate."
}

variable "shared_dir" {
  type        = string
  description = "Base path on shared-mount nodes; jobs append their own subdirectory (this job mounts <shared_dir>/ai-intel at /mnt/ai-intel)."
}


================================================
FILE: nomad_jobs/core-infra/coredns/README.md
================================================
### CoreDNS
You can place extra CoreDNS configuration in the Consul KV store at `apps/coredns/corefile`; it will be deployed together with the job.



================================================
FILE: nomad_jobs/core-infra/coredns/nomad.job
================================================
# Homelab DNS: two CoreDNS instances on distinct hosts, fronted by a
# keepalived VRRP sidecar that floats the 192.168.50.2/.3 virtual IPs.
job "coredns" {
  region = var.region
  datacenters = ["dc1"]
  type = "service"
  priority = 100

  meta {
      job_file = "nomad_jobs/core-infra/coredns/nomad.job"
      version = "11"  // Fix $ORIGIN in db.k8s zone (was set to the lab TLD)
  }

  # Only schedule on nodes tagged as DNS hosts.
  constraint {
    attribute = "${meta.dns}"
    operator  = "="
    value     = "true"
  }

  group "dns" {
    count = 2
    
    # Keep the two replicas on different hosts for real redundancy.
    constraint {
      operator = "distinct_hosts"
      value    = "true"
    }
    
    network {
      mode = "host"
      port "dns" {
        static = "53"
        host_network = "lan"
      }
      port "metrics" {
        static = "9153"
        host_network = "lan"
      }
    }

    restart {
      attempts = 3
      delay    = "15s"
      interval = "10m"
      mode     = "delay"
    }

    update {
      max_parallel     = 1
      min_healthy_time = "60s"
      auto_revert      = true
      auto_promote     = true
      canary           = 2
    }

    # VRRP sidecar: holds the floating VIPs and fails them over between the
    # two DNS hosts. Runs as a prestart sidecar so it outlives CoreDNS restarts.
    task "keepalived-dns" {
      driver = "docker"
      
      lifecycle {
        hook = "prestart"
        sidecar = true
      }
      
      config {
        image = "osixia/keepalived:2.3.4"
        network_mode = "host"
        force_pull = false
        volumes = [
          # keepalived.conf is written directly (see meta version note).
          "local/keepalived.conf:/etc/keepalived/keepalived.conf"
        ]
        cap_add = ["NET_ADMIN", "NET_BROADCAST", "NET_RAW"]
      }

      # Unicast peers are discovered from the "coredns" Consul service; splay
      # staggers restarts so both instances don't bounce at once.
      template {
        destination = "local/keepalived.conf"
        change_mode = "restart"
        splay       = "1m"
        data        = <<EOH
vrrp_instance VI_1 {
    state BACKUP
    interface {{ sockaddr "GetPrivateInterfaces | include \"network\" \"192.168.50.0/24\" | attr \"name\"" }}
    virtual_router_id 51
    priority 100
    advert_int 1

    unicast_peer {
{{- range service "coredns" }}
        {{ .Address }}
{{- end }}
    }

    virtual_ipaddress {
        192.168.50.2/24
        192.168.50.3/24
    }
}
EOH
      }
      
      resources {
        cpu    = 100
        memory = 64
      }
    }

    task "coredns" {
      driver = "docker"
      
      config {
        image = "coredns/coredns:1.14.2"
        network_mode = "host"
        force_pull = false
        ports = ["dns", "metrics"]
        args = ["-conf", "/local/coredns/corefile"]
      }

      service {
        port = "dns"
        name = "coredns"
        tags = ["coredns"]
        check {
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }
      
      # Second registration of the same service name exposes the metrics port
      # for Prometheus discovery via the "metrics" tag.
      service {
        port = "metrics"
        name = "coredns"
        tags = ["metrics", "coredns"]
      }

      # Corefile: default resolver plus consul, fritz.box, lab-TLD and k8s zones.
      # Binds both the node IP and the keepalived VIPs on every server block.
      template {
        data = <<EOH
. {
  bind {{ env "NOMAD_IP_dns" }} 192.168.50.2 192.168.50.3
  forward . 8.8.8.8
  log
  errors
  prometheus {{ env "NOMAD_IP_metrics" }}:9153
}
consul.:53 {
  bind {{ env "NOMAD_IP_dns" }} 192.168.50.2 192.168.50.3
  forward . {{ env "NOMAD_IP_dns" }}:8600
  log
  prometheus {{ env "NOMAD_IP_metrics" }}:9153
}
fritz.box.:53 {
  bind {{ env "NOMAD_IP_dns" }} 192.168.50.2 192.168.50.3
  forward . 192.168.50.1:53
  log
  prometheus {{ env "NOMAD_IP_metrics" }}:9153
}
${var.tld}. {
  bind {{ env "NOMAD_IP_dns" }} 192.168.50.2 192.168.50.3

  file /local/coredns/zones/db.home.lab ${var.tld}

}
k8s. {
  bind {{ env "NOMAD_IP_dns" }} 192.168.50.2 192.168.50.3

  file /local/coredns/zones/db.k8s k8s

}
EOH
        destination = "local/coredns/corefile"
        env         = false
        change_mode = "signal"
        change_signal = "SIGHUP"
        left_delimiter  = "{{"
        right_delimiter = "}}"
      }

      # Zone file for the lab TLD; serial is the render-time unix timestamp.
      template {
        change_mode   = "signal"
        change_signal = "SIGUSR1"
        destination = "local/coredns/zones/db.home.lab"
        data = <<EOH
$ORIGIN ${var.tld}.
$TTL    604800
${var.tld}.         IN SOA	ns1.${var.tld}. admin.${var.tld}. (
         {{ timestamp "unix" }}        ; Serial, current unix timestamp
             604800        ; Refresh
              86400        ; Retry
            2419200        ; Expire
             604800 )      ; Negative Cache TTL

; name servers - NS records
${var.tld}.         IN NS	 ns1.${var.tld}.
${var.tld}.         IN NS	 ns2.${var.tld}.

; name servers - A records
ns1                      IN A   192.168.50.1
ns2                      IN A   192.168.50.2

{{- /*  Point domains to the floating IP from keepalived */}}
; services - A records
lab.${var.tld}.         IN A   192.168.50.20
*                       IN A   192.168.50.20
@                       IN A   192.168.50.20

EOH
      }

      # Zone file for the k8s. zone. The $ORIGIN must be k8s. so that the
      # relative ns1/ns2/@ records resolve under k8s. and match the NS records
      # (previously this was set to the lab TLD, putting them in the wrong zone).
      template {
        change_mode   = "signal"
        change_signal = "SIGUSR1"
        destination = "local/coredns/zones/db.k8s"
        data = <<EOH
$ORIGIN k8s.
$TTL    604800
k8s.         IN SOA	ns1.k8s. admin.k8s. (
         {{ timestamp "unix" }}        ; Serial, current unix timestamp
             604800        ; Refresh
              86400        ; Retry
            2419200        ; Expire
             604800 )      ; Negative Cache TTL

; name servers - NS records
k8s.         IN NS	 ns1.k8s.
k8s.         IN NS	 ns2.k8s.

; name servers - A records
ns1                      IN A   192.168.50.1
ns2                      IN A   192.168.50.2

{{- /*  Point domains to the floating IP from keepalived */}}
; services - A records
lab.k8s.                IN A   192.168.50.93
*.k8s.                  IN A   192.168.50.93
@                       IN A   192.168.50.93

EOH
      }
      
      resources {
        cpu    = 100
        memory = 128
      }
    }
  }
}

variable "region" {}
variable "tld" {}


================================================
FILE: nomad_jobs/core-infra/github-runner/nomad.job
================================================
# Self-hosted GitHub Actions runners (3 persistent replicas) registered
# against the hashi-homelab repository.
job "github-runner" {
  datacenters = ["dc1"]
  type        = "service"

  meta {
      job_file = "nomad_jobs/core-infra/github-runner/nomad.job"
version = "3"
  }

  group "runners" {
    count = 3
    # Don't reschedule in case of failure or drain
    reschedule {
      attempts  = 0
      unlimited = false
    }
   
    restart {
      attempts = 3
      delay = "30s"
      interval = "3m"
      mode = "delay"
    }

    network {
      port "http" { 
        host_network = "lan"
        to = "9252" 
      }
    }


    update {
      max_parallel     = 1
      min_healthy_time = "30s"
      auto_revert      = true
    }

    task "runner" {
      driver = "docker"
      # Give in-flight workflow steps time to finish on stop.
      kill_timeout = "25s"

      # Rendered into the task environment (env = true). EPHEMERAL=0 keeps the
      # runner registered between jobs; the PAT comes from a Nomad variable.
      template {
        env         = true
        destination = "secrets/env"
        data        = <<-EOH
        RUNNER_NAME_PREFIX = "amd64"
        RUNNER_GROUP = "Default"
        RUNNER_SCOPE = "repo"
        REPO_URL = "https://github.com/perrymanuk/hashi-homelab"
        ACCESS_TOKEN = "${var.github_pat}"
        EPHEMERAL = "0"
        DISABLE_AUTO_UPDATE = "1"
        LABELS = "${var.labels}"
        EOH
      }

      # Empty docker auth config bind-mounted into the runner so docker builds
      # inside workflows have a config.json present.
      template {
        data = <<-EOH
        {
        	"auths": {
        		"https://index.docker.io/v1/": {
              "auth": ""
        		}
	        }
        }
        EOH
        destination = "secrets/config.json"
      }

      resources {
        cpu    = 100
        memory = 128
      }

      config {
        image       = "myoung34/github-runner:2.333.1"
        extra_hosts = ["nomad.service.home:192.168.50.120"]
        ports       = ["http"]
        userns_mode = "host"
        # NOTE(review): privileged + the docker.sock bind below give workflow
        # code effectively root-equivalent access to the host — acceptable for
        # a trusted homelab repo, but do not reuse for untrusted repos.
        privileged  = true
        mounts = [
          {
            type     = "bind"
            source   = "/var/run/docker.sock"
            target   = "/var/run/docker.sock"
            readonly = false
            bind_options = {
              propagation = "rprivate"
            }
          },
          {
            # Relative source resolves inside the allocation's task directory.
            type     = "bind"
            source   = "secrets/config.json"
            target   = "/root/.docker/config.json"
            readonly = false
            bind_options = {
              propagation = "rprivate"
            }
          }
        ]
      }
    }
  }
}


variable "labels" {
  type = string
  default = "self-hosted"
}

variable "github_pat" {}


================================================
FILE: nomad_jobs/core-infra/haproxy/nomad.job
================================================
# System job: HAProxy on every node, routing *.homelab hostnames to Consul
# services tagged "net-internal", with a stats/metrics listener.
job "haproxy" {
  region = var.region
  datacenters = ["dc1"]
  type = "system"

  meta {
      job_file = "nomad_jobs/core-infra/haproxy/nomad.job"
version = "6"
  }

  group "lbs" {
    count = 1

    restart {
      attempts = 3
      delay    = "15s"
      interval = "10m"
      mode     = "delay"
    }

    update {
      max_parallel     = 1
      min_healthy_time = "30s"
      auto_revert      = true
    }
    task "haproxy" {
      driver = "docker"
      service {
        tags = ["haproxy"]
        name = "haproxy"
        port = "http"

        check {
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }

      }
      # Per-alloc metrics registration; the alloc ID tag disambiguates nodes.
      service {
        tags = ["metrics", "${NOMAD_ALLOC_ID}"]
        name = "haproxy-metrics"
        port = "metrics"

        check {
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }

      service {
        tags = ["metrics", "${NOMAD_ALLOC_ID}"]
        name = "service-mesh"
        port = "http"

        check {
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }

      config {
        image = "haproxy:3.3.6-alpine"
        # -W: master-worker mode, required for seamless SIGUSR2 reloads.
        args = ["-W", "-f", "local/etc/haproxy.cfg"]
        network_mode = "host"
      }

      # Main config rendered from Consul: one ACL + backend per service tagged
      # "net-internal". Note the mixed interpolation styles inside the heredoc:
      # {{ env ... }} is consul-template at render time, while the bare
      # ${NOMAD_ADDR_metrics} is Nomad runtime-env interpolation.
      # NOTE(review): "option http-use-htx" has been a deprecated no-op since
      # HAProxy 2.x — confirm the 3.3 image still accepts it.
      template {
data = <<EOH
global
  maxconn     20000
  pidfile     /run/haproxy.pid
  stats timeout 2m
  daemon

defaults
  retry-on all-retryable-errors
  option http-use-htx
  errorfile 503 local/etc/error503.http

frontend http
  bind {{ env "NOMAD_ADDR_http" }}
  # options
  http-request add-header x-forwarded-proto http
  maxconn 50000
  mode http
  timeout client 0s
  timeout server 0s
  # acls
{{ range services }}{{ if .Tags | contains "net-internal" }}
  acl {{ .Name }}_net-internal hdr_reg(Host) -i {{ .Name }}.homelab
  acl {{ .Name }}_net-internal base_dom {{ .Name }}.homelab
  use_backend {{ .Name }}_net-internal if {{ .Name }}_net-internal
{{ end }}{{ end }}


# services

{{ range services -}}{{ if .Tags | contains "net-internal" -}}
backend {{ .Name }}_net-internal
  mode http
  option redispatch
  retries 10

  timeout connect 5s
  timeout queue 30s
  timeout server 30s

  {{ range service .Name -}}
  {{ if .Tags | contains "net-internal" -}}
  server {{ .ID }} {{ .Address }}:{{ .Port }}
  {{ end -}}
  {{ end }}
{{ end -}}
{{ end -}}

listen {{ env "NOMAD_IP_http" }}
  mode http

  bind ${NOMAD_ADDR_metrics}

  timeout client 30s
  timeout connect 5s
  timeout server  30s
  timeout queue   30s

  http-request use-service prometheus-exporter if { path /metrics }

  stats enable
  stats uri /
  stats show-node
  stats refresh 30s
  stats show-legends


EOH
        destination = "local/etc/haproxy.cfg"
        env         = false
        change_mode = "signal"
        change_signal = "SIGUSR2"
      }

      # Static body served for 503s via the errorfile directive above.
      template {
data = <<EOH
HTTP/1.0 503 Service Unavailable
Cache-Control: no-cache
Connection: close
Content-Type: text/plain

Error 503: The specified service was not found or has no allocations. Please check your service configuration and try again

EOH
        destination = "local/etc/error503.http"
        env         = false
        change_mode = "signal"
        change_signal = "SIGHUP"
      }

      resources {
        cpu = 100
        memory = 64
        network {
          port "http" { 
            static = "80" 
          }
          port "metrics" {}
        }
      }
    }
  }
}




================================================
FILE: nomad_jobs/core-infra/iscsi-csi-plugin/controller.job
================================================
# democratic-csi controller plugin for TrueNAS-backed iSCSI volumes.
# The controller side creates/deletes zvols and iSCSI targets via the
# TrueNAS API + SSH; node-side mounting is handled by the companion node.job.
job "democratic-csi-iscsi-controller" {
  
  meta {
  job_file = "nomad_jobs/core-infra/iscsi-csi-plugin/controller.job"
  }
datacenters = ["dc1"]

  group "controller" {

    restart {
      attempts = 3
      delay    = "15s"
      interval = "10m"
      mode     = "delay"
    }

    task "plugin" {
      driver = "docker"

      config {
        image = "docker.io/democraticcsi/democratic-csi:v1.9.5"

        args = [
          "--csi-version=1.5.0",
          # must match the csi_plugin.id attribute below
          "--csi-name=org.democratic-csi.iscsi",
          "--driver-config-file=${NOMAD_TASK_DIR}/driver-config-file.yaml",
          "--log-level=info",
          "--csi-mode=controller",
          "--server-socket=/csi/csi.sock",
        ]
      }

      # Driver config rendered with the TrueNAS credentials from Nomad vars.
      template {
        destination = "${NOMAD_TASK_DIR}/driver-config-file.yaml"

        data = <<EOH
driver: freenas-iscsi
instance_id:
httpConnection:
  protocol: https
  host: 192.168.50.208
  port: 443
  # use only 1 of apiKey or username/password
  # if both are present, apiKey is preferred
  # apiKey is only available starting in TrueNAS-12
  apiKey: ${var.truenas_api_key}
  username: iscsi-no
  password: "${var.truenas_iscsi_pass}"
  allowInsecure: true
  # use apiVersion 2 for TrueNAS-12 and up (will work on 11.x in some scenarios as well)
  apiVersion: 2
sshConnection:
  host: 192.168.50.208
  port: 22
  username: root
  # use either password or key
  password: "${var.truenas_iscsi_pass}"
zfs:
  # TrueNAS SCALE 24.10+ (Electric Eel) paths differ from FreeBSD defaults
  cli:
    paths:
      zfs: /usr/sbin/zfs
      zpool: /usr/sbin/zpool
      sudo: /usr/bin/sudo
      chroot: /usr/sbin/chroot
  
  # can be used to set arbitrary values on the dataset/zvol
  # can use handlebars templates with the parameters from the storage class/CO
  # TODO: set up handlebars templates to make this far more awesome
  #datasetProperties:
  #   "org.freenas:description": "created via democratic-csi"

  datasetParentName: ssd-vms0/nomad/vols
  # do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
  # they may be siblings, but neither should be nested in the other
  detachedSnapshotsDatasetParentName: ssd-vms0/nomad/snaps
  zvolCompression: "lz4"
  zvolDedupe: ""
  zvolEnableReservation: False
  zvolBlocksize: ""
iscsi:
  targetPortal: "192.168.50.208:3260"
  targetPortals: []
  interface:

  # MUST ensure uniqueness
  # full iqn limit is 223 bytes, plan accordingly
  # default is //template name isn't defined!
  #nameTemplate: "{ { parameters.[csi.storage.k8s.io/pvc/namespace] }}-{ { parameters.[csi.storage.k8s.io/pvc/name] }}"
  namePrefix: csi-
  nameSuffix: "-discovery"
  # add as many as needed
  targetGroups:
    # get the correct ID from the "portal" section in the UI
    - targetGroupPortalGroup: 1
      # get the correct ID from the "initiators" section in the UI
      targetGroupInitiatorGroup: 1
      # None, CHAP, or CHAP Mutual
      targetGroupAuthType: None
      # get the correct ID from the "Authorized Access" section of the UI
      # only required if using Chap
      #targetGroupAuthGroup:

  extentInsecureTpc: true
  extentXenCompat: false
  extentDisablePhysicalBlocksize: true
  # 512, 1024, 2048, or 4096,
  extentBlocksize: 512
  # "" (let FreeNAS decide, currently defaults to SSD), Unknown, SSD, 5400, 7200, 10000, 15000
  extentRpm: "SSD"
  # 0-100 (0 == ignore)
  extentAvailThreshold: 0
EOH
      }

      csi_plugin {
        # must match --csi-name arg
        id        = "org.democratic-csi.iscsi"
        type      = "controller"
        mount_dir = "/csi"
      }

      resources {
        cpu    = 500
        memory = 128
      }
    }
  }
}

variable "truenas_api_key" {}
variable "truenas_iscsi_pass" {}


================================================
FILE: nomad_jobs/core-infra/iscsi-csi-plugin/node.job
================================================
# democratic-csi node plugin: runs on every node (system job) and performs
# the privileged host-side iSCSI attach/format/mount for CSI volumes.
job "democratic-csi-iscsi-node" {
  
  meta {
  job_file = "nomad_jobs/core-infra/iscsi-csi-plugin/node.job"
  }
datacenters = ["dc1", "cheese"]
  priority = 100
  # you can run node plugins as service jobs as well, but this ensures
  # that all nodes in the DC have a copy
  type = "system"

  group "nodes" {

    restart {
      attempts = 3
      delay    = "15s"
      interval = "10m"
      mode     = "delay"
    }

    task "plugin" {
      driver = "docker"

      env {
        # Unique per-node CSI identity, derived from the hostname.
        CSI_NODE_ID = "${attr.unique.hostname}"
        
        # if you run into a scenario where your iscsi volumes are zeroed each time they are mounted,
        # you can configure the fs detection system used with the following envvar:
        #FILESYSTEM_TYPE_DETECTION_STRATEGY = "blkid"
      }

      config {
        image = "docker.io/democraticcsi/democratic-csi:v1.9.5"

        args = [
          "--csi-version=1.5.0",
          # must match the csi_plugin.id attribute below
          "--csi-name=org.democratic-csi.iscsi",
          "--driver-config-file=${NOMAD_TASK_DIR}/driver-config-file.yaml",
          "--log-level=debug",
          "--csi-mode=node",
          "--server-socket=/csi/csi.sock",
        ]

        # node plugins must run as privileged jobs because they
        # mount disks to the host
        privileged = true
        ipc_mode = "host"
        network_mode = "host"

        # Full host filesystem bind so the plugin can mount volumes on the host.
        mount {
          type = "bind"
          target = "/host"
          source = "/"
          readonly=false
        }
        
        # if you run into a scenario where your iscsi volumes are zeroed each time they are mounted,
        # you can try uncommenting the following additional mount block:
        mount {
          type     = "bind"
          target   = "/run/udev"
          source   = "/run/udev"
          readonly = true
        }
      }

      # Same driver config as the controller job; must stay in sync with it.
      template {
        destination = "${NOMAD_TASK_DIR}/driver-config-file.yaml"

        data = <<EOH
driver: freenas-iscsi
instance_id:
httpConnection:
  protocol: https
  host: 192.168.50.208
  port: 443
  # use only 1 of apiKey or username/password
  # if both are present, apiKey is preferred
  # apiKey is only available starting in TrueNAS-12
  apiKey: ${var.truenas_api_key}
  username: iscsi-no
  password: "${var.truenas_iscsi_pass}"
  allowInsecure: true
  # use apiVersion 2 for TrueNAS-12 and up (will work on 11.x in some scenarios as well)
  # leave unset for auto-detection
  apiVersion: 2
sshConnection:
  host: 192.168.50.208
  port: 22
  username: root
  # use either password or key
  password: "${var.truenas_iscsi_pass}"
zfs:
  # can be used to override defaults if necessary
  # the example below is useful for TrueNAS 12
  #cli:
  #  sudoEnabled: true
  #
  #  leave paths unset for auto-detection
  #  paths:
  #    zfs: /usr/local/sbin/zfs
  #    zpool: /usr/local/sbin/zpool
  #    sudo: /usr/local/bin/sudo
  #    chroot: /usr/sbin/chroot
  
  # can be used to set arbitrary values on the dataset/zvol
  # can use handlebars templates with the parameters from the storage class/CO
  # TODO: set up handlebars templates to make this far more awesome
  #datasetProperties:
  #   "org.freenas:description": "created via democratic-csi"

  datasetParentName: ssd-vms0/nomad/vols
  # do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
  # they may be siblings, but neither should be nested in the other
  detachedSnapshotsDatasetParentName: ssd-vms0/nomad/snaps
  zvolCompression: ""
  zvolDedupe: ""
  zvolEnableReservation: False
  zvolBlocksize: ""
iscsi:
  targetPortal: "192.168.50.208:3260"
  targetPortals: []
  interface:

  # MUST iensure uniqueness
  # full iqn limit is 223 bytes, plan accordingly
  # default is //template name isn't defined!
  #nameTemplate: "{ { parameters.[csi.storage.k8s.io/pvc/namespace] }}-{ { parameters.[csi.storage.k8s.io/pvc/name] }}"
  namePrefix: csi-
  nameSuffix: "-discovery"
  # add as many as needed
  targetGroups:
    # get the correct ID from the "portal" section in the UI
    - targetGroupPortalGroup: 1
      # get the correct ID from the "initiators" section in the UI
      targetGroupInitiatorGroup: 1
      # None, CHAP, or CHAP Mutual
      targetGroupAuthType: None
      # get the correct ID from the "Authorized Access" section of the UI
      # only required if using Chap
      #targetGroupAuthGroup:

  extentInsecureTpc: true
  extentXenCompat: false
  extentDisablePhysicalBlocksize: true
  # 512, 1024, 2048, or 4096,
  extentBlocksize: 512
  # "" (let FreeNAS decide, currently defaults to SSD), Unknown, SSD, 5400, 7200, 10000, 15000
  extentRpm: "SSD"
  # 0-100 (0 == ignore)
  extentAvailThreshold: 0
EOH
      }

      csi_plugin {
        # must match --csi-name arg
        id        = "org.democratic-csi.iscsi"
        type      = "node"
        mount_dir = "/csi"
      }

      resources {
        cpu    = 500
        memory = 128
      }
    }
  }
}

# NOTE(review): ssh_id is declared but not referenced anywhere in this job
# file — confirm whether a caller still passes it before removing.
variable "ssh_id" {}
variable "truenas_api_key" {}
variable "truenas_iscsi_pass" {}


================================================
FILE: nomad_jobs/core-infra/keepalived/TODO.md
================================================
# Keepalived Improvements TODO

## Problem
The osixia/keepalived image uses environment variables (env.yaml) to generate keepalived.conf at startup. This doesn't support dynamic config reloads via SIGHUP because the conf isn't regenerated from env vars on signal.

Combined with Nomad templates that use `change_mode = "restart"` and dynamic Consul service lookups, this causes restart loops.

## Proposed Solution
Replace osixia/keepalived with plain keepalived using a direct config template:

```hcl
config {
  image = "osixia/keepalived:2.0.20"  # or alpine + keepalived
  volumes = [
    "local/keepalived.conf:/etc/keepalived/keepalived.conf"
  ]
}

template {
  destination = "local/keepalived.conf"
  change_mode = "signal"
  change_signal = "SIGHUP"
  data = <<EOH
vrrp_instance VI_1 {
  state BACKUP
  interface {{ sockaddr "GetPrivateInterfaces | include \"network\" \"192.168.50.0/24\" | attr \"name\"" }}
  virtual_router_id 51
  priority 100
  nopreempt
  virtual_ipaddress {
    192.168.50.50/24
  }
}
EOH
}
```

## Alternatives Considered
- **vip-manager** - lightweight, single purpose
- **kube-vip** - modern, supports ARP/BGP
- **ucarp** - simple CARP implementation

## Affected Jobs
- `nomad_jobs/core-infra/coredns/nomad.job` (keepalived-dns sidecar)
- `nomad_jobs/core-infra/traefik/nomad.job` (keepalived-traefik sidecar)


================================================
FILE: nomad_jobs/core-infra/keepalived/nomad.job
================================================
# System job: runs one keepalived instance on every node to hold the
# VRRP-managed virtual IPs 192.168.50.2-3 on the LAN, with unicast
# peering between all Nomad clients.
job "keepalived" {
  datacenters = ["dc1"]
  type        = "system"
  priority    = 100

  meta {
      job_file = "nomad_jobs/core-infra/keepalived/nomad.job"
version = "5"
  }

  group "keepalived" {

    restart {
      attempts = 3
      delay    = "15s"
      interval = "10m"
      mode     = "delay"
    }

    update {
      max_parallel     = 1
      min_healthy_time = "30s"
      auto_revert      = true
    }

    task "keepalived" {
      driver = "docker"
      config {
        image = "osixia/keepalived:2.3.4"
        # Host networking is required so VRRP advertisements and the
        # virtual IPs operate on the node's real LAN interface.
        network_mode = "host"
        volumes = [
            # local/ (where the template below renders env.yaml) is mounted
            # into the osixia image's startup-environment directory.
            "local/:/container/environment/01-custom"
        ]
        cap_add = ["NET_ADMIN", "NET_BROADCAST", "NET_RAW"]
      }
      # change_mode = "restart": the osixia image generates keepalived.conf
      # from these environment variables only at container start, so a
      # signal would not pick up changes (see keepalived/TODO.md).
      # splay spreads the restarts out so not all nodes bounce at once.
      template {
        destination = "local/env.yaml"
        change_mode = "restart"
        splay       = "1m"
        data        = <<EOH
KEEPALIVED_VIRTUAL_IPS:
  - 192.168.50.2/24
  - 192.168.50.3/24
KEEPALIVED_UNICAST_PEERS:
{{- with $node := node -}}
{{ range nodes }}
{{- if ne .Address $node.Node.Address }}
  - {{ .Address }}
{{- end -}}
{{- end -}}
{{- end }}
KEEPALIVED_INTERFACE: {{ sockaddr "GetPrivateInterfaces | include \"network\" \"192.168.50.0/24\" | attr \"name\"" }}
EOH
      }
      resources {
        cpu    = 100
        memory = 32
      }
    }
  }
}


================================================
FILE: nomad_jobs/core-infra/nfs-csi-plugin/controller.job
================================================
# Registers the official Kubernetes NFS CSI driver with Nomad as a
# "controller" plugin. Host-side mounting is done by the companion
# system job in nodes.job, which shares the plugin id "nfsofficial".
job "plugin-nfs-controller" {
  
  meta {
  job_file = "nomad_jobs/core-infra/nfs-csi-plugin/controller.job"
  }
datacenters = ["dc1"]
  group "controller" {

    restart {
      attempts = 3
      delay    = "15s"
      interval = "10m"
      mode     = "delay"
    }

    task "plugin" {
      driver = "docker"
      config {
        image = "registry.k8s.io/sig-storage/nfsplugin:v4.13.2"
        args = [
          "--v=5",
          # nodeid must be unique per host; the hostname attribute is.
          "--nodeid=${attr.unique.hostname}",
          "--endpoint=unix:///csi/csi.sock",
          "--drivername=nfs.csi.k8s.io"
        ]
      }
      csi_plugin {
        # Same id as the node plugin job so Nomad pairs them.
        id        = "nfsofficial"
        type      = "controller"
        mount_dir = "/csi"
      }
      resources {
        memory = 128
        cpu    = 100
      }
    }
  }
}



================================================
FILE: nomad_jobs/core-infra/nfs-csi-plugin/nodes.job
================================================
# Node-side half of the official Kubernetes NFS CSI driver: mounts NFS
# volumes onto the hosts. Paired with controller.job via plugin id
# "nfsofficial".
job "plugin-nfs-nodes" {
  
  meta {
  job_file = "nomad_jobs/core-infra/nfs-csi-plugin/nodes.job"
  }
datacenters = ["dc1"]
  # you can run node plugins as service jobs as well, but this ensures
  # that all nodes in the DC have a copy.
  type = "system"
  group "nodes" {

    restart {
      attempts = 3
      delay    = "15s"
      interval = "10m"
      mode     = "delay"
    }

    task "plugin" {
      driver = "docker"
      config {
        image = "registry.k8s.io/sig-storage/nfsplugin:v4.13.2"
        args = [
          "--v=5",
          # nodeid must be unique per host; the hostname attribute is.
          "--nodeid=${attr.unique.hostname}",
          "--endpoint=unix:///csi/csi.sock",
          "--drivername=nfs.csi.k8s.io"
        ]
        # node plugins must run as privileged jobs because they
        # mount disks to the host
        privileged = true
      }
      csi_plugin {
        id        = "nfsofficial"
        type      = "node"
        mount_dir = "/csi"
      }
      resources {
        memory = 64
        cpu = 100
      }
    }
  }
}



================================================
FILE: nomad_jobs/core-infra/pihole/nomad.job
================================================
# Pi-hole DNS/ad-blocker. Pinned to nodes labeled meta.dns=true that
# also have the shared mount; persistent config lives on a CSI volume.
job "pihole" {
  region = var.region
  datacenters = ["dc1"]
  type        = "service"
  priority    = 100

  meta {
      job_file = "nomad_jobs/core-infra/pihole/nomad.job"
version = "3"
  }

  constraint {
    attribute = "${meta.dns}"
    operator  = "="
    value     = "true"
  }

  constraint {
    attribute = "${meta.shared_mount}"
    operator  = "="
    value     = "true"
  }

  group "infra" {
    count = 1 

    network {
      # DNS is exposed on host port 8053 and mapped to container port 53;
      # presumably something upstream (e.g. a LAN resolver/VIP) forwards
      # port 53 here — TODO confirm.
      port "dns" { 
        host_network = "lan"
        static = "8053" 
        to     = "53" 
      }
      port "web" { 
        host_network = "lan"
        to = "80" 
      }
    }

    # NOTE(review): volume.hcl registers "pihole3" with attachment_mode
    # "block-device", while this job requests "file-system" — confirm the
    # registration's capabilities permit a file-system attachment.
    volume "pihole" {
      type      = "csi"
      read_only = false
      source    = "pihole3"
      access_mode = "single-node-writer"
      attachment_mode = "file-system"
    }


    restart {
      attempts = 3
      delay    = "15s"
      interval = "10m"
      mode     = "delay"
    }

    update {
      max_parallel     = 1
      min_healthy_time = "30s"
      auto_revert      = true
    }

    task "pihole" {
      driver = "docker"
      config {
        image = "pihole/pihole:2026.04.0"
        ports = ["dns", "web"]
        volumes = [
          # Extra dnsmasq config shared via the host's shared mount.
          "${var.shared_dir}pihole-dnsmasq:/etc/dnsmasq.d/",
        ]
      }

      volume_mount {
        volume      = "pihole"
        destination = "/etc/pihole"
        read_only   = false
      }

      service {
         tags = [
          "traefik.enable=true"
         ]
         name = "pihole"
	 provider = "consul"
         port = "web"
         check {
           type     = "tcp"
           interval = "10s"
           timeout  = "2s"
         }
      }

      # FTLCONF_* variables configure Pi-hole v6 FTL.
      # NOTE(review): the web API password is set to the empty string —
      # confirm that unauthenticated web access is intended on this LAN.
      env {
        TZ                             = "Europe/Amsterdam"
        FTLCONF_webserver_api_password = ""
        FTLCONF_dns_upstreams          = "8.8.8.8;1.1.1.1"
        FTLCONF_dns_listeningMode      = "ALL"
        FTLCONF_misc_etc_dnsmasq_d     = "true"
      }

      resources {
        cpu    = 300
        memory = 128
      }
    }
  }
}

variable "region" {}


variable "shared_dir" {}



================================================
FILE: nomad_jobs/core-infra/pihole/volume.hcl
================================================
# CSI volume registration for the pihole job's /etc/pihole data,
# backed by the democratic-csi iSCSI plugin (TrueNAS).
id           = "pihole3"
external_id  = "pihole3"
name         = "pihole3"
type         = "csi"
plugin_id    = "org.democratic-csi.iscsi"
capacity_min = "1GiB"
capacity_max = "1GiB"

# Exposed as a raw iSCSI block device.
capability {
  access_mode     = "single-node-writer"
  attachment_mode = "block-device"
}

# Formatted ext4, mounted with noatime.
mount_options {
  fs_type     = "ext4"
  mount_flags = ["noatime"]
}



================================================
FILE: nomad_jobs/core-infra/smtp/nomad.job
================================================
# Minimal outbound SMTP relay for the homelab, bound to host port 25.
job "smtp" {
  region = var.region
  datacenters = ["dc1"]
  type        = "service"

  meta {
      job_file = "nomad_jobs/core-infra/smtp/nomad.job"
version = "7"
  }

  group "mail" {
    count = 1 
    network {
      port "smtp" {
        host_network = "lan"
        static = "25"
      }
    }


    restart {
      attempts = 3
      delay    = "15s"
      interval = "10m"
      mode     = "delay"
    }

    update {
      max_parallel     = 1
      min_healthy_time = "30s"
      auto_revert      = true
    }

    task "smtp" {
      driver = "docker"
      config {
        image = "ixdotai/smtp"
        network_mode = "host"
        ports = ["smtp"]
        force_pull = "true"
      }

      # Env file for the ixdotai/smtp image; RELAY_NETWORKS lists source
      # ranges allowed to relay.
      # NOTE(review): 172.0.0.0/8 and 100.0.0.0/8 are far broader than the
      # conventional private ranges (172.16.0.0/12 for Docker,
      # 100.64.0.0/10 for CGNAT) and include public address space —
      # confirm this is intentional.
      # NOTE(review): NET_DEV is hard-coded to enp2s0; this breaks if the
      # allocation lands on a node with a different NIC name.
      template {
data = <<EOH
MAILNAME=${var.tld}
RELAY_NETWORKS=:172.0.0.0/8:127.0.0.1/32:10.0.0.0/8:100.0.0.0/8:192.168.50.0/24
NET_DEV=enp2s0
DISABLE_IPV6=true
EOH
        destination = "local/env"
        env         = true
      }

      service {
        port = "smtp"
	name = "smtp"
        check {
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }

      resources {
        cpu    = 100
        memory = 32
      }
    }
  }
}

variable "region" {
    type = string
}

variable "tld" {
    type = string
}

variable "shared_dir" {
    type = string
}


================================================
FILE: nomad_jobs/core-infra/tailscale/nomad.job
================================================
# Tailscale subnet router ("home-gateway") advertising the LAN
# 192.168.50.0/24 over the tailnet. Node state persists on a CSI volume
# so the machine keeps its tailnet identity across reschedules.
job "tailscale" {
  region = var.region
  datacenters = ["dc1"]
  type        = "service"

  meta {
      job_file = "nomad_jobs/core-infra/tailscale/nomad.job"
      version = "6"
  }

  constraint {
    attribute = "${meta.shared_mount}"
    operator  = "="
    value     = "true"
  }

  group "networking" {
    count = 1 

    volume "tailscale" {
      type      = "csi"
      read_only = false

      source    = "tailscale2"
      access_mode = "single-node-writer"
      attachment_mode = "file-system"
    }


    restart {
      attempts = 3
      delay    = "15s"
      interval = "10m"
      mode     = "delay"
    }

    update {
      max_parallel     = 1
      min_healthy_time = "30s"
      auto_revert      = true
    }

    task "tailscale" {
      driver = "docker"
      config {
        image = "tailscale/tailscale:v1.96.5"
        network_mode = "host"
        force_pull = "true"
        privileged = true
        cap_add = ["NET_ADMIN", "NET_RAW"]
        # NOTE(review): TS_USERSPACE="true" below selects userspace
        # networking, which should not need /dev/net/tun — confirm whether
        # this mount (and privileged) are still required.
	volumes = [
          "/dev/net/tun:/dev/net/tun",
	]
      }

      volume_mount {
        volume      = "tailscale"
        destination = "/var/lib/tailscale"
        read_only   = false
      }

      # Environment for the image's entrypoint: auth key, advertised
      # routes, and persistent state location on the mounted volume.
      template {
data = <<EOH
TS_HOSTNAME="home-gateway"
TS_ROUTES="192.168.50.0/24"
TS_AUTHKEY="${var.tailscale_auth}"
TS_STATE_DIR="/var/lib/tailscale/tailscaled.state"
TS_USERSPACE="true"
TS_EXTRA_ARGS="--reset --advertise-tags=tag:nomad"
EOH
      destination = "local/env"
      env         = true
      }

      resources {
        cpu    = 200
        memory = 128
      }
    }
  }
}

variable "region" {
    type = string
}

variable "tld" {
    type = string
}

variable "shared_dir" {
    type = string
}

variable "tailscale_auth" {}


================================================
FILE: nomad_jobs/core-infra/tailscale/volume.hcl
================================================
# CSI volume registration for the tailscale job's /var/lib/tailscale
# state, backed by the democratic-csi iSCSI plugin (TrueNAS).
id           = "tailscale2"
external_id  = "tailscale2"
name         = "tailscale2"
type         = "csi"
plugin_id    = "org.democratic-csi.iscsi"
capacity_min = "1GiB"
capacity_max = "1GiB"

# Exposed as a raw iSCSI block device.
capability {
  access_mode     = "single-node-writer"
  attachment_mode = "block-device"
}

# Formatted ext4, mounted with noatime.
mount_options {
  fs_type     = "ext4"
  mount_flags = ["noatime"]
}



================================================
FILE: nomad_jobs/core-infra/tailscale-este/nomad.job
================================================
# Second Tailscale subnet-router gateway ("este-gateway") advertising
# the LAN 192.168.50.0/24 over the tailnet. Unlike the main tailscale
# job, this one replaces the image entrypoint with a templated start
# script that runs `tailscale up` alongside tailscaled.
job "tailscale-este" {
  region = var.region
  datacenters = ["dc1"]
  type        = "service"

  meta {
      job_file = "nomad_jobs/core-infra/tailscale-este/nomad.job"
version = "3"
  }

  constraint {
    attribute = "${meta.shared_mount}"
    operator  = "="
    value     = "true"
  }

  group "networking" {
    count = 1 

    # Persists tailscaled state so the node keeps its tailnet identity.
    volume "tailscale-este" {
      type      = "csi"
      read_only = false

      source    = "tailscale-este"
      access_mode = "single-node-writer"
      attachment_mode = "file-system"
    }


    restart {
      attempts = 3
      delay    = "15s"
      interval = "10m"
      mode     = "delay"
    }

    update {
      max_parallel     = 1
      min_healthy_time = "30s"
      auto_revert      = true
    }

    task "tailscale" {
      driver = "docker"
      config {
        image = "tailscale/tailscale:v1.96.5"
        # Use the templated script below instead of the image entrypoint.
        entrypoint = ["/local/start.sh"]
        network_mode = "host"
        force_pull = "true"
        privileged = true
        cap_add = ["NET_ADMIN", "NET_RAW"]
        volumes = [
          "/dev/net/tun:/dev/net/tun",
        ]
      }

      volume_mount {
        volume      = "tailscale-este"
        destination = "/var/lib/tailscale"
        read_only   = false
      }

      # Start script: retry `tailscale up` in the background until the
      # daemon is ready, while tailscaled runs in the foreground as PID 1.
      # The function uses POSIX `name()` syntax: /bin/sh in the tailscale
      # image is BusyBox ash, and the bash-only `function` keyword is not
      # portable to it.
      template {
data = <<EOH
#!/bin/sh

up() {
    until /usr/local/bin/tailscale up --snat-subnet-routes=false --auth-key="${var.tailscale_auth_este}" --advertise-routes="192.168.50.0/24" --hostname="este-gateway"
    do
        sleep 0.1
    done

}

# send this function into the background
up &

exec tailscaled --tun=userspace-networking --statedir="/var/lib/tailscale/tailscaled.state"
EOH
        destination = "local/start.sh"
        env         = false
        # perms is a string option in Nomad's template block; quoting it
        # avoids relying on implicit HCL number-to-string conversion.
        perms       = "755"
      }

      resources {
        cpu    = 200
        memory = 128
      }
    }
  }
}

variable "region" {
    type = string
}



variable "tld" {
    type = string
}

variable "shared_dir" {
    type = string
}

variable "tailscale_auth_este" {}


================================================
FILE: nomad_jobs/core-infra/tailscale-este/volume.hcl
================================================
# CSI volume registration for the tailscale-este job's
# /var/lib/tailscale state, backed by the democratic-csi iSCSI plugin.
id           = "tailscale-este"
external_id  = "tailscale-este"
name         = "tailscale-este"
type         = "csi"
plugin_id    = "org.democratic-csi.iscsi"
capacity_min = "1GiB"
capacity_max = "1GiB"

# Exposed as a raw iSCSI block device.
capability {
  access_mode     = "single-node-writer"
  attachment_mode = "block-device"
}

# Formatted ext4, mounted with noatime.
mount_options {
  fs_type     = "ext4"
  mount_flags = ["noatime"]
}



================================================
FILE: nomad_jobs/core-infra/traefik/config/consul-catalog.yml
================================================
# Enable the Consul Catalog provider (sample configuration).
# NOTE(review): the "foobar" values are placeholders and must be replaced
# before use. Traefik's documented YAML key is `consulCatalog`; confirm
# the all-lowercase spelling here is accepted by your Traefik version.
providers:
  consulcatalog:

    # Expose Consul Catalog services by default in Traefik.
    exposedByDefault: true

    # Defines the consul address endpoint.
    address: 127.0.0.1:8500

    # Defines the scheme used.
    scheme: "foobar"

    # Defines the DC.
    datacenter: "foobar"

    # Defines the token.
    token: "foobar"

    # Defines the endpoint wait time.
    endpointWaitTime: "15s"

    # Defines Consul Catalog Provider TLS endpoint.
    endpoint:
      tls:

        # Defines Consul Catalog Provider endpoint.
        caOptional: true
        cert: "foobar"
        key: "foobar"
        insecureSkipVerify: true


================================================
FILE: nomad_jobs/core-infra/traefik/config/consul.yml
================================================
# Enable the Consul Catalog provider (sample configuration; this file is
# an identical copy of consul-catalog.yml in the same directory).
# NOTE(review): the "foobar" values are placeholders and must be replaced
# before use. Traefik's documented YAML key is `consulCatalog`; confirm
# the all-lowercase spelling here is accepted by your Traefik version.
providers:
  consulcatalog:

    # Expose Consul Catalog services by default in Traefik.
    exposedByDefault: true

    # Defines the consul address endpoint.
    address: 127.0.0.1:8500

    # Defines the scheme used.
    scheme: "foobar"

    # Defines the DC.
    datacenter: "foobar"

    # Defines the token.
    token: "foobar"

    # Defines the endpoint wait time.
    endpointWaitTime: "15s"

    # Defines Consul Catalog Provider TLS endpoint.
    endpoint:
      tls:

        # Defines Consul Catalog Provider endpoint.
        caOptional: true
        cert: "foobar"
        key: "foobar"
        insecureSkipVerify: true


================================================
FILE: nomad_jobs/core-infra/traefik/config/traefik.toml
================================================
################################################################
#
# Configuration sample for Traefik v2.
#
# For Traefik v1: https://github.com/containous/traefik/blob/v1.7/traefik.sample.toml
#
################################################################

################################################################
# Global configuration
################################################################
[global]
  checkNewVersion = false
  sendAnonymousUsage = false

################################################################
# Entrypoints configuration
################################################################

# Entrypoints definition
#
# Optional
# Default:
[entryPoints]
  [entryPoints.web]
    address = "0.0.0.0:80"

  [entryPoints.traefik]
    address = "0.0.0.0:9001"

  [entryPoints.websecure]
    address = "0.0.0.0:443"

[http.middlewares]
  [http.middlewares.https-redirect.redirectscheme]
    scheme = "https"

[certificatesResolvers.sample.acme]
  email = "me@you.com"
  storage = "acme.json"
  [certificatesResolvers.sample.acme.httpChallenge]
    # used during the challenge
    entryPoint = "web"

################################################################
# ServersTransports for HTTPS backends with self-signed certs
################################################################
[serversTransports.insecure-skip-verify]
  insecureSkipVerify = true


################################################################
# Traefik logs configuration
################################################################

# Traefik logs
# Enabled by default and log to stdout
#
# Optional
#
[log]

  # Log level
  #
  # Optional
  # Default: "ERROR"
  #
  # level = "DEBUG"

  # Sets the filepath for the traefik log. If not specified, stdout will be used.
  # Intermediate directories are created if necessary.
  #
  # Optional
  # Default: os.Stdout
  #
  # filePath = "log/traefik.log"

  # Format is either "json" or "common".
  #
  # Optional
  # Default: "common"
  #
  # format = "json"

################################################################
# Access logs configuration
################################################################

# Enable access logs
# By default it will write to stdout and produce logs in the textual
# Common Log Format (CLF), extended with additional fields.
#
# Optional
#
# [accessLog]

  # Sets the file path for the access log. If not specified, stdout will be used.
  # Intermediate directories are created if necessary.
  #
  # Optional
  # Default: os.Stdout
  #
  # filePath = "/path/to/log/log.txt"

  # Format is either "json" or "common".
  #
  # Optional
  # Default: "common"
  #
  # format = "json"

################################################################
# API and dashboard configuration
################################################################

# Enable API and dashboard
[api]

  # Name of the related entry point
  #
  # Optional
  # Default: "traefik"
  #
  # entryPoint = "traefik"

  # Enabled Dashboard
  #
  # Optional
  # Default: true
  #
  dashboard = true
  insecure = true
################################################################
# Ping configuration
################################################################

# Enable ping
[ping]

  # Name of the related entry point
  #
  # Optional
  # Default: "traefik"
  #
  # entryPoint = "traefik"

################################################################
# Docker configuration backend
################################################################

# Enable Docker configuration backend
#[providers.docker]

  # Docker server endpoint. Can be a tcp or a unix socket endpoint.
  #
  # Required
  # Default: "unix:///var/run/docker.sock"
  #
  # endpoint = "tcp://10.10.10.10:2375"

  # Default host rule.
  #
  # Optional
  # Default: "Host(`{{ normalize .Name }}`)"
  #
  # defaultRule = "Host(`{{ normalize .Name }}.docker.localhost`)"

  # Expose containers by default in traefik
  #
  # Optional
  # Default: true
  #
  # exposedByDefault = false

# Enable Consul Catalog Provider.
[providers.consulcatalog]

  # Expose Consul Catalog services by default in Traefik.
  exposedByDefault = false

  # Prefix used for accessing the Consul service metadata.
  prefix = "traefik"

  # Defines the polling interval (in seconds).
  #refreshSeconds = 15

  # Defines default rule.
  defaultRule = "Host(`{{ .Name }}.stuck-in-blue.com`)"

  # Includes only containers having a label with key `a.label.name` and value `foo`
  #constraints = "Label(`a.label.name`, `foo`)"
  # Defines Consul Catalog Provider endpoint.
  [providers.consulcatalog.endpoint]

    # Defines the consul address endpoint.
    address = "127.0.0.1:8500"

    # Defines the scheme used.
    scheme = "https"

    # Defines the DC.
    datacenter = "home"

    # Defines the token.
    #token = "foobar"

    # Defines the endpoint wait time.
    endpointWaitTime = "15s"

#    [providers.consulCatalog.endpoint.tls]
#      ca = "/etc/consul.d/homelab-agent-ca.pem"
#      cert = "/etc/consul.d/hetzner-server-homelab-0.pem"
#      key = "/etc/consul.d/hetzner-server-homelab-0-key.pem"

#[file]
#
## rules
#[backends]
#  [backends.sabnzbd]
#    [backends.sabnzbd.servers.server1]
#    url = "http://127.0.0.1:8080"
#    weight = 10
#    extractorfunc = "request.host"
#
#[frontends]
#  [frontends.sabnzbd]
#  backend = "sabnzbd"
#    [frontends.sabnzbd.routes.sab]
#    rule = "Host:sab.nolab.xyz"


================================================
FILE: nomad_jobs/core-infra/traefik/config/traefik.toml.new
================================================
################################################################
#
# Configuration sample for Traefik v2.
#
# For Traefik v1: https://github.com/containous/traefik/blob/v1.7/traefik.sample.toml
#
################################################################

################################################################
# Global configuration
################################################################
[global]
  checkNewVersion = false
  sendAnonymousUsage = false

################################################################
# Entrypoints configuration
################################################################

# Entrypoints definition
#
# Optional
# Default:
[entryPoints]
  [entryPoints.grpc]
    address = ":7576"

  [entryPoints.traefik]
    address = ":9009"

################################################################
# Traefik logs configuration
################################################################

# Traefik logs
# Enabled by default and log to stdout
#
# Optional
#
[log]

  # Log level
  #
  # Optional
  # Default: "ERROR"
  #
  # level = "DEBUG"

  # Sets the filepath for the traefik log. If not specified, stdout will be used.
  # Intermediate directories are created if necessary.
  #
  # Optional
  # Default: os.Stdout
  #
  # filePath = "log/traefik.log"

  # Format is either "json" or "common".
  #
  # Optional
  # Default: "common"
  #
  # format = "json"

################################################################
# Access logs configuration
################################################################

# Enable access logs
# By default it will write to stdout and produce logs in the textual
# Common Log Format (CLF), extended with additional fields.
#
# Optional
#
# [accessLog]

  # Sets the file path for the access log. If not specified, stdout will be used.
  # Intermediate directories are created if necessary.
  #
  # Optional
  # Default: os.Stdout
  #
  # filePath = "/path/to/log/log.txt"

  # Format is either "json" or "common".
  #
  # Optional
  # Default: "common"
  #
  # format = "json"

################################################################
# API and dashboard configuration
################################################################

# Enable API and dashboard
[api]

  # Name of the related entry point
  #
  # Optional
  # Default: "traefik"
  #
  # entryPoint = "traefik"

  # Enabled Dashboard
  #
  # Optional
  # Default: true
  #
  # dashboard = false
  insecure = true
################################################################
# Ping configuration
################################################################

# Enable ping
[ping]

  # Name of the related entry point
  #
  # Optional
  # Default: "traefik"
  #
  # entryPoint = "traefik"

################################################################
# Docker configuration backend
################################################################

# Enable Docker configuration backend
#[providers.docker]

  # Docker server endpoint. Can be a tcp or a unix socket endpoint.
  #
  # Required
  # Default: "unix:///var/run/docker.sock"
  #
  # endpoint = "tcp://10.10.10.10:2375"

  # Default host rule.
  #
  # Optional
  # Default: "Host(`{{ normalize .Name }}`)"
  #
  # defaultRule = "Host(`{{ normalize .Name }}.docker.localhost`)"

  # Expose containers by default in traefik
  #
  # Optional
  # Default: true
  #
  # exposedByDefault = false

# Enable Consul Catalog Provider.
[providers.consulcatalog]

  # Expose Consul Catalog services by default in Traefik.
  exposedByDefault = false

  # Prefix used for accessing the Consul service metadata.
  prefix = "traefik"

  # Defines the polling interval (in seconds).
  #refreshSeconds = 15

  # Defines default rule.
  defaultRule = "Host(`{{ .Name }}.stage.dus.tcs.trv.cloud`)"

  # Includes only containers having a label with key `a.label.name` and value `foo`
  #constraints = "Label(`a.label.name`, `foo`)"
  # Defines Consul Catalog Provider endpoint.
  [providers.consulcatalog.endpoint]

    # Defines the consul address endpoint.
    address = "127.0.0.1:8500"

    # Defines the scheme used.
    scheme = "http"

    # Defines the DC.
    datacenter = "dus"

    # Defines the token.
    #token = "foobar"

    # Defines the endpoint wait time.
    endpointWaitTime = "15s"



================================================
FILE: nomad_jobs/core-infra/traefik/config/traefik.toml.test
================================================
################################################################
# Entrypoints configuration
################################################################
# Entrypoints definition
#
defaultEntryPoints = ["https"]

[entryPoints]
  [entryPoints.admin]
  address = "10.90.80.120:6062"
  [entryPoints.http]
  address = "78.94.59.116:80"
    [entryPoints.http.redirect]
    entryPoint = "https"
  [entryPoints.https]
  address = ":443"
  [entryPoints.https.tls]

# --- Legacy Traefik v1 ACME (Let's Encrypt) configuration ---
[acme]
email = "perry@stuck-in-blue.com"
storage = "acme.json"
#caServer = "https://acme-staging-v02.api.letsencrypt.org/directory"
caServer = "https://acme-v02.api.letsencrypt.org/directory"
entryPoint = "https"
# NOTE(review): both an HTTP-01 and a DNS-01 challenge are configured below;
# Traefik v1 expects a single challenge type - confirm which one is intended.
[acme.httpChallenge]
entryPoint = "http"
[acme.dnsChallenge]
  provider = "gcloud"
  delayBeforeCheck = 0

# Wildcard certificate for the lab domain (wildcards require the DNS-01 challenge).
[[acme.domains]]
  main = "*.nolab.xyz"
  sans = ["nolab.xyz"]

################################################################
# Traefik logs configuration
################################################################
# Enable logs
# By default it will write to stdout
[traefikLog]

################################################################
# Access logs configuration
################################################################
# Enable access logs
# By default it will write to stdout and produce logs in the textual
# Common Log Format (CLF), extended with additional fields.

[accessLog]

################################################################
# Metrics configuration
################################################################
# Prometheus metrics are exposed on the "admin" entrypoint.
[metrics]
  [metrics.prometheus]
    entryPoint = "admin"

################################################################
# API and dashboard configuration
################################################################
# Enable API and dashboard
[api]
  entryPoint = "admin"
  [api.statistics]
    recentErrors = 100
################################################################
# Ping configuration
################################################################
# Enable ping
[ping]
  entryPoint = "admin"
################################################################
# Consul Catalog Provider
################################################################
# NOTE(review): "domain" here is holab.io while the ACME section above
# requests certificates for nolab.xyz - confirm this mismatch is intentional.
[consulCatalog]
endpoint = "{{ env "NOMAD_IP_https" }}:8500"
stale = true
prefix = "traefik"
domain = "holab.io"
#filename = "/usr/local/etc/traefik/consul.toml"
#templateVersion = 2



================================================
FILE: nomad_jobs/core-infra/traefik/nomad.job
================================================
# HA ingress: two Traefik v3 instances spread across hosts, fronted by a
# keepalived-managed VIP (192.168.50.20) so clients have one stable address.
job "traefik" {
  region      = var.region
  datacenters = ["dc1"]
  type        = "service"

  meta {
    job_file = "nomad_jobs/core-infra/traefik/nomad.job"
    version  = "13" // Traefik v3 fixes: v3 rule syntax, ipAllowList, tls.options moved to dynamic config
  }

  group "lbs" {
    count = 2

    # One instance per host so a single node failure cannot take down both
    # load balancers.
    constraint {
      operator = "distinct_hosts"
      value    = "true"
    }

    network {
      # The traefik task runs with network_mode = "host"; these stanzas
      # reserve the ports with the scheduler. :443 (websecure) is bound by
      # Traefik directly and intentionally has no stanza here.
      port "http" {
        host_network = "lan"
        static       = "80"
      }
      port "admin" {
        host_network = "lan"
        static       = "9002"
      }
    }

    restart {
      attempts = 3
      delay    = "15s"
      interval = "10m"
      mode     = "delay"
    }

    update {
      max_parallel     = 1
      min_healthy_time = "30s"
      auto_revert      = true
      auto_promote     = true
      canary           = 2
    }

    # keepalived floats the ingress VIP between the two Traefik hosts via VRRP.
    task "keepalived-traefik" {
      driver = "docker"

      lifecycle {
        hook    = "prestart"
        sidecar = true
      }

      config {
        image        = "osixia/keepalived:2.3.4"
        network_mode = "host"
        force_pull   = false
        volumes = [
          "local/keepalived.conf:/etc/keepalived/keepalived.conf"
        ]
        # Raw network capabilities are required to speak VRRP and to move the VIP.
        cap_add = ["NET_ADMIN", "NET_BROADCAST", "NET_RAW"]
      }

      # Peers are discovered from the Consul "traefik-web" service, so the
      # config re-renders (and keepalived restarts) when membership changes.
      template {
        destination = "local/keepalived.conf"
        change_mode = "restart"
        splay       = "1m"
        data        = <<EOH
vrrp_instance VI_1 {
    state BACKUP
    interface {{ sockaddr "GetPrivateInterfaces | include \"network\" \"192.168.50.0/24\" | attr \"name\"" }}
    virtual_router_id 50
    priority 100
    advert_int 1

    use_vmac vrrp.50
    vmac_xmit_base

    unicast_peer {
{{- range service "traefik-web" }}
        {{ .Address }}
{{- end }}
    }

    virtual_ipaddress {
        192.168.50.20/24
    }
}
EOH
      }

      resources {
        cpu    = 100
        memory = 64
      }
    }

    task "traefik" {
      driver = "docker"

      # Registered so keepalived (above) can enumerate its VRRP peers.
      service {
        name = "traefik-web"
        port = "http"
      }
      service {
        name = "traefik"
        port = "admin"
        tags = [
          "metrics"
        ]
        check {
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }

      config {
        image        = "traefik:v3.6"
        ports        = ["http", "admin"]
        network_mode = "host"
        volumes = [
          "local/traefik.toml:/etc/traefik/traefik.toml",
          "local/dynamic-config.toml:/etc/traefik/dynamic/dynamic-config.toml",
          "local/servers-transport.toml:/etc/traefik/dynamic/servers-transport.toml",
          "${var.shared_dir}traefik-ingress/acme.json:/acme.json",
          "${var.shared_dir}traefik-ingress/dynamic-whitelist.toml:/etc/traefik/dynamic/dynamic-whitelist.toml",
        ]
      }

      # Static configuration. Rendered with {{{ }}} delimiters so the double
      # braces Traefik itself consumes (e.g. in defaultRule) pass through
      # consul-template untouched.
      template {
        data = <<EOH
[global]
  checkNewVersion = false
  sendAnonymousUsage = false

[metrics]
  [metrics.prometheus]

[entryPoints]
  [entryPoints.web]
    address = ":80"
    # No entrypoint middleware - applied at router level to allow ACME challenges

  [entryPoints.websecure]
    address = ":443"
    [entryPoints.websecure.http]
        middlewares=["home-ip-whitelist@file"]
    [entryPoints.websecure.http.tls]
      certResolver = "letsencrypt"

  [entryPoints.traefik]
    address = ":9002"

[accessLog]
  format = "json"

[log]

[api]
  dashboard = true
  insecure = true  # Enable direct access on :9002 (protected by local network)

[ping]

[providers.consulcatalog]
  exposedByDefault = false
  prefix = "traefik"
  defaultRule = "Host(`{{ .Name }}.${var.tld}`)"

  [providers.consulcatalog.endpoint]
    address = "{{{ env "NOMAD_IP_http" }}}:8500"
    scheme = "http"
    datacenter = "homelab"
    endpointWaitTime = "15s"

[certificatesResolvers.letsencrypt.acme]
  # TODO(review): placeholder contact address - set a real email so
  # Let's Encrypt expiry notices are delivered.
  email = "me@you.com"
  storage = "acme.json"
  [certificatesResolvers.letsencrypt.acme.httpChallenge]
    entryPoint = "web"

[providers.file]
  directory = "/etc/traefik/dynamic"
  watch = true
EOH
        destination     = "local/traefik.toml"
        env             = false
        change_mode     = "noop"
        left_delimiter  = "{{{"
        right_delimiter = "}}}"
      }

      # Dynamic configuration served through the file provider.
      template {
        data = <<EOH
# TLS options are dynamic configuration in Traefik v2/v3; they were moved
# here from the static file. The options set named "default" applies to
# every TLS connection automatically (a custom name would never be used
# unless a router referenced it explicitly).
[tls.options]
  [tls.options.default]
    minVersion = "VersionTLS12"
    sniStrict = true

[http.middlewares]
  # Traefik v3 name for the former (deprecated) ipWhiteList middleware.
  [http.middlewares.allow-local-network.ipAllowList]
    sourceRange = ["192.168.50.0/24", "10.0.0.0/16"]  # Local network only

  [http.middlewares.basic-auth.basicAuth]
    users = [
      "admin:$apr1$Ht8D2P1z$7QOq2s8xKUomI1cM.rFJX/" # Replace with an htpasswd-generated hash
    ]
    realm = "Restricted Area"


  [http.middlewares.ip-or-auth.chain]
    middlewares = ["home-ip-whitelist@file", "allow-local-network", "basic-auth"]

  [http.middlewares.redirect-to-https.redirectScheme]
    scheme = "https"
    permanent = true

  [http.middlewares.whitelist-then-redirect.chain]
    middlewares = ["home-ip-whitelist@file", "redirect-to-https"]

[http.routers]
  # Redirect HTTP to HTTPS except for ACME challenges - apply whitelist first
  [http.routers.http-redirect]
    entryPoints = ["web"]
    rule = "PathPrefix(`/`) && !PathPrefix(`/.well-known/acme-challenge/`)"
    middlewares = ["whitelist-then-redirect"]
    service = "noop@internal"
    priority = 1000

  # Catch-all HTTPS router: any host without a dedicated router is proxied
  # to the static "beefcake" backend below.
  [http.routers.https-local]
    entryPoints = ["websecure"]
    # v3 rule syntax: HostRegexp takes a plain Go regexp. The previous v2
    # form `{any:.+}` does not parse under v3 and dropped this router.
    rule = "HostRegexp(`.+`)"
    middlewares = ["home-ip-whitelist@file"]
    service = "beefcake"
    [http.routers.https-local.tls]

[http.services]
  [http.services.beefcake.loadBalancer]
    [[http.services.beefcake.loadBalancer.servers]]
      url = "http://192.168.50.208:80"
EOH
        destination = "local/dynamic-config.toml"
        env         = false
        change_mode = "noop"
      }

      template {
        data = <<EOH
# ServersTransport for HTTPS backends with self-signed certificates
[http.serversTransports]
  [http.serversTransports.insecure-skip-verify]
    insecureSkipVerify = true
EOH
        destination = "local/servers-transport.toml"
        env         = false
        change_mode = "noop"
      }

      resources {
        cpu    = 100
        memory = 256
      }
    }
  }
}

variable "region" {
  type = string
}

variable "tld" {
  type = string
}

variable "shared_dir" {
  type = string
}

# NOTE(review): not referenced in this job file; kept because the deploy
# tooling may still pass it via -var (removing a declared variable that is
# still supplied would fail the plan).
variable "ingress_ip" {
  type = string
}


================================================
FILE: nomad_jobs/core-infra/traefik-forward-auth/nomad.job
================================================
# Google OAuth forward-auth daemon (thomseddon/traefik-forward-auth fork).
# Traefik calls it through the "forward-auth" middleware defined in the
# service tags below; it runs on the tailscale host network on :4181.
job "traefik-forward-auth" {
  region      = var.region
  datacenters = ["dc1"]
  type        = "service"

  meta {
    job_file = "nomad_jobs/core-infra/traefik-forward-auth/nomad.job"
    version  = "5" // dedupe conflicting traefik router rule tags
  }

  group "downloaders" {
    count = 1

    network {
      port "http" {
        host_network = "tailscale"
        static       = "4181"
      }
    }

    restart {
      attempts = 3
      delay    = "15s"
      interval = "10m"
      mode     = "delay"
    }

    update {
      max_parallel     = 1
      min_healthy_time = "30s"
      auto_revert      = true
    }

    task "auth" {
      driver = "docker"

      config {
        image = "ghcr.io/jordemort/traefik-forward-auth:latest"
        ports = ["http"]
      }

      env {
        PROVIDERS_GOOGLE_CLIENT_ID     = "${var.oauth_client_id}"
        PROVIDERS_GOOGLE_CLIENT_SECRET = "${var.oauth_client_secret}"
        SECRET                         = "${var.oauth_secret}"
        # Auth-host mode: OAuth callbacks for every *.tld app land on this host.
        AUTH_HOST                      = "auth.${var.tld}"
        COOKIE_DOMAIN                  = "${var.tld}"
        WHITELIST                      = "${var.oauth_emails}"
        LOG_LEVEL                      = "debug"
        URL_PATH                       = "/_oauth"
        DEFAULT_ACTION                 = "auth"
        INSECURE_COOKIE                = "false"
        CONFIG                         = "local/config"
        TRUSTED_IP_ADDRESS             = "86.111.155.199/32,86.111.152.230/32,89.246.171.154/32,86.111.155.93/32"
      }

      service {
        port = "http"
        name = "traefik-forward-auth"
        tags = [
          "traefik.enable=true",
          # Single router rule. In auth-host mode the OAuth callback arrives on
          # auth.<tld>, so matching the host also covers /_oauth. (Previously
          # this key was set three times; Traefik keeps only the last value,
          # which had narrowed the router to Path(`/_oauth`) on any host.)
          "traefik.http.routers.auth.rule=Host(`auth.${var.tld}`)",
          "traefik.http.routers.auth.entrypoints=websecure",
          "traefik.http.routers.auth.tls=true",
          "traefik.http.routers.auth.tls.domains[0].main=${var.tld}",
          "traefik.http.routers.auth.tls.domains[0].sans=*.${var.tld}",
          # NOTE(review): this extra router only sets a rule (no entrypoint,
          # no TLS), so it attaches to the default entrypoints - confirm it is
          # still needed.
          "traefik.http.routers.${NOMAD_TASK_NAME}_insecure.rule=Host(`auth.${var.tld}`)",
          "traefik.http.middlewares.forward-auth.forwardauth.address=http://${NOMAD_IP_http}:${NOMAD_PORT_http}/",
          "traefik.http.middlewares.forward-auth.forwardauth.trustForwardHeader=true",
          "traefik.http.middlewares.forward-auth.forwardauth.authResponseHeaders=X-Forwarded-User",
          "traefik.http.routers.auth.middlewares=forward-auth"
        ]
        check {
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }

      # Per-rule overrides: let sabnzbd's API endpoint through without auth.
      template {
        data        = <<EOH
rule.sabnzbd.action = allow
rule.sabnzbd.rule = PathPrefix(`/sabnzbd/api`)
EOH
        destination = "local/config"
        env         = false
      }

      resources {
        cpu    = 100
        memory = 256
      }
    }
  }
}

variable "region" {}
variable "tld" {}
variable "oauth_client_id" {}
variable "oauth_client_secret" {}
variable "oauth_secret" {}
variable "oauth_emails" {}


================================================
FILE: nomad_jobs/core-infra/vault/secrets_template.yaml
================================================
# Vault seeding template: ${VAR} placeholders are substituted from the
# environment before the secrets are written under the generic "secret" backend.
backends:
- type: generic
  path: secret
  description: secrets

secrets:
  - path: /secret/homelab/couchpotato
    values:
      apikey: ${COUCHPOTATO_APIKEY}
      username: ${COUCHPOTATO_USERNAME}
      password: ${COUCHPOTATO_PASS}
      host: ${COUCHPOTATO_HOST}

  - path: /secret/homelab/plex
    values:
      auth_token: ${PLEX_AUTHTOKEN}
      host: ${PLEX_HOST}

  - path: /secret/external/pushover
    values:
      api_key: ${PUSHOVER_APIKEY}
      user_key: ${PUSHOVER_USERKEY}

  # NOTE(review): "newsnab" is likely a typo of "newznab", but the path is
  # what consumers read, so it is deliberately left unchanged.
  - path: /secret/external/newsnab
    values:
      api: ${NEWSNAB_API}
      host: ${NEWSNAB_HOST}

  - path: /secret/homelab/mqtt-pub
    values:
      remote_username: ${MQTTPUB_REMOTE_USERNAME}
      remote_password: ${MQTTPUB_REMOTE_PASSWORD}
      address: ${MQTTPUB_ADDRESS}

  - path: /secret/homelab/sonarr
    values:
      api: ${SONAR_API}
      host: ${SONAR_HOST}

  # This entry previously appeared twice with identical contents; the
  # duplicate has been removed.
  - path: /secret/homelab/sabnzbd
    values:
      api: ${SABNZBD_API}
      host: ${SABNZBD_HOST}

  - path: /secret/homelab/asuswrt
    values:
      username: ${ASUSWRT_USER}
      password: ${ASUSWRT_PASS}
      host: ${ASUSWRT_HOST}

  - path: /secret/external/github
    values:
      token: ${GITHUB_TOKEN}



================================================
FILE: nomad_jobs/gaming/minecraft-1.21/nomad.job
================================================
# Minecraft 1.21.10 Fabric server, fully configured through the
# itzg/minecraft-server image's environment variables.
job "minecraft-1-21" {
  region      = var.region
  datacenters = ["minecraft"]
  type        = "service"

  meta {
    job_file = "nomad_jobs/gaming/minecraft-1.21/nomad.job"
    version  = "2"
  }

  group "minecraft" {
    count = 1

    network {
      # The task runs with host networking; these stanzas reserve the ports
      # with the scheduler. Container port 25565 is the Minecraft default.
      port "minecraft" {
        host_network = "lan"
        static       = 25568
        to           = 25565
      }
      # NOTE(review): 25569 is reserved here, but the task sets
      # QUERY_PORT = "25568" below - confirm whether this reservation is
      # still needed or whether QUERY_PORT should be 25569.
      port "query" {
        host_network = "lan"
        static       = 25569
        to           = 25565
      }
    }

    restart {
      attempts = 3
      delay    = "15s"
      interval = "10m"
      mode     = "delay"
    }

    update {
      max_parallel     = 1
      min_healthy_time = "30s"
      auto_revert      = true
    }

    task "minecraft-server" {
      driver = "docker"

      config {
        image        = "itzg/minecraft-server:2026.4.1"
        force_pull   = false
        network_mode = "host"

        # World data and server config live on the host so they survive
        # rescheduling to the same node.
        volumes = [
          "/root/minecraft-1.21.10/data:/data",
          "/root/minecraft-1.21.10/config:/config",
        ]
      }

      env {
        EULA                          = "TRUE"
        VERSION                       = "1.21.10"
        TYPE                          = "FABRIC"
        INIT_MEMORY                   = "2G"
        MAX_MEMORY                    = "8G"
        MOTD                          = "Minecraft 1.21.10 Fabric Server"
        DIFFICULTY                    = "peaceful"
        MAX_PLAYERS                   = "20"
        VIEW_DISTANCE                 = "10"
        SIMULATION_DISTANCE           = "8"
        SPAWN_PROTECTION              = "16"
        # NOTE(review): Mojang authentication is disabled, so anyone who can
        # reach the port can join under any name - confirm the exposure is
        # limited to the LAN.
        ONLINE_MODE                   = "false"
        ENABLE_WHITELIST              = "false"
        SERVER_PORT                   = "25568"
        ENABLE_QUERY                  = "true"
        QUERY_PORT                    = "25568"
        NETWORK_COMPRESSION_THRESHOLD = "512"
        ENABLE_AUTOPAUSE              = "FALSE"
        PAUSE_WHEN_EMPTY_SECONDS      = "0"
        # Presumably prunes mods in /data that are not in the declared list
        # below - confirm against the itzg image docs.
        REMOVE_OLD_MODS               = "true"
        MODRINTH_PROJECTS             = "axiom\nfabric-api\ncloth-config\ndistanthorizons\nbalm\nferrite-core\nwaystones\ncarry-on\nbeautify-refabricated\nmacaws-furniture\nmacaws-trapdoors\nmore-decorative-blocks\nblackwolf-library"
        MODRINTH_ALLOWED_VERSION_TYPE = "beta"
        OPS                           = "perry,hannah,Perry,Steve5leon"
        # G1GC tuning flags passed straight to the JVM.
        JVM_OPTS                      = "-XX:+UseG1GC -XX:+ParallelRefProcEnabled -XX:MaxGCPauseMillis=100 -XX:+UnlockExperimentalVMOptions -XX:+DisableExplicitGC -XX:+AlwaysPreTouch -XX:+UseStringDeduplication -XX:G1NewSizePercent=30 -XX:G1MaxNewSizePercent=40 -XX:G1HeapRegionSize=8M -XX:G1ReservePercent=20 -XX:InitiatingHeapOccupancyPercent=20 -XX:G1MixedGCLiveThresholdPercent=90 -XX:SurvivorRatio=32 -XX:MaxTenuringThreshold=1"
      }

      service {
        name = "minecraft-1-21"
        port = "minecraft"
        tags = ["minecraft", "gaming", "1.21"]

        check {
          type     = "tcp"
          interval = "30s"
          timeout  = "5s"
        }
      }

      resources {
        cpu        = 4000
        memory     = 3072
        # Soft limit 3 GiB; memory_max lets the container burst to ~9.5 GiB.
        memory_max = 9728
      }
    }
  }
}

variable "region" {
  type = string
}


================================================
FILE: nomad_jobs/gaming/minecraft-avaritia/nomad.job
================================================
# Forge 1.20.1 "Avaritia" creative modded server (itzg/minecraft-server
# image); mods are pulled from Modrinth and CurseForge at startup.
job "minecraft-avaritia" {
  region      = var.region
  datacenters = ["minecraft"]
  type        = "service"

  meta {
    job_file = "nomad_jobs/gaming/minecraft-avaritia/nomad.job"
    version  = "1"
  }

  group "minecraft" {
    count = 1

    network {
      # The task runs with host networking; this reserves 25571 with the
      # scheduler. Container port 25565 is the Minecraft default.
      port "minecraft" {
        host_network = "lan"
        static       = 25571
        to           = 25565
      }
    }

    # Longer delay/interval than the other Minecraft jobs - presumably
    # because a heavy Forge modpack takes a long time to boot.
    restart {
      attempts = 2
      delay    = "120s"
      interval = "30m"
      mode     = "delay"
    }

    update {
      max_parallel     = 1
      min_healthy_time = "30s"
      auto_revert      = true
    }

    task "minecraft-server" {
      driver = "docker"

      config {
        image        = "itzg/minecraft-server:2026.4.1"
        force_pull   = false
        network_mode = "host"

        # World data persists on the host across reschedules to this node.
        volumes = [
          "/root/minecraft-avaritia/data:/data",
        ]
      }

      env {
        EULA                          = "TRUE"
        VERSION                       = "1.20.1"
        TYPE                          = "FORGE"
        INIT_MEMORY                   = "2G"
        MAX_MEMORY                    = "8G"
        MOTD                          = "Avaritia Modded Server (1.20.1)"
        MODE                          = "creative"
        DIFFICULTY                    = "normal"
        MAX_PLAYERS                   = "20"
        VIEW_DISTANCE                 = "10"
        SIMULATION_DISTANCE           = "8"
        SPAWN_PROTECTION              = "16"
        # NOTE(review): Mojang authentication is disabled - anyone who can
        # reach the port can join under any name; confirm LAN-only exposure.
        ONLINE_MODE                   = "false"
        ENABLE_WHITELIST              = "false"
        SERVER_PORT                   = "25571"
        ENABLE_QUERY                  = "true"
        QUERY_PORT                    = "25571"
        NETWORK_COMPRESSION_THRESHOLD = "512"
        FORCE_GAMEMODE                = "true"
        ENABLE_COMMAND_BLOCK          = "true"
        OPS                           = "perry,hannah,Perry"
        # -1 disables the server watchdog that would otherwise kill the
        # process on long ticks - common for heavy modpacks.
        MAX_TICK_TIME                 = "-1"
        ENABLE_RCON                   = "true"
        RCON_PORT                     = "25572"
        # NOTE(review): hard-coded RCON password, and RCON is reachable on
        # the LAN due to host networking - consider moving this to a
        # Nomad/Vault variable.
        RCON_PASSWORD                 = "minecraft"
        CREATE_CONSOLE_IN_PIPE        = "true"
        # G1GC tuning flags passed straight to the JVM.
        JVM_OPTS                      = "-XX:+UseG1GC -XX:+ParallelRefProcEnabled -XX:MaxGCPauseMillis=100 -XX:+UnlockExperimentalVMOptions -XX:+DisableExplicitGC -XX:+AlwaysPreTouch -XX:+UseStringDeduplication -XX:G1NewSizePercent=30 -XX:G1MaxNewSizePercent=40 -XX:G1HeapRegionSize=8M -XX:G1ReservePercent=20 -XX:InitiatingHeapOccupancyPercent=20 -XX:G1MixedGCLiveThresholdPercent=90 -XX:SurvivorRatio=32 -XX:MaxTenuringThreshold=1 -XX:G1PeriodicGCInterval=15000 -XX:G1PeriodicGCSystemLoadThreshold=0.0"
        MODRINTH_PROJECTS             = "remorphed\nwoodwalkers\ncrafted-core"
        CURSEFORGE_FILES              = "re-avaritia\nluckytnt\nlucky-tnt-lib\npehkui\nterrablender\nbiomes-o-plenty\noh-the-biomes-weve-gone\nterralith\ntectonic\nlithostitched\noh-the-trees-youll-grow\njei\njourneymap\nwaystones\nembeddium\ngeckolib\narchitectury-api\nglitchcore\ncorgilib\nbalm"
        # Defaults to "" (see variable below); CurseForge downloads need a key.
        CF_API_KEY                    = "${var.curseforge_api_key}"
      }

      service {
        name = "minecraft-avaritia"
        port = "minecraft"
        tags = ["minecraft", "gaming", "avaritia", "modded"]

        check {
          type     = "tcp"
          interval = "30s"
          timeout  = "5s"
        }
      }

      resources {
        cpu        = 4000
        memory     = 3072
        # Soft limit 3 GiB; memory_max lets the container burst to ~9.5 GiB.
        memory_max = 9728
      }
    }
  }
}

variable "region" {
  type = string
}

variable "curseforge_api_key" {
  type    = string
  default = ""
}


================================================
FILE: nomad_jobs/gaming/minecraft-axiom/nomad.job
================================================
job "minecraft-axiom" {
  region      = var.region
  datacenters = ["minecraft"]
  type        = "service"

  meta {
    job_file = "nomad_jobs/gaming/minecraft-axiom/nomad.job"
    version  = "2"  // Added forge-config-api-port dependency
  }

  group "minecraft" {
    count = 1

    network {
      port "minecraft" {
        host_network = "lan"
        static       = 25566
        to           = 25565
      }
      port "query" {
        host_network = "lan"
        static       = 25567
        to           = 25565
      }
    }

    restart {
      attempts = 3
      delay    = "15s"
      interval = "10m"
      mode     = "delay"
    }

    update {
      max_parallel     = 1
      min_healthy_time = "30s"
      auto_revert      = true
    }

    task "minecraft-server" {
      driver = "docker"

      config {
        image        = "itzg/minecraft-server:2026.4.1"
        force_pull   = false
        network_
Download .txt
gitextract_ikark14b/

├── .bootstrap.mk
├── .gitattributes
├── .github/
│   └── workflows/
│       ├── build-gcp-dns-updater.yaml
│       ├── nomad.yaml
│       ├── update-kideo.yaml
│       ├── update-minecraftmath.yaml
│       ├── update-radbot-dev.yaml
│       └── update-radbot.yaml
├── .gitignore
├── LICENSE
├── Makefile
├── README.md
├── ansible/
│   ├── configs/
│   │   ├── consul.hcl.j2
│   │   ├── consul.service
│   │   ├── docker-daemon.json.j2
│   │   ├── nomad.hcl.j2
│   │   └── nomad.service
│   ├── playbook.yml
│   └── zsh.yml
├── docker_images/
│   ├── gcp-dns-updater/
│   │   ├── Dockerfile
│   │   ├── README.md
│   │   ├── requirements.txt
│   │   └── update_dns.py
│   └── update-metadata/
│       ├── Dockerfile
│       ├── README.md
│       ├── requirements.txt
│       └── update_job_metadata.py
├── envrc
├── nomad_jobs/
│   ├── TEMPLATE-volume.hcl
│   ├── TEMPLATE.job
│   ├── ai-ml/
│   │   ├── cognee/
│   │   │   └── nomad.job
│   │   ├── crawl4ai/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── litellm/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── manyfold/
│   │   │   ├── 3dprints-volume.hcl
│   │   │   ├── nomad.job
│   │   │   ├── prints_volume.hcl
│   │   │   └── volume.hcl
│   │   ├── ollama/
│   │   │   └── nomad.job
│   │   ├── open-webui/
│   │   │   └── nomad.job
│   │   ├── paperless-ai/
│   │   │   └── nomad.job
│   │   ├── pgvector-client/
│   │   │   └── nomad.job
│   │   └── radbot/
│   │       ├── nomad-dev.job
│   │       └── nomad.job
│   ├── core-infra/
│   │   ├── coredns/
│   │   │   ├── README.md
│   │   │   └── nomad.job
│   │   ├── github-runner/
│   │   │   └── nomad.job
│   │   ├── haproxy/
│   │   │   └── nomad.job
│   │   ├── iscsi-csi-plugin/
│   │   │   ├── controller.job
│   │   │   └── node.job
│   │   ├── keepalived/
│   │   │   ├── TODO.md
│   │   │   └── nomad.job
│   │   ├── nfs-csi-plugin/
│   │   │   ├── controller.job
│   │   │   └── nodes.job
│   │   ├── pihole/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── smtp/
│   │   │   └── nomad.job
│   │   ├── tailscale/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── tailscale-este/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── traefik/
│   │   │   ├── config/
│   │   │   │   ├── consul-catalog.yml
│   │   │   │   ├── consul.yml
│   │   │   │   ├── traefik.toml
│   │   │   │   ├── traefik.toml.new
│   │   │   │   └── traefik.toml.test
│   │   │   └── nomad.job
│   │   ├── traefik-forward-auth/
│   │   │   └── nomad.job
│   │   └── vault/
│   │       └── secrets_template.yaml
│   ├── gaming/
│   │   ├── minecraft-1.21/
│   │   │   └── nomad.job
│   │   ├── minecraft-avaritia/
│   │   │   └── nomad.job
│   │   ├── minecraft-axiom/
│   │   │   └── nomad.job
│   │   ├── minecraft-fiskheroes/
│   │   │   └── nomad.job
│   │   └── minecraft-forge/
│   │       └── nomad.job
│   ├── media-stack/
│   │   ├── audioserve/
│   │   │   └── nomad.job
│   │   ├── flaresolverr/
│   │   │   └── nomad.job
│   │   ├── jackett/
│   │   │   └── nomad.job
│   │   ├── lazylibrarian/
│   │   │   └── nomad.job
│   │   ├── lidarr/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── lidify/
│   │   │   └── nomad.job
│   │   ├── maintainerr/
│   │   │   └── nomad.job
│   │   ├── mediasage/
│   │   │   └── nomad.job
│   │   ├── multi-scrobbler/
│   │   │   └── nomad.job
│   │   ├── navidrome/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── ombi/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── overseerr/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── plex/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── prowlarr/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── qbittorrent/
│   │   │   └── nomad.job
│   │   ├── radarr/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── requestrr/
│   │   │   └── nomad.job
│   │   ├── sabnzbd/
│   │   │   └── nomad.job
│   │   ├── sickchill/
│   │   │   └── nomad.job
│   │   ├── sonarr/
│   │   │   └── nomad.job
│   │   ├── synclounge/
│   │   │   └── nomad.job
│   │   ├── tautulli/
│   │   │   └── nomad.job
│   │   └── tdarr/
│   │       ├── nomad.job
│   │       └── volume.hcl
│   ├── misc/
│   │   ├── adb/
│   │   │   └── nomad.job
│   │   ├── gcp-dns-updater/
│   │   │   ├── Dockerfile
│   │   │   ├── README.md
│   │   │   ├── nomad.job
│   │   │   ├── requirements.txt
│   │   │   └── update_dns.py
│   │   ├── gitea/
│   │   │   └── nomad.job
│   │   ├── linuxgsm/
│   │   │   └── nomad.job
│   │   ├── murmur/
│   │   │   └── nomad.job
│   │   ├── octoprint/
│   │   │   └── nomad.job
│   │   └── uploader/
│   │       └── nomad.job
│   ├── observability/
│   │   ├── alertmanager/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── blackbox-exporter/
│   │   │   └── nomad.job
│   │   ├── grafana/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── loki/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── oom-test/
│   │   │   └── nomad.job
│   │   ├── prometheus/
│   │   │   ├── README.md
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── telegraf/
│   │   │   └── nomad.job
│   │   ├── truenas-graphite-exporter/
│   │   │   └── nomad.job
│   │   └── vector/
│   │       └── nomad.job
│   ├── personal-cloud/
│   │   ├── actualbudget/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── bitwarden/
│   │   │   └── nomad.job
│   │   ├── nextcloud/
│   │   │   └── nomad.job
│   │   ├── ntfy/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── paperless/
│   │   │   └── nomad.job
│   │   └── radicale/
│   │       └── nomad.job
│   ├── security/
│   │   ├── suricata/
│   │   │   └── nomad.job
│   │   ├── suricata-update/
│   │   │   └── nomad.job
│   │   ├── wazuh-agent/
│   │   │   └── nomad.job
│   │   └── wazuh-server/
│   │       ├── nomad.job
│   │       ├── volume-dashboard.hcl
│   │       ├── volume-indexer.hcl
│   │       └── volume-manager.hcl
│   ├── smart-home/
│   │   ├── deconz/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── home-assistant/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── mqtt/
│   │   │   └── nomad.job
│   │   ├── owntracks-recorder/
│   │   │   └── nomad.job
│   │   └── zigbee2mqtt/
│   │       └── nomad.job
│   ├── storage-backends/
│   │   ├── docker-registry/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── mariadb/
│   │   │   └── nomad.job
│   │   ├── neo4j/
│   │   │   ├── nomad.job
│   │   │   ├── setup.job
│   │   │   └── volume.hcl
│   │   ├── pgvector/
│   │   │   ├── nomad.job
│   │   │   └── pgvector-setup.job
│   │   ├── postgres/
│   │   │   ├── nomad.job
│   │   │   └── postgres-setup.job
│   │   ├── qdrant/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   ├── redis/
│   │   │   ├── nomad.job
│   │   │   └── volume.hcl
│   │   └── volumes/
│   │       └── nfs-example.hcl
│   ├── system/
│   │   └── docker-cleanup/
│   │       └── nomad.job
│   └── web-apps/
│       ├── alertmanager-dashboard/
│       │   └── nomad.job
│       ├── firecrawl/
│       │   └── nomad.job
│       ├── heimdall/
│       │   └── nomad.job
│       ├── homepage/
│       │   └── nomad.job
│       ├── kideo/
│       │   └── nomad.job
│       ├── minecraftmath/
│       │   └── nomad.job
│       ├── wordpress/
│       │   └── nomad.job
│       └── www/
│           ├── Dockerfile
│           └── nomad.job
├── renovate.json
└── services/
    └── beefcake.json
Download .txt
SYMBOL INDEX (13 symbols across 3 files)

FILE: docker_images/gcp-dns-updater/update_dns.py
  function get_env_vars (line 19) | def get_env_vars():
  function get_public_ip (line 38) | def get_public_ip():
  function get_dns_client (line 50) | def get_dns_client(key_b64: str, project_id: str): # Changed key_path to...
  function update_dns_record (line 81) | def update_dns_record(client: dns.Client, project_id: str, zone_name: st...
  function update_spf_record (line 176) | def update_spf_record(client: dns.Client, project_id: str, zone_name: st...

FILE: docker_images/update-metadata/update_job_metadata.py
  function find_job_block (line 12) | def find_job_block(content):
  function find_meta_block (line 50) | def find_meta_block(content):
  function update_job_metadata (line 87) | def update_job_metadata(repo_root):

FILE: nomad_jobs/misc/gcp-dns-updater/update_dns.py
  function get_env_vars (line 18) | def get_env_vars():
  function get_public_ip (line 37) | def get_public_ip():
  function get_dns_client (line 49) | def get_dns_client(key_b64: str, project_id: str): # Changed key_path to...
  function update_dns_record (line 80) | def update_dns_record(client: dns.Client, project_id: str, zone_name: st...
  function update_spf_record (line 175) | def update_spf_record(client: dns.Client, project_id: str, zone_name: st...
Condensed preview — 180 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (504K chars).
[
  {
    "path": ".bootstrap.mk",
    "chars": 373,
    "preview": "export VERSION_TAG=$(shell git rev-parse --short HEAD)\nexport JOB_NAME=$(shell basename $PWD)\n\ndash-split = $(word $2,$("
  },
  {
    "path": ".gitattributes",
    "chars": 28,
    "preview": "*.job linguist-language=HCL\n"
  },
  {
    "path": ".github/workflows/build-gcp-dns-updater.yaml",
    "chars": 1056,
    "preview": "# .github/workflows/build-gcp-dns-updater.yaml\nname: Build GCP DNS Updater Image\n\non:\n  push:\n    branches:\n      - main"
  },
  {
    "path": ".github/workflows/nomad.yaml",
    "chars": 10023,
    "preview": "on:\n  push:\n    branches:\n      - master\n\njobs:\n  # JOB to run change detection\n  changes:\n    runs-on: ubuntu-latest\n  "
  },
  {
    "path": ".github/workflows/update-kideo.yaml",
    "chars": 927,
    "preview": "name: Update kideo image tag\n\non:\n  repository_dispatch:\n    types: [update-kideo]\n\njobs:\n  update-and-deploy:\n    runs-"
  },
  {
    "path": ".github/workflows/update-minecraftmath.yaml",
    "chars": 991,
    "preview": "name: Update minecraftmath image tag\n\non:\n  repository_dispatch:\n    types: [update-minecraftmath]\n\njobs:\n  update-and-d"
  },
  {
    "path": ".github/workflows/update-radbot-dev.yaml",
    "chars": 961,
    "preview": "name: Update radbot-dev image tag\n\non:\n  repository_dispatch:\n    types: [update-radbot-dev]\n\njobs:\n  update-and-deploy:"
  },
  {
    "path": ".github/workflows/update-radbot.yaml",
    "chars": 929,
    "preview": "name: Update radbot image tag\n\non:\n  repository_dispatch:\n    types: [update-radbot]\n\njobs:\n  update-and-deploy:\n    run"
  },
  {
    "path": ".gitignore",
    "chars": 155,
    "preview": ".envrc\n.env\n*-pub\n.passwords\n.envrc*\nvault/secrets.yaml\nvault/*.hcl\nwww/main.jpg\nssl\nlevant/*\n!levant/defaults.yml\nhosts"
  },
  {
    "path": "LICENSE",
    "chars": 11357,
    "preview": "                                 Apache License\n                           Version 2.0, January 2004\n                   "
  },
  {
    "path": "Makefile",
    "chars": 3834,
    "preview": "# Load .env files\n#include .envrc\n\ninclude ./.bootstrap.mk\n\n# Define base deployments using their service names\nbase_dep"
  },
  {
    "path": "README.md",
    "chars": 6181,
    "preview": "# Hashi-Homelab\n<p align=\"center\">\n<img width=\"250\" src=\"homelab.png\" />\n</p>\n\n### UPDATE - September 2nd 2025\n\nThis rep"
  },
  {
    "path": "ansible/configs/consul.hcl.j2",
    "chars": 1080,
    "preview": "#jinja2: trim_blocks:False\nserver = {% if \"lan-client-server\" in group_names %}true{% else %}false{% endif %}\nui = {% if"
  },
  {
    "path": "ansible/configs/consul.service",
    "chars": 386,
    "preview": "[Unit]\nDescription=consul agent\nRequires=network-online.target tailscaled.service\nAfter=network-online.target tailscaled"
  },
  {
    "path": "ansible/configs/docker-daemon.json.j2",
    "chars": 203,
    "preview": "{\n  \"dns\": [\"192.168.50.2\", \"192.168.50.1\", \"8.8.8.8\"]{% if 'cheese' in group_names %},\n  \"runtimes\": {\n    \"nvidia\": {\n"
  },
  {
    "path": "ansible/configs/nomad.hcl.j2",
    "chars": 3505,
    "preview": "#jinja2: trim_blocks:False\ndata_dir = \"/var/lib/nomad/\"\ndatacenter = {% if \"cheese\" in group_names %}\"cheese\"{% elif \"mi"
  },
  {
    "path": "ansible/configs/nomad.service",
    "chars": 573,
    "preview": "[Unit]\nDescription=nomad.agent\nRequires=network-online.target tailscaled.service\nAfter=network-online.target tailscaled."
  },
  {
    "path": "ansible/playbook.yml",
    "chars": 11366,
    "preview": "---\n- name: network mounts\n  hosts:\n    - lan-client-server\n    - lan-client\n    - cheese\n    - minecraft\n  become: true"
  },
  {
    "path": "ansible/zsh.yml",
    "chars": 1594,
    "preview": "---\n- name: Install Zsh and Oh My Zsh with Agnoster theme\n  hosts: cheese\n  become: yes\n  remote_user: root\n  gather_fac"
  },
  {
    "path": "docker_images/gcp-dns-updater/Dockerfile",
    "chars": 482,
    "preview": "FROM python:3.14-slim\n\n# Set the working directory in the container\nWORKDIR /app\n\n# Copy the requirements file into the "
  },
  {
    "path": "docker_images/gcp-dns-updater/README.md",
    "chars": 5626,
    "preview": "# GCP Dynamic DNS Updater Service\n\nThis service periodically checks the public IPv4 address of the node it's running on "
  },
  {
    "path": "docker_images/gcp-dns-updater/requirements.txt",
    "chars": 37,
    "preview": "google-cloud-dns\nrequests\ngoogle-auth"
  },
  {
    "path": "docker_images/gcp-dns-updater/update_dns.py",
    "chars": 13023,
    "preview": "\nimport os\nimport requests\nimport logging\nimport sys\nimport base64\nimport json\nimport time\nimport socket # Added import\n"
  },
  {
    "path": "docker_images/update-metadata/Dockerfile",
    "chars": 178,
    "preview": "FROM python:3.14-slim\n\nWORKDIR /app\n\nCOPY requirements.txt .\nRUN pip install --no-cache-dir -r requirements.txt\n\nCOPY sy"
  },
  {
    "path": "docker_images/update-metadata/README.md",
    "chars": 2584,
    "preview": "# GitHub Secret Synchronization Script (Containerized)\n\n## Purpose\n\nThis script (`sync_secrets.py`), running inside a Do"
  },
  {
    "path": "docker_images/update-metadata/requirements.txt",
    "chars": 14,
    "preview": "PyGithub\nhcl2\n"
  },
  {
    "path": "docker_images/update-metadata/update_job_metadata.py",
    "chars": 7199,
    "preview": "\nimport argparse\nimport logging\nimport pathlib\nimport re\nimport sys\n\n# Configure logging\nlogging.basicConfig(level=loggi"
  },
  {
    "path": "envrc",
    "chars": 657,
    "preview": "export CONSUL_HTTP_ADDR=http://FILL_IN_IP:8500\nexport CONSUL_CACERT=/etc/consul.d/ssl/ca.cert\nexport CONSUL_CLIENT_CERT="
  },
  {
    "path": "nomad_jobs/TEMPLATE-volume.hcl",
    "chars": 1420,
    "preview": "// =============================================================================\n// Nomad CSI Volume Template\n// ======="
  },
  {
    "path": "nomad_jobs/TEMPLATE.job",
    "chars": 7428,
    "preview": "// =============================================================================\n// Nomad Job Template\n// =============="
  },
  {
    "path": "nomad_jobs/ai-ml/cognee/nomad.job",
    "chars": 2447,
    "preview": "job \"cognee\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/ai-"
  },
  {
    "path": "nomad_jobs/ai-ml/crawl4ai/nomad.job",
    "chars": 3985,
    "preview": "job \"crawl4ai\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \""
  },
  {
    "path": "nomad_jobs/ai-ml/crawl4ai/volume.hcl",
    "chars": 391,
    "preview": "id           = \"crawl4ai-data\"\nexternal_id  = \"crawl4ai-data\"\nname         = \"crawl4ai-data\"\ntype         = \"csi\"\nplugin"
  },
  {
    "path": "nomad_jobs/ai-ml/litellm/nomad.job",
    "chars": 4855,
    "preview": "job \"litellm\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"noma"
  },
  {
    "path": "nomad_jobs/ai-ml/litellm/volume.hcl",
    "chars": 343,
    "preview": "id           = \"litellm\"\nexternal_id  = \"litellm\"\nname         = \"litellm\"\ntype         = \"csi\"\nplugin_id    = \"org.demo"
  },
  {
    "path": "nomad_jobs/ai-ml/manyfold/3dprints-volume.hcl",
    "chars": 349,
    "preview": "id           = \"3dprints\"\nexternal_id  = \"3dprints\"\nname         = \"3dprints\"\ntype         = \"csi\"\nplugin_id    = \"org.d"
  },
  {
    "path": "nomad_jobs/ai-ml/manyfold/nomad.job",
    "chars": 2388,
    "preview": "job \"manyfold\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nom"
  },
  {
    "path": "nomad_jobs/ai-ml/manyfold/prints_volume.hcl",
    "chars": 350,
    "preview": "id           = \"3dprints\"\nexternal_id  = \"3dprints\"\nname         = \"3dprints\"\ntype         = \"csi\"\nplugin_id    = \"org.d"
  },
  {
    "path": "nomad_jobs/ai-ml/manyfold/volume.hcl",
    "chars": 350,
    "preview": "id           = \"manyfold\"\nexternal_id  = \"manyfold\"\nname         = \"manyfold\"\ntype         = \"csi\"\nplugin_id    = \"org.d"
  },
  {
    "path": "nomad_jobs/ai-ml/ollama/nomad.job",
    "chars": 1642,
    "preview": "job \"ollama\" {\n  region = var.region\n  datacenters = [\"cheese\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"noma"
  },
  {
    "path": "nomad_jobs/ai-ml/open-webui/nomad.job",
    "chars": 1470,
    "preview": "job \"open-webui\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"n"
  },
  {
    "path": "nomad_jobs/ai-ml/paperless-ai/nomad.job",
    "chars": 1112,
    "preview": "job \"paperless-ai\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = "
  },
  {
    "path": "nomad_jobs/ai-ml/pgvector-client/nomad.job",
    "chars": 4499,
    "preview": "job \"pgvector-client-example\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"batch\"\n\n  meta {\n    job_"
  },
  {
    "path": "nomad_jobs/ai-ml/radbot/nomad-dev.job",
    "chars": 2596,
    "preview": "job \"radbot-dev\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file ="
  },
  {
    "path": "nomad_jobs/ai-ml/radbot/nomad.job",
    "chars": 3907,
    "preview": "job \"radbot\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"no"
  },
  {
    "path": "nomad_jobs/core-infra/coredns/README.md",
    "chars": 146,
    "preview": "### Coredns\nyou can place extra configuration for coredns in the consul kv store at `apps/coredns/corefile` and it will "
  },
  {
    "path": "nomad_jobs/core-infra/coredns/nomad.job",
    "chars": 5507,
    "preview": "job \"coredns\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type = \"service\"\n  priority = 100\n\n  meta {\n      job_fi"
  },
  {
    "path": "nomad_jobs/core-infra/github-runner/nomad.job",
    "chars": 2316,
    "preview": "job \"github-runner\" {\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jobs/core-infr"
  },
  {
    "path": "nomad_jobs/core-infra/haproxy/nomad.job",
    "chars": 3434,
    "preview": "job \"haproxy\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type = \"system\"\n\n  meta {\n      job_file = \"nomad_jobs/c"
  },
  {
    "path": "nomad_jobs/core-infra/iscsi-csi-plugin/controller.job",
    "chars": 3770,
    "preview": "job \"democratic-csi-iscsi-controller\" {\n  \n  meta {\n  job_file = \"nomad_jobs/core-infra/iscsi-csi-plugin/controller.job\""
  },
  {
    "path": "nomad_jobs/core-infra/iscsi-csi-plugin/node.job",
    "chars": 5021,
    "preview": "job \"democratic-csi-iscsi-node\" {\n  \n  meta {\n  job_file = \"nomad_jobs/core-infra/iscsi-csi-plugin/node.job\"\n  }\ndatacen"
  },
  {
    "path": "nomad_jobs/core-infra/keepalived/TODO.md",
    "chars": 1346,
    "preview": "# Keepalived Improvements TODO\n\n## Problem\nThe osixia/keepalived image uses environment variables (env.yaml) to generate"
  },
  {
    "path": "nomad_jobs/core-infra/keepalived/nomad.job",
    "chars": 1291,
    "preview": "job \"keepalived\" {\n  datacenters = [\"dc1\"]\n  type        = \"system\"\n  priority    = 100\n\n  meta {\n      job_file = \"noma"
  },
  {
    "path": "nomad_jobs/core-infra/nfs-csi-plugin/controller.job",
    "chars": 780,
    "preview": "job \"plugin-nfs-controller\" {\n  \n  meta {\n  job_file = \"nomad_jobs/core-infra/nfs-csi-plugin/controller.job\"\n  }\ndatacen"
  },
  {
    "path": "nomad_jobs/core-infra/nfs-csi-plugin/nodes.job",
    "chars": 1010,
    "preview": "job \"plugin-nfs-nodes\" {\n  \n  meta {\n  job_file = \"nomad_jobs/core-infra/nfs-csi-plugin/nodes.job\"\n  }\ndatacenters = [\"d"
  },
  {
    "path": "nomad_jobs/core-infra/pihole/nomad.job",
    "chars": 2061,
    "preview": "job \"pihole\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n  priority    = 100\n\n  meta {\n   "
  },
  {
    "path": "nomad_jobs/core-infra/pihole/volume.hcl",
    "chars": 345,
    "preview": "id           = \"pihole3\"\nexternal_id  = \"pihole3\"\nname         = \"pihole3\"\ntype         = \"csi\"\nplugin_id    = \"org.demo"
  },
  {
    "path": "nomad_jobs/core-infra/smtp/nomad.job",
    "chars": 1310,
    "preview": "job \"smtp\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_j"
  },
  {
    "path": "nomad_jobs/core-infra/tailscale/nomad.job",
    "chars": 1718,
    "preview": "job \"tailscale\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"no"
  },
  {
    "path": "nomad_jobs/core-infra/tailscale/volume.hcl",
    "chars": 354,
    "preview": "id           = \"tailscale2\"\nexternal_id  = \"tailscale2\"\nname         = \"tailscale2\"\ntype         = \"csi\"\nplugin_id    = "
  },
  {
    "path": "nomad_jobs/core-infra/tailscale-este/nomad.job",
    "chars": 1980,
    "preview": "job \"tailscale-este\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file "
  },
  {
    "path": "nomad_jobs/core-infra/tailscale-este/volume.hcl",
    "chars": 366,
    "preview": "id           = \"tailscale-este\"\nexternal_id  = \"tailscale-este\"\nname         = \"tailscale-este\"\ntype         = \"csi\"\nplu"
  },
  {
    "path": "nomad_jobs/core-infra/traefik/config/consul-catalog.yml",
    "chars": 662,
    "preview": "# Enable Rancher Provider.\nproviders:\n  consulcatalog:\n\n    # Expose Consul Catalog services by default in Traefik.\n    "
  },
  {
    "path": "nomad_jobs/core-infra/traefik/config/consul.yml",
    "chars": 662,
    "preview": "# Enable Rancher Provider.\nproviders:\n  consulcatalog:\n\n    # Expose Consul Catalog services by default in Traefik.\n    "
  },
  {
    "path": "nomad_jobs/core-infra/traefik/config/traefik.toml",
    "chars": 5467,
    "preview": "################################################################\n#\n# Configuration sample for Traefik v2.\n#\n# For Traefi"
  },
  {
    "path": "nomad_jobs/core-infra/traefik/config/traefik.toml.new",
    "chars": 4336,
    "preview": "################################################################\n#\n# Configuration sample for Traefik v2.\n#\n# For Traefi"
  },
  {
    "path": "nomad_jobs/core-infra/traefik/config/traefik.toml.test",
    "chars": 2401,
    "preview": "################################################################\n# Entrypoints configuration\n###########################"
  },
  {
    "path": "nomad_jobs/core-infra/traefik/nomad.job",
    "chars": 6300,
    "preview": "job \"traefik\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type = \"service\"\n  meta {\n      job_file = \"nomad_jobs/c"
  },
  {
    "path": "nomad_jobs/core-infra/traefik-forward-auth/nomad.job",
    "chars": 3044,
    "preview": "job \"traefik-forward-auth\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job"
  },
  {
    "path": "nomad_jobs/core-infra/vault/secrets_template.yaml",
    "chars": 1290,
    "preview": "backends:\n- type: generic\n  path: secret\n  description: secrets\n\nsecrets:\n  - path: /secret/homelab/couchpotato\n    valu"
  },
  {
    "path": "nomad_jobs/gaming/minecraft-1.21/nomad.job",
    "chars": 3203,
    "preview": "job \"minecraft-1-21\" {\n  region      = var.region\n  datacenters = [\"minecraft\"]\n  type        = \"service\"\n\n  meta {\n    "
  },
  {
    "path": "nomad_jobs/gaming/minecraft-avaritia/nomad.job",
    "chars": 3598,
    "preview": "job \"minecraft-avaritia\" {\n  region      = var.region\n  datacenters = [\"minecraft\"]\n  type        = \"service\"\n\n  meta {\n"
  },
  {
    "path": "nomad_jobs/gaming/minecraft-axiom/nomad.job",
    "chars": 4318,
    "preview": "job \"minecraft-axiom\" {\n  region      = var.region\n  datacenters = [\"minecraft\"]\n  type        = \"service\"\n\n  meta {\n   "
  },
  {
    "path": "nomad_jobs/gaming/minecraft-fiskheroes/nomad.job",
    "chars": 2887,
    "preview": "job \"minecraft-fiskheroes\" {\n  region      = var.region\n  datacenters = [\"minecraft\"]\n  type        = \"service\"\n\n  meta "
  },
  {
    "path": "nomad_jobs/gaming/minecraft-forge/nomad.job",
    "chars": 3091,
    "preview": "job \"minecraft-forge\" {\n  region      = var.region\n  datacenters = [\"minecraft\"]\n  type        = \"service\"\n\n  meta {\n   "
  },
  {
    "path": "nomad_jobs/media-stack/audioserve/nomad.job",
    "chars": 1687,
    "preview": "job \"audioserve\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"n"
  },
  {
    "path": "nomad_jobs/media-stack/flaresolverr/nomad.job",
    "chars": 1051,
    "preview": "job \"flaresolverr\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = "
  },
  {
    "path": "nomad_jobs/media-stack/jackett/nomad.job",
    "chars": 1525,
    "preview": "job \"jackett\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"noma"
  },
  {
    "path": "nomad_jobs/media-stack/lazylibrarian/nomad.job",
    "chars": 1594,
    "preview": "job \"lazylibrarian\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file ="
  },
  {
    "path": "nomad_jobs/media-stack/lidarr/nomad.job",
    "chars": 3405,
    "preview": "job \"lidarr\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad"
  },
  {
    "path": "nomad_jobs/media-stack/lidarr/volume.hcl",
    "chars": 347,
    "preview": "id           = \"lidarr2\"\nexternal_id  = \"lidarr2\"\nname         = \"lidarr2\"\ntype         = \"csi\"\nplugin_id    = \"org.demo"
  },
  {
    "path": "nomad_jobs/media-stack/lidify/nomad.job",
    "chars": 2006,
    "preview": "job \"lidify\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad_j"
  },
  {
    "path": "nomad_jobs/media-stack/maintainerr/nomad.job",
    "chars": 1440,
    "preview": "job \"maintainerr\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"no"
  },
  {
    "path": "nomad_jobs/media-stack/mediasage/nomad.job",
    "chars": 2236,
    "preview": "job \"mediasage\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"noma"
  },
  {
    "path": "nomad_jobs/media-stack/multi-scrobbler/nomad.job",
    "chars": 2959,
    "preview": "job \"multi-scrobbler\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file ="
  },
  {
    "path": "nomad_jobs/media-stack/navidrome/nomad.job",
    "chars": 1720,
    "preview": "job \"navidrome\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"no"
  },
  {
    "path": "nomad_jobs/media-stack/navidrome/volume.hcl",
    "chars": 353,
    "preview": "id           = \"navidrome\"\nexternal_id  = \"navidrome\"\nname         = \"navidrome\"\ntype         = \"csi\"\nplugin_id    = \"or"
  },
  {
    "path": "nomad_jobs/media-stack/ombi/nomad.job",
    "chars": 1809,
    "preview": "job \"ombi\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_j"
  },
  {
    "path": "nomad_jobs/media-stack/ombi/volume.hcl",
    "chars": 336,
    "preview": "id           = \"ombi\"\nexternal_id  = \"ombi\"\nname         = \"ombi\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-cs"
  },
  {
    "path": "nomad_jobs/media-stack/overseerr/nomad.job",
    "chars": 2310,
    "preview": "job \"overseerr\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"noma"
  },
  {
    "path": "nomad_jobs/media-stack/overseerr/volume.hcl",
    "chars": 349,
    "preview": "id           = \"overseerr\"\nexternal_id  = \"overseerr\"\nname         = \"overseerr\"\ntype         = \"csi\"\nplugin_id    = \"or"
  },
  {
    "path": "nomad_jobs/media-stack/plex/nomad.job",
    "chars": 4306,
    "preview": "job \"plex\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n  priority    = 80\n\n  meta {\n      "
  },
  {
    "path": "nomad_jobs/media-stack/plex/volume.hcl",
    "chars": 405,
    "preview": "id           = \"plex-database\"\nexternal_id  = \"plex-database\"\nname         = \"plex-database\"\ntype         = \"csi\"\nplugin"
  },
  {
    "path": "nomad_jobs/media-stack/prowlarr/nomad.job",
    "chars": 2361,
    "preview": "job \"prowlarr\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \""
  },
  {
    "path": "nomad_jobs/media-stack/prowlarr/volume.hcl",
    "chars": 347,
    "preview": "id           = \"prowlarr\"\nexternal_id  = \"prowlarr\"\nname         = \"prowlarr\"\ntype         = \"csi\"\nplugin_id    = \"org.d"
  },
  {
    "path": "nomad_jobs/media-stack/qbittorrent/nomad.job",
    "chars": 3251,
    "preview": "job \"qbittorrent\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file "
  },
  {
    "path": "nomad_jobs/media-stack/radarr/nomad.job",
    "chars": 3096,
    "preview": "job \"radarr\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad"
  },
  {
    "path": "nomad_jobs/media-stack/radarr/volume.hcl",
    "chars": 346,
    "preview": "id           = \"radarr2\"\nexternal_id  = \"radarr2\"\nname         = \"radarr2\"\ntype         = \"csi\"\nplugin_id    = \"org.demo"
  },
  {
    "path": "nomad_jobs/media-stack/requestrr/nomad.job",
    "chars": 1377,
    "preview": "job \"requestrr\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"noma"
  },
  {
    "path": "nomad_jobs/media-stack/sabnzbd/nomad.job",
    "chars": 2289,
    "preview": "job \"sabnzbd\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"noma"
  },
  {
    "path": "nomad_jobs/media-stack/sickchill/nomad.job",
    "chars": 1658,
    "preview": "job \"sickchill\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"no"
  },
  {
    "path": "nomad_jobs/media-stack/sonarr/nomad.job",
    "chars": 2885,
    "preview": "job \"sonarr\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad"
  },
  {
    "path": "nomad_jobs/media-stack/synclounge/nomad.job",
    "chars": 2037,
    "preview": "job \"synclounge\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"n"
  },
  {
    "path": "nomad_jobs/media-stack/tautulli/nomad.job",
    "chars": 1421,
    "preview": "job \"tautulli\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nom"
  },
  {
    "path": "nomad_jobs/media-stack/tdarr/nomad.job",
    "chars": 2573,
    "preview": "job \"tdarr\" {\n  region      = var.region\n  datacenters = [\"cheese\"]\n  type        = \"service\"\n  priority    = 50\n\n  meta"
  },
  {
    "path": "nomad_jobs/media-stack/tdarr/volume.hcl",
    "chars": 340,
    "preview": "id           = \"tdarr\"\nexternal_id  = \"tdarr\"\nname         = \"tdarr\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic"
  },
  {
    "path": "nomad_jobs/misc/adb/nomad.job",
    "chars": 2168,
    "preview": "job \"adb\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jo"
  },
  {
    "path": "nomad_jobs/misc/gcp-dns-updater/Dockerfile",
    "chars": 533,
    "preview": "# Use an official Python runtime as a parent image\nFROM python:3.14-slim\n\n# Set the working directory in the container\nW"
  },
  {
    "path": "nomad_jobs/misc/gcp-dns-updater/README.md",
    "chars": 5626,
    "preview": "# GCP Dynamic DNS Updater Service\n\nThis service periodically checks the public IPv4 address of the node it's running on "
  },
  {
    "path": "nomad_jobs/misc/gcp-dns-updater/nomad.job",
    "chars": 14404,
    "preview": "job \"gcp-dns-updater\" {\n  \n  meta {\n    job_file = \"nomad_jobs/misc/gcp-dns-updater/nomad.job\"\n    version  = \"6\"  // Ad"
  },
  {
    "path": "nomad_jobs/misc/gcp-dns-updater/requirements.txt",
    "chars": 37,
    "preview": "google-cloud-dns\nrequests\ngoogle-auth"
  },
  {
    "path": "nomad_jobs/misc/gcp-dns-updater/update_dns.py",
    "chars": 12127,
    "preview": "\nimport os\nimport requests\nimport logging\nimport sys\nimport base64\nimport json\nimport time  # Moved import to top\n\n# Imp"
  },
  {
    "path": "nomad_jobs/misc/gitea/nomad.job",
    "chars": 2799,
    "preview": "job \"gitea\" {\n  \n  meta {\n  job_file = \"nomad_jobs/misc/gitea/nomad.job\"\n  }\nregion = var.region\n  datacenters = [\"dc1\"]"
  },
  {
    "path": "nomad_jobs/misc/linuxgsm/nomad.job",
    "chars": 1338,
    "preview": "job \"linuxgsm\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nom"
  },
  {
    "path": "nomad_jobs/misc/murmur/nomad.job",
    "chars": 19213,
    "preview": "job \"murmur\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad"
  },
  {
    "path": "nomad_jobs/misc/octoprint/nomad.job",
    "chars": 2128,
    "preview": "job \"octoprint\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"no"
  },
  {
    "path": "nomad_jobs/misc/uploader/nomad.job",
    "chars": 1468,
    "preview": "job \"uploader\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type = \"service\"\n  meta {\n      job_file = \"nomad_jobs/"
  },
  {
    "path": "nomad_jobs/observability/alertmanager/nomad.job",
    "chars": 3636,
    "preview": "job \"alertmanager\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type = \"service\"\n\n  meta {\n    job_file = \"nomad_jo"
  },
  {
    "path": "nomad_jobs/observability/alertmanager/volume.hcl",
    "chars": 324,
    "preview": "id        = \"alertmanager\"\nname      = \"alertmanager\"\ntype      = \"csi\"\nplugin_id = \"org.democratic-csi.iscsi\"\n\ncapacity"
  },
  {
    "path": "nomad_jobs/observability/blackbox-exporter/nomad.job",
    "chars": 2218,
    "preview": "job \"blackbox-exporter\" {\n  \n  meta {\n  job_file = \"nomad_jobs/observability/blackbox-exporter/nomad.job\"\n  }\nregion    "
  },
  {
    "path": "nomad_jobs/observability/grafana/nomad.job",
    "chars": 2663,
    "preview": "job \"grafana\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"noma"
  },
  {
    "path": "nomad_jobs/observability/grafana/volume.hcl",
    "chars": 345,
    "preview": "id           = \"grafana\"\nexternal_id  = \"grafana\"\nname         = \"grafana\"\ntype         = \"csi\"\nplugin_id    = \"org.demo"
  },
  {
    "path": "nomad_jobs/observability/loki/nomad.job",
    "chars": 3874,
    "preview": "job \"loki\" {\n  \n  meta {\n  job_file = \"nomad_jobs/observability/loki/nomad.job\"\n  }\nregion      = var.region\n  datacente"
  },
  {
    "path": "nomad_jobs/observability/loki/volume.hcl",
    "chars": 336,
    "preview": "id           = \"loki\"\nexternal_id  = \"loki\"\nname         = \"loki\"\ntype         = \"csi\"\nplugin_id    = \"org.democratic-cs"
  },
  {
    "path": "nomad_jobs/observability/oom-test/nomad.job",
    "chars": 2268,
    "preview": "job \"oom-test\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \""
  },
  {
    "path": "nomad_jobs/observability/prometheus/README.md",
    "chars": 167,
    "preview": "### Prometheus\nThis prometheus is configured to scrape any service launched with the service tag `metrics` in addition t"
  },
  {
    "path": "nomad_jobs/observability/prometheus/nomad.job",
    "chars": 10950,
    "preview": "job \"prometheus\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file ="
  },
  {
    "path": "nomad_jobs/observability/prometheus/volume.hcl",
    "chars": 361,
    "preview": "id           = \"prometheus\"\nexternal_id  = \"prometheus\"\nname         = \"prometheus\"\ntype         = \"csi\"\nplugin_id    = "
  },
  {
    "path": "nomad_jobs/observability/telegraf/nomad.job",
    "chars": 1559,
    "preview": "job \"telegraf\" {\n  region = var.region\n  datacenters = [\"dc1\", \"public\", \"system\"]\n  type = \"system\"\n  priority = 100\n  "
  },
  {
    "path": "nomad_jobs/observability/truenas-graphite-exporter/nomad.job",
    "chars": 1479,
    "preview": "job \"truenas-graphite-exporter\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {"
  },
  {
    "path": "nomad_jobs/observability/vector/nomad.job",
    "chars": 3098,
    "preview": "job \"vector\" {\n  \n  meta {\n  job_file = \"nomad_jobs/observability/vector/nomad.job\"\n  }\ndatacenters = [\"dc1\"]\n  region ="
  },
  {
    "path": "nomad_jobs/personal-cloud/actualbudget/nomad.job",
    "chars": 2142,
    "preview": "job \"actualbudget\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = "
  },
  {
    "path": "nomad_jobs/personal-cloud/actualbudget/volume.hcl",
    "chars": 403,
    "preview": "id           = \"actualbudget-data\"\nexternal_id  = \"actualbudget-data\"\nname         = \"actualbudget-data\"\ntype         = "
  },
  {
    "path": "nomad_jobs/personal-cloud/bitwarden/nomad.job",
    "chars": 1460,
    "preview": "job \"bitwarden\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"no"
  },
  {
    "path": "nomad_jobs/personal-cloud/nextcloud/nomad.job",
    "chars": 6196,
    "preview": "job \"nextcloud\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"no"
  },
  {
    "path": "nomad_jobs/personal-cloud/ntfy/nomad.job",
    "chars": 6121,
    "preview": "job \"ntfy\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"noma"
  },
  {
    "path": "nomad_jobs/personal-cloud/ntfy/volume.hcl",
    "chars": 350,
    "preview": "id           = \"ntfy-data\"\nexternal_id  = \"ntfy-data\"\nname         = \"ntfy-data\"\ntype         = \"csi\"\nplugin_id    = \"or"
  },
  {
    "path": "nomad_jobs/personal-cloud/paperless/nomad.job",
    "chars": 3975,
    "preview": "job \"paperless\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"no"
  },
  {
    "path": "nomad_jobs/personal-cloud/radicale/nomad.job",
    "chars": 1780,
    "preview": "job \"radicale\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nom"
  },
  {
    "path": "nomad_jobs/security/suricata/nomad.job",
    "chars": 5810,
    "preview": "job \"suricata\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"system\"\n  priority    = 100\n\n  meta"
  },
  {
    "path": "nomad_jobs/security/suricata-update/nomad.job",
    "chars": 1022,
    "preview": "job \"suricata-update\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"batch\"\n  priority    = 80\n\n "
  },
  {
    "path": "nomad_jobs/security/wazuh-agent/nomad.job",
    "chars": 8918,
    "preview": "job \"wazuh-agent\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"system\"\n  priority    = 100\n\n  m"
  },
  {
    "path": "nomad_jobs/security/wazuh-server/nomad.job",
    "chars": 21468,
    "preview": "job \"wazuh-server\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  constraint {\n    at"
  },
  {
    "path": "nomad_jobs/security/wazuh-server/volume-dashboard.hcl",
    "chars": 305,
    "preview": "type = \"csi\"\nid = \"wazuh-dashboard\"\nname = \"wazuh-dashboard\"\nplugin_id = \"org.democratic-csi.iscsi\"\n\ncapability {\n  acce"
  },
  {
    "path": "nomad_jobs/security/wazuh-server/volume-indexer.hcl",
    "chars": 301,
    "preview": "type = \"csi\"\nid = \"wazuh-indexer\"\nname = \"wazuh-indexer\"\nplugin_id = \"org.democratic-csi.iscsi\"\n\ncapability {\n  access_m"
  },
  {
    "path": "nomad_jobs/security/wazuh-server/volume-manager.hcl",
    "chars": 301,
    "preview": "type = \"csi\"\nid = \"wazuh-manager\"\nname = \"wazuh-manager\"\nplugin_id = \"org.democratic-csi.iscsi\"\n\ncapability {\n  access_m"
  },
  {
    "path": "nomad_jobs/smart-home/deconz/nomad.job",
    "chars": 2802,
    "preview": "job \"deconz\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad"
  },
  {
    "path": "nomad_jobs/smart-home/deconz/volume.hcl",
    "chars": 342,
    "preview": "id           = \"deconz\"\nexternal_id  = \"deconz\"\nname         = \"deconz\"\ntype         = \"csi\"\nplugin_id    = \"org.democra"
  },
  {
    "path": "nomad_jobs/smart-home/home-assistant/nomad.job",
    "chars": 1780,
    "preview": "job \"home-assistant\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file "
  },
  {
    "path": "nomad_jobs/smart-home/home-assistant/volume.hcl",
    "chars": 366,
    "preview": "id           = \"home-assistant\"\nexternal_id  = \"home-assistant\"\nname         = \"home-assistant\"\ntype         = \"csi\"\nplu"
  },
  {
    "path": "nomad_jobs/smart-home/mqtt/nomad.job",
    "chars": 1360,
    "preview": "job \"mosquitto\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"no"
  },
  {
    "path": "nomad_jobs/smart-home/owntracks-recorder/nomad.job",
    "chars": 2219,
    "preview": "job \"owntracks-recorder\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_f"
  },
  {
    "path": "nomad_jobs/smart-home/zigbee2mqtt/nomad.job",
    "chars": 1219,
    "preview": "job \"zigbee2mqtt\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  constraint {\n    attribut"
  },
  {
    "path": "nomad_jobs/storage-backends/docker-registry/nomad.job",
    "chars": 1797,
    "preview": "job \"docker-registry\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file"
  },
  {
    "path": "nomad_jobs/storage-backends/docker-registry/volume.hcl",
    "chars": 416,
    "preview": "id           = \"docker-registry-data\"\nexternal_id  = \"docker-registry-data\"\nname         = \"docker-registry-data\"\ntype  "
  },
  {
    "path": "nomad_jobs/storage-backends/mariadb/nomad.job",
    "chars": 1272,
    "preview": "job \"mariadb\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"noma"
  },
  {
    "path": "nomad_jobs/storage-backends/neo4j/nomad.job",
    "chars": 1811,
    "preview": "job \"neo4j\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/stor"
  },
  {
    "path": "nomad_jobs/storage-backends/neo4j/setup.job",
    "chars": 955,
    "preview": "job \"neo4j-setup\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type = \"batch\"\n\n  meta {\n    job_file = \"nomad_jobs/"
  },
  {
    "path": "nomad_jobs/storage-backends/neo4j/volume.hcl",
    "chars": 363,
    "preview": "# Neo4j graph database storage volume\nid           = \"neo4j-data\"\nname         = \"neo4j-data\"\ntype         = \"csi\"\nplugi"
  },
  {
    "path": "nomad_jobs/storage-backends/pgvector/nomad.job",
    "chars": 1484,
    "preview": "job \"pgvector\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nomad"
  },
  {
    "path": "nomad_jobs/storage-backends/pgvector/pgvector-setup.job",
    "chars": 1278,
    "preview": "job \"pgvector-setup\" {\n  type = \"batch\"\n  datacenters = [\"dc1\"]\n  \n  meta {\n    job_file = \"nomad_jobs/storage-backends/"
  },
  {
    "path": "nomad_jobs/storage-backends/postgres/nomad.job",
    "chars": 1531,
    "preview": "job \"postgres\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nom"
  },
  {
    "path": "nomad_jobs/storage-backends/postgres/postgres-setup.job",
    "chars": 2059,
    "preview": "job \"postgres-setup\" {\n  type = \"batch\"\n  datacenters = [\"dc1\"]\n  \n  meta {\n      job_file = \"nomad_jobs/storage-backend"
  },
  {
    "path": "nomad_jobs/storage-backends/qdrant/nomad.job",
    "chars": 1512,
    "preview": "job \"qdrant\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type = \"service\"\n\n  meta {\n    job_file = \"nomad_jobs/sto"
  },
  {
    "path": "nomad_jobs/storage-backends/qdrant/volume.hcl",
    "chars": 369,
    "preview": "# Qdrant vector database storage volume\nid           = \"qdrant-data\"\nname         = \"qdrant-data\"\ntype         = \"csi\"\np"
  },
  {
    "path": "nomad_jobs/storage-backends/redis/nomad.job",
    "chars": 2181,
    "preview": "job \"redis\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"nom"
  },
  {
    "path": "nomad_jobs/storage-backends/redis/volume.hcl",
    "chars": 382,
    "preview": "id           = \"redis-data\"\nexternal_id  = \"redis-data\"\nname         = \"redis-data\"\ntype         = \"csi\"\nplugin_id    = "
  },
  {
    "path": "nomad_jobs/storage-backends/volumes/nfs-example.hcl",
    "chars": 397,
    "preview": "type = \"csi\"\nid = \"example\"\nname = \"example\"\nplugin_id = \"nfsofficial\"\nexternal_id = \"example\"\ncapability {\n  access_mod"
  },
  {
    "path": "nomad_jobs/system/docker-cleanup/nomad.job",
    "chars": 1747,
    "preview": "job \"docker-cleanup\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type = \"sysbatch\"\n\n  meta {\n    job_file = \"nomad"
  },
  {
    "path": "nomad_jobs/web-apps/alertmanager-dashboard/nomad.job",
    "chars": 1132,
    "preview": "job \"alertmanager-dashboard\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      j"
  },
  {
    "path": "nomad_jobs/web-apps/firecrawl/nomad.job",
    "chars": 1168,
    "preview": "job \"firecrawl\" {\n  region      = \"global\"\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \"n"
  },
  {
    "path": "nomad_jobs/web-apps/heimdall/nomad.job",
    "chars": 1784,
    "preview": "job \"heimdall\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nom"
  },
  {
    "path": "nomad_jobs/web-apps/homepage/nomad.job",
    "chars": 6587,
    "preview": "job \"homepage\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \""
  },
  {
    "path": "nomad_jobs/web-apps/kideo/nomad.job",
    "chars": 2765,
    "preview": "job \"kideo\" {\n  region      = var.region\n  datacenters = [\"cheese\"]\n  type        = \"service\"\n\n  meta {\n    job_file = \""
  },
  {
    "path": "nomad_jobs/web-apps/minecraftmath/nomad.job",
    "chars": 1924,
    "preview": "job \"minecraftmath\" {\n  region      = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n    job_fil"
  },
  {
    "path": "nomad_jobs/web-apps/wordpress/nomad.job",
    "chars": 2962,
    "preview": "job \"wordpress\" {\n  \n  meta {\n  job_file = \"nomad_jobs/web-apps/wordpress/nomad.job\"\n  }\nregion = var.region\n  datacente"
  },
  {
    "path": "nomad_jobs/web-apps/www/Dockerfile",
    "chars": 96,
    "preview": "FROM nginx\nADD main.jpg /usr/local/www/nginx/images/main.jpg\nCMD [\"nginx\", \"-g\", \"daemon off;\"]\n"
  },
  {
    "path": "nomad_jobs/web-apps/www/nomad.job",
    "chars": 2859,
    "preview": "job \"www\" {\n  region = var.region\n  datacenters = [\"dc1\"]\n  type        = \"service\"\n\n  meta {\n      job_file = \"nomad_jo"
  },
  {
    "path": "renovate.json",
    "chars": 1660,
    "preview": "{\n  \"$schema\": \"https://docs.renovatebot.com/renovate-schema.json\",\n  \"extends\": [\n    \"config:recommended\"\n  ],\n  \"depe"
  },
  {
    "path": "services/beefcake.json",
    "chars": 276,
    "preview": "{\n  \"Service\": {\n    \"Name\": \"beefcake\",\n    \"ID\": \"beefcake-instance-1\",\n    \"Address\": \"192.168.50.208\",\n    \"Port\": 8"
  }
]

About this extraction

This page contains the full source code of the perrymanuk/hashi-homelab GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 180 files (450.4 KB), approximately 128.3k tokens, and a symbol index of 13 extracted functions, classes, methods, constants, and types. You can use it with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input, and you can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — a free GitHub repo-to-text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!