Repository: NixOS/nixos-org-configurations
Branch: main
Commit: f7fcb33b303e
Files: 381
Total size: 799.2 KB
Directory structure:
gitextract_0_mr47ix/
├── .github/
│ ├── CODEOWNERS
│ ├── ISSUE_TEMPLATE/
│ │ ├── feature_request.md
│ │ └── service_disruption.md
│ ├── scripts/
│ │ └── format-and-absorb.sh
│ └── workflows/
│ ├── ci.yml
│ ├── dns-apply.yml
│ ├── dns-preview.yml
│ ├── format-pr.yml
│ └── zizmor.yml
├── .gitignore
├── LICENSE
├── README.md
├── build/
│ ├── .envrc
│ ├── colmena.nix
│ ├── colmena.sh
│ ├── common.nix
│ ├── datadog/
│ │ ├── hydra.nix
│ │ └── hydra.py
│ ├── flake-module.nix
│ ├── haumea/
│ │ ├── boot.nix
│ │ ├── default.nix
│ │ ├── network.nix
│ │ ├── postgresql.nix
│ │ └── zrepl.yml
│ ├── hydra-proxy.nix
│ ├── hydra.nix
│ ├── id_buildfarm.pub
│ ├── mimas/
│ │ ├── boot.nix
│ │ ├── default.nix
│ │ ├── disko.nix
│ │ ├── firewall.nix
│ │ └── network.nix
│ ├── nginx-error-pages/
│ │ ├── 403.html
│ │ ├── 502.html
│ │ └── 503.html
│ ├── pluto/
│ │ ├── boot.nix
│ │ ├── default.nix
│ │ ├── disko.nix
│ │ ├── grafana.nix
│ │ ├── network.nix
│ │ ├── nginx.nix
│ │ ├── nixos-metrics.nix
│ │ └── prometheus/
│ │ ├── alertmanager.nix
│ │ ├── default.nix
│ │ └── exporters/
│ │ ├── anubis.nix
│ │ ├── blackbox.nix
│ │ ├── channel-exporter.py
│ │ ├── channel.nix
│ │ ├── domain.nix
│ │ ├── fastly.nix
│ │ ├── github.nix
│ │ ├── hydra-queue-runner-reexporter.py
│ │ ├── hydra.nix
│ │ ├── json.nix
│ │ ├── matrix-synapse.nix
│ │ ├── nixos.nix
│ │ ├── node.nix
│ │ ├── owncast.nix
│ │ ├── postgresql.nix
│ │ ├── rasdaemon.nix
│ │ ├── sql.nix
│ │ ├── storagebox.nix
│ │ ├── up.nix
│ │ ├── zfs.nix
│ │ └── zrepl.nix
│ ├── scripts/
│ │ ├── nix-mac-installer.sh
│ │ └── nix-mac-nuke.sh
│ ├── secrets/
│ │ ├── alertmanager-matrix-forwarder.age
│ │ ├── alertmanager-oauth2-proxy-env.age
│ │ ├── eager-heisenberg-queue-runner-token.age
│ │ ├── elated-minsky-queue-runner-token.age
│ │ ├── enormous-catfish-queue-runner-token.age
│ │ ├── fastly-exporter-env.age
│ │ ├── goofy-hopcroft-queue-runner-token.age
│ │ ├── grafana-secret-key.age
│ │ ├── growing-jennet-queue-runner-token.age
│ │ ├── hopeful-rivest-queue-runner-token.age
│ │ ├── hydra-aws-credentials.age
│ │ ├── hydra-github-client-secret.age
│ │ ├── hydra-mirror-aws-credentials.age
│ │ ├── hydra-mirror-git-credentials.age
│ │ ├── intense-heron-queue-runner-token.age
│ │ ├── kind-lumiere-queue-runner-token.age
│ │ ├── maximum-snail-queue-runner-token.age
│ │ ├── norwegian-blue-queue-runner-token.age
│ │ ├── owncast-admin-password.age
│ │ ├── pluto-backup-secret.age
│ │ ├── pluto-backup-ssh-key.age
│ │ ├── rfc39-credentials.age
│ │ ├── rfc39-github.age
│ │ ├── rfc39-record-push.age
│ │ ├── sleepy-brown-queue-runner-token.age
│ │ ├── storagebox-exporter-token.age
│ │ ├── sweeping-filly-queue-runner-token.age
│ │ ├── tarball-mirror-aws-credentials.age
│ │ └── zrepl-ssh-key.age
│ ├── secrets.nix
│ └── titan/
│ ├── boot.nix
│ ├── default.nix
│ ├── disko.nix
│ ├── network.nix
│ ├── postgresql.nix
│ ├── zrepl.nix
│ └── zrepl.yml
├── builders/
│ ├── boot/
│ │ └── efi-grub.nix
│ ├── common/
│ │ ├── hardening.nix
│ │ ├── hydra-queue-builder.nix
│ │ ├── network.nix
│ │ ├── nix.nix
│ │ ├── node-exporter.nix
│ │ ├── ssh.nix
│ │ ├── system.nix
│ │ ├── tools.nix
│ │ ├── update.nix
│ │ └── users.nix
│ ├── disk-layouts/
│ │ └── efi-zfs-raid0.nix
│ ├── flake-module.nix
│ ├── instances/
│ │ ├── elated-minsky.nix
│ │ ├── goofy-hopcroft.nix
│ │ ├── hopeful-rivest.nix
│ │ └── sleepy-brown.nix
│ ├── network/
│ │ └── autoconfig.nix
│ └── profiles/
│ ├── hetzner-ax101r.nix
│ ├── hetzner-rx170.nix
│ └── hetzner-rx220.nix
├── channels.nix
├── checks/
│ └── flake-module.nix
├── dns/
│ ├── .envrc
│ ├── creds.json
│ ├── dnsconfig.js
│ ├── flake-module.nix
│ ├── nix.dev.js
│ ├── nixcon.org.js
│ ├── nixos.org.js
│ └── ofborg.org.js
├── docs/
│ ├── inventory.md
│ └── meeting-notes/
│ ├── 2024-01-11.md
│ ├── 2024-01-25.md
│ ├── 2024-02-08.md
│ ├── 2024-02-22.md
│ ├── 2024-03-07.md
│ ├── 2024-03-21.md
│ ├── 2024-04-18.md
│ ├── 2024-05-30.md
│ ├── 2024-06-13.md
│ ├── 2024-06-27.md
│ ├── 2024-11-14.md
│ ├── 2025-04-03.md
│ ├── 2025-04-17.md
│ ├── 2025-05-01.md
│ ├── 2025-05-15.md
│ ├── 2025-05-29.md
│ └── 2025-06-12.md
├── flake.nix
├── formatter/
│ └── flake-module.nix
├── lib/
│ └── service-order.nix
├── macs/
│ ├── README.md
│ ├── common.nix
│ ├── flake-module.nix
│ ├── hydra-queue-builder.nix
│ ├── mac-exec
│ ├── mac-update
│ └── profiles/
│ ├── m1.nix
│ └── m2.large.nix
├── metrics/
│ └── fastly/
│ ├── README.md
│ ├── cron.sh
│ ├── flake.nix
│ ├── ingest-raw-logs.sh
│ ├── run-queries.sh
│ └── update-asn-list.sh
├── modules/
│ ├── backup.nix
│ ├── common.nix
│ ├── hydra-mirror.nix
│ ├── nftables.nix
│ ├── prometheus/
│ │ ├── default.nix
│ │ ├── nixos-exporter/
│ │ │ ├── default.nix
│ │ │ ├── prometheus_nixos_exporter/
│ │ │ │ └── __main__.py
│ │ │ └── pyproject.toml
│ │ └── system-version-exporter.sh
│ ├── rasdaemon.nix
│ ├── rfc39.nix
│ ├── tarball-mirror.nix
│ └── tarball-mirror.patch
├── non-critical-infra/
│ ├── .envrc
│ ├── .sops.yaml
│ ├── README.md
│ ├── colmena.sh
│ ├── flake-module.nix
│ ├── hosts/
│ │ ├── caliban/
│ │ │ ├── default.nix
│ │ │ ├── disko.nix
│ │ │ ├── hardware.nix
│ │ │ └── nixpkgs-swh.nix
│ │ ├── staging-hydra/
│ │ │ ├── bootstrap-staging-hydra.sh
│ │ │ ├── ca.crt
│ │ │ ├── default.nix
│ │ │ ├── disko.nix
│ │ │ ├── genca.sh
│ │ │ ├── hardware.nix
│ │ │ ├── hydra-proxy.nix
│ │ │ ├── hydra.nix
│ │ │ └── server.crt
│ │ └── umbriel/
│ │ ├── README.md
│ │ ├── default.nix
│ │ ├── disko.nix
│ │ └── hardware.nix
│ ├── modules/
│ │ ├── backup.nix
│ │ ├── common.nix
│ │ ├── draupnir.nix
│ │ ├── element-web.nix
│ │ ├── limesurvey.nix
│ │ ├── mailserver/
│ │ │ ├── README.md
│ │ │ ├── default.nix
│ │ │ ├── freescout.nix
│ │ │ ├── mailing-lists-options.nix
│ │ │ └── mailing-lists.nix
│ │ ├── matrix-synapse.nix
│ │ ├── nginx.nix
│ │ ├── owncast.nix
│ │ ├── postfix.nix
│ │ ├── postgresql.nix
│ │ └── vaultwarden.nix
│ ├── packages/
│ │ └── encrypt-email/
│ │ ├── default.nix
│ │ └── encrypt-email.py
│ └── secrets/
│ ├── 0x4A6F-hardware-email-address.umbriel
│ ├── 0x4A6F-moderation-email-address.umbriel
│ ├── DieracDelta-email-address.umbriel
│ ├── Ericson2314-email-address.umbriel
│ ├── ForsakenHarmony-email-address.umbriel
│ ├── Gabriella439-email-address.umbriel
│ ├── Kranzes-email-address.umbriel
│ ├── LeSuisse-email-address.umbriel
│ ├── MMesch-email-address.umbriel
│ ├── Mic92-email-address.umbriel
│ ├── Mic92-wiki-email-address.umbriel
│ ├── Nebucatnetzer-email-address.umbriel
│ ├── a-kenji-email-address.umbriel
│ ├── aleksana-email-address.umbriel
│ ├── andir-email-address.umbriel
│ ├── avocadoom-email-address.umbriel
│ ├── backup-secret.caliban
│ ├── backup-secret.umbriel
│ ├── bryanhonof-email-address.umbriel
│ ├── das-g-email-address.umbriel
│ ├── djacu-email-address.umbriel
│ ├── edef1c-email-address.umbriel
│ ├── edolstra-admin-email-address.umbriel
│ ├── edolstra-email-address.umbriel
│ ├── edolstra-foundation-email-address.umbriel
│ ├── edolstra-summer-email-address.umbriel
│ ├── elections-email-login.umbriel
│ ├── escherlies-email-address.umbriel
│ ├── finance-email-login.umbriel
│ ├── flyfloh-email-address.umbriel
│ ├── fmehta-email-address.umbriel
│ ├── foundation-email-login.umbriel
│ ├── freescout-app-key.umbriel
│ ├── fricklerhandwerk-email-address.umbriel
│ ├── gefla-email-address.umbriel
│ ├── gytis-ivaskevicius-email-address.umbriel
│ ├── hardware-email-login.umbriel
│ ├── hehongbo-xsa-email-address.umbriel
│ ├── hexa-email-login.umbriel
│ ├── hydra-aws-credentials.staging-hydra
│ ├── hydra-password.staging-hydra
│ ├── hydra-users.staging-hydra
│ ├── idabzo-email-address.umbriel
│ ├── infinisil-email-address.umbriel
│ ├── infinisil-nixcon-email-address.umbriel
│ ├── jfly-email-address.umbriel
│ ├── john-rodewald-email-address.umbriel
│ ├── jtojnar-email-address.umbriel
│ ├── kate-email-address.umbriel
│ ├── lach-xsa-email-address.umbriel
│ ├── lassulus-email-address.umbriel
│ ├── lassulus-nixcon-email-address.umbriel
│ ├── lassulus-wiki-email-address.umbriel
│ ├── limesurvey-encryption-key.caliban
│ ├── limesurvey-encryption-nonce.caliban
│ ├── matrix-synapse-secrets.caliban
│ ├── matrix-synapse-signing-key.caliban
│ ├── mjolnir-access-token.caliban
│ ├── mjolnir-password.caliban
│ ├── moderation-email-login.umbriel
│ ├── mweinelt-email-address.umbriel
│ ├── ners-email-address.umbriel
│ ├── ngi-nixos-org-email-login.umbriel
│ ├── nixcon-email-login.umbriel
│ ├── nixcon.org.mail.key.umbriel
│ ├── nixos.org.mail.key.umbriel
│ ├── nixpkgs-core-email-login.umbriel
│ ├── opendkim-private-key.caliban
│ ├── picnoir-email-address.umbriel
│ ├── postsrsd-secret.umbriel
│ ├── queue-runner-ca.key.staging-hydra
│ ├── queue-runner-server.key.staging-hydra
│ ├── ra33it0-email-address.umbriel
│ ├── ra33ito-email-address.umbriel
│ ├── ral-email-address.umbriel
│ ├── rbvermaa-email-address.umbriel
│ ├── refroni-email-address.umbriel
│ ├── refroni-nixcon-email-address.umbriel
│ ├── risicle-email-address.umbriel
│ ├── roberth-email-address.umbriel
│ ├── rosscomputerguy-email-address.umbriel
│ ├── securitytracker-noreply-email-login.umbriel
│ ├── sigmasquadron-xsa-email-address.umbriel
│ ├── signing-key.staging-hydra
│ ├── staging-hydra-hostkeys.yaml
│ ├── steering-email-login.umbriel
│ ├── storagebox-ssh-key.caliban
│ ├── storagebox-ssh-key.umbriel
│ ├── test-sender-email-login.umbriel
│ ├── therealpxc-email-address.umbriel
│ ├── tomberek-email-address.umbriel
│ ├── uep-email-address.umbriel
│ ├── vaultwarden-env.caliban
│ ├── vcunat-email-address.umbriel
│ ├── winterqt-email-address.umbriel
│ ├── ysndr-email-address.umbriel
│ ├── zimbatm-admin-email-address.umbriel
│ ├── zimbatm-email-address.umbriel
│ └── zmberber-email-address.umbriel
├── pyproject.toml
├── renovate.json
├── ssh-keys.nix
├── terraform/
│ ├── .envrc
│ ├── .envrc.local.template
│ ├── .gitignore
│ ├── README.md
│ ├── artifacts.tf
│ ├── aws-config
│ ├── cache/
│ │ ├── diagnostic.sh
│ │ ├── index.html
│ │ ├── nix-cache-info
│ │ └── s3-authn.vcl
│ ├── cache-bucket/
│ │ ├── main.tf
│ │ └── providers.tf
│ ├── cache-staging/
│ │ ├── diagnostic.sh
│ │ ├── index.html
│ │ ├── new-cache-test-file
│ │ ├── nix-cache-info
│ │ ├── old-cache-test-file
│ │ └── s3-authn.vcl
│ ├── cache-staging.tf
│ ├── cache.tf
│ ├── cache_inventory.tf
│ ├── cache_log.tf
│ ├── channels.tf
│ ├── flake-module.nix
│ ├── locals.tf
│ ├── netlify_sites.tf
│ ├── nixpkgs-tarballs/
│ │ └── index.html
│ ├── nixpkgs-tarballs.tf
│ ├── providers.tf
│ ├── releases.tf
│ ├── releases_inventory.tf
│ ├── s3_listing.html.tpl
│ ├── terraform.tf
│ ├── tf.sh
│ └── wiki-test.tf
└── terraform-iam/
├── .envrc
├── .gitignore
├── README.md
├── archeologist.tf
├── assume_github_actions_policy_document/
│ └── main.tf
├── assume_identity_center_permission_policy/
│ └── main.tf
├── aws-config
├── cache-staging.tf
├── cache.tf
├── cache_eventbridge.tf
├── fastlylog/
│ ├── main.tf
│ ├── outputs.tf
│ └── variables.tf
├── fastlylog.tf
├── iam_users.tf
├── locals.tf
├── nix_repo_oidc.tf
├── outputs.tf
├── providers.tf
├── terraform.tf
└── tf.sh
================================================
FILE CONTENTS
================================================
================================================
FILE: .github/CODEOWNERS
================================================
# Every directory containing configurations impacting the core infra needs a
# review from a member of core infra.
/.github/ @NixOS/infra-build
/build/ @NixOS/infra-build
/builders/ @NixOS/infra-build
/dns/ @NixOS/infra-build
/lib/ @NixOS/infra-build
/macs/ @NixOS/infra-build
/metrics/ @NixOS/infra-build
/modules/ @NixOS/infra-build
/terraform-iam/ @NixOS/infra-build
/terraform/ @NixOS/infra-build
/channels.nix @NixOS/infra-build
/ssh-keys.nix @NixOS/infra-build
================================================
FILE: .github/ISSUE_TEMPLATE/feature_request.md
================================================
---
name: Feature request
about: Suggest an improvement for this project
title: ""
labels: enhancement
assignees: ""
---
**Is your feature request related to a problem? Please describe.**
<!--
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
-->
**Describe the solution you'd like**
<!--
A clear and concise description of what you want to happen.
-->
**Describe alternatives you've considered**
<!--
A clear and concise description of any alternative solutions or features you've considered.
-->
**Additional context**
<!--
Add any other context or screenshots about the feature request here.
-->
================================================
FILE: .github/ISSUE_TEMPLATE/service_disruption.md
================================================
---
name: Service disruption report
about: Use this to report service instabilities
title: "<service-name>: "
labels: bug
assignees: ""
---
**Affected service**
<!-- What service is affected? -->
**Describe the issue**
<!-- A clear and concise description of what the issue is. -->
**System information**
<!-- Relevant system versions. If it's a connectivity issue, `mtr` reports. -->
================================================
FILE: .github/scripts/format-and-absorb.sh
================================================
#!/usr/bin/env -S nix shell --inputs-from . nixpkgs#bash nixpkgs#git-absorb --command bash
# shellcheck shell=bash
set -euo pipefail
# This script runs nix fmt and git absorb to update a pull request
# It's designed to be run in a GitHub Actions workflow
echo "::group::Running nix fmt"
nix fmt
echo "::endgroup::"
echo "::group::Checking for changes"
if git diff --quiet; then
echo "No formatting changes needed"
exit 0
fi
echo "::endgroup::"
echo "::group::Running git absorb"
# Run git absorb with --force to automatically absorb changes
git add -A
# Create fixup commits
# Find the merge base to properly identify which commits can absorb changes
MERGE_BASE=$(git merge-base origin/main HEAD)
git absorb --force --base "$MERGE_BASE"
# Then do a non-interactive autosquash rebase with git identity set
export GIT_EDITOR=:
export GIT_SEQUENCE_EDITOR=:
export GIT_AUTHOR_NAME="github-actions[bot]"
export GIT_AUTHOR_EMAIL="github-actions[bot]@users.noreply.github.com"
export GIT_COMMITTER_NAME="github-actions[bot]"
export GIT_COMMITTER_EMAIL="github-actions[bot]@users.noreply.github.com"
git rebase -i --autosquash origin/main
echo "::endgroup::"
echo "::group::Pushing changes"
git push --force-with-lease
echo "::endgroup::"
echo "Successfully formatted code and absorbed changes!"
================================================
FILE: .github/workflows/ci.yml
================================================
name: CI
on:
  push:
    branches:
      - main
  pull_request:
  merge_group:
permissions:
  contents: read
jobs:
  checks:
    runs-on: "${{ matrix.os }}"
    strategy:
      fail-fast: false
      matrix:
        os:
          - ubuntu-latest
          - ubuntu-22.04-arm
          - macos-latest
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
        with:
          persist-credentials: false
      - uses: cachix/install-nix-action@8aa03977d8d733052d78f4e008a241fd1dbf36b3 # v31
      - uses: cachix/cachix-action@1eb2ef646ac0255473d23a5907ad7b04ce94065c # v17
        with:
          name: nixos-infra-dev
          authToken: "${{ secrets.CACHIX_AUTH_TOKEN }}"
      - run: nix run --inputs-from . nixpkgs#nix-fast-build -- --skip-cached --no-nom
  nixos-x86_64:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        machine:
          - caliban
          - elated-minsky
          - sleepy-brown
          - haumea
          - pluto
          - mimas
    steps:
      - name: Free disk space
        if: matrix.machine == 'mimas'
        run: |
          sudo rm -rf /usr/share/dotnet /usr/local/lib/android /opt/ghc
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
        with:
          persist-credentials: false
      - uses: cachix/install-nix-action@8aa03977d8d733052d78f4e008a241fd1dbf36b3 # v31
      - uses: cachix/cachix-action@1eb2ef646ac0255473d23a5907ad7b04ce94065c # v17
        with:
          name: nixos-infra-dev
          authToken: "${{ secrets.CACHIX_AUTH_TOKEN }}"
      - run: nix run --inputs-from . nixpkgs#nix-fast-build -- --skip-cached --no-nom --flake '.#nixosConfigurations."${{ matrix.machine }}".config.system.build.toplevel'
  nixos-aarch64:
    runs-on: ubuntu-22.04-arm
    strategy:
      fail-fast: false
      matrix:
        machine:
          - umbriel
          - goofy-hopcroft
          - staging-hydra
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
        with:
          persist-credentials: false
      - uses: cachix/install-nix-action@8aa03977d8d733052d78f4e008a241fd1dbf36b3 # v31
      - uses: cachix/cachix-action@1eb2ef646ac0255473d23a5907ad7b04ce94065c # v17
        with:
          name: nixos-infra-dev
          authToken: "${{ secrets.CACHIX_AUTH_TOKEN }}"
      - run: nix run --inputs-from . nixpkgs#nix-fast-build -- --skip-cached --no-nom --flake '.#nixosConfigurations."${{ matrix.machine }}".config.system.build.toplevel'
  nix-darwin:
    runs-on: macos-latest
    strategy:
      fail-fast: false
      matrix:
        machine:
          - intense-heron # m1
          - kind-lumiere # m2
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
        with:
          persist-credentials: false
      - uses: cachix/install-nix-action@8aa03977d8d733052d78f4e008a241fd1dbf36b3 # v31
      - uses: cachix/cachix-action@1eb2ef646ac0255473d23a5907ad7b04ce94065c # v17
        with:
          name: nixos-infra-dev
          authToken: "${{ secrets.CACHIX_AUTH_TOKEN }}"
      - run: nix run --inputs-from . nixpkgs#nix-fast-build -- --skip-cached --no-nom --flake '.#darwinConfigurations."${{ matrix.machine }}".config.system.build.toplevel'
================================================
FILE: .github/workflows/dns-apply.yml
================================================
---
name: Apply DNS changes
on:
  push:
    branches:
      - main
    paths:
      - "dns/**"
  workflow_dispatch:
permissions: {}
jobs:
  dnscontrol:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: true
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          persist-credentials: false
      - uses: cachix/install-nix-action@8aa03977d8d733052d78f4e008a241fd1dbf36b3 # v31
      - name: dnscontrol push
        env:
          GANDI_TOKEN: "${{ secrets.GANDI_TOKEN }}" # Expires 2026-04-07
        working-directory: ./dns/
        run: |
          nix run --inputs-from . nixpkgs#dnscontrol -- push
================================================
FILE: .github/workflows/dns-preview.yml
================================================
---
name: Test/Preview DNS changes
on:
  pull_request:
    paths:
      - "dns/**"
permissions: {}
jobs:
  dnscontrol:
    # only run for local branches
    if: github.event.pull_request.head.repo.full_name == github.repository
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          persist-credentials: false
      - uses: cachix/install-nix-action@8aa03977d8d733052d78f4e008a241fd1dbf36b3 # v31
      - name: dnscontrol preview
        env:
          GANDI_TOKEN: "${{ secrets.GANDI_TOKEN }}" # Expires 2026-04-07
        working-directory: ./dns/
        run: |
          nix run --inputs-from . nixpkgs#dnscontrol -- preview
================================================
FILE: .github/workflows/format-pr.yml
================================================
name: Format PR
on:
  issue_comment:
    types: [created]
  workflow_dispatch:
    inputs:
      pr_number:
        description: "PR number to format"
        required: true
        type: number
permissions:
  contents: write
  pull-requests: write
jobs:
  format:
    if: |
      github.event.issue.pull_request &&
      github.event.comment.body == '/format'
    runs-on: ubuntu-latest
    steps:
      - name: Check if user has write access
        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
        with:
          script: |
            const permission = await github.rest.repos.getCollaboratorPermissionLevel({
              owner: context.repo.owner,
              repo: context.repo.repo,
              username: context.payload.comment.user.login,
            });
            if (!['admin', 'write'].includes(permission.data.permission)) {
              await github.rest.issues.createComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: context.issue.number,
                body: '❌ You need write access to run this command.'
              });
              core.setFailed('User lacks write permission');
            }
      - name: React to comment
        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
        with:
          script: |
            await github.rest.reactions.createForIssueComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              comment_id: context.payload.comment.id,
              content: 'rocket'
            });
      - name: Get PR branch
        id: pr
        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
        with:
          script: |
            const pr = await github.rest.pulls.get({
              owner: context.repo.owner,
              repo: context.repo.repo,
              pull_number: context.issue.number,
            });
            core.setOutput('head_ref', pr.data.head.ref);
            core.setOutput('head_sha', pr.data.head.sha);
      - name: Checkout PR
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          ref: ${{ steps.pr.outputs.head_ref }}
          fetch-depth: 0
      - name: Install Nix
        uses: cachix/install-nix-action@8aa03977d8d733052d78f4e008a241fd1dbf36b3 # v31
      - name: Setup Cachix
        uses: cachix/cachix-action@1eb2ef646ac0255473d23a5907ad7b04ce94065c # v17
        with:
          name: nixos-infra-dev
          authToken: "${{ secrets.CACHIX_AUTH_TOKEN }}"
      - name: Run format and absorb
        run: ./.github/scripts/format-and-absorb.sh
      - name: Comment on success
        if: success()
        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
        with:
          script: |
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
              body: '✅ Successfully formatted and absorbed changes!'
            });
      - name: Comment on failure
        if: failure()
        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
        with:
          script: |
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
              body: '❌ Failed to format and absorb changes. Check the workflow logs for details.'
            });
================================================
FILE: .github/workflows/zizmor.yml
================================================
name: GitHub Actions Security Analysis with zizmor 🌈
on:
  push:
    branches:
      - main
    paths:
      - ".github/**"
      - flake.lock
  pull_request:
    paths:
      - ".github/**"
      - flake.lock
permissions: {}
jobs:
  zizmor:
    name: Run zizmor against GitHub Action workflows
    runs-on: ubuntu-latest
    permissions:
      security-events: write
    steps:
      - name: Clone repository
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
        with:
          persist-credentials: false
      - name: Install nix
        uses: cachix/install-nix-action@8aa03977d8d733052d78f4e008a241fd1dbf36b3 # v31
      - name: Run zizmor 🌈
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          nix run --inputs-from . nixpkgs-unstable#zizmor -- \
            --format sarif --pedantic . > results.sarif
      - name: Upload SARIF file
        uses: github/codeql-action/upload-sarif@68bde559dea0fdcac2102bfdf6230c5f70eb485e # v4
        with:
          sarif_file: results.sarif
          category: zizmor
================================================
FILE: .gitignore
================================================
*~
# Terraform
.terraform*
# Direnv
.direnv
# Nix build outputs
result
# Colmena --keep-result roots directory
.gcroots
================================================
FILE: LICENSE
================================================
MIT License
Copyright (c) 2024 NixOS Foundation and contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: README.md
================================================
# The NixOS infrastructure configurations
This repository contains the configuration for the NixOS project
infrastructure.
The hosts are currently deployed using Colmena. Some of the infrastructure is
managed using Terraform. There are still a lot of things that are configured manually.
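For the Colmena-managed hosts (for example those under `build/`), a minimal
deployment sketch looks like this (the node name and selector flag below are
illustrative; pick the host you actually mean to deploy):

```bash
# Enter the dev shell that provides colmena and agenix
# (defined in build/flake-module.nix and used by build/.envrc).
nix develop .#build

# build/colmena.sh changes into its own directory and runs `colmena apply "$@"`,
# so any extra arguments, such as a node selector, are passed straight through.
./build/colmena.sh --on pluto
```

Terraform-managed resources live under `terraform/` and `terraform-iam/`, each
with its own `tf.sh` wrapper.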
## Docs
- [Resources inventory](docs/inventory.md)
## Team
There are two teams managing this repository. The responsibility of both teams
is to provide infrastructure for the Nix and NixOS community.
### [@NixOS/infra-build](https://github.com/orgs/NixOS/teams/infra-build)
This team has access to all the infrastructure, including the build
infrastructure. Its members are a subset of the @NixOS/infra team below.
### [@NixOS/infra](https://github.com/orgs/NixOS/teams/infra)
First-level responders. This team helps with the high-level infrastructure.
All members should be watching this repository for changes.
## Regular catch up
We meet regularly over [Lasuite Meet](https://github.com/suitenumerique/meet) to
catch up and make decisions. Sometimes it helps to have dedicated focus and
higher communication bandwidth.
There is an open team meeting **every other Thursday at
[18:00 (Europe/Zurich)](https://dateful.com/convert/zurich?t=18)**. See the
[google calendar](https://calendar.google.com/calendar/u/0/embed?src=b9o52fobqjak8oq8lfkhg3t0qg@group.calendar.google.com)
(search for "NixOS Infra") to see the next date.
- Location: <https://meet.cccda.de/nix-osin-fra>
- Meeting notes: <https://pad.lassul.us/nixos-infra>
## Reporting issues
If you experience any issues with the infrastructure, please
[post a new issue to this repository][1].
[1]: https://github.com/NixOS/infra/issues/new
================================================
FILE: build/.envrc
================================================
# shellcheck shell=bash
use flake .#build
================================================
FILE: build/colmena.nix
================================================
# heavily adapted from https://github.com/juspay/colmena-flake
# Original license: GNU Affero General Public License v3.0
{
config,
lib,
self,
inputs,
...
}:
{
options.colmena = {
hosts = lib.mkOption {
type = lib.types.attrsOf (
lib.types.submodule (
{ name, ... }:
{
options = {
targetHost = lib.mkOption {
type = lib.types.str;
default = "${name}.nixos.org";
description = ''
The target host for colmena nodes
'';
};
targetUser = lib.mkOption {
type = lib.types.str;
default = "root";
description = ''
The target user for colmena nodes
'';
};
};
}
)
);
description = ''
Deployment configuration for colmena nodes
'';
example = {
node1 = {
targetHost = "node1.nixos.org";
targetUser = "foo";
};
};
};
system = lib.mkOption {
type = lib.types.str;
description = ''
The system for colmena nodes
'';
default = "x86_64-linux";
};
};
config.flake.colmenaHive = inputs.colmena.lib.makeHive self.outputs.colmena;
config.flake.colmena = {
meta = {
nixpkgs = inputs.nixpkgs.legacyPackages.${config.colmena.system};
# https://github.com/zhaofengli/colmena/issues/60#issuecomment-1510496861
nodeSpecialArgs = builtins.mapAttrs (_: value: value._module.specialArgs) self.nixosConfigurations;
};
}
// builtins.mapAttrs (name: _: {
imports = (self.nixosConfigurations.${name})._module.args.modules ++ [
{
deployment = config.colmena.hosts.${name};
}
];
}) config.colmena.hosts;
}
================================================
FILE: build/colmena.sh
================================================
#!/usr/bin/env bash
set -euo pipefail
cd "$(dirname "$0")"
colmena apply "$@"
================================================
FILE: build/common.nix
================================================
{
pkgs,
lib,
...
}:
{
imports = [
../modules/common.nix
../modules/nftables.nix
../modules/prometheus
../modules/rasdaemon.nix
];
nixpkgs.config.allowUnfree = true;
hardware.enableAllFirmware = true;
hardware.cpu.amd.updateMicrocode = true;
hardware.cpu.intel.updateMicrocode = true;
boot.kernel.sysctl = {
# reboot on kernel panic
"kernel.panic" = 60;
"kernel.panic_on_oops" = 1;
};
documentation.nixos.enable = false;
environment = {
enableDebugInfo = true;
systemPackages = with pkgs; [
# debugging
gdb
lsof
sqlite-interactive
# editors
helix
neovim
# utilities
ripgrep
fd
# system introspection
dmidecode
hdparm
htop
iotop
lm_sensors
nvme-cli
powerstat
smartmontools
sysstat
tcpdump
tmux
];
};
services.openssh = {
enable = true;
authorizedKeysFiles = lib.mkForce [ "/etc/ssh/authorized_keys.d/%u" ];
};
nix.extraOptions = ''
allowed-impure-host-deps = /etc/protocols /etc/services /etc/nsswitch.conf
allowed-uris = https://github.com/ https://git.savannah.gnu.org/ github: https://releases.nixos.org/
'';
# we use networkd
networking.useDHCP = false;
services.resolved = {
enable = true;
fallbackDns = [
# https://docs.hetzner.com/de/dns-console/dns/general/recursive-name-servers/
"185.12.64.1"
"185.12.64.2"
"2a01:4ff:ff00::add:1"
"2a01:4ff:ff00::add:2"
];
};
security.acme = {
acceptTerms = true;
defaults.email = "infra@nixos.org";
};
services.zfs.autoScrub.enable = true;
}
================================================
FILE: build/datadog/hydra.nix
================================================
{ pkgs, ... }:
{
systemd.services.dd-agent.environment.PYTHONPATH =
"${pkgs.pythonPackages.requests}/lib/python2.7/site-packages";
environment.etc =
let
hydra-config = pkgs.writeText "hydra.yaml" ''
init_config:
instances:
- check: 1
'';
in
[
{
source = hydra-config;
target = "dd-agent/conf.d/hydra.yaml";
}
{
source = ./hydra.py;
target = "dd-agent/checks.d/hydra.py";
}
];
}
================================================
FILE: build/datadog/hydra.py
================================================
import json
import requests
import checks


class HydraCheck(checks.AgentCheck):
    def check(self, instance) -> None:
        r = requests.get(
            "http://localhost:3000/status", headers={"Content-Type": "application/json"}
        )
        self.gauge("hydra.active_buildsteps", len(json.loads(r.text)))
================================================
FILE: build/flake-module.nix
================================================
{
inputs,
lib,
...
}:
let
flakesModule = {
imports = [
inputs.agenix.nixosModules.age
inputs.disko.nixosModules.disko
];
nixpkgs.overlays = [
inputs.rfc39.overlays.default
];
};
in
{
imports = [
./colmena.nix
];
colmena.hosts = {
haumea = { };
pluto = { };
mimas = { };
titan = { };
};
flake = {
nixosConfigurations.haumea = lib.nixosSystem {
system = "x86_64-linux";
specialArgs = { inherit inputs; };
modules = [
flakesModule
./haumea
];
};
nixosConfigurations.pluto = lib.nixosSystem {
system = "x86_64-linux";
specialArgs = { inherit inputs; };
modules = [
flakesModule
./pluto
];
};
nixosConfigurations.mimas = lib.nixosSystem {
system = "x86_64-linux";
specialArgs = { inherit inputs; };
modules = [
flakesModule
./mimas
];
};
nixosConfigurations.titan = lib.nixosSystem {
system = "x86_64-linux";
specialArgs = { inherit inputs; };
modules = [
flakesModule
./titan
];
};
};
perSystem =
{ pkgs, inputs', ... }:
{
devShells.build = pkgs.mkShell {
buildInputs = [
inputs'.agenix.packages.agenix
inputs'.colmena.packages.colmena
];
};
};
}
================================================
FILE: build/haumea/boot.nix
================================================
{
boot.loader.grub = {
devices = [
"/dev/nvme0n1"
"/dev/nvme1n1"
];
copyKernels = true;
configurationLimit = 5; # 230 MB /boot capacity
};
boot.initrd.availableKernelModules = [
"ahci"
"nvme"
"usbhid"
];
boot.kernelModules = [ "kvm-amd" ];
}
================================================
FILE: build/haumea/default.nix
================================================
{
lib,
modulesPath,
pkgs,
...
}:
{
imports = [
"${modulesPath}/installer/scan/not-detected.nix"
../common.nix
./boot.nix
./network.nix
./postgresql.nix
];
networking = {
hostId = "83c81a23";
hostName = "haumea";
domain = "nixos.org";
};
environment.systemPackages = [ pkgs.lz4 ];
fileSystems."/" = {
device = "rpool/safe/root";
fsType = "zfs";
};
fileSystems."/boot" = {
device = "/dev/disk/by-label/boot0";
fsType = "ext4";
};
fileSystems."/nix" = {
device = "rpool/local/nix";
fsType = "zfs";
};
fileSystems."/var/db/postgresql" = {
device = "rpool/safe/postgres";
fsType = "zfs";
};
services.zfs.autoScrub.enable = true;
nix.settings.max-jobs = lib.mkDefault 16;
powerManagement.cpuFreqGovernor = lib.mkDefault "ondemand";
system.stateVersion = "14.12";
users.users.root.openssh.authorizedKeys.keys =
with (import ../../ssh-keys.nix);
infra # maybe this doesn't need to be added (again)?
++ [
brianmcgee # experiments with the old Hydra's DB
];
}
================================================
FILE: build/haumea/network.nix
================================================
{
systemd.network = {
enable = true;
networks = {
"30-enp35s0" = {
matchConfig = {
MACAddress = "a8:a1:59:04:71:f5";
Type = "ether";
};
address = [
"46.4.89.205/27"
"2a01:4f8:212:41c9::1/64"
];
routes = [
{ Gateway = "46.4.89.193"; }
{ Gateway = "fe80::1"; }
];
vlan = [
"vlan4000"
];
networkConfig.Description = "WAN";
linkConfig.RequiredForOnline = true;
};
};
};
}
================================================
FILE: build/haumea/postgresql.nix
================================================
{
config,
pkgs,
...
}:
{
services.prometheus.exporters.postgres = {
enable = true;
dataSourceName = "user=root database=hydra host=/run/postgresql sslmode=disable";
openFirewall = true;
firewallRules = ''
ip6 saddr $prometheus_inet6 tcp dport ${toString config.services.prometheus.exporters.postgres.port} accept
ip saddr $prometheus_inet4 tcp dport ${toString config.services.prometheus.exporters.postgres.port} accept
'';
};
services.postgresql = {
enable = true;
enableJIT = true;
package = pkgs.postgresql_16;
dataDir = "/var/db/postgresql/16";
# https://pgtune.leopard.in.ua/#/
settings = {
# https://vadosware.io/post/everything-ive-seen-on-optimizing-postgres-on-zfs-on-linux/#zfs-related-tunables-on-the-postgres-side
full_page_writes = "off";
checkpoint_completion_target = "0.9";
default_statistics_target = 100;
log_duration = "off";
log_statement = "none";
# pgbadger-compatible logging
log_transaction_sample_rate = 0.01;
log_min_duration_statement = 5000;
log_checkpoints = "on";
log_connections = "on";
log_disconnections = "on";
log_lock_waits = "on";
log_temp_files = 0;
log_autovacuum_min_duration = 0;
log_line_prefix = "user=%u,db=%d,app=%a,client=%h ";
max_connections = 500;
work_mem = "20MB";
maintenance_work_mem = "2GB";
# 25% of memory
shared_buffers = "16GB";
# Checkpoint every 1GB. (default)
# increased after seeing many warnings about frequent checkpoints
min_wal_size = "1GB";
max_wal_size = "2GB";
wal_buffers = "16MB";
max_worker_processes = 16;
max_parallel_workers_per_gather = 8;
max_parallel_workers = 16;
# NVMe related performance tuning
effective_io_concurrency = 200;
random_page_cost = "1.1";
# We can risk losing some transactions.
synchronous_commit = "off";
effective_cache_size = "16GB";
# Enable JIT compilation if possible.
jit = "on";
# autovacuum and autoanalyze much more frequently:
# at these values vacuum should run approximately
# every 2 mass rebuilds, or a couple times a day
# on the builds table. Some of those queries really
# benefit from frequent vacuums, so this should
# help. In particular, I'm thinking the jobsets
# pages.
autovacuum_vacuum_scale_factor = 0.02;
autovacuum_analyze_scale_factor = 0.01;
shared_preload_libraries = "pg_stat_statements";
compute_query_id = "on";
};
# FIXME: don't use 'trust'.
authentication = ''
host hydra all 10.0.40.0/32 trust
local all root peer map=prometheus
'';
identMap = ''
prometheus root root
prometheus postgres-exporter root
'';
};
}
================================================
FILE: build/haumea/zrepl.yml
================================================
# root@zh4461b.rsync.net:/usr/local/etc/zrepl/zrepl.yml
# zrepl main configuration file.
# For documentation, refer to https://zrepl.github.io/
#
global:
  logging:
    - type: "stdout"
      level: "error"
      format: "human"
    - type: "syslog"
      level: "info"
      format: "logfmt"
# mostly from https://blog.lenny.ninja/zrepl-on-rsync-net.html
jobs:
  - name: sink
    type: sink
    serve:
      type: stdinserver
      client_identities: [haumea]
    recv:
      placeholder:
        encryption: off
    root_fs: "data1"
================================================
FILE: build/hydra-proxy.nix
================================================
{
config,
pkgs,
...
}:
{
networking.firewall.allowedTCPPorts = [
80
443
];
services.anubis.instances."hydra-server" = {
settings = {
TARGET = "http://127.0.0.1:3000";
BIND = ":3001";
BIND_NETWORK = "tcp";
METRICS_BIND = ":9001";
METRICS_BIND_NETWORK = "tcp";
};
};
networking.firewall.extraInputRules = ''
ip6 saddr $prometheus_inet6 tcp dport 9001 accept
ip saddr $prometheus_inet4 tcp dport 9001 accept
'';
services.nginx = {
enable = true;
enableReload = true;
recommendedBrotliSettings = true;
recommendedGzipSettings = true;
recommendedOptimisation = true;
recommendedProxySettings = true;
recommendedTlsSettings = true;
proxyTimeout = "900s";
appendConfig = ''
worker_processes auto;
'';
appendHttpConfig = ''
map $request_uri $backend {
default anubis;
# downloads (e.g. distrobuilder for lxc/incus images)
~^/build/\d+/download/ hydra-server;
~^/build/\d+/download-by-type/ hydra-server;
~^/job/[^/]+/[^/]+/[^/]+/latest/download/ hydra-server;
~^/job/[^/]+/[^/]+/[^/]+/latest/download-by-type/file/ hydra-server;
}
limit_req_zone $binary_remote_addr zone=hydra-server:8m rate=2r/s;
limit_req_status 429;
'';
eventsConfig = ''
worker_connections 1024;
'';
upstreams = {
anubis.servers."127.0.0.1:3001" = { };
hydra-server.servers."127.0.0.1:3000" = { };
};
virtualHosts."hydra.nixos.org" = {
forceSSL = true;
enableACME = true;
extraConfig = ''
error_page 403 /403.html;
error_page 502 /502.html;
error_page 503 /503.html;
location ~ /(403|502|503).html {
root ${./nginx-error-pages};
internal;
}
'';
# Ask robots not to scrape hydra, it has various expensive endpoints
locations."=/robots.txt".alias = pkgs.writeText "hydra.nixos.org-robots.txt" ''
User-agent: *
Disallow: /
Allow: /$
'';
locations."~ ^/job/[^/]+/[^/]+/metrics/metric/" = {
proxyPass = "http://hydra-server";
};
locations."/" = {
proxyPass = "http://$backend";
extraConfig = ''
limit_req zone=hydra-server burst=7;
'';
};
locations."/static/" = {
alias = "${config.services.hydra-dev.package}/libexec/hydra/root/static/";
};
};
};
}
================================================
FILE: build/hydra.nix
================================================
{
config,
lib,
pkgs,
inputs,
...
}:
let
narCache = "/var/cache/hydra/nar-cache";
in
{
imports = [
inputs.hydra.nixosModules.hydra
];
# queue-runner and hydra-notify metrics
networking.firewall.extraInputRules = ''
ip6 saddr $prometheus_inet6 tcp dport { 9198, 9199 } accept
ip saddr $prometheus_inet4 tcp dport { 9198, 9199 } accept
'';
nix.package = config.services.hydra-dev.package.nix;
# garbage collection
nix.gc = {
automatic = true;
options = ''--max-freed "$((400 * 1024**3 - 1024 * $(df -P -k /nix/store | tail -n 1 | ${pkgs.gawk}/bin/awk '{ print $4 }')))"'';
dates = "03,09,15,21:15";
};
# gc outputs as well, since they are served from the cache
nix.settings.keep-outputs = lib.mkForce false;
systemd.services.hydra-prune-build-logs = {
description = "Clean up old build logs";
startAt = "weekly";
serviceConfig = {
User = "hydra-queue-runner";
Group = "hydra";
ExecStart = lib.concatStringsSep " " [
(lib.getExe pkgs.findutils)
"/var/lib/hydra/build-logs/"
"-ignore_readdir_race"
"-type"
"f"
"-mtime"
"+213" # days (~7 months, roughly one release cycle)
"-delete"
];
};
};
# Don't rate-limit the journal.
services.journald.rateLimitBurst = 0;
age.secrets.hydra-aws-credentials = {
file = ./secrets/hydra-aws-credentials.age;
path = "/var/lib/hydra/queue-runner/.aws/credentials";
owner = "hydra-queue-runner";
group = "hydra";
};
age.secrets.hydra-github-client-secret = {
file = ./secrets/hydra-github-client-secret.age;
owner = "hydra-www";
group = "hydra";
};
services.hydra-dev.enable = true;
services.hydra-dev.buildMachinesFiles = [ "/etc/nix/machines" ];
services.hydra-dev.dbi = "dbi:Pg:dbname=hydra;host=10.0.40.3;user=hydra;";
services.hydra-dev.logo = ./hydra-logo.png;
services.hydra-dev.hydraURL = "https://hydra.nixos.org";
services.hydra-dev.notificationSender = "edolstra@gmail.com";
services.hydra-dev.smtpHost = "localhost";
services.hydra-dev.useSubstitutes = false;
services.hydra-dev.extraConfig = ''
max_servers 30
enable_google_login = 1
google_client_id = 816926039128-ia4s4rsqrq998rsevce7i09mo6a4nffg.apps.googleusercontent.com
github_client_id = b022c64ce4531ffc1031
github_client_secret_file = ${config.age.secrets.hydra-github-client-secret.path}
store_uri = s3://nix-cache?secret-key=/var/lib/hydra/queue-runner/keys/cache.nixos.org-1/secret&write-nar-listing=1&ls-compression=br&log-compression=br&index-debug-info=true
server_store_uri = https://cache.nixos.org?local-nar-cache=${narCache}
binary_cache_public_uri = https://cache.nixos.org
<Plugin::Session>
cache_size = 32m
</Plugin::Session>
# patchelf:master:3
xxx-jobset-repeats = nixos:reproducibility:1
upload_logs_to_binary_cache = true
compress_build_logs = false # conflicts with upload_logs_to_binary_cache
log_prefix = https://cache.nixos.org/
evaluator_workers = 16
evaluator_max_memory_size = 8192
max_concurrent_evals = 1
# increase the number of active compress slots (CPU is 48*2 on mimas)
max_local_worker_threads = 144
max_unsupported_time = 86400
allow_import_from_derivation = false
max_output_size = 4294967295 # 4 GiB - 1 B
max_db_connections = 350
queue_runner_metrics_address = [::]:9198
<hydra_notify>
<prometheus>
listen_address = 0.0.0.0
port = 9199
</prometheus>
</hydra_notify>
'';
systemd.tmpfiles.rules = [
"d /var/cache/hydra 0755 hydra hydra - -"
"d ${narCache} 0775 hydra hydra 1d -"
];
# wait for the network before starting hydra, since we require a network
# connection to the remote postgresql database
systemd.services.hydra-init = {
wants = [
"network-online.target"
];
after = [
"network-online.target"
];
};
# eats memory as if it was free
systemd.services.hydra-notify.enable = false;
systemd.services.hydra-queue-runner = {
# restarting the scheduler is very expensive
restartIfChanged = false;
serviceConfig = {
ManagedOOMPreference = "avoid";
LimitNOFILE = 65535;
};
};
programs.ssh.hostKeyAlgorithms = [
"rsa-sha2-512-cert-v01@openssh.com"
"ssh-ed25519"
"ssh-rsa"
"ecdsa-sha2-nistp256"
];
programs.ssh.extraConfig = lib.mkAfter ''
ServerAliveInterval 120
TCPKeepAlive yes
'';
# These IPs and SSH public keys are specifically provisioned for Hydra
services.openssh.knownHosts = {
# x86_64-linux at Hetzner
"elated-minsky.builder.nixos.org".publicKey =
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIvrJpd3aynfPVGGG/s7MtRFz/S6M4dtqvqKI3Da7O7+";
"sleepy-brown.builder.nixos.org".publicKey =
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOh4/3m7o6H3J5QG711aJdlSUVvlC8yW6KoqAES3Fy6I";
# aarch64-linux at Hetzner
"goofy-hopcroft.builder.nixos.org".publicKey =
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICTJEi+nQNd7hzNYN3cLBK/0JCkmwmyC1I+b5nMI7+dd";
"hopeful-rivest.builder.nixos.org".publicKey =
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBgjwpQaNAWdEdnk1YG7JWThM4xQdKNJ3h3arhF7+iFm";
# M1 Macs at Hetzner
"intense-heron.mac.nixos.org".publicKey =
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICeSgOe/cr1yVAJOl30t3AZOLtvzeQa5rnrHGceKeBue";
"sweeping-filly.mac.nixos.org".publicKey =
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE6b/coXQEcFZW1eG4zFyCMCF0mZFahqmadz6Gk9DWMF";
"maximum-snail.mac.nixos.org".publicKey =
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEs+fK4hH8UKo+Pa7u1VYltkMufBHHH5uC93RQ2S6Xy9";
"growing-jennet.mac.nixos.org".publicKey =
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAQGthkSSOnhxrIUCMlRQz8FOo5Y5Nk9f9WnVLNeRJpm";
"enormous-catfish.mac.nixos.org".publicKey =
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMlg7NXxeG5L3s0YqSQIsqVG0MTyvyWDHUyYEfFPazLe";
# M1 Macs at Flying Circus
"norwegian-blue.mac.nixos.org".publicKey =
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDQ6Cjvoq5VBYfXl6ZV/ijQ1q4UxbWRYYfkXe0rzmJjf";
# M2 Macs at Oakhost
"kind-lumiere.mac.nixos.org".publicKey =
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFoqn1AAcOqtG65milpBtWVXP5VcBmTUSMGNfJzPwW8Q";
"eager-heisenberg.mac.nixos.org".publicKey =
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBp9NStfEPu7HdeK8f2KEnynyirjG9BUk+6w2SgJtQyS";
# vcunat
"t2a.cunat.cz".publicKey =
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIu3itg4hn5e4KrnyoreAUN3RIbAcvqc7yWx5i6EWqAu";
"t4b.cunat.cz".publicKey =
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIC/jE8c0lkc/DlK3R7A+zBr6j/lfEQrhqSD/YOEVs8za";
};
}
================================================
FILE: build/id_buildfarm.pub
================================================
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyM48VC5fpjJssLI8uolFscP4/iEoMHfkPoT9R3iE3OEjadmwa1XCAiXUoa7HSshw79SgPKF2KbGBPEVCascdAcErZKGHeHUzxj7v3IsNjObouUOBbJfpN4DR7RQT28PZRsh3TvTWjWnA9vIrSY/BvAK1uezFRuObvatqAPMrw4c0DK+JuGuCNkKDGHLXNSxYBc5Pmr1oSU7/BDiHVjjyLIsAMIc20+q8SjWswKqL1mY193mN7FpUMBtZrd0Za9fMFRII9AofEIDTOayvOZM6+/1dwRWZXM6jhE6kaPPF++yromHvDPBnd6FfwODKLvSF9BkA3pO5CqrD8zs7ETmrV hydra-queue-runner@chef
================================================
FILE: build/mimas/boot.nix
================================================
{
boot = {
initrd.availableKernelModules = [
"ahci"
"xhci_pci"
"nvme"
"usbhid"
];
supportedFilesystems.zfs = true;
loader = {
efi.canTouchEfiVariables = false;
grub = {
enable = true;
configurationLimit = 10;
efiSupport = true;
efiInstallAsRemovable = true;
mirroredBoots = [
{
devices = [ "nodev" ];
path = "/efi/a";
}
{
devices = [ "nodev" ];
path = "/efi/b";
}
];
};
};
};
}
================================================
FILE: build/mimas/default.nix
================================================
{
imports = [
../common.nix
../hydra.nix
../hydra-proxy.nix
./boot.nix
./firewall.nix
./network.nix
];
disko.devices = import ./disko.nix;
networking = {
hostName = "mimas";
domain = "nixos.org";
hostId = "aba92093";
};
zramSwap = {
enable = true;
memoryPercent = 50;
};
nixpkgs.hostPlatform = "x86_64-linux";
system.stateVersion = "24.11";
}
================================================
FILE: build/mimas/disko.nix
================================================
let
layout = id: {
type = "gpt";
partitions = {
esp = {
type = "EF00";
size = "512M";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/efi/${id}";
};
};
zfs = {
size = "100%";
content = {
type = "zfs";
pool = "zroot";
};
};
};
};
in
{
disk = {
nvme0n1 = {
type = "disk";
device = "/dev/disk/by-id/nvme-SAMSUNG_MZQL21T9HCJR-00A07_S64GNNFX604905";
content = layout "a";
};
nvme1n1 = {
type = "disk";
device = "/dev/disk/by-id/nvme-SAMSUNG_MZQL21T9HCJR-00A07_S64GNNFX604919";
content = layout "b";
};
};
zpool.zroot = {
type = "zpool";
mode = "mirror";
options.ashift = "12";
rootFsOptions = {
acltype = "posixacl";
atime = "off";
compression = "on";
mountpoint = "none";
xattr = "sa";
};
datasets = {
"root" = {
type = "zfs_fs";
mountpoint = "/";
};
"nix/store" = {
type = "zfs_fs";
mountpoint = "/nix";
};
"nix/db" = {
type = "zfs_fs";
mountpoint = "/nix/var/nix/db";
};
"hydra/cache" = {
type = "zfs_fs";
mountpoint = "/var/cache/hydra";
};
"hydra/state" = {
type = "zfs_fs";
mountpoint = "/var/lib/hydra";
};
"reserved" = {
type = "zfs_fs";
options = {
canmount = "off";
refreservation = "16G"; # roughly one system closure
};
};
};
};
}
================================================
FILE: build/mimas/firewall.nix
================================================
{
pkgs,
lib,
inputs,
...
}:
let
blockedAutNums = [
45102 # ALIBABA-CN-NET
45899 # VNPT-AS-VN
132203 # TENCENT-NET-AP-CN
];
in
{
networking.nftables = {
tables."abuse" = {
family = "inet";
content = ''
set ipv4blocks {
type ipv4_addr;
flags interval;
auto-merge;
}
set ipv6blocks {
type ipv6_addr;
auto-merge;
flags interval;
}
chain input-abuse {
type filter hook input priority filter - 5;
ip saddr @ipv4blocks tcp dport 443 counter drop;
ip6 saddr @ipv6blocks tcp dport 443 counter drop;
}
'';
};
};
systemd.services.nft-prefix-import = {
wants = [ "network-online.target" ];
after = [ "network-online.target" ];
wantedBy = [ "multi-user.target" ];
path = with pkgs; [ nftables ];
environment.USER_AGENT = "NixOS.org Infrastructure - infra@nixos.org";
serviceConfig = {
Type = "oneshot";
AmbientCapabilities = [ "CAP_NET_ADMIN" ];
DynamicUser = true;
User = "nft-asblock";
Group = "nft-asblock";
ExecStart = toString (
[
(lib.getExe inputs.nft-prefix-import.packages.${pkgs.stdenv.hostPlatform.system}.default)
"--table"
"abuse"
"--ipv4set"
"ipv4blocks"
"--ipv6set"
"ipv6blocks"
]
++ blockedAutNums
);
RestrictAddressFamilies = [
"AF_NETLINK"
"AF_INET"
"AF_INET6"
];
StateDirectory = "nft-prefix-import";
WorkingDirectory = "/var/lib/nft-prefix-import";
};
};
systemd.timers.nft-prefix-import = {
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = "0/6:00";
RandomizedDelaySec = 3600;
};
};
}
================================================
FILE: build/mimas/network.nix
================================================
{
networking.useDHCP = false;
systemd.network = {
enable = true;
netdevs = {
"20-vlan4000" = {
netdevConfig = {
Kind = "vlan";
Name = "vlan4000";
};
vlanConfig.Id = 4000;
};
};
networks = {
"30-enp5s0" = {
matchConfig = {
MACAddress = "9c:6b:00:70:d1:f8";
Type = "ether";
};
linkConfig.RequiredForOnline = true;
networkConfig.Description = "WAN";
address = [
"157.90.104.34/26"
"2a01:4f8:2220:11c8::1/64"
];
routes = [
{ Gateway = "157.90.104.1"; }
{ Gateway = "fe80::1"; }
];
vlan = [
"vlan4000"
];
};
"30-vlan4000" = {
matchConfig.Name = "vlan4000";
linkConfig = {
MTUBytes = "1400";
RequiredForOnline = "routable";
};
address = [
"10.0.40.2/31"
];
};
};
};
}
================================================
FILE: build/nginx-error-pages/403.html
================================================
<!DOCTYPE html>
<html lang="en">
<head>
<title>Error 403 - hydra.nixos.org</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta http-equiv="X-UA-Compatible" content="IE=Edge" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<link
rel="stylesheet"
href="https://nixos.org/bootstrap/css/bootstrap.min.css"
/>
<link
rel="stylesheet"
href="https://nixos.org/bootstrap/css/bootstrap-responsive.min.css"
/>
<style>
body {
padding-top: 0;
margin-top: 4em;
margin-bottom: 4em;
}
body > div {
max-width: 800px;
text-align: center;
}
h1 {
margin: 0 auto;
text-align: center;
}
p {
text-align: center;
}
ul {
display: inline-block;
text-align: left;
}
</style>
</head>
<body>
<div class="container jumbotron">
<div class="jumbotron">
<p class="lead">
<a href="https://nixos.org/nixos">
<img
src="https://brand.nixos.org/logos/nixos-logo-default-gradient-black-regular-horizontal-minimal.svg"
width="500px"
alt="logo"
/>
</a>
</p>
<h1>HTTP Error 403</h1>
<p class="lead">Access to this resource has been denied!</p>
<p>
This could be caused by one of the following issues:
</p>
<ul>
<li>You are using an extension to spoof your user-agent</li>
<li>The browser you are running is out of date</li>
</ul>
<p>
Feel free to reach out, if you think this request was denied in error.
</p>
</div>
<hr>
<div class="help">
<p>
You can check the following resources for further information:<br>
<a href="https://prometheus.nixos.org/alerts">Alerts</a> |
<a href="https://grafana.nixos.org/">Dashboards</a> |
<a href="https://github.com/NixOS/infra/issues">Issues</a> |
<a href="https://matrix.to/#/#infra:nixos.org">Chatroom</a>
</p>
</div>
</div>
</body>
</html>
================================================
FILE: build/nginx-error-pages/502.html
================================================
<!DOCTYPE html>
<html lang="en">
<head>
<title>Error 502 - hydra.nixos.org</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta http-equiv="X-UA-Compatible" content="IE=Edge" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<link
rel="stylesheet"
href="https://nixos.org/bootstrap/css/bootstrap.min.css"
/>
<link
rel="stylesheet"
href="https://nixos.org/bootstrap/css/bootstrap-responsive.min.css"
/>
<style>
body {
padding-top: 0;
margin-top: 4em;
margin-bottom: 4em;
}
body > div {
max-width: 800px;
}
h1 {
margin: 0 auto;
text-align: center;
}
p {
text-align: center;
}
</style>
</head>
<body>
<div class="container jumbotron">
<div class="jumbotron">
<p class="lead">
<a href="https://nixos.org/nixos">
<img
src="https://brand.nixos.org/logos/nixos-logo-default-gradient-black-regular-horizontal-minimal.svg"
width="500px"
alt="logo"
/>
</a>
</p>
<h1>HTTP Error 502</h1>
<p class="lead">This service is currently unavailable!</p>
</div>
<hr>
<div class="help">
<p>
You can check the following resources for further information:<br>
<a href="https://prometheus.nixos.org/alerts">Alerts</a> |
<a href="https://grafana.nixos.org/">Dashboards</a> |
<a href="https://github.com/NixOS/infra/issues">Issues</a> |
<a href="https://matrix.to/#/#infra:nixos.org">Chatroom</a>
</p>
</div>
</div>
</body>
</html>
================================================
FILE: build/nginx-error-pages/503.html
================================================
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>Hydra is down</title>
<style type="text/css" media="screen">
body {
font-family: Helvetica, Arial, sans-serif;
color: rgba(0, 0, 0, 0.7);
}
</style>
</head>
<body>
<center>
<img src="/apache-errors/warning.png" alt="Warning" />
<p>Looks like Hydra is having some problems. Sorry about that!</p>
<p style="font-size: 90%">
<a href="https://nixos.org/">NixOS Homepage</a> |
<a href="https://monitoring.nixos.org/prometheus/alerts"
>System Alerts</a> |
<a href="https://monitoring.nixos.org/grafana/">Dashboards</a> |
<a href="https://github.com/NixOS/nixpkgs/labels/infrastructure"
>Related Issues</a>
</p>
</center>
</body>
</html>
================================================
FILE: build/pluto/boot.nix
================================================
{
boot = {
supportedFilesystems = [ "zfs" ];
loader = {
efi.canTouchEfiVariables = false;
grub = {
enable = true;
efiSupport = true;
efiInstallAsRemovable = true;
mirroredBoots = [
{
devices = [ "nodev" ];
path = "/efi/a";
}
{
devices = [ "nodev" ];
path = "/efi/b";
}
];
};
};
};
}
================================================
FILE: build/pluto/default.nix
================================================
{ config, ... }:
{
imports = [
../common.nix
./boot.nix
./disko.nix
./network.nix
./grafana.nix
./nginx.nix
./nixos-metrics.nix
./prometheus
../../modules/hydra-mirror.nix
../../modules/rfc39.nix
../../modules/tarball-mirror.nix
];
networking = {
hostName = "pluto";
domain = "nixos.org";
hostId = "e4c9bd10";
};
age.secrets.pluto-backup-ssh-key.file = ../secrets/pluto-backup-ssh-key.age;
age.secrets.pluto-backup-secret.file = ../secrets/pluto-backup-secret.age;
services.backup = {
user = "u391032-sub2";
host = "u391032.your-storagebox.de";
hostPublicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIICf9svRenC/PLKIL9nk6K/pxQgoiFC41wTNvoIncOxs";
port = 23;
sshKey = config.age.secrets.pluto-backup-ssh-key.path;
secretPath = config.age.secrets.pluto-backup-secret.path;
};
nixpkgs.hostPlatform = "x86_64-linux";
system.stateVersion = "23.11";
}
================================================
FILE: build/pluto/disko.nix
================================================
{
disko.devices = {
disk = {
nvme0n1 = {
type = "disk";
device = "/dev/disk/by-id/nvme-SAMSUNG_MZVL2512HDJD-00B07_S782NE0W900172";
content = {
type = "gpt";
partitions = {
esp = {
size = "1G";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/efi/a";
};
};
swap = {
size = "16G";
content = {
type = "swap";
};
};
zfs = {
size = "100%";
content = {
type = "zfs";
pool = "zroot";
};
};
};
};
};
nvme1n1 = {
type = "disk";
device = "/dev/disk/by-id/nvme-SAMSUNG_MZVL2512HDJD-00B07_S782NF0YA37531";
content = {
type = "gpt";
partitions = {
esp = {
size = "1G";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/efi/b";
};
};
swap = {
size = "16G";
content = {
type = "swap";
};
};
zfs = {
size = "100%";
content = {
type = "zfs";
pool = "zroot";
};
};
};
};
};
};
zpool = {
zroot = {
type = "zpool";
options = {
ashift = "12";
autotrim = "on";
};
mode = "mirror";
rootFsOptions = {
acltype = "posixacl";
compression = "zstd";
mountpoint = "none";
};
datasets = {
root = {
type = "zfs_fs";
mountpoint = "/";
};
"root/prometheus" = {
type = "zfs_fs";
mountpoint = "/var/lib/prometheus2";
};
"root/victoriametrics" = {
type = "zfs_fs";
mountpoint = "/var/lib/victoriametrics";
};
};
};
};
};
}
================================================
FILE: build/pluto/grafana.nix
================================================
{
config,
...
}:
{
services.backup.includes = [ "/var/lib/grafana" ];
age.secrets."grafana-secret-key" = {
file = ../secrets/grafana-secret-key.age;
owner = "grafana";
};
services.grafana = {
enable = true;
settings = {
"auth.anonymous".enabled = true;
users = {
allow_sign_up = true;
viewers_can_edit = true;
};
server = {
domain = "grafana.nixos.org";
root_url = "https://grafana.nixos.org";
protocol = "socket";
};
security.secret_key = "$__file{${config.age.secrets.grafana-secret-key.path}}";
};
};
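# nginx proxies to Grafana over its unix socket; membership in the grafana group grants access to it.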
systemd.services.nginx.serviceConfig.SupplementaryGroups = [ "grafana" ];
}
================================================
FILE: build/pluto/network.nix
================================================
{
systemd.network = {
enable = true;
networks = {
"30-enp5s0" = {
matchConfig = {
MACAddress = "c8:7f:54:67:bd:31";
Type = "ether";
};
linkConfig.RequiredForOnline = true;
networkConfig.Description = "WAN";
address = [
"37.27.99.100/26"
"2a01:4f9:3070:15e0::1/64"
];
routes = [
{ Gateway = "37.27.99.65"; }
{ Gateway = "fe80::1"; }
];
};
};
};
}
================================================
FILE: build/pluto/nginx.nix
================================================
{ config, ... }:
{
networking.firewall.allowedTCPPorts = [
80
443
];
services.nginx = {
enable = true;
recommendedProxySettings = true;
eventsConfig = ''
worker_connections 4096;
'';
virtualHosts."monitoring.nixos.org" = {
enableACME = true;
forceSSL = true;
default = true;
locations."/".return = "302 https://status.nixos.org";
locations."~ ^/prometheus/?(?<action>[^\\s]+)" = {
return = "301 https://prometheus.nixos.org/$action$is_args$args";
# TODO: Remove after https://github.com/NixOS/nixos-status/pull/21
extraConfig = ''
add_header Access-Control-Allow-Origin "*" always;
'';
};
locations."~ ^/grafana/?(?<action>[^\\s]+)".return =
"301 https://grafana.nixos.org/$action$is_args$args";
};
virtualHosts."prometheus.nixos.org" = {
enableACME = true;
forceSSL = true;
locations."/" = {
proxyPass = "http://${config.services.prometheus.listenAddress}:${toString config.services.prometheus.port}";
};
};
virtualHosts."grafana.nixos.org" = {
enableACME = true;
forceSSL = true;
locations."/" = {
proxyPass = "http://unix:${config.services.grafana.settings.server.socket}";
proxyWebsockets = true;
};
};
};
}
================================================
FILE: build/pluto/nixos-metrics.nix
================================================
{ config, pkgs, ... }:
{
systemd.services.pull-nixos-metrics = {
description = "Pull nixos metrics from github:NixOS/nixos-metrics and push to local VictoriaMetrics";
script =
let
inherit (config.services.victoriametrics) listenAddress;
importURL = "http://localhost${listenAddress}/api/v1/import";
resetURL = "http://localhost${listenAddress}/internal/resetRollupResultCache";
dataURL = "https://raw.githubusercontent.com/NixOS/nixos-metrics/data/victoriametrics.jsonl";
curl = "${pkgs.curl}/bin/curl";
in
''
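# Import the published data points, then reset VictoriaMetrics' rollup result cache so the new samples are visible immediately.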
${curl} ${dataURL} | ${curl} -X POST --data-binary @- ${importURL}
${curl} -G ${resetURL}
'';
serviceConfig = {
Type = "oneshot";
User = "nobody";
};
};
systemd.timers.pull-nixos-metrics = {
description = "Pull nixos metrics, timed for after they're done updating each day.";
wantedBy = [ "timers.target" ];
timerConfig.OnCalendar = "12:00:00";
};
services.backup.includesZfsDatasets = [ "/var/lib/victoriametrics" ];
services.victoriametrics = {
enable = true;
retentionPeriod = "1200w"; # 100 years
};
}
================================================
FILE: build/pluto/prometheus/alertmanager.nix
================================================
{ config, ... }:
{
services.prometheus = {
alertmanagers = [
{
scheme = "http";
static_configs = [
{ targets = [ "localhost:${toString config.services.prometheus.alertmanager.port}" ]; }
];
}
];
alertmanager = {
enable = true;
# Allow alertmanager to start even if it doesn't find an RFC1918 IP on
# the machine's network interfaces.
extraFlags = [ "--cluster.listen-address=''" ];
webExternalUrl = "http://alerts.nixos.org";
configuration = {
global = { };
route = {
receiver = "ignore";
group_wait = "30s";
group_interval = "5m";
repeat_interval = "24h";
group_by = [ "alertname" ];
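# Only severity=warning alerts are forwarded to Matrix via go-neb; everything
# else falls through to the "ignore" receiver and is dropped.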
routes = [
{
receiver = "go-neb";
group_wait = "30s";
match.severity = "warning";
}
];
};
receivers = [
{
# with no *_config, this will drop all alerts directed to it
name = "ignore";
}
{
name = "go-neb";
webhook_configs = [
{
url = "${config.services.go-neb.baseUrl}:4050/services/hooks/YWxlcnRtYW5hZ2VyX3NlcnZpY2U";
send_resolved = true;
}
];
}
];
};
};
};
services.nginx.virtualHosts."alerts.nixos.org" = {
enableACME = true;
forceSSL = true;
locations."/" = {
proxyPass = "http://localhost:9093";
};
};
age.secrets."alertmanager-oauth2-proxy-env".file = ../../secrets/alertmanager-oauth2-proxy-env.age;
services.oauth2-proxy = {
enable = true;
# oidc provider
provider = "github";
clientID = "Ov23liDt1q76okEJpVVE";
keyFile = config.age.secrets."alertmanager-oauth2-proxy-env".path;
# filter criteria
email.domains = [ "*" ];
github = {
org = "NixOS";
team = "infra";
};
# protected domains
nginx = {
domain = "alerts.nixos.org";
virtualHosts."alerts.nixos.org" = { };
};
};
age.secrets.alertmanager-matrix-forwarder = {
file = ../../secrets/alertmanager-matrix-forwarder.age;
owner = config.systemd.services.go-neb.serviceConfig.User;
};
# Create user so that we can set the ownership of the key to
# it. DynamicUser will not take full effect as a result of this.
users.users.go-neb = {
isSystemUser = true;
group = "go-neb";
};
users.groups.go-neb = { };
systemd.services.go-neb.serviceConfig.SupplementaryGroups = [ "keys" ];
nixpkgs.config.permittedInsecurePackages = [ "olm-3.2.16" ];
services.go-neb = {
enable = true;
bindAddress = "localhost:4050";
baseUrl = "http://localhost";
secretFile = config.age.secrets.alertmanager-matrix-forwarder.path;
config = {
clients = [
{
UserId = "@bot:nixos.org";
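# Placeholder: the real token is presumably substituted at runtime, since the
# go-neb module runs envsubst over this config with variables from secretFile.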
AccessToken = "$CHANGEME";
HomeServerUrl = "https://matrix.nixos.org";
Sync = true;
AutoJoinRooms = true;
DisplayName = "Bot";
}
];
services = [
{
ID = "alertmanager_service";
Type = "alertmanager";
UserId = "@bot:nixos.org";
Config = {
webhook_url = "http://localhost:4050/services/hooks/YWxlcnRtYW5hZ2VyX3NlcnZpY2U";
rooms = {
# infra-alerts:nixos.org
"!QLQqibtFaVtDgurUAE:nixos.org" = {
text_template = ''
{{range .Alerts -}} [{{ .Status }}] {{index .Labels "alertname" }}: {{index .Annotations "description"}} {{ end -}}
'';
# Use $$severity: otherwise envsubst replaces $severity with an empty string
html_template = ''
{{range .Alerts -}}
{{ $$severity := index .Labels "severity" }}
{{ if eq .Status "firing" }}
{{ if eq $$severity "critical"}}
<font color='red'><b>[FIRING - CRITICAL]</b></font>
{{ else if eq $$severity "warning"}}
<font color='orange'><b>[FIRING - WARNING]</b></font>
{{ else }}
<b>[FIRING - {{ $$severity }}]</b>
{{ end }}
{{ else }}
<font color='green'><b>[RESOLVED]</b></font>
{{ end }}
{{ index .Labels "alertname"}}: {{ index .Annotations "summary"}}
(
{{ if .Annotations.grafana }}
<a href="{{ index .Annotations "grafana" }}">📈 Grafana</a>,
{{ end }}
<a href="{{ .GeneratorURL }}">🔥 Prometheus</a>,
<a href="{{ .SilenceURL }}">🔕 Silence</a>
)<br/>
{{end -}}'';
msg_type = "m.text"; # Must be either `m.text` or `m.notice`
};
};
};
}
];
};
};
}
================================================
FILE: build/pluto/prometheus/default.nix
================================================
{ pkgs, ... }:
{
imports = [
./alertmanager.nix
./exporters/anubis.nix
./exporters/blackbox.nix
./exporters/channel.nix
./exporters/domain.nix
./exporters/fastly.nix
./exporters/github.nix
./exporters/hydra.nix
./exporters/json.nix
./exporters/matrix-synapse.nix
./exporters/nixos.nix
./exporters/node.nix
./exporters/owncast.nix
./exporters/postgresql.nix
./exporters/rasdaemon.nix
./exporters/storagebox.nix
./exporters/sql.nix
./exporters/up.nix
./exporters/zfs.nix
./exporters/zrepl.nix
];
services.backup.includesZfsDatasets = [ "/var/lib/prometheus2" ];
services.prometheus = {
enable = true;
extraFlags = [
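# Keep 720 days (~2 years) of local TSDB data, expressed in hours.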
"--storage.tsdb.retention.time=${toString (720 * 24)}h"
"--web.external-url=https://prometheus.nixos.org/"
];
globalConfig.scrape_interval = "15s";
ruleFiles = [
(pkgs.writeText "up.rules" (
builtins.toJSON {
groups = [
{
name = "up";
rules = [
{
alert = "NotUp";
expr = ''
up == 0
'';
for = "10m";
labels.severity = "warning";
annotations.summary = "scrape job {{ $labels.job }} is failing on {{ $labels.instance }}";
}
];
}
];
}
))
];
};
}
================================================
FILE: build/pluto/prometheus/exporters/anubis.nix
================================================
{
services.prometheus = {
scrapeConfigs = [
{
job_name = "anubis";
static_configs = [
{
targets = [
"hydra.nixos.org:9001"
];
}
];
}
];
};
}
================================================
FILE: build/pluto/prometheus/exporters/blackbox.nix
================================================
{ config, pkgs, ... }:
let
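# Probe helpers: relabeling moves each target into the exporter's ?target=
# query parameter and points the actual scrape at the local blackbox exporter.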
mkStaticProbe =
{
module,
targets,
job_suffix ? "",
}:
{
job_name = "blackbox-${module}${job_suffix}";
metrics_path = "/probe";
params = {
module = [ module ];
};
static_configs = [ { inherit targets; } ];
relabel_configs = [
{
source_labels = [ "__address__" ];
target_label = "__param_target";
}
{
source_labels = [ "__param_target" ];
target_label = "instance";
}
{
target_label = "__address__";
replacement = "localhost:${toString config.services.prometheus.exporters.blackbox.port}";
}
];
};
mkDnsSdProbe = module: dns_sd_config: {
job_name = "blackbox-${module}";
metrics_path = "/probe";
params = {
module = [ module ];
};
dns_sd_configs = [
dns_sd_config
];
relabel_configs = [
{
source_labels = [ "__address__" ];
target_label = "__param_target";
}
{
source_labels = [ "__address__" ];
target_label = "host";
}
{
source_labels = [ "__meta_dns_name" ];
target_label = "instance";
}
{
target_label = "__address__";
replacement = "localhost:${toString config.services.prometheus.exporters.blackbox.port}";
}
];
};
in
{
services.prometheus = {
exporters.blackbox = {
enable = true;
listenAddress = "127.0.0.1";
configFile = pkgs.writeText "probes.yml" (
builtins.toJSON {
modules.https_success = {
prober = "http";
tcp.tls = true;
http.headers.User-Agent = "blackbox-exporter";
};
# From https://github.com/prometheus/blackbox_exporter/blob/53e78c2b3535ecedfd072327885eeba2e9e51ea2/example.yml#L120-L133
modules.smtp_starttls = {
prober = "tcp";
timeout = "10s";
tcp = {
query_response = [
{ expect = "^220"; }
{ send = "EHLO prober\r"; }
{ expect = "^250-STARTTLS"; }
{ send = "STARTTLS\r"; }
{ expect = "^220"; }
{ starttls = true; }
{ send = "EHLO prober\r"; }
{ expect = "^250-AUTH"; }
{ send = "QUIT\r"; }
];
};
};
}
);
};
scrapeConfigs = [
(mkStaticProbe {
module = "https_success";
targets = [
"https://cache.nixos.org"
"https://channels.nixos.org"
"https://common-styles.nixos.org"
"https://discourse.nixos.org"
"https://hydra.nixos.org"
"https://mobile.nixos.org"
"https://monitoring.nixos.org"
"https://nixos.org"
"https://planet.nixos.org"
"https://releases.nixos.org"
"https://status.nixos.org"
"https://survey.nixos.org"
"https://tarballs.nixos.org"
"https://weekly.nixos.org"
"https://wiki.nixos.org"
"https://www.nixos.org"
"https://tracker.security.nixos.org"
];
})
(mkDnsSdProbe "smtp_starttls" {
names = [
"nixos.org"
];
type = "MX";
port = 25;
})
];
ruleFiles = [
(pkgs.writeText "blackbox-exporter.rules" (
builtins.toJSON {
groups = [
{
name = "blackbox";
rules = [
{
alert = "CertificateExpiry";
expr = ''
probe_ssl_earliest_cert_expiry - time() < 86400 * 14
'';
for = "15m";
labels.severity = "warning";
annotations.summary = "Certificate for {{ $labels.instance }} is expiring soon.";
}
{
alert = "HttpUnreachable";
expr = ''
probe_success{job="blackbox-https_success"} == 0
'';
for = "15m";
labels.severity = "warning";
annotations.summary = "Endpoint {{ $labels.instance }} is unreachable";
}
{
alert = "MxUnreachable";
expr = ''
probe_success{job=~"blackbox-smtp_starttls.*"} == 0
'';
for = "15m";
labels.severity = "warning";
annotations.summary = "Mail server {{ $labels.instance }} is unreachable";
}
];
}
];
}
))
];
};
}
================================================
FILE: build/pluto/prometheus/exporters/channel-exporter.py
================================================
#!/usr/bin/env python3
import json
import logging
import sys
import time
from pprint import pprint
import requests
from dateutil.parser import parse
from prometheus_client import Counter, Gauge, Histogram, start_http_server
CHANNEL_REVISION = Gauge(
"channel_revision",
"Current revision, exported as a hack",
["channel", "revision", "status", "variant", "current"],
)
CHANNEL_REQUEST_TIME = Histogram(
"channel_request_time", "Time spent requesting channel data"
)
CHANNEL_UPDATE_TIME = Gauge(
"channel_update_time",
"Total number of failures to fetch spot market prices",
["channel"],
)
CHANNEL_CURRENT = Gauge(
"channel_current",
"If a channel is expected to be current",
["channel"],
)
CHANNEL_REQUEST_FAILURES = Counter(
"channel_request_failures_total",
"Number of channel status requests which have failed",
)
@CHANNEL_REQUEST_TIME.time()
def measure_channel(name):
try:
with CHANNEL_REQUEST_FAILURES.count_exceptions():
result = requests.get(
f"https://nixos.org/channels/{name}/git-revision", timeout=10
)
try:
return {
"timestamp": parse(result.headers["last-modified"]).timestamp(),
"revision": result.text,
}
except KeyError as e:
print(f"Got KeyError after getting our result for {name}:")
pprint(e)
pprint(result)
except Exception as e:
print(f"Got a mystery error for {name}:")
pprint(e)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
start_http_server(9402)
with open(sys.argv[1]) as channel_data:
channels = json.load(channel_data)
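# Remember the last revision seen per channel so stale revision label sets can be removed.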
revisions = {}
while True:
for channel, about in channels.items():
measurement = measure_channel(channel)
if measurement is not None:
revision = measurement["revision"]
status = about.get("status", "")
variant = about.get("variant", "")
current = int(status != "unmaintained")
CHANNEL_UPDATE_TIME.labels(channel=channel).set(
measurement["timestamp"]
)
CHANNEL_REVISION.labels(
channel=channel,
revision=revision,
status=status,
variant=variant,
current=current,
).set(1)
CHANNEL_CURRENT.labels(channel=channel).set(current)
print(f"updated {channel}")
previous_revision = revisions.pop(channel, None)
revisions[channel] = revision
if previous_revision and previous_revision != revision:
CHANNEL_REVISION.remove(
channel, previous_revision, status, variant, current
)
time.sleep(55)
================================================
FILE: build/pluto/prometheus/exporters/channel.nix
================================================
{ lib, pkgs, ... }:
let
channels = pkgs.writeText "channels.json" (
builtins.toJSON (import ../../../../channels.nix).channels
);
in
{
systemd.services.channel-update-exporter = {
description = "Check all active channels' last-update times";
path = [
(pkgs.python3.withPackages (
pypkgs: with pypkgs; [
requests
prometheus-client
python-dateutil
]
))
];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
DynamicUser = true;
ExecStart = "${./channel-exporter.py} ${channels}";
};
};
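# One scrape job for the local channel-update exporter above, plus one job per
# channel that scrapes Hydra's /job/<job>/prometheus endpoint directly.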
services.prometheus.scrapeConfigs = [
{
job_name = "channel-updates";
metrics_path = "/";
static_configs = [ { targets = [ "127.0.0.1:9402" ]; } ];
}
]
++ lib.mapAttrsToList (name: value: {
job_name = "channel-job-${name}";
scheme = "https";
scrape_interval = "5m";
metrics_path = "/job/${value.job}/prometheus";
static_configs = [
{
labels = {
current = if value.status != "unmaintained" then "1" else "0";
channel = name;
};
targets = [ "hydra.nixos.org:443" ];
}
];
}) (import ../../../../channels.nix).channels;
}
================================================
FILE: build/pluto/prometheus/exporters/domain.nix
================================================
{ pkgs, ... }:
{
services.prometheus = {
exporters.domain = {
enable = true;
listenAddress = "localhost";
};
scrapeConfigs = [
{
# https://github.com/caarlos0/domain_exporter#configuration
job_name = "domain";
metrics_path = "/probe";
relabel_configs = [
{
source_labels = [ "__address__" ];
target_label = "__param_target";
}
{
target_label = "__address__";
replacement = "localhost:9222";
}
];
static_configs = [
{
targets = [
"nix.ci"
"nix.dev"
"nixos.org"
"ofborg.org"
];
}
];
}
];
ruleFiles = [
(pkgs.writeText "domain-exporter.rules" (
builtins.toJSON {
groups = [
{
name = "domain";
rules = [
{
alert = "DomainExpiry";
expr = "domain_expiry_days != -1 and domain_expiry_days < 30";
for = "1h";
labels.severity = "warning";
annotations.summary = "Domain {{ $labels.domain }} will expire in less than 30 days";
}
{
alert = "DomainProbeFailure";
expr = "domain_probe_success == 0";
for = "1d";
labels.severity = "warning";
annotations.summary = "Domain {{ $labels.domain }} probe failing for more than 1 day.";
}
];
}
];
}
))
];
};
}
================================================
FILE: build/pluto/prometheus/exporters/fastly.nix
================================================
{ config, ... }:
{
age.secrets.fastly-exporter-env.file = ../../../secrets/fastly-exporter-env.age;
services.prometheus = {
exporters.fastly = {
enable = true;
listenAddress = "127.0.0.1";
environmentFile = config.age.secrets.fastly-exporter-env.path;
};
scrapeConfigs = [
{
job_name = "fastly";
metrics_path = "/metrics";
static_configs = [ { targets = [ "127.0.0.1:9118" ]; } ];
}
];
};
}
================================================
FILE: build/pluto/prometheus/exporters/github.nix
================================================
{ pkgs, ... }:
let
exporter = pkgs.fetchFromGitHub {
owner = "grahamc";
repo = "prometheus-github-exporter";
rev = "01b6f8ef06b694411baf10f49e7b05afb26ab307";
sha256 = "sha256-Sk/ynhPeXQVIgyZJ3Gj1VynJhPWmBHjrRnGYLjnJvio=";
};
config = pkgs.writeText "config.json" (
builtins.toJSON {
port = 9401;
repos = [
"NixOS/nixpkgs"
"NixOS/nix"
];
}
);
in
{
systemd.services.prometheus-github-exporter = {
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
DynamicUser = true;
User = "github-exporter";
Restart = "always";
RestartSec = "60s";
PrivateTmp = true;
};
path = [
(pkgs.python3.withPackages (
ps: with ps; [
prometheus-client
requests
]
))
];
script = "exec python3 ${exporter}/scrape.py ${config}";
};
services.prometheus.scrapeConfigs = [
{
job_name = "prometheus-github-exporter";
metrics_path = "/";
static_configs = [ { targets = [ "127.0.0.1:9401" ]; } ];
}
];
}
================================================
FILE: build/pluto/prometheus/exporters/hydra-queue-runner-reexporter.py
================================================
#!/usr/bin/env nix-shell
#!nix-shell -i python3 -p python3 -p python3Packages.requests -p python3Packages.prometheus_client
import contextlib
import json
import time
import requests
from prometheus_client import CollectorRegistry, start_http_server
from prometheus_client.core import CounterMetricFamily, GaugeMetricFamily
def debug_remaining_state(edict) -> None:
# pprint(edict.remaining_state())
pass
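# Wraps the scraped JSON so reads can be destructive: whatever is left over
# afterwards (fields nobody consumed) can be inspected via remaining_state().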
class EvaporatingDict:
def __init__(self, state) -> None:
self._state = state
def preserving_read(self, key):
val = self._state[key]
if isinstance(val, dict):
return EvaporatingDict(val)
return val
def preserving_read_default(self, key, default):
try:
return self.preserving_read(key)
except KeyError:
return default
def destructive_read(self, key):
val = self.preserving_read(key)
del self._state[key]
return val
def destructive_read_default(self, key, default):
try:
val = self.preserving_read(key)
del self._state[key]
return val
except KeyError:
# Not nice, but accounts for weird conditionals in Hydra
# todo: log bad reads?
return default
def unused_read(self, key) -> None:
self.destructive_read_default(key, default=None)
def remaining_state(self):
return self._state
def items(self):
keys = list(self._state.keys())
for key in keys:
yield (key, self.destructive_read(key))
class HydraScrapeImporter:
def __init__(self, status) -> None:
self._status = EvaporatingDict(status)
def collect(self):
# The metrics are consumed in the order presented by
# https://github.com/NixOS/hydra/blob/adf59a395993d5ed1d7a31108f7666195f789c99/src/hydra-queue-runner/hydra-queue-runner.cc#L536
yield self.trivial_gauge(
"up",
"Is hydra running",
1 if self.destructive_read("status") == "up" else 0,
)
yield self.trivial_counter(
"time", "Hydra's current time", self.destructive_read("time")
)
yield self.trivial_counter(
"uptime", "Hydra's uptime", self.destructive_read("uptime")
)
self.unused_metric("pid")
yield self.trivial_gauge(
"builds_queued",
"Current build queue size",
self.destructive_read("nrQueuedBuilds"),
)
yield self.trivial_gauge(
"steps_queued",
"Current number of steps for the build queue",
self.destructive_read("nrUnfinishedSteps"),
)
yield self.trivial_gauge(
"steps_runnable",
"Current number of steps which can run immediately",
self.destructive_read("nrRunnableSteps"),
)
yield self.trivial_gauge(
"steps_active",
"Current number of steps which are currently active",
self.destructive_read("nrActiveSteps"),
)
yield self.trivial_gauge(
"steps_building",
"Current number of steps which are currently building",
self.destructive_read("nrStepsBuilding"),
)
yield self.trivial_gauge(
"steps_copying_to",
"Current number of steps which are having build inputs copied to a builder",
self.destructive_read("nrStepsCopyingTo"),
)
yield self.trivial_gauge(
"steps_copying_from",
"Current number of steps which are having build results copied from a builder",
self.destructive_read("nrStepsCopyingFrom"),
)
yield self.trivial_gauge(
"steps_waiting",
"Current number of steps which are waiting",
self.destructive_read("nrStepsWaiting"),
)
yield self.trivial_counter(
"build_inputs_sent_bytes",
"Total count of bytes sent due to build inputs",
self.destructive_read("bytesSent"),
)
yield self.trivial_counter(
"build_outputs_received_bytes",
"Total count of bytes received from build outputs",
self.destructive_read("bytesReceived"),
)
yield self.trivial_counter(
"builds_read",
"Total count of builds whose outputs have been read",
self.destructive_read("nrBuildsRead"),
)
yield self.trivial_counter(
"builds_read_seconds",
"Total number of seconds spent reading build outputs",
self.destructive_read("buildReadTimeMs") / 1000,
)
self.unused_metric("buildReadTimeAvgMs") # implementable in prometheus queries
yield self.trivial_counter(
"builds_done",
"Total count of builds performed",
self.destructive_read("nrBuildsDone"),
)
yield self.trivial_counter(
"steps_started",
"Total count of steps started",
self.destructive_read("nrStepsStarted"),
)
yield self.trivial_counter(
"steps_done",
"Total count of steps completed",
self.destructive_read("nrStepsDone"),
)
yield self.trivial_counter(
"retries", "Total count of retries", self.destructive_read("nrRetries")
)
yield self.trivial_counter(
"max_retries",
"Maximum count of retries for any single job",
self.destructive_read("maxNrRetries"),
)
yield self.trivial_counter(
"step_time",
"Total time spent executing steps",
self.destructive_read_default("totalStepTime", 0),
)
yield self.trivial_counter(
"step_build_time",
"Total time spent executing builds steps (???)",
self.destructive_read_default("totalStepBuildTime", 0),
)
self.unused_metric("avgStepTime")
self.unused_metric("avgStepBuildTime")
yield self.trivial_counter(
"queue_wakeup",
"Count of the times the queue runner has been notified of queue changes",
self.destructive_read("nrQueueWakeups"),
)
yield self.trivial_counter(
"dispatcher_wakeup",
"Count of the times the queue runner work dispatcher woke up due to new runnable builds and completed builds.",
self.destructive_read("nrDispatcherWakeups"),
)
yield self.trivial_counter(
"dispatch_execution_seconds",
"Number of seconds the dispatcher has spent working",
self.destructive_read("dispatchTimeMs") / 1000,
)
self.unused_metric("dispatchTimeAvgMs")
yield self.trivial_gauge(
"db_connections",
"Number of connections to the database",
self.destructive_read("nrDbConnections"),
)
yield self.trivial_gauge(
"db_updates",
"Number of in-progress database updates",
self.destructive_read("nrActiveDbUpdates"),
)
yield self.trivial_counter(
"notifications_total",
"Total number of notifications sent",
self.preserving_read_default("nrNotificationsDone", 0)
+ self.preserving_read_default("nrNotificationsFailed", 0),
)
yield self.trivial_counter(
"notifications_done",
"Number of notifications completed",
self.destructive_read_default("nrNotificationsDone", 0),
)
yield self.trivial_counter(
"notifications_failed",
"Number of notifications failed",
self.destructive_read_default("nrNotificationsFailed", 0),
)
yield self.trivial_counter(
"notifications_in_progress",
"Number of notifications in_progress",
self.destructive_read_default("nrNotificationsInProgress", 0),
)
yield self.trivial_counter(
"notifications_pending",
"Number of notifications pending",
self.destructive_read_default("nrNotificationsPending", 0),
)
yield self.trivial_counter(
"notifications_seconds",
"Time spent delivering notifications",
self.destructive_read_default("nrNotificationTimeMs", 0) / 1000,
)
self.unused_metric("nrNotificationTimeAvgMs")
machineCollector = MachineScrapeImporter()
for name, report in self.destructive_read("machines").items():
machineCollector.load_machine(name, report)
for metric in machineCollector.metrics():
yield metric
jobsetCollector = JobsetScrapeImporter()
for name, report in self.destructive_read("jobsets").items():
jobsetCollector.load_jobset(name, report)
for metric in jobsetCollector.metrics():
yield metric
machineTypesCollector = MachineTypeScrapeImporter()
for name, report in self.destructive_read("machineTypes").items():
machineTypesCollector.load_machine_type(name, report)
for metric in machineTypesCollector.metrics():
yield metric
store = self.destructive_read("store")
yield self.trivial_counter(
"store_nar_info_read",
"Number of NarInfo files read from the binary cache",
store.destructive_read("narInfoRead"),
)
yield self.trivial_counter(
"store_nar_info_read_averted",
"Number of NarInfo files reads which were avoided",
store.destructive_read("narInfoReadAverted"),
)
yield self.trivial_counter(
"store_nar_info_missing",
"Number of NarInfo files read attempts which identified a missing narinfo file",
store.destructive_read("narInfoMissing"),
)
yield self.trivial_counter(
"store_nar_info_write",
"Number of NarInfo files written to the binary cache",
store.destructive_read("narInfoWrite"),
)
yield self.trivial_gauge(
"store_nar_info_cache_size",
"Size of the in-memory store path information cache",
store.destructive_read("narInfoCacheSize"),
)
yield self.trivial_counter(
"store_nar_read",
"Number of NAR files read from the binary cache",
store.destructive_read("narRead"),
)
yield self.trivial_counter(
"store_nar_read_bytes",
"Number of NAR file bytes read after decompression from the binary cache",
store.destructive_read("narReadBytes"),
)
yield self.trivial_counter(
"store_nar_read_compressed_bytes",
"Number of NAR file bytes read before decompression from the binary cache",
store.destructive_read("narReadCompressedBytes"),
)
yield self.trivial_counter(
"store_nar_write",
"Number of NAR files written to the binary cache",
store.destructive_read("narWrite"),
)
yield self.trivial_counter(
"store_nar_write_averted",
"Number of NAR files writes skipped due to the NAR already being in the binary cache",
store.destructive_read("narWriteAverted"),
)
yield self.trivial_counter(
"store_nar_write_bytes",
"Number of NAR file bytes written after decompression to the binary cache",
store.destructive_read("narWriteBytes"),
)
yield self.trivial_counter(
"store_nar_write_compressed_bytes",
"Number of NAR file bytes written before decompression to the binary cache",
store.destructive_read("narWriteCompressedBytes"),
)
yield self.trivial_counter(
"store_nar_write_compression_seconds",
"Number of seconds spent compressing data when writing NARs to the binary cache",
store.destructive_read("narWriteCompressionTimeMs") / 1000,
)
store.unused_read("narCompressionSavings")
store.unused_read("narCompressionSpeed")
try:
s3 = self.destructive_read("s3")
except KeyError:
# no key, no metrics
s3 = None
if s3:
# Kept outside the try above so that mistakes in the following
# code are not swallowed by its except handler
yield self.trivial_counter(
"store_s3_put", "Number of PUTs to S3", s3.destructive_read("put")
)
yield self.trivial_counter(
"store_s3_put_bytes",
"Number of bytes written to S3",
s3.destructive_read("putBytes"),
)
yield self.trivial_counter(
"store_s3_put_seconds",
"Number of seconds spent writing to S3",
s3.destructive_read("putTimeMs") / 1000,
)
s3.unused_read("putSpeed")
yield self.trivial_counter(
"store_s3_get", "Number of GETs to S3", s3.destructive_read("get")
)
yield self.trivial_counter(
"store_s3_get_bytes",
"Number of bytes read from S3",
s3.destructive_read("getBytes"),
)
yield self.trivial_counter(
"store_s3_get_seconds",
"Number of seconds spent reading from S3",
s3.destructive_read("getTimeMs") / 1000,
)
s3.unused_read("getSpeed")
yield self.trivial_counter(
"store_s3_head", "Number of HEADs to S3", s3.destructive_read("head")
)
yield self.trivial_counter(
"store_s3_cost_approximate_dollars",
"Estimated cost of the S3 bucket activity",
s3.destructive_read("costDollarApprox"),
)
debug_remaining_state(s3)
debug_remaining_state(store)
def trivial_gauge(self, name, help, value):
c = GaugeMetricFamily(f"hydra_{name}", help)
c.add_metric([], value)
return c
def trivial_counter(self, name, help, value):
c = CounterMetricFamily(f"hydra_{name}_total", help)
c.add_metric([], value)
return c
def unused_metric(self, key) -> None:
self._status.unused_read(key)
def preserving_read(self, key):
return self._status.preserving_read(key)
def preserving_read_default(self, key, default):
return self._status.preserving_read_default(key, default)
def destructive_read(self, key):
return self._status.destructive_read(key)
def destructive_read_default(self, key, default):
return self._status.destructive_read_default(key, default)
def uncollected_status(self):
return self._status.remaining_state()
def blackhole(*args, **kwargs) -> None:
return None
class MachineScrapeImporter:
def __init__(self) -> None:
labels = ["host"]
self.consecutive_failures = GaugeMetricFamily(
"hydra_machine_consecutive_failures",
"Number of consecutive failed builds",
labels=labels,
)
self.current_jobs = GaugeMetricFamily(
"hydra_machine_current_jobs", "Number of current jobs", labels=labels
)
self.idle_since = GaugeMetricFamily(
"hydra_machine_idle_since",
"When the current idle period started",
labels=labels,
)
self.disabled_until = GaugeMetricFamily(
"hydra_machine_disabled_until",
"When the machine will be used again",
labels=labels,
)
self.enabled = GaugeMetricFamily(
"hydra_machine_enabled",
"If the machine is enabled (1) or not (0)",
labels=labels,
)
self.last_failure = CounterMetricFamily(
"hydra_machine_last_failure", "timestamp of the last failure", labels=labels
)
self.number_steps_done = CounterMetricFamily(
"hydra_machine_steps_done_total",
"Total count of the steps completed",
labels=labels,
)
self.total_step_build_time = CounterMetricFamily(
"hydra_machine_step_build_time_total",
"Number of seconds spent building steps",
labels=labels,
)
self.total_step_time = CounterMetricFamily(
"hydra_machine_step_time_total",
"Number of seconds spent on steps",
labels=labels,
)
def load_machine(self, name, report) -> None:
report.unused_read("mandatoryFeatures")
report.unused_read("supportedFeatures")
report.unused_read("systemTypes")
report.unused_read("avgStepBuildTime")
report.unused_read("avgStepTime")
labels = [name]
self.consecutive_failures.add_metric(
labels, report.destructive_read("consecutiveFailures")
)
self.current_jobs.add_metric(labels, report.destructive_read("currentJobs"))
with contextlib.suppress(KeyError):
self.idle_since.add_metric(labels, report.destructive_read("idleSince"))
self.disabled_until.add_metric(labels, report.destructive_read("disabledUntil"))
self.enabled.add_metric(labels, 1 if report.destructive_read("enabled") else 0)
self.last_failure.add_metric(labels, report.destructive_read("lastFailure"))
self.number_steps_done.add_metric(
labels, report.destructive_read("nrStepsDone")
)
self.total_step_build_time.add_metric(
labels, report.destructive_read_default("totalStepBuildTime", default=0)
)
self.total_step_time.add_metric(
labels, report.destructive_read_default("totalStepTime", default=0)
)
debug_remaining_state(report)
def metrics(self):
yield self.consecutive_failures
yield self.current_jobs
yield self.idle_since
yield self.disabled_until
yield self.enabled
yield self.last_failure
yield self.number_steps_done
yield self.total_step_build_time
yield self.total_step_time
class JobsetScrapeImporter:
def __init__(self) -> None:
self.seconds = CounterMetricFamily(
"hydra_jobset_seconds_total",
"Total number of seconds the jobset has been building",
labels=["name"],
)
self.shares_used = CounterMetricFamily(
"hydra_jobset_shares_used_total",
"Total shares the jobset has consumed",
labels=["name"],
)
def load_jobset(self, name, report) -> None:
self.seconds.add_metric([name], report.destructive_read("seconds"))
self.shares_used.add_metric([name], report.destructive_read("shareUsed"))
debug_remaining_state(report)
def metrics(self):
yield self.seconds
yield self.shares_used
class MachineTypeScrapeImporter:
def __init__(self) -> None:
self.runnable = GaugeMetricFamily(
"hydra_machine_type_runnable",
"Number of currently runnable builds",
labels=["machineType"],
)
self.running = GaugeMetricFamily(
"hydra_machine_type_running",
"Number of currently running builds",
labels=["machineType"],
)
self.wait_time = CounterMetricFamily(
"hydra_machine_type_wait_time_total",
"Number of seconds spent waiting",
labels=["machineType"],
)
self.last_active = CounterMetricFamily(
"hydra_machine_type_last_active_total",
"Last time this machine type was active",
labels=["machineType"],
)
def load_machine_type(self, name, report) -> None:
self.runnable.add_metric([name], report.destructive_read("runnable"))
self.running.add_metric([name], report.destructive_read("running"))
with contextlib.suppress(KeyError):
self.wait_time.add_metric([name], report.destructive_read("waitTime"))
with contextlib.suppress(KeyError):
self.last_active.add_metric([name], report.destructive_read("lastActive"))
debug_remaining_state(report)
def metrics(self):
yield self.runnable
yield self.running
yield self.wait_time
yield self.last_active
class ScrapeCollector:
def __init__(self) -> None:
pass
def collect(self):
return HydraScrapeImporter(scrape()).collect()
def scrape(cached=None):
if cached:
with open(cached) as f:
return json.load(f)
else:
print("Scraping")
return requests.get(
"https://hydra.nixos.org/queue-runner-status",
headers={"Content-Type": "application/json"},
).json()
registry = CollectorRegistry()
registry.register(ScrapeCollector())
if __name__ == "__main__":
# Start up the server to expose the metrics.
start_http_server(9200, registry=registry)
# Keep the main thread alive; the HTTP server thread serves the scrapes.
while True:
time.sleep(30)
================================================
FILE: build/pluto/prometheus/exporters/hydra.nix
================================================
{ pkgs, ... }:
{
systemd.services.prometheus-hydra-queue-runner-exporter = {
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
wants = [ "network.target" ];
serviceConfig = {
DynamicUser = true;
Restart = "always";
RestartSec = "60s";
PrivateTmp = true;
WorkingDirectory = "/tmp";
ExecStart =
let
python = pkgs.python3.withPackages (
ps: with ps; [
requests
prometheus-client
]
);
in
''
${python.interpreter} ${./hydra-queue-runner-reexporter.py}
'';
};
};
services.prometheus = {
scrapeConfigs = [
{
job_name = "hydra";
metrics_path = "/prometheus";
scheme = "https";
static_configs = [ { targets = [ "hydra.nixos.org:443" ]; } ];
}
{
job_name = "hydra_queue_runner";
metrics_path = "/metrics";
scheme = "http";
static_configs = [ { targets = [ "hydra.nixos.org:9198" ]; } ];
}
{
job_name = "hydra-webserver";
metrics_path = "/metrics";
scheme = "https";
static_configs = [ { targets = [ "hydra.nixos.org:443" ]; } ];
}
{
job_name = "hydra-reexport";
metrics_path = "/";
static_configs = [ { targets = [ "localhost:9200" ]; } ];
}
];
ruleFiles = [
(pkgs.writeText "hydra-exporter.rules" (
builtins.toJSON {
groups = [
{
name = "hydra";
rules = [
{
alert = "BuildsStuckOverTwoDays";
expr = ''hydra_machine_build_duration_bucket{le="+Inf"} - ignoring(le) hydra_machine_build_duration_bucket{le="172800"} > 0'';
for = "30m";
labels.severity = "warning";
annotations.summary = "{{ $labels.machine }} has {{ $value }} over-age jobs.";
annotations.grafana = "https://grafana.nixos.org/d/j0hJAY1Wk/in-progress-build-duration-heatmap";
}
];
}
];
}
))
];
};
}
================================================
FILE: build/pluto/prometheus/exporters/json.nix
================================================
{ config, pkgs, ... }:
{
services.prometheus = {
exporters.json = {
enable = true;
listenAddress = "localhost";
configFile = (pkgs.formats.yaml { }).generate "json-exporter-config.yml" {
modules.matrix-federation-checker = {
metrics = [
{
name = "matrix_homeserver_federation_ok";
path = "{.FederationOK}";
help = "False if there's any problem with federation reported.";
type = "value";
value_type = "gauge";
}
];
};
};
};
scrapeConfigs = [
{
job_name = "matrix-federation-checker";
metrics_path = "/probe";
params = {
module = [ "matrix-federation-checker" ];
};
relabel_configs = [
{
source_labels = [ "__address__" ];
target_label = "__param_target";
}
{
source_labels = [ "__address__" ];
target_label = "instance";
}
{
target_label = "__address__";
replacement = "localhost:${toString config.services.prometheus.exporters.json.port}";
}
];
static_configs = [
{
targets = [ "https://federationtester.matrix.org/api/report?server_name=nixos.org" ];
labels.matrix_instance = "nixos.org";
}
];
}
];
ruleFiles = [
(pkgs.writeText "matrix-federation.rules" (
builtins.toJSON {
groups = [
{
name = "matrix-federation";
rules = [
{
alert = "MatrixFederationFailure";
expr = "matrix_homeserver_federation_ok < 1";
for = "30m";
labels.severity = "warning";
annotations.summary = "Matrix federation for {{ $labels.matrix_instance }} appears to be failing.";
}
];
}
];
}
))
];
};
}
================================================
FILE: build/pluto/prometheus/exporters/matrix-synapse.nix
================================================
{
services.prometheus.scrapeConfigs = [
{
job_name = "matrix_synapse";
scheme = "https";
static_configs = [ { targets = [ "matrix.nixos.org:443" ]; } ];
}
];
}
================================================
FILE: build/pluto/prometheus/exporters/nixos.nix
================================================
{
services.prometheus.scrapeConfigs = [
{
job_name = "nixos";
static_configs = [
{
labels.role = "hydra";
targets = [
"mimas.nixos.org:9300"
];
}
{
labels.role = "monitoring";
targets = [
"pluto.nixos.org:9300"
];
}
{
labels.role = "database";
targets = [
"haumea.nixos.org:9300"
"titan.nixos.org:9300"
];
}
];
}
];
}
================================================
FILE: build/pluto/prometheus/exporters/node.nix
================================================
{ pkgs, ... }:
{
services.prometheus = {
scrapeConfigs = [
{
job_name = "node";
static_configs = [
{
labels.role = "hydra";
targets = [
"mimas.nixos.org:9100"
];
}
{
labels.role = "database";
targets = [
"haumea.nixos.org:9100"
"titan.nixos.org:9100"
];
}
{
labels.role = "monitoring";
targets = [
"pluto.nixos.org:9100"
];
}
{
labels.role = "services";
targets = [
"caliban.nixos.org:9100"
"umbriel.nixos.org:9100"
"wiki.nixos.org:9100"
"tracker.security.nixos.org:9100"
"makemake.ngi.nixos.org:9100"
];
}
{
labels.role = "mac";
targets = [
# flying circus
"norwegian-blue.mac.nixos.org:9100"
# hetzner
"intense-heron.mac.nixos.org:9100"
"sweeping-filly.mac.nixos.org:9100"
"maximum-snail.mac.nixos.org:9100"
"growing-jennet.mac.nixos.org:9100"
"enormous-catfish.mac.nixos.org:9100"
# oakhost
"kind-lumiere.mac.nixos.org:9100"
"eager-heisenberg.mac.nixos.org:9100"
# macstadium
"mac01.ofborg.org:9100"
"mac02.ofborg.org:9100"
"mac03.ofborg.org:9100"
"mac04.ofborg.org:9100"
"mac05.ofborg.org:9100"
];
}
{
labels.role = "builders";
targets = [
"elated-minsky.builder.nixos.org:9100"
"sleepy-brown.builder.nixos.org:9100"
"goofy-hopcroft.builder.nixos.org:9100"
"hopeful-rivest.builder.nixos.org:9100"
];
}
{
labels.role = "ofborg";
targets = [
"build01.ofborg.org:9100"
"build02.ofborg.org:9100"
"build03.ofborg.org:9100"
"build04.ofborg.org:9100"
"build05.ofborg.org:9100"
"core01.ofborg.org:9100"
"eval01.ofborg.org:9100"
"eval02.ofborg.org:9100"
"eval03.ofborg.org:9100"
"eval04.ofborg.org:9100"
];
}
];
}
];
ruleFiles =
let
diskSelector = ''mountpoint="/"'';
in
[
(pkgs.writeText "node-exporter.rules" (
builtins.toJSON {
groups = [
{
name = "node";
rules = [
{
alert = "PartitionLowInodes";
expr = ''
node_filesystem_files_free{${diskSelector}} / node_filesystem_files{${diskSelector}} * 100 < 10
'';
for = "60m";
labels.severity = "warning";
annotations.summary = "{{ $labels.device }} mounted to {{ $labels.mountpoint }} ({{ $labels.fstype }}) on {{ $labels.instance }} has only {{ $value }}% free inodes.";
annotations.grafana = "https://grafana.nixos.org/d/rYdddlPWk/node-exporter-full?orgId=1&var-job=node&var-node={{ $labels.instance }}";
}
{
alert = "PartitionLowDiskSpace";
expr = ''
round((node_filesystem_free_bytes{${diskSelector}} * 100) / node_filesystem_size_bytes{${diskSelector}}) < 10 and ON (instance, device, mountpoint) node_filesystem_free_bytes < 100 * 1024^3
'';
for = "60m";
labels.severity = "warning";
annotations.summary = "{{ $labels.device }} mounted to {{ $labels.mountpoint }} ({{ $labels.fstype }}) on {{ $labels.instance }} has {{ $value }}% free.";
annotations.grafana = "https://grafana.nixos.org/d/rYdddlPWk/node-exporter-full?orgId=1&var-job=node&var-node={{ $labels.instance }}";
}
{
alert = "SystemdUnitFailed";
expr = ''
node_systemd_unit_state{state="failed"} == 1
'';
for = "15m";
labels.severity = "warning";
annotations.summary = "systemd unit {{ $labels.name }} on {{ $labels.instance }} has been down for more than 15 minutes.";
}
];
}
{
name = "scheduled-jobs";
rules = [
{
alert = "ChannelUpdateStuck";
expr = ''max_over_time(node_systemd_unit_state{name=~"^update-nix.*.service$",state=~"failed"}[5m]) == 1'';
for = "30m";
labels.severity = "warning";
annotations.summary = "{{ $labels.name }} on {{ $labels.instance }}";
annotations.grafana = "https://grafana.nixos.org/d/fBW4tL1Wz/scheduled-task-state-channels-website?orgId=1&refresh=10s";
}
];
}
];
}
))
];
};
}
================================================
FILE: build/pluto/prometheus/exporters/owncast.nix
================================================
{ config, ... }:
{
age.secrets.owncast-admin-password = {
file = ../../../secrets/owncast-admin-password.age;
owner = "prometheus";
group = "prometheus";
};
services.prometheus.scrapeConfigs = [
{
job_name = "owncast";
metrics_path = "/api/admin/prometheus";
basic_auth = {
username = "admin";
password_file = config.age.secrets.owncast-admin-password.path;
};
scheme = "https";
static_configs = [ { targets = [ "live.nixos.org:443" ]; } ];
}
];
}
================================================
FILE: build/pluto/prometheus/exporters/postgresql.nix
================================================
{
services.prometheus.scrapeConfigs = [
{
job_name = "postgresql";
metrics_path = "/metrics";
static_configs = [
{
targets = [
"haumea.nixos.org:9187"
"titan.nixos.org:9187"
"tracker.security.nixos.org:9187"
];
}
];
}
];
}
================================================
FILE: build/pluto/prometheus/exporters/rasdaemon.nix
================================================
{ pkgs, ... }:
{
services.prometheus = {
scrapeConfigs = [
{
job_name = "rasdaemon";
static_configs = [
{
targets = [
# build
"mimas.nixos.org:10029"
"haumea.nixos.org:10029"
"pluto.nixos.org:10029"
"titan.nixos.org:10029"
# builders
"elated-minsky.builder.nixos.org:10029"
"sleepy-brown.builder.nixos.org:10029"
"goofy-hopcroft.builder.nixos.org:10029"
"hopeful-rivest.builder.nixos.org:10029"
# non-critical
"caliban.nixos.org:10029"
];
}
];
}
];
ruleFiles = [
(pkgs.writeText "rasdaemon.rules" (
builtins.toJSON {
groups = [
{
name = "rasdaemon";
rules = [
{
alert = "MachineCheckError";
expr = ''
increase(rasdaemon_mce_records_total{mce_msg!="Corrected error, no action required."}[1h]) > 0
'';
labels.severity = "warning";
annotations.summary = "Machine check detected an error on {{ $labels.instance }}: {{ $labels.mce_msg }}";
}
];
}
];
}
))
];
};
}
================================================
FILE: build/pluto/prometheus/exporters/sql.nix
================================================
{
services.prometheus.scrapeConfigs = [
{
job_name = "sql";
metrics_path = "/metrics";
static_configs = [ { targets = [ "tracker.security.nixos.org:9237" ]; } ];
}
];
}
================================================
FILE: build/pluto/prometheus/exporters/storagebox.nix
================================================
{
config,
pkgs,
...
}:
{
age.secrets."storagebox-exporter-token".file = ../../../secrets/storagebox-exporter-token.age;
services.prometheus = {
exporters.storagebox = {
enable = true;
listenAddress = "localhost";
tokenFile = config.age.secrets."storagebox-exporter-token".path;
};
scrapeConfigs = [
{
job_name = "storagebox";
scheme = "http";
static_configs = [ { targets = [ "localhost:9509" ]; } ];
}
];
ruleFiles = [
(pkgs.writeText "storagebox-exporter.rules" (
builtins.toJSON {
groups = [
{
name = "storagebox";
rules = [
{
alert = "StorageboxCapacity";
expr = "round(100 * (1 - (storagebox_disk_usage / storagebox_disk_quota))) < 10";
for = "30m";
labels.severity = "warning";
annotations.summary = "StorageBox {{ $labels.name }} ({ $labels.server }}) has less than {{ $value }}% free space.";
}
];
}
];
}
))
];
};
}
================================================
FILE: build/pluto/prometheus/exporters/up.nix
================================================
{ pkgs, ... }:
{
services.prometheus.ruleFiles = [
(pkgs.writeText "up.rules" (
builtins.toJSON {
groups = [
{
name = "up";
rules = [
{
alert = "NotUp";
expr = ''
up == 0
'';
for = "10m";
labels.severity = "warning";
annotations.summary = "scrape job {{ $labels.job }} is failing on {{ $labels.instance }}";
}
];
}
];
}
))
];
}
================================================
FILE: build/pluto/prometheus/exporters/zfs.nix
================================================
{
pkgs,
...
}:
{
services.prometheus = {
scrapeConfigs = [
{
job_name = "zfs";
static_configs = [
{
targets = [
"haumea.nixos.org:9134"
"mimas.nixos.org:9134"
"pluto.nixos.org:9134"
"titan.nixos.org:9134"
];
}
];
}
];
ruleFiles = [
(pkgs.writeText "node-exporter.rules" (
builtins.toJSON {
groups = [
{
name = "zfs";
rules = [
{
alert = "ZfsPoolHealth";
expr = ''
zfs_pool_health > 0
'';
for = "5m";
labels.severity = "WARNING";
annotations.summary = "ZFS pool {{ $labels.pool }} on {{ $labels.instance }} is unhealthy.";
}
{
alert = "ZfsPoolFull";
expr = ''
round((zfs_pool_free_bytes / zfs_pool_size_bytes) * 100, 1) < 15
'';
for = "30m";
labels.severity = "warning";
annotations.summary = "ZFS pool {{ $labels.pool }} on {{ $labels.instance }} has only {{ $value }}% free space.";
annotations.grafana = "https://grafana.nixos.org/d/rYdddlPWk/node-exporter-full?orgId=1&var-job=node&var-node={{ $labels.instance }}";
}
];
}
];
}
))
];
};
}
================================================
FILE: build/pluto/prometheus/exporters/zrepl.nix
================================================
{ pkgs, ... }:
{
services.prometheus = {
scrapeConfigs = [
{
job_name = "zrepl";
static_configs = [
{
labels.role = "database";
targets = [
"titan.nixos.org:9811"
];
}
];
}
];
ruleFiles = [
(pkgs.writeText "zrepl.rules" (
builtins.toJSON {
groups = [
{
name = "zrepl";
rules = [
{
alert = "ZreplLongTimeNoSuccess";
expr = ''
time() - zrepl_replication_last_successful > ${toString (6 * 60 * 60)}
'';
for = "6h";
labels.severity = "warning";
annotations.summary = "zrepl job {{ $labels.zrepl_job }} has not succeeded recently.";
}
];
}
];
}
))
];
};
}
================================================
FILE: build/scripts/nix-mac-installer.sh
================================================
#! /usr/bin/env bash
set -e
if [[ $(id -u) != 0 ]]; then
echo "$0: please run this script as root"
exit 1
fi
export HOME=/var/root
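# Create the nixbld group and ten nixbld<N> build users that the Nix daemon expects.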
if ! dscl . read /Groups/nixbld >/dev/null 2>&1; then
dseditgroup -o create nixbld -q
fi
gid=$(dscl . -read /Groups/nixbld | awk '($1 == "PrimaryGroupID:") {print $2 }')
echo "created nixbld group with gid $gid"
for i in $(seq 1 10); do
user=/Users/nixbld$i
uid="$((30000 + i))"
dscl . -create "$user"
dscl . -create "$user" RealName "Nix build user $i"
dscl . -create "$user" PrimaryGroupID "$gid"
dscl . -create "$user" UserShell /usr/bin/false
dscl . -create "$user" NFSHomeDirectory /var/empty
dscl . -create "$user" UniqueID "$uid"
dseditgroup -o edit -a "nixbld$i" -t user nixbld
echo "created nixbld$i user with uid $uid"
done
curl https://nixos.org/nix/install | sh
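# Authorize the Hydra queue runner's key, restricted to running 'nix-store --serve --write'.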
mkdir -p /var/root/.ssh
touch /var/root/.ssh/authorized_keys
grep -v "hydra-queue-runner@chef" /var/root/.ssh/authorized_keys >/var/root/.ssh/authorized_keys.tmp || true
echo 'command="/nix/var/nix/profiles/default/bin/nix-store --serve --write" ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyM48VC5fpjJssLI8uolFscP4/iEoMHfkPoT9R3iE3OEjadmwa1XCAiXUoa7HSshw79SgPKF2KbGBPEVCascdAcErZKGHeHUzxj7v3IsNjObouUOBbJfpN4DR7RQT28PZRsh3TvTWjWnA9vIrSY/BvAK1uezFRuObvatqAPMrw4c0DK+JuGuCNkKDGHLXNSxYBc5Pmr1oSU7/BDiHVjjyLIsAMIc20+q8SjWswKqL1mY193mN7FpUMBtZrd0Za9fMFRII9AofEIDTOayvOZM6+/1dwRWZXM6jhE6kaPPF++yromHvDPBnd6FfwODKLvSF9BkA3pO5CqrD8zs7ETmrV hydra-queue-runner@chef' >>/var/root/.ssh/authorized_keys.tmp
mv /var/root/.ssh/authorized_keys.tmp /var/root/.ssh/authorized_keys
service_plist=/Library/LaunchDaemons/org.nixos.nix-daemon.plist
ln -sfn /nix/var/nix/profiles/default$service_plist $service_plist
launchctl unload $service_plist || true
launchctl load $service_plist
launchctl start $service_plist
================================================
FILE: build/scripts/nix-mac-nuke.sh
================================================
#! /usr/bin/env bash
service_plist=/Library/LaunchDaemons/org.nixos.nix-daemon.plist
launchctl stop $service_plist
launchctl unload $service_plist
dscl . -delete /Groups/nixbld
for i in $(seq 1 20); do
dscl . -delete "/Users/nixbld$i"
done
sudo rm -f $service_plist
sudo rm -rf /nix /etc/nix/nix.conf
rm -f "$HOME/.nix-channels" "$HOME/.nix-profile"
rm -rf "$HOME/.nix-defexpr"
================================================
FILE: build/secrets/alertmanager-oauth2-proxy-env.age
================================================
age-encryption.org/v1
-> ssh-ed25519 s9hT2g WEFWAkfO/QbTyYHtjbtFU819qNNwdEbxj43CAyoCth8
qoaEcEMG3pioLP8DYEV7am6ARmo/1Fi6859geefy0TQ
-> ssh-ed25519 Gr9EaQ GQAGFJXSwPlg9lh9Uq+gX5dYyEhFGFOgzmT/Ix9vHww
322Zi2PWOPB8UXq+cLNBPCPxnUV+MikURA1SN947pRI
-> ssh-ed25519 3ENwVg YGdKuSB26eLhJivsqJ9yZCtzjDWKCHuf2Az63RgZQhM
BggPA13/FpAAGzOryNoIYZL3S60FFK5pTuB0+eGCrIY
-> ssh-rsa MuWD+w
f81kBsXTgGYsDimMkOrZAJagzqiycmLSxiSYdV+gconCZKrOLIfa9npjbOP26zIf
oWez1vf1d1O/Kzk4XYQXTBpDdX2SBncQTtaBOAaNxG9YakieGbBCV5nypAioq7RF
wYB9R4XseanmgBdXeMIQ54NWX9zsHZkPEIFCvKyTGGV+uvoiILQDreuBMY5EHB2B
o5aqzW8FW1urgRSw9bQnXixuO0QjcAFWyhqCO95P50vnugJFqj7txQpM1vrFjZ78
WCRtHYT3QufvmN0VhiaCTjWQjr1RPptvVoy7M+Q5T29+tnr5gn0DZOSyXNEQmGAU
kiWx6IV1G9l1Mzp4SMr2sQ
-> ssh-ed25519 92bXiA BiUs0UMX4R3F2boMComJcLLKfR4nnHXwtakjnqPz10M
bf9ePOMfN/WSlG0Ef3cgFtcNzTiovZRPKEBzJS+pFww
-> ssh-ed25519 Y121Gw 32ZtEmLRbyOcjtAp7Phdlmb18wHs92+kST1qh1giETw
oxfQmuhtrLplP6jeCXlawzF6wU+EPmHBUei8DIIgXgI
--- GXbCejJoEBQ71qdNg5Wbb8liJVscqX4fHBlfSdvpjkE
Z { ӝ`jg~H<oܞ]U÷^ayUamw1|>T_ң
/[~Ȉ=xkd=."ET<О~ah$ؽ0449'*$jhccҵ
================================================
FILE: build/secrets/eager-heisenberg-queue-runner-token.age
================================================
age-encryption.org/v1
-> ssh-ed25519 cKT5Kw d2hBbAiEI7iLoP1c7WgXkJXnqfsy3GWPy23NZcHrb3A
dIEVrctp2Ryu92cSBILUE+qeeLz0raQ1nTLGAPaZec4
-> ssh-ed25519 NJQh8Q nThSL+PZmkUrXssS5YXqS1x4InMJMJKBma7/UpZcb3E
WIVRniPt17W/GkOySUO/tFk0wlecxIMMZtcgV4caG0M
-> ssh-ed25519 Gr9EaQ MTnHof1JOu4d5vObVatnKyhi20Da0K0v5TSyxhk7gwI
YXIYyvGWR2cf6GJb7VL4aiu0gxKLyK1PyGhgw2vLJz8
-> ssh-ed25519 3ENwVg rIi+Y4H0U+wkaO4zmIEbDd2Bd7tQnesw4yW+klqqQBM
vd1c2lP+A5cyk2bfUoO09oPo49SnGzlXf95FrxuxRlA
-> ssh-rsa MuWD+w
moxeHv57SfIBrPVMvLiWZhh1qJHIii5maadnQZl8JUqjSDFpnPX4hXNIvwrqBau7
Xn2X3tncgQ2Vp33757YembRDSOU7X06QASaRitxFrbHJu4iRIYwcyWoHbYn6jhPc
9yK39sMNliHgZXDq2c0+DThV/PpvZd8yuVlP2oI5FqjlITjiFnTnJf+3c+uquc6v
mxEwWUnrA8dSJD7RzcshW7swHu3FeC+MValEuiIQJaDlMUa211DhTGgtpSebuFrg
Nlx+ZqS2k8LO2qAFyCemoMRMwod7VsCqtid6PxdEuwd8O0v7wfVafu0z+LCGMZoy
SxKlCaVvDQJSzkAcj7EHvA
-> ssh-ed25519 92bXiA bH6FYqVLVNbMBleHCALYbv7nykoIHcvaWlIvQnbyNRg
joPDIXaqdMccBWdXvsvV9/ZlOVbE6pmrOFQ+WgUno68
-> ssh-ed25519 Y121Gw kWm5O/sfXSAYRFsFWgKgWR3dUSKo2OFN5I0npz2x+TI
wfbOq5meojODlRi3RZ+uFNokSPYLZNndB9nhp31wMTo
--- /EhbVaVRVAyPOjTpmhTcRSh3kuyT/KoEkedwitZpTNk
T,hv `HAe
G_ j<O7{둍ˣ
4hH1n QQHu[YײN)UeC"7Q1^vj]z^l."u+9Q
================================================
FILE: build/secrets/fastly-exporter-env.age
================================================
age-encryption.org/v1
-> ssh-ed25519 s9hT2g RO6Blf+MB32dW1vWtwpsdutfPRDhXp6qMh+9K5mP/yI
aojG0tr0pQ172/Sgrcm4ltdGJH5uCdW6hpgvFE/gDFE
-> ssh-ed25519 Gr9EaQ ByRH47STTrDIIyt8d/EitsWGW2zHs3XWE44A3AJVZy4
fhT87Y7e7J41Cfrvldh152mVTz9dD4PuaxN3S6OkXfc
-> ssh-ed25519 3ENwVg Wk0Tt67znuSj137ODLVZ+jmYD+QZ06pnEia24XJau20
1n5AUDJ7G4BrD4jZ/bFtmehX5wqd5nmaIluzVd+bGeY
-> ssh-rsa MuWD+w
swfRBQIzsOuJe0NW1fjEPTNbCNdDCj/tvajEZQexxZV2koyXzCZMZu6WkUE7EWIQ
9dg3dN+SgIBDsBCimVwDLdlKCv07Y4EYVJcUKWQyGrCnyKD0fNL+H/b0NFvkln5d
xpWShnL/zTEa/Bz/1ftzTcDV4B6g75HyIrfXnc5yNQPsk7w4u+tvUIZFiPsUkwj9
2raYpVSZG07xPxDDujADlNLuVNhTCw2MxN/cUS4u7iN9cMilFwND0clRVjQl4APe
Wnzb5iZ73sMi4wg2Qf8+O//zxe9221krnpjhdkyR3k8Oxk4SPACSxuLKKXn5PVcD
Gi8C3sxSSTLzpwAqySR94g
-> ssh-ed25519 92bXiA TXBDrIkPKkagHD7cvWsD0BkE8p0pJYIK5LaCCxDvzF0
gpkhwY7kVYK23ALcahfAucaOP2Tf6UJ9QuFCxbWND3k
-> ssh-ed25519 Y121Gw IMc36vETqcH985olPop763Y/SIPl0GdRDecUFlmqU1A
pWOPIMjlWkKFMxZAhnBNu5nmTn0YA3/pss3vcr2uEvU
--- 0bm0YdyW2rphnkhcSz3jjdUe5eyELylNp4MhcSmAkdU
`NLS^ǩBbܲfEV[oI/JZ_^bYQ5CDўQg~4_aهdկl'$
================================================
FILE: build/secrets/goofy-hopcroft-queue-runner-token.age
================================================
age-encryption.org/v1
-> ssh-ed25519 cKT5Kw hA/K9EJyGfAbGbokosZGVEJqasHjE2bgr2EpEN4O/iQ
7GaeyhJHezMSytl+75UzkiLvbxMpWSKoYb7aEH/D1qU
-> ssh-ed25519 h7xPTg oBM3m/s0x5ue87LfgCOpyTfs0R0N4dmKwa7oW/R+nCQ
HTxdFwkGtkCficUjMSe1bE95fv5gwMEvIlaNPb+LJvM
-> ssh-ed25519 Gr9EaQ GdbCzg5bOJlVsTebVEE+y6StuiH1kZRG07D/bt1zuww
EZqucrVkaX6ZTGJT0aiHmp4o9Z3IUIk82Df1Z2YkU5s
-> ssh-ed25519 3ENwVg Ky1YIXGrt+UX5y745wePV1pulUHrr1yXzFRd+MHEITc
BmWr551rvrtWl2PxD/+qYodybA0xA6Z/1Noza0te+Vo
-> ssh-rsa MuWD+w
RjaIoseiPazdSz75+ly66RqY0IhyQPBtltWLgGEYzhTkmzpnQNcUVpwgiPSzbt5X
y7o+o+QPaHeds5suS42ZzUPahhLp1v5ehVaMXvsmqxkOZfODLxF3GGoFj4SG/YjJ
aDd+bagUql7HX0cZRp51LpnitzOxayd8qeUZg51mqFi8uWV1DBSYrFdcVHBNeGuQ
AbdUl9tqFtYilqcBJhCJOsKsiUsrX2bC6ZP8A6Pmt3gl8UR8nJLhD5TwQH6FCxDO
iKbY21BwiKH8CJhQTNix6uwmTOwlX9mp8N6UNmqWuXB/3F4NmpyubnUvG9t0QGVl
EsS5dlQ04JG/WrWDQpOR/w
-> ssh-ed25519 92bXiA 7EaMly7GPo9fPETY606UO9in6bhbkQhgRxsO2u5Bgws
IzeyNKnkYt8lwTk1TRxLooJJJmPFxIYZJAoDHm1Oqtg
-> ssh-ed25519 Y121Gw 3tlRc4oDBLx1/Dn/KwnyUzg/odwMGLaFDksNB5RTqCk
TJhtG/2/0PL7k84hQyAFEvLAFyZYP1W8erUpCANG7Mw
--- mKpJ626SlxFTL7kt2BJOna043kiReyoMA8hl604J2hc
H&=LG*t2IS_X(k(4NYtrJ^K0b&?#՜S=1jɰVQިFxQFHg4os ͇l~tg;!%O
================================================
FILE: build/secrets/grafana-secret-key.age
================================================
age-encryption.org/v1
-> ssh-ed25519 s9hT2g Q71aJ0AH5YJng/IVw8l5lch8zdGP3Z0QJUIQ+DqYF3w
KI+qnX5ShsgtdtC78UHGwiKjAgWNwahfSJ/nwblBovk
-> ssh-ed25519 Gr9EaQ idUvootpliMS7P2N80vhIirTOz7oJ0o3GscsMu5W4B8
kOiVYBBJhQmtqDBzX1rmGG10tM/oTrwuL0K55VMxh8I
-> ssh-ed25519 3ENwVg d5iQiKR/D0I83d7UznzTQGNRhviceQNGl9ecQyfGlw8
w4MthT7i0KjUSfV9Jh9LuhzAU+hPtr4UGO32UUn3l8M
-> ssh-rsa MuWD+w
mbVZ1olug6y9Hlf2k/NTx3DA9VTlMj/Q0jU1YSjHWvSCr2kiaeeLCm2TebKAsop5
CQc7MCTFJKz9bzitnvLGjl40OVrXKoJzvqJPG0AP6hFvsLVfs0zoX3dpHNDkRFsH
sHtqi/6DikujtsSLgZNYTnaRfMJHdRIkT1UB13TWqA962593NYK3bvuGHyih12SQ
aLxAZ2MzeXflt3V6tYsY66V4RNHCxf8hK2SfZr2sD83JiW3xtRss2UZUyp7geMQY
v8sxbBv8ONzp4FPL+w+/3pX4TO6NmO9rk7S0/xsyfIZAI7xkPKhfMcVgT9Qj/7f1
QwzCHIg9Y+Dt1lh3D0TnGA
-> ssh-ed25519 92bXiA 4oR9LMqITwM+xuuzobwJji2lP/gLjwRtJHUNEZLSEG4
wUYMpR71mFY7wskPXAk/buUZBhY2IQAVAPL61iM411g
-> ssh-ed25519 Y121Gw gbhbUYbMsatS1kaXvl3RkVHB+j+wt/9W+hxK43QiRAs
erkaoQzmzKUFZ5avT07KgS+MojylmrxuggwjOJspMy4
--- rFRS/RCB4dRgJfJBWktxivASq9KSonyLE3h/vfp1Zj0
=AC9#_[T+I>NL>*1>0xh{;mמS|"5
================================================
FILE: build/secrets/hopeful-rivest-queue-runner-token.age
================================================
age-encryption.org/v1
-> ssh-ed25519 cKT5Kw jz7oaOXlftKuXEIeFcFXacn0gcDuQhGkZRLmf0QTPXQ
Br67PR4rBrZaKbP/X8X4vFkPq8L5IiNicvfXBvuaVdw
-> ssh-ed25519 BaUP3w 8o3MNSWRhtrCgaqdQsBfmmg3LCAD9khNCXNlTAgegzE
c137Ep8omrJBRcnqbRMwVB87CyB66u07qj5Xjor8hSY
-> ssh-ed25519 Gr9EaQ tEa19teKlX3ZXJBOmBnOLU9GwnkDlfSdUzxaAMsY+3Y
gWS3dYhg6psO0WNCD+s0kjqzapOnU4hQgWrcKh0iDbk
-> ssh-ed25519 3ENwVg LiSqdv8ukjIjACQwk6203kkNotG+oRgGTkqsITRNjiU
jOnUs9E5Tcu9eEnR8WXW277LZ+tRNyqM4b3Hg8EGu/8
-> ssh-rsa MuWD+w
enx5oiARoCPhm1D/MIdgIh2kjZFx4rxszCmW0j7RaS0SXDPu79c1QENwgemQdvLY
uwX6teB+LkkWdcA6AFqY2FclopBRZq15OQuMoztBjwGPUIlk8H8OHrusViDJuGNm
zdWsL4htncmTUWaX31V1ZX/v+KFl2Zp5Mmpn8x4C21wm5d42SOd5VRnw/OlziJGX
gUG2DqLpoKzXDG9SAsKfk417Akfb8RtlVza6/tb57hThi9EsORK+BnTsUt6r6H84
NvTuqnOJJFOEWqeRz1UjLij/gI10LQvcxCzhXC/SqkG7FaMXQ92WAZ5hH7AePSEE
I/OlAU2wPj+GmPFePPODSA
-> ssh-ed25519 92bXiA nYLjnIjeF+TmJbVdCtdqK042xnYDpF4naM1u7up31SI
yVhUbve1xiySx+dqRcWdJQOYB2TRGdALa0l4hu1UnbM
-> ssh-ed25519 Y121Gw kxYp6X5VV1QRwo1HrTUCbdBHgKMjkI2AUnUnqGe3dCE
Rl2LfKLy9BQi47ktXCm+T7G6sbkBsuYaoxt5oTH2uPI
--- X3Fr2TVxWyEW1hm8h7eKwGJHJg3BjywJddTp5OLolF4
0vOsm̈IK'}"S*߲|OE$xWn;3tp%\XG4lBYymǮ >k+ݲRom)`
================================================
FILE: build/secrets/hydra-github-client-secret.age
================================================
age-encryption.org/v1
-> ssh-ed25519 cKT5Kw krCNPgqeLrULZyGtFdc2VwmEVaKC7uaDabi7tv3dHVw
OOEZQ4o4xqFs42TEYwNNWkOQbSvVkq8nGA38CIpgx+k
-> ssh-ed25519 Gr9EaQ /ciOg7Beq8wMwMlVlj+8qUfFkALaGuz4jV2DtG2HLB8
MU0x/eqLEtUlygWfiBu41bZcPWRWXH40DeLkfTxmgMo
-> ssh-ed25519 3ENwVg HxpXlptq9Zp6AIRo0+poqbuFTHPRi/f/VGbL8ZO5fm8
bt6tn4OrjXV+U6eDKuFEU8/dW5MkqOYqVdqkqVfCrG8
-> ssh-rsa MuWD+w
qyi9QPAHw/dr845IdEOnyw6yu2M0b7nbX3ZCnClemJlmfFx1077RE0CWNEDR7LDt
0g8241mMIr85MYHDZuVPqH1W7ZTv/DFa39MJBhVCyC0Gl62Gz2ayO9d4flrQsvCv
NnaVKJPo0uxuvLTUlcX19WWVrt6v23sDMlChleUFdRJy84lMR8ouhtfZV1ipTqXq
4wZCsXgi1vV0F9oZ37KjV0irGECHNN9ehrrS943357+bJIlZMdVbsYLOXXiI8drr
mGzOwUFLvD5VRHTWgEZJz15oeanknTjpxrIt1AAJki+esPsKFRkEJ7eL6epXMclb
5iHW/MpgBXH0j8ARyg6/jw
-> ssh-ed25519 92bXiA qLAjwconq/2yxJnG91YE9UvpLe69rniXVAwHQYJS52E
X/W4+1RGYG6qCYGPiUl+yUmwwiNwt+zmhYHQ40d6C4k
-> ssh-ed25519 Y121Gw J21DUBHP2EpQPpOdUqNZ+deh/3DLjyYgT310v+EZAW8
a8b8zJgf7DUW03hzGeW8dzvRq+Vl2RbmaG17muHoyDA
--- hAdUvRfRfdfakQXgM/QMbdpTBj+3vX0d0atqQVS6m4c
xG+BKE 5msMD{3,X9-gCksr:en
================================================
FILE: build/secrets/hydra-mirror-aws-credentials.age
================================================
age-encryption.org/v1
-> ssh-ed25519 s9hT2g 3oyWmMcrRcr1Evv9+Srx3z3OyKajSPpJiC3APOYE0RU
RCC/gmOyy0JRkWIRhzK37xckWnpQYQ74HVAKsRJdL+Y
-> ssh-ed25519 Gr9EaQ SW4eNlIrULIh+T/IywhzHe8A6wCxoHBSrg9LmC2yOWM
DbTv2Es+wHfOU6ylHfGi33BnZW9IhtmqawLBax1JPqE
-> ssh-ed25519 3ENwVg SKaButhSVmBUl8IA+yJk/z+An+/JV9oUQ/lAGEI/VXQ
6df01m0908K4WtxWoQZTwaETdm0liOz7U+hj4774rBQ
-> ssh-rsa MuWD+w
Pc51cz+ZOpJ+bakeYitE0Es/gFPjBGMhnACiT7O7shcT7vYSJPNRM8IpTpOxfbf3
HjzPBNjUihVjGshQ1JFaXbwfmnvF0yIImSlJtWDteyGX2x1yzt+/oA3zjj1KDfku
qdrhUSRnnobMrSuSaPE4DSnUddXbaMAY/kzzoxzU+nK9FusvJhCgmZ3XYhN+ew79
aQs+7YXEgTH5J72monWgeYQkj4baTY32xFwqj9qPdx5JjMvtR4cX9xkC7R14EyBd
HJeCU87uiR3Ibc27COMso1YSp2u/quc7TKmjOHyYfyi7mYZU/JC2ccDsEr/HCE4m
x00f74TPjV2UY/raslCgYQ
-> ssh-ed25519 92bXiA 4PM+2XEb8unFUvJXgNqErFmUOToBgF/x5DvCCxWazGM
xn8PfNfujIkDXtbaH0RVtyzOCPCbDig8hnUOgqfsNGI
-> ssh-ed25519 Y121Gw faO3WbLjVR26NrVIJfGO5eSrT5DI6fdTYyxPWxD+DDI
e+WqhJj8EhpXU8nxfB4dDeZZqxvmR/xNfKXj4oT5U7s
--- CJHN+xb4JfmgPyfZ5QoCGQTo2m6jqIqF4EW88S55Ymg
j]}H܈vix:s{3rC#qeaW\ȘFV0:hs+t$ÈsPr\Ы]6Q2Nka؎M
,xN1/;O`2losѹg&`&
================================================
FILE: build/secrets/hydra-mirror-git-credentials.age
================================================
age-encryption.org/v1
-> ssh-ed25519 s9hT2g To6KM19p0hgH9n8iTV5uO0DU0lK94NWPiDV9UkUwwFc
Zc1aT0dmu/6zIYmBgpQjENZpmb5Ob4E8pZRO5zfXSvs
-> ssh-ed25519 Gr9EaQ y2ta9yM3VvELEsvJgza8a/czoSb+kW/OX0QnxCr0PCQ
TNgRqt/szVwTGF+vtCUYq2O9DhN0IhRFaqWAvuvDBRk
-> ssh-ed25519 3ENwVg TtMilL9woCv5knN7L0ruW5KWZb+8M0OE9Q4wBKBwhW8
BUQ5wxtj/GF3WzuP5W5sajrXUnyeenrAJa7uV2usjck
-> ssh-rsa MuWD+w
o1dNC3qH2lvVqLOoEBgRJKcAqyqBYwvFsRAskmembVl9ho1+pEk+iTKaUYXOdA0f
ond7059ehqw7aiJofw0PCtch3IRZnOTMW4MW/aDHrW0iFJKmjsS6ZQ1nrp37awtW
Yb5HTjstJnKR01KgeHGaZVpTN2GCpiLWYAWf5Fg2HGmhhR5dxz0xI4TmnW7PtXiD
hB0Y2m6TUzcTA/Sx0sdEefyBygsCnFXSf7y2/8L611ImGqW09XKAdYbkdvT95d+y
X2fxeiNbJcZxKFH1wlq82WJ03o9UILalZrECYewIUzFqZ55DAjYgJ9F6bPpHeM51
Fa2JZHeeQY7RJ5MghTfQkg
-> ssh-ed25519 92bXiA JRqguU0+6uD8V3LsQ8DzcTJPjlA2mJv5afERNX9delc
8iVz80N/aWNpAhfXvM5UTqqVuPsp00Tai/+Vr9Pyx80
-> ssh-ed25519 Y121Gw IBQ8+sLYJDXFkhFTl8XCT97jAKAt0c4urBWw4z52emg
gT3Ur4zB7J0NJKHpJg5ws3WmCJbfnIrgEd2X4aldUSo
--- NoDwDLL8cK2qb+gi5warllNIzCIu2Linyd+WEMoSx+4
%ezoh[O(8
/[ݑQxX.w]TO9j]eS,-xԭ`="|Kɰw
`2CKA5S
================================================
FILE: build/secrets/kind-lumiere-queue-runner-token.age
================================================
age-encryption.org/v1
-> ssh-ed25519 cKT5Kw 8g2rqFnJ23pFpD4PniCDMPiueSroGH2yShkpHtPvZDc
ZyYcqRHGP4H4ElRs3rNAOzJ7In3MnVT8/2NcLHga8Ho
-> ssh-ed25519 jPdm4A k+8PUnPBFILqbb0Ikf2DMJEYVsLPwDtjYgQ6dVyNenc
e1mhAEQhzVsnznBJRsMEp3gYOO00Gmf4BCvHsXpFELU
-> ssh-ed25519 Gr9EaQ P0yT0M8e8ihKqossmqnIJc6074NXZ8KJmVL03BN7eV0
GHWdPlIDCMFf7Pca4GXfRnhZ2NJAmM0doPsMThY+iVQ
-> ssh-ed25519 3ENwVg UzvZZ0rFG3KaPQ6G6Oq4U/EQ3RRmPxyo6xF0tgadDDs
vPUm8mpqVeiBGpxGUTnYACn7tOQDcuFP3E2gWLToyXY
-> ssh-rsa MuWD+w
qSOhRpEjjuMyt+nRRC8Yd1fInXTReZqLCp6GZoRnYbO69a1AIQwU1HU5CtAHbVFe
8dIerlh4deN/T6wW3EvxM5hAA5co7kV68t3fgHGyQBdVGJvPuQRWaduSv21O/wbv
epmGODM9YwFfnPMDHXqTzt+NYEJIJoUVpH1YTTfeZDyoRza2gJ5hoSPFXtomVHL4
lO1+wcldYuELgY8bCeZpFP0kPmK7STYTa7LZxEF/yjqM2ZXhS6qOTV2+yRZhSKEy
RizOnW0ePWrCSIVvxIr4+sGlKW5cwAqeatxiPZz7/3RFSxHBG9RC/ZZEmaZUF9Er
cjILgCnk3lZJDnmpU6/+JA
-> ssh-ed25519 92bXiA 4jz8lFxCSjJBJKWZTtxYruYiuQuJytQ8utDYZccQwFY
zdLlneAU2P7zjDCC6tWVjySgJctB4Y5VXwEkvzqjhoU
-> ssh-ed25519 Y121Gw Bhy7yX2r7RWBeS/K0bMVwXbvzYVAW88pzOHVtTKKIVQ
Q9wuHdoI4SRXmjSA7iUUljjcO6dzPublR79rvPSlTlg
--- 2DnKmT2R9XL5DR6z7+amRi5Y/8GphgkifpngTogcU/A
,nݔ"%bKDʃWv6#K4(Jf*|N(:gY}ZuR1.EtPkM
;k詜s滁c~|ȶ
================================================
FILE: build/secrets/norwegian-blue-queue-runner-token.age
================================================
age-encryption.org/v1
-> ssh-ed25519 cKT5Kw aMrFTLVt8LAofBa0xq3o4EjsxQjRAPtHm13zmSM+6VA
atGjWVSAl8O9I44eY3BO+QeQ6EDuAEsEBto5matic8s
-> ssh-ed25519 SZ+mDA VmAYOI/l96zfrGL7UwFB1qVJGTGVGjqmjP4z2+0rIjI
RKvi/BBAgHkq9Xvqr/sjCBaTFUg4nOTLpQOGejO2ZAU
-> ssh-ed25519 Gr9EaQ +zszHiPND5T8ORnDZ/tLsOH5F/dtf5/sFMxi/fB4xj4
kaiFkF95SpTTR8eIpuxnktNMBrIokcExYn4Um7AtG9s
-> ssh-ed25519 3ENwVg 9CMlmCc3jammJrza2M22LNzzeASMk+nqH9muX9xkMQc
0J5BDTOKo3HRWNdhVQdv2gzZNrPoqm4bX3zEm05Cwkc
-> ssh-rsa MuWD+w
bhIUyi/2y8zeWwYKJsBwqsG5JnPZ12aY9IuLkflKSLpJchAChTujKELFiCuzlGN4
fvlbqa7mXadzs3pkjnYSz9MjGg/DyFjRsKXfc+jRD+QztNfodFQwJaKn8+9wG0v1
+TmoQ0K5ecSkmzPvS/Ze3itLG2QfZQEIutND7I461ZJK24f4ORt3tANA4F0+INx9
tVnBMimjp3fb7TI6i7cvUmOytNaoOiipnd0j4caPPkqa9fJ3m9aeeZ58uSkmzo7i
XejZWHsE+LACMLk/hXS/h7JZQzPQGGqviATOp6a9s59Oq6eqT8V2CgjEwHIAITVp
vWpuvsxCsxJQkZg4PjLJvA
-> ssh-ed25519 92bXiA XKApXxi8qr82rvTIPYPbZ/ZFj3sadY7eAbN0DKxvLhM
Sw8RRumPRL3AaNcTAd7qJnDqxea9h7wMOEubpfU1wx4
-> ssh-ed25519 Y121Gw bbz5648ZBs2l9WmnP6spocyLNGLx7EvRizuLCH7P4RU
U0/KXGwVE7aUHeWcE+OANVTTfvQ8jVRqfKOzWCww5m0
--- Cd10Opop86bxKKJPSCO9yYSY6oAghX0dllm+efexPCA
r)P-id|wMRs"GXJ=VV9;uQmQMeyTLJ`5&jhWxgXw(F
-;&j¬8=B
================================================
FILE: build/secrets/owncast-admin-password.age
================================================
age-encryption.org/v1
-> ssh-ed25519 s9hT2g M/D2oe8ocLzBBe0VTEO6UZ0gZb+dL13/rfZ38N1KH1I
1KmR71+57D0aBRlU7ZvPz6Prg3mNrYc7myq7JRdQQH8
-> ssh-ed25519 Gr9EaQ iOVXjyLAa/RSGBefsQismPkx53f9OGU1qMzO2rrqhhQ
8I6aGwAs7AFC/GWW7S+lv7vGyJW8T7Icv1bfHBtNdmE
-> ssh-ed25519 3ENwVg 5rP46xlqZkRF7u37BxB5PG5utkRHmfpYFxYiCA++xBY
K0/s0hGBIr88ZHocBrHrEuEUEefAnqH4Fe8dMlCcOHM
-> ssh-rsa MuWD+w
BeO4rjxRzb54rbpEglPIkhluPp2wRBKxL97Ta4utvUnG44IXRnWt6tuj016qVTXZ
Z8OzrDVTwusXJZxmOehsgF/rogFAj1Ju+bf9s4fojv1nC8ITnsXLMQjzA0X/VcTA
DgVWw8+Elrt7sJGiL3C9ws9ATt/suPSdkL+aNhOvJXRwb9NfQUn+XowvJRg1VnzS
AQx9tTyGVB5GcI4LnxHnyqPj+6ZD/F9XqbHijTMrx60GqRlqeEu9JiUa0YtWnBgX
FcIrvoRQ6b7G5QDivbqCQ4VuJDrSd7xqKddQVea1KglrQHQdY3KFNUHVlEs1n49z
Nia8ty+qWIwAEfwyt6c0Tw
-> ssh-ed25519 92bXiA F7v+xOHVTL3wZ5KUHW+nAyrl93/awx5TXv4izicA0BM
OD2ivZ1FQ696Wh+odAA4xiJElXEhqsgBok7AJ3ny10Y
-> ssh-ed25519 Y121Gw YUG5YErjueT2gqqX1x34b6U35uhbdKZWgcTALMXTRXY
0F9uoegrWXP3lzjRk3eJCtu/OGZO/QqafpVPYitUM2k
--- M3nGs3hV2JaDDtPyuNeKpyh/OdpZAk/q39OTk8n9m7g
J!{-FF/"TK#ׅGGN|ZU DQ1d1Ԟş
================================================
FILE: build/secrets/pluto-backup-secret.age
================================================
age-encryption.org/v1
-> ssh-ed25519 s9hT2g reYMr+USW2vh77665Ga/KtPbeu5OrdgrKgI4sYo8plo
4eBoVfWTjRe4w6Vdl6OAXKJr7kaSJqVVm9se0rL7IEA
-> ssh-ed25519 Gr9EaQ lNX/PDcE3MXI0q/o7tnA9AlloF6uncD51FYTqdZP3j4
otONyo6e5INW12x1Al5WqnTwfihRGL6dxdrH1/HYbe8
-> ssh-ed25519 3ENwVg 2ZHD8vTCA+FPMRO1kSvUo937f9thS8IeTApGltFhjkQ
bEN1eLyrqMtY0KuZ3IkRdIJzvX0t4bb73XzlDcuAgII
-> ssh-rsa MuWD+w
FPAZH3iUoF7It9uGw1DHksmbsYZcRqvZqGcjbnJLP/JiHmriUSyELQl7bH4n1+6H
GWhqBiqNKPWJoCq0y3vXaCzN9iFXwGCVaAyNZk3+ox/Q0dBietO0ux4MzajAWl8b
mr/UR3Mk2ybGkIBIfh1Wko8cdA+tWyCsl0CdSyqI2JY523xf/pOwcE0YLQ2kGhQc
ifu+AmIKqXbZiqhS0yj3+BM9rgJ5gVxZMKAp/CjpIBpEu/fmK64mRryAVsL0EEBF
O2CwBsqyFyJvcW3yTBdHxfKhorZrMrGO18d7CGFHGswU/AXi/UxyzrkfpjVgFUfm
b2qeI10f8PZAibqHYcQJBQ
-> ssh-ed25519 92bXiA sXYrwOcZlNpPoGELwRTsjfSNldPr6CVtv9VcYK1flGY
aMhNq6L5M70bUFR/o+7M/KcQyv9/BfVkxgzvU/fD5gk
-> ssh-ed25519 Y121Gw sGVkfMeghciO9g840KPsVsohEkEgC1Rb8mnQI0QZe2Y
uDzza0+uGQRMzTiUkYz9n6Jyt18i7TTHWBrX0p8vHAQ
--- rXFfiiTQ+BEa3Hvs0BTWxI+b1wPBwyTgWeq24QeqXVw
)caEuDW[c(8-_1nJ{6+KߤmUf5M~CiB2Yshg#z?
================================================
FILE: build/secrets/sleepy-brown-queue-runner-token.age
================================================
age-encryption.org/v1
-> ssh-ed25519 cKT5Kw r8aZ+OCr9AE4h0zattrGpFPwBcnb28/Mj7vNC5EEHDE
SaN75cMS6o0bcuIzeKF8siNu0P7rvJN4DLnL0R07t3M
-> ssh-ed25519 le38mA 0syXJIHthuMy1Y6LbrfQX1QcADyJMOfmFbwzf3cQlHM
X9HHBlfYBG64Awu+TZaA463Om18A7kSu7pMYwIDkehk
-> ssh-ed25519 Gr9EaQ Wqex4/CIJTL+sm5GAlb0Du8mIjDz3QmvO7veYAQ+nmo
o//67CmR5wPgSzLuF4exx4mW+FstyQunBqeDgs9HUk8
-> ssh-ed25519 3ENwVg 5XF6k6rMk59p53Hw6nSak8iajZ7XzLJ5jOQ7aPwkdng
+YUOjq/VopumkLhVshF4GdzkjqO1aNMrfkx3TZaPtaA
-> ssh-rsa MuWD+w
gsSEjSCIFzKTsOXvJay3Ij9OpefMoAGL7AjXW1mQ4TvCVWO5M7gqYLrlgANKwMGK
sm9tpNtncFn7hC7G3YWBOU/InMIQ/qlgL5jhRBhZpou/DKMtDA+IDVZJYvSQMcT1
9467zxSpFtnjrmzW/6cnX3jjLlTRCc4AupoS1pMIeJ2gwZBNiCklS+QGPQTQiG/O
oF1nA0h/08pCbrLHIwilhFmekDzg99EesiZ3Hbqc7+kz8kbaIV9iUqFsRvV1Dwzm
K6wIQXf5nhcCkt/SAFSS/ZwwHOr19B0OR3t6L4dYMa+bl/LxW0yXYzvMo4rp07Mn
oXFd+BuBEwzHI1x8wrTmUQ
-> ssh-ed25519 92bXiA +t2D5pUYWeTRPTT7vrNYZirRUWKQO0gw5RB3o+CV0yk
b5DsQ3FUMO14U7NB7H4G9ngpw5gfPTrYXIKa7yy5Wq4
-> ssh-ed25519 Y121Gw X0D49VhFJ2kZqJATUmuKhJfQ6TIAZCkWDl2u6dqnQSk
O0JtjZWXrS/NY/FXYB14kM3MpuoAaTd2Bf1oWw7REc4
--- a+IPhlc1ru44iR5eHXGVe0X2fqgcSj03Lk1lyB3sZZg
*]HHX![(+F;8iOU&J'67=Ia1S6
.ep!4duL*DWG<bG~`ޣڭ˃D]cm|#\Ym;
================================================
FILE: build/secrets.nix
================================================
let
keys = import ../ssh-keys.nix;
secrets = with keys; {
alertmanager-matrix-forwarder = [ machines.pluto ];
alertmanager-oauth2-proxy-env = [ machines.pluto ];
fastly-exporter-env = [ machines.pluto ];
grafana-secret-key = [ machines.pluto ];
hydra-aws-credentials = [ machines.mimas ];
hydra-github-client-secret = [ machines.mimas ];
hydra-mirror-aws-credentials = [ machines.pluto ];
hydra-mirror-git-credentials = [ machines.pluto ];
owncast-admin-password = [ machines.pluto ];
pluto-backup-secret = [ machines.pluto ];
pluto-backup-ssh-key = [ machines.pluto ];
rfc39-credentials = [ machines.pluto ];
rfc39-github = [ machines.pluto ];
rfc39-record-push = [ machines.pluto ];
storagebox-exporter-token = [ machines.pluto ];
tarball-mirror-aws-credentials = [ machines.pluto ];
zrepl-ssh-key = [ machines.titan ];
# builders/
elated-minsky-queue-runner-token = with machines; [
mimas
elated-minsky
];
goofy-hopcroft-queue-runner-token = with machines; [
mimas
goofy-hopcroft
];
hopeful-rivest-queue-runner-token = with machines; [
mimas
hopeful-rivest
];
sleepy-brown-queue-runner-token = with machines; [
mimas
sleepy-brown
];
# macs/
eager-heisenberg-queue-runner-token = with machines; [
mimas
eager-heisenberg
];
enormous-catfish-queue-runner-token = with machines; [
mimas
enormous-catfish
];
growing-jennet-queue-runner-token = with machines; [
mimas
growing-jennet
];
intense-heron-queue-runner-token = with machines; [
mimas
intense-heron
];
kind-lumiere-queue-runner-token = with machines; [
mimas
kind-lumiere
];
maximum-snail-queue-runner-token = with machines; [
mimas
maximum-snail
];
norwegian-blue-queue-runner-token = with machines; [
mimas
norwegian-blue
];
sweeping-filly-queue-runner-token = with machines; [
mimas
sweeping-filly
];
};
in
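# Turn the attrset above into the shape agenix's secrets.nix expects:
# each name becomes "secrets/<name>.age" with publicKeys set to the machines
# listed above plus keys.infra-core, so infra-core members can always
# decrypt and re-key every secret.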
builtins.listToAttrs (
map (secretName: {
name = "secrets/${secretName}.age";
value.publicKeys = secrets."${secretName}" ++ keys.infra-core;
}) (builtins.attrNames secrets)
)
================================================
FILE: build/titan/boot.nix
================================================
{
boot = {
initrd.availableKernelModules = [
"ahci"
"xhci_pci"
"nvme"
"usbhid"
];
kernelModules = [ "kvm-amd" ];
supportedFilesystems.zfs = true;
loader = {
efi.canTouchEfiVariables = false;
grub = {
enable = true;
efiSupport = true;
efiInstallAsRemovable = true;
mirroredBoots = [
{
devices = [ "nodev" ];
path = "/efi/a";
}
{
devices = [ "nodev" ];
path = "/efi/b";
}
];
};
};
};
}
================================================
FILE: build/titan/default.nix
================================================
{
imports = [
../common.nix
./boot.nix
./network.nix
./postgresql.nix
./zrepl.nix
];
disko.devices = import ./disko.nix;
networking = {
hostId = "e1ce6466";
hostName = "titan";
domain = "nixos.org";
};
services.zfs.autoScrub.enable = true;
system.stateVersion = "25.11";
}
================================================
FILE: build/titan/disko.nix
================================================
let
layout = id: {
type = "gpt";
partitions = {
esp = {
type = "EF00";
size = "1G";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/efi/${id}";
};
};
zfs = {
size = "100%";
content = {
type = "zfs";
pool = "zroot";
};
};
};
};
in
{
disk = {
nvme0n1 = {
type = "disk";
device = "/dev/disk/by-id/nvme-MTFDKCC1T9TGP-1BK1DABYY_0925109FB623";
content = layout "a";
};
nvme1n1 = {
type = "disk";
device = "/dev/disk/by-id/nvme-MTFDKCC1T9TGP-1BK1DABYY_0925109FB922";
content = layout "b";
};
};
zpool.zroot = {
type = "zpool";
mode = "mirror";
options.ashift = "12";
rootFsOptions = {
acltype = "posixacl";
atime = "off";
compression = "zstd-3";
mountpoint = "none";
xattr = "sa";
};
datasets = {
"root" = {
type = "zfs_fs";
mountpoint = "/";
};
"nix" = {
type = "zfs_fs";
mountpoint = "/nix";
};
"pg" = {
type = "zfs_fs";
mountpoint = "/var/lib/postgresql";
options = {
logbias = "latency";
recordsize = "16K";
redundant_metadata = "most";
};
};
"reserved" = {
type = "zfs_fs";
options = {
canmount = "off";
refreservation = "16G"; # roughly one system closure
};
};
};
};
}
================================================
FILE: build/titan/network.nix
================================================
{
systemd.network = {
enable = true;
netdevs = {
"20-vlan4000" = {
netdevConfig = {
Kind = "vlan";
Name = "vlan4000";
};
vlanConfig.Id = 4000;
};
};
networks = {
"30-enp35s0" = {
matchConfig = {
MACAddress = "9c:6b:00:1f:aa:fd";
Type = "ether";
};
address = [
"159.69.62.224/26"
"2a01:4f8:231:e53::1/64"
];
routes = [
{ Gateway = "159.69.62.193"; }
{ Gateway = "fe80::1"; }
];
vlan = [
"vlan4000"
];
networkConfig.Description = "WAN";
linkConfig.RequiredForOnline = true;
};
"30-vlan4000" = {
matchConfig.Name = "vlan4000";
networkConfig = {
DHCP = false;
IPv6AcceptRA = false;
};
linkConfig = {
MTUBytes = "1400";
RequiredForOnline = "routable";
};
address = [
"10.0.40.3/31"
];
};
};
};
}
================================================
FILE: build/titan/postgresql.nix
================================================
{
config,
lib,
pkgs,
...
}:
{
services.prometheus.exporters.postgres = {
enable = true;
dataSourceName = "user=root database=hydra host=/run/postgresql sslmode=disable";
openFirewall = true;
firewallRules = ''
ip6 saddr $prometheus_inet6 tcp dport ${toString config.services.prometheus.exporters.postgres.port} accept
ip saddr $prometheus_inet4 tcp dport ${toString config.services.prometheus.exporters.postgres.port} accept
'';
};
networking.firewall.interfaces."vlan4000".allowedTCPPorts = [ 5432 ];
systemd.services.postgresql = {
wants = [ "network-online.target" ];
after = [ "network-online.target" ];
};
services.postgresql = {
enable = true;
enableJIT = true;
package = pkgs.postgresql_18;
# https://pgtune.leopard.in.ua/#/
settings = {
listen_addresses = lib.mkForce "10.0.40.3";
# https://vadosware.io/post/everything-ive-seen-on-optimizing-postgres-on-zfs-on-linux/#zfs-related-tunables-on-the-postgres-side
full_page_writes = "off";
wal_init_zero = "off";
wal_recycle = "off";
checkpoint_completion_target = "0.9";
default_statistics_target = 100;
log_duration = "off";
log_statement = "none";
# pgbadger-compatible logging
log_transaction_sample_rate = 0.01;
log_min_duration_statement = 5000;
log_checkpoints = "on";
log_connections = "on";
log_disconnections = "on";
log_lock_waits = "on";
log_temp_files = 0;
log_autovacuum_min_duration = 0;
log_line_prefix = "user=%u,db=%d,app=%a,client=%h ";
max_connections = 500;
work_mem = "20MB";
maintenance_work_mem = "2GB";
# 25% of memory
shared_buffers = "32GB";
# Checkpoint every 1GB. (default)
# increased after seeing many warnings about frequent checkpoints
min_wal_size = "1GB";
max_wal_size = "4GB";
wal_buffers = "16MB";
max_worker_processes = 32;
max_parallel_workers_per_gather = 4;
max_parallel_workers = 32;
# NVMe related performance tuning
effective_io_concurrency = 200;
random_page_cost = "1.1";
# We can risk losing some transactions.
synchronous_commit = "off";
effective_cache_size = "64GB";
# try to allocate huge pages, if possible
huge_pages = "try";
# Enable JIT compilation if possible.
jit = "on";
# autovacuum and autoanalyze much more frequently:
# at these values vacuum should run approximately
# every 2 mass rebuilds, or a couple times a day
# on the builds table. Some of those queries really
# benefit from frequent vacuums, so this should
# help. In particular, I'm thinking the jobsets
# pages.
autovacuum_vacuum_scale_factor = 0.02;
autovacuum_analyze_scale_factor = 0.01;
shared_preload_libraries = "pg_stat_statements";
compute_query_id = "on";
};
# FIXME: don't use 'trust'.
authentication = ''
host hydra all 10.0.40.2/32 trust
local all root peer map=prometheus
'';
identMap = ''
prometheus root root
prometheus postgres-exporter root
'';
};
}
================================================
FILE: build/titan/zrepl.nix
================================================
{
config,
lib,
...
}:
let
metricsPort = 9811;
in
{
age.secrets."zrepl-ssh-key" = {
file = ../secrets/zrepl-ssh-key.age;
mode = "0400";
};
programs.ssh = {
knownHosts = {
rsync-net = {
hostNames = [
"zh4461b.rsync.net"
"2001:1620:2019::336"
];
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILtF46LwRn+hC9vuw0vedXBKGNPMSIqrXdxl+EQOI/8J";
};
};
};
services.zrepl =
let
defaultBackupJob = {
type = "push";
filesystems."zroot/pg<" = true;
snapshotting = {
type = "periodic";
interval = "30m";
prefix = "zrepl_snap_";
hooks = [
{
# https://zrepl.github.io/configuration/snapshotting.html#postgres-checkpoint-hook
type = "postgres-checkpoint";
dsn = "host=/run/postgresql dbname=hydra user=root sslmode=disable";
filesystems."zroot/pg" = true;
}
];
};
# The current pruning setup is an exponentially growing scheme, at both sides.
pruning = {
keep_sender = [
{ type = "not_replicated"; }
{
type = "grid";
regex = "^zrepl_snap_.*";
grid = lib.concatStringsSep " | " [
"1x1h(keep=all)"
"1x1h"
"1x2h"
"1x4h"
# "grid" acts weird if an interval isn't a whole-number multiple
# of the previous one, so we jump from 8h to 24h
"2x8h"
"1x1d"
"1x2d"
"1x4d"
"1x8d"
# At this point we keep ~10 snapshots spanning 8--16 days (depends on moment),
# with exponentially increasing spacing (almost).
];
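# Rough arithmetic behind the note above: the sub-day buckets sum to
# 1h + 1h + 2h + 4h + 2*8h = 24h and the daily buckets to 1d + 2d + 4d + 8d = 15d,
# so the whole sender grid covers at most ~16 days.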
}
];
keep_receiver = [
{
type = "grid";
regex = "^zrepl_snap_.*";
grid = lib.concatStringsSep " | " [
"2x1h(keep=all)"
"2x1h"
"2x2h"
"2x4h"
"4x8h"
# At this point the grid spans 2 days by ~13 snapshots.
# (See note above about 8h -> 24h.)
"2x1d"
"2x2d"
"2x4d"
"2x8d"
"2x16d"
"2x32d"
"2x64d"
"2x128d"
# At this point we keep ~29 snapshots spanning 384--512 days (depends on moment),
# with exponentially increasing spacing (almost).
];
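# Same arithmetic on the receiver: the sub-day buckets cover 2 days and the
# daily buckets 2*(1 + 2 + 4 + 8 + 16 + 32 + 64 + 128)d = 510d, for a total
# span of up to ~512 days.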
}
];
};
};
in
{
enable = true;
settings = {
global = {
logging = [
{
type = "syslog";
level = "info";
format = "human";
}
];
# https://zrepl.github.io/configuration/monitoring.html
monitoring = [
{
type = "prometheus";
listen = ":${toString metricsPort}";
}
];
};
jobs = [
# Covers 20240629+
(
defaultBackupJob
// {
name = "rsyncnet";
connect = {
identity_file = config.age.secrets."zrepl-ssh-key".path;
type = "ssh+stdinserver";
host = "zh4461b.rsync.net";
user = "root";
port = 22;
};
}
)
/*
rsync.net provides a VM with FreeBSD
- almost nothing is preserved on upgrades except this "data1" zpool
$ scp ./zrepl.yml root@zh4461b.rsync.net:/usr/local/etc/zrepl/zrepl.yml
# pkg install zrepl
# service zrepl enable
# service zrepl start
*/
];
};
};
networking.firewall.extraInputRules = ''
ip6 saddr $prometheus_inet6 tcp dport ${toString metricsPort} accept
ip saddr $prometheus_inet4 tcp dport ${toString metricsPort} accept
'';
}
================================================
FILE: build/titan/zrepl.yml
================================================
# root@zh4461b.rsync.net:/usr/local/etc/zrepl/zrepl.yml
# zrepl main configuration file.
# For documentation, refer to https://zrepl.github.io/
#
global:
logging:
- type: "stdout"
level: "error"
format: "human"
- type: "syslog"
level: "info"
format: "logfmt"
# mostly from https://blog.lenny.ninja/zrepl-on-rsync-net.html
jobs:
- name: sink
type: sink
serve:
type: stdinserver
client_identities: [titan]
recv:
placeholder:
encryption: off
root_fs: "data1"
================================================
FILE: builders/boot/efi-grub.nix
================================================
{
boot.loader = {
efi.canTouchEfiVariables = false;
grub = {
enable = true;
configurationLimit = 5;
efiSupport = true;
efiInstallAsRemovable = true;
mirroredBoots = [
{
devices = [ "nodev" ];
path = "/efi/a";
}
{
devices = [ "nodev" ];
path = "/efi/b";
}
];
};
};
}
================================================
FILE: builders/common/hardening.nix
================================================
{
# no privilege escalation through sudo or polkit
security.sudo.execWheelOnly = true;
security.polkit.enable = false;
# no password authentication
services.openssh.settings = {
KbdInteractiveAuthentication = false;
PasswordAuthentication = false;
};
}
================================================
FILE: builders/common/hydra-queue-builder.nix
================================================
{
config,
inputs,
lib,
...
}:
{
imports = [
inputs.hydra-staging.nixosModules.builder
];
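# Everything below is gated behind `lib.mkIf false`, so the dev queue builder
# is currently disabled even though the module is imported.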
config = lib.mkIf false {
age.secrets."queue-runner-token" = {
file = ../../build/secrets/${config.networking.hostName}-queue-runner-token.age;
owner = "hydra-queue-builder";
};
services.hydra-queue-builder-dev = {
enable = true;
queueRunnerAddr = "https://queue-runner.hydra.nixos.org";
authorizationFile = config.age.secrets."queue-runner-token".path;
};
};
}
================================================
FILE: builders/common/network.nix
================================================
{
networking = {
domain = "builders.nixos.org";
firewall = {
# too spammy, rotates dmesg too quickly
logRefusedConnections = false;
};
# we use networkd instead
useDHCP = false;
};
}
================================================
FILE: builders/common/nix.nix
================================================
{
config,
lib,
pkgs,
...
}:
{
nix = {
package = pkgs.nix;
nrBuildUsers = config.nix.settings.max-jobs + 32;
gc =
let
maxFreed = 500; # GB
in
{
automatic = true;
dates = "hourly";
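# The option below asks nix-collect-garbage to free at most
# (maxFreed GiB - space currently available on /nix/store), so each hourly
# run stops once roughly maxFreed (500) GiB are free again.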
options = "--max-freed \"$((${toString maxFreed} * 1024**3 - 1024 * $(df --output=avail /nix/store | tail -n 1)))\"";
};
settings = {
accept-flake-config = false;
builders-use-substitutes = true;
extra-experimental-features = [
"nix-command"
"no-url-literals"
"flakes"
];
system-features = [
"kvm"
"nixos-test"
"benchmark" # we may restrict this in the central /etc/nix/machines anyway
];
trusted-users = [
"build"
"root"
];
max-silent-time = 10800; # 3h
};
};
systemd.services.prune-stale-nix-builds = {
description = "Prune stale nix build roots";
startAt = "hourly";
unitConfig.Documentation = "https://github.com/NixOS/nix/issues/5207";
serviceConfig = {
ExecStart = lib.concatStringsSep " " [
(lib.getExe pkgs.findutils)
"/nix/var/nix/builds"
"-mindepth 1"
"-maxdepth 1"
"-type d"
"-mtime +1" # days
"-exec rm -rf {} +"
];
};
};
}
================================================
FILE: builders/common/node-exporter.nix
================================================
{
config,
...
}:
{
networking.firewall.allowedTCPPorts = [
config.services.prometheus.exporters.node.port
];
services.prometheus.exporters.node = {
enable = true;
enabledCollectors = [ "systemd" ];
};
}
================================================
FILE: builders/common/ssh.nix
================================================
{
lib,
...
}:
{
services.openssh = {
enable = true;
authorizedKeysFiles = lib.mkForce [ "/etc/ssh/authorized_keys.d/%u" ];
};
}
================================================
FILE: builders/common/system.nix
================================================
{
pkgs,
...
}:
{
# apply microcode to fix functional and security issues
hardware.enableRedistributableFirmware = true;
hardware.cpu.amd.updateMicrocode = pkgs.stdenv.isx86_64;
hardware.cpu.intel.updateMicrocode = pkgs.stdenv.isx86_64;
# enable kernel same-page merging for improved vm test performance
hardware.ksm.enable = true;
# discard blocks weekly
services.fstrim.enable = true;
# use memory more efficiently at the cost of some compute
zramSwap.enable = true;
}
================================================
FILE: builders/common/tools.nix
================================================
{
pkgs,
...
}:
{
environment.systemPackages = with pkgs; [
atop
ethtool
htop
lm_sensors
nix-top
nvme-cli
pciutils
smartmontools
usbutils
];
}
================================================
FILE: builders/common/update.nix
================================================
{
system.autoUpgrade = {
enable = true;
dates = "daily";
flake = "git+https://github.com/nixos/infra.git?ref=main";
allowReboot = true;
};
}
================================================
FILE: builders/common/users.nix
================================================
{
config,
lib,
pkgs,
...
}:
let
sshKeys = {
hydra-queue-runner-rhea = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOdxl6gDS7h3oeBBja2RSBxeS51Kp44av8OAJPPJwuU/ hydra-queue-runner@rhea";
};
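# Wrap a public key in an SSH forced command so the holder (the Hydra queue
# runner on rhea) can only run `nix-store --serve --write` on this builder.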
authorizedNixStoreKey =
key:
let
environment = lib.concatStringsSep " " [
"NIX_SSL_CERT_FILE=${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt"
];
in
"command=\"${environment} ${config.nix.package}/bin/nix-store --serve --write\" ${key}";
in
{
users = {
mutableUsers = false;
users = {
build = {
isNormalUser = true;
uid = 2000;
openssh.authorizedKeys.keys = [
(authorizedNixStoreKey sshKeys.hydra-queue-runner-rhea)
];
};
root.openssh.authorizedKeys.keys = (import ../../ssh-keys.nix).infra-core;
};
};
}
================================================
FILE: builders/disk-layouts/efi-zfs-raid0.nix
================================================
{
disk1 ? "/dev/nvme0n1",
disk2 ? "/dev/nvme1n1",
}:
let
mkDiskLayout = id: {
type = "gpt";
partitions = {
esp = {
type = "EF00";
size = "512M";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/efi/${id}";
};
};
zdev = {
size = "100%";
content = {
type = "zfs";
pool = "zroot";
};
};
};
};
in
{
disk = {
a = {
type = "disk";
device = disk1;
content = mkDiskLayout "a";
};
b = {
type = "disk";
device = disk2;
content = mkDiskLayout "b";
};
};
zpool.zroot = {
mode = ""; # RAID 0
options.ashift = "12"; # 4k blocks
rootFsOptions = {
acltype = "posixacl";
atime = "off";
compression = "on";
mountpoint = "none";
xattr = "sa";
};
datasets = {
root = {
type = "zfs_fs";
mountpoint = "/";
};
reserved = {
type = "zfs_fs";
options = {
canmount = "off";
refreservation = "16G"; # roughly one system closure
};
};
};
};
}
================================================
FILE: builders/flake-module.nix
================================================
{ inputs, ... }:
{
flake.nixosConfigurations =
let
mkNixOS =
system: config:
inputs.nixpkgs.lib.nixosSystem {
inherit system;
specialArgs = { inherit inputs; };
modules = [
inputs.agenix.nixosModules.age
inputs.disko.nixosModules.disko
./common/hardening.nix
./common/network.nix
./common/nix.nix
./common/node-exporter.nix
./common/hydra-queue-builder.nix
./common/system.nix
./common/tools.nix
./common/update.nix
./common/users.nix
./common/ssh.nix
../modules/rasdaemon.nix
config
];
};
in
{
# Epyc 9454P (48C/96T), 256 GB DDR4 RAM, 2x 1.92TB PCIe4 NVME
elated-minsky = mkNixOS "x86_64-linux" ./instances/elated-minsky.nix;
sleepy-brown = mkNixOS "x86_64-linux" ./instances/sleepy-brown.nix;
# Ampere Q80-30 (80C), 256 GB DDR4 RAM, 2x3.84TB PCIe4 NVME
goofy-hopcroft = mkNixOS "aarch64-linux" ./instances/goofy-hopcroft.nix;
# Ampere Q80-30 (80C), 128 GB DDR4 RAM, 2x960GB PCIe4 NVME
hopeful-rivest = mkNixOS "aarch64-linux" ./instances/hopeful-rivest.nix;
};
perSystem =
{ pkgs, inputs', ... }:
{
devShells.builders = pkgs.mkShell {
buildInputs = [
inputs'.agenix.packages.agenix
];
};
};
}
================================================
FILE: builders/instances/elated-minsky.nix
================================================
{
imports = [
../profiles/hetzner-ax101r.nix
];
nix.settings = {
cores = 2;
max-jobs = 48;
};
networking = {
hostName = "elated-minsky";
domain = "builders.nixos.org";
useDHCP = false;
};
systemd.network = {
enable = true;
networks = {
"30-enp193s0f0np0" = {
matchConfig = {
MACAddress = "9c:6b:00:4e:1a:6a";
Type = "ether";
};
linkConfig.RequiredForOnline = true;
networkConfig.Description = "WAN";
address = [
"167.235.95.99/26"
"2a01:4f8:2220:1b03::1/64"
];
routes = [
{ Gateway = "167.235.95.65"; }
{ Gateway = "fe80::1"; }
];
};
};
};
system.stateVersion = "24.11";
}
================================================
FILE: builders/instances/goofy-hopcroft.nix
================================================
{
imports = [
../profiles/hetzner-rx220.nix
];
nix.settings = {
cores = 2;
max-jobs = 40;
};
networking = {
hostName = "goofy-hopcroft";
domain = "builders.nixos.org";
useDHCP = false;
};
systemd.network = {
enable = true;
networks = {
"30-enP3p2s0f0" = {
matchConfig = {
MACAddress = "74:56:3c:8c:01:a9";
Type = "ether";
};
linkConfig.RequiredForOnline = true;
networkConfig.Description = "WAN";
address = [
"135.181.225.104/26"
"2a01:4f9:3071:2d8b::1/64"
];
routes = [
{ Gateway = "135.181.225.65"; }
{ Gateway = "fe80::1"; }
];
};
};
};
system.stateVersion = "24.11";
}
================================================
FILE: builders/instances/hopeful-rivest.nix
================================================
{
imports = [
../profiles/hetzner-rx170.nix
];
nix.settings = {
cores = 20;
max-jobs = 10;
system-features = [ "big-parallel" ];
};
networking = {
hostName = "hopeful-rivest";
domain = "builders.nixos.org";
useDHCP = false;
};
systemd.network = {
enable = true;
networks = {
"30-eno1" = {
matchConfig = {
MACAddress = "74:56:3c:4e:d9:af";
Type = "ether";
};
linkConfig.RequiredForOnline = true;
networkConfig.Description = "WAN";
address = [
"135.181.230.86/26"
"2a01:4f9:3080:388f::1/64"
];
routes = [
{ Gateway = "135.181.230.65"; }
{ Gateway = "fe80::1"; }
];
};
};
};
system.stateVersion = "24.11";
}
================================================
FILE: builders/instances/sleepy-brown.nix
================================================
{
imports = [
../profiles/hetzner-ax101r.nix
];
nix.settings = {
cores = 24;
max-jobs = 4;
system-features = [ "big-parallel" ];
};
networking = {
hostName = "sleepy-brown";
domain = "builders.nixos.org";
useDHCP = false;
};
systemd.network = {
enable = true;
networks = {
"30-enp193s0f0np0" = {
matchConfig = {
MACAddress = "9c:6b:00:4e:fd:2d";
Type = "ether";
};
linkConfig.RequiredForOnline = true;
networkConfig.Description = "WAN";
address = [
"162.55.130.51/26"
"2a01:4f8:271:5c14::1/64"
];
routes = [
{ Gateway = "162.55.130.1"; }
{ Gateway = "fe80::1"; }
];
};
};
};
system.stateVersion = "24.11";
}
================================================
FILE: builders/network/autoconfig.nix
================================================
{
networking.useDHCP = false;
systemd.network = {
enable = true;
networks = {
"99-autoconfig" = {
matchConfig = {
Kind = "!*";
Type = "ether";
};
networkConfig = {
DHCP = "yes";
IPv6AcceptRA = true;
};
};
};
};
}
================================================
FILE: builders/profiles/hetzner-ax101r.nix
================================================
{
config,
lib,
...
}:
{
imports = [
../boot/efi-grub.nix
];
disko.devices = import ../disk-layouts/efi-zfs-raid0.nix { };
boot.supportedFilesystems.zfs = true;
networking.hostId = "91312b0a";
fileSystems."/nix/var/nix/builds" = {
device = "none";
fsType = "tmpfs";
options = [
"huge=within_size"
"mode=0700"
"nosuid"
"nodev"
]
# 128G tmpfs, 128G RAM (+zram swap) for standard builders
# 160GB tmpfs, 96 GB RAM (+zram swap) for big-parallel builders
++ (
if lib.elem "big-parallel" config.nix.settings.system-features then
[ "size=160G" ]
else
[ "size=128G" ]
);
};
boot.initrd.availableKernelModules = [
"nvme"
"usbhid"
];
}
================================================
FILE: builders/profiles/hetzner-rx170.nix
================================================
{
imports = [
../boot/efi-grub.nix
];
disko.devices = import ../disk-layouts/efi-zfs-raid0.nix { };
boot.supportedFilesystems.zfs = true;
networking.hostId = "91312b0a";
boot.initrd.availableKernelModules = [
"nvme"
"usbhid"
];
}
================================================
FILE: builders/profiles/hetzner-rx220.nix
================================================
{
imports = [
../boot/efi-grub.nix
];
disko.devices = import ../disk-layouts/efi-zfs-raid0.nix { };
boot.supportedFilesystems.zfs = true;
networking.hostId = "91312b0a";
boot.initrd.availableKernelModules = [
"nvme"
"usbhid"
];
}
================================================
FILE: channels.nix
================================================
rec {
channels = {
# "Channel name" = {
# # This should be the <value> part of
# # https://hydra.nixos.org/job/<value>/latest-finished
# job = "project/jobset/jobname";
#
# # When adding a new version, determine if it needs to be tagged as a
# # variant -- for example:
# # nixos-xx.xx => primary
# # nixos-xx.xx-small => small
# # nixos-xx.xx-darwin => darwin
# # nixos-xx.xx-aarch64 => aarch64
# variant = "primary";
#
# # Channel Status:
# # '*-unstable' channels are always "rolling"
# # Otherwise a release generally progresses through the following phases:
# #
# # - Directly after branch off => "beta"
# # - Once the channel is released => "stable"
# # - Once the next channel is released => "deprecated"
# # - N months after the next channel is released => "unmaintained"
# # (check the release notes for when this should happen)
# status = "beta";
# };
"nixos-unstable" = {
job = "nixos/unstable/tested";
variant = "primary";
status = "rolling";
};
"nixos-unstable-small" = {
job = "nixos/unstable-small/tested";
variant = "small";
status = "rolling";
};
"nixpkgs-unstable" = {
job = "nixpkgs/unstable/unstable";
status = "rolling";
};
"nixos-25.11" = {
job = "nixos/release-25.11/tested";
variant = "primary";
status = "stable";
};
"nixos-25.11-small" = {
job = "nixos/release-25.11-small/tested";
variant = "small";
status = "stable";
};
"nixpkgs-25.11-darwin" = {
job = "nixpkgs/nixpkgs-25.11-darwin/darwin-tested";
variant = "darwin";
status = "stable";
};
"nixos-25.05" = {
job = "nixos/release-25.05/tested";
variant = "primary";
status = "unmaintained";
};
"nixos-25.05-small" = {
job = "nixos/release-25.05-small/tested";
variant = "small";
status = "unmaintained";
};
"nixpkgs-25.05-darwin" = {
job = "nixpkgs/nixpkgs-25.05-darwin/darwin-tested";
variant = "darwin";
status = "unmaintained";
};
};
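# Map each channel name to its Hydra job path; the job is the <value> part of
# https://hydra.nixos.org/job/<value>/latest-finished (see the comment at the
# top of this file).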
channels-with-urls = builtins.mapAttrs (_name: about: about.job) channels;
}
================================================
FILE: checks/flake-module.nix
================================================
{ ... }:
{
perSystem =
{ self', lib, ... }:
{
checks =
let
# TODO: our CI doesn't have enough space for these just now
#nixosMachines = lib.mapAttrs' (
# name: config: lib.nameValuePair "nixos-${name}" config.config.system.build.toplevel
#) ((lib.filterAttrs (_: config: config.pkgs.system == system)) self.nixosConfigurations);
nixosMachines = { };
packages = lib.mapAttrs' (n: lib.nameValuePair "package-${n}") self'.packages;
devShells = lib.mapAttrs' (n: lib.nameValuePair "devShell-${n}") self'.devShells;
in
nixosMachines // packages // devShells;
};
}
================================================
FILE: dns/.envrc
================================================
# shellcheck shell=bash
use flake .#dnscontrol
================================================
FILE: dns/creds.json
================================================
{
"gandi": {
"TYPE": "GANDI_V5",
"token": "$GANDI_TOKEN"
}
}
================================================
FILE: dns/dnsconfig.js
================================================
DEFAULTS(
DefaultTTL("1h"),
NAMESERVER_TTL("24h")
);
var REG_NONE = NewRegistrar("none");
var DSP_GANDI = NewDnsProvider("gandi");
require("nixcon.org.js");
require("nix.dev.js");
require("nixos.org.js");
require("ofborg.org.js");
================================================
FILE: dns/flake-module.nix
================================================
{
perSystem =
{ pkgs, ... }:
{
devShells.dnscontrol = pkgs.mkShellNoCC {
packages = [
pkgs.dnscontrol
];
};
checks.dnscontrol = pkgs.runCommand "dnscontrol" { } ''
cd ${./.}
${pkgs.dnscontrol}/bin/dnscontrol check
touch $out
'';
};
}
================================================
FILE: dns/nix.dev.js
================================================
D("nix.dev",
REG_NONE,
DnsProvider(DSP_GANDI),
CAA_BUILDER({
label: "@",
iodef: "mailto:infra+caa@nixos.org",
iodef_critical: true,
issue: ["letsencrypt.org"],
issue_critical: true,
issuewild: "none",
issuewild_critical: true,
}),
// Domain is not used for mail
SPF_BUILDER({
label: "@",
parts: [
"v=spf1",
"-all"
]
}),
TXT("*._domainkey", "v=DKIM1; p="),
DMARC_BUILDER({
policy: "reject",
subdomainPolicy: "reject",
alignmentDKIM: "strict",
alignmentSPF: "strict"
}),
TXT("@", "google-site-verification=J55RGHyOPKpHAyIHVfBy1RdY_LuVIvLyuyR8deO62YE"),
ALIAS("@", "nix-dev.netlify.app."),
CNAME("www", "nix-dev.netlify.app.")
);
================================================
FILE: dns/nixcon.org.js
================================================
D("nixcon.org",
REG_NONE,
DnsProvider(DSP_GANDI),
CAA_BUILDER({
label: "@",
iodef: "mailto:infra+caa@nixos.org",
iodef_critical: true,
issue: ["letsencrypt.org"],
issue_critical: true,
issuewild: "none",
issuewild_critical: true,
}),
MX("@", 10, "umbriel.nixos.org."),
SPF_BUILDER({
label: "@",
parts: [
"v=spf1",
"a:umbriel.nixos.org",
"-all"
]
}),
// Matching private key in `non-critical-infra/secrets/nixcon.org.mail.key.umbriel`
TXT("mail._domainkey", "p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC1wQ2uPZfdlGmjDDxeNVet7IEFxS55TpWuqQWNKmd4fX8HcKKw7kVHXU5+gjT37wMUI27ZZnIobYhumnl+BLiXZqbuzAt7s3dbJU2de2ZWxOqcDRbK6m2A3AwIAiMzzRUjx14EWgnw55KRi2enpLyS0pKGdvSquHnxaySkAF8YIwIDAQAB"),
DMARC_BUILDER({
policy: "none",
}),
// Websites
TXT("_github-pages-challenge-nixcon", "6608e513e09036ab8cadb7ca4eb71b"),
// https://docs.github.com/en/pages/configuring-a-custom-domain-for-your-github-pages-site/managing-a-custom-domain-for-your-github-pages-site#configuring-an-apex-domain
A("@", "185.199.109.153"),
A("@", "185.199.111.153"),
AAAA("@", "2606:50c0:8001::153"),
AAAA("@", "2606:50c0:8003::153"),
CNAME("www", "nixcon.github.io."),
CNAME("2015", "nixcon.github.io."),
CNAME("2016", "nixcon.github.io."),
CNAME("2017", "nixcon.github.io."),
CNAME("2018", "nixcon.github.io."),
CNAME("2019", "nixcon.github.io."),
CNAME("2020", "nixcon.github.io."),
CNAME("2022", "nixcon.github.io."),
CNAME("2023", "nixcon.github.io."),
CNAME("2024-na", "nixcon.github.io."),
CNAME("2024", "nixcon.github.io."),
CNAME("2025", "nixcon.github.io."),
CNAME("2026", "nixcon.github.io."),
// Scheduling
CNAME("cfp", "pretalx.com."),
CNAME("talks", "pretalx.com."),
// Ticketing
CNAME("tickets", "nixcon.cname.pretix.eu."),
// 2025 ticket voucher eligibility check
CNAME("vouchers", "cache.ners.ch."),
// 2025 bee game
CNAME("bee", "cache.ners.ch.")
);
================================================
FILE: dns/nixos.org.js
================================================
D("nixos.org",
REG_NONE,
DnsProvider(DSP_GANDI),
TXT("@", "apple-domain-verification=OvacO4lGB9A6dBFg"),
TXT("@", "brevo-code:f580a125e215ecb440363a15cdf47a17"),
TXT("@", "google-site-verification=Pm5opvmNjJOwdb7JnuVJ_eFBPaZYWNcAavY-08AJoGc"),
// bluesky account/domain binding
TXT("_atproto", "did=did:plc:bf43o4nxudgubwt4iljpayb7"),
CAA_BUILDER({
label: "@",
iodef: "mailto:infra+caa@nixos.org",
iodef_critical: true,
issue: ["letsencrypt.org"],
issue_critical: true,
issuewild: "none",
issuewild_critical: true,
}),
// nixos.org mailing
MX("@", 10, "umbriel"),
SPF_BUILDER({
label: "@",
parts: [
"v=spf1",
"a:umbriel.nixos.org",
"-all"
]
}),
DMARC_BUILDER({
policy: "none",
}),
// discourse
A("discourse", "195.62.126.31"),
AAAA("discourse", "2a02:248:101:62::146f"),
MX("discourse", 10, "mail.nixosdiscourse.fcio.net."),
DMARC_BUILDER({
label: "discourse",
policy: "none",
}),
SPF_BUILDER({
label: "discourse",
parts: [
"v=spf1",
"ip4:185.105.252.151",
"ip6:2a02:248:101:62::1479",
"-all"
]
}),
TXT("mail._domainkey.discourse", "v=DKIM1; k=rsa; p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDmxDhMfDl6lnueSRCjYiWIDeTAJXR9Yw0PfpBfG7GPUIkMyqy9jVGpb4ECVTt9S1zfpr4dbtCgir781oVwZiwGIWzC8y8XsD37wernQIPN4Yubnrnpw+6lill4uA/AuyU/ghbeZ5lW03pHD//2EW4YEu+Jw4aS4rF0Wtk+BlJRCwIDAQAB"),
// fastly
CNAME("_acme-challenge.cache", "k2hql6g4rigivyu6nn.fastly-validations.com."),
CNAME("_acme-challenge.cache-staging", "kqwx9cvuf7lvjo8u9b.fastly-validations.com."),
CNAME("_acme-challenge.channels", "9u55qij5w2odiwqxfi.fastly-validations.com."),
CNAME("_acme-challenge.artifacts", "bsk6mjvi6b1r6wekb0.fastly-validations.com."),
CNAME("_acme-challenge.releases", "s731ezp9ameh5f349b.fastly-validations.com."),
CNAME("_acme-challenge.tarballs", "vnqm62k5sjx9jogeqg.fastly-validations.com."),
CNAME("cache", "dualstack.n.sni.global.fastly.net."),
CNAME("cache-staging", "dualstack.n.sni.global.fastly.net."),
CNAME("channels", "dualstack.n.sni.global.fastly.net."),
CNAME("artifacts", "dualstack.n.sni.global.fastly.net."),
CNAME("releases", "dualstack.n.sni.global.fastly.net."),
CNAME("tarballs", "dualstack.n.sni.global.fastly.net."),
// hydra.nixos.org
A("haumea", "46.4.89.205"),
AAAA("haumea", "2a01:4f8:212:41c9::1"),
A("mimas", "157.90.104.34"),
AAAA("mimas", "2a01:4f8:2220:11c8::1"),
CNAME("hydra", "mimas"),
CNAME("queue-runner.hydra", "mimas"),
A("pluto", "37.27.99.100"),
AAAA("pluto", "2a01:4f9:3070:15e0::1"),
CNAME("alerts", "pluto"),
CNAME("grafana", "pluto"),
CNAME("monitoring", "pluto"),
CNAME("prometheus", "pluto"),
A("titan", "159.69.62.224"),
AAAA("titan", "2a01:4f8:231:e53::1"),
// hydra buildfarm
AAAA("eager-heisenberg.mac", "2a01:4f8:d1:a027::2"),
A("elated-minsky.builder", "167.235.95.99"),
AAAA("elated-minsky.builder", "2a01:4f8:2220:1b03::1"),
A("enormous-catfish.mac", "142.132.140.199"),
A("goofy-hopcroft.builder", "135.181.225.104"),
AAAA("goofy-hopcroft.builder", "2a01:4f9:3071:2d8b::1"),
A("growing-jennet.mac", "23.88.76.75"),
A("hopeful-rivest.builder", "135.181.230.86"),
AAAA("hopeful-rivest.builder", "2a01:4f9:3080:388f::1"),
A("intense-heron.mac", "23.88.75.215"),
AAAA("kind-lumiere.mac", "2a09:9340:808:60a::1"),
A("maximum-snail.mac", "23.88.76.161"),
A("sleepy-brown.builder", "162.55.130.51"),
AAAA("sleepy-brown.builder", "2a01:4f8:271:5c14::1"),
A("sweeping-filly.mac", "142.132.141.35"),
AAAA("norwegian-blue.mac", "2a06:3a80:0:41:423:898a:1e16:3cf7"),
// hydra staging area
A("staging-hydra", "130.236.254.207"),
AAAA("staging-hydra", "2001:6b0:17:f0a0::cf"),
CNAME("queue-runner.staging-hydra", "staging-hydra"),
// services infra
A("caliban", "65.109.26.213"),
AAAA("caliban", "2a01:4f9:5a:186c::2"),
CNAME("chat", "caliban"),
CNAME("live", "caliban"),
CNAME("matrix", "caliban"),
CNAME("nixpkgs-swh", "caliban"),
CNAME("survey", "caliban"),
CNAME("vault", "caliban"),
DMARC_BUILDER({
label: "caliban",
policy: "none"
}),
SPF_BUILDER({
label: "caliban",
parts: [
"v=spf1",
"ip4:65.109.26.213",
"ip6:2a01:4f9:5a:186c::2",
"-all"
]
}),
TXT("mail._domainkey.caliban", "v=DKIM1; k=rsa; p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDDCLtvNH4Ly+9COXf7InptMvoA7I5O347D7+j+saECt7RRe8yNz4TmhJTyJik+bg7e3+l7EJM0vE6k7xtpGBXACY6CCmg/8EgUi6YnDd126ttJHWpoqO96w4SWX93G+ZnoSC8O5rTPqdaTTkntYDTrw5u5n+7RA8GarZadgmaEzwIDAQAB"),
A("umbriel", "37.27.20.162"),
AAAA("umbriel", "2a01:4f9:c011:8fb5::1"),
// See `nixos.org.mail.key` in `non-critical-infra/modules/mailserver/default.nix`.
TXT("mail._domainkey", "v=DKIM1; k=rsa; p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDcgNq4+Y23GxN8Mdza437tL5DuJJZU1y6VzTCwSi6cBNLyBDci2cmqXx/gm1sA3yv7+h+8/OyJpEgcbCIW/Ygs1XLuECqvXVX8MU6Djn4KY+d2sU1tlUdqvNM86puoneQtjEv9rDsjf3HGqaeOcjetFnQW7H+qcNcaEShxyKztzQIDAQAB"),
CNAME("freescout", "umbriel.nixos.org."),
// ngi
A("makemake.ngi", "116.202.113.248"),
AAAA("makemake.ngi", "2a01:4f8:231:4187::"),
CNAME("buildbot.ngi", "makemake.ngi.nixos.org."),
CNAME("cryptpad.ngi", "makemake.ngi.nixos.org."),
CNAME("cryptpad-sandbox.ngi", "makemake.ngi.nixos.org."),
CNAME("summer", "makemake.ngi.nixos.org."),
A("tracker-staging.security", "188.245.41.195"),
AAAA("tracker-staging.security", "2a01:4f8:1c1b:b87b::1"),
A("tracker.security", "91.99.31.214"),
AAAA("tracker.security", "2a01:4f8:1c1b:6921::1"),
// wiki
A("wiki", "65.21.240.250"),
AAAA("wiki", "2a01:4f9:c012:8178::"),
// Direct access to wiki server in Helsinki (for deployments)
A("he1.wiki", "65.21.240.250"),
AAAA("he1.wiki", "2a01:4f9:c012:8178::"),
DMARC_BUILDER({
label: "wiki",
policy: "none"
}),
SPF_BUILDER({
label: "wiki",
parts: [
"v=spf1",
"ip4:65.21.240.250",
"ip6:2a01:4f9:c012:8178::",
"-all"
]
}),
TXT("mail._domainkey.wiki", "v=DKIM1; k=rsa; p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDa+KjIljYr3q5MWWK7sEYzjR8OcA32zBh9BCPo6/HlY1q2ODTYsmE/FDZWpYMzM5z+ddnuGYdXia322XnZaNpZNoq1TbGYuQ5DsgAEK09CGoLuzONg3PSXTrkG7E2Sd6wstwHGJ5FHxSLKtNoWkknt9F5XAFZgXapO0w54p+BWvwIDAQAB"),
// test.wiki subdomain with Fastly
CNAME("test.wiki", "dualstack.n.sni.global.fastly.net."),
CNAME("_acme-challenge.test.wiki", "zsz0meyel8hxoy9dtb.fastly-validations.com."),
// github org/domain binding
TXT("_github-challenge-nixos", "9e10a04a4b"),
// github pages
CNAME("mobile", "nixos.github.io."),
CNAME("ngi", "ngi-nix.github.io."),
CNAME("reproducible", "nixos.github.io."),
TXT("_github-pages-challenge-ngi-nix.ngi", "4e8bffbb7ced2aec7be1f8cf3561d6"),
TXT("_github-pages-challenge-nixos", "f3a423ba6916e972cfb1e74f82f601"),
// netlify pages
A("@", "75.2.60.5"),
A("@", "99.83.231.61"),
CNAME("brand", "nixos-brand.netlify.app."),
CNAME("common-styles", "nixos-common-styles.netlify.app."),
CNAME("planet", "nixos-planet.netlify.app."),
CNAME("search", "nixos-search.netlify.app."),
CNAME("status", "nixos-status.netlify.app."),
CNAME("weekly", "nixos-weekly.netlify.com."),
CNAME("www", "nixos-homepage.netlify.app."),
);
================================================
FILE: dns/ofborg.org.js
================================================
D("ofborg.org",
REG_NONE,
DnsProvider(DSP_GANDI),
CAA_BUILDER({
label: "@",
iodef: "mailto:infra+caa@nixos.org",
iodef_critical: true,
issue: ["letsencrypt.org"],
issue_critical: true,
issuewild: "none",
issuewild_critical: true,
}),
// Domain is not used for mail
SPF_BUILDER({
label: "@",
parts: [
"v=spf1",
"-all"
]
}),
TXT("*._domainkey", "v=DKIM1; p="),
DMARC_BUILDER({
policy: "reject",
subdomainPolicy: "reject",
alignmentDKIM: "strict",
alignmentSPF: "strict"
}),
A("core", "136.144.57.217"),
AAAA("core", "2604:1380:45f1:400::3"),
CNAME("events", "core"),
CNAME("monitoring", "core"),
CNAME("webhook", "core"),
A("core01", "138.199.148.47"),
AAAA("core01", "2a01:4f8:c012:cda4::1"),
CNAME("gh-webhook", "core01"),
CNAME("logs", "core01"),
CNAME("messages", "core01"),
A("build01", "185.119.168.10"),
A("build02", "185.119.168.11"),
A("build03", "185.119.168.12"),
A("build04", "185.119.168.13"),
A("build05", "142.132.171.106"),
AAAA("build05", "2a01:4f8:1c1b:6d41::"),
A("eval01", "95.217.15.9"),
AAAA("eval01", "2a01:4f9:c012:cf00::1"),
A("eval02", "95.216.209.162"),
AAAA("eval02", "2a01:4f9:c012:17c6::1"),
A("eval03", "37.27.189.4"),
AAAA("eval03", "2a01:4f9:c012:e37b::1"),
A("eval04", "95.217.18.12"),
AAAA("eval04", "2a01:4f9:c012:273b::"),
// nixos-foundation-macstadium-44911305
A("mac01", "208.83.1.173"),
// nixos-foundation-macstadium-44911362
A("mac02", "208.83.1.175"),
// nixos-foundation-macstadium-44911507
A("mac03", "208.83.1.186"),
// nixos-foundation-macstadium-44911207
A("mac04", "208.83.1.145"),
// nixos-foundation-macstadium-44911104
A("mac05", "208.83.1.181"),
);
================================================
FILE: docs/inventory.md
================================================
# NixOS project resource inventory
This is the current inventory of hardware and services, and who owns or has access to them.
# Accounts
## GitHub
owner: @edolstra @domenkozar @garbas @grahamc @rbvermaa
## Domains
- owner: @edolstra
- nixos.org - https://www.uniteddomains.com/
## DNS
owner: Foundation
Managed by Netlify.
## AWS account
- owner: Infor
- alias: lb-nixos
- access: @rbvermaa and @edolstra
## Packet.net
- owner: @grahamc
## Hetzner Cloud
- owner: Graham
- (for ofborg)
## IRC logging bot
- owner: @samueldr
- url: https://logs.nix.samueldr.com/nixos/
- nick: <code>{\`-\`}</code>
- config: https://gitlab.com/samueldr.nix/overlays/irclogger
## nix.ci
owner: @grahamc
ofborg instance and logs
hosted on Packet.
## aarch64 community builder
- owner: @grahamc
- access: community members that have asked access to it
- host: Packet
lots of cores to build for the aarch64 platform
## survey.nixos.org
owner: @davidak
## nixcon2017.org
owner: Christine?
## nixcon2018.org
owner: @zimbatm
## NixOS Wiki
access: see https://wiki.nixos.org/wiki/Official_NixOS_Wiki:About
## Twitter accounts
**nixpkg** owner: Graham
**nixos_org** owner: Rob Vermaas
**nixcon2017** owner: Christine?
**nixcon2018** owner: zimbatm
## IRC
Group registration on FreeNode. Eelco and Graham can get OP on all channels
about NixOS.
The group owns:
- `#nix`
- `#nix-*`
- `#nixos-*`
`#nix` is invite only and is empty, it only redirects to `#nixos`
**List of common channels:**
`#nixos-dev`
`#nixos`, access list:
- 1 niksnut +AFRefiorstv [modified ? ago]
- 2 goodwill +o [modified 3y 36w 6d ago]
- 3 kmicu +o [modified 2y 32w 5d ago] long time member, left 4 months ago
- 4 gchristensen +o [modified 1y 37w 1d ago]
`#nixos-borg` `#nixos-aarch64` `#nix-darwin` `#nixos-chat`
`#nix-core` `#nixos-security` `#nixos-bots` `#nixos-docs`
`#nixos-wiki` `#nixos-on-your-router`
## cachix.org
owner: Domen
# Hardware
## On Packet.net
owner: Graham
2 builders: aarch64 Packet type 2, for Hydra
1 aarch64 for ofborg _and_ community use
## Hetzner:
owner: Eelco and Rob, owned by the NixOS Foundation
“chef”: runs hydra.nixos.org (PostgreSQL database, queue runner, hydra
provisioner) and holds the binary cache signing keys.
monitoring: **DataDog, accessible by Eelco (and Rob?) (Amine?) on the Infor
account**
## Mac Minis at Hetzner Cloud
- owner: the NixOS Foundation
- access: Cole-h & Hexa
- role: build machines
Current machine names:
- intense-heron.mac.nixos.org
- sweeping-filly.mac.nixos.org
- maximum-snail.mac.nixos.org
- growing-jennet.mac.nixos.org
- enormous-catfish.mac.nixos.org
## Mac Minis at Graham's house
- owner: the NixOS Foundation
- access: Cole-h
- role: build machines
- arm64:
- cosmic-stud
- tight-bug
- quality-ram
- becoming-hyena
There are also x86_64 mac minis, but they are offline because they produce too
much heat.
## Mac Stadium
- owner: MacStadium and rented to daniel peebles or the foundation?
- role: build machines
Eelco had a root password
## hydra-provisioner
?
## nixos-org
owner: LogicBlox (EC2 instance)
deployed from Eelco’s laptop
runs the website and the channel mirror script (systemd services with timers),
which updates the /releases buckets and the nixpkgs-channels repository (repo:
nixos-channel-scripts).
The tarball mirror script also runs from that machine.
================================================
FILE: docs/meeting-notes/2024-01-11.md
================================================
# 2024-01-11
First meeting of the (revamped) infra team.
Participants: delroth, hexa, raitobezarius, vcunat, zimbatm
## [zimbatm] Presentation
- At NixCon we added new people to the team, but we were not able to give those
new people space. With this in mind, I would like to dedicate one hour every
week or two where I can unblock infrastructure matters.
- I don’t know what people are interested in; I believe this is a volunteer
ecosystem and you should work on what you would like to work on.
- We have big challenges in front of us, e.g. the cache situation, with a new
team, maybe we can tackle those bigger challenges.
## Round of intros
Skipped in these edited notes.
## [raito] Recommending hexa for infra-core
- Consensus: yes please.
- [zimbatm] Done.
- delroth/vcunat to assist with onboarding, provision access, etc.
## [delroth] Matrix Homeserver situation
- EMS is dropping legacy plans after 2024-01-17
- https://github.com/NixOS/infra/issues/325
- We are getting dropped.
- We need to react but Graham, owner of the EMS account, is not reacting.
- The problem is not the cost but access to the account.
- delroth/hexa are in favor of self-hosting.
- But we need the database dump from EMS.
- hexa to prepare the config for this, delroth can act as backup/fallback.
- Fallback: we can always pay the $1200 (excl. VAT) for renewing the 1 year
plan.
## [hexa] Moving NGI out of nixos-org-configurations
- Goals: unblock ngi0 maintainers, less consumption of our review bandwidth.
- Should we move them to a new repo?
- Either in the nixos GitHub org or the ngi-nix org.
- Action item: let's ask them!
- https://github.com/NixOS/infra/issues/326
## Builders
- Context: various cost reduction efforts need to happen on the Hydra/ofborg
builders infra.
- There might be the possibility to get Hetzner to sponsor one more machine.
- [delroth] Pretty sure we are not using our build resources efficiently as it
is (queue-runner bottleneck)
- [vcunat] xz compression is the main problem
- [zimbatm] We should properly analyze where the bottlenecks are.
## Backups
- We are not doing proper backups of the NixOS infra.
- There is an rsync.net account where the Hydra database gets backed up to, at
least.
- Julien's vaultwarden PR is currently blocked by this, we're getting backup
storage space from Hetzner (storage boxes).
================================================
FILE: docs/meeting-notes/2024-01-25.md
================================================
# 2024-01-25
## [hexa, delroth] EMS Migration
- Configuration hasn’t been written yet, hexa might get it done this week.
- When will we get the data?
- Graham still holding it until it can get cleaned up (removing private user
data). Board set a deadline during the last meeting.
- We could talk to EMS directly, to get the account handed over
- We want ~10 days to do the migration (so: we want the data before Feb 7th)
## NixOS 23.11 upgrades
- Infra currently runs on NixOS 23.05
- No blockers, need to be updated individually
## Deployment setup
- Blocked on secret management, will likely be sops
- Machines use network configuration provided by NixOps
## Bitwarden
- Reason: Self-hosting, currently Jonas pays for the hosted plan.
- PR pending needs to be moved forward: https://github.com/NixOS/infra/pull/287
- delroth/hexa can hand out backup storage credentials.
## Binary cache
- Cost of S3 exceeds Foundation income…
- Garbage collection will be started
- Timeline: Start some time in 2024/02
- Communication will be sent out in advance
- Build list of store paths we want to keep and configure gc root for them
- Plan is to keep all FODs
- Make store paths that are about to get deleted unavailable prior to
deletion
- Potentially move parts of the cache to Hetzner
- delroth has capacity to look into this in 2024/02
- Needs a service to decide where a request should go (S3 or Hetzner)
- The logic could be installed at Fastly: try Hetzner first, fall back to S3
(see the sketch below)
- The service would be in the critical path; currently Fastly/S3 solve
availability for us
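
A minimal sketch of the routing idea above, assuming placeholder origin URLs
(no real endpoints were decided in this meeting); in practice the logic would
more likely live in Fastly VCL than in a standalone service:

```python
#!/usr/bin/env python3
# Hypothetical sketch only: prefer a (placeholder) Hetzner mirror for cache
# requests and fall back to S3 when the object is missing or the mirror errors.
import urllib.error
import urllib.request

# Placeholder origins -- not the real endpoints.
HETZNER_ORIGIN = "https://cache-mirror.example.org"
S3_ORIGIN = "https://cache-bucket.example.s3.amazonaws.com"


def resolve(path: str) -> str:
    """Return the origin URL that should serve `path` (e.g. 'abc123.narinfo')."""
    mirror_url = f"{HETZNER_ORIGIN}/{path}"
    try:
        # Cheap existence probe; a CDN would do the equivalent with a HEAD fetch.
        request = urllib.request.Request(mirror_url, method="HEAD")
        with urllib.request.urlopen(request, timeout=2):
            return mirror_url
    except urllib.error.URLError:
        # Missing object or mirror failure: S3 stays the source of truth.
        return f"{S3_ORIGIN}/{path}"


if __name__ == "__main__":
    print(resolve("example.narinfo"))
```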
================================================
FILE: docs/meeting-notes/2024-02-08.md
================================================
# 2024-02-08
Attendees: delroth, hexa, JulienMalka, lheckemann, raitobezarius, vcunat,
zimbatm
## [hexa, delroth] EMS Migration
Context: https://github.com/NixOS/infra/issues/325
- PR for Synapse and its dependencies is up.
- https://github.com/NixOS/infra/pull/336
- [Julien] What's the status of the backup module?
- Split off into its own PR and merged already:
https://github.com/NixOS/infra/pull/345
- raito and Ron met with Matrix / EMS folks at FOSDEM 2024
- They have scripts for GDPR compliance (user data purge), but we need to ask
them by email.
- Then we can get a clean DB dump, presumably without user data.
- Not sure whether we sent an email or not. But Graham might be in contact
directly, and EMS folks made him an offer to do the data deletion.
- Worst case Graham/DetSys will pay for the extension of the EMS plan.
- Probably no hurry anymore from the infra side. Foundation board is
monitoring this to make sure we have a solution at some point.
## [delroth] Should we publish these notes more widely?
- There is a trend towards publishing notes on Discourse, etc. for visibility.
- [delroth] My thoughts: we should archive (edited) notes in Git somewhere in
our docs/ folder, update a Discourse thread every 2 weeks.
- I of course volunteer to take care of this :)
- Consensus: let’s do it.
## [delroth] Packet/EQM access to infra-core
- Our builders are very, very outdated. But it is risky to try to update things
with zero debugging capability.
- Any reason why infra-core shouldn’t have full Packet/EQM access like we have
Hetzner access?
- Not entirely clear who currently has access?
- [zimbatm] Got access from eelco last weekend, will delegate.
- [raito] Does nix-netboot-serve run on our infra?
- [hexa] Yes, on eris. The images are also built from our infra; it’s a Hydra
jobset. But the jobset has not completed successfully for a year.
- [hexa] We can update stuff, but we have no way to debug issues if we do so.
- zimbatm took care of it live, woo!
## [raito] Stay in the loop of infrastructure matters
- How should work be split between zimbatm/raito?
- Would like access to private infra stuff to act as secondary.
- In general: who should have ownership of accounts?
- A bunch of GH org owners, for example, are inactive.
- Not really aligned with any subgroup, e.g. the foundation board.
- [zimbatm] I think the foundation should have access, but unfortunately the
foundation also doesn’t have the best personal security to hold those
credentials.
- [zimbatm] Maybe it should be the infra team instead? i.e.
delroth/hexa/vcunat/…
- [raito] That would work too, as long as it’s active folks who can take care
of day to day stuff. I don’t care that it’s specifically me, just that we
don’t get blocked due to not finding an owner.
- [zimbatm] I don’t feel like I can make that decision alone right now. Let’s
find some kind of organization which makes sense.
- Raito got invited into the private infra matrix channel (at least, for now)
## [Julien] NixOS wiki collaboration w/ infra team
- We have a bunch of candidate sysadmins in mind. Do we want to merge this into
non-critical-infra?
- [Julien] I’m a bit biased since I’m sitting on both sides of this discussion,
but I think this would be a good onramp to bring more people into
non-critical-infra.
- [zimbatm] We can subdivide permissions on the Hetzner Cloud side of things,
but I’m not sure whether we should share stuff further.
- [hexa] They have their setup mostly figured out already, including backups. We
can let them run with it for now, and we can always pick it up later.
- [linus] What about inviting them to non-critical-infra and just giving them
access to all the non-critical-infra? Even if they just want to maintain the
wiki.
- [hexa] It’s about being responsible for all of it. I don’t think we should
grant unneeded access.
- [Julien] +1.
- [delroth] I feel like if it’s official, we should treat it as such and
onboard it as part of non-critical infra. Doesn’t require giving them access
to everything.
- [linus] If it is official, then it should be maintained by the official
infra team
- [hexa] I think we’re mostly in agreement then.
- [delroth] non-critical-infra should be restricted to the relevant directories
and go through PRs for touching other stuff
- [Julien] They probably want to iterate fast in the beginning
- [delroth] They should get a dedicated machine on Hetzner Cloud that they
can play with
- [Julien] Too much shared code will increase reliance on core infra members.
- [delroth] Action items
- Let’s give them SSH access to a Hetzner Cloud VM
- Or a separate project so they get direct access to machines. Might already
be done.
- Let’s make sure we agree on the idea of moving this to non-critical-infra in
the short/mid-term future
- Provision DNS etc.
## External requests
- Hydra DB access (raitobezarius)
- Hashing out details in https://github.com/NixOS/infra/issues/348
- CA derivations for Hydra (Ericson2314)
- Nix 2.20 broke interop with the old Nix 2.13 we run on builders. Rolled back
to 2.19.
- https://github.com/NixOS/nix/issues/9961
- DB schema change applied.
## Ongoing projects
- [delroth] Hoping to complete the nixops deprecation this week. Then:
core/non-critical-infra alignment.
================================================
FILE: docs/meeting-notes/2024-02-22.md
================================================
# 2024-02-22
Attendees: delroth, edolstra, hexa, JulienMalka, raitobezarius, vcunat, zimbatm
## [delroth] FYI on availability next few weeks
- Traveling until mid-April, low availability, will be in the JST timezone
(UTC+9)
- Missing for the next 2 infra meetings
## [delroth] Backups situation
- How do w
Condensed preview — 381 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (891K chars).
[
{
"path": ".github/CODEOWNERS",
"chars": 667,
"preview": "# Every directory containing configurations impacting the core infra needs a\n# review from a member of core infra.\n/.git"
},
{
"path": ".github/ISSUE_TEMPLATE/feature_request.md",
"chars": 650,
"preview": "---\nname: Feature request\nabout: Suggest an improvement for this project\ntitle: \"\"\nlabels: enhancement\nassignees: \"\"\n---"
},
{
"path": ".github/ISSUE_TEMPLATE/service_disruption.md",
"chars": 391,
"preview": "---\nname: Service disruption report\nabout: Use this to report service instabilities\ntitle: \"<service-name>: \"\nlabels: bu"
},
{
"path": ".github/scripts/format-and-absorb.sh",
"chars": 1298,
"preview": "#!/usr/bin/env -S nix shell --inputs-from . nixpkgs#bash nixpkgs#git-absorb --command bash\n# shellcheck shell=bash\nset -"
},
{
"path": ".github/workflows/ci.yml",
"chars": 3292,
"preview": "name: CI\n\non:\n push:\n branches:\n - main\n pull_request:\n merge_group:\n\npermissions:\n contents: read\n\njobs:\n "
},
{
"path": ".github/workflows/dns-apply.yml",
"chars": 674,
"preview": "---\nname: Apply DNS changes\n\non:\n push:\n branches:\n - main\n paths:\n - \"dns/**\"\n workflow_dispatch:\n\npe"
},
{
"path": ".github/workflows/dns-preview.yml",
"chars": 757,
"preview": "---\nname: Test/Preview DNS changes\n\non:\n pull_request:\n paths:\n - \"dns/**\"\n\npermissions: {}\n\njobs:\n dnscontrol"
},
{
"path": ".github/workflows/format-pr.yml",
"chars": 3644,
"preview": "name: Format PR\n\non:\n issue_comment:\n types: [created]\n workflow_dispatch:\n inputs:\n pr_number:\n des"
},
{
"path": ".github/workflows/zizmor.yml",
"chars": 1082,
"preview": "name: GitHub Actions Security Analysis with zizmor 🌈\n\non:\n push:\n branches:\n - main\n paths:\n - \".github"
},
{
"path": ".gitignore",
"chars": 124,
"preview": "*~\n\n# Terraform\n.terraform*\n\n# Direnv\n.direnv\n\n# Nix build outputs\nresult\n\n# Colmena --keep-result roots directory\n.gcro"
},
{
"path": "LICENSE",
"chars": 1090,
"preview": "MIT License\n\nCopyright (c) 2024 NixOS Foundation and contributors\n\nPermission is hereby granted, free of charge, to any "
},
{
"path": "README.md",
"chars": 1700,
"preview": "# The NixOS infrastructure configurations\n\nThis repository contains all the hardware configuration for the nixos project"
},
{
"path": "build/.envrc",
"chars": 42,
"preview": "# shellcheck shell=bash\nuse flake .#build\n"
},
{
"path": "build/colmena.nix",
"chars": 1866,
"preview": "# heavily adapted from https://github.com/juspay/colmena-flake\n# Original license: GNU Affero General Public License v3."
},
{
"path": "build/colmena.sh",
"chars": 79,
"preview": "#!/usr/bin/env bash\nset -euo pipefail\n\ncd \"$(dirname \"$0\")\"\ncolmena apply \"$@\"\n"
},
{
"path": "build/common.nix",
"chars": 1687,
"preview": "{\n pkgs,\n lib,\n ...\n}:\n\n{\n imports = [\n ../modules/common.nix\n ../modules/nftables.nix\n ../modules/promethe"
},
{
"path": "build/datadog/hydra.nix",
"chars": 497,
"preview": "{ pkgs, ... }:\n{\n systemd.services.dd-agent.environment.PYTHONPATH =\n \"${pkgs.pythonPackages.requests}/lib/python2.7"
},
{
"path": "build/datadog/hydra.py",
"chars": 318,
"preview": "import json\n\nimport requests\n\nimport checks\n\n\nclass HydraCheck(checks.AgentCheck):\n def check(self, instance) -> None"
},
{
"path": "build/flake-module.nix",
"chars": 1383,
"preview": "{\n inputs,\n lib,\n ...\n}:\nlet\n flakesModule = {\n imports = [\n inputs.agenix.nixosModules.age\n inputs.dis"
},
{
"path": "build/haumea/boot.nix",
"chars": 292,
"preview": "{\n boot.loader.grub = {\n devices = [\n \"/dev/nvme0n1\"\n \"/dev/nvme1n1\"\n ];\n copyKernels = true;\n co"
},
{
"path": "build/haumea/default.nix",
"chars": 1087,
"preview": "{\n lib,\n modulesPath,\n pkgs,\n ...\n}:\n\n{\n imports = [\n \"${modulesPath}/installer/scan/not-detected.nix\"\n ../co"
},
{
"path": "build/haumea/network.nix",
"chars": 546,
"preview": "{\n systemd.network = {\n enable = true;\n networks = {\n \"30-enp35s0\" = {\n matchConfig = {\n MAC"
},
{
"path": "build/haumea/postgresql.nix",
"chars": 2870,
"preview": "{\n config,\n pkgs,\n ...\n}:\n\n{\n services.prometheus.exporters.postgres = {\n enable = true;\n dataSourceName = \"us"
},
{
"path": "build/haumea/zrepl.yml",
"chars": 536,
"preview": "# root@zh4461b.rsync.net:/usr/local/etc/zrepl/zrepl.yml\n# zrepl main configuration file.\n# For documentation, refer to h"
},
{
"path": "build/hydra-proxy.nix",
"chars": 2482,
"preview": "{\n config,\n pkgs,\n ...\n}:\n\n{\n networking.firewall.allowedTCPPorts = [\n 80\n 443\n ];\n\n services.anubis.instanc"
},
{
"path": "build/hydra.nix",
"chars": 6713,
"preview": "{\n config,\n lib,\n pkgs,\n inputs,\n ...\n}:\n\nlet\n narCache = \"/var/cache/hydra/nar-cache\";\nin\n\n{\n imports = [\n in"
},
{
"path": "build/id_buildfarm.pub",
"chars": 404,
"preview": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyM48VC5fpjJssLI8uolFscP4/iEoMHfkPoT9R3iE3OEjadmwa1XCAiXUoa7HSshw79SgPKF2KbGBPEVCa"
},
{
"path": "build/mimas/boot.nix",
"chars": 580,
"preview": "{\n boot = {\n initrd.availableKernelModules = [\n \"ahci\"\n \"xhci_pci\"\n \"nvme\"\n \"usbhid\"\n ];\n "
},
{
"path": "build/mimas/default.nix",
"chars": 410,
"preview": "{\n imports = [\n ../common.nix\n ../hydra.nix\n ../hydra-proxy.nix\n ./boot.nix\n ./firewall.nix\n ./networ"
},
{
"path": "build/mimas/disko.nix",
"chars": 1618,
"preview": "let\n layout = id: {\n type = \"gpt\";\n partitions = {\n esp = {\n type = \"EF00\";\n size = \"512M\";\n "
},
{
"path": "build/mimas/firewall.nix",
"chars": 1846,
"preview": "{\n pkgs,\n lib,\n inputs,\n ...\n}:\n\nlet\n blockedAutNums = [\n 45102 # ALIBABA-CN-NET\n 45899 # VNPT-AS-VN\n 1322"
},
{
"path": "build/mimas/network.nix",
"chars": 989,
"preview": "{\n networking.useDHCP = false;\n\n systemd.network = {\n enable = true;\n netdevs = {\n \"20-vlan4000\" = {\n "
},
{
"path": "build/nginx-error-pages/403.html",
"chars": 2224,
"preview": "<!DOCTYPE html>\n<html lang=\"en\">\n <head>\n <title>Error 403 - hydra.nixos.org</title>\n <meta http-equiv=\"Content-T"
},
{
"path": "build/nginx-error-pages/502.html",
"chars": 1764,
"preview": "<!DOCTYPE html>\n<html lang=\"en\">\n <head>\n <title>Error 502 - hydra.nixos.org</title>\n <meta http-equiv=\"Content-T"
},
{
"path": "build/nginx-error-pages/503.html",
"chars": 872,
"preview": "<!DOCTYPE html>\n\n<html>\n <head>\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" />\n <title>Hy"
},
{
"path": "build/pluto/boot.nix",
"chars": 445,
"preview": "{\n boot = {\n supportedFilesystems = [ \"zfs\" ];\n loader = {\n efi.canTouchEfiVariables = false;\n grub = {"
},
{
"path": "build/pluto/default.nix",
"chars": 952,
"preview": "{ config, ... }:\n\n{\n imports = [\n ../common.nix\n ./boot.nix\n ./disko.nix\n ./network.nix\n\n ./grafana.nix\n"
},
{
"path": "build/pluto/disko.nix",
"chars": 2290,
"preview": "{\n disko.devices = {\n disk = {\n nvme0n1 = {\n type = \"disk\";\n device = \"/dev/disk/by-id/nvme-SAMSU"
},
{
"path": "build/pluto/grafana.nix",
"chars": 695,
"preview": "{\n config,\n ...\n}:\n{\n services.backup.includes = [ \"/var/lib/grafana\" ];\n\n age.secrets.\"grafana-secret-key\" = {\n "
},
{
"path": "build/pluto/network.nix",
"chars": 498,
"preview": "{\n systemd.network = {\n enable = true;\n networks = {\n \"30-enp5s0\" = {\n matchConfig = {\n MACA"
},
{
"path": "build/pluto/nginx.nix",
"chars": 1344,
"preview": "{ config, ... }:\n\n{\n networking.firewall.allowedTCPPorts = [\n 80\n 443\n ];\n\n services.nginx = {\n enable = tru"
},
{
"path": "build/pluto/nixos-metrics.nix",
"chars": 1162,
"preview": "{ config, pkgs, ... }:\n\n{\n systemd.services.pull-nixos-metrics = {\n description = \"Pull nixos metrics from github:Ni"
},
{
"path": "build/pluto/prometheus/alertmanager.nix",
"chars": 5109,
"preview": "{ config, ... }:\n\n{\n services.prometheus = {\n alertmanagers = [\n {\n scheme = \"http\";\n static_conf"
},
{
"path": "build/pluto/prometheus/default.nix",
"chars": 1460,
"preview": "{ pkgs, ... }:\n\n{\n imports = [\n ./alertmanager.nix\n ./exporters/anubis.nix\n ./exporters/blackbox.nix\n ./exp"
},
{
"path": "build/pluto/prometheus/exporters/anubis.nix",
"chars": 247,
"preview": "{\n services.prometheus = {\n scrapeConfigs = [\n {\n job_name = \"anubis\";\n static_configs = [\n "
},
{
"path": "build/pluto/prometheus/exporters/blackbox.nix",
"chars": 4771,
"preview": "{ config, pkgs, ... }:\n\nlet\n mkStaticProbe =\n {\n module,\n targets,\n job_suffix ? \"\",\n }:\n {\n "
},
{
"path": "build/pluto/prometheus/exporters/channel-exporter.py",
"chars": 2991,
"preview": "#!/usr/bin/env python3\n\nimport json\nimport logging\nimport sys\nimport time\nfrom pprint import pprint\n\nimport requests\nfro"
},
{
"path": "build/pluto/prometheus/exporters/channel.nix",
"chars": 1226,
"preview": "{ lib, pkgs, ... }:\n\nlet\n channels = pkgs.writeText \"channels.json\" (\n builtins.toJSON (import ../../../../channels."
},
{
"path": "build/pluto/prometheus/exporters/domain.nix",
"chars": 1695,
"preview": "{ pkgs, ... }:\n\n{\n services.prometheus = {\n exporters.domain = {\n enable = true;\n listenAddress = \"localho"
},
{
"path": "build/pluto/prometheus/exporters/fastly.nix",
"chars": 470,
"preview": "{ config, ... }:\n\n{\n age.secrets.fastly-exporter-env.file = ../../../secrets/fastly-exporter-env.age;\n\n services.prome"
},
{
"path": "build/pluto/prometheus/exporters/github.nix",
"chars": 1113,
"preview": "{ pkgs, ... }:\n\nlet\n exporter = pkgs.fetchFromGitHub {\n owner = \"grahamc\";\n repo = \"prometheus-github-exporter\";\n"
},
{
"path": "build/pluto/prometheus/exporters/hydra-queue-runner-reexporter.py",
"chars": 21401,
"preview": "#!/usr/bin/env nix-shell\n#!nix-shell -i python3 -p python3 -p python3Packages.requests -p python3Packages.prometheus_cli"
},
{
"path": "build/pluto/prometheus/exporters/hydra.nix",
"chars": 2187,
"preview": "{ pkgs, ... }:\n\n{\n systemd.services.prometheus-hydra-queue-runner-exporter = {\n wantedBy = [ \"multi-user.target\" ];\n"
},
{
"path": "build/pluto/prometheus/exporters/json.nix",
"chars": 2061,
"preview": "{ config, pkgs, ... }:\n\n{\n services.prometheus = {\n exporters.json = {\n enable = true;\n listenAddress = \"l"
},
{
"path": "build/pluto/prometheus/exporters/matrix-synapse.nix",
"chars": 190,
"preview": "{\n services.prometheus.scrapeConfigs = [\n {\n job_name = \"matrix_synapse\";\n scheme = \"https\";\n static_"
},
{
"path": "build/pluto/prometheus/exporters/nixos.nix",
"chars": 534,
"preview": "{\n services.prometheus.scrapeConfigs = [\n {\n job_name = \"nixos\";\n static_configs = [\n {\n l"
},
{
"path": "build/pluto/prometheus/exporters/node.nix",
"chars": 5400,
"preview": "{ pkgs, ... }:\n\n{\n services.prometheus = {\n scrapeConfigs = [\n {\n job_name = \"node\";\n static_conf"
},
{
"path": "build/pluto/prometheus/exporters/owncast.nix",
"chars": 530,
"preview": "{ config, ... }:\n\n{\n age.secrets.owncast-admin-password = {\n file = ../../../secrets/owncast-admin-password.age;\n "
},
{
"path": "build/pluto/prometheus/exporters/postgresql.nix",
"chars": 331,
"preview": "{\n services.prometheus.scrapeConfigs = [\n {\n job_name = \"postgresql\";\n metrics_path = \"/metrics\";\n st"
},
{
"path": "build/pluto/prometheus/exporters/rasdaemon.nix",
"chars": 1386,
"preview": "{ pkgs, ... }:\n\n{\n services.prometheus = {\n scrapeConfigs = [\n {\n job_name = \"rasdaemon\";\n static"
},
{
"path": "build/pluto/prometheus/exporters/sql.nix",
"chars": 199,
"preview": "{\n services.prometheus.scrapeConfigs = [\n {\n job_name = \"sql\";\n metrics_path = \"/metrics\";\n static_co"
},
{
"path": "build/pluto/prometheus/exporters/storagebox.nix",
"chars": 1153,
"preview": "{\n config,\n pkgs,\n ...\n}:\n{\n age.secrets.\"storagebox-exporter-token\".file = ../../../secrets/storagebox-exporter-tok"
},
{
"path": "build/pluto/prometheus/exporters/up.nix",
"chars": 567,
"preview": "{ pkgs, ... }:\n\n{\n services.prometheus.ruleFiles = [\n (pkgs.writeText \"up.rules\" (\n builtins.toJSON {\n g"
},
{
"path": "build/pluto/prometheus/exporters/zfs.nix",
"chars": 1554,
"preview": "{\n pkgs,\n ...\n}:\n{\n services.prometheus = {\n scrapeConfigs = [\n {\n job_name = \"zfs\";\n static_co"
},
{
"path": "build/pluto/prometheus/exporters/zrepl.nix",
"chars": 953,
"preview": "{ pkgs, ... }:\n\n{\n services.prometheus = {\n scrapeConfigs = [\n {\n job_name = \"zrepl\";\n static_con"
},
{
"path": "build/scripts/nix-mac-installer.sh",
"chars": 1832,
"preview": "#! /usr/bin/env bash\n\nset -e\n\nif [[ $(id -u) != 0 ]]; then\n echo \"$0: please run this script as root\"\n exit 1\nfi\n\nexpo"
},
{
"path": "build/scripts/nix-mac-nuke.sh",
"chars": 387,
"preview": "#! /usr/bin/env bash\n\nservice_plist=/Library/LaunchDaemons/org.nixos.nix-daemon.plist\n\nlaunchctl stop $service_plist\nlau"
},
{
"path": "build/secrets/alertmanager-oauth2-proxy-env.age",
"chars": 1075,
"preview": "age-encryption.org/v1\n-> ssh-ed25519 s9hT2g WEFWAkfO/QbTyYHtjbtFU819qNNwdEbxj43CAyoCth8\nqoaEcEMG3pioLP8DYEV7am6ARmo/1Fi6"
},
{
"path": "build/secrets/eager-heisenberg-queue-runner-token.age",
"chars": 1161,
"preview": "age-encryption.org/v1\n-> ssh-ed25519 cKT5Kw d2hBbAiEI7iLoP1c7WgXkJXnqfsy3GWPy23NZcHrb3A\ndIEVrctp2Ryu92cSBILUE+qeeLz0raQ1"
},
{
"path": "build/secrets/fastly-exporter-env.age",
"chars": 1031,
"preview": "age-encryption.org/v1\n-> ssh-ed25519 s9hT2g RO6Blf+MB32dW1vWtwpsdutfPRDhXp6qMh+9K5mP/yI\naojG0tr0pQ172/Sgrcm4ltdGJH5uCdW6"
},
{
"path": "build/secrets/goofy-hopcroft-queue-runner-token.age",
"chars": 1167,
"preview": "age-encryption.org/v1\n-> ssh-ed25519 cKT5Kw hA/K9EJyGfAbGbokosZGVEJqasHjE2bgr2EpEN4O/iQ\n7GaeyhJHezMSytl+75UzkiLvbxMpWSKo"
},
{
"path": "build/secrets/grafana-secret-key.age",
"chars": 1018,
"preview": "age-encryption.org/v1\n-> ssh-ed25519 s9hT2g Q71aJ0AH5YJng/IVw8l5lch8zdGP3Z0QJUIQ+DqYF3w\nKI+qnX5ShsgtdtC78UHGwiKjAgWNwahf"
},
{
"path": "build/secrets/hopeful-rivest-queue-runner-token.age",
"chars": 1156,
"preview": "age-encryption.org/v1\n-> ssh-ed25519 cKT5Kw jz7oaOXlftKuXEIeFcFXacn0gcDuQhGkZRLmf0QTPXQ\nBr67PR4rBrZaKbP/X8X4vFkPq8L5IiNi"
},
{
"path": "build/secrets/hydra-github-client-secret.age",
"chars": 1019,
"preview": "age-encryption.org/v1\n-> ssh-ed25519 cKT5Kw krCNPgqeLrULZyGtFdc2VwmEVaKC7uaDabi7tv3dHVw\nOOEZQ4o4xqFs42TEYwNNWkOQbSvVkq8n"
},
{
"path": "build/secrets/hydra-mirror-aws-credentials.age",
"chars": 1062,
"preview": "age-encryption.org/v1\n-> ssh-ed25519 s9hT2g 3oyWmMcrRcr1Evv9+Srx3z3OyKajSPpJiC3APOYE0RU\nRCC/gmOyy0JRkWIRhzK37xckWnpQYQ74"
},
{
"path": "build/secrets/hydra-mirror-git-credentials.age",
"chars": 1047,
"preview": "age-encryption.org/v1\n-> ssh-ed25519 s9hT2g To6KM19p0hgH9n8iTV5uO0DU0lK94NWPiDV9UkUwwFc\nZc1aT0dmu/6zIYmBgpQjENZpmb5Ob4E8"
},
{
"path": "build/secrets/kind-lumiere-queue-runner-token.age",
"chars": 1158,
"preview": "age-encryption.org/v1\n-> ssh-ed25519 cKT5Kw 8g2rqFnJ23pFpD4PniCDMPiueSroGH2yShkpHtPvZDc\nZyYcqRHGP4H4ElRs3rNAOzJ7In3MnVT8"
},
{
"path": "build/secrets/norwegian-blue-queue-runner-token.age",
"chars": 1160,
"preview": "age-encryption.org/v1\n-> ssh-ed25519 cKT5Kw aMrFTLVt8LAofBa0xq3o4EjsxQjRAPtHm13zmSM+6VA\natGjWVSAl8O9I44eY3BO+QeQ6EDuAEsE"
},
{
"path": "build/secrets/owncast-admin-password.age",
"chars": 1019,
"preview": "age-encryption.org/v1\n-> ssh-ed25519 s9hT2g M/D2oe8ocLzBBe0VTEO6UZ0gZb+dL13/rfZ38N1KH1I\n1KmR71+57D0aBRlU7ZvPz6Prg3mNrYc7"
},
{
"path": "build/secrets/pluto-backup-secret.age",
"chars": 1038,
"preview": "age-encryption.org/v1\n-> ssh-ed25519 s9hT2g reYMr+USW2vh77665Ga/KtPbeu5OrdgrKgI4sYo8plo\n4eBoVfWTjRe4w6Vdl6OAXKJr7kaSJqVV"
},
{
"path": "build/secrets/sleepy-brown-queue-runner-token.age",
"chars": 1165,
"preview": "age-encryption.org/v1\n-> ssh-ed25519 cKT5Kw r8aZ+OCr9AE4h0zattrGpFPwBcnb28/Mj7vNC5EEHDE\nSaN75cMS6o0bcuIzeKF8siNu0P7rvJN4"
},
{
"path": "build/secrets.nix",
"chars": 2278,
"preview": "let\n keys = import ../ssh-keys.nix;\n\n secrets = with keys; {\n alertmanager-matrix-forwarder = [ machines.pluto ];\n "
},
{
"path": "build/titan/boot.nix",
"chars": 582,
"preview": "{\n boot = {\n initrd.availableKernelModules = [\n \"ahci\"\n \"xhci_pci\"\n \"nvme\"\n \"usbhid\"\n ];\n "
},
{
"path": "build/titan/default.nix",
"chars": 323,
"preview": "{\n imports = [\n ../common.nix\n ./boot.nix\n ./network.nix\n ./postgresql.nix\n ./zrepl.nix\n ];\n\n disko.de"
},
{
"path": "build/titan/disko.nix",
"chars": 1539,
"preview": "let\n layout = id: {\n type = \"gpt\";\n partitions = {\n esp = {\n type = \"EF00\";\n size = \"1G\";\n "
},
{
"path": "build/titan/network.nix",
"chars": 1051,
"preview": "{\n systemd.network = {\n enable = true;\n netdevs = {\n \"20-vlan4000\" = {\n netdevConfig = {\n Ki"
},
{
"path": "build/titan/postgresql.nix",
"chars": 3216,
"preview": "{\n config,\n lib,\n pkgs,\n ...\n}:\n\n{\n services.prometheus.exporters.postgres = {\n enable = true;\n dataSourceNam"
},
{
"path": "build/titan/zrepl.nix",
"chars": 4166,
"preview": "{\n config,\n lib,\n ...\n}:\n\nlet\n metricsPort = 9811;\nin\n{\n age.secrets.\"zrepl-ssh-key\" = {\n file = ../secrets/zrep"
},
{
"path": "build/titan/zrepl.yml",
"chars": 535,
"preview": "# root@zh4461b.rsync.net:/usr/local/etc/zrepl/zrepl.yml\n# zrepl main configuration file.\n# For documentation, refer to h"
},
{
"path": "builders/boot/efi-grub.nix",
"chars": 390,
"preview": "{\n boot.loader = {\n efi.canTouchEfiVariables = false;\n grub = {\n enable = true;\n configurationLimit = 5"
},
{
"path": "builders/common/hardening.nix",
"chars": 275,
"preview": "{\n # no priviledge escalation through sudo or polkit\n security.sudo.execWheelOnly = true;\n security.polkit.enable = f"
},
{
"path": "builders/common/hydra-queue-builder.nix",
"chars": 520,
"preview": "{\n config,\n inputs,\n lib,\n ...\n}:\n\n{\n imports = [\n inputs.hydra-staging.nixosModules.builder\n ];\n\n config = li"
},
{
"path": "builders/common/network.nix",
"chars": 221,
"preview": "{\n networking = {\n domain = \"builders.nixos.org\";\n\n firewall = {\n # too spammy, rotates dmesg too quickly\n "
},
{
"path": "builders/common/nix.nix",
"chars": 1317,
"preview": "{\n config,\n lib,\n pkgs,\n ...\n}:\n\n{\n nix = {\n package = pkgs.nix;\n nrBuildUsers = config.nix.settings.max-jobs"
},
{
"path": "builders/common/node-exporter.nix",
"chars": 229,
"preview": "{\n config,\n ...\n}:\n\n{\n networking.firewall.allowedTCPPorts = [\n config.services.prometheus.exporters.node.port\n ]"
},
{
"path": "builders/common/ssh.nix",
"chars": 145,
"preview": "{\n lib,\n ...\n}:\n\n{\n services.openssh = {\n enable = true;\n authorizedKeysFiles = lib.mkForce [ \"/etc/ssh/authori"
},
{
"path": "builders/common/system.nix",
"chars": 498,
"preview": "{\n pkgs,\n ...\n}:\n\n{\n # apply microcode to fix functional and security issues\n hardware.enableRedistributableFirmware"
},
{
"path": "builders/common/tools.nix",
"chars": 187,
"preview": "{\n pkgs,\n ...\n}:\n\n{\n environment.systemPackages = with pkgs; [\n atop\n ethtool\n htop\n lm_sensors\n nix-t"
},
{
"path": "builders/common/update.nix",
"chars": 161,
"preview": "{\n system.autoUpgrade = {\n enable = true;\n dates = \"daily\";\n flake = \"git+https://github.com/nixos/infra.git?r"
},
{
"path": "builders/common/users.nix",
"chars": 813,
"preview": "{\n config,\n lib,\n pkgs,\n ...\n}:\nlet\n sshKeys = {\n hydra-queue-runner-rhea = \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAA"
},
{
"path": "builders/disk-layouts/efi-zfs-raid0.nix",
"chars": 1182,
"preview": "{\n disk1 ? \"/dev/nvme0n1\",\n disk2 ? \"/dev/nvme1n1\",\n}:\nlet\n mkDiskLayout = id: {\n type = \"gpt\";\n partitions = {"
},
{
"path": "builders/flake-module.nix",
"chars": 1453,
"preview": "{ inputs, ... }:\n{\n flake.nixosConfigurations =\n let\n mkNixOS =\n system: config:\n inputs.nixpkgs."
},
{
"path": "builders/instances/elated-minsky.nix",
"chars": 767,
"preview": "{\n imports = [\n ../profiles/hetzner-ax101r.nix\n ];\n\n nix.settings = {\n cores = 2;\n max-jobs = 48;\n };\n\n ne"
},
{
"path": "builders/instances/goofy-hopcroft.nix",
"chars": 767,
"preview": "{\n imports = [\n ../profiles/hetzner-rx220.nix\n ];\n\n nix.settings = {\n cores = 2;\n max-jobs = 40;\n };\n\n net"
},
{
"path": "builders/instances/hopeful-rivest.nix",
"chars": 803,
"preview": "{\n imports = [\n ../profiles/hetzner-rx170.nix\n ];\n\n nix.settings = {\n cores = 20;\n max-jobs = 10;\n system"
},
{
"path": "builders/instances/sleepy-brown.nix",
"chars": 806,
"preview": "{\n imports = [\n ../profiles/hetzner-ax101r.nix\n ];\n\n nix.settings = {\n cores = 24;\n max-jobs = 4;\n system"
},
{
"path": "builders/network/autoconfig.nix",
"chars": 316,
"preview": "{\n networking.useDHCP = false;\n\n systemd.network = {\n enable = true;\n networks = {\n \"99-autoconfig\" = {\n "
},
{
"path": "builders/profiles/hetzner-ax101r.nix",
"chars": 749,
"preview": "{\n config,\n lib,\n ...\n}:\n\n{\n imports = [\n ../boot/efi-grub.nix\n ];\n\n disko.devices = import ../disk-layouts/efi"
},
{
"path": "builders/profiles/hetzner-rx170.nix",
"chars": 258,
"preview": "{\n imports = [\n ../boot/efi-grub.nix\n ];\n\n disko.devices = import ../disk-layouts/efi-zfs-raid0.nix { };\n boot.su"
},
{
"path": "builders/profiles/hetzner-rx220.nix",
"chars": 258,
"preview": "{\n imports = [\n ../boot/efi-grub.nix\n ];\n\n disko.devices = import ../disk-layouts/efi-zfs-raid0.nix { };\n boot.su"
},
{
"path": "channels.nix",
"chars": 2327,
"preview": "rec {\n channels = {\n # \"Channel name\" = {\n # # This should be the <value> part of\n # # https://hydra.nixos"
},
{
"path": "checks/flake-module.nix",
"chars": 677,
"preview": "{ ... }:\n{\n perSystem =\n { self', lib, ... }:\n {\n checks =\n let\n # TODO: our CI doesn't have"
},
{
"path": "dns/.envrc",
"chars": 47,
"preview": "# shellcheck shell=bash\nuse flake .#dnscontrol\n"
},
{
"path": "dns/creds.json",
"chars": 73,
"preview": "{\n \"gandi\": {\n \"TYPE\": \"GANDI_V5\",\n \"token\": \"$GANDI_TOKEN\"\n }\n}\n"
},
{
"path": "dns/dnsconfig.js",
"chars": 235,
"preview": "DEFAULTS(\n\tDefaultTTL(\"1h\"),\n\tNAMESERVER_TTL(\"24h\")\n);\nvar REG_NONE = NewRegistrar(\"none\");\nvar DSP_GANDI = NewDnsProvid"
},
{
"path": "dns/flake-module.nix",
"chars": 322,
"preview": "{\n perSystem =\n { pkgs, ... }:\n {\n devShells.dnscontrol = pkgs.mkShellNoCC {\n packages = [\n "
},
{
"path": "dns/nix.dev.js",
"chars": 679,
"preview": "D(\"nix.dev\",\n\tREG_NONE,\n\tDnsProvider(DSP_GANDI),\n\n\tCAA_BUILDER({\n\t\tlabel: \"@\",\n\t\tiodef: \"mailto:infra+caa@nixos.org\",\n\t\t"
},
{
"path": "dns/nixcon.org.js",
"chars": 1907,
"preview": "D(\"nixcon.org\",\n\tREG_NONE,\n\tDnsProvider(DSP_GANDI),\n\n\tCAA_BUILDER({\n\t\tlabel: \"@\",\n\t\tiodef: \"mailto:infra+caa@nixos.org\","
},
{
"path": "dns/nixos.org.js",
"chars": 7022,
"preview": "D(\"nixos.org\",\n\tREG_NONE,\n\tDnsProvider(DSP_GANDI),\n\n\tTXT(\"@\", \"apple-domain-verification=OvacO4lGB9A6dBFg\"),\n\tTXT(\"@\", \""
},
{
"path": "dns/ofborg.org.js",
"chars": 1698,
"preview": "D(\"ofborg.org\",\n\tREG_NONE,\n\tDnsProvider(DSP_GANDI),\n\n\tCAA_BUILDER({\n\t\tlabel: \"@\",\n\t\tiodef: \"mailto:infra+caa@nixos.org\","
},
{
"path": "docs/inventory.md",
"chars": 3391,
"preview": "# NixOS project resource inventory\n\nThis is the current list of hardware and services that everyone has access to.\n\n# Ac"
},
{
"path": "docs/meeting-notes/2024-01-11.md",
"chars": 2389,
"preview": "# 2024-01-11\n\nFirst meeting of the (revamped) infra team.\n\nParticipants: delroth, hexa, raitobezarius, vcunat, zimbatm\n\n"
},
{
"path": "docs/meeting-notes/2024-01-25.md",
"chars": 1633,
"preview": "# 2024-01-25\n\n## [hexa, delroth] EMS Migration\n\n- Configuration hasn’t been written yet, hexa might get it done this wee"
},
{
"path": "docs/meeting-notes/2024-02-08.md",
"chars": 5413,
"preview": "# 2024-02-08\n\nAttendees: delroth, hexa, JulienMalka, lheckemann, raitobezarius, vcunat,\nzimbatm\n\n## [hexa, delroth] EMS "
},
{
"path": "docs/meeting-notes/2024-02-22.md",
"chars": 7304,
"preview": "# 2024-02-08\n\nAttendees: delroth, edolstra, hexa, JulienMalka, raitobezarius, vcunat, zimbatm\n\n## [delroth] FYI on avail"
},
{
"path": "docs/meeting-notes/2024-03-07.md",
"chars": 5023,
"preview": "# 2024-03-07\n\nAttendees: hexa, vcunat, zimbatm (Jonas), Linus, Julien, Raito/Ryan, Jade (most\nof the time)\n\n## [hexa] ar"
},
{
"path": "docs/meeting-notes/2024-03-21.md",
"chars": 2177,
"preview": "# 2024-03-21\n\nAttendees: hexa, vcunat, Linus, zimbatm, Eelco, Janik, raitobezariusm, Alex\n\n## Round table\n\n- zimbatm\n -"
},
{
"path": "docs/meeting-notes/2024-04-18.md",
"chars": 7790,
"preview": "# 2024-04-18\n\nAttendees: delroth, Janik, dgrig, vcunat, raitobezarius, hexa, Linus, Weija\n\n## Topics\n\n- [delroth] Bringi"
},
{
"path": "docs/meeting-notes/2024-05-30.md",
"chars": 1721,
"preview": "# 2024-05-30\n\nAttendees: hexa, vcunat, zimbatm, kenji, sterni\n\n## Round table\n\n- [hexa]\n - Updating Hydra to Nix 2.20\n "
},
{
"path": "docs/meeting-notes/2024-06-13.md",
"chars": 2965,
"preview": "# 2024-06-13\n\nAttendees: hexa, vcunat, Julien (partially), Eelco\n\n## Round table\n\n- Julien\n - currently otherwise occup"
},
{
"path": "docs/meeting-notes/2024-06-27.md",
"chars": 2588,
"preview": "# 2024-06-27\n\nAttendees: edef, hexa, vcunat, zimbatm\n\n## Round table\n\n- hexa\n - Large PostgreSQL snapshot sizes caused "
},
{
"path": "docs/meeting-notes/2024-11-14.md",
"chars": 8834,
"preview": "# 2024-11-14\n\nAttendees: jkarni, zimbatm, mic92, infinisil, kenji, drig/erethon, arian, sam ,\nhexa, jeremy, jeff\n\n## Rou"
},
{
"path": "docs/meeting-notes/2025-04-03.md",
"chars": 2193,
"preview": "# 2025-04-03\n\nAttendees: dgrig/erethon, mic92, vcunat\n\n- erethon:\n - Tested umbriel email server -> works https://githu"
},
{
"path": "docs/meeting-notes/2025-04-17.md",
"chars": 3230,
"preview": "# 2025-04-17\n\nAtttendees: zimbatm, arian, erethon, hexa, Mic92, jfly\n\n- zimbatm:\n - Official leadership rotation to hex"
},
{
"path": "docs/meeting-notes/2025-05-01.md",
"chars": 3122,
"preview": "# 2025-05-01\n\nAttendees: hexa, mightyiam, mic92, jfly, picnoir, mightyiam\n\n- hexa:\n - hydra-server abuse management\n "
},
{
"path": "docs/meeting-notes/2025-05-15.md",
"chars": 1140,
"preview": "# 2025-05-15\n\nAttendees: erethon, hexa, Mic92\n\n- hexa:\n - E-Mail dogfooding: No obvious issues with sender accounts\n\n- "
},
{
"path": "docs/meeting-notes/2025-05-29.md",
"chars": 1816,
"preview": "# 2025-05-29\n\nAttendees: erethon jfly edef infinisil\n\n- erethon:\n - Sent ~600 emails on the 16th of May from ngi@nixos."
},
{
"path": "docs/meeting-notes/2025-06-12.md",
"chars": 1345,
"preview": "# 2025-06-12\n\nAttendees: hexa, Mic92, tal\n\n- erethon:\n - Can't attend today, but here's some updates\n - Security track"
},
{
"path": "flake.nix",
"chars": 3327,
"preview": "{\n description = \"NixOS.org infra\";\n\n nixConfig.extra-substituters = [ \"https://nixos-infra-dev.cachix.org\" ];\n nixCo"
},
{
"path": "formatter/flake-module.nix",
"chars": 994,
"preview": "{ inputs, ... }:\n{\n imports = [ inputs.treefmt-nix.flakeModule ];\n\n perSystem =\n { lib, pkgs, ... }:\n {\n tr"
},
{
"path": "lib/service-order.nix",
"chars": 1484,
"preview": "# Ordering Services\n#\n# Given a set of services, make them run one at a time in a specific\n# order, on a timer.\n{ }:\n{\n "
},
{
"path": "macs/README.md",
"chars": 2163,
"preview": "# Deploying to darwin\n\nSee [inventory](../docs/inventory.md).\n\n## Inventory\n\n### Obisdian Systems (US Hosting)\n\nThey are"
},
{
"path": "macs/common.nix",
"chars": 2428,
"preview": "# used with https://github.com/DeterminateSystems/macos-ephemeral\n{\n config,\n lib,\n pkgs,\n ...\n}:\n\nlet\n sshKeys = {"
},
{
"path": "macs/flake-module.nix",
"chars": 1161,
"preview": "{ inputs, ... }:\n{\n flake.darwinConfigurations =\n let\n mkNixDarwin =\n localHostName: entrypoint:\n "
},
{
"path": "macs/hydra-queue-builder.nix",
"chars": 662,
"preview": "{\n config,\n inputs,\n lib,\n ...\n}:\n\n{\n imports = [\n inputs.agenix.darwinModules.age\n inputs.hydra-staging.darw"
},
{
"path": "macs/mac-exec",
"chars": 510,
"preview": "#!/usr/bin/env bash\n\nHOSTS=(\n\t\"hetzner@enormous-catfish.mac.nixos.org\"\n\t\"hetzner@growing-jennet.mac.nixos.org\"\n\t\"hetzner"
},
{
"path": "macs/mac-update",
"chars": 570,
"preview": "#!/usr/bin/env bash\n\nPIDS=()\n\nupdate() {\n\tlocal HOST=${1}\n\tlocal PROFILE=${2}\n\t(ssh \"$HOST\" -- sudo darwin-rebuild switc"
},
{
"path": "macs/profiles/m1.nix",
"chars": 130,
"preview": "{\n # 8 Cores, 16 GB RAM, 256 GB Disk\n # split into 4 jobs with 2C/4G\n nix.settings = {\n cores = 2;\n max-jobs = "
},
{
"path": "macs/profiles/m2.large.nix",
"chars": 171,
"preview": "{\n # 8 Cores, 24 GB RAM, 1 TB Disk\n # split into 2 jobs with 4C/12G\n nix.settings = {\n cores = 4;\n max-jobs = 2"
},
{
"path": "metrics/fastly/README.md",
"chars": 4460,
"preview": "# Fastly log processing\n\nThis flake provides a systemd timer (`./cron.sh`) that every week:\n\n- Ingests raw Fastly logs f"
},
{
"path": "metrics/fastly/cron.sh",
"chars": 554,
"preview": "#!/usr/bin/env bash\n\nset -e\n\nexport AWS_PROFILE=nixos-org\n\nnow=$(date +%s)\n#now=$((now - 86400))\nprev_week=$((now / 8640"
},
{
"path": "metrics/fastly/flake.nix",
"chars": 674,
"preview": "{\n outputs =\n { }:\n {\n nixosModules.nix-metrics =\n { pkgs, ... }:\n {\n\n users.users.ni"
},
{
"path": "metrics/fastly/ingest-raw-logs.sh",
"chars": 2385,
"preview": "#!/usr/bin/env bash\n\nset -e\n\nregion=eu-west-1\n\nfrom_date_incl=\"$1\"\nto_date_excl=\"$2\"\n\n[[ -n $from_date_incl ]]\n[[ -n $to"
},
{
"path": "metrics/fastly/run-queries.sh",
"chars": 3753,
"preview": "#!/usr/bin/env bash\n\nset -e\n\nregion=eu-west-1\n\nreport_date=\"$(date +%Y-%m-%d)\"\n\nrun_query() {\n local name=\"$1\"\n local "
},
{
"path": "metrics/fastly/update-asn-list.sh",
"chars": 239,
"preview": "#! /bin/sh -e\n\ncurl --fail https://ftp.ripe.net/ripe/asnames/asn.txt >/tmp/asn.txt\n\nsed -e 's/^\\([0-9]\\+\\) \\(.\\+\\), \\([A"
},
{
"path": "modules/backup.nix",
"chars": 5212,
"preview": "{\n lib,\n config,\n pkgs,\n ...\n}:\n\nlet\n cfg = config.services.backup;\n\n mkZfsPreHook = mountpoint: ''\n DATASET=\"$"
},
{
"path": "modules/common.nix",
"chars": 532,
"preview": "{ pkgs, lib, ... }:\n\nwith lib;\n\n{\n imports = [ ./backup.nix ];\n\n time.timeZone = \"UTC\";\n\n users.mutableUsers = false;"
},
{
"path": "modules/hydra-mirror.nix",
"chars": 3189,
"preview": "{\n config,\n lib,\n pkgs,\n inputs,\n ...\n}:\n\nlet\n channels = (import ../channels.nix).channels-with-urls;\n\n orderLib"
},
{
"path": "modules/nftables.nix",
"chars": 538,
"preview": "{\n lib,\n ...\n}:\n\n{\n networking.nftables = {\n enable = true;\n tables.\"nixos-fw\".content = lib.mkBefore ''\n "
},
{
"path": "modules/prometheus/default.nix",
"chars": 1757,
"preview": "{\n config,\n pkgs,\n ...\n}:\n\nlet\n prometheus-nixos-exporter = pkgs.callPackage ./nixos-exporter { };\nin\n{\n services.p"
},
{
"path": "modules/prometheus/nixos-exporter/default.nix",
"chars": 277,
"preview": "{ python3Packages }:\n\nwith python3Packages;\n\nbuildPythonApplication {\n pname = \"prometheus-nixos-exporter\";\n version ="
},
{
"path": "modules/prometheus/nixos-exporter/prometheus_nixos_exporter/__main__.py",
"chars": 3836,
"preview": "#!/usr/bin/env nix-shell\n#!nix-shell -i python3 -p \"python3.withPackages (ps: with ps; [ prometheus-client packaging ])\""
},
{
"path": "modules/prometheus/nixos-exporter/pyproject.toml",
"chars": 370,
"preview": "[build-system]\nrequires = [\"setuptools\"]\nbuild-backend = \"setuptools.build_meta\"\n\n[project]\nname = \"prometheus-nixos-exp"
},
{
"path": "modules/prometheus/system-version-exporter.sh",
"chars": 590,
"preview": "#!/usr/bin/env bash\n\nset -euo pipefail\n\nreadonly VERSION\nVERSION=\"$(cat /run/current-system/nixos-version)\"\nreadonly CUR"
},
{
"path": "modules/rasdaemon.nix",
"chars": 562,
"preview": "{\n config,\n ...\n}:\n\n{\n hardware.rasdaemon = {\n enable = true;\n record = true;\n };\n\n services.prometheus.expor"
},
{
"path": "modules/rfc39.nix",
"chars": 2486,
"preview": "# This module fetches nixpkgs master and syncs the GitHub maintainer team.\n{ config, pkgs, ... }:\nlet\n rfc39Secret = f:"
},
{
"path": "modules/tarball-mirror.nix",
"chars": 2271,
"preview": "# This module mirrors most tarballs reachable from Nixpkgs's\n# release.nix to the content-addressed tarball cache at\n# t"
},
{
"path": "modules/tarball-mirror.patch",
"chars": 5519,
"preview": "From 89093ba05e6f9710aa0dcb500f6226f1be80cc86 Mon Sep 17 00:00:00 2001\nFrom: =?UTF-8?q?J=C3=B6rg=20Thalheim?= <joerg@tha"
},
{
"path": "non-critical-infra/.envrc",
"chars": 55,
"preview": "# shellcheck shell=bash\nuse flake .#non-critical-infra\n"
},
{
"path": "non-critical-infra/.sops.yaml",
"chars": 1503,
"preview": "keys:\n - &hexa age1j3mkgedmeru63vwww6m44zfw09tg8yw6xdzstaq7ejfkvgcau40qwakm8x\n - &zimbatm age1jrh8yyq3swjru09s75s4mspu"
},
{
"path": "non-critical-infra/README.md",
"chars": 842,
"preview": "# Non-critical-infra\n\nThis folder of the repository contains all files relative to the non-critical\ninfra team. Machines"
},
{
"path": "non-critical-infra/colmena.sh",
"chars": 79,
"preview": "#!/usr/bin/env bash\nset -euo pipefail\n\ncd \"$(dirname \"$0\")\"\ncolmena apply \"$@\"\n"
},
{
"path": "non-critical-infra/flake-module.nix",
"chars": 1179,
"preview": "{\n inputs,\n lib,\n ...\n}:\n{\n colmena.hosts = {\n caliban = { };\n umbriel = { };\n staging-hydra = { };\n };\n "
},
{
"path": "non-critical-infra/hosts/caliban/default.nix",
"chars": 2176,
"preview": "{\n config,\n inputs,\n lib,\n ...\n}:\n\n{\n imports = [\n ./hardware.nix\n inputs.srvos.nixosModules.server\n input"
},
{
"path": "non-critical-infra/hosts/caliban/disko.nix",
"chars": 1281,
"preview": "let\n partitions = {\n grub = {\n priority = 1;\n start = \"0\";\n end = \"1M\";\n type = \"EF02\";\n };\n "
},
{
"path": "non-critical-infra/hosts/caliban/hardware.nix",
"chars": 301,
"preview": "{ config, lib, ... }:\n\n{\n\n boot.initrd.kernelModules = [ ];\n boot.kernelModules = [ ];\n boot.extraModulePackages = [ "
},
{
"path": "non-critical-infra/hosts/caliban/nixpkgs-swh.nix",
"chars": 514,
"preview": "{ inputs, config, ... }:\n{\n imports = [\n inputs.nixpkgs-swh.nixosModules.nixpkgs-swh\n ];\n services = {\n nixpkgs"
},
{
"path": "non-critical-infra/hosts/staging-hydra/bootstrap-staging-hydra.sh",
"chars": 880,
"preview": "#!/usr/bin/env bash\n\n# Bootstrap staging-hydra on nixos.lysator.liu.se (130.236.254.207).\n#\n# WARNING: nixos-anywhere wi"
},
{
"path": "non-critical-infra/hosts/staging-hydra/ca.crt",
"chars": 619,
"preview": "-----BEGIN CERTIFICATE-----\nMIIBnTCCAU+gAwIBAgIUQpxYsPwAyTY70yYO9fcCmCaZreIwBQYDK2VwMEMxCzAJ\nBgNVBAYTAkRFMRQwEgYDVQQKDAt"
},
{
"path": "non-critical-infra/hosts/staging-hydra/default.nix",
"chars": 3398,
"preview": "{ inputs, ... }:\n{\n imports = [\n ./hardware.nix\n inputs.srvos.nixosModules.server\n ../../modules/common.nix\n "
},
{
"path": "non-critical-infra/hosts/staging-hydra/disko.nix",
"chars": 1532,
"preview": "# Matches the existing disk layout on nixos.lysator.liu.se:\n# 3x 1.8T disks in raidz1 ZFS pool \"tank\", each with a 1G EF"
},
{
"path": "non-critical-infra/hosts/staging-hydra/genca.sh",
"chars": 1638,
"preview": "#!/usr/bin/env bash\n\nset -x\n\nhosts=\"localhost ofborg-eval02 ofborg-eval03 ofborg-eval04 ofborg-build01 ofborg-build02 of"
},
{
"path": "non-critical-infra/hosts/staging-hydra/hardware.nix",
"chars": 553,
"preview": "{\n config,\n lib,\n modulesPath,\n ...\n}:\n{\n imports = [\n (modulesPath + \"/installer/scan/not-detected.nix\")\n ];\n\n"
},
{
"path": "non-critical-infra/hosts/staging-hydra/hydra-proxy.nix",
"chars": 2230,
"preview": "{\n config,\n lib,\n pkgs,\n ...\n}:\n\nlet\n bannedUserAgentPatterns = [\n \"Trident/\"\n \"Android\\\\s[123456789]\\\\.\"\n "
},
{
"path": "non-critical-infra/hosts/staging-hydra/hydra.nix",
"chars": 7544,
"preview": "{\n lib,\n pkgs,\n config,\n inputs,\n ...\n}:\nlet\n narCache = \"/var/cache/hydra/nar-cache\";\n localSystems = [\n \"bui"
},
{
"path": "non-critical-infra/hosts/staging-hydra/server.crt",
"chars": 753,
"preview": "-----BEGIN CERTIFICATE-----\nMIIB/jCCAbCgAwIBAgIUfUYjDOaJML1lIMkAMvLjnSTscYswBQYDK2VwMEMxCzAJ\nBgNVBAYTAkRFMRQwEgYDVQQKDAt"
},
{
"path": "non-critical-infra/hosts/umbriel/README.md",
"chars": 12,
"preview": "# `umbriel`\n"
},
{
"path": "non-critical-infra/hosts/umbriel/default.nix",
"chars": 2547,
"preview": "{\n config,\n inputs,\n lib,\n ...\n}:\n\n{\n imports = [\n ./hardware.nix\n inputs.srvos.nixosModules.server\n input"
},
{
"path": "non-critical-infra/hosts/umbriel/disko.nix",
"chars": 1171,
"preview": "{\n disk = {\n main = {\n device = \"/dev/sda\";\n type = \"disk\";\n content = {\n type = \"gpt\";\n "
},
{
"path": "non-critical-infra/hosts/umbriel/hardware.nix",
"chars": 313,
"preview": "{ lib, ... }:\n\n{\n\n boot.initrd.availableKernelModules = [\n \"xhci_pci\"\n \"virtio_pci\"\n \"usbhid\"\n \"sr_mod\"\n ]"
},
{
"path": "non-critical-infra/modules/backup.nix",
"chars": 5212,
"preview": "{\n lib,\n config,\n pkgs,\n ...\n}:\n\nlet\n cfg = config.services.backup;\n\n mkZfsPreHook = mountpoint: ''\n DATASET=\"$"
},
{
"path": "non-critical-infra/modules/common.nix",
"chars": 434,
"preview": "{ pkgs, ... }:\n\n{\n imports = [\n ../../modules/nftables.nix\n ../../modules/prometheus\n ];\n\n boot.initrd.systemd."
},
{
"path": "non-critical-infra/modules/draupnir.nix",
"chars": 1837,
"preview": "{\n config,\n ...\n}:\n{\n sops.secrets.mjolnir-access-token = {\n sopsFile = ../secrets/mjolnir-access-token.caliban;\n "
},
{
"path": "non-critical-infra/modules/element-web.nix",
"chars": 1922,
"preview": "{ pkgs, ... }:\nlet\n domainName = \"chat.nixos.org\";\n\n # https://github.com/element-hq/element-web/blob/develop/config.s"
},
{
"path": "non-critical-infra/modules/limesurvey.nix",
"chars": 651,
"preview": "{\n config,\n ...\n}:\n{\n services.limesurvey = {\n enable = true;\n encryptionKeyFile = config.sops.secrets.limesurv"
},
{
"path": "non-critical-infra/modules/mailserver/README.md",
"chars": 354,
"preview": "# NixOS mailserver\n\nThis module provides mail services for `nixos.org`.\n\n## Mailing lists\n\nTo create a new mailing list,"
},
{
"path": "non-critical-infra/modules/mailserver/default.nix",
"chars": 4072,
"preview": "{\n inputs,\n config,\n pkgs,\n ...\n}:\n\n{\n imports = [\n inputs.simple-nixos-mailserver.nixosModule\n ./mailing-lis"
},
{
"path": "non-critical-infra/modules/mailserver/freescout.nix",
"chars": 1460,
"preview": "{\n inputs,\n config,\n pkgs,\n ...\n}:\n\n{\n imports = [\n inputs.freescout.nixosModules.freescout\n ../nginx.nix\n ]"
},
{
"path": "non-critical-infra/modules/mailserver/mailing-lists-options.nix",
"chars": 5232,
"preview": "# This module makes it easy to define mailing lists in `simple-nixos-mailserver`\n# with a couple of features:\n#\n# 1. We"
},
{
"path": "non-critical-infra/modules/mailserver/mailing-lists.nix",
"chars": 7214,
"preview": "{\n imports = [ ./mailing-lists-options.nix ];\n\n # If you wish to hide your email address, you can encrypt it with SOPS"
}
]
// ... and 181 more files (omitted from this preview)
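The previews above are cut off at a fixed character limit, so none of the files is reproduced in full. As a rough sketch of how the non-critical-infra machines are wired together, the fragments below restate only what is visible in the previews of non-critical-infra/flake-module.nix and non-critical-infra/hosts/staging-hydra/default.nix: hosts are registered once as colmena deployment targets, and each host's default.nix imports its hardware config, the srvos server profile, and the shared common.nix module. Everything past the truncation points (the remaining imports and per-host options) is an elision in the source and is not filled in here.

# Sketch reconstructed from the truncated previews above, not verbatim file
# contents.

# non-critical-infra/flake-module.nix (visible fragment): each colmena host
# starts as an empty attribute set; its real configuration lives under
# hosts/<name>/.
{
  inputs,
  lib,
  ...
}:
{
  colmena.hosts = {
    caliban = { };
    umbriel = { };
    staging-hydra = { };
  };
}

# non-critical-infra/hosts/staging-hydra/default.nix (visible fragment):
# hardware config, the srvos server profile, and the shared module set.
{ inputs, ... }:
{
  imports = [
    ./hardware.nix
    inputs.srvos.nixosModules.server
    ../../modules/common.nix
    # ... remaining imports are cut off in the preview
  ];
}

Deployment itself is driven by non-critical-infra/colmena.sh, whose preview shows the whole script: it changes into its own directory and runs colmena apply with any extra arguments passed through.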
About this extraction
This page is a plain-text extraction of the NixOS/nixos-org-configurations GitHub repository, formatted for AI agents and large language models (LLMs). The full extraction covers 381 files (799.2 KB, approximately 329.5k tokens) together with a symbol index of 56 extracted functions, classes, methods, constants, and types; the listing above is only a preview of it.