[
  {
    "path": ".dockerignore",
    "content": "// integration tests are not needed in docker\n// ignoring it let us speed up the integration test\n// development\nintegration_test.go\nintegration_test/\n!integration_test/etc_embedded_derp/tls/server.crt\n\nDockerfile*\ndocker-compose*\n.dockerignore\n.goreleaser.yml\n.git\n.github\n.gitignore\nREADME.md\nLICENSE\n.vscode\n\n*.sock\n\nnode_modules/\npackage-lock.json\npackage.json\n"
  },
  {
    "path": ".editorconfig",
    "content": "root = true\n\n[*]\ncharset = utf-8\nend_of_line = lf\nindent_size = 2\nindent_style = space\ninsert_final_newline = true\ntrim_trailing_whitespace = true\nmax_line_length = 120\n\n[*.go]\nindent_style = tab\n\n[Makefile]\nindent_style = tab\n"
  },
  {
    "path": ".envrc",
    "content": "use flake\n"
  },
  {
    "path": ".github/CODEOWNERS",
    "content": "* @juanfont @kradalby\n\n*.md @ohdearaugustin @nblock\n*.yml @ohdearaugustin @nblock\n*.yaml @ohdearaugustin @nblock\nDockerfile* @ohdearaugustin @nblock\n.goreleaser.yaml @ohdearaugustin @nblock\n/docs/ @ohdearaugustin @nblock\n/.github/workflows/ @ohdearaugustin @nblock\n/.github/renovate.json @ohdearaugustin @nblock\n"
  },
  {
    "path": ".github/FUNDING.yml",
    "content": "# These are supported funding model platforms\n\nko_fi: headscale\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/bug_report.yaml",
    "content": "name: 🐞 Bug\ndescription: File a bug/issue\ntitle: \"[Bug] <title>\"\nlabels: [\"bug\", \"needs triage\"]\nbody:\n  - type: checkboxes\n    attributes:\n      label: Is this a support request?\n      description: This issue tracker is for bugs and feature requests only. If you need\n        help, please use ask in our Discord community\n      options:\n        - label: This is not a support request\n          required: true\n  - type: checkboxes\n    attributes:\n      label: Is there an existing issue for this?\n      description: Please search to see if an issue already exists for the bug you\n        encountered.\n      options:\n        - label: I have searched the existing issues\n          required: true\n  - type: textarea\n    attributes:\n      label: Current Behavior\n      description: A concise description of what you're experiencing.\n    validations:\n      required: true\n  - type: textarea\n    attributes:\n      label: Expected Behavior\n      description: A concise description of what you expected to happen.\n    validations:\n      required: true\n  - type: textarea\n    attributes:\n      label: Steps To Reproduce\n      description: Steps to reproduce the behavior.\n      placeholder: |\n        1. In this environment...\n        1. With this config...\n        1. Run '...'\n        1. See error...\n    validations:\n      required: true\n  - type: textarea\n    attributes:\n      label: Environment\n      description: |\n        Please provide information about your environment.\n        If you are using a container, always provide the headscale version and not only the Docker image version.\n        Please do not put \"latest\".\n\n        Describe your \"headscale network\". Is there a lot of nodes, are the nodes all interconnected, are some subnet routers?\n\n        If you are experiencing a problem during an upgrade, please provide the versions of the old and new versions of Headscale and Tailscale.\n\n        examples:\n          - **OS**: Ubuntu 24.04\n          - **Headscale version**: 0.24.3\n          - **Tailscale version**: 1.80.0\n          - **Number of nodes**: 20\n      value: |\n        - OS:\n        - Headscale version:\n        - Tailscale version:\n      render: markdown\n    validations:\n      required: true\n  - type: checkboxes\n    attributes:\n      label: Runtime environment\n      options:\n        - label: Headscale is behind a (reverse) proxy\n          required: false\n        - label: Headscale runs in a container\n          required: false\n  - type: textarea\n    attributes:\n      label: Debug information\n      description: |\n        Please have a look at our [Debugging and troubleshooting\n        guide](https://headscale.net/development/ref/debug/) to learn about\n        common debugging techniques.\n\n        Links? References? 
Anything that will give us more context about the issue you are encountering.\n        If **any** of these are omitted we will likely close your issue, do **not** ignore them.\n\n        - Client netmap dump (see below)\n        - Policy configuration\n        - Headscale configuration\n        - Headscale log (with `trace` enabled)\n\n        Dump the netmap of tailscale clients:\n        `tailscale debug netmap > DESCRIPTIVE_NAME.json`\n\n        Dump the status of tailscale clients:\n        `tailscale status --json > DESCRIPTIVE_NAME.json`\n\n        Get the logs of a Tailscale client that is not working as expected.\n        `tailscale debug daemon-logs`\n\n        Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.\n        **Ensure** you use formatting for files you attach.\n        Do **not** paste in long files.\n    validations:\n      required: true\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/config.yml",
    "content": "# Issues must have some content\nblank_issues_enabled: false\n\n# Contact links\ncontact_links:\n  - name: \"headscale Discord community\"\n    url: \"https://discord.gg/c84AZQhmpx\"\n    about: \"Please ask and answer questions about usage of headscale here.\"\n  - name: \"headscale usage documentation\"\n    url: \"https://headscale.net/\"\n    about: \"Find documentation about how to configure and run headscale.\"\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/feature_request.yaml",
    "content": "name: 🚀 Feature Request\ndescription: Suggest an idea for Headscale\ntitle: \"[Feature] <title>\"\nlabels: [enhancement]\nbody:\n  - type: textarea\n    attributes:\n      label: Use case\n      description: Please describe the use case for this feature.\n      placeholder: |\n        <!-- Include the reason, why you would need the feature. E.g. what problem\n          does it solve? Or which workflow is currently frustrating and will be improved by\n          this? -->\n    validations:\n      required: true\n  - type: textarea\n    attributes:\n      label: Description\n      description: A clear and precise description of what new or changed feature you want.\n    validations:\n      required: true\n  - type: checkboxes\n    attributes:\n      label: Contribution\n      description: Are you willing to contribute to the implementation of this feature?\n      options:\n        - label: I can write the design doc for this feature\n          required: false\n        - label: I can contribute this feature\n          required: false\n  - type: textarea\n    attributes:\n      label: How can it be implemented?\n      description: Free text for your ideas on how this feature could be implemented.\n    validations:\n      required: false\n"
  },
  {
    "path": ".github/label-response/needs-more-info.md",
    "content": "Thank you for taking the time to report this issue.\n\nTo help us investigate and resolve this, we need more information. Please provide the following:\n\n> [!TIP]\n> Most issues turn out to be configuration errors rather than bugs. We encourage you to discuss your problem in our [Discord community](https://discord.gg/c84AZQhmpx) **before** opening an issue. The community can often help identify misconfigurations quickly, saving everyone time.\n\n## Required Information\n\n### Environment Details\n\n- **Headscale version**: (run `headscale version`)\n- **Tailscale client version**: (run `tailscale version`)\n- **Operating System**: (e.g., Ubuntu 24.04, macOS 14, Windows 11)\n- **Deployment method**: (binary, Docker, Kubernetes, etc.)\n- **Reverse proxy**: (if applicable: nginx, Traefik, Caddy, etc. - include configuration)\n\n### Debug Information\n\nPlease follow our [Debugging and Troubleshooting Guide](https://headscale.net/stable/ref/debug/) and provide:\n\n1. **Client netmap dump** (from affected Tailscale client):\n\n   ```bash\n   tailscale debug netmap > netmap.json\n   ```\n\n2. **Client status dump** (from affected Tailscale client):\n\n   ```bash\n   tailscale status --json > status.json\n   ```\n\n3. **Tailscale client logs** (if experiencing client issues):\n\n   ```bash\n   tailscale debug daemon-logs\n   ```\n\n   > [!IMPORTANT]\n   > We need logs from **multiple nodes** to understand the full picture:\n   >\n   > - The node(s) initiating connections\n   > - The node(s) being connected to\n   >\n   > Without logs from both sides, we cannot diagnose connectivity issues.\n\n4. **Headscale server logs** with `log.level: trace` enabled\n\n5. **Headscale configuration** (with sensitive values redacted - see rules below)\n\n6. **ACL/Policy configuration** (if using ACLs)\n\n7. **Proxy/Docker configuration** (if applicable - nginx.conf, docker-compose.yml, Traefik config, etc.)\n\n## Formatting Requirements\n\n- **Attach long files** - Do not paste large logs or configurations inline. Use GitHub file attachments or GitHub Gists.\n- **Use proper Markdown** - Format code blocks, logs, and configurations with appropriate syntax highlighting.\n- **Structure your response** - Use the headings above to organize your information clearly.\n\n## Redaction Rules\n\n> [!CAUTION]\n> **Replace, do not remove.** Removing information makes debugging impossible.\n\nWhen redacting sensitive information:\n\n- ✅ **Replace consistently** - If you change `alice@company.com` to `user1@example.com`, use `user1@example.com` everywhere (logs, config, policy, etc.)\n- ✅ **Use meaningful placeholders** - `user1@example.com`, `bob@example.com`, `my-secret-key` are acceptable\n- ❌ **Never remove information** - Gaps in data prevent us from correlating events across logs\n- ❌ **Never redact IP addresses** - We need the actual IPs to trace network paths and identify issues\n\n**If redaction rules are not followed, we will be unable to debug the issue and will have to close it.**\n\n---\n\n**Note:** This issue will be automatically closed in 3 days if no additional information is provided. Once you reply with the requested information, the `needs-more-info` label will be removed automatically.\n\nIf you need help gathering this information, please visit our [Discord community](https://discord.gg/c84AZQhmpx).\n"
  },
  {
    "path": ".github/label-response/support-request.md",
    "content": "Thank you for reaching out.\n\nThis issue tracker is used for **bug reports and feature requests** only. Your question appears to be a support or configuration question rather than a bug report.\n\nFor help with setup, configuration, or general questions, please visit our [Discord community](https://discord.gg/c84AZQhmpx) where the community and maintainers can assist you in real-time.\n\n**Before posting in Discord, please check:**\n\n- [Documentation](https://headscale.net/)\n- [FAQ](https://headscale.net/stable/faq/)\n- [Debugging and Troubleshooting Guide](https://headscale.net/stable/ref/debug/)\n\nIf after troubleshooting you determine this is actually a bug, please open a new issue with the required debug information from the troubleshooting guide.\n\nThis issue has been automatically closed.\n"
  },
  {
    "path": ".github/pull_request_template.md",
    "content": "<!--\nHeadscale is \"Open Source, acknowledged contribution\", this means that any\ncontribution will have to be discussed with the Maintainers before being submitted.\n\nThis model has been chosen to reduce the risk of burnout by limiting the\nmaintenance overhead of reviewing and validating third-party code.\n\nHeadscale is open to code contributions for bug fixes without discussion.\n\nIf you find mistakes in the documentation, please submit a fix to the documentation.\n-->\n\n<!-- Please tick if the following things apply. You… -->\n\n- [ ] have read the [CONTRIBUTING.md](./CONTRIBUTING.md) file\n- [ ] raised a GitHub issue or discussed it on the projects chat beforehand\n- [ ] added unit tests\n- [ ] added integration tests\n- [ ] updated documentation if needed\n- [ ] updated CHANGELOG.md\n\n<!-- If applicable, please reference the issue using `Fixes #XXX` and add tests to cover your new code. -->\n"
  },
  {
    "path": ".github/renovate.json",
    "content": "{\n  \"baseBranches\": [\"main\"],\n  \"username\": \"renovate-release\",\n  \"gitAuthor\": \"Renovate Bot <bot@renovateapp.com>\",\n  \"branchPrefix\": \"renovateaction/\",\n  \"onboarding\": false,\n  \"extends\": [\"config:base\", \":rebaseStalePrs\"],\n  \"ignorePresets\": [\":prHourlyLimit2\"],\n  \"enabledManagers\": [\"dockerfile\", \"gomod\", \"github-actions\", \"regex\"],\n  \"includeForks\": true,\n  \"repositories\": [\"juanfont/headscale\"],\n  \"platform\": \"github\",\n  \"packageRules\": [\n    {\n      \"matchDatasources\": [\"go\"],\n      \"groupName\": \"Go modules\",\n      \"groupSlug\": \"gomod\",\n      \"separateMajorMinor\": false\n    },\n    {\n      \"matchDatasources\": [\"docker\"],\n      \"groupName\": \"Dockerfiles\",\n      \"groupSlug\": \"dockerfiles\"\n    }\n  ],\n  \"regexManagers\": [\n    {\n      \"fileMatch\": [\".github/workflows/.*.yml$\"],\n      \"matchStrings\": [\"\\\\s*go-version:\\\\s*\\\"?(?<currentValue>.*?)\\\"?\\\\n\"],\n      \"datasourceTemplate\": \"golang-version\",\n      \"depNameTemplate\": \"actions/go-version\"\n    }\n  ]\n}\n"
  },
  {
    "path": ".github/workflows/build.yml",
    "content": "name: Build\n\non:\n  push:\n    branches:\n      - main\n  pull_request:\n\nconcurrency:\n  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}\n  cancel-in-progress: true\n\njobs:\n  build-nix:\n    runs-on: ubuntu-latest\n    permissions: write-all\n    steps:\n      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1\n        with:\n          fetch-depth: 2\n      - name: Get changed files\n        id: changed-files\n        uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2\n        with:\n          filters: |\n            files:\n              - '*.nix'\n              - 'go.*'\n              - '**/*.go'\n              - 'integration_test/'\n              - 'config-example.yaml'\n      - uses: nixbuild/nix-quick-install-action@2c9db80fb984ceb1bcaa77cdda3fdf8cfba92035 # v34\n        if: steps.changed-files.outputs.files == 'true'\n      - uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3\n        if: steps.changed-files.outputs.files == 'true'\n        with:\n          primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',\n            '**/flake.lock') }}\n          restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}\n\n      - name: Run nix build\n        id: build\n        if: steps.changed-files.outputs.files == 'true'\n        run: |\n          nix build |& tee build-result\n          BUILD_STATUS=\"${PIPESTATUS[0]}\"\n\n          OLD_HASH=$(cat build-result | grep specified: | awk -F ':' '{print $2}' | sed 's/ //g')\n          NEW_HASH=$(cat build-result | grep got: | awk -F ':' '{print $2}' | sed 's/ //g')\n\n          echo \"OLD_HASH=$OLD_HASH\" >> $GITHUB_OUTPUT\n          echo \"NEW_HASH=$NEW_HASH\" >> $GITHUB_OUTPUT\n\n          exit $BUILD_STATUS\n\n      - name: Nix gosum diverging\n        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0\n        if: failure() && steps.build.outcome == 'failure'\n        with:\n          github-token: ${{secrets.GITHUB_TOKEN}}\n          script: |\n            github.rest.pulls.createReviewComment({\n              pull_number: context.issue.number,\n              owner: context.repo.owner,\n              repo: context.repo.repo,\n              body: 'Nix build failed with wrong gosum, please update \"vendorSha256\" (${{ steps.build.outputs.OLD_HASH }}) for the \"headscale\" package in flake.nix with the new SHA: ${{ steps.build.outputs.NEW_HASH }}'\n            })\n\n      - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0\n        if: steps.changed-files.outputs.files == 'true'\n        with:\n          name: headscale-linux\n          path: result/bin/headscale\n  build-cross:\n    runs-on: ubuntu-latest\n    strategy:\n      matrix:\n        env:\n          - \"GOARCH=arm64 GOOS=linux\"\n          - \"GOARCH=amd64 GOOS=linux\"\n          - \"GOARCH=arm64 GOOS=darwin\"\n          - \"GOARCH=amd64 GOOS=darwin\"\n    steps:\n      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1\n      - uses: nixbuild/nix-quick-install-action@2c9db80fb984ceb1bcaa77cdda3fdf8cfba92035 # v34\n      - uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3\n        with:\n          primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',\n            '**/flake.lock') }}\n          restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}\n\n      - 
name: Run go cross compile\n        env:\n          CGO_ENABLED: 0\n        run: env ${{ matrix.env }} nix develop --command -- go build -o \"headscale\"\n          ./cmd/headscale\n      - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0\n        with:\n          name: \"headscale-${{ matrix.env }}\"\n          path: \"headscale\"\n"
  },
  {
    "path": ".github/workflows/check-generated.yml",
    "content": "name: Check Generated Files\n\non:\n  push:\n    branches:\n      - main\n  pull_request:\n    branches:\n      - main\n\nconcurrency:\n  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}\n  cancel-in-progress: true\n\njobs:\n  check-generated:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1\n        with:\n          fetch-depth: 2\n      - name: Get changed files\n        id: changed-files\n        uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2\n        with:\n          filters: |\n            files:\n              - '*.nix'\n              - 'go.*'\n              - '**/*.go'\n              - '**/*.proto'\n              - 'buf.gen.yaml'\n              - 'tools/**'\n      - uses: nixbuild/nix-quick-install-action@2c9db80fb984ceb1bcaa77cdda3fdf8cfba92035 # v34\n        if: steps.changed-files.outputs.files == 'true'\n      - uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3\n        if: steps.changed-files.outputs.files == 'true'\n        with:\n          primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}\n          restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}\n\n      - name: Run make generate\n        if: steps.changed-files.outputs.files == 'true'\n        run: nix develop --command -- make generate\n\n      - name: Check for uncommitted changes\n        if: steps.changed-files.outputs.files == 'true'\n        run: |\n          if ! git diff --exit-code; then\n            echo \"❌ Generated files are not up to date!\"\n            echo \"Please run 'make generate' and commit the changes.\"\n            exit 1\n          else\n            echo \"✅ All generated files are up to date.\"\n          fi\n"
  },
  {
    "path": ".github/workflows/check-tests.yaml",
    "content": "name: Check integration tests workflow\n\non: [pull_request]\n\nconcurrency:\n  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}\n  cancel-in-progress: true\n\njobs:\n  check-tests:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1\n        with:\n          fetch-depth: 2\n      - name: Get changed files\n        id: changed-files\n        uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2\n        with:\n          filters: |\n            files:\n              - '*.nix'\n              - 'go.*'\n              - '**/*.go'\n              - 'integration_test/'\n              - 'config-example.yaml'\n      - uses: nixbuild/nix-quick-install-action@2c9db80fb984ceb1bcaa77cdda3fdf8cfba92035 # v34\n        if: steps.changed-files.outputs.files == 'true'\n      - uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3\n        if: steps.changed-files.outputs.files == 'true'\n        with:\n          primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',\n            '**/flake.lock') }}\n          restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}\n\n      - name: Generate and check integration tests\n        if: steps.changed-files.outputs.files == 'true'\n        run: |\n          nix develop --command bash -c \"cd .github/workflows && go generate\"\n          git diff --exit-code .github/workflows/test-integration.yaml\n\n      - name: Show missing tests\n        if: failure()\n        run: |\n          git diff .github/workflows/test-integration.yaml\n"
  },
  {
    "path": ".github/workflows/docs-deploy.yml",
    "content": "name: Deploy docs\n\non:\n  push:\n    branches:\n      # Main branch for development docs\n      - main\n\n      # Doc maintenance branches\n      - doc/[0-9]+.[0-9]+.[0-9]+\n    tags:\n      # Stable release tags\n      - v[0-9]+.[0-9]+.[0-9]+\n    paths:\n      - \"docs/**\"\n      - \"mkdocs.yml\"\n  workflow_dispatch:\n\njobs:\n  deploy:\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout repository\n        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1\n        with:\n          fetch-depth: 0\n      - name: Install python\n        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0\n        with:\n          python-version: 3.x\n      - name: Setup cache\n        uses: actions/cache@a7833574556fa59680c1b7cb190c1735db73ebf0 # v5.0.0\n        with:\n          key: ${{ github.ref }}\n          path: .cache\n      - name: Setup dependencies\n        run: pip install -r docs/requirements.txt\n      - name: Configure git\n        run: |\n          git config user.name github-actions\n          git config user.email github-actions@github.com\n      - name: Deploy development docs\n        if: github.ref == 'refs/heads/main'\n        run: mike deploy --push development unstable\n      - name: Deploy stable docs from doc branches\n        if: startsWith(github.ref, 'refs/heads/doc/')\n        run: mike deploy --push ${GITHUB_REF_NAME##*/}\n      - name: Deploy stable docs from tag\n        if: startsWith(github.ref, 'refs/tags/v')\n        # This assumes that only newer tags are pushed\n        run: mike deploy --push --update-aliases ${GITHUB_REF_NAME#v} stable latest\n"
  },
  {
    "path": ".github/workflows/docs-test.yml",
    "content": "name: Test documentation build\n\non: [pull_request]\n\nconcurrency:\n  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}\n  cancel-in-progress: true\n\njobs:\n  test:\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout repository\n        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1\n      - name: Install python\n        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0\n        with:\n          python-version: 3.x\n      - name: Setup cache\n        uses: actions/cache@a7833574556fa59680c1b7cb190c1735db73ebf0 # v5.0.0\n        with:\n          key: ${{ github.ref }}\n          path: .cache\n      - name: Setup dependencies\n        run: pip install -r docs/requirements.txt\n      - name: Build docs\n        run: mkdocs build --strict\n"
  },
  {
    "path": ".github/workflows/gh-action-integration-generator.go",
    "content": "package main\n\n//go:generate go run ./gh-action-integration-generator.go\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"os/exec\"\n\t\"strings\"\n)\n\n// testsToSplit defines tests that should be split into multiple CI jobs.\n// Key is the test function name, value is a list of subtest prefixes.\n// Each prefix becomes a separate CI job as \"TestName/prefix\".\n//\n// Example: TestAutoApproveMultiNetwork has subtests like:\n//   - TestAutoApproveMultiNetwork/authkey-tag-advertiseduringup-false-pol-database\n//   - TestAutoApproveMultiNetwork/webauth-user-advertiseduringup-true-pol-file\n//\n// Splitting by approver type (tag, user, group) creates 6 CI jobs with 4 tests each:\n//   - TestAutoApproveMultiNetwork/authkey-tag.* (4 tests)\n//   - TestAutoApproveMultiNetwork/authkey-user.* (4 tests)\n//   - TestAutoApproveMultiNetwork/authkey-group.* (4 tests)\n//   - TestAutoApproveMultiNetwork/webauth-tag.* (4 tests)\n//   - TestAutoApproveMultiNetwork/webauth-user.* (4 tests)\n//   - TestAutoApproveMultiNetwork/webauth-group.* (4 tests)\n//\n// This reduces load per CI job (4 tests instead of 12) to avoid infrastructure\n// flakiness when running many sequential Docker-based integration tests.\nvar testsToSplit = map[string][]string{\n\t\"TestAutoApproveMultiNetwork\": {\n\t\t\"authkey-tag\",\n\t\t\"authkey-user\",\n\t\t\"authkey-group\",\n\t\t\"webauth-tag\",\n\t\t\"webauth-user\",\n\t\t\"webauth-group\",\n\t},\n}\n\n// expandTests takes a list of test names and expands any that need splitting\n// into multiple subtest patterns.\nfunc expandTests(tests []string) []string {\n\tvar expanded []string\n\tfor _, test := range tests {\n\t\tif prefixes, ok := testsToSplit[test]; ok {\n\t\t\t// This test should be split into multiple jobs.\n\t\t\t// We append \".*\" to each prefix because the CI runner wraps patterns\n\t\t\t// with ^...$ anchors. 
Without \".*\", a pattern like \"authkey$\" wouldn't\n\t\t\t// match \"authkey-tag-advertiseduringup-false-pol-database\".\n\t\t\tfor _, prefix := range prefixes {\n\t\t\t\texpanded = append(expanded, fmt.Sprintf(\"%s/%s.*\", test, prefix))\n\t\t\t}\n\t\t} else {\n\t\t\texpanded = append(expanded, test)\n\t\t}\n\t}\n\treturn expanded\n}\n\nfunc findTests() []string {\n\trgBin, err := exec.LookPath(\"rg\")\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to find rg (ripgrep) binary\")\n\t}\n\n\targs := []string{\n\t\t\"--regexp\", \"func (Test.+)\\\\(.*\",\n\t\t\"../../integration/\",\n\t\t\"--replace\", \"$1\",\n\t\t\"--sort\", \"path\",\n\t\t\"--no-line-number\",\n\t\t\"--no-filename\",\n\t\t\"--no-heading\",\n\t}\n\n\tcmd := exec.Command(rgBin, args...)\n\tvar out bytes.Buffer\n\tcmd.Stdout = &out\n\terr = cmd.Run()\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to run command: %s\", err)\n\t}\n\n\ttests := strings.Split(strings.TrimSpace(out.String()), \"\\n\")\n\treturn tests\n}\n\nfunc updateYAML(tests []string, jobName string, testPath string) {\n\ttestsForYq := fmt.Sprintf(\"[%s]\", strings.Join(tests, \", \"))\n\n\tyqCommand := fmt.Sprintf(\n\t\t\"yq eval '.jobs.%s.strategy.matrix.test = %s' %s -i\",\n\t\tjobName,\n\t\ttestsForYq,\n\t\ttestPath,\n\t)\n\tcmd := exec.Command(\"bash\", \"-c\", yqCommand)\n\n\tvar stdout bytes.Buffer\n\tvar stderr bytes.Buffer\n\tcmd.Stdout = &stdout\n\tcmd.Stderr = &stderr\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Printf(\"stdout: %s\", stdout.String())\n\t\tlog.Printf(\"stderr: %s\", stderr.String())\n\t\tlog.Fatalf(\"failed to run yq command: %s\", err)\n\t}\n\n\tfmt.Printf(\"YAML file (%s) job %s updated successfully\\n\", testPath, jobName)\n}\n\nfunc main() {\n\ttests := findTests()\n\n\t// Expand tests that should be split into multiple jobs\n\texpandedTests := expandTests(tests)\n\n\tquotedTests := make([]string, len(expandedTests))\n\tfor i, test := range expandedTests {\n\t\tquotedTests[i] = fmt.Sprintf(\"\\\"%s\\\"\", test)\n\t}\n\n\t// Define selected tests for PostgreSQL\n\tpostgresTestNames := []string{\n\t\t\"TestACLAllowUserDst\",\n\t\t\"TestPingAllByIP\",\n\t\t\"TestEphemeral2006DeletedTooQuickly\",\n\t\t\"TestPingAllByIPManyUpDown\",\n\t\t\"TestSubnetRouterMultiNetwork\",\n\t}\n\n\tquotedPostgresTests := make([]string, len(postgresTestNames))\n\tfor i, test := range postgresTestNames {\n\t\tquotedPostgresTests[i] = fmt.Sprintf(\"\\\"%s\\\"\", test)\n\t}\n\n\t// Update both SQLite and PostgreSQL job matrices\n\tupdateYAML(quotedTests, \"sqlite\", \"./test-integration.yaml\")\n\tupdateYAML(quotedPostgresTests, \"postgres\", \"./test-integration.yaml\")\n}\n"
  },
  {
    "path": ".github/workflows/gh-actions-updater.yaml",
    "content": "name: GitHub Actions Version Updater\n\non:\n  schedule:\n    # Automatically run on every Sunday\n    - cron: \"0 0 * * 0\"\n\njobs:\n  build:\n    if: github.repository == 'juanfont/headscale'\n    runs-on: ubuntu-latest\n\n    steps:\n      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1\n        with:\n          # [Required] Access token with `workflow` scope.\n          token: ${{ secrets.WORKFLOW_SECRET }}\n\n      - name: Run GitHub Actions Version Updater\n        uses: saadmk11/github-actions-version-updater@d8781caf11d11168579c8e5e94f62b068038f442 # v0.9.0\n        with:\n          # [Required] Access token with `workflow` scope.\n          token: ${{ secrets.WORKFLOW_SECRET }}\n"
  },
  {
    "path": ".github/workflows/integration-test-template.yml",
    "content": "name: Integration Test Template\n\non:\n  workflow_call:\n    inputs:\n      test:\n        required: true\n        type: string\n      postgres_flag:\n        required: false\n        type: string\n        default: \"\"\n      database_name:\n        required: true\n        type: string\n\njobs:\n  test:\n    runs-on: ubuntu-latest\n    env:\n      # Github does not allow us to access secrets in pull requests,\n      # so this env var is used to check if we have the secret or not.\n      # If we have the secrets, meaning we are running on push in a fork,\n      # there might be secrets available for more debugging.\n      # If TS_OAUTH_CLIENT_ID and TS_OAUTH_SECRET is set, then the job\n      # will join a debug tailscale network, set up SSH and a tmux session.\n      # The SSH will be configured to use the SSH key of the Github user\n      # that triggered the build.\n      HAS_TAILSCALE_SECRET: ${{ secrets.TS_OAUTH_CLIENT_ID }}\n    steps:\n      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1\n        with:\n          fetch-depth: 2\n      - name: Tailscale\n        if: ${{ env.HAS_TAILSCALE_SECRET }}\n        uses: tailscale/github-action@a392da0a182bba0e9613b6243ebd69529b1878aa # v4.1.0\n        with:\n          oauth-client-id: ${{ secrets.TS_OAUTH_CLIENT_ID }}\n          oauth-secret: ${{ secrets.TS_OAUTH_SECRET }}\n          tags: tag:gh\n      - name: Setup SSH server for Actor\n        if: ${{ env.HAS_TAILSCALE_SECRET }}\n        uses: alexellis/setup-sshd-actor@master\n      - name: Download headscale image\n        uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0\n        with:\n          name: headscale-image\n          path: /tmp/artifacts\n      - name: Download tailscale HEAD image\n        uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0\n        with:\n          name: tailscale-head-image\n          path: /tmp/artifacts\n      - name: Download hi binary\n        uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0\n        with:\n          name: hi-binary\n          path: /tmp/artifacts\n      - name: Download Go cache\n        uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0\n        with:\n          name: go-cache\n          path: /tmp/artifacts\n      - name: Download postgres image\n        if: ${{ inputs.postgres_flag == '--postgres=1' }}\n        uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0\n        with:\n          name: postgres-image\n          path: /tmp/artifacts\n      - name: Pin Docker to v28 (avoid v29 breaking changes)\n        run: |\n          # Docker 29 breaks docker build via Go client libraries and\n          # docker load/save with certain tarball formats.\n          # Pin to Docker 28.x until our tooling is updated.\n          # https://github.com/actions/runner-images/issues/13474\n          sudo install -m 0755 -d /etc/apt/keyrings\n          curl -fsSL https://download.docker.com/linux/ubuntu/gpg \\\n            | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg\n          echo \"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] \\\n            https://download.docker.com/linux/ubuntu $(. 
/etc/os-release && echo \"$VERSION_CODENAME\") stable\" \\\n            | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null\n          sudo apt-get update -qq\n          VERSION=$(apt-cache madison docker-ce | grep '28\\.5' | head -1 | awk '{print $3}')\n          sudo apt-get install -y --allow-downgrades \\\n            \"docker-ce=${VERSION}\" \"docker-ce-cli=${VERSION}\"\n          sudo systemctl restart docker\n          docker version\n      - name: Load Docker images, Go cache, and prepare binary\n        run: |\n          gunzip -c /tmp/artifacts/headscale-image.tar.gz | docker load\n          gunzip -c /tmp/artifacts/tailscale-head-image.tar.gz | docker load\n          if [ -f /tmp/artifacts/postgres-image.tar.gz ]; then\n            gunzip -c /tmp/artifacts/postgres-image.tar.gz | docker load\n          fi\n          chmod +x /tmp/artifacts/hi\n          docker images\n          # Extract Go cache to host directories for bind mounting\n          mkdir -p /tmp/go-cache\n          tar -xzf /tmp/artifacts/go-cache.tar.gz -C /tmp/go-cache\n          ls -la /tmp/go-cache/ /tmp/go-cache/.cache/\n      - name: Run Integration Test\n        env:\n          HEADSCALE_INTEGRATION_HEADSCALE_IMAGE: headscale:${{ github.sha }}\n          HEADSCALE_INTEGRATION_TAILSCALE_IMAGE: tailscale-head:${{ github.sha }}\n          HEADSCALE_INTEGRATION_POSTGRES_IMAGE: ${{ inputs.postgres_flag == '--postgres=1' && format('postgres:{0}', github.sha) || '' }}\n          HEADSCALE_INTEGRATION_GO_CACHE: /tmp/go-cache/go\n          HEADSCALE_INTEGRATION_GO_BUILD_CACHE: /tmp/go-cache/.cache/go-build\n        run: /tmp/artifacts/hi run --stats --ts-memory-limit=300 --hs-memory-limit=1500 \"^${{ inputs.test }}$\" \\\n          --timeout=120m \\\n          ${{ inputs.postgres_flag }}\n      # Sanitize test name for artifact upload (replace invalid characters: \" : < > | * ? \\ / with -)\n      - name: Sanitize test name for artifacts\n        if: always()\n        id: sanitize\n        run: echo \"name=${TEST_NAME//[\\\":<>|*?\\\\\\/]/-}\" >> $GITHUB_OUTPUT\n        env:\n          TEST_NAME: ${{ inputs.test }}\n      - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0\n        if: always()\n        with:\n          name: ${{ inputs.database_name }}-${{ steps.sanitize.outputs.name }}-logs\n          path: \"control_logs/*/*.log\"\n      - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0\n        if: always()\n        with:\n          name: ${{ inputs.database_name }}-${{ steps.sanitize.outputs.name }}-artifacts\n          path: control_logs/\n      - name: Setup a blocking tmux session\n        if: ${{ env.HAS_TAILSCALE_SECRET }}\n        uses: alexellis/block-with-tmux-action@master\n"
  },
  {
    "path": ".github/workflows/lint.yml",
    "content": "name: Lint\n\non: [pull_request]\n\nconcurrency:\n  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}\n  cancel-in-progress: true\n\njobs:\n  golangci-lint:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1\n        with:\n          fetch-depth: 2\n      - name: Get changed files\n        id: changed-files\n        uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2\n        with:\n          filters: |\n            files:\n              - '*.nix'\n              - 'go.*'\n              - '**/*.go'\n              - 'integration_test/'\n              - 'config-example.yaml'\n      - uses: nixbuild/nix-quick-install-action@2c9db80fb984ceb1bcaa77cdda3fdf8cfba92035 # v34\n        if: steps.changed-files.outputs.files == 'true'\n      - uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3\n        if: steps.changed-files.outputs.files == 'true'\n        with:\n          primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',\n            '**/flake.lock') }}\n          restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}\n\n      - name: golangci-lint\n        if: steps.changed-files.outputs.files == 'true'\n        run: nix develop --command -- golangci-lint run\n          --new-from-rev=${{github.event.pull_request.base.sha}}\n          --output.text.path=stdout\n          --output.text.print-linter-name\n          --output.text.print-issued-lines\n          --output.text.colors\n\n  prettier-lint:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1\n        with:\n          fetch-depth: 2\n      - name: Get changed files\n        id: changed-files\n        uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2\n        with:\n          filters: |\n            files:\n              - '*.nix'\n              - '**/*.md'\n              - '**/*.yml'\n              - '**/*.yaml'\n              - '**/*.ts'\n              - '**/*.js'\n              - '**/*.sass'\n              - '**/*.css'\n              - '**/*.scss'\n              - '**/*.html'\n      - uses: nixbuild/nix-quick-install-action@2c9db80fb984ceb1bcaa77cdda3fdf8cfba92035 # v34\n        if: steps.changed-files.outputs.files == 'true'\n      - uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3\n        if: steps.changed-files.outputs.files == 'true'\n        with:\n          primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',\n            '**/flake.lock') }}\n          restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}\n\n      - name: Prettify code\n        if: steps.changed-files.outputs.files == 'true'\n        run: nix develop --command -- prettier --no-error-on-unmatched-pattern\n          --ignore-unknown --check **/*.{ts,js,md,yaml,yml,sass,css,scss,html}\n\n  proto-lint:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1\n      - uses: nixbuild/nix-quick-install-action@2c9db80fb984ceb1bcaa77cdda3fdf8cfba92035 # v34\n      - uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3\n        with:\n          primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',\n            '**/flake.lock') }}\n          restore-prefixes-first-match: nix-${{ 
runner.os }}-${{ runner.arch }}\n\n      - name: Buf lint\n        run: nix develop --command -- buf lint proto\n"
  },
  {
    "path": ".github/workflows/needs-more-info-comment.yml",
    "content": "name: Needs More Info - Post Comment\n\non:\n  issues:\n    types: [labeled]\n\njobs:\n  post-comment:\n    if: >-\n      github.event.label.name == 'needs-more-info' &&\n      github.repository == 'juanfont/headscale'\n    runs-on: ubuntu-latest\n    permissions:\n      issues: write\n      contents: read\n    steps:\n      - name: Checkout repository\n        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2\n        with:\n          sparse-checkout: .github/label-response/needs-more-info.md\n          sparse-checkout-cone-mode: false\n\n      - name: Post instruction comment\n        run: gh issue comment \"$NUMBER\" --body-file .github/label-response/needs-more-info.md\n        env:\n          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n          GH_REPO: ${{ github.repository }}\n          NUMBER: ${{ github.event.issue.number }}\n"
  },
  {
    "path": ".github/workflows/needs-more-info-timer.yml",
    "content": "name: Needs More Info - Timer\n\non:\n  schedule:\n    - cron: \"0 0 * * *\" # Daily at midnight UTC\n  issue_comment:\n    types: [created]\n  workflow_dispatch:\n\njobs:\n  # When a non-bot user comments on a needs-more-info issue, remove the label.\n  remove-label-on-response:\n    if: >-\n      github.repository == 'juanfont/headscale' &&\n      github.event_name == 'issue_comment' &&\n      github.event.comment.user.type != 'Bot' &&\n      contains(github.event.issue.labels.*.name, 'needs-more-info')\n    runs-on: ubuntu-latest\n    permissions:\n      issues: write\n    steps:\n      - name: Remove needs-more-info label\n        run: gh issue edit \"$NUMBER\" --remove-label needs-more-info\n        env:\n          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n          GH_REPO: ${{ github.repository }}\n          NUMBER: ${{ github.event.issue.number }}\n\n  # On schedule, close issues that have had no human response for 3 days.\n  close-stale:\n    if: >-\n      github.repository == 'juanfont/headscale' &&\n      github.event_name != 'issue_comment'\n    runs-on: ubuntu-latest\n    permissions:\n      issues: write\n    steps:\n      - uses: hustcer/setup-nu@920172d92eb04671776f3ba69d605d3b09351c30 # v3.22\n        with:\n          version: \"*\"\n\n      - name: Close stale needs-more-info issues\n        shell: nu {0}\n        env:\n          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n          GH_REPO: ${{ github.repository }}\n        run: |\n          let issues = (gh issue list\n            --repo $env.GH_REPO\n            --label \"needs-more-info\"\n            --state open\n            --json number\n            | from json)\n\n          for issue in $issues {\n            let number = $issue.number\n            print $\"Checking issue #($number)\"\n\n            # Find when needs-more-info was last added\n            let events = (gh api $\"repos/($env.GH_REPO)/issues/($number)/events\"\n              --paginate | from json | flatten)\n            let label_event = ($events\n              | where event == \"labeled\" and label.name == \"needs-more-info\"\n              | last)\n            let label_added_at = ($label_event.created_at | into datetime)\n\n            # Check for non-bot comments after the label was added\n            let comments = (gh api $\"repos/($env.GH_REPO)/issues/($number)/comments\"\n              --paginate | from json | flatten)\n            let human_responses = ($comments\n              | where user.type != \"Bot\"\n              | where { ($in.created_at | into datetime) > $label_added_at })\n\n            if ($human_responses | length) > 0 {\n              print $\"  Human responded, removing label\"\n              gh issue edit $number --repo $env.GH_REPO --remove-label needs-more-info\n              continue\n            }\n\n            # Check if 3 days have passed\n            let elapsed = (date now) - $label_added_at\n            if $elapsed < 3day {\n              print $\"  Only ($elapsed | format duration day) elapsed, skipping\"\n              continue\n            }\n\n            print $\"  No response for ($elapsed | format duration day), closing\"\n            let message = [\n              \"This issue has been automatically closed because no additional information was provided within 3 days.\"\n              \"\"\n              \"If you have the requested information, please open a new issue and include the debug information requested above.\"\n              \"\"\n              \"Thank you for your understanding.\"\n      
      ] | str join \"\\n\"\n            gh issue comment $number --repo $env.GH_REPO --body $message\n            gh issue close $number --repo $env.GH_REPO --reason \"not planned\"\n            gh issue edit $number --repo $env.GH_REPO --remove-label needs-more-info\n          }\n"
  },
  {
    "path": ".github/workflows/nix-module-test.yml",
    "content": "name: NixOS Module Tests\n\non:\n  push:\n    branches:\n      - main\n  pull_request:\n    branches:\n      - main\n\nconcurrency:\n  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}\n  cancel-in-progress: true\n\njobs:\n  nix-module-check:\n    runs-on: ubuntu-latest\n    permissions:\n      contents: read\n\n    steps:\n      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1\n        with:\n          fetch-depth: 2\n\n      - name: Get changed files\n        id: changed-files\n        uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2\n        with:\n          filters: |\n            nix:\n              - 'nix/**'\n              - 'flake.nix'\n              - 'flake.lock'\n            go:\n              - 'go.*'\n              - '**/*.go'\n              - 'cmd/**'\n              - 'hscontrol/**'\n\n      - uses: nixbuild/nix-quick-install-action@2c9db80fb984ceb1bcaa77cdda3fdf8cfba92035 # v34\n        if: steps.changed-files.outputs.nix == 'true' || steps.changed-files.outputs.go == 'true'\n\n      - uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3\n        if: steps.changed-files.outputs.nix == 'true' || steps.changed-files.outputs.go == 'true'\n        with:\n          primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',\n            '**/flake.lock') }}\n          restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}\n\n      - name: Run NixOS module tests\n        if: steps.changed-files.outputs.nix == 'true' || steps.changed-files.outputs.go == 'true'\n        run: |\n          echo \"Running NixOS module integration test...\"\n          nix build .#checks.x86_64-linux.headscale -L\n"
  },
  {
    "path": ".github/workflows/release.yml",
    "content": "---\nname: Release\n\non:\n  push:\n    tags:\n      - \"*\" # triggers only if push new tag version\n  workflow_dispatch:\n\njobs:\n  goreleaser:\n    if: github.repository == 'juanfont/headscale'\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout\n        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1\n        with:\n          fetch-depth: 0\n\n      - name: Pin Docker to v28 (avoid v29 breaking changes)\n        run: |\n          # Docker 29 breaks docker build via Go client libraries and\n          # docker load/save with certain tarball formats.\n          # Pin to Docker 28.x until our tooling is updated.\n          # https://github.com/actions/runner-images/issues/13474\n          sudo install -m 0755 -d /etc/apt/keyrings\n          curl -fsSL https://download.docker.com/linux/ubuntu/gpg \\\n            | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg\n          echo \"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] \\\n            https://download.docker.com/linux/ubuntu $(. /etc/os-release && echo \"$VERSION_CODENAME\") stable\" \\\n            | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null\n          sudo apt-get update -qq\n          VERSION=$(apt-cache madison docker-ce | grep '28\\.5' | head -1 | awk '{print $3}')\n          sudo apt-get install -y --allow-downgrades \\\n            \"docker-ce=${VERSION}\" \"docker-ce-cli=${VERSION}\"\n          sudo systemctl restart docker\n          docker version\n\n      - name: Login to DockerHub\n        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n\n      - name: Login to GHCR\n        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0\n        with:\n          registry: ghcr.io\n          username: ${{ github.repository_owner }}\n          password: ${{ secrets.GITHUB_TOKEN }}\n\n      - uses: nixbuild/nix-quick-install-action@2c9db80fb984ceb1bcaa77cdda3fdf8cfba92035 # v34\n      - uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3\n        with:\n          primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',\n            '**/flake.lock') }}\n          restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}\n\n      - name: Run goreleaser\n        run: nix develop --command -- goreleaser release --clean\n        env:\n          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n"
  },
  {
    "path": ".github/workflows/stale.yml",
    "content": "name: Close inactive issues\n\non:\n  schedule:\n    - cron: \"30 1 * * *\"\n\njobs:\n  close-issues:\n    if: github.repository == 'juanfont/headscale'\n    runs-on: ubuntu-latest\n    permissions:\n      issues: write\n      pull-requests: write\n    steps:\n      - uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1\n        with:\n          days-before-issue-stale: 90\n          days-before-issue-close: 7\n          stale-issue-label: \"stale\"\n          stale-issue-message: \"This issue is stale because it has been open for 90 days with no\n            activity.\"\n          close-issue-message: \"This issue was closed because it has been inactive for 14 days\n            since being marked as stale.\"\n          days-before-pr-stale: -1\n          days-before-pr-close: -1\n          exempt-issue-labels: \"no-stale-bot,needs-more-info\"\n          repo-token: ${{ secrets.GITHUB_TOKEN }}\n"
  },
  {
    "path": ".github/workflows/support-request.yml",
    "content": "name: Support Request - Close Issue\n\non:\n  issues:\n    types: [labeled]\n\njobs:\n  close-support-request:\n    if: >-\n      github.event.label.name == 'support-request' &&\n      github.repository == 'juanfont/headscale'\n    runs-on: ubuntu-latest\n    permissions:\n      issues: write\n      contents: read\n    steps:\n      - name: Checkout repository\n        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2\n        with:\n          sparse-checkout: .github/label-response/support-request.md\n          sparse-checkout-cone-mode: false\n\n      - name: Post comment and close issue\n        run: |\n          gh issue comment \"$NUMBER\" --body-file .github/label-response/support-request.md\n          gh issue close \"$NUMBER\" --reason \"not planned\"\n        env:\n          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n          GH_REPO: ${{ github.repository }}\n          NUMBER: ${{ github.event.issue.number }}\n"
  },
  {
    "path": ".github/workflows/test-integration.yaml",
    "content": "name: integration\n# To debug locally on a branch, and when needing secrets\n# change this to include `push` so the build is ran on\n# the main repository.\non: [pull_request]\nconcurrency:\n  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}\n  cancel-in-progress: true\njobs:\n  # build: Builds binaries and Docker images once, uploads as artifacts for reuse.\n  # build-postgres: Pulls postgres image separately to avoid Docker Hub rate limits.\n  # sqlite: Runs all integration tests with SQLite backend.\n  # postgres: Runs a subset of tests with PostgreSQL to verify database compatibility.\n  build:\n    runs-on: ubuntu-latest\n    outputs:\n      files-changed: ${{ steps.changed-files.outputs.files }}\n    steps:\n      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1\n        with:\n          fetch-depth: 2\n      - name: Get changed files\n        id: changed-files\n        uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2\n        with:\n          filters: |\n            files:\n              - '*.nix'\n              - 'go.*'\n              - '**/*.go'\n              - 'integration/**'\n              - 'config-example.yaml'\n              - '.github/workflows/test-integration.yaml'\n              - '.github/workflows/integration-test-template.yml'\n              - 'Dockerfile.*'\n      - uses: nixbuild/nix-quick-install-action@2c9db80fb984ceb1bcaa77cdda3fdf8cfba92035 # v34\n        if: steps.changed-files.outputs.files == 'true'\n      - uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3\n        if: steps.changed-files.outputs.files == 'true'\n        with:\n          primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}\n          restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}\n      - name: Build binaries and warm Go cache\n        if: steps.changed-files.outputs.files == 'true'\n        run: |\n          # Build all Go binaries in one nix shell to maximize cache reuse\n          nix develop --command -- bash -c '\n            go build -o hi ./cmd/hi\n            CGO_ENABLED=0 GOOS=linux go build -o headscale ./cmd/headscale\n            # Build integration test binary to warm the cache with all dependencies\n            go test -c ./integration -o /dev/null 2>/dev/null || true\n          '\n      - name: Upload hi binary\n        if: steps.changed-files.outputs.files == 'true'\n        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0\n        with:\n          name: hi-binary\n          path: hi\n          retention-days: 10\n      - name: Package Go cache\n        if: steps.changed-files.outputs.files == 'true'\n        run: |\n          # Package Go module cache and build cache\n          tar -czf go-cache.tar.gz -C ~ go .cache/go-build\n      - name: Upload Go cache\n        if: steps.changed-files.outputs.files == 'true'\n        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0\n        with:\n          name: go-cache\n          path: go-cache.tar.gz\n          retention-days: 10\n      - name: Pin Docker to v28 (avoid v29 breaking changes)\n        if: steps.changed-files.outputs.files == 'true'\n        run: |\n          # Docker 29 breaks docker build via Go client libraries and\n          # docker load/save with certain tarball formats.\n          # Pin to Docker 28.x until our tooling is updated.\n          # 
https://github.com/actions/runner-images/issues/13474\n          sudo install -m 0755 -d /etc/apt/keyrings\n          curl -fsSL https://download.docker.com/linux/ubuntu/gpg \\\n            | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg\n          echo \"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] \\\n            https://download.docker.com/linux/ubuntu $(. /etc/os-release && echo \"$VERSION_CODENAME\") stable\" \\\n            | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null\n          sudo apt-get update -qq\n          VERSION=$(apt-cache madison docker-ce | grep '28\\.5' | head -1 | awk '{print $3}')\n          sudo apt-get install -y --allow-downgrades \\\n            \"docker-ce=${VERSION}\" \"docker-ce-cli=${VERSION}\"\n          sudo systemctl restart docker\n          docker version\n      - name: Build headscale image\n        if: steps.changed-files.outputs.files == 'true'\n        run: |\n          docker build \\\n            --file Dockerfile.integration-ci \\\n            --tag headscale:${{ github.sha }} \\\n            .\n          docker save headscale:${{ github.sha }} | gzip > headscale-image.tar.gz\n      - name: Build tailscale HEAD image\n        if: steps.changed-files.outputs.files == 'true'\n        run: |\n          docker build \\\n            --file Dockerfile.tailscale-HEAD \\\n            --tag tailscale-head:${{ github.sha }} \\\n            .\n          docker save tailscale-head:${{ github.sha }} | gzip > tailscale-head-image.tar.gz\n      - name: Upload headscale image\n        if: steps.changed-files.outputs.files == 'true'\n        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0\n        with:\n          name: headscale-image\n          path: headscale-image.tar.gz\n          retention-days: 10\n      - name: Upload tailscale HEAD image\n        if: steps.changed-files.outputs.files == 'true'\n        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0\n        with:\n          name: tailscale-head-image\n          path: tailscale-head-image.tar.gz\n          retention-days: 10\n  build-postgres:\n    runs-on: ubuntu-latest\n    needs: build\n    if: needs.build.outputs.files-changed == 'true'\n    steps:\n      - name: Pin Docker to v28 (avoid v29 breaking changes)\n        run: |\n          # Docker 29 breaks docker build via Go client libraries and\n          # docker load/save with certain tarball formats.\n          # Pin to Docker 28.x until our tooling is updated.\n          # https://github.com/actions/runner-images/issues/13474\n          sudo install -m 0755 -d /etc/apt/keyrings\n          curl -fsSL https://download.docker.com/linux/ubuntu/gpg \\\n            | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg\n          echo \"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] \\\n            https://download.docker.com/linux/ubuntu $(. 
/etc/os-release && echo \"$VERSION_CODENAME\") stable\" \\\n            | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null\n          sudo apt-get update -qq\n          VERSION=$(apt-cache madison docker-ce | grep '28\\.5' | head -1 | awk '{print $3}')\n          sudo apt-get install -y --allow-downgrades \\\n            \"docker-ce=${VERSION}\" \"docker-ce-cli=${VERSION}\"\n          sudo systemctl restart docker\n          docker version\n      - name: Pull and save postgres image\n        run: |\n          docker pull postgres:latest\n          docker tag postgres:latest postgres:${{ github.sha }}\n          docker save postgres:${{ github.sha }} | gzip > postgres-image.tar.gz\n      - name: Upload postgres image\n        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0\n        with:\n          name: postgres-image\n          path: postgres-image.tar.gz\n          retention-days: 10\n  sqlite:\n    needs: build\n    if: needs.build.outputs.files-changed == 'true'\n    strategy:\n      fail-fast: false\n      matrix:\n        test:\n          - TestACLHostsInNetMapTable\n          - TestACLAllowUser80Dst\n          - TestACLDenyAllPort80\n          - TestACLAllowUserDst\n          - TestACLAllowStarDst\n          - TestACLNamedHostsCanReachBySubnet\n          - TestACLNamedHostsCanReach\n          - TestACLDevice1CanAccessDevice2\n          - TestPolicyUpdateWhileRunningWithCLIInDatabase\n          - TestACLAutogroupMember\n          - TestACLAutogroupTagged\n          - TestACLAutogroupSelf\n          - TestACLPolicyPropagationOverTime\n          - TestACLTagPropagation\n          - TestACLTagPropagationPortSpecific\n          - TestACLGroupWithUnknownUser\n          - TestACLGroupAfterUserDeletion\n          - TestACLGroupDeletionExactReproduction\n          - TestACLDynamicUnknownUserAddition\n          - TestACLDynamicUnknownUserRemoval\n          - TestAPIAuthenticationBypass\n          - TestAPIAuthenticationBypassCurl\n          - TestGRPCAuthenticationBypass\n          - TestCLIWithConfigAuthenticationBypass\n          - TestAuthKeyLogoutAndReloginSameUser\n          - TestAuthKeyLogoutAndReloginNewUser\n          - TestAuthKeyLogoutAndReloginSameUserExpiredKey\n          - TestAuthKeyDeleteKey\n          - TestAuthKeyLogoutAndReloginRoutesPreserved\n          - TestOIDCAuthenticationPingAll\n          - TestOIDCExpireNodesBasedOnTokenExpiry\n          - TestOIDC024UserCreation\n          - TestOIDCAuthenticationWithPKCE\n          - TestOIDCReloginSameNodeNewUser\n          - TestOIDCFollowUpUrl\n          - TestOIDCMultipleOpenedLoginUrls\n          - TestOIDCReloginSameNodeSameUser\n          - TestOIDCExpiryAfterRestart\n          - TestOIDCACLPolicyOnJoin\n          - TestOIDCReloginSameUserRoutesPreserved\n          - TestAuthWebFlowAuthenticationPingAll\n          - TestAuthWebFlowLogoutAndReloginSameUser\n          - TestAuthWebFlowLogoutAndReloginNewUser\n          - TestUserCommand\n          - TestPreAuthKeyCommand\n          - TestPreAuthKeyCommandWithoutExpiry\n          - TestPreAuthKeyCommandReusableEphemeral\n          - TestPreAuthKeyCorrectUserLoggedInCommand\n          - TestTaggedNodesCLIOutput\n          - TestApiKeyCommand\n          - TestNodeCommand\n          - TestNodeExpireCommand\n          - TestNodeRenameCommand\n          - TestPolicyCommand\n          - TestPolicyBrokenConfigCommand\n          - TestDERPVerifyEndpoint\n          - TestResolveMagicDNS\n          - TestResolveMagicDNSExtraRecordsPath\n          
- TestDERPServerScenario\n          - TestDERPServerWebsocketScenario\n          - TestPingAllByIP\n          - TestPingAllByIPPublicDERP\n          - TestEphemeral\n          - TestEphemeralInAlternateTimezone\n          - TestEphemeral2006DeletedTooQuickly\n          - TestPingAllByHostname\n          - TestTaildrop\n          - TestUpdateHostnameFromClient\n          - TestExpireNode\n          - TestSetNodeExpiryInFuture\n          - TestDisableNodeExpiry\n          - TestNodeOnlineStatus\n          - TestPingAllByIPManyUpDown\n          - Test2118DeletingOnlineNodePanics\n          - TestEnablingRoutes\n          - TestHASubnetRouterFailover\n          - TestSubnetRouteACL\n          - TestEnablingExitRoutes\n          - TestSubnetRouterMultiNetwork\n          - TestSubnetRouterMultiNetworkExitNode\n          - TestAutoApproveMultiNetwork/authkey-tag.*\n          - TestAutoApproveMultiNetwork/authkey-user.*\n          - TestAutoApproveMultiNetwork/authkey-group.*\n          - TestAutoApproveMultiNetwork/webauth-tag.*\n          - TestAutoApproveMultiNetwork/webauth-user.*\n          - TestAutoApproveMultiNetwork/webauth-group.*\n          - TestSubnetRouteACLFiltering\n          - TestHeadscale\n          - TestTailscaleNodesJoiningHeadcale\n          - TestSSHOneUserToAll\n          - TestSSHMultipleUsersAllToAll\n          - TestSSHNoSSHConfigured\n          - TestSSHIsBlockedInACL\n          - TestSSHUserOnlyIsolation\n          - TestSSHAutogroupSelf\n          - TestSSHOneUserToOneCheckModeCLI\n          - TestSSHOneUserToOneCheckModeOIDC\n          - TestSSHCheckModeUnapprovedTimeout\n          - TestSSHCheckModeCheckPeriodCLI\n          - TestSSHCheckModeAutoApprove\n          - TestSSHCheckModeNegativeCLI\n          - TestSSHLocalpart\n          - TestTagsAuthKeyWithTagRequestDifferentTag\n          - TestTagsAuthKeyWithTagNoAdvertiseFlag\n          - TestTagsAuthKeyWithTagCannotAddViaCLI\n          - TestTagsAuthKeyWithTagCannotChangeViaCLI\n          - TestTagsAuthKeyWithTagAdminOverrideReauthPreserves\n          - TestTagsAuthKeyWithTagCLICannotModifyAdminTags\n          - TestTagsAuthKeyWithoutTagCannotRequestTags\n          - TestTagsAuthKeyWithoutTagRegisterNoTags\n          - TestTagsAuthKeyWithoutTagCannotAddViaCLI\n          - TestTagsAuthKeyWithoutTagCLINoOpAfterAdminWithReset\n          - TestTagsAuthKeyWithoutTagCLINoOpAfterAdminWithEmptyAdvertise\n          - TestTagsAuthKeyWithoutTagCLICannotReduceAdminMultiTag\n          - TestTagsUserLoginOwnedTagAtRegistration\n          - TestTagsUserLoginNonExistentTagAtRegistration\n          - TestTagsUserLoginUnownedTagAtRegistration\n          - TestTagsUserLoginAddTagViaCLIReauth\n          - TestTagsUserLoginRemoveTagViaCLIReauth\n          - TestTagsUserLoginCLINoOpAfterAdminAssignment\n          - TestTagsUserLoginCLICannotRemoveAdminTags\n          - TestTagsAuthKeyWithTagRequestNonExistentTag\n          - TestTagsAuthKeyWithTagRequestUnownedTag\n          - TestTagsAuthKeyWithoutTagRequestNonExistentTag\n          - TestTagsAuthKeyWithoutTagRequestUnownedTag\n          - TestTagsAdminAPICannotSetNonExistentTag\n          - TestTagsAdminAPICanSetUnownedTag\n          - TestTagsAdminAPICannotRemoveAllTags\n          - TestTagsIssue2978ReproTagReplacement\n          - TestTagsAdminAPICannotSetInvalidFormat\n          - TestTagsUserLoginReauthWithEmptyTagsRemovesAllTags\n          - TestTagsAuthKeyWithoutUserInheritsTags\n          - TestTagsAuthKeyWithoutUserRejectsAdvertisedTags\n          - 
TestTagsAuthKeyConvertToUserViaCLIRegister\n    uses: ./.github/workflows/integration-test-template.yml\n    secrets: inherit\n    with:\n      test: ${{ matrix.test }}\n      postgres_flag: \"--postgres=0\"\n      database_name: \"sqlite\"\n  postgres:\n    needs: [build, build-postgres]\n    if: needs.build.outputs.files-changed == 'true'\n    strategy:\n      fail-fast: false\n      matrix:\n        test:\n          - TestACLAllowUserDst\n          - TestPingAllByIP\n          - TestEphemeral2006DeletedTooQuickly\n          - TestPingAllByIPManyUpDown\n          - TestSubnetRouterMultiNetwork\n    uses: ./.github/workflows/integration-test-template.yml\n    secrets: inherit\n    with:\n      test: ${{ matrix.test }}\n      postgres_flag: \"--postgres=1\"\n      database_name: \"postgres\"\n"
  },
  {
    "path": ".github/workflows/test.yml",
    "content": "name: Tests\n\non: [push, pull_request]\n\nconcurrency:\n  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}\n  cancel-in-progress: true\n\njobs:\n  test:\n    runs-on: ubuntu-latest\n\n    steps:\n      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1\n        with:\n          fetch-depth: 2\n\n      - name: Get changed files\n        id: changed-files\n        uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2\n        with:\n          filters: |\n            files:\n              - '*.nix'\n              - 'go.*'\n              - '**/*.go'\n              - 'integration_test/'\n              - 'config-example.yaml'\n\n      - uses: nixbuild/nix-quick-install-action@2c9db80fb984ceb1bcaa77cdda3fdf8cfba92035 # v34\n        if: steps.changed-files.outputs.files == 'true'\n      - uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3\n        if: steps.changed-files.outputs.files == 'true'\n        with:\n          primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',\n            '**/flake.lock') }}\n          restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}\n\n      - name: Run tests\n        if: steps.changed-files.outputs.files == 'true'\n        env:\n          # As of 2025-01-06, these env vars was not automatically\n          # set anymore which breaks the initdb for postgres on\n          # some of the database migration tests.\n          LC_ALL: \"en_US.UTF-8\"\n          LC_CTYPE: \"en_US.UTF-8\"\n        run: nix develop --command -- gotestsum\n"
  },
  {
    "path": ".github/workflows/update-flake.yml",
    "content": "name: update-flake-lock\non:\n  workflow_dispatch: # allows manual triggering\n  schedule:\n    - cron: \"0 0 * * 0\" # runs weekly on Sunday at 00:00\n\njobs:\n  lockfile:\n    if: github.repository == 'juanfont/headscale'\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout repository\n        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2\n      - name: Install Nix\n        uses: DeterminateSystems/nix-installer-action@21a544727d0c62386e78b4befe52d19ad12692e3 # v17\n      - name: Update flake.lock\n        uses: DeterminateSystems/update-flake-lock@428c2b58a4b7414dabd372acb6a03dba1084d3ab # v25\n        with:\n          pr-title: \"Update flake.lock\"\n"
  },
  {
    "path": ".gitignore",
    "content": "ignored/\ntailscale/\n.vscode/\n.claude/\nlogs/\n\n*.prof\n\n# Binaries for programs and plugins\n*.exe\n*.exe~\n*.dll\n*.so\n*.dylib\n\n# Test binary, built with `go test -c`\n*.test\n\n# Output of the go coverage tool, specifically when used with LiteIDE\n*.out\n\n# Dependency directories (remove the comment below to include it)\nvendor/\n\ndist/\n/headscale\nconfig.yaml\nconfig*.yaml\n!config-example.yaml\nderp.yaml\n*.hujson\n*.key\n/db.sqlite\n*.sqlite3\n\n# Exclude Jetbrains Editors\n.idea\n\ntest_output/\ncontrol_logs/\n\n# Nix build output\nresult\n.direnv/\n\nintegration_test/etc/config.dump.yaml\n\n# MkDocs\n.cache\n/site\n\n__debug_bin\n\nnode_modules/\npackage-lock.json\npackage.json\n"
  },
  {
    "path": ".golangci.yaml",
    "content": "---\nversion: \"2\"\nlinters:\n  default: all\n  disable:\n    - cyclop\n    - depguard\n    - dupl\n    - exhaustruct\n    - funcorder\n    - funlen\n    - gochecknoglobals\n    - gochecknoinits\n    - gocognit\n    - godox\n    - interfacebloat\n    - ireturn\n    - lll\n    - maintidx\n    - makezero\n    - mnd\n    - musttag\n    - nestif\n    - nolintlint\n    - paralleltest\n    - revive\n    - tagliatelle\n    - testpackage\n    - varnamelen\n    - wrapcheck\n    - wsl\n  settings:\n    forbidigo:\n      forbid:\n        # Forbid time.Sleep everywhere with context-appropriate alternatives\n        - pattern: 'time\\.Sleep'\n          msg: >-\n            time.Sleep is forbidden.\n            In tests: use assert.EventuallyWithT for polling/waiting patterns.\n            In production code: use a backoff strategy (e.g., cenkalti/backoff) or proper synchronization primitives.\n        # Forbid inline string literals in zerolog field methods - use zf.* constants\n        - pattern: '\\.(Str|Int|Int8|Int16|Int32|Int64|Uint|Uint8|Uint16|Uint32|Uint64|Float32|Float64|Bool|Dur|Time|TimeDiff|Strs|Ints|Uints|Floats|Bools|Any|Interface)\\(\"[^\"]+\"'\n          msg: >-\n            Use zf.* constants for zerolog field names instead of string literals.\n            Import \"github.com/juanfont/headscale/hscontrol/util/zlog/zf\" and use\n            constants like zf.NodeID, zf.UserName, etc. Add new constants to\n            hscontrol/util/zlog/zf/fields.go if needed.\n        # Forbid ptr.To - use Go 1.26 new(expr) instead\n        - pattern: 'ptr\\.To\\('\n          msg: >-\n            ptr.To is forbidden. Use Go 1.26's new(expr) syntax instead.\n            Example: ptr.To(value) → new(value)\n        # Forbid tsaddr.SortPrefixes - use slices.SortFunc with netip.Prefix.Compare\n        - pattern: 'tsaddr\\.SortPrefixes'\n          msg: >-\n            tsaddr.SortPrefixes is forbidden. Use Go 1.26's netip.Prefix.Compare instead.\n            Example: slices.SortFunc(prefixes, netip.Prefix.Compare)\n      analyze-types: true\n    gocritic:\n      disabled-checks:\n        - appendAssign\n        - ifElseChain\n    nlreturn:\n      block-size: 4\n    varnamelen:\n      ignore-names:\n        - err\n        - db\n        - id\n        - ip\n        - ok\n        - c\n        - tt\n        - tx\n        - rx\n        - sb\n        - wg\n        - pr\n        - p\n        - p2\n      ignore-type-assert-ok: true\n      ignore-map-index-ok: true\n  exclusions:\n    generated: lax\n    presets:\n      - comments\n      - common-false-positives\n      - legacy\n      - std-error-handling\n    paths:\n      - third_party$\n      - builtin$\n      - examples$\n      - gen\n\nformatters:\n  enable:\n    - gci\n    - gofmt\n    - gofumpt\n    - goimports\n  exclusions:\n    generated: lax\n    paths:\n      - third_party$\n      - builtin$\n      - examples$\n      - gen\n"
  },
  {
    "path": ".goreleaser.yml",
    "content": "---\nversion: 2\nbefore:\n  hooks:\n    - go mod tidy -compat=1.26\n    - go mod vendor\n\nrelease:\n  prerelease: auto\n  draft: true\n  header: |\n    ## Upgrade\n\n    Please follow the steps outlined in the [upgrade guide](https://headscale.net/stable/setup/upgrade/) to update your existing Headscale installation.\n\nbuilds:\n  - id: headscale\n    main: ./cmd/headscale\n    mod_timestamp: \"{{ .CommitTimestamp }}\"\n    env:\n      - CGO_ENABLED=0\n    targets:\n      - darwin_amd64\n      - darwin_arm64\n      - freebsd_amd64\n      - linux_amd64\n      - linux_arm64\n    flags:\n      - -mod=readonly\n    tags:\n      - ts2019\n\narchives:\n  - id: golang-cross\n    name_template: '{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ with .Arm }}v{{ . }}{{ end }}{{ with .Mips }}_{{ . }}{{ end }}{{ if not (eq .Amd64 \"v1\") }}{{ .Amd64 }}{{ end }}'\n    formats:\n      - binary\n\nsource:\n  enabled: true\n  name_template: \"{{ .ProjectName }}_{{ .Version }}\"\n  format: tar.gz\n  files:\n    - \"vendor/\"\n\nnfpms:\n  # Configure nFPM for .deb and .rpm releases\n  #\n  # See https://nfpm.goreleaser.com/configuration/\n  # and https://goreleaser.com/customization/nfpm/\n  #\n  # Useful tools for debugging .debs:\n  # List file contents: dpkg -c dist/headscale...deb\n  # Package metadata: dpkg --info dist/headscale....deb\n  #\n  - ids:\n      - headscale\n    package_name: headscale\n    priority: optional\n    vendor: headscale\n    maintainer: Kristoffer Dalby <kristoffer@dalby.cc>\n    homepage: https://github.com/juanfont/headscale\n    description: |-\n      Open source implementation of the Tailscale control server.\n      Headscale aims to implement a self-hosted, open source alternative to the\n      Tailscale control server. Headscale's goal is to provide self-hosters and\n      hobbyists with an open-source server they can use for their projects and\n      labs. 
It implements a narrow scope, a single Tailscale network (tailnet),\n      suitable for personal use or a small open-source organisation.\n    bindir: /usr/bin\n    section: net\n    formats:\n      - deb\n    contents:\n      - src: ./config-example.yaml\n        dst: /etc/headscale/config.yaml\n        type: config|noreplace\n        file_info:\n          mode: 0644\n      - src: ./packaging/systemd/headscale.service\n        dst: /usr/lib/systemd/system/headscale.service\n      - dst: /var/lib/headscale\n        type: dir\n      - src: LICENSE\n        dst: /usr/share/doc/headscale/copyright\n    scripts:\n      postinstall: ./packaging/deb/postinst\n      postremove: ./packaging/deb/postrm\n      preremove: ./packaging/deb/prerm\n    deb:\n      lintian_overrides:\n        - no-changelog # Our CHANGELOG.md uses a different format\n        - no-manual-page\n        - statically-linked-binary\n\nkos:\n  - id: ghcr\n    repositories:\n      - ghcr.io/juanfont/headscale\n      - headscale/headscale\n\n    # bare tells KO to only use the repository\n    # for tagging and naming the container.\n    bare: true\n    base_image: gcr.io/distroless/base-debian13\n    build: headscale\n    main: ./cmd/headscale\n    env:\n      - CGO_ENABLED=0\n    platforms:\n      - linux/amd64\n      - linux/arm64\n    tags:\n      - \"{{ if not .Prerelease }}latest{{ end }}\"\n      - \"{{ if not .Prerelease }}{{ .Major }}.{{ .Minor }}.{{ .Patch }}{{ end }}\"\n      - \"{{ if not .Prerelease }}{{ .Major }}.{{ .Minor }}{{ end }}\"\n      - \"{{ if not .Prerelease }}{{ .Major }}{{ end }}\"\n      - \"{{ if not .Prerelease }}v{{ .Major }}.{{ .Minor }}.{{ .Patch }}{{ end }}\"\n      - \"{{ if not .Prerelease }}v{{ .Major }}.{{ .Minor }}{{ end }}\"\n      - \"{{ if not .Prerelease }}v{{ .Major }}{{ end }}\"\n      - \"{{ if not .Prerelease }}stable{{ else }}unstable{{ end }}\"\n      - \"{{ .Tag }}\"\n      - '{{ trimprefix .Tag \"v\" }}'\n      - \"sha-{{ .ShortCommit }}\"\n    creation_time: \"{{.CommitTimestamp}}\"\n    ko_data_creation_time: \"{{.CommitTimestamp}}\"\n\n  - id: ghcr-debug\n    repositories:\n      - ghcr.io/juanfont/headscale\n      - headscale/headscale\n\n    bare: true\n    base_image: gcr.io/distroless/base-debian13:debug\n    build: headscale\n    main: ./cmd/headscale\n    env:\n      - CGO_ENABLED=0\n    platforms:\n      - linux/amd64\n      - linux/arm64\n    tags:\n      - \"{{ if not .Prerelease }}latest-debug{{ end }}\"\n      - \"{{ if not .Prerelease }}{{ .Major }}.{{ .Minor }}.{{ .Patch }}-debug{{ end }}\"\n      - \"{{ if not .Prerelease }}{{ .Major }}.{{ .Minor }}-debug{{ end }}\"\n      - \"{{ if not .Prerelease }}{{ .Major }}-debug{{ end }}\"\n      - \"{{ if not .Prerelease }}v{{ .Major }}.{{ .Minor }}.{{ .Patch }}-debug{{ end }}\"\n      - \"{{ if not .Prerelease }}v{{ .Major }}.{{ .Minor }}-debug{{ end }}\"\n      - \"{{ if not .Prerelease }}v{{ .Major }}-debug{{ end }}\"\n      - \"{{ if not .Prerelease }}stable-debug{{ else }}unstable-debug{{ end }}\"\n      - \"{{ .Tag }}-debug\"\n      - '{{ trimprefix .Tag \"v\" }}-debug'\n      - \"sha-{{ .ShortCommit }}-debug\"\n\nchecksum:\n  name_template: \"checksums.txt\"\nsnapshot:\n  version_template: \"{{ .Tag }}-next\"\nchangelog:\n  sort: asc\n  filters:\n    exclude:\n      - \"^docs:\"\n      - \"^test:\"\n"
  },
  {
    "path": ".mcp.json",
    "content": "{\n  \"mcpServers\": {\n    \"claude-code-mcp\": {\n      \"type\": \"stdio\",\n      \"command\": \"npx\",\n      \"args\": [\"-y\", \"@steipete/claude-code-mcp@latest\"],\n      \"env\": {}\n    },\n    \"sequential-thinking\": {\n      \"type\": \"stdio\",\n      \"command\": \"npx\",\n      \"args\": [\"-y\", \"@modelcontextprotocol/server-sequential-thinking\"],\n      \"env\": {}\n    },\n    \"nixos\": {\n      \"type\": \"stdio\",\n      \"command\": \"uvx\",\n      \"args\": [\"mcp-nixos\"],\n      \"env\": {}\n    },\n    \"context7\": {\n      \"type\": \"stdio\",\n      \"command\": \"npx\",\n      \"args\": [\"-y\", \"@upstash/context7-mcp\"],\n      \"env\": {}\n    },\n    \"git\": {\n      \"type\": \"stdio\",\n      \"command\": \"npx\",\n      \"args\": [\"-y\", \"@cyanheads/git-mcp-server\"],\n      \"env\": {}\n    }\n  }\n}\n"
  },
  {
    "path": ".mdformat.toml",
    "content": "[plugin.mkdocs]\nalign_semantic_breaks_in_lists = true\n"
  },
  {
    "path": ".pre-commit-config.yaml",
    "content": "# prek/pre-commit configuration for headscale\n# See: https://prek.j178.dev/quickstart/\n# See: https://prek.j178.dev/builtin/\n\n# Global exclusions - ignore generated code\nexclude: ^gen/\n\nrepos:\n  # Built-in hooks from pre-commit/pre-commit-hooks\n  # prek will use fast-path optimized versions automatically\n  # See: https://prek.j178.dev/builtin/\n  - repo: https://github.com/pre-commit/pre-commit-hooks\n    rev: v6.0.0\n    hooks:\n      - id: check-added-large-files\n      - id: check-case-conflict\n      - id: check-executables-have-shebangs\n      - id: check-json\n      - id: check-merge-conflict\n      - id: check-symlinks\n      - id: check-toml\n      - id: check-xml\n      - id: check-yaml\n      - id: detect-private-key\n      - id: end-of-file-fixer\n      - id: fix-byte-order-marker\n      - id: mixed-line-ending\n      - id: trailing-whitespace\n\n  # Local hooks for project-specific tooling\n  - repo: local\n    hooks:\n      # nixpkgs-fmt for Nix files\n      - id: nixpkgs-fmt\n        name: nixpkgs-fmt\n        entry: nixpkgs-fmt\n        language: system\n        files: \\.nix$\n\n      # Prettier for formatting\n      - id: prettier\n        name: prettier\n        entry: prettier --write --list-different\n        language: system\n        exclude: ^docs/\n        types_or: [javascript, jsx, ts, tsx, yaml, json, toml, html, css, scss, sass, markdown]\n\n      # mdformat for docs\n      - id: mdformat\n        name: mdformat\n        entry: mdformat\n        language: system\n        types_or: [markdown]\n        files: ^docs/\n\n      # golangci-lint for Go code quality\n      - id: golangci-lint\n        name: golangci-lint\n        entry: nix develop --command -- golangci-lint run --new-from-rev=HEAD~1 --timeout=5m --fix\n        language: system\n        types: [go]\n        pass_filenames: false\n"
  },
  {
    "path": ".prettierignore",
    "content": ".github/workflows/test-integration-v2*\ndocs/\n"
  },
  {
    "path": "AGENTS.md",
    "content": "# AGENTS.md\n\nThis file provides guidance to AI agents when working with code in this repository.\n\n## Overview\n\nHeadscale is an open-source implementation of the Tailscale control server written in Go. It provides self-hosted coordination for Tailscale networks (tailnets), managing node registration, IP allocation, policy enforcement, and DERP routing.\n\n## Development Commands\n\n### Quick Setup\n\n```bash\n# Recommended: Use Nix for dependency management\nnix develop\n\n# Full development workflow\nmake dev  # runs fmt + lint + test + build\n```\n\n### Essential Commands\n\n```bash\n# Build headscale binary\nmake build\n\n# Run tests\nmake test\ngo test ./...                    # All unit tests\ngo test -race ./...              # With race detection\n\n# Run specific integration test\ngo run ./cmd/hi run \"TestName\" --postgres\n\n# Code formatting and linting\nmake fmt         # Format all code (Go, docs, proto)\nmake lint        # Lint all code (Go, proto)\nmake fmt-go      # Format Go code only\nmake lint-go     # Lint Go code only\n\n# Protocol buffer generation (after modifying proto/)\nmake generate\n\n# Clean build artifacts\nmake clean\n```\n\n### Integration Testing\n\n```bash\n# Use the hi (Headscale Integration) test runner\ngo run ./cmd/hi doctor                    # Check system requirements\ngo run ./cmd/hi run \"TestPattern\"         # Run specific test\ngo run ./cmd/hi run \"TestPattern\" --postgres  # With PostgreSQL backend\n\n# Test artifacts are saved to control_logs/ with logs and debug data\n```\n\n## Pre-Commit Quality Checks\n\n### **MANDATORY: Automated Pre-Commit Hooks with prek**\n\n**CRITICAL REQUIREMENT**: This repository uses [prek](https://prek.j178.dev/) for automated pre-commit hooks. All commits are automatically validated for code quality, formatting, and common issues.\n\n### Initial Setup\n\nWhen you first clone the repository or enter the nix shell, install the git hooks:\n\n```bash\n# Enter nix development environment\nnix develop\n\n# Install prek git hooks (one-time setup)\nprek install\n```\n\nThis installs the pre-commit hook at `.git/hooks/pre-commit` which automatically runs all configured checks before each commit.\n\n### Configured Hooks\n\nThe repository uses `.pre-commit-config.yaml` with the following hooks:\n\n**Built-in Checks** (optimized fast-path execution):\n\n- `check-added-large-files` - Prevents accidentally committing large files\n- `check-case-conflict` - Checks for files that would conflict in case-insensitive filesystems\n- `check-executables-have-shebangs` - Ensures executables have proper shebangs\n- `check-json` - Validates JSON syntax\n- `check-merge-conflict` - Prevents committing files with merge conflict markers\n- `check-symlinks` - Checks for broken symlinks\n- `check-toml` - Validates TOML syntax\n- `check-xml` - Validates XML syntax\n- `check-yaml` - Validates YAML syntax\n- `detect-private-key` - Detects accidentally committed private keys\n- `end-of-file-fixer` - Ensures files end with a newline\n- `fix-byte-order-marker` - Removes UTF-8 byte order markers\n- `mixed-line-ending` - Prevents mixed line endings\n- `trailing-whitespace` - Removes trailing whitespace\n\n**Project-Specific Hooks**:\n\n- `nixpkgs-fmt` - Formats Nix files\n- `prettier` - Formats markdown, YAML, JSON, and TOML files\n- `golangci-lint` - Runs Go linter with auto-fix on changed files only\n\n### Manual Hook Execution\n\nRun hooks manually without making a commit:\n\n```bash\n# Run hooks on staged files only\nprek 
run\n\n# Run hooks on all files in the repository\nprek run --all-files\n\n# Run a specific hook\nprek run golangci-lint\n\n# Run hooks on specific files\nprek run --files path/to/file1.go path/to/file2.go\n```\n\n### Workflow Pattern\n\nWith prek installed, your normal workflow becomes:\n\n```bash\n# 1. Make your code changes\nvim hscontrol/state/state.go\n\n# 2. Stage your changes\ngit add .\n\n# 3. Commit - hooks run automatically\ngit commit -m \"feat: add new feature\"\n\n# If hooks fail, they will show which checks failed\n# Fix the issues and try committing again\n```\n\n### Manual golangci-lint\n\nWhile golangci-lint runs automatically via prek, you can also run it manually:\n\n```bash\n# If you have upstream remote configured (recommended)\ngolangci-lint run --new-from-rev=upstream/main --timeout=5m --fix\n\n# If you only have origin remote\ngolangci-lint run --new-from-rev=main --timeout=5m --fix\n```\n\n**Important**: Always use `--new-from-rev` to only lint changed files. This prevents formatting the entire repository and keeps changes focused on your actual modifications.\n\n### Skipping Hooks (Not Recommended)\n\nIn rare cases where you need to skip hooks (e.g., work-in-progress commits), use:\n\n```bash\ngit commit --no-verify -m \"WIP: work in progress\"\n```\n\n**WARNING**: Only use `--no-verify` for temporary WIP commits on feature branches. All commits to main must pass all hooks.\n\n### Troubleshooting\n\n**Hook installation issues**:\n\n```bash\n# Check if hooks are installed\nls -la .git/hooks/pre-commit\n\n# Reinstall hooks\nprek install\n```\n\n**Hooks running slow**:\n\n```bash\n# prek uses optimized fast-path for built-in hooks\n# If running slow, check which hook is taking time with verbose output\nprek run -v\n```\n\n**Update hook configuration**:\n\n```bash\n# After modifying .pre-commit-config.yaml, hooks will automatically use new config\n# No reinstallation needed\n```\n\n## Project Structure & Architecture\n\n### Top-Level Organization\n\n```\nheadscale/\n├── cmd/                    # Command-line applications\n│   ├── headscale/         # Main headscale server binary\n│   └── hi/               # Headscale Integration test runner\n├── hscontrol/            # Core control plane logic\n├── integration/          # End-to-end Docker-based tests\n├── proto/               # Protocol buffer definitions\n├── gen/                 # Generated code (protobuf)\n├── docs/                # Documentation\n└── packaging/           # Distribution packaging\n```\n\n### Core Packages (`hscontrol/`)\n\n**Main Server (`hscontrol/`)**\n\n- `app.go`: Application setup, dependency injection, server lifecycle\n- `handlers.go`: HTTP/gRPC API endpoints for management operations\n- `grpcv1.go`: gRPC service implementation for headscale API\n- `poll.go`: **Critical** - Handles Tailscale MapRequest/MapResponse protocol\n- `noise.go`: Noise protocol implementation for secure client communication\n- `auth.go`: Authentication flows (web, OIDC, command-line)\n- `oidc.go`: OpenID Connect integration for user authentication\n\n**State Management (`hscontrol/state/`)**\n\n- `state.go`: Central coordinator for all subsystems (database, policy, IP allocation, DERP)\n- `node_store.go`: **Performance-critical** - In-memory cache with copy-on-write semantics\n- Thread-safe operations with deadlock detection\n- Coordinates between database persistence and real-time operations\n\n**Database Layer (`hscontrol/db/`)**\n\n- `db.go`: Database abstraction, GORM setup, migration management\n- `node.go`: 
Node lifecycle, registration, expiration, IP assignment\n- `users.go`: User management, namespace isolation\n- `api_key.go`: API authentication tokens\n- `preauth_keys.go`: Pre-authentication keys for automated node registration\n- `ip.go`: IP address allocation and management\n- `policy.go`: Policy storage and retrieval\n- Schema migrations in `schema.sql` with extensive test data coverage\n\n**CRITICAL DATABASE MIGRATION RULES**:\n\n1. **NEVER reorder existing migrations** - Migration order is immutable once committed\n2. **ONLY add new migrations to the END** of the migrations array\n3. **NEVER disable foreign keys** in new migrations - no new migrations should be added to `migrationsRequiringFKDisabled`\n4. **Migration ID format**: `YYYYMMDDHHMM-short-description` (timestamp + descriptive suffix)\n   - Example: `202511131500-add-user-roles`\n   - The timestamp must be chronologically ordered\n5. **New migrations go after the comment** \"As of 2025-07-02, no new IDs should be added here\"\n6. If you need to rename a column that other migrations depend on:\n   - Accept that the old column name will exist in intermediate migration states\n   - Update code to work with the new column name\n   - Let AutoMigrate create the new column if needed\n   - Do NOT try to rename columns that later migrations reference\n\n**Policy Engine (`hscontrol/policy/`)**\n\n- `policy.go`: Core ACL evaluation logic, HuJSON parsing\n- `v2/`: Next-generation policy system with improved filtering\n- `matcher/`: ACL rule matching and evaluation engine\n- Determines peer visibility, route approval, and network access rules\n- Supports both file-based and database-stored policies\n\n**Network Management (`hscontrol/`)**\n\n- `derp/`: DERP (Designated Encrypted Relay for Packets) server implementation\n  - NAT traversal when direct connections fail\n  - Fallback relay for firewall-restricted environments\n- `mapper/`: Converts internal Headscale state to Tailscale's wire protocol format\n  - `tail.go`: Tailscale-specific data structure generation\n- `routes/`: Subnet route management and primary route selection\n- `dns/`: DNS record management and MagicDNS implementation\n\n**Utilities & Support (`hscontrol/`)**\n\n- `types/`: Core data structures, configuration, validation\n- `util/`: Helper functions for networking, DNS, key management\n- `templates/`: Client configuration templates (Apple, Windows, etc.)\n- `notifier/`: Event notification system for real-time updates\n- `metrics.go`: Prometheus metrics collection\n- `capver/`: Tailscale capability version management\n\n### Key Subsystem Interactions\n\n**Node Registration Flow**\n\n1. **Client Connection**: `noise.go` handles secure protocol handshake\n2. **Authentication**: `auth.go` validates credentials (web/OIDC/preauth)\n3. **State Creation**: `state.go` coordinates IP allocation via `db/ip.go`\n4. **Storage**: `db/node.go` persists node, `NodeStore` caches in memory\n5. **Network Setup**: `mapper/` generates initial Tailscale network map\n\n**Ongoing Operations**\n\n1. **Poll Requests**: `poll.go` receives periodic client updates\n2. **State Updates**: `NodeStore` maintains real-time node information\n3. **Policy Application**: `policy/` evaluates ACL rules for peer relationships\n4. **Map Distribution**: `mapper/` sends network topology to all affected clients\n\n**Route Management**\n\n1. **Advertisement**: Clients announce routes via `poll.go` Hostinfo updates\n2. **Storage**: `db/` persists routes, `NodeStore` caches for performance\n3. 
**Approval**: `policy/` auto-approves routes based on ACL rules\n4. **Distribution**: `routes/` selects primary routes, `mapper/` distributes to peers\n\n### Command-Line Tools (`cmd/`)\n\n**Main Server (`cmd/headscale/`)**\n\n- `headscale.go`: CLI parsing, configuration loading, server startup\n- Supports daemon mode, CLI operations (user/node management), database operations\n\n**Integration Test Runner (`cmd/hi/`)**\n\n- `main.go`: Test execution framework with Docker orchestration\n- `run.go`: Individual test execution with artifact collection\n- `doctor.go`: System requirements validation\n- `docker.go`: Container lifecycle management\n- Essential for validating changes against real Tailscale clients\n\n### Generated & External Code\n\n**Protocol Buffers (`proto/` → `gen/`)**\n\n- Defines gRPC API for headscale management operations\n- Client libraries can generate from these definitions\n- Run `make generate` after modifying `.proto` files\n\n**Integration Testing (`integration/`)**\n\n- `scenario.go`: Docker test environment setup\n- `tailscale.go`: Tailscale client container management\n- Individual test files for specific functionality areas\n- Real end-to-end validation with network isolation\n\n### Critical Performance Paths\n\n**High-Frequency Operations**\n\n1. **MapRequest Processing** (`poll.go`): Every 15-60 seconds per client\n2. **NodeStore Reads** (`node_store.go`): Every operation requiring node data\n3. **Policy Evaluation** (`policy/`): On every peer relationship calculation\n4. **Route Lookups** (`routes/`): During network map generation\n\n**Database Write Patterns**\n\n- **Frequent**: Node heartbeats, endpoint updates, route changes\n- **Moderate**: User operations, policy updates, API key management\n- **Rare**: Schema migrations, bulk operations\n\n### Configuration & Deployment\n\n**Configuration** (`hscontrol/types/config.go`)\n\n- Database connection settings (SQLite/PostgreSQL)\n- Network configuration (IP ranges, DNS settings)\n- Policy mode (file vs database)\n- DERP relay configuration\n- OIDC provider settings\n\n**Key Dependencies**\n\n- **GORM**: Database ORM with migration support\n- **Tailscale Libraries**: Core networking and protocol code\n- **Zerolog**: Structured logging throughout the application\n- **Buf**: Protocol buffer toolchain for code generation\n\n### Development Workflow Integration\n\nThe architecture supports incremental development:\n\n- **Unit Tests**: Focus on individual packages (`*_test.go` files)\n- **Integration Tests**: Validate cross-component interactions\n- **Database Tests**: Extensive migration and data integrity validation\n- **Policy Tests**: ACL rule evaluation and edge cases\n- **Performance Tests**: NodeStore and high-frequency operation validation\n\n## Integration Testing System\n\n### Overview\n\nHeadscale uses Docker-based integration tests with real Tailscale clients to validate end-to-end functionality. The integration test system is complex and requires specialized knowledge for effective execution and debugging.\n\n### **MANDATORY: Use the headscale-integration-tester Agent**\n\n**CRITICAL REQUIREMENT**: For ANY integration test execution, analysis, troubleshooting, or validation, you MUST use the `headscale-integration-tester` agent. 
This agent contains specialized knowledge about:\n\n- Test execution strategies and timing requirements\n- Infrastructure vs code issue distinction (99% vs 1% failure patterns)\n- Security-critical debugging rules and forbidden practices\n- Comprehensive artifact analysis workflows\n- Real-world failure patterns from HA debugging experiences\n\n### Quick Reference Commands\n\n```bash\n# Check system requirements (always run first)\ngo run ./cmd/hi doctor\n\n# Run single test (recommended for development)\ngo run ./cmd/hi run \"TestName\"\n\n# Use PostgreSQL for database-heavy tests\ngo run ./cmd/hi run \"TestName\" --postgres\n\n# Pattern matching for related tests\ngo run ./cmd/hi run \"TestPattern*\"\n\n# Run multiple tests concurrently (each gets isolated run ID)\ngo run ./cmd/hi run \"TestPingAllByIP\" &\ngo run ./cmd/hi run \"TestACLAllowUserDst\" &\ngo run ./cmd/hi run \"TestOIDCAuthenticationPingAll\" &\n```\n\n**Concurrent Execution Support**:\n\nThe test runner supports running multiple tests concurrently on the same Docker daemon:\n\n- Each test run gets a **unique Run ID** (format: `YYYYMMDD-HHMMSS-{6-char-hash}`)\n- All containers are labeled with `hi.run-id` for isolation\n- Container names include the run ID for easy identification (e.g., `ts-{runID}-1-74-{hash}`)\n- Dynamic port allocation prevents port conflicts between concurrent runs\n- Cleanup only affects containers belonging to the specific run ID\n- Log directories are isolated per run: `control_logs/{runID}/`\n\n**Critical Notes**:\n\n- Tests generate ~100MB of logs per run in `control_logs/`\n- Running many tests concurrently may cause resource contention (CPU/memory)\n- Clean stale containers periodically: `docker system prune -f`\n\n### Test Artifacts Location\n\nAll test runs save comprehensive debugging artifacts to `control_logs/TIMESTAMP-ID/` including server logs, client logs, database dumps, MapResponse protocol data, and Prometheus metrics.\n\n**For all integration test work, use the headscale-integration-tester agent - it contains the complete knowledge needed for effective testing and debugging.**\n\n## NodeStore Implementation Details\n\n**Key Insight from Recent Work**: The NodeStore is a critical performance optimization that caches node data in memory while ensuring consistency with the database. When working with route advertisements or node state changes:\n\n1. **Timing Considerations**: Route advertisements need time to propagate from clients to server. Use `require.EventuallyWithT()` patterns in tests instead of immediate assertions.\n\n2. **Synchronization Points**: NodeStore updates happen at specific points like `poll.go:420` after Hostinfo changes. Ensure these are maintained when modifying the polling logic.\n\n3. **Peer Visibility**: The NodeStore's `peersFunc` determines which nodes are visible to each other. Policy-based filtering is separate from monitoring visibility - expired nodes should remain visible for debugging but marked as expired.\n\n## Testing Guidelines\n\n### Integration Test Patterns\n\n#### **CRITICAL: EventuallyWithT Pattern for External Calls**\n\n**All external calls in integration tests MUST be wrapped in EventuallyWithT blocks** to handle eventual consistency in distributed systems. 
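In skeleton form, the pattern looks like this (a minimal sketch; the external call and the expected count are illustrative):\n\n```go\n// One external call per block, assertions on the CollectT,\n// an explicit timeout and tick, and a descriptive message.\nassert.EventuallyWithT(t, func(c *assert.CollectT) {\n    nodes, err := headscale.ListNodes() // the single external call\n    assert.NoError(c, err)\n    assert.Len(c, nodes, 2) // illustrative expected count\n}, 10*time.Second, 500*time.Millisecond, \"nodes should be registered\")\n```\n\n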
External calls include:\n\n- `client.Status()` - Getting Tailscale client status\n- `client.Curl()` - Making HTTP requests through clients\n- `client.Traceroute()` - Running network diagnostics\n- `headscale.ListNodes()` - Querying headscale server state\n- Any other calls that interact with external systems or network operations\n\n**Key Rules**:\n\n1. **Never use bare `require.NoError(t, err)` with external calls** - Always wrap in EventuallyWithT\n2. **Keep related assertions together** - If multiple assertions depend on the same external call, keep them in the same EventuallyWithT block\n3. **Split unrelated external calls** - Different external calls should be in separate EventuallyWithT blocks\n4. **Never nest EventuallyWithT calls** - Each EventuallyWithT should be at the same level\n5. **Declare shared variables at function scope** - Variables used across multiple EventuallyWithT blocks must be declared before first use\n\n**Examples**:\n\n```go\n// CORRECT: External call wrapped in EventuallyWithT\nassert.EventuallyWithT(t, func(c *assert.CollectT) {\n    status, err := client.Status()\n    assert.NoError(c, err)\n\n    // Related assertions using the same status call\n    for _, peerKey := range status.Peers() {\n        peerStatus := status.Peer[peerKey]\n        assert.NotNil(c, peerStatus.PrimaryRoutes)\n        requirePeerSubnetRoutesWithCollect(c, peerStatus, expectedRoutes)\n    }\n}, 5*time.Second, 200*time.Millisecond, \"Verifying client status and routes\")\n\n// INCORRECT: Bare external call without EventuallyWithT\nstatus, err := client.Status()  // ❌ Will fail intermittently\nrequire.NoError(t, err)\n\n// CORRECT: Separate EventuallyWithT for different external calls\n// First external call - headscale.ListNodes()\nassert.EventuallyWithT(t, func(c *assert.CollectT) {\n    nodes, err := headscale.ListNodes()\n    assert.NoError(c, err)\n    assert.Len(c, nodes, 2)\n    requireNodeRouteCountWithCollect(c, nodes[0], 2, 2, 2)\n}, 10*time.Second, 500*time.Millisecond, \"route state changes should propagate to nodes\")\n\n// Second external call - client.Status()\nassert.EventuallyWithT(t, func(c *assert.CollectT) {\n    status, err := client.Status()\n    assert.NoError(c, err)\n\n    for _, peerKey := range status.Peers() {\n        peerStatus := status.Peer[peerKey]\n        requirePeerSubnetRoutesWithCollect(c, peerStatus, []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()})\n    }\n}, 10*time.Second, 500*time.Millisecond, \"routes should be visible to client\")\n\n// INCORRECT: Multiple unrelated external calls in same EventuallyWithT\nassert.EventuallyWithT(t, func(c *assert.CollectT) {\n    nodes, err := headscale.ListNodes()  // ❌ First external call\n    assert.NoError(c, err)\n\n    status, err := client.Status()  // ❌ Different external call - should be separate\n    assert.NoError(c, err)\n}, 10*time.Second, 500*time.Millisecond, \"mixed calls\")\n\n// CORRECT: Variable scoping for shared data\nvar (\n    srs1, srs2, srs3       *ipnstate.Status\n    clientStatus           *ipnstate.Status\n    srs1PeerStatus         *ipnstate.PeerStatus\n)\n\nassert.EventuallyWithT(t, func(c *assert.CollectT) {\n    srs1 = subRouter1.MustStatus()  // = not :=\n    srs2 = subRouter2.MustStatus()\n    clientStatus = client.MustStatus()\n\n    srs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey]\n    // assertions...\n}, 5*time.Second, 200*time.Millisecond, \"checking router status\")\n\n// CORRECT: Wrapping client operations\nassert.EventuallyWithT(t, func(c *assert.CollectT) 
{\n    result, err := client.Curl(weburl)\n    assert.NoError(c, err)\n    assert.Len(c, result, 13)\n}, 5*time.Second, 200*time.Millisecond, \"Verifying HTTP connectivity\")\n\nassert.EventuallyWithT(t, func(c *assert.CollectT) {\n    tr, err := client.Traceroute(webip)\n    assert.NoError(c, err)\n    assertTracerouteViaIPWithCollect(c, tr, expectedRouter.MustIPv4())\n}, 5*time.Second, 200*time.Millisecond, \"Verifying network path\")\n```\n\n**Helper Functions**:\n\n- Use `requirePeerSubnetRoutesWithCollect` instead of `requirePeerSubnetRoutes` inside EventuallyWithT\n- Use `requireNodeRouteCountWithCollect` instead of `requireNodeRouteCount` inside EventuallyWithT\n- Use `assertTracerouteViaIPWithCollect` instead of `assertTracerouteViaIP` inside EventuallyWithT\n\n```go\n// Node route checking by actual node properties, not array position\nvar routeNode *v1.Node\nfor _, node := range nodes {\n    if nodeIDStr := fmt.Sprintf(\"%d\", node.GetId()); expectedRoutes[nodeIDStr] != \"\" {\n        routeNode = node\n        break\n    }\n}\n```\n\n### Running Problematic Tests\n\n- Some tests require significant time (e.g., `TestNodeOnlineStatus` runs for 12 minutes)\n- Infrastructure issues like disk space can cause test failures unrelated to code changes\n- Use `--postgres` flag when testing database-heavy scenarios\n\n## Quality Assurance and Testing Requirements\n\n### **MANDATORY: Always Use Specialized Testing Agents**\n\n**CRITICAL REQUIREMENT**: For ANY task involving testing, quality assurance, review, or validation, you MUST use the appropriate specialized agent at the END of your task list. This ensures comprehensive quality validation and prevents regressions.\n\n**Required Agents for Different Task Types**:\n\n1. **Integration Testing**: Use `headscale-integration-tester` agent for:\n   - Running integration tests with `cmd/hi`\n   - Analyzing test failures and artifacts\n   - Troubleshooting Docker-based test infrastructure\n   - Validating end-to-end functionality changes\n\n2. **Quality Control**: Use `quality-control-enforcer` agent for:\n   - Code review and validation\n   - Ensuring best practices compliance\n   - Preventing common pitfalls and anti-patterns\n   - Validating architectural decisions\n\n**Agent Usage Pattern**: Always add the appropriate agent as the FINAL step in any task list to ensure quality validation occurs after all work is complete.\n\n### Integration Test Debugging Reference\n\nTest artifacts are preserved in `control_logs/TIMESTAMP-ID/` including:\n\n- Headscale server logs (stderr/stdout)\n- Tailscale client logs and status\n- Database dumps and network captures\n- MapResponse JSON files for protocol debugging\n\n**For integration test issues, ALWAYS use the headscale-integration-tester agent - do not attempt manual debugging.**\n\n## EventuallyWithT Pattern for Integration Tests\n\n### Overview\n\nEventuallyWithT is a testing pattern used to handle eventual consistency in distributed systems. In Headscale integration tests, many operations are asynchronous - clients advertise routes, the server processes them, updates propagate through the network. 
EventuallyWithT allows tests to wait for these operations to complete while making assertions.\n\n### External Calls That Must Be Wrapped\n\nThe following operations are **external calls** that interact with the headscale server or tailscale clients and MUST be wrapped in EventuallyWithT:\n\n- `headscale.ListNodes()` - Queries server state\n- `client.Status()` - Gets client network status\n- `client.Curl()` - Makes HTTP requests through the network\n- `client.Traceroute()` - Performs network diagnostics\n- `client.Execute()` when running commands that query state\n- Any operation that reads from the headscale server or tailscale client\n\n### Operations That Must NOT Be Wrapped\n\nThe following are **blocking operations** that modify state and should NOT be wrapped in EventuallyWithT:\n\n- `tailscale set` commands (e.g., `--advertise-routes`, `--exit-node`)\n- Any command that changes configuration or state\n- Use `client.MustStatus()` instead of `client.Status()` when you just need the ID for a blocking operation\n\n### Five Key Rules for EventuallyWithT\n\n1. **One External Call Per EventuallyWithT Block**\n   - Each EventuallyWithT should make ONE external call (e.g., ListNodes OR Status)\n   - Related assertions based on that single call can be grouped together\n   - Unrelated external calls must be in separate EventuallyWithT blocks\n\n2. **Variable Scoping**\n   - Declare variables that need to be shared across EventuallyWithT blocks at function scope\n   - Use `=` for assignment inside EventuallyWithT, not `:=` (unless the variable is only used within that block)\n   - Variables declared with `:=` inside EventuallyWithT are not accessible outside\n\n3. **No Nested EventuallyWithT**\n   - NEVER put an EventuallyWithT inside another EventuallyWithT\n   - This is a critical anti-pattern that must be avoided\n\n4. **Use CollectT for Assertions**\n   - Inside EventuallyWithT, use `assert` methods with the CollectT parameter\n   - Helper functions called within EventuallyWithT must accept `*assert.CollectT`\n\n5. 
**Descriptive Messages**\n   - Always provide a descriptive message as the last parameter\n   - Message should explain what condition is being waited for\n\n### Correct Pattern Examples\n\n```go\n// CORRECT: Blocking operation NOT wrapped\nfor _, client := range allClients {\n    status := client.MustStatus()\n    command := []string{\n        \"tailscale\",\n        \"set\",\n        \"--advertise-routes=\" + expectedRoutes[string(status.Self.ID)],\n    }\n    _, _, err = client.Execute(command)\n    require.NoErrorf(t, err, \"failed to advertise route: %s\", err)\n}\n\n// CORRECT: Single external call with related assertions\nvar nodes []*v1.Node\nassert.EventuallyWithT(t, func(c *assert.CollectT) {\n    nodes, err = headscale.ListNodes()\n    assert.NoError(c, err)\n    assert.Len(c, nodes, 2)\n    requireNodeRouteCountWithCollect(c, nodes[0], 2, 2, 2)\n}, 10*time.Second, 500*time.Millisecond, \"nodes should have expected route counts\")\n\n// CORRECT: Separate EventuallyWithT for different external call\nassert.EventuallyWithT(t, func(c *assert.CollectT) {\n    status, err := client.Status()\n    assert.NoError(c, err)\n    for _, peerKey := range status.Peers() {\n        peerStatus := status.Peer[peerKey]\n        requirePeerSubnetRoutesWithCollect(c, peerStatus, expectedPrefixes)\n    }\n}, 10*time.Second, 500*time.Millisecond, \"client should see expected routes\")\n```\n\n### Incorrect Patterns to Avoid\n\n```go\n// INCORRECT: Blocking operation wrapped in EventuallyWithT\nassert.EventuallyWithT(t, func(c *assert.CollectT) {\n    status, err := client.Status()\n    assert.NoError(c, err)\n\n    // This is a blocking operation - should NOT be in EventuallyWithT!\n    command := []string{\n        \"tailscale\",\n        \"set\",\n        \"--advertise-routes=\" + expectedRoutes[string(status.Self.ID)],\n    }\n    _, _, err = client.Execute(command)\n    assert.NoError(c, err)\n}, 5*time.Second, 200*time.Millisecond, \"wrong pattern\")\n\n// INCORRECT: Multiple unrelated external calls in same EventuallyWithT\nassert.EventuallyWithT(t, func(c *assert.CollectT) {\n    // First external call\n    nodes, err := headscale.ListNodes()\n    assert.NoError(c, err)\n    assert.Len(c, nodes, 2)\n\n    // Second unrelated external call - WRONG!\n    status, err := client.Status()\n    assert.NoError(c, err)\n    assert.NotNil(c, status)\n}, 10*time.Second, 500*time.Millisecond, \"mixed operations\")\n```\n\n## Tags-as-Identity Architecture\n\n### Overview\n\nHeadscale implements a **tags-as-identity** model where tags and user ownership are mutually exclusive ways to identify nodes. 
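Stated compactly (a sketch using the helper methods described below):\n\n```go\n// Every node is exactly one of the two, never both.\nswitch {\ncase node.IsTagged():\n    // Tagged node: tags define ownership; UserID, if set,\n    // is only \"created by\" tracking.\ncase node.UserID().Valid():\n    // User-owned node: the user defines ownership; tags are empty.\n}\n```\n\n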
This is a fundamental architectural principle that affects node registration, ownership, ACL evaluation, and API behavior.\n\n### Core Principle: Tags XOR User Ownership\n\nEvery node in Headscale is **either** tagged **or** user-owned, never both:\n\n- **Tagged Nodes**: Ownership is defined by tags (e.g., `tag:server`, `tag:database`)\n  - Tags are set during registration via tagged PreAuthKey\n  - Tags are immutable after registration (cannot be changed via API)\n  - May have `UserID` set for \"created by\" tracking, but ownership is via tags\n  - Identified by: `node.IsTagged()` returns `true`\n\n- **User-Owned Nodes**: Ownership is defined by user assignment\n  - Registered via OIDC, web auth, or untagged PreAuthKey\n  - Node belongs to a specific user's namespace\n  - No tags (empty tags array)\n  - Identified by: `node.UserID().Valid() && !node.IsTagged()`\n\n### Critical Implementation Details\n\n#### Node Identification Methods\n\n```go\n// Primary methods for determining node ownership\nnode.IsTagged()      // Returns true if node has tags OR AuthKey.Tags\nnode.HasTag(tag)     // Returns true if node has specific tag\nnode.IsUserOwned()   // Returns true if UserID set AND not tagged\n\n// IMPORTANT: UserID can be set on tagged nodes for tracking!\n// Always use IsTagged() to determine actual ownership, not just UserID.Valid()\n```\n\n#### UserID Field Semantics\n\n**Critical distinction**: `UserID` has different meanings depending on node type:\n\n- **Tagged nodes**: `UserID` is optional \"created by\" tracking\n  - Indicates which user created the tagged PreAuthKey\n  - Does NOT define ownership (tags define ownership)\n  - Example: User \"alice\" creates tagged PreAuthKey with `tag:server`, node gets `UserID=alice.ID` + `Tags=[\"tag:server\"]`\n\n- **User-owned nodes**: `UserID` defines ownership\n  - Required field for non-tagged nodes\n  - Defines which user namespace the node belongs to\n  - Example: User \"bob\" registers via OIDC, node gets `UserID=bob.ID` + `Tags=[]`\n\n#### Mapper Behavior (mapper/tail.go)\n\nThe mapper converts internal nodes to Tailscale protocol format, handling the TaggedDevices special user:\n\n```go\n// From mapper/tail.go:102-116\nUser: func() tailcfg.UserID {\n    // IMPORTANT: Tags-as-identity model\n    // Tagged nodes ALWAYS use TaggedDevices user, even if UserID is set\n    if node.IsTagged() {\n        return tailcfg.UserID(int64(types.TaggedDevices.ID))\n    }\n    // User-owned nodes: use the actual user ID\n    return tailcfg.UserID(int64(node.UserID().Get()))\n}()\n```\n\n**TaggedDevices constant** (`types.TaggedDevices.ID = 2147455555`): Special user ID for all tagged nodes in MapResponse protocol.\n\n#### Registration Flow\n\n**Tagged Node Registration** (via tagged PreAuthKey):\n\n1. User creates PreAuthKey with tags: `pak.Tags = [\"tag:server\"]`\n2. Node registers with PreAuthKey\n3. Node gets: `Tags = [\"tag:server\"]`, `UserID = user.ID` (optional tracking), `AuthKeyID = pak.ID`\n4. `IsTagged()` returns `true` (ownership via tags)\n5. MapResponse sends `User = TaggedDevices.ID`\n\n**User-Owned Node Registration** (via OIDC/web/untagged PreAuthKey):\n\n1. User authenticates or uses untagged PreAuthKey\n2. Node registers\n3. Node gets: `Tags = []`, `UserID = user.ID` (required)\n4. `IsTagged()` returns `false` (ownership via user)\n5. 
MapResponse sends `User = user.ID`\n\n#### API Validation (SetTags)\n\nThe SetTags gRPC API enforces tags-as-identity rules:\n\n```go\n// From grpcv1.go:340-347 (simplified)\n// User-owned nodes are nodes with UserID that are NOT tagged\nisUserOwned := nodeView.UserID().Valid() && !nodeView.IsTagged()\nif isUserOwned && len(request.GetTags()) > 0 {\n    return nil, fmt.Errorf(\"cannot set tags on user-owned nodes\")\n}\n```\n\n**Key validation rules**:\n\n- ✅ Can call SetTags on tagged nodes (tags already define ownership)\n- ❌ Cannot set tags on user-owned nodes (would violate XOR rule)\n- ❌ Cannot remove all tags from tagged nodes (would orphan the node)\n\n#### Database Layer (db/node.go)\n\n**Tag storage**: Tags are stored in a PostgreSQL ARRAY column and an SQLite JSON column:\n\n```sql\n-- From schema.sql\ntags TEXT[] DEFAULT '{}' NOT NULL,  -- PostgreSQL\ntags TEXT DEFAULT '[]' NOT NULL,    -- SQLite (JSON array)\n```\n\n**Validation** (`state/tags.go`):\n\n- `validateNodeOwnership()`: Enforces tags XOR user rule\n- `validateAndNormalizeTags()`: Validates tag format (`tag:name`) and uniqueness\n\n#### Policy Layer\n\n**Tag Ownership** (policy/v2/policy.go):\n\n```go\nfunc NodeCanHaveTag(node types.NodeView, tag string) bool {\n    // Checks if node's IP is in the tagOwnerMap IP set\n    // This is IP-based authorization, not UserID-based\n    if ips, ok := pm.tagOwnerMap[Tag(tag)]; ok {\n        if slices.ContainsFunc(node.IPs(), ips.Contains) {\n            return true\n        }\n    }\n    return false\n}\n```\n\n**Important**: Tag authorization is based on IP ranges in ACL, not UserID. Tags define identity, ACL authorizes that identity.\n\n### Testing Tags-as-Identity\n\n**Unit Tests** (`hscontrol/types/node_tags_test.go`):\n\n- `TestNodeIsTagged`: Validates IsTagged() for various scenarios\n- `TestNodeOwnershipModel`: Tests tags XOR user ownership\n- `TestUserTypedID`: Helper method validation\n\n**API Tests** (`hscontrol/grpcv1_test.go`):\n\n- `TestSetTags_UserXORTags`: Validates rejection of setting tags on user-owned nodes\n- `TestSetTags_TaggedNode`: Validates that tagged nodes (even with UserID) are not rejected\n\n**Auth Tests** (`hscontrol/auth_test.go:890-928`):\n\n- Tests node registration with tagged PreAuthKey\n- Validates tags are applied during registration\n\n### Common Pitfalls\n\n1. **Don't check only `UserID.Valid()` to determine user ownership**\n   - ❌ Wrong: `if node.UserID().Valid() { /* user-owned */ }`\n   - ✅ Correct: `if node.UserID().Valid() && !node.IsTagged() { /* user-owned */ }`\n\n2. **Don't assume tagged nodes never have UserID set**\n   - Tagged nodes MAY have UserID for \"created by\" tracking\n   - Always use `IsTagged()` to determine ownership type\n\n3. **Don't allow setting tags on user-owned nodes**\n   - This violates the tags XOR user principle\n   - Use API validation to prevent this\n\n4. **Don't forget TaggedDevices in mapper**\n   - All tagged nodes MUST use `TaggedDevices.ID` in MapResponse\n   - User ID is only for actual user-owned nodes\n\n### Migration Considerations\n\nWhen nodes transition between ownership models:\n\n- **No automatic migration**: Tags-as-identity is set at registration and immutable\n- **Re-registration required**: To change from user-owned to tagged (or vice versa), the node must be deleted and re-registered\n- **UserID persistence**: UserID on tagged nodes is informational and not cleared\n\n### Architecture Benefits\n\nThe tags-as-identity model provides:\n\n1. **Clear ownership semantics**: No ambiguity about who/what owns a node\n2. 
## Logging Patterns\n\n### Incremental Log Event Building\n\nWhen building log statements with multiple fields, especially with conditional fields, use the **incremental log event pattern** instead of long single-line chains. This improves readability and allows conditional field addition.\n\n**Pattern:**\n\n```go\n// GOOD: Incremental building with conditional fields\nlogEvent := log.Debug().\n    Str(\"node\", node.Hostname).\n    Str(\"machine_key\", node.MachineKey.ShortString()).\n    Str(\"node_key\", node.NodeKey.ShortString())\n\nif node.User != nil {\n    logEvent = logEvent.Str(\"user\", node.User.Username())\n} else if node.UserID != nil {\n    logEvent = logEvent.Uint(\"user_id\", *node.UserID)\n} else {\n    logEvent = logEvent.Str(\"user\", \"none\")\n}\n\nlogEvent.Msg(\"Registering node\")\n```\n\n**Key rules:**\n\n1. **Assign chained calls back to the variable**: `logEvent = logEvent.Str(...)` - zerolog's `Event` methods return the event so the chain can continue; keeping the assignment makes the data flow explicit and stays correct even if a branch rebinds the variable\n2. **Use for conditional fields**: When fields depend on runtime conditions, build incrementally\n3. **Use for long log lines**: When a log line exceeds ~100 characters, split it for readability\n4. **Call `.Msg()` at the end**: The final `.Msg()` or `.Msgf()` sends the log event\n\n**Anti-pattern to avoid:**\n\n```go\n// BAD: Long single-line chains are hard to read and can't have conditional fields\nlog.Debug().Caller().Str(\"node\", node.Hostname).Str(\"machine_key\", node.MachineKey.ShortString()).Str(\"node_key\", node.NodeKey.ShortString()).Str(\"user\", node.User.Username()).Msg(\"Registering node\")\n\n// BAD: Dropping the return value. zerolog's *Event methods mutate the event\n// in place, so this happens to work, but it breaks the convention and would\n// silently lose fields with a type that returns a copy (e.g. zerolog.Context).\nlogEvent := log.Debug().Str(\"node\", node.Hostname)\nlogEvent.Str(\"user\", username)  // Not assigned back - assign for consistency\nlogEvent.Msg(\"message\")\n```\n\n**When to use this pattern:**\n\n- Log statements with 4+ fields\n- Any log with conditional fields\n- Complex logging in loops or error handling\n- When you need to add context incrementally\n\n**Example from codebase** (`hscontrol/db/node.go`):\n\n```go\nlogEvent := log.Debug().\n    Str(\"node\", node.Hostname).\n    Str(\"machine_key\", node.MachineKey.ShortString()).\n    Str(\"node_key\", node.NodeKey.ShortString())\n\nif node.User != nil {\n    logEvent = logEvent.Str(\"user\", node.User.Username())\n} else if node.UserID != nil {\n    logEvent = logEvent.Uint(\"user_id\", *node.UserID)\n} else {\n    logEvent = logEvent.Str(\"user\", \"none\")\n}\n\nlogEvent.Msg(\"Registering test node\")\n```\n\n### Avoiding Log Helper Functions\n\nPrefer the incremental log event pattern over creating helper functions that return multiple logging closures. 
Helper functions like `logPollFunc` create unnecessary indirection and allocate closures.\n\n**Instead of:**\n\n```go\n// AVOID: Helper function returning closures\nfunc logPollFunc(req tailcfg.MapRequest, node *types.Node) (\n    func(string, ...any),  // warnf\n    func(string, ...any),  // infof\n    func(string, ...any),  // tracef\n    func(error, string, ...any),  // errf\n) {\n    return func(msg string, a ...any) {\n        log.Warn().\n            Caller().\n            Bool(\"omitPeers\", req.OmitPeers).\n            Bool(\"stream\", req.Stream).\n            Uint64(\"node.id\", node.ID.Uint64()).\n            Str(\"node.name\", node.Hostname).\n            Msgf(msg, a...)\n    },\n    // ... more closures\n}\n```\n\n**Prefer:**\n\n```go\n// BETTER: Build log events inline with shared context\nfunc (m *mapSession) logTrace(msg string) {\n    log.Trace().\n        Caller().\n        Bool(\"omitPeers\", m.req.OmitPeers).\n        Bool(\"stream\", m.req.Stream).\n        Uint64(\"node.id\", m.node.ID.Uint64()).\n        Str(\"node.name\", m.node.Hostname).\n        Msg(msg)\n}\n\n// Or use incremental building for complex cases\nlogEvent := log.Trace().\n    Caller().\n    Bool(\"omitPeers\", m.req.OmitPeers).\n    Bool(\"stream\", m.req.Stream).\n    Uint64(\"node.id\", m.node.ID.Uint64()).\n    Str(\"node.name\", m.node.Hostname)\n\nif additionalContext {\n    logEvent = logEvent.Str(\"extra\", value)\n}\n\nlogEvent.Msg(\"Operation completed\")\n```\n\n## Important Notes\n\n- **Dependencies**: Use `nix develop` for consistent toolchain (Go, buf, protobuf tools, linting)\n- **Protocol Buffers**: Changes to `proto/` require `make generate` and should be committed separately\n- **Code Style**: Enforced via golangci-lint with golines (width 88) and gofumpt formatting\n- **Linting**: ALL code must pass `golangci-lint run --new-from-rev=upstream/main --timeout=5m --fix` before commit\n- **Database**: Supports both SQLite (development) and PostgreSQL (production/testing)\n- **Integration Tests**: Require Docker and can consume significant disk space - use headscale-integration-tester agent\n- **Performance**: NodeStore optimizations are critical for scale - be careful with changes to state management\n- **Quality Assurance**: Always use appropriate specialized agents for testing and validation tasks\n- **Tags-as-Identity**: Tags and user ownership are mutually exclusive - always use `IsTagged()` to determine ownership\n"
  },
  {
    "path": "CHANGELOG.md",
    "content": "# CHANGELOG\n\n## 0.29.0 (202x-xx-xx)\n\n**Minimum supported Tailscale client version: v1.76.0**\n\n### Tailscale ACL compatibility improvements\n\nExtensive test cases were systematically generated using Tailscale clients and the official SaaS\nto understand how the packet filter should be generated. We discovered a few differences, but\noverall our implementation was very close.\n[#3036](https://github.com/juanfont/headscale/pull/3036)\n\n### SSH check action\n\nSSH rules with `\"action\": \"check\"` are now supported. When a client initiates a SSH connection to a node\nwith a `check` action policy, the user is prompted to authenticate via OIDC or CLI approval before access\nis granted.\n\nA new `headscale auth` CLI command group supports the approval flow:\n\n- `headscale auth approve --auth-id <id>` approves a pending authentication request (SSH check or web auth)\n- `headscale auth reject --auth-id <id>` rejects a pending authentication request\n- `headscale auth register --auth-id <id> --user <user>` registers a node (replaces deprecated `headscale nodes register`)\n\n[#1850](https://github.com/juanfont/headscale/pull/1850)\n\n### BREAKING\n\n- **ACL Policy**: Wildcard (`*`) in ACL sources and destinations now resolves to Tailscale's CGNAT range (`100.64.0.0/10`) and ULA range (`fd7a:115c:a1e0::/48`) instead of all IPs (`0.0.0.0/0` and `::/0`) [#3036](https://github.com/juanfont/headscale/pull/3036)\n  - This better matches Tailscale's security model where `*` means \"any node in the tailnet\" rather than \"any IP address\"\n  - Policies relying on wildcard to match non-Tailscale IPs will need to use explicit CIDR ranges instead\n  - **Note**: Users with non-standard IP ranges configured in `prefixes.ipv4` or `prefixes.ipv6` (which is unsupported and produces a warning) will need to explicitly specify their CIDR ranges in ACL rules instead of using `*`\n- **ACL Policy**: Validate autogroup:self source restrictions matching Tailscale behavior - tags, hosts, and IPs are rejected as sources for autogroup:self destinations [#3036](https://github.com/juanfont/headscale/pull/3036)\n  - Policies using tags, hosts, or IP addresses as sources for autogroup:self destinations will now fail validation\n- **Upgrade path**: Headscale now enforces a strict version upgrade path [#3083](https://github.com/juanfont/headscale/pull/3083)\n  - Skipping minor versions (e.g. 
0.27 → 0.29) is blocked; upgrade one minor version at a time\n  - Downgrading to a previous minor version is blocked\n  - Patch version changes within the same minor are always allowed\n- **ACL Policy**: The `proto:icmp` protocol name now only includes ICMPv4 (protocol 1), matching Tailscale behavior [#3036](https://github.com/juanfont/headscale/pull/3036)\n  - Previously, `proto:icmp` included both ICMPv4 and ICMPv6\n  - Use `proto:ipv6-icmp` or protocol number `58` explicitly for ICMPv6\n- **CLI**: `headscale nodes register` is deprecated in favour of `headscale auth register --auth-id <id> --user <user>` [#1850](https://github.com/juanfont/headscale/pull/1850)\n  - The old command continues to work but will be removed in a future release\n\n### Changes\n\n- **SSH Policy**: Add support for `localpart:*@<domain>` in SSH rule `users` field, mapping each matching user's email local-part as their OS username [#3091](https://github.com/juanfont/headscale/pull/3091)\n- **ACL Policy**: Add ICMP and IPv6-ICMP protocols to default filter rules when no protocol is specified [#3036](https://github.com/juanfont/headscale/pull/3036)\n- **ACL Policy**: Fix autogroup:self handling for tagged nodes - tagged nodes no longer incorrectly receive autogroup:self filter rules [#3036](https://github.com/juanfont/headscale/pull/3036)\n- **ACL Policy**: Use CIDR format for autogroup:self destination IPs matching Tailscale behavior [#3036](https://github.com/juanfont/headscale/pull/3036)\n- **ACL Policy**: Merge filter rules with identical SrcIPs and IPProto matching Tailscale behavior - multiple ACL rules with the same source now produce a single FilterRule with combined DstPorts [#3036](https://github.com/juanfont/headscale/pull/3036)\n- Remove deprecated `--namespace` flag from `nodes list`, `nodes register`, and `debug create-node` commands (use `--user` instead) [#3093](https://github.com/juanfont/headscale/pull/3093)\n- Remove deprecated `namespace`/`ns` command aliases for `users` and `machine`/`machines` aliases for `nodes` [#3093](https://github.com/juanfont/headscale/pull/3093)\n- Add SSH `check` action support with OIDC and CLI-based approval flows [#1850](https://github.com/juanfont/headscale/pull/1850)\n- Add `headscale auth register`, `headscale auth approve`, and `headscale auth reject` CLI commands [#1850](https://github.com/juanfont/headscale/pull/1850)\n- Add `auth` related routes to the API. The `auth/register` endpoint now expects data as JSON [#1850](https://github.com/juanfont/headscale/pull/1850)\n- Deprecate `headscale nodes register --key` in favour of `headscale auth register --auth-id` [#1850](https://github.com/juanfont/headscale/pull/1850)\n- Generalise auth templates into reusable `AuthSuccess` and `AuthWeb` components [#1850](https://github.com/juanfont/headscale/pull/1850)\n- Unify auth pipeline with `AuthVerdict` type, supporting registration, reauthentication, and SSH checks [#1850](https://github.com/juanfont/headscale/pull/1850)\n\n## 0.28.0 (2026-02-04)\n\n**Minimum supported Tailscale client version: v1.74.0**\n\n### Tags as identity\n\nTags are now implemented following the Tailscale model where tags and user ownership are mutually exclusive. Devices can be either\nuser-owned (authenticated via web/OIDC) or tagged (authenticated via tagged PreAuthKeys). Tagged devices receive their identity from\ntags rather than users, making them suitable for servers and infrastructure. Applying a tag to a device removes user-based\nownership. 
See the [Tailscale tags documentation](https://tailscale.com/kb/1068/tags) for details on how tags work.\n\nUser-owned nodes can now request tags during registration using `--advertise-tags`. Tags are validated against the `tagOwners` policy\nand applied at registration time. Tags can be managed via the CLI or API after registration. Tagged nodes can return to user-owned\nby re-authenticating with `tailscale up --advertise-tags= --force-reauth`.\n\nA one-time migration will validate and migrate any `RequestTags` (stored in hostinfo) to the tags column. Tags are validated against\nyour policy's `tagOwners` rules during migration. [#3011](https://github.com/juanfont/headscale/pull/3011)\n\n### Smarter map updates\n\nThe map update system has been rewritten to send smaller, partial updates instead of full network maps whenever possible. This reduces bandwidth usage and improves performance, especially for large networks. The system now properly tracks peer\nchanges and can send removal notifications when nodes are removed due to policy changes.\n[#2856](https://github.com/juanfont/headscale/pull/2856) [#2961](https://github.com/juanfont/headscale/pull/2961)\n\n### Pre-authentication key security improvements\n\nPre-authentication keys now use bcrypt hashing for improved security [#2853](https://github.com/juanfont/headscale/pull/2853). Keys\nare stored as a prefix and bcrypt hash instead of plaintext. The full key is only displayed once at creation time. When listing keys,\nonly the prefix is shown (e.g., `hskey-auth-{prefix}-***`). All new keys use the format `hskey-auth-{prefix}-{secret}`. Legacy plaintext keys in the format `{secret}` will continue to work for backwards compatibility.\n\n### Web registration templates redesign\n\nThe OIDC callback and device registration web pages have been updated to use the Material for MkDocs design system from the official\ndocumentation. The templates now use consistent typography, spacing, and colours across all registration flows.\n\n### Database migration support removed for pre-0.25.0 databases\n\nHeadscale no longer supports direct upgrades from databases created before version 0.25.0. 
Users on older versions must upgrade\nsequentially through each stable release, selecting the latest patch version available for each minor release.\n\n### BREAKING\n\n- **API**: The Node message in the gRPC/REST API has been simplified - the `ForcedTags`, `InvalidTags`, and `ValidTags` fields have been removed and replaced with a single `Tags` field that contains the node's applied tags [#2993](https://github.com/juanfont/headscale/pull/2993)\n  - API clients should use the `Tags` field instead of `ValidTags`\n  - The `headscale nodes list` CLI command now always shows a Tags column and the `--tags` flag has been removed\n- **PreAuthKey CLI**: Commands now use ID-based operations instead of user+key combinations [#2992](https://github.com/juanfont/headscale/pull/2992)\n  - `headscale preauthkeys create` no longer requires `--user` flag (optional for tracking creation)\n  - `headscale preauthkeys list` lists all keys (no longer filtered by user)\n  - `headscale preauthkeys expire --id <ID>` replaces `--user <USER> <KEY>`\n  - `headscale preauthkeys delete --id <ID>` replaces `--user <USER> <KEY>`\n\n  **Before:**\n\n  ```bash\n  headscale preauthkeys create --user 1 --reusable --tags tag:server\n  headscale preauthkeys list --user 1\n  headscale preauthkeys expire --user 1 <KEY>\n  headscale preauthkeys delete --user 1 <KEY>\n  ```\n\n  **After:**\n\n  ```bash\n  headscale preauthkeys create --reusable --tags tag:server\n  headscale preauthkeys list\n  headscale preauthkeys expire --id 123\n  headscale preauthkeys delete --id 123\n  ```\n\n- **Tags**: The gRPC `SetTags` endpoint now allows converting user-owned nodes to tagged nodes by setting tags. [#2885](https://github.com/juanfont/headscale/pull/2885)\n- **Tags**: Tags are now resolved from the node's stored Tags field only [#2931](https://github.com/juanfont/headscale/pull/2931)\n  - `--advertise-tags` is processed during registration, not on every policy evaluation\n  - PreAuthKey tagged devices ignore `--advertise-tags` from clients\n  - User-owned nodes can use `--advertise-tags` if authorized by `tagOwners` policy\n  - Tags can be managed via CLI (`headscale nodes tag`) or the SetTags API after registration\n- Database migration support removed for pre-0.25.0 databases [#2883](https://github.com/juanfont/headscale/pull/2883)\n  - If you are running a version older than 0.25.0, you must upgrade to 0.25.1 first, then upgrade to this release\n  - See the [upgrade path documentation](https://headscale.net/stable/about/faq/#what-is-the-recommended-update-path-can-i-skip-multiple-versions-while-updating) for detailed guidance\n  - In version 0.29, all migrations before 0.28.0 will also be removed\n- Remove ability to move nodes between users [#2922](https://github.com/juanfont/headscale/pull/2922)\n  - The `headscale nodes move` CLI command has been removed\n  - The `MoveNode` API endpoint has been removed\n  - Nodes are permanently associated with their user or tag at registration time\n- Add `oidc.email_verified_required` config option to control email verification requirement [#2860](https://github.com/juanfont/headscale/pull/2860)\n  - When `true` (default), only verified emails can authenticate via OIDC in conjunction with `oidc.allowed_domains` or\n    `oidc.allowed_users`. Previous versions allowed to authenticate with an unverified email but did not store the email\n    address in the user profile. 
This is now rejected during authentication with an `unverified email` error.\n  - When `false`, unverified emails are allowed for OIDC authentication and the email address is stored in the user\n    profile regardless of its verification state.\n- **SSH Policy**: Wildcard (`*`) is no longer supported as an SSH destination [#3009](https://github.com/juanfont/headscale/issues/3009)\n  - Use `autogroup:member` for user-owned devices\n  - Use `autogroup:tagged` for tagged devices\n  - Use specific tags (e.g., `tag:server`) for targeted access\n\n  **Before:**\n\n  ```json\n  { \"action\": \"accept\", \"src\": [\"group:admins\"], \"dst\": [\"*\"], \"users\": [\"root\"] }\n  ```\n\n  **After:**\n\n  ```json\n  { \"action\": \"accept\", \"src\": [\"group:admins\"], \"dst\": [\"autogroup:member\", \"autogroup:tagged\"], \"users\": [\"root\"] }\n  ```\n\n- **SSH Policy**: SSH source/destination validation now enforces Tailscale's security model [#3010](https://github.com/juanfont/headscale/issues/3010)\n\n  Per [Tailscale SSH documentation](https://tailscale.com/kb/1193/tailscale-ssh), the following rules are now enforced:\n  1. **Tags cannot SSH to user-owned devices**: SSH rules with `tag:*` or `autogroup:tagged` as source cannot have username destinations (e.g., `alice@`) or `autogroup:member`/`autogroup:self` as destination\n  2. **Username destinations require same-user source**: If destination is a specific username (e.g., `alice@`), the source must be that exact same user only. Use `autogroup:self` for same-user SSH access instead\n\n  **Invalid policies now rejected at load time:**\n\n  ```json\n  // INVALID: tag source to user destination\n  {\"src\": [\"tag:server\"], \"dst\": [\"alice@\"], ...}\n\n  // INVALID: autogroup:tagged to autogroup:member\n  {\"src\": [\"autogroup:tagged\"], \"dst\": [\"autogroup:member\"], ...}\n\n  // INVALID: group to specific user (use autogroup:self instead)\n  {\"src\": [\"group:admins\"], \"dst\": [\"alice@\"], ...}\n  ```\n\n  **Valid patterns:**\n\n  ```json\n  // Users/groups can SSH to their own devices via autogroup:self\n  {\"src\": [\"group:admins\"], \"dst\": [\"autogroup:self\"], ...}\n\n  // Users/groups can SSH to tagged devices\n  {\"src\": [\"group:admins\"], \"dst\": [\"autogroup:tagged\"], ...}\n\n  // Tagged devices can SSH to other tagged devices\n  {\"src\": [\"autogroup:tagged\"], \"dst\": [\"autogroup:tagged\"], ...}\n\n  // Same user can SSH to their own devices\n  {\"src\": [\"alice@\"], \"dst\": [\"alice@\"], ...}\n  ```\n\n### Changes\n\n- Smarter change notifications send partial map updates and node removals instead of full maps [#2961](https://github.com/juanfont/headscale/pull/2961)\n  - Send lightweight endpoint and DERP region updates instead of full maps [#2856](https://github.com/juanfont/headscale/pull/2856)\n- Add NixOS module in repository for faster iteration [#2857](https://github.com/juanfont/headscale/pull/2857)\n- Add favicon to webpages [#2858](https://github.com/juanfont/headscale/pull/2858)\n- Redesign OIDC callback and registration web templates [#2832](https://github.com/juanfont/headscale/pull/2832)\n- Reclaim IPs from the IP allocator when nodes are deleted [#2831](https://github.com/juanfont/headscale/pull/2831)\n- Add bcrypt hashing for pre-authentication keys [#2853](https://github.com/juanfont/headscale/pull/2853)\n- Add prefix to API keys (`hskey-api-{prefix}-{secret}`) [#2853](https://github.com/juanfont/headscale/pull/2853)\n- Add prefix to registration keys for web authentication tracking 
(`hskey-reg-{random}`) [#2853](https://github.com/juanfont/headscale/pull/2853)\n- Tags can now be tagOwner of other tags [#2930](https://github.com/juanfont/headscale/pull/2930)\n- Add `taildrop.enabled` configuration option to enable/disable Taildrop file sharing [#2955](https://github.com/juanfont/headscale/pull/2955)\n- Allow disabling the metrics server by setting empty `metrics_listen_addr` [#2914](https://github.com/juanfont/headscale/pull/2914)\n- Log ACME/autocert errors for easier debugging [#2933](https://github.com/juanfont/headscale/pull/2933)\n- Improve CLI list output formatting [#2951](https://github.com/juanfont/headscale/pull/2951)\n- Use Debian 13 distroless base images for containers [#2944](https://github.com/juanfont/headscale/pull/2944)\n- Fix ACL policy not applied to new OIDC nodes until client restart [#2890](https://github.com/juanfont/headscale/pull/2890)\n- Fix autogroup:self preventing visibility of nodes matched by other ACL rules [#2882](https://github.com/juanfont/headscale/pull/2882)\n- Fix nodes being rejected after pre-authentication key expiration [#2917](https://github.com/juanfont/headscale/pull/2917)\n- Fix list-routes command respecting identifier filter with JSON output [#2927](https://github.com/juanfont/headscale/pull/2927)\n- Add `--id` flag to expire/delete commands as alternative to `--prefix` for API Keys [#3016](https://github.com/juanfont/headscale/pull/3016)\n\n## 0.27.1 (2025-11-11)\n\n**Minimum supported Tailscale client version: v1.64.0**\n\n### Changes\n\n- Expire nodes with a custom timestamp [#2828](https://github.com/juanfont/headscale/pull/2828)\n- Fix issue where node expiry was reset when tailscaled restarts [#2875](https://github.com/juanfont/headscale/pull/2875)\n- Fix OIDC authentication when multiple login URLs are opened [#2861](https://github.com/juanfont/headscale/pull/2861)\n- Fix node re-registration failing with expired auth keys [#2859](https://github.com/juanfont/headscale/pull/2859)\n- Remove old unused database tables and indices [#2844](https://github.com/juanfont/headscale/pull/2844) [#2872](https://github.com/juanfont/headscale/pull/2872)\n- Ignore litestream tables during database validation [#2843](https://github.com/juanfont/headscale/pull/2843)\n- Fix exit node visibility to respect ACL rules [#2855](https://github.com/juanfont/headscale/pull/2855)\n- Fix SSH policy becoming empty when unknown user is referenced [#2874](https://github.com/juanfont/headscale/pull/2874)\n- Fix policy validation when using bypass-grpc mode [#2854](https://github.com/juanfont/headscale/pull/2854)\n- Fix autogroup:self interaction with other ACL rules [#2842](https://github.com/juanfont/headscale/pull/2842)\n- Fix flaky DERP map shuffle test [#2848](https://github.com/juanfont/headscale/pull/2848)\n- Use current stable base images for Debian and Alpine containers [#2827](https://github.com/juanfont/headscale/pull/2827)\n\n## 0.27.0 (2025-10-27)\n\n**Minimum supported Tailscale client version: v1.64.0**\n\n### Database integrity improvements\n\nThis release includes a significant database migration that addresses\nlongstanding issues with the database schema and data integrity that has\naccumulated over the years. 
The migration introduces a `schema.sql` file as the\nsource of truth for the expected database schema, to ensure that new migrations\ndo not introduce divergence again.\n\nThese issues arose from a combination of factors discovered over time: SQLite\nforeign keys not being enforced for many early versions, all migrations being\nrun in one large function until version 0.23.0, and inconsistent use of GORM's\nAutoMigrate feature. Moving forward, all new migrations will be explicit SQL\noperations rather than relying on GORM AutoMigrate, and foreign keys will be\nenforced throughout the migration process.\n\nWe are only improving SQLite databases with this change - PostgreSQL databases\nare not affected.\n\nPlease read the\n[PR description](https://github.com/juanfont/headscale/pull/2617) for more\ntechnical details about the issues and solutions.\n\n**SQLite Database Backup Example:**\n\n```bash\n# Stop headscale\nsystemctl stop headscale\n\n# Backup sqlite database\ncp /var/lib/headscale/db.sqlite /var/lib/headscale/db.sqlite.backup\n\n# Backup sqlite WAL/SHM files (if they exist)\ncp /var/lib/headscale/db.sqlite-wal /var/lib/headscale/db.sqlite-wal.backup\ncp /var/lib/headscale/db.sqlite-shm /var/lib/headscale/db.sqlite-shm.backup\n\n# Start headscale (migration will run automatically)\nsystemctl start headscale\n```\n\n### DERPMap update frequency\n\nThe default DERPMap update frequency has been changed from 24 hours to 3 hours.\nIf you set the `derp.update_frequency` configuration option, it is recommended\nto change it to `3h` to ensure that the headscale instance gets the latest\nDERPMap updates when upstream is changed.\n\n### Autogroups\n\nThis release adds support for the three missing autogroups: `self`\n(experimental), `member`, and `tagged`. Please refer to the\n[documentation](https://tailscale.com/kb/1018/autogroups/) for a detailed\nexplanation.\n\n`autogroup:self` is marked as experimental and should be used with caution, but\nwe need help testing it. Experimental here means two things: first, generating\nthe packet filter from policies that use `autogroup:self` is very expensive, and\nit might perform poorly, or simply not work, on Headscale installations with a\nlarge number of nodes. Second, the implementation might have bugs or edge cases\nwe are not aware of, meaning that nodes or users might gain _more_ access than\nexpected. Please report bugs.\n\n### Node store (in memory database)\n\nUnder the hood, we have added a new data structure to store nodes in memory. This\ndata structure is called `NodeStore` and aims to reduce the reading and writing\nof nodes to the database layer. We have not benchmarked it, but expect it to\nimprove performance for read-heavy workloads. We think of it as: \"worst case\" we\nhave moved the bottleneck somewhere else, and \"best case\" we should see a good\nimprovement in compute resource usage at the expense of memory usage. 
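To illustrate the idea only - a minimal sketch of a read-heavy in-memory store, not the actual `NodeStore` implementation:\n\n```go\npackage store\n\nimport \"sync\"\n\n// nodeStore sketches an in-memory node table: most lookups are served from\n// memory instead of requiring a database round trip.\ntype nodeStore struct {\n    mu    sync.RWMutex\n    nodes map[uint64]string // node ID -> node data (hostname as a stand-in)\n}\n\nfunc newNodeStore() *nodeStore {\n    return &nodeStore{nodes: make(map[uint64]string)}\n}\n\n// get serves reads under a shared lock, so many readers proceed in parallel.\nfunc (s *nodeStore) get(id uint64) (string, bool) {\n    s.mu.RLock()\n    defer s.mu.RUnlock()\n    hostname, ok := s.nodes[id]\n    return hostname, ok\n}\n\n// put takes the exclusive lock; writes are expected to be comparatively rare.\nfunc (s *nodeStore) put(id uint64, hostname string) {\n    s.mu.Lock()\n    defer s.mu.Unlock()\n    s.nodes[id] = hostname\n}\n```\n\n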
We are\nquite excited for this change and think it will make it easier for us to improve\nthe code base over time and make it more correct and efficient.\n\n### BREAKING\n\n- Remove support for 32-bit binaries [#2692](https://github.com/juanfont/headscale/pull/2692)\n- Policy: Zero or empty destination port is no longer allowed [#2606](https://github.com/juanfont/headscale/pull/2606)\n- Stricter hostname validation [#2383](https://github.com/juanfont/headscale/pull/2383)\n  - Hostnames must be valid DNS labels (2-63 characters, alphanumeric and\n    hyphens only, cannot start/end with hyphen)\n  - **Client Registration (New Nodes)**: Invalid hostnames are automatically\n    renamed to `invalid-XXXXXX` format\n    - `my-laptop` → accepted as-is\n    - `My-Laptop` → `my-laptop` (lowercased)\n    - `my_laptop` → `invalid-a1b2c3` (underscore not allowed)\n    - `test@host` → `invalid-d4e5f6` (@ not allowed)\n    - `laptop-🚀` → `invalid-j1k2l3` (emoji not allowed)\n  - **Hostinfo Updates / CLI**: Invalid hostnames are rejected with an error\n    - Valid names are accepted or lowercased\n    - Names with invalid characters, too short (<2), too long (>63), or\n      starting/ending with hyphen are rejected\n\n### Changes\n\n- **Database schema migration improvements for SQLite** [#2617](https://github.com/juanfont/headscale/pull/2617)\n  - **IMPORTANT: Backup your SQLite database before upgrading**\n  - Introduces safer table renaming migration strategy\n  - Addresses longstanding database integrity issues\n- Add flag to directly manipulate the policy in the database [#2765](https://github.com/juanfont/headscale/pull/2765)\n- DERPmap update frequency default changed from 24h to 3h [#2741](https://github.com/juanfont/headscale/pull/2741)\n- DERPmap update mechanism has been improved with retry, and is now failing\n  conservatively, preserving the old map upon failure.\n  [#2741](https://github.com/juanfont/headscale/pull/2741)\n- Add support for `autogroup:member`, `autogroup:tagged` [#2572](https://github.com/juanfont/headscale/pull/2572)\n- Fix bug where return routes were being removed by policy [#2767](https://github.com/juanfont/headscale/pull/2767)\n- Remove policy v1 code [#2600](https://github.com/juanfont/headscale/pull/2600)\n- Refactor Debian/Ubuntu packaging and drop support for Ubuntu 20.04. 
[#2614](https://github.com/juanfont/headscale/pull/2614)\n- Remove redundant check regarding `noise` config [#2658](https://github.com/juanfont/headscale/pull/2658)\n- Refactor OpenID Connect documentation [#2625](https://github.com/juanfont/headscale/pull/2625)\n- Don't crash if config file is missing [#2656](https://github.com/juanfont/headscale/pull/2656)\n- Adds `/robots.txt` endpoint to avoid crawlers [#2643](https://github.com/juanfont/headscale/pull/2643)\n- OIDC: Use group claim from UserInfo [#2663](https://github.com/juanfont/headscale/pull/2663)\n- OIDC: Update user with claims from UserInfo _before_ comparing with allowed\n  groups, email and domain\n  [#2663](https://github.com/juanfont/headscale/pull/2663)\n- Policy will now reject invalid fields, making it easier to spot spelling\n  errors [#2764](https://github.com/juanfont/headscale/pull/2764)\n- Add FAQ entry on how to recover from an invalid policy in the database [#2776](https://github.com/juanfont/headscale/pull/2776)\n- EXPERIMENTAL: Add support for `autogroup:self` [#2789](https://github.com/juanfont/headscale/pull/2789)\n- Add healthcheck command [#2659](https://github.com/juanfont/headscale/pull/2659)\n\n## 0.26.1 (2025-06-06)\n\n### Changes\n\n- Ensure nodes are matching both node key and machine key when connecting. [#2642](https://github.com/juanfont/headscale/pull/2642)\n\n## 0.26.0 (2025-05-14)\n\n### BREAKING\n\n#### Routes\n\nRoute internals have been rewritten, removing the dedicated route table in the\ndatabase. This was done to simplify the codebase, which had grown unnecessarily\ncomplex after the routes were split into separate tables. The overhead of having\nto go via the database and keeping the state in sync made the code very hard to\nreason about and prone to errors. The majority of the route state is only\nrelevant when headscale is running, and is now only kept in memory. As part of\nthis, the CLI and API has been simplified to reflect the changes;\n\n```console\n$ headscale nodes list-routes\nID | Hostname           | Approved | Available       | Serving (Primary)\n1  | ts-head-ruqsg8     |          | 0.0.0.0/0, ::/0 |\n2  | ts-unstable-fq7ob4 |          | 0.0.0.0/0, ::/0 |\n\n$ headscale nodes approve-routes --identifier 1 --routes 0.0.0.0/0,::/0\nNode updated\n\n$ headscale nodes list-routes\nID | Hostname           | Approved        | Available       | Serving (Primary)\n1  | ts-head-ruqsg8     | 0.0.0.0/0, ::/0 | 0.0.0.0/0, ::/0 | 0.0.0.0/0, ::/0\n2  | ts-unstable-fq7ob4 |                 | 0.0.0.0/0, ::/0 |\n```\n\nNote that if an exit route is approved (0.0.0.0/0 or ::/0), both IPv4 and IPv6\nwill be approved.\n\n- Route API and CLI has been removed [#2422](https://github.com/juanfont/headscale/pull/2422)\n- Routes are now managed via the Node API [#2422](https://github.com/juanfont/headscale/pull/2422)\n- Only routes accessible to the node will be sent to the node [#2561](https://github.com/juanfont/headscale/pull/2561)\n\n#### Policy v2\n\nThis release introduces a new policy implementation. The new policy is a\ncomplete rewrite, and it introduces some significant quality and consistency\nimprovements. In principle, there are not really any new features, but some long\nstanding bugs should have been resolved, or be easier to fix in the future. 
The\nnew policy code passes all of our tests.\n\n**Changes**\n\n- The policy is validated and \"resolved\" when loading, providing errors for\n  invalid rules and conditions.\n  - Previously this was done as a mix between load and runtime (when it was\n    applied to a node).\n  - This means that when you convert the first time, what was previously a\n    policy that loaded, but failed at runtime, will now fail at load time.\n- Error messages should be more descriptive and informative.\n  - There is still work to be done here, but it is already improved with \"typing\"\n    (e.g. only Users can be put in Groups)\n- All users in the policy must contain an `@` character.\n  - If your user naturally contains an `@`, like an email, this will just work.\n  - If it is based on usernames, or other identifiers not containing an `@`, an\n    `@` should be appended at the end. For example, if your user is `john`, it\n    must be written as `john@` in the policy.\n\n<details>\n\n<summary>Migration notes when the policy is stored in the database.</summary>\n\nThis section **only** applies if the policy is stored in the database and\nHeadscale 0.26 doesn't start due to a policy error\n(`failed to load ACL policy`).\n\n- Start Headscale 0.26 with the environment variable `HEADSCALE_POLICY_V1=1`\n  set. You can check that Headscale picked up the environment variable by\n  observing this message during startup: `Using policy manager version: 1`\n- Dump the policy to a file: `headscale policy get > policy.json`\n- Edit `policy.json` and migrate to policy V2. Use the command\n  `headscale policy check --file policy.json` to check for policy errors.\n- Load the modified policy: `headscale policy set --file policy.json`\n- Restart Headscale **without** the environment variable `HEADSCALE_POLICY_V1`.\n  Headscale should now print the message `Using policy manager version: 2` and\n  start up successfully.\n\n</details>\n\n**SSH**\n\nThe SSH policy has been reworked to be more consistent with the rest of the\npolicy. In addition, several inconsistencies between our implementation and\nTailscale's upstream have been closed, and this might be a breaking change for\nsome users. Please refer to the\n[upstream documentation](https://tailscale.com/kb/1337/acl-syntax#tailscale-ssh)\nfor more information on which types are allowed in `src`, `dst` and `users`.\n\nThere is one large inconsistency left: we allow `*` as a destination, as we\ncurrently do not support `autogroup:self`, `autogroup:member` and\n`autogroup:tagged`. The support for `*` will be removed when we have support for\nthe autogroups.\n\n**Current state**\n\nThe new policy is passing all tests, both integration and unit tests. This does\nnot mean it is perfect, but it is a good start. Corner cases that are currently\nworking in v1 and not tested might be broken in v2 (and vice versa).\n\n**We do need help testing this code**\n\n#### Other breaking changes\n\n- Disallow `server_url` and `base_domain` to be equal [#2544](https://github.com/juanfont/headscale/pull/2544)\n- Return full user in API for pre auth keys instead of string [#2542](https://github.com/juanfont/headscale/pull/2542)\n- Pre auth key API/CLI now uses ID over username [#2542](https://github.com/juanfont/headscale/pull/2542)\n- A non-empty list of global nameservers needs to be specified via\n  `dns.nameservers.global` if the configuration option `dns.override_local_dns`\n  is enabled or is not specified in the configuration file. 
This aligns with\n  behaviour of tailscale.com.\n  [#2438](https://github.com/juanfont/headscale/pull/2438)\n\n### Changes\n\n- Use Go 1.24 [#2427](https://github.com/juanfont/headscale/pull/2427)\n- Add `headscale policy check` command to check policy [#2553](https://github.com/juanfont/headscale/pull/2553)\n- `oidc.map_legacy_users` and `oidc.strip_email_domain` has been removed [#2411](https://github.com/juanfont/headscale/pull/2411)\n- Add more information to `/debug` endpoint [#2420](https://github.com/juanfont/headscale/pull/2420)\n  - It is now possible to inspect running goroutines and take profiles\n  - View of config, policy, filter, ssh policy per node, connected nodes and\n    DERPmap\n- OIDC: Fetch UserInfo to get EmailVerified if necessary [#2493](https://github.com/juanfont/headscale/pull/2493)\n  - If a OIDC provider doesn't include the `email_verified` claim in its ID\n    tokens, Headscale will attempt to get it from the UserInfo endpoint.\n- OIDC: Try to populate name, email and username from UserInfo [#2545](https://github.com/juanfont/headscale/pull/2545)\n- Improve performance by only querying relevant nodes from the database for node\n  updates [#2509](https://github.com/juanfont/headscale/pull/2509)\n- node FQDNs in the netmap will now contain a dot (\".\") at the end. This aligns\n  with behaviour of tailscale.com\n  [#2503](https://github.com/juanfont/headscale/pull/2503)\n- Restore support for \"Override local DNS\" [#2438](https://github.com/juanfont/headscale/pull/2438)\n- Add documentation for routes [#2496](https://github.com/juanfont/headscale/pull/2496)\n\n## 0.25.1 (2025-02-25)\n\n### Changes\n\n- Fix issue where registration errors are sent correctly [#2435](https://github.com/juanfont/headscale/pull/2435)\n- Fix issue where routes passed on registration were not saved [#2444](https://github.com/juanfont/headscale/pull/2444)\n- Fix issue where registration page was displayed twice [#2445](https://github.com/juanfont/headscale/pull/2445)\n\n## 0.25.0 (2025-02-11)\n\n### BREAKING\n\n- Authentication flow has been rewritten [#2374](https://github.com/juanfont/headscale/pull/2374) This change should be\n  transparent to users with the exception of some buxfixes that has been\n  discovered and was fixed as part of the rewrite.\n  - When a node is registered with _a new user_, it will be registered as a new\n    node ([#2327](https://github.com/juanfont/headscale/issues/2327) and\n    [#1310](https://github.com/juanfont/headscale/issues/1310)).\n  - A logged out node logging in with the same user will replace the existing\n    node.\n- Remove support for Tailscale clients older than 1.62 (Capability version 87) [#2405](https://github.com/juanfont/headscale/pull/2405)\n\n### Changes\n\n- `oidc.map_legacy_users` is now `false` by default [#2350](https://github.com/juanfont/headscale/pull/2350)\n- Print Tailscale version instead of capability versions for outdated nodes [#2391](https://github.com/juanfont/headscale/pull/2391)\n- Do not allow renaming of users from OIDC [#2393](https://github.com/juanfont/headscale/pull/2393)\n- Change minimum hostname length to 2 [#2393](https://github.com/juanfont/headscale/pull/2393)\n- Fix migration error caused by nodes having invalid auth keys [#2412](https://github.com/juanfont/headscale/pull/2412)\n- Pre auth keys belonging to a user are no longer deleted with the user [#2396](https://github.com/juanfont/headscale/pull/2396)\n- Pre auth keys that are used by a node can no longer be deleted 
[#2396](https://github.com/juanfont/headscale/pull/2396)\n- Rehaul HTTP errors, return better status code and errors to users [#2398](https://github.com/juanfont/headscale/pull/2398)\n- Print headscale version and commit on server startup [#2415](https://github.com/juanfont/headscale/pull/2415)\n\n## 0.24.3 (2025-02-07)\n\n### Changes\n\n- Fix migration error caused by nodes having invalid auth keys [#2412](https://github.com/juanfont/headscale/pull/2412)\n- Pre auth keys belonging to a user are no longer deleted with the user [#2396](https://github.com/juanfont/headscale/pull/2396)\n- Pre auth keys that are used by a node can no longer be deleted [#2396](https://github.com/juanfont/headscale/pull/2396)\n\n## 0.24.2 (2025-01-30)\n\n### Changes\n\n- Fix issue where email and username being equal fails to match in Policy [#2388](https://github.com/juanfont/headscale/pull/2388)\n- Delete invalid routes before adding a NOT NULL constraint on node_id [#2386](https://github.com/juanfont/headscale/pull/2386)\n\n## 0.24.1 (2025-01-23)\n\n### Changes\n\n- Fix migration issue with user table for PostgreSQL [#2367](https://github.com/juanfont/headscale/pull/2367)\n- Relax username validation to allow emails [#2364](https://github.com/juanfont/headscale/pull/2364)\n- Remove invalid routes and add stronger constraints for routes to avoid API\n  panic [#2371](https://github.com/juanfont/headscale/pull/2371)\n- Fix panic when `derp.update_frequency` is 0 [#2368](https://github.com/juanfont/headscale/pull/2368)\n\n## 0.24.0 (2025-01-17)\n\n### Security fix: OIDC changes in Headscale 0.24.0\n\nThe following issue _only_ affects Headscale installations which authenticate\nwith OIDC.\n\n_Headscale v0.23.0 and earlier_ identified OIDC users by the \"username\" part of\ntheir email address (when `strip_email_domain: true`, the default) or whole\nemail address (when `strip_email_domain: false`).\n\nDepending on how Headscale and your Identity Provider (IdP) were configured,\nonly using the `email` claim could allow a malicious user with an IdP account to\ntake over another Headscale user's account, even when\n`strip_email_domain: false`.\n\nThis would also cause a user to lose access to their Headscale account if they\nchanged their email address.\n\n_Headscale v0.24.0_ now identifies OIDC users by the `iss` and `sub` claims.\n[These are guaranteed by the OIDC specification to be stable and unique](https://openid.net/specs/openid-connect-core-1_0.html#ClaimStability),\neven if a user changes email address. A well-designed IdP will typically set\n`sub` to an opaque identifier like a UUID or numeric ID, which has no relation\nto the user's name or email address.\n\nHeadscale v0.24.0 and later will also automatically update profile fields with\nOIDC data on login. This means that users can change those details in your IdP,\nand have it populate to Headscale automatically the next time they log in.\nHowever, this may affect the way you reference users in policies.\n\nHeadscale v0.23.0 and earlier never recorded the `iss` and `sub` fields, so all\nlegacy (existing) OIDC accounts _need to be migrated_ to be properly secured.\n\n#### What do I need to do to migrate?\n\nHeadscale v0.24.0 has an automatic migration feature, which is enabled by\ndefault (`map_legacy_users: true`). **This will be disabled by default in a\nfuture version of Headscale – any unmigrated users will get new accounts.**\n\nThe migration will mostly be done automatically, with one exception. 
If your\nOIDC does not provide an `email_verified` claim, Headscale will ignore the\n`email`. This means that either the administrator will have to mark the user\nemails as verified, or ensure the users verify their emails. Any unverified\nemails will be ignored, meaning that the users will get new accounts instead of\nbeing migrated.\n\nAfter this exception is ensured, make all users log into Headscale with their\naccount, and Headscale will automatically update the account record. This will\nbe transparent to the users.\n\nWhen all users have logged in, you can disable the automatic migration by\nsetting `map_legacy_users: false` in your configuration file.\n\nPlease note that `map_legacy_users` will be set to `false` by default in v0.25.0\nand the migration mechanism will be removed in v0.26.0.\n\n<details>\n\n<summary>What does automatic migration do?</summary>\n\n##### What does automatic migration do?\n\nWhen automatic migration is enabled (`map_legacy_users: true`), Headscale will\nfirst match an OIDC account to a Headscale account by `iss` and `sub`, and then\nfall back to matching OIDC users similarly to how Headscale v0.23.0 did:\n\n- If `strip_email_domain: true` (the default): the Headscale username matches\n  the \"username\" part of their email address.\n- If `strip_email_domain: false`: the Headscale username matches the _whole_\n  email address.\n\nOn migration, Headscale will change the account's username to their\n`preferred_username`. **This could break any ACLs or policies which are\nconfigured to match by username.**\n\nLike with Headscale v0.23.0 and earlier, this migration only works for users who\nhaven't changed their email address since their last Headscale login.\n\nA _successful_ automated migration should otherwise be transparent to users.\n\nOnce a Headscale account has been migrated, it will be _unavailable_ to be\nmatched by the legacy process. 
An OIDC login with a matching username, but\n_non-matching_ `iss` and `sub` will instead get a _new_ Headscale account.\n\nBecause of the way OIDC works, Headscale's automated migration process can\n_only_ work when a user tries to log in after the update.\n\nLegacy account migration should have no effect on new installations where all\nusers have a recorded `sub` and `iss`.\n\n</details>\n\n<details>\n\n<summary>What happens when automatic migration is disabled?</summary>\n\n##### What happens when automatic migration is disabled?\n\nWhen automatic migration is disabled (`map_legacy_users: false`), Headscale will\nonly try to match an OIDC account to a Headscale account by `iss` and `sub`.\n\nIf there is no match, it will get a _new_ Headscale account – even if there was\na legacy account which _could_ have matched and migrated.\n\nWe recommend new Headscale users explicitly disable automatic migration – but it\nshould otherwise have no effect if every account has a recorded `iss` and `sub`.\n\nWhen automatic migration is disabled, the `strip_email_domain` setting will have\nno effect.\n\n</details>\n\nSpecial thanks to @micolous for reviewing, proposing and working with us on\nthese changes.\n\n#### Other OIDC changes\n\nHeadscale now uses\n[the standard OIDC claims](https://openid.net/specs/openid-connect-core-1_0.html#StandardClaims)\nto populate and update user information every time they log in:\n\n| Headscale profile field | OIDC claim           | Notes / examples                                                                                          |\n| ----------------------- | -------------------- | --------------------------------------------------------------------------------------------------------- |\n| email address           | `email`              | Only used when `\"email_verified\": true`                                                                   |\n| display name            | `name`               | eg: `Sam Smith`                                                                                           |\n| username                | `preferred_username` | Varies depending on IdP and configuration, eg: `ssmith`, `ssmith@idp.example.com`, `\\\\example.com\\ssmith` |\n| profile picture         | `picture`            | URL to a profile picture or avatar                                                                        |\n\nThese should show up nicely in the Tailscale client.\n\nThis will also affect the way you\n[reference users in policies](https://github.com/juanfont/headscale/pull/2205).\n\n### BREAKING\n\n- Remove `dns.use_username_in_magic_dns` configuration option [#2020](https://github.com/juanfont/headscale/pull/2020),\n  [#2279](https://github.com/juanfont/headscale/pull/2279)\n  - Having usernames in magic DNS is no longer possible.\n- Remove versions older than 1.56 [#2149](https://github.com/juanfont/headscale/pull/2149)\n  - Clean up old code required by old versions\n- User gRPC/API [#2261](https://github.com/juanfont/headscale/pull/2261):\n  - If you depend on a Headscale Web UI, you should wait with this update until\n    the UI have been updated to match the new API.\n  - `GET /api/v1/user/{name}` and `GetUser` have been removed in favour of\n    `ListUsers` with an ID parameter\n  - `RenameUser` and `DeleteUser` now require an ID instead of a name.\n\n### Changes\n\n- Improved compatibility of built-in DERP server with clients connecting over\n  WebSocket [#2132](https://github.com/juanfont/headscale/pull/2132)\n- Allow nodes to use SSH agent 
forwarding [#2145](https://github.com/juanfont/headscale/pull/2145)\n- Fixed processing of fields in POST request in MoveNode RPC [#2179](https://github.com/juanfont/headscale/pull/2179)\n- Added conversion of 'Hostname' to 'givenName' in a node with FQDN rules\n  applied [#2198](https://github.com/juanfont/headscale/pull/2198)\n- Fixed updating of hostname and givenName when it is updated in HostInfo [#2199](https://github.com/juanfont/headscale/pull/2199)\n- Fixed missing `stable-debug` container tag [#2232](https://github.com/juanfont/headscale/pull/2232)\n- Loosened up `server_url` and `base_domain` check. It was overly strict in some\n  cases. [#2248](https://github.com/juanfont/headscale/pull/2248)\n- CLI for managing users now accepts `--identifier` in addition to `--name`;\n  usage of `--identifier` is recommended\n  [#2261](https://github.com/juanfont/headscale/pull/2261)\n- Add `dns.extra_records_path` configuration option [#2262](https://github.com/juanfont/headscale/issues/2262)\n- Support client verify for DERP [#2046](https://github.com/juanfont/headscale/pull/2046)\n- Add PKCE Verifier for OIDC [#2314](https://github.com/juanfont/headscale/pull/2314)\n\n## 0.23.0 (2024-09-18)\n\nThis release was intended to be mainly a code reorganisation and refactoring,\nsignificantly improving the maintainability of the codebase. This should allow\nus to improve further and make it easier for the maintainers to keep on top of\nthe project. However, as you all have noticed, it turned out to become a much\nlarger, much longer release cycle than anticipated. It ended up being a\nrelease with a lot of rewrites and changes to the code base and functionality of\nHeadscale, cleaning up a lot of technical debt and introducing a lot of\nimprovements. This does come with some breaking changes.\n\n**Please remember to always back up your database between versions**\n\n#### Here is a short summary of the broad topics of changes:\n\nCode has been organised into modules, reducing use of global variables/objects,\nisolating concerns and “putting the right things in the logical place”.\n\nThe new\n[policy](https://github.com/juanfont/headscale/tree/main/hscontrol/policy) and\n[mapper](https://github.com/juanfont/headscale/tree/main/hscontrol/mapper)\npackages, containing the ACL/Policy logic and the logic for creating the data\nserved to clients (the network “map”), have been rewritten and improved. This\nchange has allowed us to finish SSH support and add additional tests throughout\nthe code to ensure correctness.\n\nThe\n[“poller”, or streaming logic](https://github.com/juanfont/headscale/blob/main/hscontrol/poll.go)\nhas been rewritten and instead of keeping track of the latest updates, checking\nat a fixed interval, it now uses Go channels, implemented in our new\n[notifier](https://github.com/juanfont/headscale/tree/main/hscontrol/notifier)\npackage, and it allows us to send updates to connected clients immediately. This\nshould both improve performance and reduce the potential latency before a client\npicks up an update.\n\nHeadscale now supports sending “delta” updates, thanks to the new mapper and\npoller logic, allowing us to only inform nodes about new nodes, changed nodes\nand removed nodes. 
Previously we sent the entire state of the network every time\nan update was due.\n\nWhile we have a pretty good\n[test harness](https://github.com/search?q=repo%3Ajuanfont%2Fheadscale+path%3A_test.go&type=code)\nfor validating our changes, the changes came down to\n[284 changed files with 32,316 additions and 24,245 deletions](https://github.com/juanfont/headscale/compare/b01f1f1867136d9b2d7b1392776eb363b482c525...ed78ecd)\nand bugs are expected. We need help testing this release. In addition, while we\nthink the performance should in general be better, there might be regressions in\nparts of the platform, particularly where we prioritised correctness over speed.\n\nThere are also several bugfixes that have been encountered and fixed as part of\nimplementing these changes, particularly after improving the test harness as\npart of adopting [#1460](https://github.com/juanfont/headscale/pull/1460).\n\n### BREAKING\n\n- Code reorganisation, a lot of code has moved, please review the following PRs\n  accordingly [#1473](https://github.com/juanfont/headscale/pull/1473)\n- Change the structure of database configuration, see\n  [config-example.yaml](./config-example.yaml) for the new structure.\n  [#1700](https://github.com/juanfont/headscale/pull/1700)\n  - Old structure has been removed and the configuration _must_ be converted.\n  - Adds additional configuration for PostgreSQL for setting max open, idle\n    connection and idle connection lifetime.\n- API: Machine is now Node [#1553](https://github.com/juanfont/headscale/pull/1553)\n- Remove support for older Tailscale clients [#1611](https://github.com/juanfont/headscale/pull/1611)\n  - The oldest supported client is 1.42\n- Headscale checks that _at least_ one DERP is defined at start [#1564](https://github.com/juanfont/headscale/pull/1564)\n  - If no DERP is configured, the server will fail to start; this can be because\n    it cannot load the DERPMap from file or URL.\n- Embedded DERP server requires a private key [#1611](https://github.com/juanfont/headscale/pull/1611)\n  - Add a filepath entry to\n    [`derp.server.private_key_path`](https://github.com/juanfont/headscale/blob/b35993981297e18393706b2c963d6db882bba6aa/config-example.yaml#L95)\n- Docker images are now built with goreleaser (ko) [#1716](https://github.com/juanfont/headscale/pull/1716)\n  [#1763](https://github.com/juanfont/headscale/pull/1763)\n  - Entrypoint of container image has changed from shell to headscale, requiring a\n    change from `headscale serve` to `serve`\n  - `/var/lib/headscale` and `/var/run/headscale` are no longer created\n    automatically, see [container docs](./docs/setup/install/container.md)\n- Prefixes are now defined per v4 and v6 range. [#1756](https://github.com/juanfont/headscale/pull/1756)\n  - `ip_prefixes` option is now `prefixes.v4` and `prefixes.v6`\n  - `prefixes.allocation` can be set to assign IPs at `sequential` or `random`.\n    [#1869](https://github.com/juanfont/headscale/pull/1869)\n- MagicDNS domains no longer contain usernames\n  - This is in preparation to fix Headscale's implementation of tags which\n    currently does not correctly remove the link between a tagged device and a\n    user. 
As tagged devices will not have a user, this will require a change to\n    the DNS generation, removing the username; see\n    [#1369](https://github.com/juanfont/headscale/issues/1369) for more\n    information.\n  - `use_username_in_magic_dns` can be used to turn this behaviour on again, but\n    note that this option _will be removed_ when tags are fixed.\n    - dns.base_domain can no longer be the same as (or part of) server_url.\n    - This option brings Headscale's behaviour in line with Tailscale.\n- YAML files are no longer supported for headscale policy. [#1792](https://github.com/juanfont/headscale/pull/1792)\n  - HuJSON is now the only supported format for policy.\n- DNS configuration has been restructured [#2034](https://github.com/juanfont/headscale/pull/2034)\n  - Please review the new [config-example.yaml](./config-example.yaml) for the\n    new structure.\n\n### Changes\n\n- Use versioned migrations [#1644](https://github.com/juanfont/headscale/pull/1644)\n- Make the OIDC callback page better [#1484](https://github.com/juanfont/headscale/pull/1484)\n- SSH support [#1487](https://github.com/juanfont/headscale/pull/1487)\n- State management has been improved [#1492](https://github.com/juanfont/headscale/pull/1492)\n- Use error group handling to ensure tests actually pass [#1535](https://github.com/juanfont/headscale/pull/1535) based on\n  [#1460](https://github.com/juanfont/headscale/pull/1460)\n- Fix hang on SIGTERM [#1492](https://github.com/juanfont/headscale/pull/1492)\n  taken from [#1480](https://github.com/juanfont/headscale/pull/1480)\n- Send logs to stderr by default [#1524](https://github.com/juanfont/headscale/pull/1524)\n- Fix [TS-2023-006](https://tailscale.com/security-bulletins/#ts-2023-006)\n  security UPnP issue [#1563](https://github.com/juanfont/headscale/pull/1563)\n- Turn off gRPC logging [#1640](https://github.com/juanfont/headscale/pull/1640)\n  fixes [#1259](https://github.com/juanfont/headscale/issues/1259)\n- Added the possibility to manually create a DERP-map entry which can be\n  customized, instead of automatically creating it.\n  [#1565](https://github.com/juanfont/headscale/pull/1565)\n- Add support for deleting API keys [#1702](https://github.com/juanfont/headscale/pull/1702)\n- Add command to backfill IP addresses for nodes missing IPs from configured\n  prefixes. [#1869](https://github.com/juanfont/headscale/pull/1869)\n- Log available update as warning [#1877](https://github.com/juanfont/headscale/pull/1877)\n- Add `autogroup:internet` to Policy [#1917](https://github.com/juanfont/headscale/pull/1917)\n- Restore foreign keys and add constraints [#1562](https://github.com/juanfont/headscale/pull/1562)\n- Make registration page easier to use on mobile devices\n- Make write-ahead-log default on and configurable for SQLite [#1985](https://github.com/juanfont/headscale/pull/1985)\n- Add APIs for managing headscale policy. 
[#1792](https://github.com/juanfont/headscale/pull/1792)\n- Fix for registering nodes using preauthkeys when running on a postgres\n  database in a non-UTC timezone.\n  [#764](https://github.com/juanfont/headscale/issues/764)\n- Make sure integration tests cover postgres for all scenarios\n- CLI commands (all except `serve`) only require minimal configuration, no more\n  errors or warnings from unset settings\n  [#2109](https://github.com/juanfont/headscale/pull/2109)\n- CLI results are now consistently sent to stdout and errors to stderr [#2109](https://github.com/juanfont/headscale/pull/2109)\n- Fix issue where shutting down headscale would hang [#2113](https://github.com/juanfont/headscale/pull/2113)\n\n## 0.22.3 (2023-05-12)\n\n### Changes\n\n- Added missing ca-certificates in Docker image [#1463](https://github.com/juanfont/headscale/pull/1463)\n\n## 0.22.2 (2023-05-10)\n\n### Changes\n\n- Add environment flags to enable pprof (profiling) [#1382](https://github.com/juanfont/headscale/pull/1382)\n  - Profiles are continuously generated in our integration tests.\n- Fix systemd service file location in `.deb` packages [#1391](https://github.com/juanfont/headscale/pull/1391)\n- Improvements on Noise implementation [#1379](https://github.com/juanfont/headscale/pull/1379)\n- Replace node filter logic, ensuring nodes with access can see each other [#1381](https://github.com/juanfont/headscale/pull/1381)\n- Disable (or delete) both exit routes at the same time [#1428](https://github.com/juanfont/headscale/pull/1428)\n- Ditch distroless for Docker image, create default socket dir in\n  `/var/run/headscale` [#1450](https://github.com/juanfont/headscale/pull/1450)\n\n## 0.22.1 (2023-04-20)\n\n### Changes\n\n- Fix issue where systemd could not bind to port 80 [#1365](https://github.com/juanfont/headscale/pull/1365)\n\n## 0.22.0 (2023-04-20)\n\n### Changes\n\n- Add `.deb` packages to release process [#1297](https://github.com/juanfont/headscale/pull/1297)\n- Update and simplify the documentation to use new `.deb` packages [#1349](https://github.com/juanfont/headscale/pull/1349)\n- Add 32-bit Arm platforms to release process [#1297](https://github.com/juanfont/headscale/pull/1297)\n- Fix longstanding bug that would prevent \"\\*\" from working properly in ACLs\n  (issue [#699](https://github.com/juanfont/headscale/issues/699))\n  [#1279](https://github.com/juanfont/headscale/pull/1279)\n- Fix issue where IPv6 could not be used in, or while using ACLs (part of [#809](https://github.com/juanfont/headscale/issues/809))\n  [#1339](https://github.com/juanfont/headscale/pull/1339)\n- Target Go 1.20 and Tailscale 1.38 for Headscale [#1323](https://github.com/juanfont/headscale/pull/1323)\n\n## 0.21.0 (2023-03-20)\n\n### Changes\n\n- Add \"configtest\" CLI command. 
[#1230](https://github.com/juanfont/headscale/pull/1230)\n- Add documentation on connecting with iOS to `/apple` [#1261](https://github.com/juanfont/headscale/pull/1261)\n- Update iOS compatibility and added documentation for iOS [#1264](https://github.com/juanfont/headscale/pull/1264)\n- Allow deleting routes [#1244](https://github.com/juanfont/headscale/pull/1244)\n\n## 0.20.0 (2023-02-03)\n\n### Changes\n\n- Fix wrong behaviour in exit nodes [#1159](https://github.com/juanfont/headscale/pull/1159)\n- Align behaviour of `dns_config.restricted_nameservers` to Tailscale [#1162](https://github.com/juanfont/headscale/pull/1162)\n- Make OpenID Connect authenticated client expiry time configurable [#1191](https://github.com/juanfont/headscale/pull/1191)\n  - defaults to 180 days like Tailscale SaaS\n  - adds option to use the expiry time from the OpenID token for the node (see\n    config-example.yaml)\n- Set ControlTime in Map info sent to nodes [#1195](https://github.com/juanfont/headscale/pull/1195)\n- Populate Tags field on Node updates sent [#1195](https://github.com/juanfont/headscale/pull/1195)\n\n## 0.19.0 (2023-01-29)\n\n### BREAKING\n\n- Rename Namespace to User [#1144](https://github.com/juanfont/headscale/pull/1144)\n  - **BACKUP your database before upgrading**\n- Command line flags previously taking `--namespace` or `-n` will now require\n  `--user` or `-u`\n\n## 0.18.0 (2023-01-14)\n\n### Changes\n\n- Reworked routing and added support for subnet router failover [#1024](https://github.com/juanfont/headscale/pull/1024)\n- Added an OIDC AllowGroups configuration option and authorization check [#1041](https://github.com/juanfont/headscale/pull/1041)\n- Set `db_ssl` to false by default [#1052](https://github.com/juanfont/headscale/pull/1052)\n- Fix duplicate nodes due to incorrect implementation of the protocol [#1058](https://github.com/juanfont/headscale/pull/1058)\n- Report more accurately in the CLI if a machine is online [#1062](https://github.com/juanfont/headscale/pull/1062)\n- Added config option for custom DNS records [#1035](https://github.com/juanfont/headscale/pull/1035)\n- Expire nodes based on OIDC token expiry [#1067](https://github.com/juanfont/headscale/pull/1067)\n- Remove ephemeral nodes on logout [#1098](https://github.com/juanfont/headscale/pull/1098)\n- Performance improvements in ACLs [#1129](https://github.com/juanfont/headscale/pull/1129)\n- OIDC client secret can be passed via a file [#1127](https://github.com/juanfont/headscale/pull/1127)\n\n## 0.17.1 (2022-12-05)\n\n### Changes\n\n- Correct typo on macOS standalone profile link [#1028](https://github.com/juanfont/headscale/pull/1028)\n- Update platform docs with Fast User Switching [#1016](https://github.com/juanfont/headscale/pull/1016)\n\n## 0.17.0 (2022-11-26)\n\n### BREAKING\n\n- `noise.private_key_path` has been added and is required for the new noise\n  protocol.\n- Log level option `log_level` was moved to a distinct `log` config section and\n  renamed to `level` [#768](https://github.com/juanfont/headscale/pull/768)\n- Removed Alpine Linux container image [#962](https://github.com/juanfont/headscale/pull/962)\n\n### Important Changes\n\n- Added support for Tailscale TS2021 protocol [#738](https://github.com/juanfont/headscale/pull/738)\n- Add experimental support for\n  [SSH ACL](https://tailscale.com/kb/1018/acls/#tailscale-ssh) (see docs for\n  limitations) [#847](https://github.com/juanfont/headscale/pull/847)\n  - Please note that this support should be considered _partially_ implemented\n  - 
SSH ACLs status:\n    - Support `accept` and `check` (SSH can be enabled and used for connecting\n      and authentication)\n    - Rejecting connections **is not supported**, meaning that if you enable\n      SSH, then assume that _all_ `ssh` connections **will be allowed**.\n    - If you decide to try this feature, please carefully manage permissions\n      by blocking port `22` with regular ACLs, or do _not_ set `--ssh` on your\n      clients.\n    - We are currently improving our testing of the SSH ACLs, help us get an\n      overview by testing and giving feedback.\n  - This feature should be considered dangerous and it is disabled by default.\n    Enable by setting `HEADSCALE_EXPERIMENTAL_FEATURE_SSH=1`.\n\n### Changes\n\n- Add ability to specify config location via env var `HEADSCALE_CONFIG` [#674](https://github.com/juanfont/headscale/issues/674)\n- Target Go 1.19 for Headscale [#778](https://github.com/juanfont/headscale/pull/778)\n- Target Tailscale v1.30.0 to build Headscale [#780](https://github.com/juanfont/headscale/pull/780)\n- Give a warning when running Headscale with a reverse proxy improperly\n  configured for WebSockets [#788](https://github.com/juanfont/headscale/pull/788)\n- Fix subnet routers with Primary Routes [#811](https://github.com/juanfont/headscale/pull/811)\n- Added support for JSON logs [#653](https://github.com/juanfont/headscale/issues/653)\n- Sanitise the node key passed to the registration URL [#823](https://github.com/juanfont/headscale/pull/823)\n- Add support for generating pre-auth keys with tags [#767](https://github.com/juanfont/headscale/pull/767)\n- Add support for evaluating `autoApprovers` ACL entries when a machine is\n  registered [#763](https://github.com/juanfont/headscale/pull/763)\n- Add config flag to allow Headscale to start if OIDC provider is down [#829](https://github.com/juanfont/headscale/pull/829)\n- Fix prefix length comparison bug in AutoApprovers route evaluation [#862](https://github.com/juanfont/headscale/pull/862)\n- Random node DNS suffix only applied if names collide in namespace. 
[#766](https://github.com/juanfont/headscale/issues/766)\n- Remove `ip_prefix` configuration option and warning [#899](https://github.com/juanfont/headscale/pull/899)\n- Add `dns_config.override_local_dns` option [#905](https://github.com/juanfont/headscale/pull/905)\n- Fix some DNS config issues [#660](https://github.com/juanfont/headscale/issues/660)\n- Make it possible to disable TS2019 with a build flag [#928](https://github.com/juanfont/headscale/pull/928)\n- Fix OIDC registration issues [#960](https://github.com/juanfont/headscale/pull/960) and\n  [#971](https://github.com/juanfont/headscale/pull/971)\n- Add support for specifying NextDNS DNS-over-HTTPS resolver [#940](https://github.com/juanfont/headscale/pull/940)\n- Make more `sslmode` options available for PostgreSQL connections [#927](https://github.com/juanfont/headscale/pull/927)\n\n## 0.16.4 (2022-08-21)\n\n### Changes\n\n- Add ability to connect to PostgreSQL over TLS/SSL [#745](https://github.com/juanfont/headscale/pull/745)\n- Fix CLI registration of expired machines [#754](https://github.com/juanfont/headscale/pull/754)\n\n## 0.16.3 (2022-08-17)\n\n### Changes\n\n- Fix issue with OIDC authentication [#747](https://github.com/juanfont/headscale/pull/747)\n\n## 0.16.2 (2022-08-14)\n\n### Changes\n\n- Fixed bugs in the client registration process after migration to NodeKey [#735](https://github.com/juanfont/headscale/pull/735)\n\n## 0.16.1 (2022-08-12)\n\n### Changes\n\n- Updated dependencies (including the library that lacked armhf support) [#722](https://github.com/juanfont/headscale/pull/722)\n- Fix missing group expansion in function `excludeCorrectlyTaggedNodes` [#563](https://github.com/juanfont/headscale/issues/563)\n- Improve registration protocol implementation and switch to NodeKey as main\n  identifier [#725](https://github.com/juanfont/headscale/pull/725)\n- Add ability to connect to PostgreSQL via a Unix socket [#734](https://github.com/juanfont/headscale/pull/734)\n\n## 0.16.0 (2022-07-25)\n\n**Note:** Take a backup of your database before upgrading.\n\n### BREAKING\n\n- Old ACL syntax is no longer supported (\"users\" & \"ports\" -> \"src\" & \"dst\").\n  Please check [the new syntax](https://tailscale.com/kb/1018/acls/).\n\n### Changes\n\n- **Drop** armhf (32-bit ARM) support. 
[#609](https://github.com/juanfont/headscale/pull/609)\n- Headscale fails to serve if the ACL policy file cannot be parsed [#537](https://github.com/juanfont/headscale/pull/537)\n- Fix labels cardinality error when registering unknown pre-auth key [#519](https://github.com/juanfont/headscale/pull/519)\n- Fix send on closed channel crash in polling [#542](https://github.com/juanfont/headscale/pull/542)\n- Fixed spurious calls to setLastStateChangeToNow from ephemeral nodes [#566](https://github.com/juanfont/headscale/pull/566)\n- Add command for moving nodes between namespaces [#362](https://github.com/juanfont/headscale/issues/362)\n- Added more configuration parameters for OpenID Connect (scopes, free-form\n  parameters, domain and user allowlist)\n- Add command to set tags on a node [#525](https://github.com/juanfont/headscale/issues/525)\n- Add command to view tags of nodes [#356](https://github.com/juanfont/headscale/issues/356)\n- Add `--all` (`-a`) flag to enable routes command [#360](https://github.com/juanfont/headscale/issues/360)\n- Fix issue where nodes were not updated across namespaces [#560](https://github.com/juanfont/headscale/pull/560)\n- Add the ability to rename a node [#560](https://github.com/juanfont/headscale/pull/560)\n  - Node DNS names are now unique, a random suffix will be added when a node\n    joins\n  - This change contains database changes, remember to **backup** your database\n    before upgrading\n- Add option to enable/disable logtail (Tailscale's logging infrastructure) [#596](https://github.com/juanfont/headscale/pull/596)\n  - This change disables the logs by default\n- Use Prometheus's duration parser, supporting days (`d`), weeks (`w`) and\n  years (`y`) [#598](https://github.com/juanfont/headscale/pull/598)\n- Add support for reloading ACLs with SIGHUP [#601](https://github.com/juanfont/headscale/pull/601)\n- Use new ACL syntax [#618](https://github.com/juanfont/headscale/pull/618)\n- Add `-c` option to specify config file from command line [#285](https://github.com/juanfont/headscale/issues/285)\n  [#612](https://github.com/juanfont/headscale/pull/612)\n- Add configuration option to allow Tailscale clients to use a random WireGuard\n  port. 
[kb/1181/firewalls](https://tailscale.com/kb/1181/firewalls)\n  [#624](https://github.com/juanfont/headscale/pull/624)\n- Improve obtuse UX regarding missing configuration\n  (`ephemeral_node_inactivity_timeout` not set)\n  [#639](https://github.com/juanfont/headscale/pull/639)\n- Fix nodes being shown as 'offline' in `tailscale status` [#648](https://github.com/juanfont/headscale/pull/648)\n- Improve shutdown behaviour [#651](https://github.com/juanfont/headscale/pull/651)\n- Drop Gin as web framework in Headscale\n  [#648](https://github.com/juanfont/headscale/pull/648)\n  [#677](https://github.com/juanfont/headscale/pull/677)\n- Make tailnet node updates check interval configurable [#675](https://github.com/juanfont/headscale/pull/675)\n- Fix regression with HTTP API [#684](https://github.com/juanfont/headscale/pull/684)\n- `nodes ls` now prints both Hostname and Name (Issue [#647](https://github.com/juanfont/headscale/issues/647), PR\n  [#687](https://github.com/juanfont/headscale/pull/687))\n\n## 0.15.0 (2022-03-20)\n\n**Note:** Take a backup of your database before upgrading.\n\n### BREAKING\n\n- Boundaries between Namespaces have been removed and all nodes can communicate\n  by default [#357](https://github.com/juanfont/headscale/pull/357)\n  - To limit access between nodes, use [ACLs](./docs/ref/acls.md).\n- `/metrics` is now a configurable host:port endpoint: [#344](https://github.com/juanfont/headscale/pull/344). You must update your\n  `config.yaml` file to include:\n  ```yaml\n  metrics_listen_addr: 127.0.0.1:9090\n  ```\n\n### Features\n\n- Add support for writing ACL files with YAML [#359](https://github.com/juanfont/headscale/pull/359)\n- Users can now use emails in ACL groups [#372](https://github.com/juanfont/headscale/issues/372)\n- Add shorthand aliases for commands and subcommands [#376](https://github.com/juanfont/headscale/pull/376)\n- Add `/windows` endpoint for Windows configuration instructions + registry file\n  download [#392](https://github.com/juanfont/headscale/pull/392)\n- Added embedded DERP (and STUN) server into Headscale [#388](https://github.com/juanfont/headscale/pull/388)\n\n### Changes\n\n- Fix a bug where the same IP could be assigned to multiple hosts if they joined\n  in quick succession [#346](https://github.com/juanfont/headscale/pull/346)\n- Simplify the code behind registration of machines [#366](https://github.com/juanfont/headscale/pull/366)\n  - Nodes are now only written to database if they are registered successfully\n- Fix a limitation in the ACLs that prevented users from writing rules with `*`\n  as source [#374](https://github.com/juanfont/headscale/issues/374)\n- Reduce the overhead of marshal/unmarshal for Hostinfo, routes and endpoints by\n  using specific types in Machine\n  [#371](https://github.com/juanfont/headscale/pull/371)\n- Apply normalization function to FQDN on hostnames when hosts register and\n  retrieve information [#363](https://github.com/juanfont/headscale/issues/363)\n- Fix a bug that prevented the use of `tailscale logout` with OIDC [#508](https://github.com/juanfont/headscale/issues/508)\n- Added Tailscale repo HEAD and unstable releases channel to the integration\n  tests targets [#513](https://github.com/juanfont/headscale/pull/513)\n\n## 0.14.0 (2022-02-24)\n\n**UPCOMING BREAKING CHANGE:** From the **next** version (`0.15.0`), all machines\nwill be able to communicate regardless of whether they are in the same namespace.\nThis means that the behaviour currently limited to ACLs will become the default.\nFrom version `0.15.0`, all 
limitation of communication must be done with ACLs.\n\nThis is a part of aligning `headscale`'s behaviour with Tailscale's upstream\nbehaviour.\n\n### BREAKING\n\n- ACLs have been rewritten to align with the behaviour the Tailscale Control Panel\n  provides. **NOTE:** This is only active if you use ACLs\n  - Namespaces are now treated as Users\n  - All machines can communicate with all machines by default\n  - Tags should now work correctly and adding a host to Headscale should now\n    reload the rules.\n  - The documentation has a [fictional example](./docs/ref/acls.md) that should\n    cover some use cases of the ACLs features\n\n### Features\n\n- Add support for configurable mTLS [docs](./docs/ref/tls.md) [#297](https://github.com/juanfont/headscale/pull/297)\n\n### Changes\n\n- Remove dependency on CGO (switch from CGO SQLite to pure Go) [#346](https://github.com/juanfont/headscale/pull/346)\n\n## 0.13.0 (2022-02-18)\n\n### Features\n\n- Add IPv6 support to the prefix assigned to namespaces\n- Add API Key support\n  - Enable remote control of `headscale` via CLI\n    [docs](./docs/ref/api.md#grpc)\n  - Enable HTTP API (beta, subject to change)\n- OpenID Connect users will be mapped per namespace\n  - Each user will get its own namespace, created if it does not exist\n  - `oidc.domain_map` option has been removed\n  - `strip_email_domain` option has been added (see\n    [config-example.yaml](./config-example.yaml))\n\n### Changes\n\n- `ip_prefix` is now superseded by `ip_prefixes` in the configuration [#208](https://github.com/juanfont/headscale/pull/208)\n- Upgrade `tailscale` (1.20.4) and other dependencies to latest [#314](https://github.com/juanfont/headscale/pull/314)\n- Fix swapped machine<->namespace labels in `/metrics` [#312](https://github.com/juanfont/headscale/pull/312)\n- Remove key-value based update mechanism for namespace changes [#316](https://github.com/juanfont/headscale/pull/316)\n\n## 0.12.4 (2022-01-29)\n\n### Changes\n\n- Make gRPC Unix Socket permissions configurable [#292](https://github.com/juanfont/headscale/pull/292)\n- Trim whitespace before reading Private Key from file [#289](https://github.com/juanfont/headscale/pull/289)\n- Add new command to generate a private key for `headscale` [#290](https://github.com/juanfont/headscale/pull/290)\n- Fixed issue where hosts deleted from the control server could be written back\n  to the database, as long as they are connected to the control server\n  [#278](https://github.com/juanfont/headscale/pull/278)\n\n## 0.12.3 (2022-01-13)\n\n### Changes\n\n- Added Alpine container [#270](https://github.com/juanfont/headscale/pull/270)\n- Minor updates in dependencies [#271](https://github.com/juanfont/headscale/pull/271)\n\n## 0.12.2 (2022-01-11)\n\nHappy New Year!\n\n### Changes\n\n- Fix Docker release [#258](https://github.com/juanfont/headscale/pull/258)\n- Rewrite main docs [#262](https://github.com/juanfont/headscale/pull/262)\n- Improve Docker docs [#263](https://github.com/juanfont/headscale/pull/263)\n\n## 0.12.1 (2021-12-24)\n\n(We are skipping 0.12.0 to correct a version-tagging mishap from a few weeks\nago)\n\n### BREAKING\n\n- Upgrade to Tailscale 1.18 [#229](https://github.com/juanfont/headscale/pull/229)\n  - This change requires a new format for the private key; private keys are now\n    generated automatically:\n    1. Delete your current key\n    2. Restart `headscale`, a new key will be generated.\n    3. 
Restart all Tailscale clients to fetch the new key\n\n### Changes\n\n- Unify configuration example [#197](https://github.com/juanfont/headscale/pull/197)\n- Add stricter linting and formatting [#223](https://github.com/juanfont/headscale/pull/223)\n\n### Features\n\n- Add gRPC and HTTP API (HTTP API is currently disabled) [#204](https://github.com/juanfont/headscale/pull/204)\n- Use gRPC between the CLI and the server [#206](https://github.com/juanfont/headscale/pull/206),\n  [#212](https://github.com/juanfont/headscale/pull/212)\n- Beta OpenID Connect support [#126](https://github.com/juanfont/headscale/pull/126),\n  [#227](https://github.com/juanfont/headscale/pull/227)\n\n## 0.11.0 (2021-10-25)\n\n### BREAKING\n\n- Make headscale fetch DERP map from URL and file [#196](https://github.com/juanfont/headscale/pull/196)\n"
  },
  {
    "path": "CLAUDE.md",
    "content": "@AGENTS.md\n"
  },
  {
    "path": "CODE_OF_CONDUCT.md",
    "content": "# Contributor Covenant Code of Conduct\n\n## Our Pledge\n\nWe as members, contributors, and leaders pledge to make participation\nin our community a harassment-free experience for everyone, regardless\nof age, body size, visible or invisible disability, ethnicity, sex\ncharacteristics, gender identity and expression, level of experience,\neducation, socio-economic status, nationality, personal appearance,\nrace, religion, or sexual identity and orientation.\n\nWe pledge to act and interact in ways that contribute to an open,\nwelcoming, diverse, inclusive, and healthy community.\n\n## Our Standards\n\nExamples of behavior that contributes to a positive environment for\nour community include:\n\n- Demonstrating empathy and kindness toward other people\n- Being respectful of differing opinions, viewpoints, and experiences\n- Giving and gracefully accepting constructive feedback\n- Accepting responsibility and apologizing to those affected by our\n  mistakes, and learning from the experience\n- Focusing on what is best not just for us as individuals, but for the\n  overall community\n\nExamples of unacceptable behavior include:\n\n- The use of sexualized language or imagery, and sexual attention or\n  advances of any kind\n- Trolling, insulting or derogatory comments, and personal or\n  political attacks\n- Public or private harassment\n- Publishing others' private information, such as a physical or email\n  address, without their explicit permission\n- Other conduct which could reasonably be considered inappropriate in\n  a professional setting\n\n## Enforcement Responsibilities\n\nCommunity leaders are responsible for clarifying and enforcing our\nstandards of acceptable behavior and will take appropriate and fair\ncorrective action in response to any behavior that they deem\ninappropriate, threatening, offensive, or harmful.\n\nCommunity leaders have the right and responsibility to remove, edit,\nor reject comments, commits, code, wiki edits, issues, and other\ncontributions that are not aligned to this Code of Conduct, and will\ncommunicate reasons for moderation decisions when appropriate.\n\n## Scope\n\nThis Code of Conduct applies within all community spaces, and also\napplies when an individual is officially representing the community in\npublic spaces. Examples of representing our community include using an\nofficial e-mail address, posting via an official social media account,\nor acting as an appointed representative at an online or offline\nevent.\n\n## Enforcement\n\nInstances of abusive, harassing, or otherwise unacceptable behavior\nmay be reported to the community leaders responsible for enforcement\non our [Discord server](https://discord.gg/c84AZQhmpx). All complaints\nwill be reviewed and investigated promptly and fairly.\n\nAll community leaders are obligated to respect the privacy and\nsecurity of the reporter of any incident.\n\n## Enforcement Guidelines\n\nCommunity leaders will follow these Community Impact Guidelines in\ndetermining the consequences for any action they deem in violation of\nthis Code of Conduct:\n\n### 1. Correction\n\n**Community Impact**: Use of inappropriate language or other behavior\ndeemed unprofessional or unwelcome in the community.\n\n**Consequence**: A private, written warning from community leaders,\nproviding clarity around the nature of the violation and an\nexplanation of why the behavior was inappropriate. A public apology\nmay be requested.\n\n### 2. 
Warning\n\n**Community Impact**: A violation through a single incident or series\nof actions.\n\n**Consequence**: A warning with consequences for continued\nbehavior. No interaction with the people involved, including\nunsolicited interaction with those enforcing the Code of Conduct, for\na specified period of time. This includes avoiding interactions in\ncommunity spaces as well as external channels like social\nmedia. Violating these terms may lead to a temporary or permanent ban.\n\n### 3. Temporary Ban\n\n**Community Impact**: A serious violation of community standards,\nincluding sustained inappropriate behavior.\n\n**Consequence**: A temporary ban from any sort of interaction or\npublic communication with the community for a specified period of\ntime. No public or private interaction with the people involved,\nincluding unsolicited interaction with those enforcing the Code of\nConduct, is allowed during this period. Violating these terms may lead\nto a permanent ban.\n\n### 4. Permanent Ban\n\n**Community Impact**: Demonstrating a pattern of violation of\ncommunity standards, including sustained inappropriate behavior,\nharassment of an individual, or aggression toward or disparagement of\nclasses of individuals.\n\n**Consequence**: A permanent ban from any sort of public interaction\nwithin the community.\n\n## Attribution\n\nThis Code of Conduct is adapted from the [Contributor\nCovenant][homepage], version 2.0, available at\nhttps://www.contributor-covenant.org/version/2/0/code_of_conduct.html.\n\nCommunity Impact Guidelines were inspired by [Mozilla's code of\nconduct enforcement ladder](https://github.com/mozilla/diversity).\n\n[homepage]: https://www.contributor-covenant.org\n\nFor answers to common questions about this code of conduct, see the\nFAQ at https://www.contributor-covenant.org/faq. Translations are\navailable at https://www.contributor-covenant.org/translations.\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "# Contributing\n\nHeadscale is \"Open Source, acknowledged contribution\"; this means that any contribution will have to be discussed with the maintainers before being added to the project.\nThis model has been chosen to reduce the risk of burnout by limiting the maintenance overhead of reviewing and validating third-party code.\n\n## Why do we have this model?\n\nHeadscale has a small maintainer team that tries to balance working on the project, fixing bugs and reviewing contributions.\n\nWhen we work on issues ourselves, we develop first-hand knowledge of the code and it makes it possible for us to maintain and own the code as the project develops.\n\nCode contributions are seen as a positive thing. People enjoy and engage with our project, but it also comes with some challenges; we have to understand the code, we have to understand the feature, we might have to become familiar with external libraries or services and we have to think about security implications. All those steps are required during the reviewing process. After the code has been merged, the feature has to be maintained. Any changes reliant on external services must be updated and expanded accordingly.\n\nThe review and day-1 maintenance place a significant burden on the maintainers. Often we hope that the contributor will help out, but we have found that most of the time, they disappear after their new feature has been added.\n\nThis means that when someone contributes, we are mostly happy about it, but we do have to run it through a series of checks to establish whether we can actually maintain this feature.\n\n## What do we require?\n\nA general description is provided here and an explicit list is provided in our pull request template.\n\nAll new features have to start out with a design document, which should be discussed on the issue tracker (not Discord). It should include a use case for the feature, how it can be implemented, who will implement it and a plan for maintaining it.\n\nAll features have to be end-to-end tested (integration tests) and have good unit test coverage to ensure that they work as expected. This will also ensure that the feature continues to work as expected over time. If a change cannot be tested, a strong case for why this is not possible needs to be presented.\n\nThe contributor should help to maintain the feature over time. If the feature is not maintained properly, the maintainers reserve the right to remove features they deem unmaintainable. This should help to improve the quality of the software and keep it in a maintainable state.\n\n## Bug fixes\n\nHeadscale is open to code contributions for bug fixes without discussion.\n\n## Documentation\n\nIf you find mistakes in the documentation, please submit a fix to the documentation.\n"
  },
  {
    "path": "Dockerfile.derper",
    "content": "# For testing purposes only\n\nFROM golang:1.26.1-alpine AS build-env\n\nWORKDIR /go/src\n\nRUN apk add --no-cache git\nARG VERSION_BRANCH=main\nRUN git clone https://github.com/tailscale/tailscale.git --branch=$VERSION_BRANCH --depth=1\nWORKDIR /go/src/tailscale\n\nARG TARGETARCH\nRUN GOARCH=$TARGETARCH go install -v ./cmd/derper\n\nFROM alpine:3.22\nRUN apk add --no-cache ca-certificates iptables iproute2 ip6tables curl\n\nCOPY --from=build-env /go/bin/* /usr/local/bin/\nENTRYPOINT [ \"/usr/local/bin/derper\" ]\n"
  },
  {
    "path": "Dockerfile.integration",
    "content": "# This Dockerfile and the images produced are for testing headscale,\n# and are in no way endorsed by Headscale's maintainers as an\n# official nor supported release or distribution.\n\nFROM docker.io/golang:1.26.1-trixie AS builder\nARG VERSION=dev\nENV GOPATH /go\nWORKDIR /go/src/headscale\n\n# Install delve debugger first - rarely changes, good cache candidate\nRUN go install github.com/go-delve/delve/cmd/dlv@latest\n\n# Download dependencies - only invalidated when go.mod/go.sum change\nCOPY go.mod go.sum /go/src/headscale/\nRUN go mod download\n\n# Copy source and build - invalidated on any source change\nCOPY . .\n\n# Build debug binary with debug symbols for delve\nRUN CGO_ENABLED=0 GOOS=linux go build -gcflags=\"all=-N -l\" -o /go/bin/headscale ./cmd/headscale\n\n# Runtime stage\nFROM debian:trixie-slim\n\nRUN apt-get --update install --no-install-recommends --yes \\\n    bash ca-certificates curl dnsutils findutils iproute2 jq less procps python3 sqlite3 \\\n  && apt-get dist-clean\n\nRUN mkdir -p /var/run/headscale\n\n# Copy binaries from builder\nCOPY --from=builder /go/bin/headscale /usr/local/bin/headscale\nCOPY --from=builder /go/bin/dlv /usr/local/bin/dlv\n\n# Copy source code for delve source-level debugging\nCOPY --from=builder /go/src/headscale /go/src/headscale\n\nWORKDIR /go/src/headscale\n\n# Need to reset the entrypoint or everything will run as a busybox script\nENTRYPOINT []\nEXPOSE 8080/tcp 40000/tcp\nCMD [\"dlv\", \"--listen=0.0.0.0:40000\", \"--headless=true\", \"--api-version=2\", \"--accept-multiclient\", \"exec\", \"/usr/local/bin/headscale\", \"--\"]\n"
  },
  {
    "path": "Dockerfile.integration-ci",
    "content": "# Minimal CI image - expects pre-built headscale binary in build context\n# For local development with delve debugging, use Dockerfile.integration instead\n\nFROM debian:trixie-slim\n\nRUN apt-get --update install --no-install-recommends --yes \\\n    bash ca-certificates curl dnsutils findutils iproute2 jq less procps python3 sqlite3 \\\n  && apt-get dist-clean\n\nRUN mkdir -p /var/run/headscale\n\n# Copy pre-built headscale binary from build context\nCOPY headscale /usr/local/bin/headscale\n\nENTRYPOINT []\nEXPOSE 8080/tcp\nCMD [\"/usr/local/bin/headscale\"]\n"
  },
  {
    "path": "Dockerfile.tailscale-HEAD",
    "content": "# Copyright (c) Tailscale Inc & AUTHORS\n# SPDX-License-Identifier: BSD-3-Clause\n\n# This Dockerfile is more or less lifted from tailscale/tailscale\n# to ensure a similar build process when testing the HEAD of tailscale.\n\nFROM golang:1.26.1-alpine AS build-env\n\nWORKDIR /go/src\n\nRUN apk add --no-cache git\n\n# Replace `RUN git...` with `COPY` and a local checked out version of Tailscale in `./tailscale`\n# to test specific commits of the Tailscale client. This is useful when trying to find out why\n# something specific broke between two versions of Tailscale with for example `git bisect`.\n# COPY ./tailscale .\nRUN git clone https://github.com/tailscale/tailscale.git\n\nWORKDIR /go/src/tailscale\n\n\n# see build_docker.sh\nARG VERSION_LONG=\"\"\nENV VERSION_LONG=$VERSION_LONG\nARG VERSION_SHORT=\"\"\nENV VERSION_SHORT=$VERSION_SHORT\nARG VERSION_GIT_HASH=\"\"\nENV VERSION_GIT_HASH=$VERSION_GIT_HASH\nARG TARGETARCH\n\nARG BUILD_TAGS=\"\"\n\nRUN GOARCH=$TARGETARCH go install -tags=\"${BUILD_TAGS}\" -ldflags=\"\\\n      -X tailscale.com/version.longStamp=$VERSION_LONG \\\n      -X tailscale.com/version.shortStamp=$VERSION_SHORT \\\n      -X tailscale.com/version.gitCommitStamp=$VERSION_GIT_HASH\" \\\n      -v ./cmd/tailscale ./cmd/tailscaled ./cmd/containerboot\n\nFROM alpine:3.22\n# Upstream: ca-certificates ip6tables iptables iproute2\n# Tests: curl python3 (traceroute via BusyBox)\nRUN apk add --no-cache ca-certificates curl ip6tables iptables iproute2 python3\n\nCOPY --from=build-env /go/bin/* /usr/local/bin/\n# For compat with the previous run.sh, although ideally you should be\n# using build_docker.sh which sets an entrypoint for the image.\nRUN mkdir /tailscale && ln -s /usr/local/bin/containerboot /tailscale/run.sh\n"
  },
  {
    "path": "LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2020, Juan Font\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its\n   contributors may be used to endorse or promote products derived from\n   this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "Makefile",
    "content": "# Headscale Makefile\n# Modern Makefile following best practices\n\n# Version calculation\nVERSION ?= $(shell git describe --always --tags --dirty)\n\n# Build configuration\nGOOS ?= $(shell uname | tr '[:upper:]' '[:lower:]')\nifeq ($(filter $(GOOS), openbsd netbsd solaris plan9), )\n\tPIE_FLAGS = -buildmode=pie\nendif\n\n# Tool availability check with nix warning\ndefine check_tool\n\t@command -v $(1) >/dev/null 2>&1 || { \\\n\t\techo \"Warning: $(1) not found. Run 'nix develop' to ensure all dependencies are available.\"; \\\n\t\texit 1; \\\n\t}\nendef\n\n# Source file collections using shell find for better performance\nGO_SOURCES := $(shell find . -name '*.go' -not -path './gen/*' -not -path './vendor/*')\nPROTO_SOURCES := $(shell find . -name '*.proto' -not -path './gen/*' -not -path './vendor/*')\nPRETTIER_SOURCES := $(shell find . \\( -name '*.md' -o -name '*.yaml' -o -name '*.yml' -o -name '*.ts' -o -name '*.js' -o -name '*.html' -o -name '*.css' -o -name '*.scss' -o -name '*.sass' \\) -not -path './gen/*' -not -path './vendor/*' -not -path './node_modules/*')\n\n# Default target\n.PHONY: all\nall: lint test build\n\n# Dependency checking\n.PHONY: check-deps\ncheck-deps:\n\t$(call check_tool,go)\n\t$(call check_tool,golangci-lint)\n\t$(call check_tool,gofumpt)\n\t$(call check_tool,mdformat)\n\t$(call check_tool,prettier)\n\t$(call check_tool,clang-format)\n\t$(call check_tool,buf)\n\n# Build targets\n.PHONY: build\nbuild: check-deps $(GO_SOURCES) go.mod go.sum\n\t@echo \"Building headscale...\"\n\tgo build $(PIE_FLAGS) -ldflags \"-X main.version=$(VERSION)\" -o headscale ./cmd/headscale\n\n# Test targets\n.PHONY: test\ntest: check-deps $(GO_SOURCES) go.mod go.sum\n\t@echo \"Running Go tests...\"\n\tgo test -race ./...\n\n\n# Formatting targets\n.PHONY: fmt\nfmt: fmt-go fmt-mdformat fmt-prettier fmt-proto\n\n.PHONY: fmt-go\nfmt-go: check-deps $(GO_SOURCES)\n\t@echo \"Formatting Go code...\"\n\tgofumpt -l -w .\n\tgolangci-lint run --fix\n\n.PHONY: fmt-mdformat\nfmt-mdformat: check-deps\n\t@echo \"Formatting documentation...\"\n\tmdformat docs/\n\n.PHONY: fmt-prettier\nfmt-prettier: check-deps $(PRETTIER_SOURCES)\n\t@echo \"Formatting markup and config files...\"\n\tprettier --write '**/*.{ts,js,md,yaml,yml,sass,css,scss,html}'\n\n.PHONY: fmt-proto\nfmt-proto: check-deps $(PROTO_SOURCES)\n\t@echo \"Formatting Protocol Buffer files...\"\n\tclang-format -i $(PROTO_SOURCES)\n\n# Linting targets\n.PHONY: lint\nlint: lint-go lint-proto\n\n.PHONY: lint-go\nlint-go: check-deps $(GO_SOURCES) go.mod go.sum\n\t@echo \"Linting Go code...\"\n\tgolangci-lint run --timeout 10m\n\n.PHONY: lint-proto\nlint-proto: check-deps $(PROTO_SOURCES)\n\t@echo \"Linting Protocol Buffer files...\"\n\tcd proto/ && buf lint\n\n# Code generation\n.PHONY: generate\ngenerate: check-deps\n\t@echo \"Generating code...\"\n\tgo generate ./...\n\n# Clean targets\n.PHONY: clean\nclean:\n\trm -rf headscale gen\n\n# Development workflow\n.PHONY: dev\ndev: fmt lint test build\n\n# Help target\n.PHONY: help\nhelp:\n\t@echo \"Headscale Development Makefile\"\n\t@echo \"\"\n\t@echo \"Main targets:\"\n\t@echo \"  all          - Run lint, test, and build (default)\"\n\t@echo \"  build        - Build headscale binary\"\n\t@echo \"  test         - Run Go tests\"\n\t@echo \"  fmt          - Format all code (Go, docs, proto)\"\n\t@echo \"  lint         - Lint all code (Go, proto)\"\n\t@echo \"  generate     - Generate code from Protocol Buffers\"\n\t@echo \"  dev          - Full development workflow (fmt + lint + test 
+ build)\"\n\t@echo \"  clean        - Clean build artifacts\"\n\t@echo \"\"\n\t@echo \"Specific targets:\"\n\t@echo \"  fmt-go       - Format Go code only\"\n\t@echo \"  fmt-mdformat - Format documentation only\"\n\t@echo \"  fmt-prettier - Format markup and config files only\"\n\t@echo \"  fmt-proto    - Format Protocol Buffer files only\"\n\t@echo \"  lint-go      - Lint Go code only\"\n\t@echo \"  lint-proto   - Lint Protocol Buffer files only\"\n\t@echo \"\"\n\t@echo \"Dependencies:\"\n\t@echo \"  check-deps   - Verify required tools are available\"\n\t@echo \"\"\n\t@echo \"Note: If not running in a nix shell, ensure dependencies are available:\"\n\t@echo \"  nix develop\"\n"
  },
  {
    "path": "README.md",
    "content": "![headscale logo](./docs/assets/logo/headscale3_header_stacked_left.png)\n\n![ci](https://github.com/juanfont/headscale/actions/workflows/test.yml/badge.svg)\n\nAn open source, self-hosted implementation of the Tailscale control server.\n\nJoin our [Discord server](https://discord.gg/c84AZQhmpx) for a chat.\n\n**Note:** Always select the same GitHub tag as the released version you use\nto ensure you have the correct example configuration. The `main` branch might\ncontain unreleased changes. The documentation is available for stable and\ndevelopment versions:\n\n- [Documentation for the stable version](https://headscale.net/stable/)\n- [Documentation for the development version](https://headscale.net/development/)\n\n## What is Tailscale\n\nTailscale is [a modern VPN](https://tailscale.com/) built on top of\n[Wireguard](https://www.wireguard.com/).\nIt [works like an overlay network](https://tailscale.com/blog/how-tailscale-works/)\nbetween the computers of your networks - using\n[NAT traversal](https://tailscale.com/blog/how-nat-traversal-works/).\n\nEverything in Tailscale is Open Source, except the GUI clients for proprietary\noperating systems (Windows and macOS/iOS), and the control server.\n\nThe control server works as an exchange point of Wireguard public keys for the\nnodes in the Tailscale network. It assigns the IP addresses of the clients,\ncreates the boundaries between each user, enables sharing machines between users,\nand exposes the advertised routes of your nodes.\n\nA [Tailscale network (tailnet)](https://tailscale.com/kb/1136/tailnet/) is a\nprivate network which Tailscale assigns to a user, be that a private individual\nor an organisation.\n\n## Design goal\n\nHeadscale aims to implement a self-hosted, open source alternative to the\n[Tailscale](https://tailscale.com/) control server. Headscale's goal is to\nprovide self-hosters and hobbyists with an open-source server they can use for\ntheir projects and labs. It implements a narrow scope, a _single_ Tailscale\nnetwork (tailnet), suitable for personal use or a small open-source\norganisation.\n\n## Supporting Headscale\n\nIf you like `headscale` and find it useful, there are sponsorship and donation\nbuttons available in the repo.\n\n## Features\n\nPlease see [\"Features\" in the documentation](https://headscale.net/stable/about/features/).\n\n## Client OS support\n\nPlease see [\"Client and operating system support\" in the documentation](https://headscale.net/stable/about/clients/).\n\n## Running headscale\n\n**Please note that we do not support nor encourage the use of reverse proxies\nand containers to run Headscale.**\n\nPlease have a look at the [`documentation`](https://headscale.net/stable/).\n\nFor NixOS users, a module is available in [`nix/`](./nix/).\n\n## Talks\n\n- Fosdem 2026 (video): [Headscale & Tailscale: The complementary open source clone](https://fosdem.org/2026/schedule/event/KYQ3LL-headscale-the-complementary-open-source-clone/)\n  - presented by Kristoffer Dalby\n- Fosdem 2023 (video): [Headscale: How we are using integration testing to reimplement Tailscale](https://fosdem.org/2023/schedule/event/goheadscale/)\n  - presented by Juan Font Alonso and Kristoffer Dalby\n\n## Disclaimer\n\nThis project is not associated with Tailscale Inc.\n\nHowever, one of the active maintainers for Headscale [is employed by Tailscale](https://tailscale.com/blog/opensource) and he is allowed to spend work hours contributing to the project. 
Contributions from this maintainer are reviewed by other maintainers.\n\nThe maintainers work together on setting the direction for the project. The underlying principle is to serve the community of self-hosters, enthusiasts and hobbyists - while having a sustainable project.\n\n## Contributing\n\nPlease read the [CONTRIBUTING.md](./CONTRIBUTING.md) file.\n\n### Requirements\n\nTo contribute to headscale you need the latest version of [Go](https://golang.org)\nand [Buf](https://buf.build) (Protobuf generator).\n\nWe recommend using [Nix](https://nixos.org/) to set up a development environment. This can\nbe done with `nix develop`, which will install the tools and give you a shell.\nThis guarantees that you will have the same dev env as the `headscale` maintainers.\n\n### Code style\n\nTo ensure we have some consistency with a growing number of contributions,\nthis project has adopted linting and style/formatting rules:\n\nThe **Go** code is linted with [`golangci-lint`](https://golangci-lint.run) and\nformatted with [`golines`](https://github.com/segmentio/golines) (width 88) and\n[`gofumpt`](https://github.com/mvdan/gofumpt).\nPlease configure your editor to run the tools while developing and make sure to\nrun `make lint` and `make fmt` before committing any code.\n\nThe **Proto** code is linted with [`buf`](https://docs.buf.build/lint/overview) and\nformatted with [`clang-format`](https://clang.llvm.org/docs/ClangFormat.html).\n\nThe **docs** are formatted with [`mdformat`](https://mdformat.readthedocs.io).\n\nThe **rest** (Markdown, YAML, etc) is formatted with [`prettier`](https://prettier.io).\n\nCheck out the `.golangci.yaml` and `Makefile` to see the specific configuration.\n\n### Install development tools\n\n- Go\n- Buf\n- Protobuf tools\n\nInstall and activate:\n\n```shell\nnix develop\n```\n\n### Testing and building\n\nSome parts of the project require the generation of Go code from Protobuf\n(if changes are made in `proto/`) and it must be (re-)generated with:\n\n```shell\nmake generate\n```\n\n**Note**: Please check in changes from `gen/` in a separate commit to make it easier to review.\n\nTo run the tests:\n\n```shell\nmake test\n```\n\nTo build the program:\n\n```shell\nmake build\n```\n\n### Development workflow\n\nWe recommend using Nix for dependency management to ensure you have all required tools. If you prefer to manage dependencies yourself, you can use Make directly:\n\n**With Nix (recommended):**\n\n```shell\nnix develop\nmake test\nmake build\n```\n\n**With your own dependencies:**\n\n```shell\nmake test\nmake build\n```\n\nThe Makefile will warn you if any required tools are missing and suggest running `nix develop`. Run `make help` to see all available targets.\n\n## Contributors\n\n<a href=\"https://github.com/juanfont/headscale/graphs/contributors\">\n  <img src=\"https://contrib.rocks/image?repo=juanfont/headscale\" />\n</a>\n\nMade with [contrib.rocks](https://contrib.rocks).\n"
  },
  {
    "path": "buf.gen.yaml",
    "content": "version: v1\nplugins:\n  - name: go\n    out: gen/go\n    opt:\n      - paths=source_relative\n  - name: go-grpc\n    out: gen/go\n    opt:\n      - paths=source_relative\n  - name: grpc-gateway\n    out: gen/go\n    opt:\n      - paths=source_relative\n      - generate_unbound_methods=true\n  # - name: gorm\n  #   out: gen/go\n  #   opt:\n  #     - paths=source_relative,enums=string,gateway=true\n  - name: openapiv2\n    out: gen/openapiv2\n"
  },
  {
    "path": "cmd/headscale/cli/api_key.go",
    "content": "package cli\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strconv\"\n\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/pterm/pterm\"\n\t\"github.com/spf13/cobra\"\n)\n\nconst (\n\t// DefaultAPIKeyExpiry is 90 days.\n\tDefaultAPIKeyExpiry = \"90d\"\n)\n\nfunc init() {\n\trootCmd.AddCommand(apiKeysCmd)\n\tapiKeysCmd.AddCommand(listAPIKeys)\n\n\tcreateAPIKeyCmd.Flags().\n\t\tStringP(\"expiration\", \"e\", DefaultAPIKeyExpiry, \"Human-readable expiration of the key (e.g. 30m, 24h)\")\n\n\tapiKeysCmd.AddCommand(createAPIKeyCmd)\n\n\texpireAPIKeyCmd.Flags().StringP(\"prefix\", \"p\", \"\", \"ApiKey prefix\")\n\texpireAPIKeyCmd.Flags().Uint64P(\"id\", \"i\", 0, \"ApiKey ID\")\n\tapiKeysCmd.AddCommand(expireAPIKeyCmd)\n\n\tdeleteAPIKeyCmd.Flags().StringP(\"prefix\", \"p\", \"\", \"ApiKey prefix\")\n\tdeleteAPIKeyCmd.Flags().Uint64P(\"id\", \"i\", 0, \"ApiKey ID\")\n\tapiKeysCmd.AddCommand(deleteAPIKeyCmd)\n}\n\nvar apiKeysCmd = &cobra.Command{\n\tUse:     \"apikeys\",\n\tShort:   \"Handle the Api keys in Headscale\",\n\tAliases: []string{\"apikey\", \"api\"},\n}\n\nvar listAPIKeys = &cobra.Command{\n\tUse:     \"list\",\n\tShort:   \"List the Api keys for headscale\",\n\tAliases: []string{\"ls\", \"show\"},\n\tRunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {\n\t\tresponse, err := client.ListApiKeys(ctx, &v1.ListApiKeysRequest{})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"listing api keys: %w\", err)\n\t\t}\n\n\t\treturn printListOutput(cmd, response.GetApiKeys(), func() error {\n\t\t\ttableData := pterm.TableData{\n\t\t\t\t{\"ID\", \"Prefix\", \"Expiration\", \"Created\"},\n\t\t\t}\n\n\t\t\tfor _, key := range response.GetApiKeys() {\n\t\t\t\texpiration := \"-\"\n\n\t\t\t\tif key.GetExpiration() != nil {\n\t\t\t\t\texpiration = ColourTime(key.GetExpiration().AsTime())\n\t\t\t\t}\n\n\t\t\t\ttableData = append(tableData, []string{\n\t\t\t\t\tstrconv.FormatUint(key.GetId(), util.Base10),\n\t\t\t\t\tkey.GetPrefix(),\n\t\t\t\t\texpiration,\n\t\t\t\t\tkey.GetCreatedAt().AsTime().Format(HeadscaleDateTimeFormat),\n\t\t\t\t})\n\t\t\t}\n\n\t\t\treturn pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()\n\t\t})\n\t}),\n}\n\nvar createAPIKeyCmd = &cobra.Command{\n\tUse:   \"create\",\n\tShort: \"Creates a new Api key\",\n\tLong: `\nCreates a new Api key. The Api key is only visible on creation\nand cannot be retrieved again.\nIf you lose a key, create a new one and revoke (expire) the old one.`,\n\tAliases: []string{\"c\", \"new\"},\n\tRunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {\n\t\texpiration, err := expirationFromFlag(cmd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tresponse, err := client.CreateApiKey(ctx, &v1.CreateApiKeyRequest{\n\t\t\tExpiration: expiration,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"creating api key: %w\", err)\n\t\t}\n\n\t\treturn printOutput(cmd, response.GetApiKey(), response.GetApiKey())\n\t}),\n}\n\n// apiKeyIDOrPrefix reads --id and --prefix from cmd and validates that\n// exactly one is provided.\nfunc apiKeyIDOrPrefix(cmd *cobra.Command) (uint64, string, error) {\n\tid, _ := cmd.Flags().GetUint64(\"id\")\n\tprefix, _ := cmd.Flags().GetString(\"prefix\")\n\n\tswitch {\n\tcase id == 0 && prefix == \"\":\n\t\treturn 0, \"\", fmt.Errorf(\"either --id or --prefix must be provided: %w\", 
errMissingParameter)\n\tcase id != 0 && prefix != \"\":\n\t\treturn 0, \"\", fmt.Errorf(\"only one of --id or --prefix can be provided: %w\", errMissingParameter)\n\t}\n\n\treturn id, prefix, nil\n}\n\nvar expireAPIKeyCmd = &cobra.Command{\n\tUse:     \"expire\",\n\tShort:   \"Expire an ApiKey\",\n\tAliases: []string{\"revoke\", \"exp\", \"e\"},\n\tRunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {\n\t\tid, prefix, err := apiKeyIDOrPrefix(cmd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tresponse, err := client.ExpireApiKey(ctx, &v1.ExpireApiKeyRequest{\n\t\t\tId:     id,\n\t\t\tPrefix: prefix,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"expiring api key: %w\", err)\n\t\t}\n\n\t\treturn printOutput(cmd, response, \"Key expired\")\n\t}),\n}\n\nvar deleteAPIKeyCmd = &cobra.Command{\n\tUse:     \"delete\",\n\tShort:   \"Delete an ApiKey\",\n\tAliases: []string{\"remove\", \"del\"},\n\tRunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {\n\t\tid, prefix, err := apiKeyIDOrPrefix(cmd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tresponse, err := client.DeleteApiKey(ctx, &v1.DeleteApiKeyRequest{\n\t\t\tId:     id,\n\t\t\tPrefix: prefix,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"deleting api key: %w\", err)\n\t\t}\n\n\t\treturn printOutput(cmd, response, \"Key deleted\")\n\t}),\n}\n"
  },
  {
    "path": "cmd/headscale/cli/auth.go",
    "content": "package cli\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\t\"github.com/spf13/cobra\"\n)\n\nfunc init() {\n\trootCmd.AddCommand(authCmd)\n\n\tauthRegisterCmd.Flags().StringP(\"user\", \"u\", \"\", \"User\")\n\tauthRegisterCmd.Flags().String(\"auth-id\", \"\", \"Auth ID\")\n\tmustMarkRequired(authRegisterCmd, \"user\", \"auth-id\")\n\tauthCmd.AddCommand(authRegisterCmd)\n\n\tauthApproveCmd.Flags().String(\"auth-id\", \"\", \"Auth ID\")\n\tmustMarkRequired(authApproveCmd, \"auth-id\")\n\tauthCmd.AddCommand(authApproveCmd)\n\n\tauthRejectCmd.Flags().String(\"auth-id\", \"\", \"Auth ID\")\n\tmustMarkRequired(authRejectCmd, \"auth-id\")\n\tauthCmd.AddCommand(authRejectCmd)\n}\n\nvar authCmd = &cobra.Command{\n\tUse:   \"auth\",\n\tShort: \"Manage node authentication and approval\",\n}\n\nvar authRegisterCmd = &cobra.Command{\n\tUse:   \"register\",\n\tShort: \"Register a node to your network\",\n\tRunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {\n\t\tuser, _ := cmd.Flags().GetString(\"user\")\n\t\tauthID, _ := cmd.Flags().GetString(\"auth-id\")\n\n\t\trequest := &v1.AuthRegisterRequest{\n\t\t\tAuthId: authID,\n\t\t\tUser:   user,\n\t\t}\n\n\t\tresponse, err := client.AuthRegister(ctx, request)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"registering node: %w\", err)\n\t\t}\n\n\t\treturn printOutput(\n\t\t\tcmd,\n\t\t\tresponse.GetNode(),\n\t\t\tfmt.Sprintf(\"Node %s registered\", response.GetNode().GetGivenName()))\n\t}),\n}\n\nvar authApproveCmd = &cobra.Command{\n\tUse:   \"approve\",\n\tShort: \"Approve a pending authentication request\",\n\tRunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {\n\t\tauthID, _ := cmd.Flags().GetString(\"auth-id\")\n\n\t\trequest := &v1.AuthApproveRequest{\n\t\t\tAuthId: authID,\n\t\t}\n\n\t\tresponse, err := client.AuthApprove(ctx, request)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"approving auth request: %w\", err)\n\t\t}\n\n\t\treturn printOutput(cmd, response, \"Auth request approved\")\n\t}),\n}\n\nvar authRejectCmd = &cobra.Command{\n\tUse:   \"reject\",\n\tShort: \"Reject a pending authentication request\",\n\tRunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {\n\t\tauthID, _ := cmd.Flags().GetString(\"auth-id\")\n\n\t\trequest := &v1.AuthRejectRequest{\n\t\t\tAuthId: authID,\n\t\t}\n\n\t\tresponse, err := client.AuthReject(ctx, request)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"rejecting auth request: %w\", err)\n\t\t}\n\n\t\treturn printOutput(cmd, response, \"Auth request rejected\")\n\t}),\n}\n"
  },
  {
    "path": "cmd/headscale/cli/configtest.go",
    "content": "package cli\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/spf13/cobra\"\n)\n\nfunc init() {\n\trootCmd.AddCommand(configTestCmd)\n}\n\nvar configTestCmd = &cobra.Command{\n\tUse:   \"configtest\",\n\tShort: \"Test the configuration.\",\n\tLong:  \"Run a test of the configuration and exit.\",\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t_, err := newHeadscaleServerWithConfig()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"configuration error: %w\", err)\n\t\t}\n\n\t\treturn nil\n\t},\n}\n"
  },
  {
    "path": "cmd/headscale/cli/debug.go",
    "content": "package cli\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/spf13/cobra\"\n)\n\nfunc init() {\n\trootCmd.AddCommand(debugCmd)\n\n\tcreateNodeCmd.Flags().StringP(\"name\", \"\", \"\", \"Name\")\n\tcreateNodeCmd.Flags().StringP(\"user\", \"u\", \"\", \"User\")\n\tcreateNodeCmd.Flags().StringP(\"key\", \"k\", \"\", \"Key\")\n\tmustMarkRequired(createNodeCmd, \"name\", \"user\", \"key\")\n\n\tcreateNodeCmd.Flags().\n\t\tStringSliceP(\"route\", \"r\", []string{}, \"List (or repeated flags) of routes to advertise\")\n\n\tdebugCmd.AddCommand(createNodeCmd)\n}\n\nvar debugCmd = &cobra.Command{\n\tUse:   \"debug\",\n\tShort: \"debug and testing commands\",\n\tLong:  \"debug contains extra commands used for debugging and testing headscale\",\n}\n\nvar createNodeCmd = &cobra.Command{\n\tUse:   \"create-node\",\n\tShort: \"Create a node that can be registered with `auth register <>` command\",\n\tRunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {\n\t\tuser, _ := cmd.Flags().GetString(\"user\")\n\t\tname, _ := cmd.Flags().GetString(\"name\")\n\t\tregistrationID, _ := cmd.Flags().GetString(\"key\")\n\n\t\t_, err := types.AuthIDFromString(registrationID)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"parsing machine key: %w\", err)\n\t\t}\n\n\t\troutes, _ := cmd.Flags().GetStringSlice(\"route\")\n\n\t\trequest := &v1.DebugCreateNodeRequest{\n\t\t\tKey:    registrationID,\n\t\t\tName:   name,\n\t\t\tUser:   user,\n\t\t\tRoutes: routes,\n\t\t}\n\n\t\tresponse, err := client.DebugCreateNode(ctx, request)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"creating node: %w\", err)\n\t\t}\n\n\t\treturn printOutput(cmd, response.GetNode(), \"Node created\")\n\t}),\n}\n"
  },
  {
    "path": "cmd/headscale/cli/dump_config.go",
    "content": "package cli\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/spf13/cobra\"\n\t\"github.com/spf13/viper\"\n)\n\nfunc init() {\n\trootCmd.AddCommand(dumpConfigCmd)\n}\n\nvar dumpConfigCmd = &cobra.Command{\n\tUse:    \"dumpConfig\",\n\tShort:  \"dump current config to /etc/headscale/config.dump.yaml, integration test only\",\n\tHidden: true,\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\terr := viper.WriteConfigAs(\"/etc/headscale/config.dump.yaml\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"dumping config: %w\", err)\n\t\t}\n\n\t\treturn nil\n\t},\n}\n"
  },
  {
    "path": "cmd/headscale/cli/generate.go",
    "content": "package cli\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/spf13/cobra\"\n\t\"tailscale.com/types/key\"\n)\n\nfunc init() {\n\trootCmd.AddCommand(generateCmd)\n\tgenerateCmd.AddCommand(generatePrivateKeyCmd)\n}\n\nvar generateCmd = &cobra.Command{\n\tUse:     \"generate\",\n\tShort:   \"Generate commands\",\n\tAliases: []string{\"gen\"},\n}\n\nvar generatePrivateKeyCmd = &cobra.Command{\n\tUse:   \"private-key\",\n\tShort: \"Generate a private key for the headscale server\",\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tmachineKey := key.NewMachine()\n\n\t\tmachineKeyStr, err := machineKey.MarshalText()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"marshalling machine key: %w\", err)\n\t\t}\n\n\t\treturn printOutput(cmd, map[string]string{\n\t\t\t\"private_key\": string(machineKeyStr),\n\t\t},\n\t\t\tstring(machineKeyStr))\n\t},\n}\n"
  },
  {
    "path": "cmd/headscale/cli/health.go",
    "content": "package cli\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\t\"github.com/spf13/cobra\"\n)\n\nfunc init() {\n\trootCmd.AddCommand(healthCmd)\n}\n\nvar healthCmd = &cobra.Command{\n\tUse:   \"health\",\n\tShort: \"Check the health of the Headscale server\",\n\tLong:  \"Check the health of the Headscale server. This command will return an exit code of 0 if the server is healthy, or 1 if it is not.\",\n\tRunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {\n\t\tresponse, err := client.Health(ctx, &v1.HealthRequest{})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"checking health: %w\", err)\n\t\t}\n\n\t\treturn printOutput(cmd, response, \"\")\n\t}),\n}\n"
  },
  {
    "path": "cmd/headscale/cli/mockoidc.go",
    "content": "package cli\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net\"\n\t\"net/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/util/zlog/zf\"\n\t\"github.com/oauth2-proxy/mockoidc\"\n\t\"github.com/rs/zerolog/log\"\n\t\"github.com/spf13/cobra\"\n)\n\n// Error is used to compare errors as per https://dave.cheney.net/2016/04/07/constant-errors\ntype Error string\n\nfunc (e Error) Error() string { return string(e) }\n\nconst (\n\terrMockOidcClientIDNotDefined     = Error(\"MOCKOIDC_CLIENT_ID not defined\")\n\terrMockOidcClientSecretNotDefined = Error(\"MOCKOIDC_CLIENT_SECRET not defined\")\n\terrMockOidcPortNotDefined         = Error(\"MOCKOIDC_PORT not defined\")\n\terrMockOidcUsersNotDefined        = Error(\"MOCKOIDC_USERS not defined\")\n\trefreshTTL                        = 60 * time.Minute\n)\n\nvar accessTTL = 2 * time.Minute\n\nfunc init() {\n\trootCmd.AddCommand(mockOidcCmd)\n}\n\nvar mockOidcCmd = &cobra.Command{\n\tUse:   \"mockoidc\",\n\tShort: \"Runs a mock OIDC server for testing\",\n\tLong:  \"This internal command runs a OpenID Connect for testing purposes\",\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\terr := mockOIDC()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"running mock OIDC server: %w\", err)\n\t\t}\n\n\t\treturn nil\n\t},\n}\n\nfunc mockOIDC() error {\n\tclientID := os.Getenv(\"MOCKOIDC_CLIENT_ID\")\n\tif clientID == \"\" {\n\t\treturn errMockOidcClientIDNotDefined\n\t}\n\n\tclientSecret := os.Getenv(\"MOCKOIDC_CLIENT_SECRET\")\n\tif clientSecret == \"\" {\n\t\treturn errMockOidcClientSecretNotDefined\n\t}\n\n\taddrStr := os.Getenv(\"MOCKOIDC_ADDR\")\n\tif addrStr == \"\" {\n\t\treturn errMockOidcPortNotDefined\n\t}\n\n\tportStr := os.Getenv(\"MOCKOIDC_PORT\")\n\tif portStr == \"\" {\n\t\treturn errMockOidcPortNotDefined\n\t}\n\n\taccessTTLOverride := os.Getenv(\"MOCKOIDC_ACCESS_TTL\")\n\tif accessTTLOverride != \"\" {\n\t\tnewTTL, err := time.ParseDuration(accessTTLOverride)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\taccessTTL = newTTL\n\t}\n\n\tuserStr := os.Getenv(\"MOCKOIDC_USERS\")\n\tif userStr == \"\" {\n\t\treturn errMockOidcUsersNotDefined\n\t}\n\n\tvar users []mockoidc.MockUser\n\n\terr := json.Unmarshal([]byte(userStr), &users)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling users: %w\", err)\n\t}\n\n\tlog.Info().Interface(zf.Users, users).Msg(\"loading users from JSON\")\n\n\tlog.Info().Msgf(\"access token TTL: %s\", accessTTL)\n\n\tport, err := strconv.Atoi(portStr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmock, err := getMockOIDC(clientID, clientSecret, users)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlistener, err := new(net.ListenConfig).Listen(context.Background(), \"tcp\", fmt.Sprintf(\"%s:%d\", addrStr, port))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = mock.Start(listener, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Info().Msgf(\"mock OIDC server listening on %s\", listener.Addr().String())\n\tlog.Info().Msgf(\"issuer: %s\", mock.Issuer())\n\n\tc := make(chan struct{})\n\t<-c\n\n\treturn nil\n}\n\nfunc getMockOIDC(clientID string, clientSecret string, users []mockoidc.MockUser) (*mockoidc.MockOIDC, error) {\n\tkeypair, err := mockoidc.NewKeypair(nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tuserQueue := mockoidc.UserQueue{}\n\n\tfor _, user := range users {\n\t\tuserQueue.Push(&user)\n\t}\n\n\tmock := mockoidc.MockOIDC{\n\t\tClientID:                      clientID,\n\t\tClientSecret:                  
clientSecret,\n\t\tAccessTTL:                     accessTTL,\n\t\tRefreshTTL:                    refreshTTL,\n\t\tCodeChallengeMethodsSupported: []string{\"plain\", \"S256\"},\n\t\tKeypair:                       keypair,\n\t\tSessionStore:                  mockoidc.NewSessionStore(),\n\t\tUserQueue:                     &userQueue,\n\t\tErrorQueue:                    &mockoidc.ErrorQueue{},\n\t}\n\n\t_ = mock.AddMiddleware(func(h http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tlog.Info().Msgf(\"request: %+v\", r)\n\t\t\th.ServeHTTP(w, r)\n\n\t\t\tif r.Response != nil {\n\t\t\t\tlog.Info().Msgf(\"response: %+v\", r.Response)\n\t\t\t}\n\t\t})\n\t})\n\n\treturn &mock, nil\n}\n"
  },
  {
    "path": "cmd/headscale/cli/nodes.go",
    "content": "package cli\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/netip\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/pterm/pterm\"\n\t\"github.com/samber/lo\"\n\t\"github.com/spf13/cobra\"\n\t\"google.golang.org/protobuf/types/known/timestamppb\"\n\t\"tailscale.com/types/key\"\n)\n\nfunc init() {\n\trootCmd.AddCommand(nodeCmd)\n\tlistNodesCmd.Flags().StringP(\"user\", \"u\", \"\", \"Filter by user\")\n\tnodeCmd.AddCommand(listNodesCmd)\n\n\tlistNodeRoutesCmd.Flags().Uint64P(\"identifier\", \"i\", 0, \"Node identifier (ID)\")\n\tnodeCmd.AddCommand(listNodeRoutesCmd)\n\n\tregisterNodeCmd.Flags().StringP(\"user\", \"u\", \"\", \"User\")\n\tregisterNodeCmd.Flags().StringP(\"key\", \"k\", \"\", \"Key\")\n\tmustMarkRequired(registerNodeCmd, \"user\", \"key\")\n\tnodeCmd.AddCommand(registerNodeCmd)\n\n\texpireNodeCmd.Flags().Uint64P(\"identifier\", \"i\", 0, \"Node identifier (ID)\")\n\texpireNodeCmd.Flags().StringP(\"expiry\", \"e\", \"\", \"Set expire to (RFC3339 format, e.g. 2025-08-27T10:00:00Z), or leave empty to expire immediately.\")\n\texpireNodeCmd.Flags().BoolP(\"disable\", \"d\", false, \"Disable key expiry (node will never expire)\")\n\tmustMarkRequired(expireNodeCmd, \"identifier\")\n\tnodeCmd.AddCommand(expireNodeCmd)\n\n\trenameNodeCmd.Flags().Uint64P(\"identifier\", \"i\", 0, \"Node identifier (ID)\")\n\tmustMarkRequired(renameNodeCmd, \"identifier\")\n\tnodeCmd.AddCommand(renameNodeCmd)\n\n\tdeleteNodeCmd.Flags().Uint64P(\"identifier\", \"i\", 0, \"Node identifier (ID)\")\n\tmustMarkRequired(deleteNodeCmd, \"identifier\")\n\tnodeCmd.AddCommand(deleteNodeCmd)\n\n\ttagCmd.Flags().Uint64P(\"identifier\", \"i\", 0, \"Node identifier (ID)\")\n\tmustMarkRequired(tagCmd, \"identifier\")\n\ttagCmd.Flags().StringSliceP(\"tags\", \"t\", []string{}, \"List of tags to add to the node\")\n\tnodeCmd.AddCommand(tagCmd)\n\n\tapproveRoutesCmd.Flags().Uint64P(\"identifier\", \"i\", 0, \"Node identifier (ID)\")\n\tmustMarkRequired(approveRoutesCmd, \"identifier\")\n\tapproveRoutesCmd.Flags().StringSliceP(\"routes\", \"r\", []string{}, `List of routes that will be approved (comma-separated, e.g. 
\"10.0.0.0/8,192.168.0.0/24\" or empty string to remove all approved routes)`)\n\tnodeCmd.AddCommand(approveRoutesCmd)\n\n\tnodeCmd.AddCommand(backfillNodeIPsCmd)\n}\n\nvar nodeCmd = &cobra.Command{\n\tUse:     \"nodes\",\n\tShort:   \"Manage the nodes of Headscale\",\n\tAliases: []string{\"node\"},\n}\n\nvar registerNodeCmd = &cobra.Command{\n\tUse:        \"register\",\n\tShort:      \"Registers a node to your network\",\n\tDeprecated: \"use 'headscale auth register --auth-id <id> --user <user>' instead\",\n\tRunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {\n\t\tuser, _ := cmd.Flags().GetString(\"user\")\n\t\tregistrationID, _ := cmd.Flags().GetString(\"key\")\n\n\t\trequest := &v1.RegisterNodeRequest{\n\t\t\tKey:  registrationID,\n\t\t\tUser: user,\n\t\t}\n\n\t\tresponse, err := client.RegisterNode(ctx, request)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"registering node: %w\", err)\n\t\t}\n\n\t\treturn printOutput(\n\t\t\tcmd,\n\t\t\tresponse.GetNode(),\n\t\t\tfmt.Sprintf(\"Node %s registered\", response.GetNode().GetGivenName()))\n\t}),\n}\n\nvar listNodesCmd = &cobra.Command{\n\tUse:     \"list\",\n\tShort:   \"List nodes\",\n\tAliases: []string{\"ls\", \"show\"},\n\tRunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {\n\t\tuser, _ := cmd.Flags().GetString(\"user\")\n\n\t\tresponse, err := client.ListNodes(ctx, &v1.ListNodesRequest{User: user})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"listing nodes: %w\", err)\n\t\t}\n\n\t\treturn printListOutput(cmd, response.GetNodes(), func() error {\n\t\t\ttableData, err := nodesToPtables(user, response.GetNodes())\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"converting to table: %w\", err)\n\t\t\t}\n\n\t\t\treturn pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()\n\t\t})\n\t}),\n}\n\nvar listNodeRoutesCmd = &cobra.Command{\n\tUse:     \"list-routes\",\n\tShort:   \"List routes available on nodes\",\n\tAliases: []string{\"lsr\", \"routes\"},\n\tRunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {\n\t\tidentifier, _ := cmd.Flags().GetUint64(\"identifier\")\n\n\t\tresponse, err := client.ListNodes(ctx, &v1.ListNodesRequest{})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"listing nodes: %w\", err)\n\t\t}\n\n\t\tnodes := response.GetNodes()\n\t\tif identifier != 0 {\n\t\t\tfor _, node := range response.GetNodes() {\n\t\t\t\tif node.GetId() == identifier {\n\t\t\t\t\tnodes = []*v1.Node{node}\n\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tnodes = lo.Filter(nodes, func(n *v1.Node, _ int) bool {\n\t\t\treturn (n.GetSubnetRoutes() != nil && len(n.GetSubnetRoutes()) > 0) || (n.GetApprovedRoutes() != nil && len(n.GetApprovedRoutes()) > 0) || (n.GetAvailableRoutes() != nil && len(n.GetAvailableRoutes()) > 0)\n\t\t})\n\n\t\treturn printListOutput(cmd, nodes, func() error {\n\t\t\treturn pterm.DefaultTable.WithHasHeader().WithData(nodeRoutesToPtables(nodes)).Render()\n\t\t})\n\t}),\n}\n\nvar expireNodeCmd = &cobra.Command{\n\tUse:   \"expire\",\n\tShort: \"Expire (log out) a node in your network\",\n\tLong: `Expiring a node will keep the node in the database and force it to reauthenticate.\n\nUse --disable to disable key expiry (node will never expire).`,\n\tAliases: []string{\"logout\", \"exp\", \"e\"},\n\tRunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error 
{\n\t\tidentifier, _ := cmd.Flags().GetUint64(\"identifier\")\n\t\tdisableExpiry, _ := cmd.Flags().GetBool(\"disable\")\n\n\t\t// Handle disable expiry - node will never expire.\n\t\tif disableExpiry {\n\t\t\trequest := &v1.ExpireNodeRequest{\n\t\t\t\tNodeId:        identifier,\n\t\t\t\tDisableExpiry: true,\n\t\t\t}\n\n\t\t\tresponse, err := client.ExpireNode(ctx, request)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"disabling node expiry: %w\", err)\n\t\t\t}\n\n\t\t\treturn printOutput(cmd, response.GetNode(), \"Node expiry disabled\")\n\t\t}\n\n\t\texpiry, _ := cmd.Flags().GetString(\"expiry\")\n\n\t\tnow := time.Now()\n\n\t\texpiryTime := now\n\t\tif expiry != \"\" {\n\t\t\tvar err error\n\t\t\texpiryTime, err = time.Parse(time.RFC3339, expiry)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"parsing expiry time: %w\", err)\n\t\t\t}\n\t\t}\n\n\t\trequest := &v1.ExpireNodeRequest{\n\t\t\tNodeId: identifier,\n\t\t\tExpiry: timestamppb.New(expiryTime),\n\t\t}\n\n\t\tresponse, err := client.ExpireNode(ctx, request)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"expiring node: %w\", err)\n\t\t}\n\n\t\tif now.Equal(expiryTime) || now.After(expiryTime) {\n\t\t\treturn printOutput(cmd, response.GetNode(), \"Node expired\")\n\t\t}\n\n\t\treturn printOutput(cmd, response.GetNode(), \"Node expiration updated\")\n\t}),\n}\n\nvar renameNodeCmd = &cobra.Command{\n\tUse:   \"rename NEW_NAME\",\n\tShort: \"Renames a node in your network\",\n\tRunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {\n\t\tidentifier, _ := cmd.Flags().GetUint64(\"identifier\")\n\n\t\tnewName := \"\"\n\t\tif len(args) > 0 {\n\t\t\tnewName = args[0]\n\t\t}\n\n\t\trequest := &v1.RenameNodeRequest{\n\t\t\tNodeId:  identifier,\n\t\t\tNewName: newName,\n\t\t}\n\n\t\tresponse, err := client.RenameNode(ctx, request)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"renaming node: %w\", err)\n\t\t}\n\n\t\treturn printOutput(cmd, response.GetNode(), \"Node renamed\")\n\t}),\n}\n\nvar deleteNodeCmd = &cobra.Command{\n\tUse:     \"delete\",\n\tShort:   \"Delete a node\",\n\tAliases: []string{\"del\"},\n\tRunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {\n\t\tidentifier, _ := cmd.Flags().GetUint64(\"identifier\")\n\n\t\tgetRequest := &v1.GetNodeRequest{\n\t\t\tNodeId: identifier,\n\t\t}\n\n\t\tgetResponse, err := client.GetNode(ctx, getRequest)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"getting node: %w\", err)\n\t\t}\n\n\t\tdeleteRequest := &v1.DeleteNodeRequest{\n\t\t\tNodeId: identifier,\n\t\t}\n\n\t\tif !confirmAction(cmd, fmt.Sprintf(\n\t\t\t\"Do you want to remove the node %s?\",\n\t\t\tgetResponse.GetNode().GetName(),\n\t\t)) {\n\t\t\treturn printOutput(cmd, map[string]string{\"Result\": \"Node not deleted\"}, \"Node not deleted\")\n\t\t}\n\n\t\t_, err = client.DeleteNode(ctx, deleteRequest)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"deleting node: %w\", err)\n\t\t}\n\n\t\treturn printOutput(\n\t\t\tcmd,\n\t\t\tmap[string]string{\"Result\": \"Node deleted\"},\n\t\t\t\"Node deleted\",\n\t\t)\n\t}),\n}\n\nvar backfillNodeIPsCmd = &cobra.Command{\n\tUse:   \"backfillips\",\n\tShort: \"Backfill IPs missing from nodes\",\n\tLong: `\nBackfill IPs can be used to add/remove IPs from nodes\nbased on the current configuration of Headscale.\n\nIf there are nodes that do not have an IPv4 or IPv6 address\neven though prefixes for both are configured in the config,\nthis command can be used to assign addresses of the missing\nfamily to all affected nodes.\n\nIf you remove IPv4 or IPv6 prefixes from the config,\nit can be run to remove the IPs that should no longer\nbe assigned to nodes.`,\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tif !confirmAction(cmd, \"Are you sure that you want to assign/remove IPs to/from nodes?\") {\n\t\t\treturn nil\n\t\t}\n\n\t\tctx, client, conn, cancel, err := newHeadscaleCLIWithConfig()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"connecting to headscale: %w\", err)\n\t\t}\n\t\tdefer cancel()\n\t\tdefer conn.Close()\n\n\t\tchanges, err := client.BackfillNodeIPs(ctx, &v1.BackfillNodeIPsRequest{Confirmed: true})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"backfilling IPs: %w\", err)\n\t\t}\n\n\t\treturn printOutput(cmd, changes, \"Node IPs backfilled successfully\")\n\t},\n}\n\nfunc nodesToPtables(\n\tcurrentUser string,\n\tnodes []*v1.Node,\n) (pterm.TableData, error) {\n\ttableHeader := []string{\n\t\t\"ID\",\n\t\t\"Hostname\",\n\t\t\"Name\",\n\t\t\"MachineKey\",\n\t\t\"NodeKey\",\n\t\t\"User\",\n\t\t\"Tags\",\n\t\t\"IP addresses\",\n\t\t\"Ephemeral\",\n\t\t\"Last seen\",\n\t\t\"Expiration\",\n\t\t\"Connected\",\n\t\t\"Expired\",\n\t}\n\ttableData := pterm.TableData{tableHeader}\n\n\tfor _, node := range nodes {\n\t\tvar ephemeral bool\n\t\tif node.GetPreAuthKey() != nil && node.GetPreAuthKey().GetEphemeral() {\n\t\t\tephemeral = true\n\t\t}\n\n\t\tvar (\n\t\t\tlastSeen     time.Time\n\t\t\tlastSeenTime string\n\t\t)\n\n\t\tif node.GetLastSeen() != nil {\n\t\t\tlastSeen = node.GetLastSeen().AsTime()\n\t\t\tlastSeenTime = lastSeen.Format(HeadscaleDateTimeFormat)\n\t\t}\n\n\t\tvar (\n\t\t\texpiry     time.Time\n\t\t\texpiryTime string\n\t\t)\n\n\t\tif node.GetExpiry() != nil {\n\t\t\texpiry = node.GetExpiry().AsTime()\n\t\t\texpiryTime = expiry.Format(HeadscaleDateTimeFormat)\n\t\t} else {\n\t\t\texpiryTime = \"N/A\"\n\t\t}\n\n\t\tvar machineKey key.MachinePublic\n\n\t\terr := machineKey.UnmarshalText(\n\t\t\t[]byte(node.GetMachineKey()),\n\t\t)\n\t\tif err != nil {\n\t\t\tmachineKey = key.MachinePublic{}\n\t\t}\n\n\t\tvar nodeKey key.NodePublic\n\n\t\terr = nodeKey.UnmarshalText(\n\t\t\t[]byte(node.GetNodeKey()),\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvar online string\n\t\tif node.GetOnline() {\n\t\t\tonline = pterm.LightGreen(\"online\")\n\t\t} else {\n\t\t\tonline = pterm.LightRed(\"offline\")\n\t\t}\n\n\t\tvar expired string\n\t\tif node.GetExpiry() != nil && node.GetExpiry().AsTime().Before(time.Now()) {\n\t\t\texpired = pterm.LightRed(\"yes\")\n\t\t} else {\n\t\t\texpired = pterm.LightGreen(\"no\")\n\t\t}\n\n\t\tvar tagsBuilder strings.Builder\n\n\t\tfor _, tag := range node.GetTags() {\n\t\t\ttagsBuilder.WriteString(\"\\n\" + tag)\n\t\t}\n\n\t\ttags := strings.TrimLeft(tagsBuilder.String(), \"\\n\")\n\n\t\tvar user string\n\t\tif node.GetUser() != nil {\n\t\t\tuser = node.GetUser().GetName()\n\t\t}\n\n\t\tvar ipBuilder strings.Builder\n\t\tfor _, addr := range node.GetIpAddresses() {\n\t\t\tip, err := netip.ParseAddr(addr)\n\t\t\tif err == nil {\n\t\t\t\tif ipBuilder.Len() > 0 {\n\t\t\t\t\tipBuilder.WriteString(\"\\n\")\n\t\t\t\t}\n\n\t\t\t\tipBuilder.WriteString(ip.String())\n\t\t\t}\n\t\t}\n\n\t\tipAddresses := ipBuilder.String()\n\n\t\tnodeData := []string{\n\t\t\tstrconv.FormatUint(node.GetId(), 
util.Base10),\n\t\t\tnode.GetName(),\n\t\t\tnode.GetGivenName(),\n\t\t\tmachineKey.ShortString(),\n\t\t\tnodeKey.ShortString(),\n\t\t\tuser,\n\t\t\ttags,\n\t\t\tipAddresses,\n\t\t\tstrconv.FormatBool(ephemeral),\n\t\t\tlastSeenTime,\n\t\t\texpiryTime,\n\t\t\tonline,\n\t\t\texpired,\n\t\t}\n\t\ttableData = append(\n\t\t\ttableData,\n\t\t\tnodeData,\n\t\t)\n\t}\n\n\treturn tableData, nil\n}\n\nfunc nodeRoutesToPtables(\n\tnodes []*v1.Node,\n) pterm.TableData {\n\ttableHeader := []string{\n\t\t\"ID\",\n\t\t\"Hostname\",\n\t\t\"Approved\",\n\t\t\"Available\",\n\t\t\"Serving (Primary)\",\n\t}\n\ttableData := pterm.TableData{tableHeader}\n\n\tfor _, node := range nodes {\n\t\tnodeData := []string{\n\t\t\tstrconv.FormatUint(node.GetId(), util.Base10),\n\t\t\tnode.GetGivenName(),\n\t\t\tstrings.Join(node.GetApprovedRoutes(), \"\\n\"),\n\t\t\tstrings.Join(node.GetAvailableRoutes(), \"\\n\"),\n\t\t\tstrings.Join(node.GetSubnetRoutes(), \"\\n\"),\n\t\t}\n\t\ttableData = append(\n\t\t\ttableData,\n\t\t\tnodeData,\n\t\t)\n\t}\n\n\treturn tableData\n}\n\nvar tagCmd = &cobra.Command{\n\tUse:     \"tag\",\n\tShort:   \"Manage the tags of a node\",\n\tAliases: []string{\"tags\", \"t\"},\n\tRunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {\n\t\tidentifier, _ := cmd.Flags().GetUint64(\"identifier\")\n\t\ttagsToSet, _ := cmd.Flags().GetStringSlice(\"tags\")\n\n\t\t// Sending tags to node\n\t\trequest := &v1.SetTagsRequest{\n\t\t\tNodeId: identifier,\n\t\t\tTags:   tagsToSet,\n\t\t}\n\n\t\tresp, err := client.SetTags(ctx, request)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"setting tags: %w\", err)\n\t\t}\n\n\t\treturn printOutput(cmd, resp.GetNode(), \"Node updated\")\n\t}),\n}\n\nvar approveRoutesCmd = &cobra.Command{\n\tUse:   \"approve-routes\",\n\tShort: \"Manage the approved routes of a node\",\n\tRunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {\n\t\tidentifier, _ := cmd.Flags().GetUint64(\"identifier\")\n\t\troutes, _ := cmd.Flags().GetStringSlice(\"routes\")\n\n\t\t// Sending routes to node\n\t\trequest := &v1.SetApprovedRoutesRequest{\n\t\t\tNodeId: identifier,\n\t\t\tRoutes: routes,\n\t\t}\n\n\t\tresp, err := client.SetApprovedRoutes(ctx, request)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"setting approved routes: %w\", err)\n\t\t}\n\n\t\treturn printOutput(cmd, resp.GetNode(), \"Node updated\")\n\t}),\n}\n"
  },
  {
    "path": "cmd/headscale/cli/policy.go",
    "content": "package cli\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\t\"github.com/juanfont/headscale/hscontrol/db\"\n\t\"github.com/juanfont/headscale/hscontrol/policy\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/spf13/cobra\"\n\t\"tailscale.com/types/views\"\n)\n\nconst (\n\tbypassFlag = \"bypass-grpc-and-access-database-directly\" //nolint:gosec // not a credential\n)\n\nvar errAborted = errors.New(\"command aborted by user\")\n\n// bypassDatabase loads the server config and opens the database directly,\n// bypassing the gRPC server. The caller is responsible for closing the\n// returned database handle.\nfunc bypassDatabase() (*db.HSDatabase, error) {\n\tcfg, err := types.LoadServerConfig()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"loading config: %w\", err)\n\t}\n\n\td, err := db.NewHeadscaleDatabase(cfg, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"opening database: %w\", err)\n\t}\n\n\treturn d, nil\n}\n\nfunc init() {\n\trootCmd.AddCommand(policyCmd)\n\n\tgetPolicy.Flags().BoolP(bypassFlag, \"\", false, \"Uses the headscale config to directly access the database, bypassing gRPC and does not require the server to be running\")\n\tpolicyCmd.AddCommand(getPolicy)\n\n\tsetPolicy.Flags().StringP(\"file\", \"f\", \"\", \"Path to a policy file in HuJSON format\")\n\tsetPolicy.Flags().BoolP(bypassFlag, \"\", false, \"Uses the headscale config to directly access the database, bypassing gRPC and does not require the server to be running\")\n\tmustMarkRequired(setPolicy, \"file\")\n\tpolicyCmd.AddCommand(setPolicy)\n\n\tcheckPolicy.Flags().StringP(\"file\", \"f\", \"\", \"Path to a policy file in HuJSON format\")\n\tmustMarkRequired(checkPolicy, \"file\")\n\tpolicyCmd.AddCommand(checkPolicy)\n}\n\nvar policyCmd = &cobra.Command{\n\tUse:   \"policy\",\n\tShort: \"Manage the Headscale ACL Policy\",\n}\n\nvar getPolicy = &cobra.Command{\n\tUse:     \"get\",\n\tShort:   \"Print the current ACL Policy\",\n\tAliases: []string{\"show\", \"view\", \"fetch\"},\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tvar policyData string\n\t\tif bypass, _ := cmd.Flags().GetBool(bypassFlag); bypass {\n\t\t\tif !confirmAction(cmd, \"DO NOT run this command if an instance of headscale is running, are you sure headscale is not running?\") {\n\t\t\t\treturn errAborted\n\t\t\t}\n\n\t\t\td, err := bypassDatabase()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer d.Close()\n\n\t\t\tpol, err := d.GetPolicy()\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"loading policy from database: %w\", err)\n\t\t\t}\n\n\t\t\tpolicyData = pol.Data\n\t\t} else {\n\t\t\tctx, client, conn, cancel, err := newHeadscaleCLIWithConfig()\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"connecting to headscale: %w\", err)\n\t\t\t}\n\t\t\tdefer cancel()\n\t\t\tdefer conn.Close()\n\n\t\t\tresponse, err := client.GetPolicy(ctx, &v1.GetPolicyRequest{})\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"loading ACL policy: %w\", err)\n\t\t\t}\n\n\t\t\tpolicyData = response.GetPolicy()\n\t\t}\n\n\t\t// This does not pass output format as we don't support yaml, json or\n\t\t// json-line output for this command. It is HuJSON already.\n\t\tfmt.Println(policyData)\n\n\t\treturn nil\n\t},\n}\n\nvar setPolicy = &cobra.Command{\n\tUse:   \"set\",\n\tShort: \"Updates the ACL Policy\",\n\tLong: `\n\tUpdates the existing ACL Policy with the provided policy. 
The policy must be a valid HuJSON object.\n\tThis command only works when the acl.policy_mode is set to \"db\", and the policy will be stored in the database.`,\n\tAliases: []string{\"put\", \"update\"},\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tpolicyPath, _ := cmd.Flags().GetString(\"file\")\n\n\t\tpolicyBytes, err := os.ReadFile(policyPath)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"reading policy file: %w\", err)\n\t\t}\n\n\t\tif bypass, _ := cmd.Flags().GetBool(bypassFlag); bypass {\n\t\t\tif !confirmAction(cmd, \"DO NOT run this command if an instance of headscale is running, are you sure headscale is not running?\") {\n\t\t\t\treturn errAborted\n\t\t\t}\n\n\t\t\td, err := bypassDatabase()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer d.Close()\n\n\t\t\tusers, err := d.ListUsers()\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"loading users for policy validation: %w\", err)\n\t\t\t}\n\n\t\t\t_, err = policy.NewPolicyManager(policyBytes, users, views.Slice[types.NodeView]{})\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"parsing policy file: %w\", err)\n\t\t\t}\n\n\t\t\t_, err = d.SetPolicy(string(policyBytes))\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"setting ACL policy: %w\", err)\n\t\t\t}\n\t\t} else {\n\t\t\trequest := &v1.SetPolicyRequest{Policy: string(policyBytes)}\n\n\t\t\tctx, client, conn, cancel, err := newHeadscaleCLIWithConfig()\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"connecting to headscale: %w\", err)\n\t\t\t}\n\t\t\tdefer cancel()\n\t\t\tdefer conn.Close()\n\n\t\t\t_, err = client.SetPolicy(ctx, request)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"setting ACL policy: %w\", err)\n\t\t\t}\n\t\t}\n\n\t\tfmt.Println(\"Policy updated.\")\n\n\t\treturn nil\n\t},\n}\n\nvar checkPolicy = &cobra.Command{\n\tUse:   \"check\",\n\tShort: \"Check the Policy file for errors\",\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tpolicyPath, _ := cmd.Flags().GetString(\"file\")\n\n\t\tpolicyBytes, err := os.ReadFile(policyPath)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"reading policy file: %w\", err)\n\t\t}\n\n\t\t_, err = policy.NewPolicyManager(policyBytes, nil, views.Slice[types.NodeView]{})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"parsing policy file: %w\", err)\n\t\t}\n\n\t\tfmt.Println(\"Policy is valid\")\n\n\t\treturn nil\n\t},\n}\n"
  },
  {
    "path": "cmd/headscale/cli/preauthkeys.go",
    "content": "package cli\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/pterm/pterm\"\n\t\"github.com/spf13/cobra\"\n)\n\nconst (\n\tDefaultPreAuthKeyExpiry = \"1h\"\n)\n\nfunc init() {\n\trootCmd.AddCommand(preauthkeysCmd)\n\tpreauthkeysCmd.AddCommand(listPreAuthKeys)\n\tpreauthkeysCmd.AddCommand(createPreAuthKeyCmd)\n\tpreauthkeysCmd.AddCommand(expirePreAuthKeyCmd)\n\tpreauthkeysCmd.AddCommand(deletePreAuthKeyCmd)\n\tcreatePreAuthKeyCmd.PersistentFlags().\n\t\tBool(\"reusable\", false, \"Make the preauthkey reusable\")\n\tcreatePreAuthKeyCmd.PersistentFlags().\n\t\tBool(\"ephemeral\", false, \"Preauthkey for ephemeral nodes\")\n\tcreatePreAuthKeyCmd.Flags().\n\t\tStringP(\"expiration\", \"e\", DefaultPreAuthKeyExpiry, \"Human-readable expiration of the key (e.g. 30m, 24h)\")\n\tcreatePreAuthKeyCmd.Flags().\n\t\tStringSlice(\"tags\", []string{}, \"Tags to automatically assign to node\")\n\tcreatePreAuthKeyCmd.PersistentFlags().Uint64P(\"user\", \"u\", 0, \"User identifier (ID)\")\n\texpirePreAuthKeyCmd.PersistentFlags().Uint64P(\"id\", \"i\", 0, \"Authkey ID\")\n\tdeletePreAuthKeyCmd.PersistentFlags().Uint64P(\"id\", \"i\", 0, \"Authkey ID\")\n}\n\nvar preauthkeysCmd = &cobra.Command{\n\tUse:     \"preauthkeys\",\n\tShort:   \"Handle the preauthkeys in Headscale\",\n\tAliases: []string{\"preauthkey\", \"authkey\", \"pre\"},\n}\n\nvar listPreAuthKeys = &cobra.Command{\n\tUse:     \"list\",\n\tShort:   \"List all preauthkeys\",\n\tAliases: []string{\"ls\", \"show\"},\n\tRunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {\n\t\tresponse, err := client.ListPreAuthKeys(ctx, &v1.ListPreAuthKeysRequest{})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"listing preauthkeys: %w\", err)\n\t\t}\n\n\t\treturn printListOutput(cmd, response.GetPreAuthKeys(), func() error {\n\t\t\ttableData := pterm.TableData{\n\t\t\t\t{\n\t\t\t\t\t\"ID\",\n\t\t\t\t\t\"Key/Prefix\",\n\t\t\t\t\t\"Reusable\",\n\t\t\t\t\t\"Ephemeral\",\n\t\t\t\t\t\"Used\",\n\t\t\t\t\t\"Expiration\",\n\t\t\t\t\t\"Created\",\n\t\t\t\t\t\"Owner\",\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tfor _, key := range response.GetPreAuthKeys() {\n\t\t\t\texpiration := \"-\"\n\t\t\t\tif key.GetExpiration() != nil {\n\t\t\t\t\texpiration = ColourTime(key.GetExpiration().AsTime())\n\t\t\t\t}\n\n\t\t\t\tvar owner string\n\t\t\t\tif len(key.GetAclTags()) > 0 {\n\t\t\t\t\towner = strings.Join(key.GetAclTags(), \"\\n\")\n\t\t\t\t} else if key.GetUser() != nil {\n\t\t\t\t\towner = key.GetUser().GetName()\n\t\t\t\t} else {\n\t\t\t\t\towner = \"-\"\n\t\t\t\t}\n\n\t\t\t\ttableData = append(tableData, []string{\n\t\t\t\t\tstrconv.FormatUint(key.GetId(), util.Base10),\n\t\t\t\t\tkey.GetKey(),\n\t\t\t\t\tstrconv.FormatBool(key.GetReusable()),\n\t\t\t\t\tstrconv.FormatBool(key.GetEphemeral()),\n\t\t\t\t\tstrconv.FormatBool(key.GetUsed()),\n\t\t\t\t\texpiration,\n\t\t\t\t\tkey.GetCreatedAt().AsTime().Format(HeadscaleDateTimeFormat),\n\t\t\t\t\towner,\n\t\t\t\t})\n\t\t\t}\n\n\t\t\treturn pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()\n\t\t})\n\t}),\n}\n\nvar createPreAuthKeyCmd = &cobra.Command{\n\tUse:     \"create\",\n\tShort:   \"Creates a new preauthkey\",\n\tAliases: []string{\"c\", \"new\"},\n\tRunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {\n\t\tuser, _ := 
cmd.Flags().GetUint64(\"user\")\n\t\treusable, _ := cmd.Flags().GetBool(\"reusable\")\n\t\tephemeral, _ := cmd.Flags().GetBool(\"ephemeral\")\n\t\ttags, _ := cmd.Flags().GetStringSlice(\"tags\")\n\n\t\texpiration, err := expirationFromFlag(cmd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trequest := &v1.CreatePreAuthKeyRequest{\n\t\t\tUser:       user,\n\t\t\tReusable:   reusable,\n\t\t\tEphemeral:  ephemeral,\n\t\t\tAclTags:    tags,\n\t\t\tExpiration: expiration,\n\t\t}\n\n\t\tresponse, err := client.CreatePreAuthKey(ctx, request)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"creating preauthkey: %w\", err)\n\t\t}\n\n\t\treturn printOutput(cmd, response.GetPreAuthKey(), response.GetPreAuthKey().GetKey())\n\t}),\n}\n\nvar expirePreAuthKeyCmd = &cobra.Command{\n\tUse:     \"expire\",\n\tShort:   \"Expire a preauthkey\",\n\tAliases: []string{\"revoke\", \"exp\", \"e\"},\n\tRunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {\n\t\tid, _ := cmd.Flags().GetUint64(\"id\")\n\n\t\tif id == 0 {\n\t\t\treturn fmt.Errorf(\"missing --id parameter: %w\", errMissingParameter)\n\t\t}\n\n\t\trequest := &v1.ExpirePreAuthKeyRequest{\n\t\t\tId: id,\n\t\t}\n\n\t\tresponse, err := client.ExpirePreAuthKey(ctx, request)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"expiring preauthkey: %w\", err)\n\t\t}\n\n\t\treturn printOutput(cmd, response, \"Key expired\")\n\t}),\n}\n\nvar deletePreAuthKeyCmd = &cobra.Command{\n\tUse:     \"delete\",\n\tShort:   \"Delete a preauthkey\",\n\tAliases: []string{\"del\", \"rm\", \"d\"},\n\tRunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {\n\t\tid, _ := cmd.Flags().GetUint64(\"id\")\n\n\t\tif id == 0 {\n\t\t\treturn fmt.Errorf(\"missing --id parameter: %w\", errMissingParameter)\n\t\t}\n\n\t\trequest := &v1.DeletePreAuthKeyRequest{\n\t\t\tId: id,\n\t\t}\n\n\t\tresponse, err := client.DeletePreAuthKey(ctx, request)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"deleting preauthkey: %w\", err)\n\t\t}\n\n\t\treturn printOutput(cmd, response, \"Key deleted\")\n\t}),\n}\n"
  },
  {
    "path": "cmd/headscale/cli/pterm_style.go",
    "content": "package cli\n\nimport (\n\t\"time\"\n\n\t\"github.com/pterm/pterm\"\n)\n\nfunc ColourTime(date time.Time) string {\n\tdateStr := date.Format(HeadscaleDateTimeFormat)\n\n\tif date.After(time.Now()) {\n\t\tdateStr = pterm.LightGreen(dateStr)\n\t} else {\n\t\tdateStr = pterm.LightRed(dateStr)\n\t}\n\n\treturn dateStr\n}\n"
  },
  {
    "path": "cmd/headscale/cli/root.go",
    "content": "package cli\n\nimport (\n\t\"os\"\n\t\"runtime\"\n\t\"slices\"\n\t\"strings\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/rs/zerolog\"\n\t\"github.com/rs/zerolog/log\"\n\t\"github.com/spf13/cobra\"\n\t\"github.com/spf13/viper\"\n\t\"github.com/tcnksm/go-latest\"\n)\n\nvar cfgFile string = \"\"\n\nfunc init() {\n\tif len(os.Args) > 1 &&\n\t\t(os.Args[1] == \"version\" || os.Args[1] == \"mockoidc\" || os.Args[1] == \"completion\") {\n\t\treturn\n\t}\n\n\tif slices.Contains(os.Args, \"policy\") && slices.Contains(os.Args, \"check\") {\n\t\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\t\treturn\n\t}\n\n\tcobra.OnInitialize(initConfig)\n\trootCmd.PersistentFlags().\n\t\tStringVarP(&cfgFile, \"config\", \"c\", \"\", \"config file (default is /etc/headscale/config.yaml)\")\n\trootCmd.PersistentFlags().\n\t\tStringP(\"output\", \"o\", \"\", \"Output format. Empty for human-readable, 'json', 'json-line' or 'yaml'\")\n\trootCmd.PersistentFlags().\n\t\tBool(\"force\", false, \"Disable prompts and forces the execution\")\n\n\t// Re-enable usage output only for flag-parsing errors; runtime errors\n\t// from RunE should never dump usage text.\n\trootCmd.SetFlagErrorFunc(func(cmd *cobra.Command, err error) error {\n\t\tcmd.SilenceUsage = false\n\n\t\treturn err\n\t})\n}\n\nfunc initConfig() {\n\tif cfgFile == \"\" {\n\t\tcfgFile = os.Getenv(\"HEADSCALE_CONFIG\")\n\t}\n\n\tif cfgFile != \"\" {\n\t\terr := types.LoadConfig(cfgFile, true)\n\t\tif err != nil {\n\t\t\tlog.Fatal().Caller().Err(err).Msgf(\"error loading config file %s\", cfgFile)\n\t\t}\n\t} else {\n\t\terr := types.LoadConfig(\"\", false)\n\t\tif err != nil {\n\t\t\tlog.Fatal().Caller().Err(err).Msgf(\"error loading config\")\n\t\t}\n\t}\n\n\tmachineOutput := hasMachineOutputFlag()\n\n\t// If the user has requested a \"node\" readable format,\n\t// then disable login so the output remains valid.\n\tif machineOutput {\n\t\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\t}\n\n\tlogFormat := viper.GetString(\"log.format\")\n\tif logFormat == types.JSONLogFormat {\n\t\tlog.Logger = log.Output(os.Stdout)\n\t}\n\n\tdisableUpdateCheck := viper.GetBool(\"disable_check_updates\")\n\tif !disableUpdateCheck && !machineOutput {\n\t\tversionInfo := types.GetVersionInfo()\n\t\tif (runtime.GOOS == \"linux\" || runtime.GOOS == \"darwin\") &&\n\t\t\t!versionInfo.Dirty {\n\t\t\tgithubTag := &latest.GithubTag{\n\t\t\t\tOwner:         \"juanfont\",\n\t\t\t\tRepository:    \"headscale\",\n\t\t\t\tTagFilterFunc: filterPreReleasesIfStable(func() string { return versionInfo.Version }),\n\t\t\t}\n\n\t\t\tres, err := latest.Check(githubTag, versionInfo.Version)\n\t\t\tif err == nil && res.Outdated {\n\t\t\t\t//nolint\n\t\t\t\tlog.Warn().Msgf(\n\t\t\t\t\t\"An updated version of Headscale has been found (%s vs. your current %s). 
Check it out https://github.com/juanfont/headscale/releases\\n\",\n\t\t\t\t\tres.Current,\n\t\t\t\t\tversionInfo.Version,\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t}\n}\n\nvar prereleases = []string{\"alpha\", \"beta\", \"rc\", \"dev\"}\n\nfunc isPreReleaseVersion(version string) bool {\n\tfor _, unstable := range prereleases {\n\t\tif strings.Contains(version, unstable) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n// filterPreReleasesIfStable returns a function that filters out\n// pre-release tags if the current version is stable.\n// If the current version is a pre-release, it does not filter anything.\n// versionFunc is a function that returns the current version string; it is\n// a func for testability.\nfunc filterPreReleasesIfStable(versionFunc func() string) func(string) bool {\n\treturn func(tag string) bool {\n\t\tversion := versionFunc()\n\n\t\t// If we are on a pre-release version, then we do not filter anything\n\t\t// as we want to recommend the latest pre-release to the user.\n\t\tif isPreReleaseVersion(version) {\n\t\t\treturn false\n\t\t}\n\n\t\t// If we are on a stable release, filter out pre-releases.\n\t\tfor _, ignore := range prereleases {\n\t\t\tif strings.Contains(tag, ignore) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\treturn false\n\t}\n}\n\nvar rootCmd = &cobra.Command{\n\tUse:   \"headscale\",\n\tShort: \"headscale - a Tailscale control server\",\n\tLong: `\nheadscale is an open source implementation of the Tailscale control server\n\nhttps://github.com/juanfont/headscale`,\n\tSilenceErrors: true,\n\tSilenceUsage:  true,\n}\n\nfunc Execute() {\n\tcmd, err := rootCmd.ExecuteC()\n\tif err != nil {\n\t\toutputFormat, _ := cmd.Flags().GetString(\"output\")\n\t\tprintError(err, outputFormat)\n\t\tos.Exit(1)\n\t}\n}\n"
  },
  {
    "path": "cmd/headscale/cli/root_test.go",
    "content": "package cli\n\nimport (\n\t\"testing\"\n)\n\nfunc TestFilterPreReleasesIfStable(t *testing.T) {\n\ttests := []struct {\n\t\tname           string\n\t\tcurrentVersion string\n\t\ttag            string\n\t\texpectedFilter bool\n\t\tdescription    string\n\t}{\n\t\t{\n\t\t\tname:           \"stable version filters alpha tag\",\n\t\t\tcurrentVersion: \"0.23.0\",\n\t\t\ttag:            \"v0.24.0-alpha.1\",\n\t\t\texpectedFilter: true,\n\t\t\tdescription:    \"When on stable release, alpha tags should be filtered\",\n\t\t},\n\t\t{\n\t\t\tname:           \"stable version filters beta tag\",\n\t\t\tcurrentVersion: \"0.23.0\",\n\t\t\ttag:            \"v0.24.0-beta.2\",\n\t\t\texpectedFilter: true,\n\t\t\tdescription:    \"When on stable release, beta tags should be filtered\",\n\t\t},\n\t\t{\n\t\t\tname:           \"stable version filters rc tag\",\n\t\t\tcurrentVersion: \"0.23.0\",\n\t\t\ttag:            \"v0.24.0-rc.1\",\n\t\t\texpectedFilter: true,\n\t\t\tdescription:    \"When on stable release, rc tags should be filtered\",\n\t\t},\n\t\t{\n\t\t\tname:           \"stable version allows stable tag\",\n\t\t\tcurrentVersion: \"0.23.0\",\n\t\t\ttag:            \"v0.24.0\",\n\t\t\texpectedFilter: false,\n\t\t\tdescription:    \"When on stable release, stable tags should not be filtered\",\n\t\t},\n\t\t{\n\t\t\tname:           \"alpha version allows alpha tag\",\n\t\t\tcurrentVersion: \"0.23.0-alpha.1\",\n\t\t\ttag:            \"v0.24.0-alpha.2\",\n\t\t\texpectedFilter: false,\n\t\t\tdescription:    \"When on alpha release, alpha tags should not be filtered\",\n\t\t},\n\t\t{\n\t\t\tname:           \"alpha version allows beta tag\",\n\t\t\tcurrentVersion: \"0.23.0-alpha.1\",\n\t\t\ttag:            \"v0.24.0-beta.1\",\n\t\t\texpectedFilter: false,\n\t\t\tdescription:    \"When on alpha release, beta tags should not be filtered\",\n\t\t},\n\t\t{\n\t\t\tname:           \"alpha version allows rc tag\",\n\t\t\tcurrentVersion: \"0.23.0-alpha.1\",\n\t\t\ttag:            \"v0.24.0-rc.1\",\n\t\t\texpectedFilter: false,\n\t\t\tdescription:    \"When on alpha release, rc tags should not be filtered\",\n\t\t},\n\t\t{\n\t\t\tname:           \"alpha version allows stable tag\",\n\t\t\tcurrentVersion: \"0.23.0-alpha.1\",\n\t\t\ttag:            \"v0.24.0\",\n\t\t\texpectedFilter: false,\n\t\t\tdescription:    \"When on alpha release, stable tags should not be filtered\",\n\t\t},\n\t\t{\n\t\t\tname:           \"beta version allows alpha tag\",\n\t\t\tcurrentVersion: \"0.23.0-beta.1\",\n\t\t\ttag:            \"v0.24.0-alpha.1\",\n\t\t\texpectedFilter: false,\n\t\t\tdescription:    \"When on beta release, alpha tags should not be filtered\",\n\t\t},\n\t\t{\n\t\t\tname:           \"beta version allows beta tag\",\n\t\t\tcurrentVersion: \"0.23.0-beta.2\",\n\t\t\ttag:            \"v0.24.0-beta.3\",\n\t\t\texpectedFilter: false,\n\t\t\tdescription:    \"When on beta release, beta tags should not be filtered\",\n\t\t},\n\t\t{\n\t\t\tname:           \"beta version allows rc tag\",\n\t\t\tcurrentVersion: \"0.23.0-beta.1\",\n\t\t\ttag:            \"v0.24.0-rc.1\",\n\t\t\texpectedFilter: false,\n\t\t\tdescription:    \"When on beta release, rc tags should not be filtered\",\n\t\t},\n\t\t{\n\t\t\tname:           \"beta version allows stable tag\",\n\t\t\tcurrentVersion: \"0.23.0-beta.1\",\n\t\t\ttag:            \"v0.24.0\",\n\t\t\texpectedFilter: false,\n\t\t\tdescription:    \"When on beta release, stable tags should not be filtered\",\n\t\t},\n\t\t{\n\t\t\tname:           \"rc version allows alpha 
tag\",\n\t\t\tcurrentVersion: \"0.23.0-rc.1\",\n\t\t\ttag:            \"v0.24.0-alpha.1\",\n\t\t\texpectedFilter: false,\n\t\t\tdescription:    \"When on rc release, alpha tags should not be filtered\",\n\t\t},\n\t\t{\n\t\t\tname:           \"rc version allows beta tag\",\n\t\t\tcurrentVersion: \"0.23.0-rc.1\",\n\t\t\ttag:            \"v0.24.0-beta.1\",\n\t\t\texpectedFilter: false,\n\t\t\tdescription:    \"When on rc release, beta tags should not be filtered\",\n\t\t},\n\t\t{\n\t\t\tname:           \"rc version allows rc tag\",\n\t\t\tcurrentVersion: \"0.23.0-rc.2\",\n\t\t\ttag:            \"v0.24.0-rc.3\",\n\t\t\texpectedFilter: false,\n\t\t\tdescription:    \"When on rc release, rc tags should not be filtered\",\n\t\t},\n\t\t{\n\t\t\tname:           \"rc version allows stable tag\",\n\t\t\tcurrentVersion: \"0.23.0-rc.1\",\n\t\t\ttag:            \"v0.24.0\",\n\t\t\texpectedFilter: false,\n\t\t\tdescription:    \"When on rc release, stable tags should not be filtered\",\n\t\t},\n\t\t{\n\t\t\tname:           \"stable version with patch filters alpha\",\n\t\t\tcurrentVersion: \"0.23.1\",\n\t\t\ttag:            \"v0.24.0-alpha.1\",\n\t\t\texpectedFilter: true,\n\t\t\tdescription:    \"Stable version with patch number should filter alpha tags\",\n\t\t},\n\t\t{\n\t\t\tname:           \"stable version with patch allows stable\",\n\t\t\tcurrentVersion: \"0.23.1\",\n\t\t\ttag:            \"v0.24.0\",\n\t\t\texpectedFilter: false,\n\t\t\tdescription:    \"Stable version with patch number should allow stable tags\",\n\t\t},\n\t\t{\n\t\t\tname:           \"tag with alpha substring in version number\",\n\t\t\tcurrentVersion: \"0.23.0\",\n\t\t\ttag:            \"v1.0.0-alpha.1\",\n\t\t\texpectedFilter: true,\n\t\t\tdescription:    \"Tags with alpha in version string should be filtered on stable\",\n\t\t},\n\t\t{\n\t\t\tname:           \"tag with beta substring in version number\",\n\t\t\tcurrentVersion: \"0.23.0\",\n\t\t\ttag:            \"v1.0.0-beta.1\",\n\t\t\texpectedFilter: true,\n\t\t\tdescription:    \"Tags with beta in version string should be filtered on stable\",\n\t\t},\n\t\t{\n\t\t\tname:           \"tag with rc substring in version number\",\n\t\t\tcurrentVersion: \"0.23.0\",\n\t\t\ttag:            \"v1.0.0-rc.1\",\n\t\t\texpectedFilter: true,\n\t\t\tdescription:    \"Tags with rc in version string should be filtered on stable\",\n\t\t},\n\t\t{\n\t\t\tname:           \"empty tag on stable version\",\n\t\t\tcurrentVersion: \"0.23.0\",\n\t\t\ttag:            \"\",\n\t\t\texpectedFilter: false,\n\t\t\tdescription:    \"Empty tags should not be filtered\",\n\t\t},\n\t\t{\n\t\t\tname:           \"dev version allows all tags\",\n\t\t\tcurrentVersion: \"0.23.0-dev\",\n\t\t\ttag:            \"v0.24.0-alpha.1\",\n\t\t\texpectedFilter: false,\n\t\t\tdescription:    \"Dev versions should not filter any tags (pre-release allows all)\",\n\t\t},\n\t\t{\n\t\t\tname:           \"stable version filters dev tag\",\n\t\t\tcurrentVersion: \"0.23.0\",\n\t\t\ttag:            \"v0.24.0-dev\",\n\t\t\texpectedFilter: true,\n\t\t\tdescription:    \"When on stable release, dev tags should be filtered\",\n\t\t},\n\t\t{\n\t\t\tname:           \"dev version allows dev tag\",\n\t\t\tcurrentVersion: \"0.23.0-dev\",\n\t\t\ttag:            \"v0.24.0-dev.1\",\n\t\t\texpectedFilter: false,\n\t\t\tdescription:    \"When on dev release, dev tags should not be filtered\",\n\t\t},\n\t\t{\n\t\t\tname:           \"dev version allows stable tag\",\n\t\t\tcurrentVersion: \"0.23.0-dev\",\n\t\t\ttag:            
\"v0.24.0\",\n\t\t\texpectedFilter: false,\n\t\t\tdescription:    \"When on dev release, stable tags should not be filtered\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tresult := filterPreReleasesIfStable(func() string { return tt.currentVersion })(tt.tag)\n\t\t\tif result != tt.expectedFilter {\n\t\t\t\tt.Errorf(\"%s: got %v, want %v\\nDescription: %s\\nCurrent version: %s, Tag: %s\",\n\t\t\t\t\ttt.name,\n\t\t\t\t\tresult,\n\t\t\t\t\ttt.expectedFilter,\n\t\t\t\t\ttt.description,\n\t\t\t\t\ttt.currentVersion,\n\t\t\t\t\ttt.tag,\n\t\t\t\t)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestIsPreReleaseVersion(t *testing.T) {\n\ttests := []struct {\n\t\tname        string\n\t\tversion     string\n\t\texpected    bool\n\t\tdescription string\n\t}{\n\t\t{\n\t\t\tname:        \"stable version\",\n\t\t\tversion:     \"0.23.0\",\n\t\t\texpected:    false,\n\t\t\tdescription: \"Stable version should not be pre-release\",\n\t\t},\n\t\t{\n\t\t\tname:        \"alpha version\",\n\t\t\tversion:     \"0.23.0-alpha.1\",\n\t\t\texpected:    true,\n\t\t\tdescription: \"Alpha version should be pre-release\",\n\t\t},\n\t\t{\n\t\t\tname:        \"beta version\",\n\t\t\tversion:     \"0.23.0-beta.1\",\n\t\t\texpected:    true,\n\t\t\tdescription: \"Beta version should be pre-release\",\n\t\t},\n\t\t{\n\t\t\tname:        \"rc version\",\n\t\t\tversion:     \"0.23.0-rc.1\",\n\t\t\texpected:    true,\n\t\t\tdescription: \"RC version should be pre-release\",\n\t\t},\n\t\t{\n\t\t\tname:        \"version with alpha substring\",\n\t\t\tversion:     \"0.23.0-alphabetical\",\n\t\t\texpected:    true,\n\t\t\tdescription: \"Version containing 'alpha' should be pre-release\",\n\t\t},\n\t\t{\n\t\t\tname:        \"version with beta substring\",\n\t\t\tversion:     \"0.23.0-betamax\",\n\t\t\texpected:    true,\n\t\t\tdescription: \"Version containing 'beta' should be pre-release\",\n\t\t},\n\t\t{\n\t\t\tname:        \"dev version\",\n\t\t\tversion:     \"0.23.0-dev\",\n\t\t\texpected:    true,\n\t\t\tdescription: \"Dev version should be pre-release\",\n\t\t},\n\t\t{\n\t\t\tname:        \"empty version\",\n\t\t\tversion:     \"\",\n\t\t\texpected:    false,\n\t\t\tdescription: \"Empty version should not be pre-release\",\n\t\t},\n\t\t{\n\t\t\tname:        \"version with patch number\",\n\t\t\tversion:     \"0.23.1\",\n\t\t\texpected:    false,\n\t\t\tdescription: \"Stable version with patch should not be pre-release\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tresult := isPreReleaseVersion(tt.version)\n\t\t\tif result != tt.expected {\n\t\t\t\tt.Errorf(\"%s: got %v, want %v\\nDescription: %s\\nVersion: %s\",\n\t\t\t\t\ttt.name,\n\t\t\t\t\tresult,\n\t\t\t\t\ttt.expected,\n\t\t\t\t\ttt.description,\n\t\t\t\t\ttt.version,\n\t\t\t\t)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "cmd/headscale/cli/serve.go",
    "content": "package cli\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\n\t\"github.com/spf13/cobra\"\n\t\"github.com/tailscale/squibble\"\n)\n\nfunc init() {\n\trootCmd.AddCommand(serveCmd)\n}\n\nvar serveCmd = &cobra.Command{\n\tUse:   \"serve\",\n\tShort: \"Launches the headscale server\",\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tapp, err := newHeadscaleServerWithConfig()\n\t\tif err != nil {\n\t\t\tif squibbleErr, ok := errors.AsType[squibble.ValidationError](err); ok {\n\t\t\t\tfmt.Printf(\"SQLite schema failed to validate:\\n\")\n\t\t\t\tfmt.Println(squibbleErr.Diff)\n\t\t\t}\n\n\t\t\treturn fmt.Errorf(\"initializing: %w\", err)\n\t\t}\n\n\t\terr = app.Serve()\n\t\tif err != nil && !errors.Is(err, http.ErrServerClosed) {\n\t\t\treturn fmt.Errorf(\"headscale ran into an error and had to shut down: %w\", err)\n\t\t}\n\n\t\treturn nil\n\t},\n}\n"
  },
  {
    "path": "cmd/headscale/cli/users.go",
    "content": "package cli\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/url\"\n\t\"strconv\"\n\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/juanfont/headscale/hscontrol/util/zlog/zf\"\n\t\"github.com/pterm/pterm\"\n\t\"github.com/rs/zerolog/log\"\n\t\"github.com/spf13/cobra\"\n)\n\n// CLI user errors.\nvar (\n\terrFlagRequired       = errors.New(\"--name or --identifier flag is required\")\n\terrMultipleUsersMatch = errors.New(\"multiple users match query, specify an ID\")\n)\n\nfunc usernameAndIDFlag(cmd *cobra.Command) {\n\tcmd.Flags().Int64P(\"identifier\", \"i\", -1, \"User identifier (ID)\")\n\tcmd.Flags().StringP(\"name\", \"n\", \"\", \"Username\")\n}\n\n// usernameAndIDFromFlag returns the username and ID from the flags of the command.\nfunc usernameAndIDFromFlag(cmd *cobra.Command) (uint64, string, error) {\n\tusername, _ := cmd.Flags().GetString(\"name\")\n\n\tidentifier, _ := cmd.Flags().GetInt64(\"identifier\")\n\tif username == \"\" && identifier < 0 {\n\t\treturn 0, \"\", errFlagRequired\n\t}\n\n\t// Normalise unset/negative identifiers to 0 so the uint64\n\t// conversion does not produce a bogus large value.\n\tif identifier < 0 {\n\t\tidentifier = 0\n\t}\n\n\treturn uint64(identifier), username, nil //nolint:gosec // identifier is clamped to >= 0 above\n}\n\nfunc init() {\n\trootCmd.AddCommand(userCmd)\n\tuserCmd.AddCommand(createUserCmd)\n\tcreateUserCmd.Flags().StringP(\"display-name\", \"d\", \"\", \"Display name\")\n\tcreateUserCmd.Flags().StringP(\"email\", \"e\", \"\", \"Email\")\n\tcreateUserCmd.Flags().StringP(\"picture-url\", \"p\", \"\", \"Profile picture URL\")\n\tuserCmd.AddCommand(listUsersCmd)\n\tusernameAndIDFlag(listUsersCmd)\n\tlistUsersCmd.Flags().StringP(\"email\", \"e\", \"\", \"Email\")\n\tuserCmd.AddCommand(destroyUserCmd)\n\tusernameAndIDFlag(destroyUserCmd)\n\tuserCmd.AddCommand(renameUserCmd)\n\tusernameAndIDFlag(renameUserCmd)\n\trenameUserCmd.Flags().StringP(\"new-name\", \"r\", \"\", \"New username\")\n\tmustMarkRequired(renameUserCmd, \"new-name\")\n}\n\nvar userCmd = &cobra.Command{\n\tUse:     \"users\",\n\tShort:   \"Manage the users of Headscale\",\n\tAliases: []string{\"user\"},\n}\n\nvar createUserCmd = &cobra.Command{\n\tUse:     \"create NAME\",\n\tShort:   \"Creates a new user\",\n\tAliases: []string{\"c\", \"new\"},\n\tArgs: func(cmd *cobra.Command, args []string) error {\n\t\tif len(args) < 1 {\n\t\t\treturn errMissingParameter\n\t\t}\n\n\t\treturn nil\n\t},\n\tRunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {\n\t\tuserName := args[0]\n\n\t\tlog.Trace().Interface(zf.Client, client).Msg(\"obtained gRPC client\")\n\n\t\trequest := &v1.CreateUserRequest{Name: userName}\n\n\t\tif displayName, _ := cmd.Flags().GetString(\"display-name\"); displayName != \"\" {\n\t\t\trequest.DisplayName = displayName\n\t\t}\n\n\t\tif email, _ := cmd.Flags().GetString(\"email\"); email != \"\" {\n\t\t\trequest.Email = email\n\t\t}\n\n\t\tif pictureURL, _ := cmd.Flags().GetString(\"picture-url\"); pictureURL != \"\" {\n\t\t\tif _, err := url.Parse(pictureURL); err != nil { //nolint:noinlineerr\n\t\t\t\treturn fmt.Errorf(\"invalid picture URL: %w\", err)\n\t\t\t}\n\n\t\t\trequest.PictureUrl = pictureURL\n\t\t}\n\n\t\tlog.Trace().Interface(zf.Request, request).Msg(\"sending CreateUser request\")\n\n\t\tresponse, err := client.CreateUser(ctx, request)\n\t\tif err != nil {\n\t\t\treturn 
fmt.Errorf(\"creating user: %w\", err)\n\t\t}\n\n\t\treturn printOutput(cmd, response.GetUser(), \"User created\")\n\t}),\n}\n\nvar destroyUserCmd = &cobra.Command{\n\tUse:     \"destroy --identifier ID or --name NAME\",\n\tShort:   \"Destroys a user\",\n\tAliases: []string{\"delete\"},\n\tRunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {\n\t\tid, username, err := usernameAndIDFromFlag(cmd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trequest := &v1.ListUsersRequest{\n\t\t\tName: username,\n\t\t\tId:   id,\n\t\t}\n\n\t\tusers, err := client.ListUsers(ctx, request)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"listing users: %w\", err)\n\t\t}\n\n\t\tif len(users.GetUsers()) != 1 {\n\t\t\treturn errMultipleUsersMatch\n\t\t}\n\n\t\tuser := users.GetUsers()[0]\n\n\t\tif !confirmAction(cmd, fmt.Sprintf(\n\t\t\t\"Do you want to remove the user %q (%d) and any associated preauthkeys?\",\n\t\t\tuser.GetName(), user.GetId(),\n\t\t)) {\n\t\t\treturn printOutput(cmd, map[string]string{\"Result\": \"User not destroyed\"}, \"User not destroyed\")\n\t\t}\n\n\t\tdeleteRequest := &v1.DeleteUserRequest{Id: user.GetId()}\n\n\t\tresponse, err := client.DeleteUser(ctx, deleteRequest)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"destroying user: %w\", err)\n\t\t}\n\n\t\treturn printOutput(cmd, response, \"User destroyed\")\n\t}),\n}\n\nvar listUsersCmd = &cobra.Command{\n\tUse:     \"list\",\n\tShort:   \"List all the users\",\n\tAliases: []string{\"ls\", \"show\"},\n\tRunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {\n\t\trequest := &v1.ListUsersRequest{}\n\n\t\tid, _ := cmd.Flags().GetInt64(\"identifier\")\n\t\tusername, _ := cmd.Flags().GetString(\"name\")\n\t\temail, _ := cmd.Flags().GetString(\"email\")\n\n\t\t// filter by one param at most\n\t\tswitch {\n\t\tcase id > 0:\n\t\t\trequest.Id = uint64(id)\n\t\tcase username != \"\":\n\t\t\trequest.Name = username\n\t\tcase email != \"\":\n\t\t\trequest.Email = email\n\t\t}\n\n\t\tresponse, err := client.ListUsers(ctx, request)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"listing users: %w\", err)\n\t\t}\n\n\t\treturn printListOutput(cmd, response.GetUsers(), func() error {\n\t\t\ttableData := pterm.TableData{{\"ID\", \"Name\", \"Username\", \"Email\", \"Created\"}}\n\t\t\tfor _, user := range response.GetUsers() {\n\t\t\t\ttableData = append(\n\t\t\t\t\ttableData,\n\t\t\t\t\t[]string{\n\t\t\t\t\t\tstrconv.FormatUint(user.GetId(), util.Base10),\n\t\t\t\t\t\tuser.GetDisplayName(),\n\t\t\t\t\t\tuser.GetName(),\n\t\t\t\t\t\tuser.GetEmail(),\n\t\t\t\t\t\tuser.GetCreatedAt().AsTime().Format(HeadscaleDateTimeFormat),\n\t\t\t\t\t},\n\t\t\t\t)\n\t\t\t}\n\n\t\t\treturn pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()\n\t\t})\n\t}),\n}\n\nvar renameUserCmd = &cobra.Command{\n\tUse:     \"rename\",\n\tShort:   \"Renames a user\",\n\tAliases: []string{\"mv\"},\n\tRunE: grpcRunE(func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error {\n\t\tid, username, err := usernameAndIDFromFlag(cmd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tlistReq := &v1.ListUsersRequest{\n\t\t\tName: username,\n\t\t\tId:   id,\n\t\t}\n\n\t\tusers, err := client.ListUsers(ctx, listReq)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"listing users: %w\", err)\n\t\t}\n\n\t\tif len(users.GetUsers()) != 1 {\n\t\t\treturn errMultipleUsersMatch\n\t\t}\n\n\t\tnewName, _ := 
cmd.Flags().GetString(\"new-name\")\n\n\t\trenameReq := &v1.RenameUserRequest{\n\t\t\tOldId:   id,\n\t\t\tNewName: newName,\n\t\t}\n\n\t\tresponse, err := client.RenameUser(ctx, renameReq)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"renaming user: %w\", err)\n\t\t}\n\n\t\treturn printOutput(cmd, response.GetUser(), \"User renamed\")\n\t}),\n}\n"
  },
  {
    "path": "cmd/headscale/cli/utils.go",
    "content": "package cli\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\t\"github.com/juanfont/headscale/hscontrol\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/juanfont/headscale/hscontrol/util/zlog/zf\"\n\t\"github.com/prometheus/common/model\"\n\t\"github.com/rs/zerolog/log\"\n\t\"github.com/spf13/cobra\"\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/credentials\"\n\t\"google.golang.org/grpc/credentials/insecure\"\n\t\"google.golang.org/protobuf/types/known/timestamppb\"\n\t\"gopkg.in/yaml.v3\"\n)\n\nconst (\n\tHeadscaleDateTimeFormat = \"2006-01-02 15:04:05\"\n\tSocketWritePermissions  = 0o666\n\n\toutputFormatJSON     = \"json\"\n\toutputFormatJSONLine = \"json-line\"\n\toutputFormatYAML     = \"yaml\"\n)\n\nvar (\n\terrAPIKeyNotSet     = errors.New(\"HEADSCALE_CLI_API_KEY environment variable needs to be set\")\n\terrMissingParameter = errors.New(\"missing parameters\")\n)\n\n// mustMarkRequired marks the named flags as required on cmd, panicking\n// if any name does not match a registered flag.  This is only called\n// from init() where a failure indicates a programming error.\nfunc mustMarkRequired(cmd *cobra.Command, names ...string) {\n\tfor _, n := range names {\n\t\terr := cmd.MarkFlagRequired(n)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"marking flag %q required on %q: %v\", n, cmd.Name(), err))\n\t\t}\n\t}\n}\n\nfunc newHeadscaleServerWithConfig() (*hscontrol.Headscale, error) {\n\tcfg, err := types.LoadServerConfig()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"loading configuration: %w\",\n\t\t\terr,\n\t\t)\n\t}\n\n\tapp, err := hscontrol.NewHeadscale(cfg)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating new headscale: %w\", err)\n\t}\n\n\treturn app, nil\n}\n\n// grpcRunE wraps a cobra RunE func, injecting a ready gRPC client and\n// context. 
Connection lifecycle is managed by the wrapper — callers\n// never see the underlying conn or cancel func.\nfunc grpcRunE(\n\tfn func(ctx context.Context, client v1.HeadscaleServiceClient, cmd *cobra.Command, args []string) error,\n) func(*cobra.Command, []string) error {\n\treturn func(cmd *cobra.Command, args []string) error {\n\t\tctx, client, conn, cancel, err := newHeadscaleCLIWithConfig()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"connecting to headscale: %w\", err)\n\t\t}\n\t\tdefer cancel()\n\t\tdefer conn.Close()\n\n\t\treturn fn(ctx, client, cmd, args)\n\t}\n}\n\nfunc newHeadscaleCLIWithConfig() (context.Context, v1.HeadscaleServiceClient, *grpc.ClientConn, context.CancelFunc, error) {\n\tcfg, err := types.LoadCLIConfig()\n\tif err != nil {\n\t\treturn nil, nil, nil, nil, fmt.Errorf(\"loading configuration: %w\", err)\n\t}\n\n\tlog.Debug().\n\t\tDur(\"timeout\", cfg.CLI.Timeout).\n\t\tMsg(\"Setting timeout\")\n\n\tctx, cancel := context.WithTimeout(context.Background(), cfg.CLI.Timeout)\n\n\tgrpcOptions := []grpc.DialOption{\n\t\tgrpc.WithBlock(), //nolint:staticcheck // SA1019: deprecated but supported in 1.x\n\t}\n\n\taddress := cfg.CLI.Address\n\n\t// If the address is not set, we assume that we are on the server hosting hscontrol.\n\tif address == \"\" {\n\t\tlog.Debug().\n\t\t\tStr(\"socket\", cfg.UnixSocket).\n\t\t\tMsg(\"HEADSCALE_CLI_ADDRESS environment variable is not set, connecting to unix socket.\")\n\n\t\taddress = cfg.UnixSocket\n\n\t\t// Try to give the user better feedback if we cannot write to the headscale\n\t\t// socket.  Note: os.OpenFile on a Unix domain socket returns ENXIO on\n\t\t// Linux which is expected — only permission errors are actionable here.\n\t\t// The actual gRPC connection uses net.Dial which handles sockets properly.\n\t\tsocket, err := os.OpenFile(cfg.UnixSocket, os.O_WRONLY, SocketWritePermissions) //nolint\n\t\tif err != nil {\n\t\t\tif os.IsPermission(err) {\n\t\t\t\tcancel()\n\n\t\t\t\treturn nil, nil, nil, nil, fmt.Errorf(\n\t\t\t\t\t\"unable to read/write to headscale socket %q, do you have the correct permissions? %w\",\n\t\t\t\t\tcfg.UnixSocket,\n\t\t\t\t\terr,\n\t\t\t\t)\n\t\t\t}\n\t\t} else {\n\t\t\tsocket.Close()\n\t\t}\n\n\t\tgrpcOptions = append(\n\t\t\tgrpcOptions,\n\t\t\tgrpc.WithTransportCredentials(insecure.NewCredentials()),\n\t\t\tgrpc.WithContextDialer(util.GrpcSocketDialer),\n\t\t)\n\t} else {\n\t\t// If we are not connecting to a local server, require an API key for authentication\n\t\tapiKey := cfg.CLI.APIKey\n\t\tif apiKey == \"\" {\n\t\t\tcancel()\n\n\t\t\treturn nil, nil, nil, nil, errAPIKeyNotSet\n\t\t}\n\n\t\tgrpcOptions = append(grpcOptions,\n\t\t\tgrpc.WithPerRPCCredentials(tokenAuth{\n\t\t\t\ttoken: apiKey,\n\t\t\t}),\n\t\t)\n\n\t\tif cfg.CLI.Insecure {\n\t\t\ttlsConfig := &tls.Config{\n\t\t\t\t// turn off gosec as we are intentionally setting\n\t\t\t\t// insecure.\n\t\t\t\t//nolint:gosec\n\t\t\t\tInsecureSkipVerify: true,\n\t\t\t}\n\n\t\t\tgrpcOptions = append(grpcOptions,\n\t\t\t\tgrpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)),\n\t\t\t)\n\t\t} else {\n\t\t\tgrpcOptions = append(grpcOptions,\n\t\t\t\tgrpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, \"\")),\n\t\t\t)\n\t\t}\n\t}\n\n\tlog.Trace().Caller().Str(zf.Address, address).Msg(\"connecting via gRPC\")\n\n\tconn, err := grpc.DialContext(ctx, address, grpcOptions...) 
//nolint:staticcheck // SA1019: deprecated but supported in 1.x\n\tif err != nil {\n\t\tcancel()\n\n\t\treturn nil, nil, nil, nil, fmt.Errorf(\"connecting to %s: %w\", address, err)\n\t}\n\n\tclient := v1.NewHeadscaleServiceClient(conn)\n\n\treturn ctx, client, conn, cancel, nil\n}\n\n// formatOutput serialises result into the requested format. For the\n// default (empty) format the human-readable override string is returned.\nfunc formatOutput(result any, override string, outputFormat string) (string, error) {\n\tswitch outputFormat {\n\tcase outputFormatJSON:\n\t\tb, err := json.MarshalIndent(result, \"\", \"\\t\")\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"marshalling JSON output: %w\", err)\n\t\t}\n\n\t\treturn string(b), nil\n\tcase outputFormatJSONLine:\n\t\tb, err := json.Marshal(result)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"marshalling JSON-line output: %w\", err)\n\t\t}\n\n\t\treturn string(b), nil\n\tcase outputFormatYAML:\n\t\tb, err := yaml.Marshal(result)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"marshalling YAML output: %w\", err)\n\t\t}\n\n\t\treturn string(b), nil\n\tdefault:\n\t\treturn override, nil\n\t}\n}\n\n// printOutput formats result and writes it to stdout. It reads the --output\n// flag from cmd to decide the serialisation format.\nfunc printOutput(cmd *cobra.Command, result any, override string) error {\n\tformat, _ := cmd.Flags().GetString(\"output\")\n\n\tout, err := formatOutput(result, override, format)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Println(out)\n\n\treturn nil\n}\n\n// expirationFromFlag parses the --expiration flag as a Prometheus-style\n// duration (e.g. \"90d\", \"1h\") and returns an absolute timestamp.\nfunc expirationFromFlag(cmd *cobra.Command) (*timestamppb.Timestamp, error) {\n\tdurationStr, _ := cmd.Flags().GetString(\"expiration\")\n\n\tduration, err := model.ParseDuration(durationStr)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"parsing duration: %w\", err)\n\t}\n\n\treturn timestamppb.New(time.Now().UTC().Add(time.Duration(duration))), nil\n}\n\n// confirmAction returns true when the user confirms a prompt, or when\n// --force is set.  Callers decide what to do when it returns false.\nfunc confirmAction(cmd *cobra.Command, prompt string) bool {\n\tforce, _ := cmd.Flags().GetBool(\"force\")\n\tif force {\n\t\treturn true\n\t}\n\n\treturn util.YesNo(prompt)\n}\n\n// printListOutput checks the --output flag: when a machine-readable format is\n// requested it serialises data as JSON/YAML; otherwise it calls renderTable\n// to produce the human-readable pterm table.\nfunc printListOutput(\n\tcmd *cobra.Command,\n\tdata any,\n\trenderTable func() error,\n) error {\n\tformat, _ := cmd.Flags().GetString(\"output\")\n\tif format != \"\" {\n\t\treturn printOutput(cmd, data, \"\")\n\t}\n\n\treturn renderTable()\n}\n\n// printError writes err to stderr, formatting it as JSON/YAML when the\n// --output flag requests machine-readable output.  
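With\n// --output json, for example, a failure renders as:\n//\n//\t{\n//\t\t\"error\": \"...\"\n//\t}\n//\n// 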
Used exclusively by\n// Execute() so that every error surfaces in the format the caller asked for.\nfunc printError(err error, outputFormat string) {\n\ttype errOutput struct {\n\t\tError string `json:\"error\"`\n\t}\n\n\te := errOutput{Error: err.Error()}\n\n\tvar formatted []byte\n\n\tswitch outputFormat {\n\tcase outputFormatJSON:\n\t\tformatted, _ = json.MarshalIndent(e, \"\", \"\\t\") //nolint:errchkjson // errOutput contains only a string field\n\tcase outputFormatJSONLine:\n\t\tformatted, _ = json.Marshal(e) //nolint:errchkjson // errOutput contains only a string field\n\tcase outputFormatYAML:\n\t\tformatted, _ = yaml.Marshal(e)\n\tdefault:\n\t\tfmt.Fprintf(os.Stderr, \"Error: %s\\n\", err)\n\n\t\treturn\n\t}\n\n\tfmt.Fprintf(os.Stderr, \"%s\\n\", formatted)\n}\n\nfunc hasMachineOutputFlag() bool {\n\tfor _, arg := range os.Args {\n\t\tif arg == outputFormatJSON || arg == outputFormatJSONLine || arg == outputFormatYAML {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\ntype tokenAuth struct {\n\ttoken string\n}\n\n// Return value is mapped to request headers.\nfunc (t tokenAuth) GetRequestMetadata(\n\tctx context.Context,\n\tin ...string,\n) (map[string]string, error) {\n\treturn map[string]string{\n\t\t\"authorization\": \"Bearer \" + t.token,\n\t}, nil\n}\n\nfunc (tokenAuth) RequireTransportSecurity() bool {\n\treturn true\n}\n"
  },
  {
    "path": "cmd/headscale/cli/version.go",
    "content": "package cli\n\nimport (\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/spf13/cobra\"\n)\n\nfunc init() {\n\trootCmd.AddCommand(versionCmd)\n\tversionCmd.Flags().StringP(\"output\", \"o\", \"\", \"Output format. Empty for human-readable, 'json', 'json-line' or 'yaml'\")\n}\n\nvar versionCmd = &cobra.Command{\n\tUse:   \"version\",\n\tShort: \"Print the version.\",\n\tLong:  \"The version of headscale.\",\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tinfo := types.GetVersionInfo()\n\n\t\treturn printOutput(cmd, info, info.String())\n\t},\n}\n"
  },
  {
    "path": "cmd/headscale/headscale.go",
    "content": "package main\n\nimport (\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/jagottsicher/termcolor\"\n\t\"github.com/juanfont/headscale/cmd/headscale/cli\"\n\t\"github.com/rs/zerolog\"\n\t\"github.com/rs/zerolog/log\"\n)\n\nfunc main() {\n\tvar colors bool\n\n\tswitch l := termcolor.SupportLevel(os.Stderr); l {\n\tcase termcolor.Level16M:\n\t\tcolors = true\n\tcase termcolor.Level256:\n\t\tcolors = true\n\tcase termcolor.LevelBasic:\n\t\tcolors = true\n\tcase termcolor.LevelNone:\n\t\tcolors = false\n\tdefault:\n\t\t// no color, return text as is.\n\t\tcolors = false\n\t}\n\n\t// Adhere to no-color.org manifesto of allowing users to\n\t// turn off color in cli/services\n\tif _, noColorIsSet := os.LookupEnv(\"NO_COLOR\"); noColorIsSet {\n\t\tcolors = false\n\t}\n\n\tzerolog.TimeFieldFormat = zerolog.TimeFormatUnix\n\tlog.Logger = log.Output(zerolog.ConsoleWriter{\n\t\tOut:        os.Stderr,\n\t\tTimeFormat: time.RFC3339,\n\t\tNoColor:    !colors,\n\t})\n\n\tcli.Execute()\n}\n"
  },
  {
    "path": "cmd/headscale/headscale_test.go",
    "content": "package main\n\nimport (\n\t\"io/fs\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/spf13/viper\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestConfigFileLoading(t *testing.T) {\n\ttmpDir := t.TempDir()\n\n\tpath, err := os.Getwd()\n\trequire.NoError(t, err)\n\n\tcfgFile := filepath.Join(tmpDir, \"config.yaml\")\n\n\t// Symlink the example config file\n\terr = os.Symlink(\n\t\tfilepath.Clean(path+\"/../../config-example.yaml\"),\n\t\tcfgFile,\n\t)\n\trequire.NoError(t, err)\n\n\t// Load example config, it should load without validation errors\n\terr = types.LoadConfig(cfgFile, true)\n\trequire.NoError(t, err)\n\n\t// Test that config file was interpreted correctly\n\tassert.Equal(t, \"http://127.0.0.1:8080\", viper.GetString(\"server_url\"))\n\tassert.Equal(t, \"127.0.0.1:8080\", viper.GetString(\"listen_addr\"))\n\tassert.Equal(t, \"127.0.0.1:9090\", viper.GetString(\"metrics_listen_addr\"))\n\tassert.Equal(t, \"sqlite\", viper.GetString(\"database.type\"))\n\tassert.Equal(t, \"/var/lib/headscale/db.sqlite\", viper.GetString(\"database.sqlite.path\"))\n\tassert.Empty(t, viper.GetString(\"tls_letsencrypt_hostname\"))\n\tassert.Equal(t, \":http\", viper.GetString(\"tls_letsencrypt_listen\"))\n\tassert.Equal(t, \"HTTP-01\", viper.GetString(\"tls_letsencrypt_challenge_type\"))\n\tassert.Equal(t, fs.FileMode(0o770), util.GetFileMode(\"unix_socket_permission\"))\n\tassert.False(t, viper.GetBool(\"logtail.enabled\"))\n}\n\nfunc TestConfigLoading(t *testing.T) {\n\ttmpDir := t.TempDir()\n\n\tpath, err := os.Getwd()\n\trequire.NoError(t, err)\n\n\t// Symlink the example config file\n\terr = os.Symlink(\n\t\tfilepath.Clean(path+\"/../../config-example.yaml\"),\n\t\tfilepath.Join(tmpDir, \"config.yaml\"),\n\t)\n\trequire.NoError(t, err)\n\n\t// Load example config, it should load without validation errors\n\terr = types.LoadConfig(tmpDir, false)\n\trequire.NoError(t, err)\n\n\t// Test that config file was interpreted correctly\n\tassert.Equal(t, \"http://127.0.0.1:8080\", viper.GetString(\"server_url\"))\n\tassert.Equal(t, \"127.0.0.1:8080\", viper.GetString(\"listen_addr\"))\n\tassert.Equal(t, \"127.0.0.1:9090\", viper.GetString(\"metrics_listen_addr\"))\n\tassert.Equal(t, \"sqlite\", viper.GetString(\"database.type\"))\n\tassert.Equal(t, \"/var/lib/headscale/db.sqlite\", viper.GetString(\"database.sqlite.path\"))\n\tassert.Empty(t, viper.GetString(\"tls_letsencrypt_hostname\"))\n\tassert.Equal(t, \":http\", viper.GetString(\"tls_letsencrypt_listen\"))\n\tassert.Equal(t, \"HTTP-01\", viper.GetString(\"tls_letsencrypt_challenge_type\"))\n\tassert.Equal(t, fs.FileMode(0o770), util.GetFileMode(\"unix_socket_permission\"))\n\tassert.False(t, viper.GetBool(\"logtail.enabled\"))\n\tassert.False(t, viper.GetBool(\"randomize_client_port\"))\n}\n"
  },
  {
    "path": "cmd/hi/README.md",
    "content": "# hi\n\nhi (headscale integration runner) is an entirely \"vibe coded\" wrapper around our\n[integration test suite](../integration). It essentially runs the docker\ncommands for you with some added benefits of extracting resources like logs and\ndatabases.\n"
  },
  {
    "path": "cmd/hi/cleanup.go",
    "content": "package main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/cenkalti/backoff/v5\"\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/docker/docker/api/types/filters\"\n\t\"github.com/docker/docker/api/types/image\"\n\t\"github.com/docker/docker/client\"\n\t\"github.com/docker/docker/errdefs\"\n)\n\n// cleanupBeforeTest performs cleanup operations before running tests.\n// Only removes stale (stopped/exited) test containers to avoid interfering with concurrent test runs.\nfunc cleanupBeforeTest(ctx context.Context) error {\n\terr := cleanupStaleTestContainers(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cleaning stale test containers: %w\", err)\n\t}\n\n\tif err := pruneDockerNetworks(ctx); err != nil { //nolint:noinlineerr\n\t\treturn fmt.Errorf(\"pruning networks: %w\", err)\n\t}\n\n\treturn nil\n}\n\n// cleanupAfterTest removes the test container and all associated integration test containers for the run.\nfunc cleanupAfterTest(ctx context.Context, cli *client.Client, containerID, runID string) error {\n\t// Remove the main test container\n\terr := cli.ContainerRemove(ctx, containerID, container.RemoveOptions{\n\t\tForce: true,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"removing test container: %w\", err)\n\t}\n\n\t// Clean up integration test containers for this run only\n\tif runID != \"\" {\n\t\terr := killTestContainersByRunID(ctx, runID)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"cleaning up containers for run %s: %w\", runID, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// killTestContainers terminates and removes all test containers.\nfunc killTestContainers(ctx context.Context) error {\n\tcli, err := createDockerClient(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating Docker client: %w\", err)\n\t}\n\tdefer cli.Close()\n\n\tcontainers, err := cli.ContainerList(ctx, container.ListOptions{\n\t\tAll: true,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"listing containers: %w\", err)\n\t}\n\n\tremoved := 0\n\n\tfor _, cont := range containers {\n\t\tshouldRemove := false\n\n\t\tfor _, name := range cont.Names {\n\t\t\tif strings.Contains(name, \"headscale-test-suite\") ||\n\t\t\t\tstrings.Contains(name, \"hs-\") ||\n\t\t\t\tstrings.Contains(name, \"ts-\") ||\n\t\t\t\tstrings.Contains(name, \"derp-\") {\n\t\t\t\tshouldRemove = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif shouldRemove {\n\t\t\t// First kill the container if it's running\n\t\t\tif cont.State == \"running\" {\n\t\t\t\t_ = cli.ContainerKill(ctx, cont.ID, \"KILL\")\n\t\t\t}\n\n\t\t\t// Then remove the container with retry logic\n\t\t\tif removeContainerWithRetry(ctx, cli, cont.ID) {\n\t\t\t\tremoved++\n\t\t\t}\n\t\t}\n\t}\n\n\tif removed > 0 {\n\t\tfmt.Printf(\"Removed %d test containers\\n\", removed)\n\t} else {\n\t\tfmt.Println(\"No test containers found to remove\")\n\t}\n\n\treturn nil\n}\n\n// killTestContainersByRunID terminates and removes all test containers for a specific run ID.\n// This function filters containers by the hi.run-id label to only affect containers\n// belonging to the specified test run, leaving other concurrent test runs untouched.\nfunc killTestContainersByRunID(ctx context.Context, runID string) error {\n\tcli, err := createDockerClient(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating Docker client: %w\", err)\n\t}\n\tdefer cli.Close()\n\n\t// Filter containers by hi.run-id label\n\tcontainers, err := cli.ContainerList(ctx, 
container.ListOptions{\n\t\tAll: true,\n\t\tFilters: filters.NewArgs(\n\t\t\tfilters.Arg(\"label\", \"hi.run-id=\"+runID),\n\t\t),\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"listing containers for run %s: %w\", runID, err)\n\t}\n\n\tremoved := 0\n\n\tfor _, cont := range containers {\n\t\t// Kill the container if it's running\n\t\tif cont.State == \"running\" {\n\t\t\t_ = cli.ContainerKill(ctx, cont.ID, \"KILL\")\n\t\t}\n\n\t\t// Remove the container with retry logic\n\t\tif removeContainerWithRetry(ctx, cli, cont.ID) {\n\t\t\tremoved++\n\t\t}\n\t}\n\n\tif removed > 0 {\n\t\tfmt.Printf(\"Removed %d containers for run ID %s\\n\", removed, runID)\n\t}\n\n\treturn nil\n}\n\n// cleanupStaleTestContainers removes stopped/exited test containers without affecting running tests.\n// This is useful for cleaning up leftover containers from previous crashed or interrupted test runs\n// without interfering with currently running concurrent tests.\nfunc cleanupStaleTestContainers(ctx context.Context) error {\n\tcli, err := createDockerClient(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating Docker client: %w\", err)\n\t}\n\tdefer cli.Close()\n\n\t// Only get stopped/exited containers\n\tcontainers, err := cli.ContainerList(ctx, container.ListOptions{\n\t\tAll: true,\n\t\tFilters: filters.NewArgs(\n\t\t\tfilters.Arg(\"status\", \"exited\"),\n\t\t\tfilters.Arg(\"status\", \"dead\"),\n\t\t),\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"listing stopped containers: %w\", err)\n\t}\n\n\tremoved := 0\n\n\tfor _, cont := range containers {\n\t\t// Only remove containers that look like test containers\n\t\tshouldRemove := false\n\n\t\tfor _, name := range cont.Names {\n\t\t\tif strings.Contains(name, \"headscale-test-suite\") ||\n\t\t\t\tstrings.Contains(name, \"hs-\") ||\n\t\t\t\tstrings.Contains(name, \"ts-\") ||\n\t\t\t\tstrings.Contains(name, \"derp-\") {\n\t\t\t\tshouldRemove = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif shouldRemove {\n\t\t\tif removeContainerWithRetry(ctx, cli, cont.ID) {\n\t\t\t\tremoved++\n\t\t\t}\n\t\t}\n\t}\n\n\tif removed > 0 {\n\t\tfmt.Printf(\"Removed %d stale test containers\\n\", removed)\n\t}\n\n\treturn nil\n}\n\nconst (\n\tcontainerRemoveInitialInterval = 100 * time.Millisecond\n\tcontainerRemoveMaxElapsedTime  = 2 * time.Second\n)\n\n// removeContainerWithRetry attempts to remove a container with exponential backoff retry logic.\nfunc removeContainerWithRetry(ctx context.Context, cli *client.Client, containerID string) bool {\n\texpBackoff := backoff.NewExponentialBackOff()\n\texpBackoff.InitialInterval = containerRemoveInitialInterval\n\n\t_, err := backoff.Retry(ctx, func() (struct{}, error) {\n\t\terr := cli.ContainerRemove(ctx, containerID, container.RemoveOptions{\n\t\t\tForce: true,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn struct{}{}, err\n\t\t}\n\n\t\treturn struct{}{}, nil\n\t}, backoff.WithBackOff(expBackoff), backoff.WithMaxElapsedTime(containerRemoveMaxElapsedTime))\n\n\treturn err == nil\n}\n\n// pruneDockerNetworks removes unused Docker networks.\nfunc pruneDockerNetworks(ctx context.Context) error {\n\tcli, err := createDockerClient(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating Docker client: %w\", err)\n\t}\n\tdefer cli.Close()\n\n\treport, err := cli.NetworksPrune(ctx, filters.Args{})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"pruning networks: %w\", err)\n\t}\n\n\tif len(report.NetworksDeleted) > 0 {\n\t\tfmt.Printf(\"Removed %d unused networks\\n\", len(report.NetworksDeleted))\n\t} else {\n\t\tfmt.Println(\"No unused 
networks found to remove\")\n\t}\n\n\treturn nil\n}\n\n// cleanOldImages removes test-related and old dangling Docker images.\nfunc cleanOldImages(ctx context.Context) error {\n\tcli, err := createDockerClient(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating Docker client: %w\", err)\n\t}\n\tdefer cli.Close()\n\n\timages, err := cli.ImageList(ctx, image.ListOptions{\n\t\tAll: true,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"listing images: %w\", err)\n\t}\n\n\tremoved := 0\n\n\tfor _, img := range images {\n\t\tshouldRemove := false\n\n\t\tfor _, tag := range img.RepoTags {\n\t\t\tif strings.Contains(tag, \"hs-\") ||\n\t\t\t\tstrings.Contains(tag, \"headscale-integration\") ||\n\t\t\t\tstrings.Contains(tag, \"tailscale\") {\n\t\t\t\tshouldRemove = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif len(img.RepoTags) == 0 && time.Unix(img.Created, 0).Before(time.Now().Add(-7*24*time.Hour)) {\n\t\t\tshouldRemove = true\n\t\t}\n\n\t\tif shouldRemove {\n\t\t\t_, err := cli.ImageRemove(ctx, img.ID, image.RemoveOptions{\n\t\t\t\tForce: true,\n\t\t\t})\n\t\t\tif err == nil {\n\t\t\t\tremoved++\n\t\t\t}\n\t\t}\n\t}\n\n\tif removed > 0 {\n\t\tfmt.Printf(\"Removed %d test images\\n\", removed)\n\t} else {\n\t\tfmt.Println(\"No test images found to remove\")\n\t}\n\n\treturn nil\n}\n\n// cleanCacheVolume removes the Docker volume used for Go module cache.\nfunc cleanCacheVolume(ctx context.Context) error {\n\tcli, err := createDockerClient(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating Docker client: %w\", err)\n\t}\n\tdefer cli.Close()\n\n\tvolumeName := \"hs-integration-go-cache\"\n\n\terr = cli.VolumeRemove(ctx, volumeName, true)\n\tif err != nil {\n\t\tif errdefs.IsNotFound(err) { //nolint:staticcheck // SA1019: deprecated but functional\n\t\t\tfmt.Printf(\"Go module cache volume not found: %s\\n\", volumeName)\n\t\t} else if errdefs.IsConflict(err) { //nolint:staticcheck // SA1019: deprecated but functional\n\t\t\tfmt.Printf(\"Go module cache volume is in use and cannot be removed: %s\\n\", volumeName)\n\t\t} else {\n\t\t\tfmt.Printf(\"Failed to remove Go module cache volume %s: %v\\n\", volumeName, err)\n\t\t}\n\t} else {\n\t\tfmt.Printf(\"Removed Go module cache volume: %s\\n\", volumeName)\n\t}\n\n\treturn nil\n}\n\n// cleanupSuccessfulTestArtifacts removes artifacts from successful test runs to save disk space.\n// This function removes large artifacts that are mainly useful for debugging failures:\n// - Database dumps (.db files)\n// - Profile data (pprof directories)\n// - MapResponse data (mapresponses directories)\n// - Prometheus metrics files\n//\n// It preserves:\n// - Log files (.log) which are small and useful for verification.\nfunc cleanupSuccessfulTestArtifacts(logsDir string, verbose bool) error {\n\tentries, err := os.ReadDir(logsDir)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"reading logs directory: %w\", err)\n\t}\n\n\tvar (\n\t\tremovedFiles, removedDirs int\n\t\ttotalSize                 int64\n\t)\n\n\tfor _, entry := range entries {\n\t\tname := entry.Name()\n\t\tfullPath := filepath.Join(logsDir, name)\n\n\t\tif entry.IsDir() {\n\t\t\t// Remove pprof and mapresponses directories (typically large)\n\t\t\t// These directories contain artifacts from all containers in the test run\n\t\t\tif name == \"pprof\" || name == \"mapresponses\" {\n\t\t\t\tsize, sizeErr := getDirSize(fullPath)\n\t\t\t\tif sizeErr == nil {\n\t\t\t\t\ttotalSize += size\n\t\t\t\t}\n\n\t\t\t\terr := os.RemoveAll(fullPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif verbose 
{\n\t\t\t\t\t\tlog.Printf(\"Warning: failed to remove directory %s: %v\", name, err)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tremovedDirs++\n\n\t\t\t\t\tif verbose {\n\t\t\t\t\t\tlog.Printf(\"Removed directory: %s/\", name)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t// Only process test-related files (headscale and tailscale)\n\t\t\tif !strings.HasPrefix(name, \"hs-\") && !strings.HasPrefix(name, \"ts-\") {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Remove database, metrics, and status files, but keep logs\n\t\t\tshouldRemove := strings.HasSuffix(name, \".db\") ||\n\t\t\t\tstrings.HasSuffix(name, \"_metrics.txt\") ||\n\t\t\t\tstrings.HasSuffix(name, \"_status.json\")\n\n\t\t\tif shouldRemove {\n\t\t\t\tinfo, infoErr := entry.Info()\n\t\t\t\tif infoErr == nil {\n\t\t\t\t\ttotalSize += info.Size()\n\t\t\t\t}\n\n\t\t\t\terr := os.Remove(fullPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif verbose {\n\t\t\t\t\t\tlog.Printf(\"Warning: failed to remove file %s: %v\", name, err)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tremovedFiles++\n\n\t\t\t\t\tif verbose {\n\t\t\t\t\t\tlog.Printf(\"Removed file: %s\", name)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif removedFiles > 0 || removedDirs > 0 {\n\t\tconst bytesPerMB = 1024 * 1024\n\t\tlog.Printf(\"Cleaned up %d files and %d directories (freed ~%.2f MB)\",\n\t\t\tremovedFiles, removedDirs, float64(totalSize)/bytesPerMB)\n\t}\n\n\treturn nil\n}\n\n// getDirSize calculates the total size of a directory.\nfunc getDirSize(path string) (int64, error) {\n\tvar size int64\n\n\terr := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif !info.IsDir() {\n\t\t\tsize += info.Size()\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn size, err\n}\n"
  },
  {
    "path": "cmd/hi/docker.go",
    "content": "package main\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/docker/docker/api/types/image\"\n\t\"github.com/docker/docker/api/types/mount\"\n\t\"github.com/docker/docker/client\"\n\t\"github.com/docker/docker/pkg/stdcopy\"\n\t\"github.com/juanfont/headscale/integration/dockertestutil\"\n)\n\nconst defaultDirPerm = 0o755\n\nvar (\n\tErrTestFailed              = errors.New(\"test failed\")\n\tErrUnexpectedContainerWait = errors.New(\"unexpected end of container wait\")\n\tErrNoDockerContext         = errors.New(\"no docker context found\")\n\tErrMemoryLimitViolations   = errors.New(\"container(s) exceeded memory limits\")\n)\n\n// runTestContainer executes integration tests in a Docker container.\n//\n//nolint:gocyclo // complex test orchestration function\nfunc runTestContainer(ctx context.Context, config *RunConfig) error {\n\tcli, err := createDockerClient(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating Docker client: %w\", err)\n\t}\n\tdefer cli.Close()\n\n\trunID := dockertestutil.GenerateRunID()\n\tcontainerName := \"headscale-test-suite-\" + runID\n\tlogsDir := filepath.Join(config.LogsDir, runID)\n\n\tif config.Verbose {\n\t\tlog.Printf(\"Run ID: %s\", runID)\n\t\tlog.Printf(\"Container name: %s\", containerName)\n\t\tlog.Printf(\"Logs directory: %s\", logsDir)\n\t}\n\n\tabsLogsDir, err := filepath.Abs(logsDir)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting absolute path for logs directory: %w\", err)\n\t}\n\n\tconst dirPerm = 0o755\n\tif err := os.MkdirAll(absLogsDir, dirPerm); err != nil { //nolint:noinlineerr\n\t\treturn fmt.Errorf(\"creating logs directory: %w\", err)\n\t}\n\n\tif config.CleanBefore {\n\t\tif config.Verbose {\n\t\t\tlog.Printf(\"Running pre-test cleanup...\")\n\t\t}\n\n\t\terr := cleanupBeforeTest(ctx)\n\t\tif err != nil && config.Verbose {\n\t\t\tlog.Printf(\"Warning: pre-test cleanup failed: %v\", err)\n\t\t}\n\t}\n\n\tgoTestCmd := buildGoTestCommand(config)\n\tif config.Verbose {\n\t\tlog.Printf(\"Command: %s\", strings.Join(goTestCmd, \" \"))\n\t}\n\n\timageName := \"golang:\" + config.GoVersion\n\tif err := ensureImageAvailable(ctx, cli, imageName, config.Verbose); err != nil { //nolint:noinlineerr\n\t\treturn fmt.Errorf(\"ensuring image availability: %w\", err)\n\t}\n\n\tresp, err := createGoTestContainer(ctx, cli, config, containerName, absLogsDir, goTestCmd)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating container: %w\", err)\n\t}\n\n\tif config.Verbose {\n\t\tlog.Printf(\"Created container: %s\", resp.ID)\n\t}\n\n\tif err := cli.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil { //nolint:noinlineerr\n\t\treturn fmt.Errorf(\"starting container: %w\", err)\n\t}\n\n\tlog.Printf(\"Starting test: %s\", config.TestPattern)\n\tlog.Printf(\"Run ID: %s\", runID)\n\tlog.Printf(\"Monitor with: docker logs -f %s\", containerName)\n\tlog.Printf(\"Logs directory: %s\", logsDir)\n\n\t// Start stats collection for container resource monitoring (if enabled)\n\tvar statsCollector *StatsCollector\n\n\tif config.Stats {\n\t\tvar err error\n\n\t\tstatsCollector, err = NewStatsCollector(ctx)\n\t\tif err != nil {\n\t\t\tif config.Verbose {\n\t\t\t\tlog.Printf(\"Warning: failed to create stats collector: %v\", err)\n\t\t\t}\n\n\t\t\tstatsCollector = nil\n\t\t}\n\n\t\tif statsCollector != nil {\n\t\t\tdefer 
statsCollector.Close()\n\n\t\t\t// Start stats collection immediately - no need for complex retry logic\n\t\t\t// The new implementation monitors Docker events and will catch containers as they start\n\t\t\terr := statsCollector.StartCollection(ctx, runID, config.Verbose)\n\t\t\tif err != nil {\n\t\t\t\tif config.Verbose {\n\t\t\t\t\tlog.Printf(\"Warning: failed to start stats collection: %v\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tdefer statsCollector.StopCollection()\n\t\t}\n\t}\n\n\texitCode, err := streamAndWait(ctx, cli, resp.ID)\n\n\t// Ensure all containers have finished and logs are flushed before extracting artifacts\n\twaitErr := waitForContainerFinalization(ctx, cli, resp.ID, config.Verbose)\n\tif waitErr != nil && config.Verbose {\n\t\tlog.Printf(\"Warning: failed to wait for container finalization: %v\", waitErr)\n\t}\n\n\t// Extract artifacts from test containers before cleanup\n\tif err := extractArtifactsFromContainers(ctx, resp.ID, logsDir, config.Verbose); err != nil && config.Verbose { //nolint:noinlineerr\n\t\tlog.Printf(\"Warning: failed to extract artifacts from containers: %v\", err)\n\t}\n\n\t// Always list control files regardless of test outcome\n\tlistControlFiles(logsDir)\n\n\t// Print stats summary and check memory limits if enabled\n\tif config.Stats && statsCollector != nil {\n\t\tviolations := statsCollector.PrintSummaryAndCheckLimits(config.HSMemoryLimit, config.TSMemoryLimit)\n\t\tif len(violations) > 0 {\n\t\t\tlog.Printf(\"MEMORY LIMIT VIOLATIONS DETECTED:\")\n\t\t\tlog.Printf(\"=================================\")\n\n\t\t\tfor _, violation := range violations {\n\t\t\t\tlog.Printf(\"Container %s exceeded memory limit: %.1f MB > %.1f MB\",\n\t\t\t\t\tviolation.ContainerName, violation.MaxMemoryMB, violation.LimitMB)\n\t\t\t}\n\n\t\t\treturn fmt.Errorf(\"test failed: %d %w\", len(violations), ErrMemoryLimitViolations)\n\t\t}\n\t}\n\n\tshouldCleanup := config.CleanAfter && (!config.KeepOnFailure || exitCode == 0)\n\tif shouldCleanup {\n\t\tif config.Verbose {\n\t\t\tlog.Printf(\"Running post-test cleanup for run %s...\", runID)\n\t\t}\n\n\t\tcleanErr := cleanupAfterTest(ctx, cli, resp.ID, runID)\n\n\t\tif cleanErr != nil && config.Verbose {\n\t\t\tlog.Printf(\"Warning: post-test cleanup failed: %v\", cleanErr)\n\t\t}\n\n\t\t// Clean up artifacts from successful tests to save disk space in CI\n\t\tif exitCode == 0 {\n\t\t\tif config.Verbose {\n\t\t\t\tlog.Printf(\"Test succeeded, cleaning up artifacts to save disk space...\")\n\t\t\t}\n\n\t\t\tcleanErr := cleanupSuccessfulTestArtifacts(logsDir, config.Verbose)\n\n\t\t\tif cleanErr != nil && config.Verbose {\n\t\t\t\tlog.Printf(\"Warning: artifact cleanup failed: %v\", cleanErr)\n\t\t\t}\n\t\t}\n\t}\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"executing test: %w\", err)\n\t}\n\n\tif exitCode != 0 {\n\t\treturn fmt.Errorf(\"%w: exit code %d\", ErrTestFailed, exitCode)\n\t}\n\n\tlog.Printf(\"Test completed successfully!\")\n\n\treturn nil\n}\n\n// buildGoTestCommand constructs the go test command arguments.\nfunc buildGoTestCommand(config *RunConfig) []string {\n\tcmd := []string{\"go\", \"test\", \"./...\"}\n\n\tif config.TestPattern != \"\" {\n\t\tcmd = append(cmd, \"-run\", config.TestPattern)\n\t}\n\n\tif config.FailFast {\n\t\tcmd = append(cmd, \"-failfast\")\n\t}\n\n\tcmd = append(cmd, \"-timeout\", config.Timeout.String())\n\tcmd = append(cmd, \"-v\")\n\n\treturn cmd\n}\n\n// createGoTestContainer creates a Docker container configured for running integration tests.\nfunc createGoTestContainer(ctx 
context.Context, cli *client.Client, config *RunConfig, containerName, logsDir string, goTestCmd []string) (container.CreateResponse, error) {\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn container.CreateResponse{}, fmt.Errorf(\"getting working directory: %w\", err)\n\t}\n\n\tprojectRoot := findProjectRoot(pwd)\n\n\trunID := dockertestutil.ExtractRunIDFromContainerName(containerName)\n\n\tenv := []string{\n\t\tfmt.Sprintf(\"HEADSCALE_INTEGRATION_POSTGRES=%d\", boolToInt(config.UsePostgres)),\n\t\t\"HEADSCALE_INTEGRATION_RUN_ID=\" + runID,\n\t}\n\n\t// Pass through CI environment variable for CI detection\n\tif ci := os.Getenv(\"CI\"); ci != \"\" {\n\t\tenv = append(env, \"CI=\"+ci)\n\t}\n\n\t// Pass through all HEADSCALE_INTEGRATION_* environment variables\n\tfor _, e := range os.Environ() {\n\t\tif strings.HasPrefix(e, \"HEADSCALE_INTEGRATION_\") {\n\t\t\t// Skip the ones we already set explicitly\n\t\t\tif strings.HasPrefix(e, \"HEADSCALE_INTEGRATION_POSTGRES=\") ||\n\t\t\t\tstrings.HasPrefix(e, \"HEADSCALE_INTEGRATION_RUN_ID=\") {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tenv = append(env, e)\n\t\t}\n\t}\n\n\t// Set GOCACHE to a known location (used by both bind mount and volume cases)\n\tenv = append(env, \"GOCACHE=/cache/go-build\")\n\n\tcontainerConfig := &container.Config{\n\t\tImage:      \"golang:\" + config.GoVersion,\n\t\tCmd:        goTestCmd,\n\t\tEnv:        env,\n\t\tWorkingDir: projectRoot + \"/integration\",\n\t\tTty:        true,\n\t\tLabels: map[string]string{\n\t\t\t\"hi.run-id\":    runID,\n\t\t\t\"hi.test-type\": \"test-runner\",\n\t\t},\n\t}\n\n\t// Get the correct Docker socket path from the current context\n\tdockerSocketPath := getDockerSocketPath()\n\n\tif config.Verbose {\n\t\tlog.Printf(\"Using Docker socket: %s\", dockerSocketPath)\n\t}\n\n\tbinds := []string{\n\t\tfmt.Sprintf(\"%s:%s\", projectRoot, projectRoot),\n\t\tdockerSocketPath + \":/var/run/docker.sock\",\n\t\tlogsDir + \":/tmp/control\",\n\t}\n\n\t// Use bind mounts for Go cache if provided via environment variables,\n\t// otherwise fall back to Docker volumes for local development\n\tvar mounts []mount.Mount\n\n\tgoCache := os.Getenv(\"HEADSCALE_INTEGRATION_GO_CACHE\")\n\tgoBuildCache := os.Getenv(\"HEADSCALE_INTEGRATION_GO_BUILD_CACHE\")\n\n\tif goCache != \"\" {\n\t\tbinds = append(binds, goCache+\":/go\")\n\t} else {\n\t\tmounts = append(mounts, mount.Mount{\n\t\t\tType:   mount.TypeVolume,\n\t\t\tSource: \"hs-integration-go-cache\",\n\t\t\tTarget: \"/go\",\n\t\t})\n\t}\n\n\tif goBuildCache != \"\" {\n\t\tbinds = append(binds, goBuildCache+\":/cache/go-build\")\n\t} else {\n\t\tmounts = append(mounts, mount.Mount{\n\t\t\tType:   mount.TypeVolume,\n\t\t\tSource: \"hs-integration-go-build-cache\",\n\t\t\tTarget: \"/cache/go-build\",\n\t\t})\n\t}\n\n\thostConfig := &container.HostConfig{\n\t\tAutoRemove: false, // We'll remove manually for better control\n\t\tBinds:      binds,\n\t\tMounts:     mounts,\n\t}\n\n\treturn cli.ContainerCreate(ctx, containerConfig, hostConfig, nil, nil, containerName)\n}\n\n// streamAndWait streams container output and waits for completion.\nfunc streamAndWait(ctx context.Context, cli *client.Client, containerID string) (int, error) {\n\tout, err := cli.ContainerLogs(ctx, containerID, container.LogsOptions{\n\t\tShowStdout: true,\n\t\tShowStderr: true,\n\t\tFollow:     true,\n\t})\n\tif err != nil {\n\t\treturn -1, fmt.Errorf(\"getting container logs: %w\", err)\n\t}\n\tdefer out.Close()\n\n\tgo func() {\n\t\t_, _ = io.Copy(os.Stdout, out)\n\t}()\n\n\tstatusCh, 
errCh := cli.ContainerWait(ctx, containerID, container.WaitConditionNotRunning)\n\tselect {\n\tcase err := <-errCh:\n\t\tif err != nil {\n\t\t\treturn -1, fmt.Errorf(\"waiting for container: %w\", err)\n\t\t}\n\tcase status := <-statusCh:\n\t\treturn int(status.StatusCode), nil\n\t}\n\n\treturn -1, ErrUnexpectedContainerWait\n}\n\n// waitForContainerFinalization ensures all test containers have properly finished and flushed their output.\nfunc waitForContainerFinalization(ctx context.Context, cli *client.Client, testContainerID string, verbose bool) error {\n\t// First, get all related test containers\n\tcontainers, err := cli.ContainerList(ctx, container.ListOptions{All: true})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"listing containers: %w\", err)\n\t}\n\n\ttestContainers := getCurrentTestContainers(containers, testContainerID, verbose)\n\n\t// Wait for all test containers to reach a final state\n\tmaxWaitTime := 10 * time.Second\n\tcheckInterval := 500 * time.Millisecond\n\ttimeout := time.After(maxWaitTime)\n\n\tticker := time.NewTicker(checkInterval)\n\tdefer ticker.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-timeout:\n\t\t\tif verbose {\n\t\t\t\tlog.Printf(\"Timeout waiting for container finalization, proceeding with artifact extraction\")\n\t\t\t}\n\n\t\t\treturn nil\n\t\tcase <-ticker.C:\n\t\t\tallFinalized := true\n\n\t\t\tfor _, testCont := range testContainers {\n\t\t\t\tinspect, err := cli.ContainerInspect(ctx, testCont.ID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif verbose {\n\t\t\t\t\t\tlog.Printf(\"Warning: failed to inspect container %s: %v\", testCont.name, err)\n\t\t\t\t\t}\n\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// Check if container is in a final state\n\t\t\t\tif !isContainerFinalized(inspect.State) {\n\t\t\t\t\tallFinalized = false\n\n\t\t\t\t\tif verbose {\n\t\t\t\t\t\tlog.Printf(\"Container %s still finalizing (state: %s)\", testCont.name, inspect.State.Status)\n\t\t\t\t\t}\n\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif allFinalized {\n\t\t\t\tif verbose {\n\t\t\t\t\tlog.Printf(\"All test containers finalized, ready for artifact extraction\")\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n}\n\n// isContainerFinalized checks if a container has reached a final state where logs are flushed.\nfunc isContainerFinalized(state *container.State) bool {\n\t// Container is finalized if it's not running and has a finish time\n\treturn !state.Running && state.FinishedAt != \"\"\n}\n\n// findProjectRoot locates the project root by finding the directory containing go.mod.\nfunc findProjectRoot(startPath string) string {\n\tcurrent := startPath\n\tfor {\n\t\tif _, err := os.Stat(filepath.Join(current, \"go.mod\")); err == nil { //nolint:noinlineerr\n\t\t\treturn current\n\t\t}\n\n\t\tparent := filepath.Dir(current)\n\t\tif parent == current {\n\t\t\treturn startPath\n\t\t}\n\n\t\tcurrent = parent\n\t}\n}\n\n// boolToInt converts a boolean to an integer for environment variables.\nfunc boolToInt(b bool) int {\n\tif b {\n\t\treturn 1\n\t}\n\n\treturn 0\n}\n\n// DockerContext represents Docker context information.\ntype DockerContext struct {\n\tName      string         `json:\"Name\"`\n\tMetadata  map[string]any `json:\"Metadata\"`\n\tEndpoints map[string]any `json:\"Endpoints\"`\n\tCurrent   bool           `json:\"Current\"`\n}\n\n// createDockerClient creates a Docker client with context detection.\nfunc createDockerClient(ctx context.Context) (*client.Client, error) {\n\tcontextInfo, err := getCurrentDockerContext(ctx)\n\tif err != nil {\n\t\treturn 
client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())\n\t}\n\n\tvar clientOpts []client.Opt\n\n\tclientOpts = append(clientOpts, client.WithAPIVersionNegotiation())\n\n\tif contextInfo != nil {\n\t\tif endpoints, ok := contextInfo.Endpoints[\"docker\"]; ok {\n\t\t\tif endpointMap, ok := endpoints.(map[string]any); ok {\n\t\t\t\tif host, ok := endpointMap[\"Host\"].(string); ok {\n\t\t\t\t\tif runConfig.Verbose {\n\t\t\t\t\t\tlog.Printf(\"Using Docker host from context '%s': %s\", contextInfo.Name, host)\n\t\t\t\t\t}\n\n\t\t\t\t\tclientOpts = append(clientOpts, client.WithHost(host))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(clientOpts) == 1 {\n\t\tclientOpts = append(clientOpts, client.FromEnv)\n\t}\n\n\treturn client.NewClientWithOpts(clientOpts...)\n}\n\n// getCurrentDockerContext retrieves the current Docker context information.\nfunc getCurrentDockerContext(ctx context.Context) (*DockerContext, error) {\n\tcmd := exec.CommandContext(ctx, \"docker\", \"context\", \"inspect\")\n\n\toutput, err := cmd.Output()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"getting docker context: %w\", err)\n\t}\n\n\tvar contexts []DockerContext\n\tif err := json.Unmarshal(output, &contexts); err != nil { //nolint:noinlineerr\n\t\treturn nil, fmt.Errorf(\"parsing docker context: %w\", err)\n\t}\n\n\tif len(contexts) > 0 {\n\t\treturn &contexts[0], nil\n\t}\n\n\treturn nil, ErrNoDockerContext\n}\n\n// getDockerSocketPath returns the correct Docker socket path for the current context.\nfunc getDockerSocketPath() string {\n\t// Always use the default socket path for mounting since Docker handles\n\t// the translation to the actual socket (e.g., colima socket) internally\n\treturn \"/var/run/docker.sock\"\n}\n\n// checkImageAvailableLocally checks if the specified Docker image is available locally.\nfunc checkImageAvailableLocally(ctx context.Context, cli *client.Client, imageName string) (bool, error) {\n\t_, _, err := cli.ImageInspectWithRaw(ctx, imageName) //nolint:staticcheck // SA1019: deprecated but functional\n\tif err != nil {\n\t\tif client.IsErrNotFound(err) { //nolint:staticcheck // SA1019: deprecated but functional\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn false, fmt.Errorf(\"inspecting image %s: %w\", imageName, err)\n\t}\n\n\treturn true, nil\n}\n\n// ensureImageAvailable checks if the image is available locally first, then pulls if needed.\nfunc ensureImageAvailable(ctx context.Context, cli *client.Client, imageName string, verbose bool) error {\n\t// First check if image is available locally\n\tavailable, err := checkImageAvailableLocally(ctx, cli, imageName)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"checking local image availability: %w\", err)\n\t}\n\n\tif available {\n\t\tif verbose {\n\t\t\tlog.Printf(\"Image %s is available locally\", imageName)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\t// Image not available locally, try to pull it\n\tif verbose {\n\t\tlog.Printf(\"Image %s not found locally, pulling...\", imageName)\n\t}\n\n\treader, err := cli.ImagePull(ctx, imageName, image.PullOptions{})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"pulling image %s: %w\", imageName, err)\n\t}\n\tdefer reader.Close()\n\n\tif verbose {\n\t\t_, err = io.Copy(os.Stdout, reader)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"reading pull output: %w\", err)\n\t\t}\n\t} else {\n\t\t_, err = io.Copy(io.Discard, reader)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"reading pull output: %w\", err)\n\t\t}\n\n\t\tlog.Printf(\"Image %s pulled successfully\", imageName)\n\t}\n\n\treturn 
nil\n}\n\n// listControlFiles displays the headscale test artifacts created in the control logs directory.\nfunc listControlFiles(logsDir string) {\n\tentries, err := os.ReadDir(logsDir)\n\tif err != nil {\n\t\tlog.Printf(\"Logs directory: %s\", logsDir)\n\t\treturn\n\t}\n\n\tvar (\n\t\tlogFiles  []string\n\t\tdataFiles []string\n\t\tdataDirs  []string\n\t)\n\n\tfor _, entry := range entries {\n\t\tname := entry.Name()\n\t\t// Only show headscale (hs-*) files and directories\n\t\tif !strings.HasPrefix(name, \"hs-\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tif entry.IsDir() {\n\t\t\t// Include directories (pprof, mapresponses)\n\t\t\tif strings.Contains(name, \"-pprof\") || strings.Contains(name, \"-mapresponses\") {\n\t\t\t\tdataDirs = append(dataDirs, name)\n\t\t\t}\n\t\t} else {\n\t\t\t// Include files\n\t\t\tswitch {\n\t\t\tcase strings.HasSuffix(name, \".stderr.log\") || strings.HasSuffix(name, \".stdout.log\"):\n\t\t\t\tlogFiles = append(logFiles, name)\n\t\t\tcase strings.HasSuffix(name, \".db\"):\n\t\t\t\tdataFiles = append(dataFiles, name)\n\t\t\t}\n\t\t}\n\t}\n\n\tlog.Printf(\"Test artifacts saved to: %s\", logsDir)\n\n\tif len(logFiles) > 0 {\n\t\tlog.Printf(\"Headscale logs:\")\n\n\t\tfor _, file := range logFiles {\n\t\t\tlog.Printf(\"  %s\", file)\n\t\t}\n\t}\n\n\tif len(dataFiles) > 0 || len(dataDirs) > 0 {\n\t\tlog.Printf(\"Headscale data:\")\n\n\t\tfor _, file := range dataFiles {\n\t\t\tlog.Printf(\"  %s\", file)\n\t\t}\n\n\t\tfor _, dir := range dataDirs {\n\t\t\tlog.Printf(\"  %s/\", dir)\n\t\t}\n\t}\n}\n\n// extractArtifactsFromContainers collects container logs and files from the specific test run.\nfunc extractArtifactsFromContainers(ctx context.Context, testContainerID, logsDir string, verbose bool) error {\n\tcli, err := createDockerClient(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating Docker client: %w\", err)\n\t}\n\tdefer cli.Close()\n\n\t// List all containers\n\tcontainers, err := cli.ContainerList(ctx, container.ListOptions{All: true})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"listing containers: %w\", err)\n\t}\n\n\t// Get containers from the specific test run\n\tcurrentTestContainers := getCurrentTestContainers(containers, testContainerID, verbose)\n\n\textractedCount := 0\n\n\tfor _, cont := range currentTestContainers {\n\t\t// Extract container logs and tar files\n\t\terr := extractContainerArtifacts(ctx, cli, cont.ID, cont.name, logsDir, verbose)\n\t\tif err != nil {\n\t\t\tif verbose {\n\t\t\t\tlog.Printf(\"Warning: failed to extract artifacts from container %s (%s): %v\", cont.name, cont.ID[:12], err)\n\t\t\t}\n\t\t} else {\n\t\t\tif verbose {\n\t\t\t\tlog.Printf(\"Extracted artifacts from container %s (%s)\", cont.name, cont.ID[:12])\n\t\t\t}\n\n\t\t\textractedCount++\n\t\t}\n\t}\n\n\tif verbose && extractedCount > 0 {\n\t\tlog.Printf(\"Extracted artifacts from %d containers\", extractedCount)\n\t}\n\n\treturn nil\n}\n\n// testContainer represents a container from the current test run.\ntype testContainer struct {\n\tID   string\n\tname string\n}\n\n// getCurrentTestContainers filters containers to only include those from the current test run.\nfunc getCurrentTestContainers(containers []container.Summary, testContainerID string, verbose bool) []testContainer {\n\tvar testRunContainers []testContainer\n\n\t// Find the test container to get its run ID label\n\tvar runID string\n\n\tfor _, cont := range containers {\n\t\tif cont.ID == testContainerID {\n\t\t\tif cont.Labels != nil {\n\t\t\t\trunID = 
cont.Labels[\"hi.run-id\"]\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif runID == \"\" {\n\t\tlog.Printf(\"Error: test container %s missing required hi.run-id label\", testContainerID[:12])\n\t\treturn testRunContainers\n\t}\n\n\tif verbose {\n\t\tlog.Printf(\"Looking for containers with run ID: %s\", runID)\n\t}\n\n\t// Find all containers with the same run ID\n\tfor _, cont := range containers {\n\t\tfor _, name := range cont.Names {\n\t\t\tcontainerName := strings.TrimPrefix(name, \"/\")\n\t\t\tif strings.HasPrefix(containerName, \"hs-\") || strings.HasPrefix(containerName, \"ts-\") {\n\t\t\t\t// Check if container has matching run ID label\n\t\t\t\tif cont.Labels != nil && cont.Labels[\"hi.run-id\"] == runID {\n\t\t\t\t\ttestRunContainers = append(testRunContainers, testContainer{\n\t\t\t\t\t\tID:   cont.ID,\n\t\t\t\t\t\tname: containerName,\n\t\t\t\t\t})\n\t\t\t\t\tif verbose {\n\t\t\t\t\t\tlog.Printf(\"Including container %s (run ID: %s)\", containerName, runID)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn testRunContainers\n}\n\n// extractContainerArtifacts saves logs and tar files from a container.\nfunc extractContainerArtifacts(ctx context.Context, cli *client.Client, containerID, containerName, logsDir string, verbose bool) error {\n\t// Ensure the logs directory exists\n\terr := os.MkdirAll(logsDir, defaultDirPerm)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating logs directory: %w\", err)\n\t}\n\n\t// Extract container logs\n\terr = extractContainerLogs(ctx, cli, containerID, containerName, logsDir, verbose)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"extracting logs: %w\", err)\n\t}\n\n\t// Extract tar files for headscale containers only\n\tif strings.HasPrefix(containerName, \"hs-\") {\n\t\terr := extractContainerFiles(ctx, cli, containerID, containerName, logsDir, verbose)\n\t\tif err != nil {\n\t\t\tif verbose {\n\t\t\t\tlog.Printf(\"Warning: failed to extract files from %s: %v\", containerName, err)\n\t\t\t}\n\t\t\t// Don't fail the whole extraction if files are missing\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// extractContainerLogs saves the stdout and stderr logs from a container to files.\nfunc extractContainerLogs(ctx context.Context, cli *client.Client, containerID, containerName, logsDir string, verbose bool) error {\n\t// Get container logs\n\tlogReader, err := cli.ContainerLogs(ctx, containerID, container.LogsOptions{\n\t\tShowStdout: true,\n\t\tShowStderr: true,\n\t\tTimestamps: false,\n\t\tFollow:     false,\n\t\tTail:       \"all\",\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting container logs: %w\", err)\n\t}\n\tdefer logReader.Close()\n\n\t// Create log files following the headscale naming convention\n\tstdoutPath := filepath.Join(logsDir, containerName+\".stdout.log\")\n\tstderrPath := filepath.Join(logsDir, containerName+\".stderr.log\")\n\n\t// Create buffers to capture stdout and stderr separately\n\tvar stdoutBuf, stderrBuf bytes.Buffer\n\n\t// Demultiplex the Docker logs stream to separate stdout and stderr\n\t_, err = stdcopy.StdCopy(&stdoutBuf, &stderrBuf, logReader)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"demultiplexing container logs: %w\", err)\n\t}\n\n\t// Write stdout logs\n\tif err := os.WriteFile(stdoutPath, stdoutBuf.Bytes(), 0o644); err != nil { //nolint:gosec,noinlineerr // log files should be readable\n\t\treturn fmt.Errorf(\"writing stdout log: %w\", err)\n\t}\n\n\t// Write stderr logs\n\tif err := os.WriteFile(stderrPath, stderrBuf.Bytes(), 0o644); err != nil { //nolint:gosec,noinlineerr // log 
files should be readable\n\t\treturn fmt.Errorf(\"writing stderr log: %w\", err)\n\t}\n\n\tif verbose {\n\t\tlog.Printf(\"Saved logs for %s: %s, %s\", containerName, stdoutPath, stderrPath)\n\t}\n\n\treturn nil\n}\n\n// extractContainerFiles used to extract the database file and directories from\n// headscale containers. The actual extraction is now handled by the integration\n// tests themselves via the SaveProfile, SaveMapResponses, and SaveDatabase\n// functions in hsic.go; this stub is kept for potential future use or other\n// file types.\nfunc extractContainerFiles(ctx context.Context, cli *client.Client, containerID, containerName, logsDir string, verbose bool) error {\n\treturn nil\n}\n
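\n// For orientation, a run's control directory (mounted at /tmp/control inside\n// the test container) typically ends up looking something like this; the\n// names below are illustrative, not exhaustive:\n//\n//\t<logs-dir>/<run-id>/\n//\t\ths-<name>.stdout.log\n//\t\ths-<name>.stderr.log\n//\t\ths-<name>.db\n//\t\tpprof/\n//\t\tmapresponses/\n"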
  },
  {
    "path": "cmd/hi/doctor.go",
    "content": "package main\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"os/exec\"\n\t\"strings\"\n)\n\nvar ErrSystemChecksFailed = errors.New(\"system checks failed\")\n\n// DoctorResult represents the result of a single health check.\ntype DoctorResult struct {\n\tName        string\n\tStatus      string // \"PASS\", \"FAIL\", \"WARN\"\n\tMessage     string\n\tSuggestions []string\n}\n\n// runDoctorCheck performs comprehensive pre-flight checks for integration testing.\nfunc runDoctorCheck(ctx context.Context) error {\n\tresults := []DoctorResult{}\n\n\t// Check 1: Docker binary availability\n\tresults = append(results, checkDockerBinary())\n\n\t// Check 2: Docker daemon connectivity\n\tdockerResult := checkDockerDaemon(ctx)\n\tresults = append(results, dockerResult)\n\n\t// If Docker is available, run additional checks\n\tif dockerResult.Status == \"PASS\" {\n\t\tresults = append(results, checkDockerContext(ctx))\n\t\tresults = append(results, checkDockerSocket(ctx))\n\t\tresults = append(results, checkGolangImage(ctx))\n\t}\n\n\t// Check 3: Go installation\n\tresults = append(results, checkGoInstallation(ctx))\n\n\t// Check 4: Git repository\n\tresults = append(results, checkGitRepository(ctx))\n\n\t// Check 5: Required files\n\tresults = append(results, checkRequiredFiles(ctx))\n\n\t// Display results\n\tdisplayDoctorResults(results)\n\n\t// Return error if any critical checks failed\n\tfor _, result := range results {\n\t\tif result.Status == \"FAIL\" {\n\t\t\treturn fmt.Errorf(\"%w - see details above\", ErrSystemChecksFailed)\n\t\t}\n\t}\n\n\tlog.Printf(\"✅ All system checks passed - ready to run integration tests!\")\n\n\treturn nil\n}\n\n// checkDockerBinary verifies Docker binary is available.\nfunc checkDockerBinary() DoctorResult {\n\t_, err := exec.LookPath(\"docker\")\n\tif err != nil {\n\t\treturn DoctorResult{\n\t\t\tName:    \"Docker Binary\",\n\t\t\tStatus:  \"FAIL\",\n\t\t\tMessage: \"Docker binary not found in PATH\",\n\t\t\tSuggestions: []string{\n\t\t\t\t\"Install Docker: https://docs.docker.com/get-docker/\",\n\t\t\t\t\"For macOS: consider using colima or Docker Desktop\",\n\t\t\t\t\"Ensure docker is in your PATH\",\n\t\t\t},\n\t\t}\n\t}\n\n\treturn DoctorResult{\n\t\tName:    \"Docker Binary\",\n\t\tStatus:  \"PASS\",\n\t\tMessage: \"Docker binary found\",\n\t}\n}\n\n// checkDockerDaemon verifies Docker daemon is running and accessible.\nfunc checkDockerDaemon(ctx context.Context) DoctorResult {\n\tcli, err := createDockerClient(ctx)\n\tif err != nil {\n\t\treturn DoctorResult{\n\t\t\tName:    \"Docker Daemon\",\n\t\t\tStatus:  \"FAIL\",\n\t\t\tMessage: fmt.Sprintf(\"Cannot create Docker client: %v\", err),\n\t\t\tSuggestions: []string{\n\t\t\t\t\"Start Docker daemon/service\",\n\t\t\t\t\"Check Docker Desktop is running (if using Docker Desktop)\",\n\t\t\t\t\"For colima: run 'colima start'\",\n\t\t\t\t\"Verify DOCKER_HOST environment variable if set\",\n\t\t\t},\n\t\t}\n\t}\n\tdefer cli.Close()\n\n\t_, err = cli.Ping(ctx)\n\tif err != nil {\n\t\treturn DoctorResult{\n\t\t\tName:    \"Docker Daemon\",\n\t\t\tStatus:  \"FAIL\",\n\t\t\tMessage: fmt.Sprintf(\"Cannot ping Docker daemon: %v\", err),\n\t\t\tSuggestions: []string{\n\t\t\t\t\"Ensure Docker daemon is running\",\n\t\t\t\t\"Check Docker socket permissions\",\n\t\t\t\t\"Try: docker info\",\n\t\t\t},\n\t\t}\n\t}\n\n\treturn DoctorResult{\n\t\tName:    \"Docker Daemon\",\n\t\tStatus:  \"PASS\",\n\t\tMessage: \"Docker daemon is running and accessible\",\n\t}\n}\n\n// checkDockerContext 
verifies Docker context configuration.\nfunc checkDockerContext(ctx context.Context) DoctorResult {\n\tcontextInfo, err := getCurrentDockerContext(ctx)\n\tif err != nil {\n\t\treturn DoctorResult{\n\t\t\tName:    \"Docker Context\",\n\t\t\tStatus:  \"WARN\",\n\t\t\tMessage: \"Could not detect Docker context, using default settings\",\n\t\t\tSuggestions: []string{\n\t\t\t\t\"Check: docker context ls\",\n\t\t\t\t\"Consider setting up a specific context if needed\",\n\t\t\t},\n\t\t}\n\t}\n\n\tif contextInfo == nil {\n\t\treturn DoctorResult{\n\t\t\tName:    \"Docker Context\",\n\t\t\tStatus:  \"PASS\",\n\t\t\tMessage: \"Using default Docker context\",\n\t\t}\n\t}\n\n\treturn DoctorResult{\n\t\tName:    \"Docker Context\",\n\t\tStatus:  \"PASS\",\n\t\tMessage: \"Using Docker context: \" + contextInfo.Name,\n\t}\n}\n\n// checkDockerSocket verifies Docker socket accessibility.\nfunc checkDockerSocket(ctx context.Context) DoctorResult {\n\tcli, err := createDockerClient(ctx)\n\tif err != nil {\n\t\treturn DoctorResult{\n\t\t\tName:    \"Docker Socket\",\n\t\t\tStatus:  \"FAIL\",\n\t\t\tMessage: fmt.Sprintf(\"Cannot access Docker socket: %v\", err),\n\t\t\tSuggestions: []string{\n\t\t\t\t\"Check Docker socket permissions\",\n\t\t\t\t\"Add user to docker group: sudo usermod -aG docker $USER\",\n\t\t\t\t\"For colima: ensure socket is accessible\",\n\t\t\t},\n\t\t}\n\t}\n\tdefer cli.Close()\n\n\tinfo, err := cli.Info(ctx)\n\tif err != nil {\n\t\treturn DoctorResult{\n\t\t\tName:    \"Docker Socket\",\n\t\t\tStatus:  \"FAIL\",\n\t\t\tMessage: fmt.Sprintf(\"Cannot get Docker info: %v\", err),\n\t\t\tSuggestions: []string{\n\t\t\t\t\"Check Docker daemon status\",\n\t\t\t\t\"Verify socket permissions\",\n\t\t\t},\n\t\t}\n\t}\n\n\treturn DoctorResult{\n\t\tName:    \"Docker Socket\",\n\t\tStatus:  \"PASS\",\n\t\tMessage: fmt.Sprintf(\"Docker socket accessible (Server: %s)\", info.ServerVersion),\n\t}\n}\n\n// checkGolangImage verifies the golang Docker image is available locally or can be pulled.\nfunc checkGolangImage(ctx context.Context) DoctorResult {\n\tcli, err := createDockerClient(ctx)\n\tif err != nil {\n\t\treturn DoctorResult{\n\t\t\tName:    \"Golang Image\",\n\t\t\tStatus:  \"FAIL\",\n\t\t\tMessage: \"Cannot create Docker client for image check\",\n\t\t}\n\t}\n\tdefer cli.Close()\n\n\tgoVersion := detectGoVersion()\n\timageName := \"golang:\" + goVersion\n\n\t// First check if image is available locally\n\tavailable, err := checkImageAvailableLocally(ctx, cli, imageName)\n\tif err != nil {\n\t\treturn DoctorResult{\n\t\t\tName:    \"Golang Image\",\n\t\t\tStatus:  \"FAIL\",\n\t\t\tMessage: fmt.Sprintf(\"Cannot check golang image %s: %v\", imageName, err),\n\t\t\tSuggestions: []string{\n\t\t\t\t\"Check Docker daemon status\",\n\t\t\t\t\"Try: docker images | grep golang\",\n\t\t\t},\n\t\t}\n\t}\n\n\tif available {\n\t\treturn DoctorResult{\n\t\t\tName:    \"Golang Image\",\n\t\t\tStatus:  \"PASS\",\n\t\t\tMessage: fmt.Sprintf(\"Golang image %s is available locally\", imageName),\n\t\t}\n\t}\n\n\t// Image not available locally, try to pull it\n\terr = ensureImageAvailable(ctx, cli, imageName, false)\n\tif err != nil {\n\t\treturn DoctorResult{\n\t\t\tName:    \"Golang Image\",\n\t\t\tStatus:  \"FAIL\",\n\t\t\tMessage: fmt.Sprintf(\"Golang image %s not available locally and cannot pull: %v\", imageName, err),\n\t\t\tSuggestions: []string{\n\t\t\t\t\"Check internet connectivity\",\n\t\t\t\t\"Verify Docker Hub access\",\n\t\t\t\t\"Try: docker pull \" + imageName,\n\t\t\t\t\"Or run tests offline if 
image was pulled previously\",\n\t\t\t},\n\t\t}\n\t}\n\n\treturn DoctorResult{\n\t\tName:    \"Golang Image\",\n\t\tStatus:  \"PASS\",\n\t\tMessage: fmt.Sprintf(\"Golang image %s is now available\", imageName),\n\t}\n}\n\n// checkGoInstallation verifies Go is installed and working.\nfunc checkGoInstallation(ctx context.Context) DoctorResult {\n\t_, err := exec.LookPath(\"go\")\n\tif err != nil {\n\t\treturn DoctorResult{\n\t\t\tName:    \"Go Installation\",\n\t\t\tStatus:  \"FAIL\",\n\t\t\tMessage: \"Go binary not found in PATH\",\n\t\t\tSuggestions: []string{\n\t\t\t\t\"Install Go: https://golang.org/dl/\",\n\t\t\t\t\"Ensure go is in your PATH\",\n\t\t\t},\n\t\t}\n\t}\n\n\tcmd := exec.CommandContext(ctx, \"go\", \"version\")\n\n\toutput, err := cmd.Output()\n\tif err != nil {\n\t\treturn DoctorResult{\n\t\t\tName:    \"Go Installation\",\n\t\t\tStatus:  \"FAIL\",\n\t\t\tMessage: fmt.Sprintf(\"Cannot get Go version: %v\", err),\n\t\t}\n\t}\n\n\tversion := strings.TrimSpace(string(output))\n\n\treturn DoctorResult{\n\t\tName:    \"Go Installation\",\n\t\tStatus:  \"PASS\",\n\t\tMessage: version,\n\t}\n}\n\n// checkGitRepository verifies we're in a git repository.\nfunc checkGitRepository(ctx context.Context) DoctorResult {\n\tcmd := exec.CommandContext(ctx, \"git\", \"rev-parse\", \"--git-dir\")\n\n\terr := cmd.Run()\n\tif err != nil {\n\t\treturn DoctorResult{\n\t\t\tName:    \"Git Repository\",\n\t\t\tStatus:  \"FAIL\",\n\t\t\tMessage: \"Not in a Git repository\",\n\t\t\tSuggestions: []string{\n\t\t\t\t\"Run from within the headscale git repository\",\n\t\t\t\t\"Clone the repository: git clone https://github.com/juanfont/headscale.git\",\n\t\t\t},\n\t\t}\n\t}\n\n\treturn DoctorResult{\n\t\tName:    \"Git Repository\",\n\t\tStatus:  \"PASS\",\n\t\tMessage: \"Running in Git repository\",\n\t}\n}\n\n// checkRequiredFiles verifies required files exist.\nfunc checkRequiredFiles(ctx context.Context) DoctorResult {\n\trequiredFiles := []string{\n\t\t\"go.mod\",\n\t\t\"integration/\",\n\t\t\"cmd/hi/\",\n\t}\n\n\tvar missingFiles []string\n\n\tfor _, file := range requiredFiles {\n\t\tcmd := exec.CommandContext(ctx, \"test\", \"-e\", file)\n\n\t\terr := cmd.Run()\n\t\tif err != nil {\n\t\t\tmissingFiles = append(missingFiles, file)\n\t\t}\n\t}\n\n\tif len(missingFiles) > 0 {\n\t\treturn DoctorResult{\n\t\t\tName:    \"Required Files\",\n\t\t\tStatus:  \"FAIL\",\n\t\t\tMessage: \"Missing required files: \" + strings.Join(missingFiles, \", \"),\n\t\t\tSuggestions: []string{\n\t\t\t\t\"Ensure you're in the headscale project root directory\",\n\t\t\t\t\"Check that integration/ directory exists\",\n\t\t\t\t\"Verify this is a complete headscale repository\",\n\t\t\t},\n\t\t}\n\t}\n\n\treturn DoctorResult{\n\t\tName:    \"Required Files\",\n\t\tStatus:  \"PASS\",\n\t\tMessage: \"All required files found\",\n\t}\n}\n\n// displayDoctorResults shows the results in a formatted way.\nfunc displayDoctorResults(results []DoctorResult) {\n\tlog.Printf(\"🔍 System Health Check Results\")\n\tlog.Printf(\"================================\")\n\n\tfor _, result := range results {\n\t\tvar icon string\n\n\t\tswitch result.Status {\n\t\tcase \"PASS\":\n\t\t\ticon = \"✅\"\n\t\tcase \"WARN\":\n\t\t\ticon = \"⚠️\"\n\t\tcase \"FAIL\":\n\t\t\ticon = \"❌\"\n\t\tdefault:\n\t\t\ticon = \"❓\"\n\t\t}\n\n\t\tlog.Printf(\"%s %s: %s\", icon, result.Name, result.Message)\n\n\t\tif len(result.Suggestions) > 0 {\n\t\t\tfor _, suggestion := range result.Suggestions {\n\t\t\t\tlog.Printf(\"   💡 %s\", 
suggestion)\n\t\t\t}\n\t\t}\n\t}\n\n\tlog.Printf(\"================================\")\n}\n"
  },
  {
    "path": "cmd/hi/main.go",
    "content": "package main\n\nimport (\n\t\"context\"\n\t\"os\"\n\n\t\"github.com/creachadair/command\"\n\t\"github.com/creachadair/flax\"\n)\n\nvar runConfig RunConfig\n\nfunc main() {\n\troot := command.C{\n\t\tName: \"hi\",\n\t\tHelp: \"Headscale Integration test runner\",\n\t\tCommands: []*command.C{\n\t\t\t{\n\t\t\t\tName:     \"run\",\n\t\t\t\tHelp:     \"Run integration tests\",\n\t\t\t\tUsage:    \"run [test-pattern] [flags]\",\n\t\t\t\tSetFlags: command.Flags(flax.MustBind, &runConfig),\n\t\t\t\tRun:      runIntegrationTest,\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"doctor\",\n\t\t\t\tHelp: \"Check system requirements for running integration tests\",\n\t\t\t\tRun: func(env *command.Env) error {\n\t\t\t\t\treturn runDoctorCheck(env.Context())\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"clean\",\n\t\t\t\tHelp: \"Clean Docker resources\",\n\t\t\t\tCommands: []*command.C{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"networks\",\n\t\t\t\t\t\tHelp: \"Prune unused Docker networks\",\n\t\t\t\t\t\tRun: func(env *command.Env) error {\n\t\t\t\t\t\t\treturn pruneDockerNetworks(env.Context())\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"images\",\n\t\t\t\t\t\tHelp: \"Clean old test images\",\n\t\t\t\t\t\tRun: func(env *command.Env) error {\n\t\t\t\t\t\t\treturn cleanOldImages(env.Context())\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"containers\",\n\t\t\t\t\t\tHelp: \"Kill all test containers\",\n\t\t\t\t\t\tRun: func(env *command.Env) error {\n\t\t\t\t\t\t\treturn killTestContainers(env.Context())\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"cache\",\n\t\t\t\t\t\tHelp: \"Clean Go module cache volume\",\n\t\t\t\t\t\tRun: func(env *command.Env) error {\n\t\t\t\t\t\t\treturn cleanCacheVolume(env.Context())\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"all\",\n\t\t\t\t\t\tHelp: \"Run all cleanup operations\",\n\t\t\t\t\t\tRun: func(env *command.Env) error {\n\t\t\t\t\t\t\treturn cleanAll(env.Context())\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tcommand.HelpCommand(nil),\n\t\t},\n\t}\n\n\tenv := root.NewEnv(nil).MergeFlags(true)\n\tcommand.RunOrFail(env, os.Args[1:])\n}\n\nfunc cleanAll(ctx context.Context) error {\n\terr := killTestContainers(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = pruneDockerNetworks(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = cleanOldImages(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn cleanCacheVolume(ctx)\n}\n"
  },
  {
    "path": "cmd/hi/run.go",
    "content": "package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"time\"\n\n\t\"github.com/creachadair/command\"\n)\n\nvar ErrTestPatternRequired = errors.New(\"test pattern is required as first argument or use --test flag\")\n\ntype RunConfig struct {\n\tTestPattern   string        `flag:\"test,Test pattern to run\"`\n\tTimeout       time.Duration `flag:\"timeout,default=120m,Test timeout\"`\n\tFailFast      bool          `flag:\"failfast,default=true,Stop on first test failure\"`\n\tUsePostgres   bool          `flag:\"postgres,default=false,Use PostgreSQL instead of SQLite\"`\n\tGoVersion     string        `flag:\"go-version,Go version to use (auto-detected from go.mod)\"`\n\tCleanBefore   bool          `flag:\"clean-before,default=true,Clean stale resources before test\"`\n\tCleanAfter    bool          `flag:\"clean-after,default=true,Clean resources after test\"`\n\tKeepOnFailure bool          `flag:\"keep-on-failure,default=false,Keep containers on test failure\"`\n\tLogsDir       string        `flag:\"logs-dir,default=control_logs,Control logs directory\"`\n\tVerbose       bool          `flag:\"verbose,default=false,Verbose output\"`\n\tStats         bool          `flag:\"stats,default=false,Collect and display container resource usage statistics\"`\n\tHSMemoryLimit float64       `flag:\"hs-memory-limit,default=0,Fail test if any Headscale container exceeds this memory limit in MB (0 = disabled)\"`\n\tTSMemoryLimit float64       `flag:\"ts-memory-limit,default=0,Fail test if any Tailscale container exceeds this memory limit in MB (0 = disabled)\"`\n}\n\n// runIntegrationTest executes the integration test workflow.\nfunc runIntegrationTest(env *command.Env) error {\n\targs := env.Args\n\tif len(args) > 0 && runConfig.TestPattern == \"\" {\n\t\trunConfig.TestPattern = args[0]\n\t}\n\n\tif runConfig.TestPattern == \"\" {\n\t\treturn ErrTestPatternRequired\n\t}\n\n\tif runConfig.GoVersion == \"\" {\n\t\trunConfig.GoVersion = detectGoVersion()\n\t}\n\n\t// Run pre-flight checks\n\tif runConfig.Verbose {\n\t\tlog.Printf(\"Running pre-flight system checks...\")\n\t}\n\n\terr := runDoctorCheck(env.Context())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"pre-flight checks failed: %w\", err)\n\t}\n\n\tif runConfig.Verbose {\n\t\tlog.Printf(\"Running test: %s\", runConfig.TestPattern)\n\t\tlog.Printf(\"Go version: %s\", runConfig.GoVersion)\n\t\tlog.Printf(\"Timeout: %s\", runConfig.Timeout)\n\t\tlog.Printf(\"Use PostgreSQL: %t\", runConfig.UsePostgres)\n\t}\n\n\treturn runTestContainer(env.Context(), &runConfig)\n}\n\n// detectGoVersion reads the Go version from go.mod file.\nfunc detectGoVersion() string {\n\tgoModPath := filepath.Join(\"..\", \"..\", \"go.mod\")\n\n\tif _, err := os.Stat(\"go.mod\"); err == nil { //nolint:noinlineerr\n\t\tgoModPath = \"go.mod\"\n\t} else if _, err := os.Stat(\"../../go.mod\"); err == nil { //nolint:noinlineerr\n\t\tgoModPath = \"../../go.mod\"\n\t}\n\n\tcontent, err := os.ReadFile(goModPath)\n\tif err != nil {\n\t\treturn \"1.26.1\"\n\t}\n\n\tlines := splitLines(string(content))\n\tfor _, line := range lines {\n\t\tif len(line) > 3 && line[:3] == \"go \" {\n\t\t\tversion := line[3:]\n\t\t\tif idx := indexOf(version, \" \"); idx != -1 {\n\t\t\t\tversion = version[:idx]\n\t\t\t}\n\n\t\t\treturn version\n\t\t}\n\t}\n\n\treturn \"1.26.1\"\n}\n\n// splitLines splits a string into lines without using strings.Split.\nfunc splitLines(s string) []string {\n\tvar (\n\t\tlines   []string\n\t\tcurrent string\n\t)\n\n\tfor _, 
char := range s {\n\t\tif char == '\\n' {\n\t\t\tlines = append(lines, current)\n\t\t\tcurrent = \"\"\n\t\t} else {\n\t\t\tcurrent += string(char)\n\t\t}\n\t}\n\n\tif current != \"\" {\n\t\tlines = append(lines, current)\n\t}\n\n\treturn lines\n}\n\n// indexOf finds the first occurrence of substr in s.\nfunc indexOf(s, substr string) int {\n\tfor i := 0; i <= len(s)-len(substr); i++ {\n\t\tif s[i:i+len(substr)] == substr {\n\t\t\treturn i\n\t\t}\n\t}\n\n\treturn -1\n}\n"
  },
  {
    "path": "cmd/hi/stats.go",
    "content": "package main\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/docker/docker/api/types\"\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/docker/docker/api/types/events\"\n\t\"github.com/docker/docker/api/types/filters\"\n\t\"github.com/docker/docker/client\"\n)\n\n// ErrStatsCollectionAlreadyStarted is returned when trying to start stats collection that is already running.\nvar ErrStatsCollectionAlreadyStarted = errors.New(\"stats collection already started\")\n\n// ContainerStats represents statistics for a single container.\ntype ContainerStats struct {\n\tContainerID   string\n\tContainerName string\n\tStats         []StatsSample\n\tmutex         sync.RWMutex\n}\n\n// StatsSample represents a single stats measurement.\ntype StatsSample struct {\n\tTimestamp time.Time\n\tCPUUsage  float64 // CPU usage percentage\n\tMemoryMB  float64 // Memory usage in MB\n}\n\n// StatsCollector manages collection of container statistics.\ntype StatsCollector struct {\n\tclient            *client.Client\n\tcontainers        map[string]*ContainerStats\n\tstopChan          chan struct{}\n\twg                sync.WaitGroup\n\tmutex             sync.RWMutex\n\tcollectionStarted bool\n}\n\n// NewStatsCollector creates a new stats collector instance.\nfunc NewStatsCollector(ctx context.Context) (*StatsCollector, error) {\n\tcli, err := createDockerClient(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating Docker client: %w\", err)\n\t}\n\n\treturn &StatsCollector{\n\t\tclient:     cli,\n\t\tcontainers: make(map[string]*ContainerStats),\n\t\tstopChan:   make(chan struct{}),\n\t}, nil\n}\n\n// StartCollection begins monitoring all containers and collecting stats for hs- and ts- containers with matching run ID.\nfunc (sc *StatsCollector) StartCollection(ctx context.Context, runID string, verbose bool) error {\n\tsc.mutex.Lock()\n\tdefer sc.mutex.Unlock()\n\n\tif sc.collectionStarted {\n\t\treturn ErrStatsCollectionAlreadyStarted\n\t}\n\n\tsc.collectionStarted = true\n\n\t// Start monitoring existing containers\n\tsc.wg.Add(1)\n\n\tgo sc.monitorExistingContainers(ctx, runID, verbose)\n\n\t// Start Docker events monitoring for new containers\n\tsc.wg.Add(1)\n\n\tgo sc.monitorDockerEvents(ctx, runID, verbose)\n\n\tif verbose {\n\t\tlog.Printf(\"Started container monitoring for run ID %s\", runID)\n\t}\n\n\treturn nil\n}\n\n// StopCollection stops all stats collection.\nfunc (sc *StatsCollector) StopCollection() {\n\t// Check if already stopped without holding lock\n\tsc.mutex.RLock()\n\n\tif !sc.collectionStarted {\n\t\tsc.mutex.RUnlock()\n\t\treturn\n\t}\n\n\tsc.mutex.RUnlock()\n\n\t// Signal stop to all goroutines\n\tclose(sc.stopChan)\n\n\t// Wait for all goroutines to finish\n\tsc.wg.Wait()\n\n\t// Mark as stopped\n\tsc.mutex.Lock()\n\tsc.collectionStarted = false\n\tsc.mutex.Unlock()\n}\n\n// monitorExistingContainers checks for existing containers that match our criteria.\nfunc (sc *StatsCollector) monitorExistingContainers(ctx context.Context, runID string, verbose bool) {\n\tdefer sc.wg.Done()\n\n\tcontainers, err := sc.client.ContainerList(ctx, container.ListOptions{})\n\tif err != nil {\n\t\tif verbose {\n\t\t\tlog.Printf(\"Failed to list existing containers: %v\", err)\n\t\t}\n\n\t\treturn\n\t}\n\n\tfor _, cont := range containers {\n\t\tif sc.shouldMonitorContainer(cont, runID) {\n\t\t\tsc.startStatsForContainer(ctx, cont.ID, cont.Names[0], 
verbose)\n\t\t}\n\t}\n}\n\n// monitorDockerEvents listens for container start events and begins monitoring relevant containers.\nfunc (sc *StatsCollector) monitorDockerEvents(ctx context.Context, runID string, verbose bool) {\n\tdefer sc.wg.Done()\n\n\tfilter := filters.NewArgs()\n\tfilter.Add(\"type\", \"container\")\n\tfilter.Add(\"event\", \"start\")\n\n\teventOptions := events.ListOptions{\n\t\tFilters: filter,\n\t}\n\n\tevents, errs := sc.client.Events(ctx, eventOptions)\n\n\tfor {\n\t\tselect {\n\t\tcase <-sc.stopChan:\n\t\t\treturn\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase event := <-events:\n\t\t\tif event.Type == \"container\" && event.Action == \"start\" {\n\t\t\t\t// Get container details\n\t\t\t\tcontainerInfo, err := sc.client.ContainerInspect(ctx, event.ID) //nolint:staticcheck // SA1019: use Actor.ID\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// Convert to types.Container format for consistency\n\t\t\t\tcont := types.Container{ //nolint:staticcheck // SA1019: use container.Summary\n\t\t\t\t\tID:     containerInfo.ID,\n\t\t\t\t\tNames:  []string{containerInfo.Name},\n\t\t\t\t\tLabels: containerInfo.Config.Labels,\n\t\t\t\t}\n\n\t\t\t\tif sc.shouldMonitorContainer(cont, runID) {\n\t\t\t\t\tsc.startStatsForContainer(ctx, cont.ID, cont.Names[0], verbose)\n\t\t\t\t}\n\t\t\t}\n\t\tcase err := <-errs:\n\t\t\tif verbose {\n\t\t\t\tlog.Printf(\"Error in Docker events stream: %v\", err)\n\t\t\t}\n\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// shouldMonitorContainer determines if a container should be monitored.\nfunc (sc *StatsCollector) shouldMonitorContainer(cont types.Container, runID string) bool { //nolint:staticcheck // SA1019: use container.Summary\n\t// Check if it has the correct run ID label\n\tif cont.Labels == nil || cont.Labels[\"hi.run-id\"] != runID {\n\t\treturn false\n\t}\n\n\t// Check if it's an hs- or ts- container\n\tfor _, name := range cont.Names {\n\t\tcontainerName := strings.TrimPrefix(name, \"/\")\n\t\tif strings.HasPrefix(containerName, \"hs-\") || strings.HasPrefix(containerName, \"ts-\") {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n// startStatsForContainer begins stats collection for a specific container.\nfunc (sc *StatsCollector) startStatsForContainer(ctx context.Context, containerID, containerName string, verbose bool) {\n\tcontainerName = strings.TrimPrefix(containerName, \"/\")\n\n\tsc.mutex.Lock()\n\t// Check if we're already monitoring this container\n\tif _, exists := sc.containers[containerID]; exists {\n\t\tsc.mutex.Unlock()\n\t\treturn\n\t}\n\n\tsc.containers[containerID] = &ContainerStats{\n\t\tContainerID:   containerID,\n\t\tContainerName: containerName,\n\t\tStats:         make([]StatsSample, 0),\n\t}\n\tsc.mutex.Unlock()\n\n\tif verbose {\n\t\tlog.Printf(\"Starting stats collection for container %s (%s)\", containerName, containerID[:12])\n\t}\n\n\tsc.wg.Add(1)\n\n\tgo sc.collectStatsForContainer(ctx, containerID, verbose)\n}\n\n// collectStatsForContainer collects stats for a specific container using Docker API streaming.\nfunc (sc *StatsCollector) collectStatsForContainer(ctx context.Context, containerID string, verbose bool) {\n\tdefer sc.wg.Done()\n\n\t// Use Docker API streaming stats - much more efficient than CLI\n\tstatsResponse, err := sc.client.ContainerStats(ctx, containerID, true)\n\tif err != nil {\n\t\tif verbose {\n\t\t\tlog.Printf(\"Failed to get stats stream for container %s: %v\", containerID[:12], err)\n\t\t}\n\n\t\treturn\n\t}\n\tdefer statsResponse.Body.Close()\n\n\tdecoder := 
json.NewDecoder(statsResponse.Body)\n\n\tvar prevStats *container.Stats //nolint:staticcheck // SA1019: use StatsResponse\n\n\tfor {\n\t\tselect {\n\t\tcase <-sc.stopChan:\n\t\t\treturn\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t\tvar stats container.Stats //nolint:staticcheck // SA1019: use StatsResponse\n\n\t\t\terr := decoder.Decode(&stats)\n\t\t\tif err != nil {\n\t\t\t\t// EOF is expected when container stops or stream ends\n\t\t\t\tif err.Error() != \"EOF\" && verbose {\n\t\t\t\t\tlog.Printf(\"Failed to decode stats for container %s: %v\", containerID[:12], err)\n\t\t\t\t}\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Calculate CPU percentage (only if we have previous stats)\n\t\t\tvar cpuPercent float64\n\t\t\tif prevStats != nil {\n\t\t\t\tcpuPercent = calculateCPUPercent(prevStats, &stats)\n\t\t\t}\n\n\t\t\t// Calculate memory usage in MB\n\t\t\tmemoryMB := float64(stats.MemoryStats.Usage) / (1024 * 1024)\n\n\t\t\t// Store the sample (skip first sample since CPU calculation needs previous stats)\n\t\t\tif prevStats != nil {\n\t\t\t\t// Get container stats reference without holding the main mutex\n\t\t\t\tvar (\n\t\t\t\t\tcontainerStats *ContainerStats\n\t\t\t\t\texists         bool\n\t\t\t\t)\n\n\t\t\t\tsc.mutex.RLock()\n\t\t\t\tcontainerStats, exists = sc.containers[containerID]\n\t\t\t\tsc.mutex.RUnlock()\n\n\t\t\t\tif exists && containerStats != nil {\n\t\t\t\t\tcontainerStats.mutex.Lock()\n\t\t\t\t\tcontainerStats.Stats = append(containerStats.Stats, StatsSample{\n\t\t\t\t\t\tTimestamp: time.Now(),\n\t\t\t\t\t\tCPUUsage:  cpuPercent,\n\t\t\t\t\t\tMemoryMB:  memoryMB,\n\t\t\t\t\t})\n\t\t\t\t\tcontainerStats.mutex.Unlock()\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Save current stats for next iteration\n\t\t\tprevStats = &stats\n\t\t}\n\t}\n}\n\n// calculateCPUPercent calculates CPU usage percentage from Docker stats.\nfunc calculateCPUPercent(prevStats, stats *container.Stats) float64 { //nolint:staticcheck // SA1019: use StatsResponse\n\t// CPU calculation based on Docker's implementation\n\tcpuDelta := float64(stats.CPUStats.CPUUsage.TotalUsage) - float64(prevStats.CPUStats.CPUUsage.TotalUsage)\n\tsystemDelta := float64(stats.CPUStats.SystemUsage) - float64(prevStats.CPUStats.SystemUsage)\n\n\tif systemDelta > 0 && cpuDelta >= 0 {\n\t\t// Calculate CPU percentage: (container CPU delta / system CPU delta) * number of CPUs * 100\n\t\tnumCPUs := float64(len(stats.CPUStats.CPUUsage.PercpuUsage))\n\t\tif numCPUs == 0 {\n\t\t\t// Fallback: if PercpuUsage is not available, assume 1 CPU\n\t\t\tnumCPUs = 1.0\n\t\t}\n\n\t\treturn (cpuDelta / systemDelta) * numCPUs * 100.0\n\t}\n\n\treturn 0.0\n}\n\n// ContainerStatsSummary represents summary statistics for a container.\ntype ContainerStatsSummary struct {\n\tContainerName string\n\tSampleCount   int\n\tCPU           StatsSummary\n\tMemory        StatsSummary\n}\n\n// MemoryViolation represents a container that exceeded the memory limit.\ntype MemoryViolation struct {\n\tContainerName string\n\tMaxMemoryMB   float64\n\tLimitMB       float64\n}\n\n// StatsSummary represents min, max, and average for a metric.\ntype StatsSummary struct {\n\tMin     float64\n\tMax     float64\n\tAverage float64\n}\n\n// GetSummary returns a summary of collected statistics.\nfunc (sc *StatsCollector) GetSummary() []ContainerStatsSummary {\n\t// Take snapshot of container references without holding main lock long\n\tsc.mutex.RLock()\n\n\tcontainerRefs := make([]*ContainerStats, 0, len(sc.containers))\n\tfor _, containerStats := range sc.containers 
{\n\t\tcontainerRefs = append(containerRefs, containerStats)\n\t}\n\n\tsc.mutex.RUnlock()\n\n\tsummaries := make([]ContainerStatsSummary, 0, len(containerRefs))\n\n\tfor _, containerStats := range containerRefs {\n\t\tcontainerStats.mutex.RLock()\n\t\tstats := make([]StatsSample, len(containerStats.Stats))\n\t\tcopy(stats, containerStats.Stats)\n\t\tcontainerName := containerStats.ContainerName\n\t\tcontainerStats.mutex.RUnlock()\n\n\t\tif len(stats) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tsummary := ContainerStatsSummary{\n\t\t\tContainerName: containerName,\n\t\t\tSampleCount:   len(stats),\n\t\t}\n\n\t\t// Calculate CPU stats\n\t\tcpuValues := make([]float64, len(stats))\n\t\tmemoryValues := make([]float64, len(stats))\n\n\t\tfor i, sample := range stats {\n\t\t\tcpuValues[i] = sample.CPUUsage\n\t\t\tmemoryValues[i] = sample.MemoryMB\n\t\t}\n\n\t\tsummary.CPU = calculateStatsSummary(cpuValues)\n\t\tsummary.Memory = calculateStatsSummary(memoryValues)\n\n\t\tsummaries = append(summaries, summary)\n\t}\n\n\t// Sort by container name for consistent output\n\tsort.Slice(summaries, func(i, j int) bool {\n\t\treturn summaries[i].ContainerName < summaries[j].ContainerName\n\t})\n\n\treturn summaries\n}\n\n// calculateStatsSummary calculates min, max, and average for a slice of values.\nfunc calculateStatsSummary(values []float64) StatsSummary {\n\tif len(values) == 0 {\n\t\treturn StatsSummary{}\n\t}\n\n\tminVal := values[0]\n\tmaxVal := values[0]\n\tsum := 0.0\n\n\tfor _, value := range values {\n\t\tif value < minVal {\n\t\t\tminVal = value\n\t\t}\n\n\t\tif value > maxVal {\n\t\t\tmaxVal = value\n\t\t}\n\n\t\tsum += value\n\t}\n\n\treturn StatsSummary{\n\t\tMin:     minVal,\n\t\tMax:     maxVal,\n\t\tAverage: sum / float64(len(values)),\n\t}\n}\n\n// PrintSummary prints the statistics summary to the console.\nfunc (sc *StatsCollector) PrintSummary() {\n\tsummaries := sc.GetSummary()\n\n\tif len(summaries) == 0 {\n\t\tlog.Printf(\"No container statistics collected\")\n\t\treturn\n\t}\n\n\tlog.Printf(\"Container Resource Usage Summary:\")\n\tlog.Printf(\"================================\")\n\n\tfor _, summary := range summaries {\n\t\tlog.Printf(\"Container: %s (%d samples)\", summary.ContainerName, summary.SampleCount)\n\t\tlog.Printf(\"  CPU Usage:    Min: %6.2f%%  Max: %6.2f%%  Avg: %6.2f%%\",\n\t\t\tsummary.CPU.Min, summary.CPU.Max, summary.CPU.Average)\n\t\tlog.Printf(\"  Memory Usage: Min: %6.1f MB Max: %6.1f MB Avg: %6.1f MB\",\n\t\t\tsummary.Memory.Min, summary.Memory.Max, summary.Memory.Average)\n\t\tlog.Printf(\"\")\n\t}\n}\n\n// CheckMemoryLimits checks if any containers exceeded their memory limits.\nfunc (sc *StatsCollector) CheckMemoryLimits(hsLimitMB, tsLimitMB float64) []MemoryViolation {\n\tif hsLimitMB <= 0 && tsLimitMB <= 0 {\n\t\treturn nil\n\t}\n\n\tsummaries := sc.GetSummary()\n\n\tvar violations []MemoryViolation\n\n\tfor _, summary := range summaries {\n\t\tvar limitMB float64\n\t\tif strings.HasPrefix(summary.ContainerName, \"hs-\") {\n\t\t\tlimitMB = hsLimitMB\n\t\t} else if strings.HasPrefix(summary.ContainerName, \"ts-\") {\n\t\t\tlimitMB = tsLimitMB\n\t\t} else {\n\t\t\tcontinue // Skip containers that don't match our patterns\n\t\t}\n\n\t\tif limitMB > 0 && summary.Memory.Max > limitMB {\n\t\t\tviolations = append(violations, MemoryViolation{\n\t\t\t\tContainerName: summary.ContainerName,\n\t\t\t\tMaxMemoryMB:   summary.Memory.Max,\n\t\t\t\tLimitMB:       limitMB,\n\t\t\t})\n\t\t}\n\t}\n\n\treturn violations\n}\n\n// PrintSummaryAndCheckLimits prints the statistics 
summary and returns memory violations if any.\nfunc (sc *StatsCollector) PrintSummaryAndCheckLimits(hsLimitMB, tsLimitMB float64) []MemoryViolation {\n\tsc.PrintSummary()\n\treturn sc.CheckMemoryLimits(hsLimitMB, tsLimitMB)\n}\n\n// Close closes the stats collector and cleans up resources.\nfunc (sc *StatsCollector) Close() error {\n\tsc.StopCollection()\n\treturn sc.client.Close()\n}\n"
  },
  {
    "path": "cmd/mapresponses/main.go",
    "content": "package main\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/creachadair/command\"\n\t\"github.com/creachadair/flax\"\n\t\"github.com/juanfont/headscale/hscontrol/mapper\"\n\t\"github.com/juanfont/headscale/integration/integrationutil\"\n)\n\ntype MapConfig struct {\n\tDirectory string `flag:\"directory,Directory to read map responses from\"`\n}\n\nvar (\n\tmapConfig            MapConfig\n\terrDirectoryRequired = errors.New(\"directory is required\")\n)\n\nfunc main() {\n\troot := command.C{\n\t\tName: \"mapresponses\",\n\t\tHelp: \"MapResponses is a tool to map and compare map responses from a directory\",\n\t\tCommands: []*command.C{\n\t\t\t{\n\t\t\t\tName:     \"online\",\n\t\t\t\tHelp:     \"\",\n\t\t\t\tUsage:    \"run [test-pattern] [flags]\",\n\t\t\t\tSetFlags: command.Flags(flax.MustBind, &mapConfig),\n\t\t\t\tRun:      runOnline,\n\t\t\t},\n\t\t\tcommand.HelpCommand(nil),\n\t\t},\n\t}\n\n\tenv := root.NewEnv(nil).MergeFlags(true)\n\tcommand.RunOrFail(env, os.Args[1:])\n}\n\n// runIntegrationTest executes the integration test workflow.\nfunc runOnline(env *command.Env) error {\n\tif mapConfig.Directory == \"\" {\n\t\treturn errDirectoryRequired\n\t}\n\n\tresps, err := mapper.ReadMapResponsesFromDirectory(mapConfig.Directory)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"reading map responses from directory: %w\", err)\n\t}\n\n\texpected := integrationutil.BuildExpectedOnlineMap(resps)\n\n\tout, err := json.MarshalIndent(expected, \"\", \"  \")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"marshaling expected online map: %w\", err)\n\t}\n\n\tos.Stderr.Write(out)\n\tos.Stderr.Write([]byte(\"\\n\"))\n\n\treturn nil\n}\n"
  },
  {
    "path": "config-example.yaml",
    "content": "---\n# headscale will look for a configuration file named `config.yaml` (or `config.json`) in the following order:\n#\n# - `/etc/headscale`\n# - `~/.headscale`\n# - current working directory\n\n# The url clients will connect to.\n# Typically this will be a domain like:\n#\n# https://myheadscale.example.com:443\n#\nserver_url: http://127.0.0.1:8080\n\n# Address to listen to / bind to on the server\n#\n# For production:\n# listen_addr: 0.0.0.0:8080\nlisten_addr: 127.0.0.1:8080\n\n# Address to listen to /metrics and /debug, you may want\n# to keep this endpoint private to your internal network\n# Use an emty value to disable the metrics listener.\nmetrics_listen_addr: 127.0.0.1:9090\n\n# Address to listen for gRPC.\n# gRPC is used for controlling a headscale server\n# remotely with the CLI\n# Note: Remote access _only_ works if you have\n# valid certificates.\n#\n# For production:\n# grpc_listen_addr: 0.0.0.0:50443\ngrpc_listen_addr: 127.0.0.1:50443\n\n# Allow the gRPC admin interface to run in INSECURE\n# mode. This is not recommended as the traffic will\n# be unencrypted. Only enable if you know what you\n# are doing.\ngrpc_allow_insecure: false\n\n# The Noise section includes specific configuration for the\n# TS2021 Noise protocol\nnoise:\n  # The Noise private key is used to encrypt the traffic between headscale and\n  # Tailscale clients when using the new Noise-based protocol. A missing key\n  # will be automatically generated.\n  private_key_path: /var/lib/headscale/noise_private.key\n\n# List of IP prefixes to allocate tailaddresses from.\n# Each prefix consists of either an IPv4 or IPv6 address,\n# and the associated prefix length, delimited by a slash.\n#\n# WARNING: These prefixes MUST be subsets of the standard Tailscale ranges:\n#   - IPv4: 100.64.0.0/10 (CGNAT range)\n#   - IPv6: fd7a:115c:a1e0::/48 (Tailscale ULA range)\n#\n# Using a SUBSET of these ranges is supported and useful if you want to\n# limit IP allocation to a smaller block (e.g., 100.64.0.0/24).\n#\n# Using ranges OUTSIDE of CGNAT/ULA is NOT supported and will cause\n# undefined behaviour. The Tailscale client has hard-coded assumptions\n# about these ranges and will break in subtle, hard-to-debug ways.\n#\n# See:\n# IPv4: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#L33\n# IPv6: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#LL81C52-L81C71\nprefixes:\n  v4: 100.64.0.0/10\n  v6: fd7a:115c:a1e0::/48\n\n  # Strategy used for allocation of IPs to nodes, available options:\n  # - sequential (default): assigns the next free IP from the previous given\n  #   IP. 
\n# DERP is a relay system that Tailscale uses when a direct\n# connection cannot be established.\n# https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp\n#\n# headscale needs a list of DERP servers that can be presented\n# to the clients.\nderp:\n  server:\n    # If enabled, runs the embedded DERP server and merges it into the rest of the DERP config\n    # The Headscale server_url defined above MUST be using https, as DERP requires TLS to be in place\n    enabled: false\n\n    # Region ID to use for the embedded DERP server.\n    # The local DERP prevails if the region ID collides with another region ID coming from\n    # the regular DERP config.\n    region_id: 999\n\n    # Region code and name are displayed in the Tailscale UI to identify a DERP region\n    region_code: \"headscale\"\n    region_name: \"Headscale Embedded DERP\"\n\n    # Only allow clients associated with this server access\n    verify_clients: true\n\n    # Listens over UDP at the configured address for STUN connections - to help with NAT traversal.\n    # When the embedded DERP server is enabled, stun_listen_addr MUST be defined.\n    #\n    # For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/\n    stun_listen_addr: \"0.0.0.0:3478\"\n\n    # Private key used to encrypt the traffic between headscale DERP and\n    # Tailscale clients. A missing key will be automatically generated.\n    private_key_path: /var/lib/headscale/derp_server_private.key\n\n    # This flag can be used so that the DERP map entry for the embedded DERP server is not written automatically;\n    # it enables the creation of your very own DERP map entry using a locally available file with the parameter DERP.paths.\n    # If you enable the DERP server and set this to false, it is required to add the DERP server to the DERP map using DERP.paths\n    automatically_add_embedded_derp_region: true\n\n    # For better connection stability (especially when using an Exit-Node and DNS is not working),\n    # it is possible to optionally add the public IPv4 and IPv6 address to the Derp-Map using:\n    ipv4: 198.51.100.1\n    ipv6: 2001:db8::1\n\n  # List of externally available DERP maps encoded in JSON\n  urls:\n    - https://controlplane.tailscale.com/derpmap/default\n\n  # Locally available DERP map files encoded in YAML\n  #\n  # This option is mostly interesting for people hosting\n  # their own DERP servers:\n  # https://tailscale.com/kb/1118/custom-derp-servers/\n  #\n  # paths:\n  #   - /etc/headscale/derp-example.yaml\n  paths: []\n\n  # If enabled, a worker will be set up to periodically\n  # refresh the given sources and update the derpmap.\n  auto_update_enabled: true\n\n  # How often should we check for DERP updates?\n  update_frequency: 3h\n\n# Disables the automatic check for headscale updates on startup\ndisable_check_updates: false\n\n# Time before an inactive ephemeral node is deleted.\nephemeral_node_inactivity_timeout: 30m\n\ndatabase:\n  # Database type. Available options: sqlite, postgres\n  # Please note that using Postgres is highly discouraged as it is only supported for legacy reasons.\n  # All new development, testing and optimisations are done with SQLite in mind.\n  type: sqlite\n\n  # Enable debug mode. 
This setting requires the log.level to be set to \"debug\" or \"trace\".\n  debug: false\n\n  # GORM configuration settings.\n  gorm:\n    # Enable prepared statements.\n    prepare_stmt: true\n\n    # Enable parameterized queries.\n    parameterized_queries: true\n\n    # Skip logging \"record not found\" errors.\n    skip_err_record_not_found: true\n\n    # Threshold for slow queries in milliseconds.\n    slow_threshold: 1000\n\n  # SQLite config\n  sqlite:\n    path: /var/lib/headscale/db.sqlite\n\n    # Enable WAL mode for SQLite. This is recommended for production environments.\n    # https://www.sqlite.org/wal.html\n    write_ahead_log: true\n\n    # Maximum number of WAL file frames before the WAL file is automatically checkpointed.\n    # https://www.sqlite.org/c3ref/wal_autocheckpoint.html\n    # Set to 0 to disable automatic checkpointing.\n    wal_autocheckpoint: 1000\n\n  # # Postgres config\n  # Please note that using Postgres is highly discouraged as it is only supported for legacy reasons.\n  # See database.type for more information.\n  # postgres:\n  #   # If using a Unix socket to connect to Postgres, set the socket path in the 'host' field and leave 'port' blank.\n  #   host: localhost\n  #   port: 5432\n  #   name: headscale\n  #   user: foo\n  #   pass: bar\n  #   max_open_conns: 10\n  #   max_idle_conns: 10\n  #   conn_max_idle_time_secs: 3600\n\n  #   # If other 'sslmode' is required instead of 'require(true)' and 'disabled(false)', set the 'sslmode' you need\n  #   # in the 'ssl' field. Refer to https://www.postgresql.org/docs/current/libpq-ssl.html Table 34.1.\n  #   ssl: false\n\n### TLS configuration\n#\n## Let's encrypt / ACME\n#\n# headscale supports automatically requesting and setting up\n# TLS for a domain with Let's Encrypt.\n#\n# URL to ACME directory\nacme_url: https://acme-v02.api.letsencrypt.org/directory\n\n# Email to register with ACME provider\nacme_email: \"\"\n\n# Domain name to request a TLS certificate for:\ntls_letsencrypt_hostname: \"\"\n\n# Path to store certificates and metadata needed by\n# letsencrypt\n# For production:\ntls_letsencrypt_cache_dir: /var/lib/headscale/cache\n\n# Type of ACME challenge to use, currently supported types:\n# HTTP-01 or TLS-ALPN-01\n# See: docs/ref/tls.md for more information\ntls_letsencrypt_challenge_type: HTTP-01\n# When HTTP-01 challenge is chosen, letsencrypt must set up a\n# verification endpoint, and it will be listening on:\n# :http = port 80\ntls_letsencrypt_listen: \":http\"\n\n## Use already defined certificates:\ntls_cert_path: \"\"\ntls_key_path: \"\"\n\nlog:\n  # Valid log levels: panic, fatal, error, warn, info, debug, trace\n  level: info\n\n  # Output formatting for logs: text or json\n  format: text\n\n## Policy\n# headscale supports Tailscale's ACL policies.\n# Please have a look at their KB to better\n# understand the concepts: https://tailscale.com/kb/1018/acls/\npolicy:\n  # The mode can be \"file\" or \"database\" and defines\n  # where the ACL policies are stored and read from.\n  mode: file\n  # If the mode is set to \"file\", the path to a\n  # HuJSON file containing ACL policies.\n  path: \"\"\n\n## DNS\n#\n# headscale supports Tailscale's DNS configuration and MagicDNS.\n# Please have a look at their KB to better understand the concepts:\n#\n# - https://tailscale.com/kb/1054/dns/\n# - https://tailscale.com/kb/1081/magicdns/\n# - https://tailscale.com/blog/2021-09-private-dns-with-magicdns/\n#\n# Please note that for the DNS configuration to have any effect,\n# clients must have the 
`--accept-dns=true` option enabled. This is the\n# default for the Tailscale client.\n#\n# Setting _any_ of the configuration and `--accept-dns=true` on the\n# clients will integrate with the DNS manager on the client or\n# overwrite /etc/resolv.conf.\n# https://tailscale.com/kb/1235/resolv-conf\n#\n# If you want to stop Headscale from managing the DNS configuration,\n# all the fields under `dns` should be set to empty values.\ndns:\n  # Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).\n  magic_dns: true\n\n  # Defines the base domain to create the hostnames for MagicDNS.\n  # This domain _must_ be different from the server_url domain.\n  # `base_domain` must be a FQDN, without the trailing dot.\n  # The FQDN of the hosts will be\n  # `hostname.base_domain` (e.g., _myhost.example.com_).\n  base_domain: example.com\n\n  # Whether to use the local DNS settings of a node or override the local DNS\n  # settings (default) and force the use of Headscale's DNS configuration.\n  override_local_dns: true\n\n  # List of DNS servers to expose to clients.\n  nameservers:\n    global:\n      - 1.1.1.1\n      - 1.0.0.1\n      - 2606:4700:4700::1111\n      - 2606:4700:4700::1001\n\n      # NextDNS (see https://tailscale.com/kb/1218/nextdns/).\n      # \"abc123\" is example NextDNS ID, replace with yours.\n      # - https://dns.nextdns.io/abc123\n\n    # Split DNS (see https://tailscale.com/kb/1054/dns/),\n    # a map of domains and which DNS server to use for each.\n    split: {}\n      # foo.bar.com:\n      #   - 1.1.1.1\n      # darp.headscale.net:\n      #   - 1.1.1.1\n      #   - 8.8.8.8\n\n  # Set custom DNS search domains. With MagicDNS enabled,\n  # your tailnet base_domain is always the first search domain.\n  search_domains: []\n\n  # Extra DNS records\n  # so far only A and AAAA records are supported (on the tailscale side)\n  # See: docs/ref/dns.md\n  extra_records: []\n  #   - name: \"grafana.myvpn.example.com\"\n  #     type: \"A\"\n  #     value: \"100.64.0.3\"\n  #\n  #   # you can also put it in one line\n  #   - { name: \"prometheus.myvpn.example.com\", type: \"A\", value: \"100.64.0.3\" }\n  #\n  # Alternatively, extra DNS records can be loaded from a JSON file.\n  # Headscale processes this file on each change.\n  # extra_records_path: /var/lib/headscale/extra-records.json\n\n# Unix socket used for the CLI to connect without authentication\n# Note: for production you will want to set this to something like:\nunix_socket: /var/run/headscale/headscale.sock\nunix_socket_permission: \"0770\"\n\n# OpenID Connect\n# oidc:\n#   # Block startup until the identity provider is available and healthy.\n#   only_start_if_oidc_is_available: true\n#\n#   # OpenID Connect Issuer URL from the identity provider\n#   issuer: \"https://your-oidc.issuer.com/path\"\n#\n#   # Client ID from the identity provider\n#   client_id: \"your-oidc-client-id\"\n#\n#   # Client secret generated by the identity provider\n#   # Note: client_secret and client_secret_path are mutually exclusive.\n#   client_secret: \"your-oidc-client-secret\"\n#   # Alternatively, set `client_secret_path` to read the secret from the file.\n#   # It resolves environment variables, making integration with systemd's\n#   # `LoadCredential` straightforward:\n#   client_secret_path: \"${CREDENTIALS_DIRECTORY}/oidc_client_secret\"\n#\n#   # The amount of time a node is authenticated with OpenID until it expires\n#   # and needs to reauthenticate.\n#   # Setting the value 
to \"0\" will mean no expiry.\n#   expiry: 180d\n#\n#   # Use the expiry from the token received from OpenID when the user logged\n#   # in. This will typically lead to frequent need to reauthenticate and should\n#   # only be enabled if you know what you are doing.\n#   # Note: enabling this will cause `oidc.expiry` to be ignored.\n#   use_expiry_from_token: false\n#\n#   # The OIDC scopes to use, defaults to \"openid\", \"profile\" and \"email\".\n#   # Custom scopes can be configured as needed, be sure to always include the\n#   # required \"openid\" scope.\n#   scope: [\"openid\", \"profile\", \"email\"]\n#\n#   # Only verified email addresses are synchronized to the user profile by\n#   # default. Unverified emails may be allowed in case an identity provider\n#   # does not send the \"email_verified: true\" claim or email verification is\n#   # not required.\n#   email_verified_required: true\n#\n#   # Provide custom key/value pairs which get sent to the identity provider's\n#   # authorization endpoint.\n#   extra_params:\n#     domain_hint: example.com\n#\n#   # Only accept users whose email domain is part of the allowed_domains list.\n#   allowed_domains:\n#     - example.com\n#\n#   # Only accept users whose email address is part of the allowed_users list.\n#   allowed_users:\n#     - alice@example.com\n#\n#   # Only accept users which are members of at least one group in the\n#   # allowed_groups list.\n#   allowed_groups:\n#     - /headscale\n#\n#   # Optional: PKCE (Proof Key for Code Exchange) configuration\n#   # PKCE adds an additional layer of security to the OAuth 2.0 authorization code flow\n#   # by preventing authorization code interception attacks\n#   # See https://datatracker.ietf.org/doc/html/rfc7636\n#   pkce:\n#     # Enable or disable PKCE support (default: false)\n#     enabled: false\n#\n#     # PKCE method to use:\n#     # - plain: Use plain code verifier\n#     # - S256: Use SHA256 hashed code verifier (default, recommended)\n#     method: S256\n\n# Logtail configuration\n# Logtail is Tailscales logging and auditing infrastructure, it allows the\n# control panel to instruct tailscale nodes to log their activity to a remote\n# server. To disable logging on the client side, please refer to:\n# https://tailscale.com/kb/1011/log-mesh-traffic#opting-out-of-client-logging\nlogtail:\n  # Enable logtail for tailscale nodes of this Headscale instance.\n  # As there is currently no support for overriding the log server in Headscale, this is\n  # disabled by default. Enabling this will make your clients send logs to Tailscale Inc.\n  enabled: false\n\n# Enabling this option makes devices prefer a random port for WireGuard traffic over the\n# default static port 41641. This option is intended as a workaround for some buggy\n# firewall devices. 
See https://tailscale.com/kb/1181/firewalls/ for more information.\nrandomize_client_port: false\n\n# Taildrop configuration\n# Taildrop is the file sharing feature of Tailscale, allowing nodes to send files to each other.\n# https://tailscale.com/kb/1106/taildrop/\ntaildrop:\n  # Enable or disable Taildrop for all nodes.\n  # When enabled, nodes can send files to other nodes owned by the same user.\n  # Tagged devices and cross-user transfers are not permitted by Tailscale clients.\n  enabled: true\n\n# Advanced performance tuning parameters.\n# The defaults are carefully chosen and should rarely need adjustment.\n# Only modify these if you have identified a specific performance issue.\n#\n# tuning:\n#   # NodeStore write batching configuration.\n#   # The NodeStore batches write operations before rebuilding peer relationships,\n#   # which is computationally expensive. Batching reduces rebuild frequency.\n#   #\n#   # node_store_batch_size: 100\n#   # node_store_batch_timeout: 500ms\n"
  },
  {
    "path": "derp-example.yaml",
    "content": "# If you plan to somehow use headscale, please deploy your own DERP infra: https://tailscale.com/kb/1118/custom-derp-servers/\nregions:\n  1: null # Disable DERP region with ID 1\n  900:\n    regionid: 900\n    regioncode: custom\n    regionname: My Region\n    nodes:\n      - name: 900a\n        regionid: 900\n        hostname: myderp.example.com\n        ipv4: 198.51.100.1\n        ipv6: 2001:db8::1\n        stunport: 0\n        stunonly: false\n        derpport: 0\n"
  },
  {
    "path": "docs/about/clients.md",
    "content": "# Client and operating system support\n\nWe aim to support the [**last 10 releases** of the Tailscale client](https://tailscale.com/changelog#client) on all\nprovided operating systems and platforms. Some platforms might require additional configuration to connect with\nheadscale.\n\n| OS      | Supports headscale                                                                                    |\n| ------- | ----------------------------------------------------------------------------------------------------- |\n| Linux   | Yes                                                                                                   |\n| OpenBSD | Yes                                                                                                   |\n| FreeBSD | Yes                                                                                                   |\n| Windows | Yes (see [docs](../usage/connect/windows.md) and `/windows` on your headscale for more information)   |\n| Android | Yes (see [docs](../usage/connect/android.md) for more information)                                    |\n| macOS   | Yes (see [docs](../usage/connect/apple.md#macos) and `/apple` on your headscale for more information) |\n| iOS     | Yes (see [docs](../usage/connect/apple.md#ios) and `/apple` on your headscale for more information)   |\n| tvOS    | Yes (see [docs](../usage/connect/apple.md#tvos) and `/apple` on your headscale for more information)  |\n"
  },
  {
    "path": "docs/about/contributing.md",
    "content": "{%\ninclude-markdown \"../../CONTRIBUTING.md\"\n%}\n"
  },
  {
    "path": "docs/about/faq.md",
    "content": "# Frequently Asked Questions\n\n## What is the design goal of headscale?\n\nHeadscale aims to implement a self-hosted, open source alternative to the\n[Tailscale](https://tailscale.com/) control server. Headscale's goal is to\nprovide self-hosters and hobbyists with an open-source server they can use for\ntheir projects and labs. It implements a narrow scope, a _single_ Tailscale\nnetwork (tailnet), suitable for a personal use, or a small open-source\norganisation.\n\n## How can I contribute?\n\nHeadscale is \"Open Source, acknowledged contribution\", this means that any\ncontribution will have to be discussed with the Maintainers before being submitted.\n\nPlease see [Contributing](contributing.md) for more information.\n\n## Why is 'acknowledged contribution' the chosen model?\n\nBoth maintainers have full-time jobs and families, and we want to avoid burnout. We also want to avoid frustration from contributors when their PRs are not accepted.\n\nWe are more than happy to exchange emails, or to have dedicated calls before a PR is submitted.\n\n## When/Why is Feature X going to be implemented?\n\nWe use [GitHub Milestones to plan for upcoming Headscale releases](https://github.com/juanfont/headscale/milestones).\nHave a look at [our current plan](https://github.com/juanfont/headscale/milestones) to get an idea when a specific\nfeature is about to be implemented. The release plan is subject to change at any time.\n\nIf you're interested in contributing, please post a feature request about it. Please be aware that there are a number of\nreasons why we might not accept specific contributions:\n\n- It is not possible to implement the feature in a way that makes sense in a self-hosted environment.\n- Given that we are reverse-engineering Tailscale to satisfy our own curiosity, we might be interested in implementing the feature ourselves.\n- You are not sending unit and integration tests with it.\n\n## Do you support Y method of deploying headscale?\n\nWe currently support deploying headscale using our binaries and the DEB packages. Visit our [installation guide using\nofficial releases](../setup/install/official.md) for more information.\n\nIn addition to that, you may use packages provided by the community or from distributions. Learn more in the\n[installation guide using community packages](../setup/install/community.md).\n\nFor convenience, we also [build container images with headscale](../setup/install/container.md). But **please be aware that\nwe don't officially support deploying headscale using Docker**. On our [Discord server](https://discord.gg/c84AZQhmpx)\nwe have a \"docker-issues\" channel where you can ask for Docker-specific help to the community.\n\n## What is the recommended update path? Can I skip multiple versions while updating?\n\nPlease follow the steps outlined in the [upgrade guide](../setup/upgrade.md) to update your existing Headscale\ninstallation. Its required to update from one stable version to the next (e.g. 0.26.0 → 0.27.1 → 0.28.0) without\nskipping minor versions in between. You should always pick the latest available patch release.\n\nBe sure to check the [changelog](https://github.com/juanfont/headscale/blob/main/CHANGELOG.md) for version specific\nupgrade instructions and breaking changes.\n\n## Scaling / How many clients does Headscale support?\n\nIt depends. As often stated, Headscale is not enterprise software and our focus\nis homelabbers and self-hosters. 
Of course, we do not prevent people from using\nit in a commercial/professional setting and often get questions about scaling.\n\nPlease note that performance is not part of the consideration when Headscale is\ndeveloped, as the main audience is considered to be users with a modest number\nof devices. We focus on correctness and feature parity with Tailscale SaaS over\ntime.\n\nTo understand if you might be able to use Headscale for your use case, I will\ndescribe two scenarios in an effort to explain the central bottleneck of\nHeadscale:\n\n1. An environment with 1000 servers\n\n    - they rarely \"move\" (change their endpoints)\n    - new nodes are added rarely\n\n1. An environment with 80 laptops/phones (end user devices)\n\n    - nodes move often, e.g. switching from home to office\n\nHeadscale calculates a map of all nodes that need to talk to each other;\ncreating this \"world map\" requires a lot of CPU time. When an event that\nrequires changes to this map happens, the whole \"world\" is recalculated, and a\nnew \"world map\" is created for every node in the network.\n\nThis means that under certain conditions, Headscale can likely handle 100s\nof devices (maybe more), if there is _little to no change_ happening in the\nnetwork. For example, in Scenario 1, the process of computing the world map is\nextremely demanding due to the size of the network, but when the map has been\ncreated and the nodes are not changing, the Headscale instance will likely\nreturn to a very low resource usage until the next time there is an event\nrequiring a new map.\n\nIn the case of Scenario 2, the process of computing the world map is less\ndemanding due to the smaller size of the network; however, these nodes will\nlikely change frequently, which would lead to constant resource usage.\n
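\nAs a rough, illustrative back-of-envelope (not a measured benchmark): an event\ntriggers a new map for every node, and each map describes that node's peers, so\none change in a tailnet of n fully interconnected nodes touches on the order of\nn × (n - 1) peer entries. In Scenario 1 that is roughly 1000 × 999, about one\nmillion entries per event; in Scenario 2 it is roughly 80 × 79, about 6300. A\nlarge network therefore amplifies every single change, which is why many nodes\ncombined with frequent changes is the worst case.\n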
\nHeadscale will start to struggle when the two scenarios overlap, e.g. many nodes\nwith frequent changes will cause the resource usage to remain constantly high.\nIn the worst-case scenario, the queue of nodes waiting for their map will grow\nto a point where Headscale will never be able to catch up, and nodes will never\nlearn about the current state of the world.\n\nWe expect that the performance will improve over time as we improve the code\nbase, but it is not a focus. In general, we will never make the tradeoff to make\nthings faster at the cost of less maintainable or readable code. We are a small\nteam and have to optimise for maintainability.\n\n## Which database should I use?\n\nWe recommend the use of SQLite as the database for headscale:\n\n- SQLite is simple to set up and easy to use\n- It scales well for all of headscale's use cases\n- Development and testing happens primarily on SQLite\n- PostgreSQL is still supported, but is considered to be in \"maintenance mode\"\n\nThe headscale project itself does not provide a tool to migrate from PostgreSQL to SQLite. Please have a look at [the\nrelated tools documentation](../ref/integration/tools.md) for migration tooling provided by the community.\n\nThe choice of database has little to no impact on the performance of the server;\nsee [Scaling / How many clients does Headscale support?](#scaling-how-many-clients-does-headscale-support) for understanding how Headscale spends its resources.\n\n## Why is my reverse proxy not working with headscale?\n\nWe don't know. We don't use reverse proxies with headscale ourselves, so we don't have any experience with them. We have\n[community documentation](../ref/integration/reverse-proxy.md) on how to configure various reverse proxies, and a\ndedicated \"reverse-proxy-issues\" channel on our [Discord server](https://discord.gg/c84AZQhmpx) where you can ask the\ncommunity for help.\n\n## Can I use headscale and tailscale on the same machine?\n\nRunning headscale on a machine that is also in the tailnet can cause problems with subnet routers, traffic relay nodes, and MagicDNS. It might work, but it is not supported.\n\n## Why do two nodes see each other in their status, even if an ACL allows traffic only in one direction?\n\nA frequent use case is to allow traffic only from one node to another, but not the other way around. For example, the\nworkstation of an administrator should be able to connect to all nodes but the nodes themselves shouldn't be able to\nconnect back to the administrator's node. Why do all nodes see the administrator's workstation in the output of\n`tailscale status`?\n\nThis is essentially how Tailscale works. If traffic is allowed to flow in one direction, then both nodes see each other\nin their output of `tailscale status`. Traffic is still filtered according to the ACL, with the exception of\n`tailscale ping` which is always allowed in either direction.\n\nSee also <https://tailscale.com/kb/1087/device-visibility>.\n
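\nAs an illustrative sketch (the user `admin@` is a placeholder, not a\npredefined name), a HuJSON policy like the following allows only the\nadministrator's node to initiate connections, yet both nodes will still list\neach other in `tailscale status`:\n\n```json\n{\n  \"acls\": [\n    // Only admin may connect out; no rule allows connections back to admin.\n    {\"action\": \"accept\", \"src\": [\"admin@\"], \"dst\": [\"*:*\"]}\n  ]\n}\n```\n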
\n## My policy is stored in the database and Headscale refuses to start due to an invalid policy. How can I recover?\n\nHeadscale checks if the policy is valid during startup and refuses to start if it detects an error. The error message\nindicates which part of the policy is invalid. Follow these steps to fix your policy:\n\n- Dump the policy to a file: `headscale policy get --bypass-grpc-and-access-database-directly > policy.json`\n- Edit and fix up `policy.json`. Use the command `headscale policy check --file policy.json` to validate the policy.\n- Load the modified policy: `headscale policy set --bypass-grpc-and-access-database-directly --file policy.json`\n- Start Headscale as usual.\n\n!!! warning \"Full server configuration required\"\n\n    The above commands to get/set the policy require a complete server configuration file including database settings. A\n    minimal config to [control Headscale via remote CLI](../ref/api.md#grpc) is not sufficient. You may use\n    `headscale -c /path/to/config.yaml` to specify the path to an alternative configuration file.\n\n## How can I migrate back to the recommended IP prefixes?\n\nTailscale only supports the IP prefixes `100.64.0.0/10` and `fd7a:115c:a1e0::/48` or smaller subnets thereof. The\nfollowing steps can be used to migrate from unsupported IP prefixes back to the supported and recommended ones.\n\n!!! warning \"Backup and test in a demo environment required\"\n\n    The commands below update the IP addresses of all nodes in your tailnet and this might have a severe impact on your\n    specific environment. At a minimum:\n\n    - [Create a backup of your database](../setup/upgrade.md#backup)\n    - Test the commands below in a representative demo environment. This allows you to catch subsequent connectivity errors\n      early and see how the tailnet behaves in your specific environment.\n\n- Stop Headscale\n- Restore the default prefixes in the [configuration file](../ref/configuration.md):\n    ```yaml\n    prefixes:\n      v4: 100.64.0.0/10\n      v6: fd7a:115c:a1e0::/48\n    ```\n- Update the `nodes.ipv4` and `nodes.ipv6` columns in the database and assign each node a unique IPv4 and IPv6 address.\n  The following SQL statement assigns IP addresses based on the node ID:\n    ```sql\n    UPDATE nodes\n    SET ipv4=concat('100.64.', id/256, '.', id%256),\n        ipv6=concat('fd7a:115c:a1e0::', format('%x', id));\n    ```\n- Update the [policy](../ref/acls.md) to reflect the IP address changes (if any)\n- Start Headscale\n\nNodes should reconnect within a few seconds and pick up their newly assigned IP addresses.\n\n## How can I avoid sending logs to Tailscale Inc?\n\nA Tailscale client [collects logs about its operation and connection attempts with other\nclients](https://tailscale.com/kb/1011/log-mesh-traffic#client-logs) and sends them to a central log service operated by\nTailscale Inc.\n\nHeadscale, by default, instructs clients to disable log submission to the central log service. This configuration is\napplied by a client once it has successfully connected to Headscale. See the configuration option `logtail.enabled` in the\n[configuration file](../ref/configuration.md) for details.\n\nAlternatively, logging can also be disabled on the client side. This is independent of Headscale and opting out of\nclient logging disables log submission early during client startup. The configuration is operating system specific and\nis usually achieved by setting the environment variable `TS_NO_LOGS_NO_SUPPORT=true` or by passing the flag\n`--no-logs-no-support` to `tailscaled`. See\n<https://tailscale.com/kb/1011/log-mesh-traffic#opting-out-of-client-logging> for details.\n"
  },
  {
    "path": "docs/about/features.md",
    "content": "# Features\n\nHeadscale aims to implement a self-hosted, open source alternative to the Tailscale control server. Headscale's goal is\nto provide self-hosters and hobbyists with an open-source server they can use for their projects and labs. This page\nprovides on overview of Headscale's feature and compatibility with the Tailscale control server:\n\n- [x] Full \"base\" support of Tailscale's features\n- [x] [Node registration](../ref/registration.md)\n    - [x] [Web authentication](../ref/registration.md#web-authentication)\n    - [x] [Pre authenticated key](../ref/registration.md#pre-authenticated-key)\n- [x] [DNS](../ref/dns.md)\n    - [x] [MagicDNS](https://tailscale.com/kb/1081/magicdns)\n    - [x] [Global and restricted nameservers (split DNS)](https://tailscale.com/kb/1054/dns#nameservers)\n    - [x] [search domains](https://tailscale.com/kb/1054/dns#search-domains)\n    - [x] [Extra DNS records (Headscale only)](../ref/dns.md#setting-extra-dns-records)\n- [x] [Taildrop (File Sharing)](https://tailscale.com/kb/1106/taildrop)\n- [x] [Tags](../ref/tags.md)\n- [x] [Routes](../ref/routes.md)\n    - [x] [Subnet routers](../ref/routes.md#subnet-router)\n    - [x] [Exit nodes](../ref/routes.md#exit-node)\n- [x] Dual stack (IPv4 and IPv6)\n- [x] Ephemeral nodes\n- [x] Embedded [DERP server](../ref/derp.md)\n- [x] Access control lists ([GitHub label \"policy\"](https://github.com/juanfont/headscale/labels/policy%20%F0%9F%93%9D))\n    - [x] ACL management via API\n    - [x] Some [Autogroups](https://tailscale.com/kb/1396/targets#autogroups), currently: `autogroup:internet`,\n      `autogroup:nonroot`, `autogroup:member`, `autogroup:tagged`, `autogroup:self`\n    - [x] [Auto approvers](https://tailscale.com/kb/1337/acl-syntax#auto-approvers) for [subnet\n      routers](../ref/routes.md#automatically-approve-routes-of-a-subnet-router) and [exit\n      nodes](../ref/routes.md#automatically-approve-an-exit-node-with-auto-approvers)\n    - [x] [Tailscale SSH](https://tailscale.com/kb/1193/tailscale-ssh)\n- [x] [Node registration using Single-Sign-On (OpenID Connect)](../ref/oidc.md) ([GitHub label \"OIDC\"](https://github.com/juanfont/headscale/labels/OIDC))\n    - [x] Basic registration\n    - [x] Update user profile from identity provider\n    - [ ] OIDC groups cannot be used in ACLs\n- [ ] [Funnel](https://tailscale.com/kb/1223/funnel) ([#1040](https://github.com/juanfont/headscale/issues/1040))\n- [ ] [Serve](https://tailscale.com/kb/1312/serve) ([#1234](https://github.com/juanfont/headscale/issues/1921))\n- [ ] [Network flow logs](https://tailscale.com/kb/1219/network-flow-logs) ([#1687](https://github.com/juanfont/headscale/issues/1687))\n"
  },
  {
    "path": "docs/about/help.md",
    "content": "# Getting help\n\nJoin our [Discord server](https://discord.gg/c84AZQhmpx) for announcements and community support.\n\nPlease report bugs via [GitHub issues](https://github.com/juanfont/headscale/issues)\n"
  },
  {
    "path": "docs/about/releases.md",
    "content": "# Releases\n\nAll headscale releases are available on the [GitHub release page](https://github.com/juanfont/headscale/releases). Those\nreleases are available as binaries for various platforms and architectures, packages for Debian based systems and source\ncode archives. Container images are available on [Docker Hub](https://hub.docker.com/r/headscale/headscale) and\n[GitHub Container Registry](https://github.com/juanfont/headscale/pkgs/container/headscale).\n\nAn Atom/RSS feed of headscale releases is available [here](https://github.com/juanfont/headscale/releases.atom).\n\nSee the \"announcements\" channel on our [Discord server](https://discord.gg/c84AZQhmpx) for news about headscale.\n"
  },
  {
    "path": "docs/about/sponsor.md",
    "content": "# Sponsor\n\nIf you like to support the development of headscale, please consider a donation via\n[ko-fi.com/headscale](https://ko-fi.com/headscale). Thank you!\n"
  },
  {
    "path": "docs/index.md",
    "content": "---\nhide:\n  - navigation\n  - toc\n---\n\n# Welcome to headscale\n\nHeadscale is an open source, self-hosted implementation of the Tailscale control server.\n\nThis page contains the documentation for the latest version of headscale. Please also check our [FAQ](./about/faq.md).\n\nJoin our [Discord server](https://discord.gg/c84AZQhmpx) for a chat and community support.\n\n## Design goal\n\nHeadscale aims to implement a self-hosted, open source alternative to the\n[Tailscale](https://tailscale.com/) control server. Headscale's goal is to\nprovide self-hosters and hobbyists with an open-source server they can use for\ntheir projects and labs. It implements a narrow scope, a _single_ Tailscale\nnetwork (tailnet), suitable for a personal use, or a small open-source\norganisation.\n\n## Supporting headscale\n\nPlease see [Sponsor](about/sponsor.md) for more information.\n\n## Contributing\n\nHeadscale is \"Open Source, acknowledged contribution\", this means that any\ncontribution will have to be discussed with the Maintainers before being submitted.\n\nPlease see [Contributing](about/contributing.md) for more information.\n\n## About\n\nHeadscale is maintained by [Kristoffer Dalby](https://kradalby.no/) and [Juan Font](https://font.eu).\n"
  },
  {
    "path": "docs/ref/acls.md",
    "content": "Headscale implements the same policy ACLs as Tailscale.com, adapted to the self-hosted environment.\n\nFor instance, instead of referring to users when defining groups you must\nuse users (which are the equivalent to user/logins in Tailscale.com).\n\nPlease check https://tailscale.com/kb/1018/acls/ for further information.\n\nWhen using ACL's the User borders are no longer applied. All machines\nwhichever the User have the ability to communicate with other hosts as\nlong as the ACL's permits this exchange.\n\n## ACL Setup\n\nTo enable and configure ACLs in Headscale, you need to specify the path to your ACL policy file in the `policy.path` key in `config.yaml`.\n\nYour ACL policy file must be formatted using [huJSON](https://github.com/tailscale/hujson).\n\nInfo on how these policies are written can be found\n[here](https://tailscale.com/kb/1018/acls/).\n\nPlease reload or restart Headscale after updating the ACL file. Headscale may be reloaded either via its systemd service\n(`sudo systemctl reload headscale`) or by sending a SIGHUP signal (`sudo kill -HUP $(pidof headscale)`) to the main\nprocess. Headscale logs the result of ACL policy processing after each reload.\n\n## Simple Examples\n\n- [**Allow All**](https://tailscale.com/kb/1192/acl-samples#allow-all-default-acl): If you define an ACL file but completely omit the `\"acls\"` field from its content, Headscale will default to an \"allow all\" policy. This means all devices connected to your tailnet will be able to communicate freely with each other.\n\n    ```json\n    {}\n    ```\n\n- [**Deny All**](https://tailscale.com/kb/1192/acl-samples#deny-all): To prevent all communication within your tailnet, you can include an empty array for the `\"acls\"` field in your policy file.\n\n    ```json\n    {\n      \"acls\": []\n    }\n    ```\n\n## Complex Example\n\nLet's build a more complex example use case for a small business (It may be the place where\nACL's are the most useful).\n\nWe have a small company with a boss, an admin, two developers and an intern.\n\nThe boss should have access to all servers but not to the user's hosts. Admin\nshould also have access to all hosts except that their permissions should be\nlimited to maintaining the hosts (for example purposes). The developers can do\nanything they want on dev hosts but only watch on productions hosts. Intern\ncan only interact with the development servers.\n\nThere's an additional server that acts as a router, connecting the VPN users\nto an internal network `10.20.0.0/16`. Developers must have access to those\ninternal resources.\n\nEach user have at least a device connected to the network and we have some\nservers.\n\n- database.prod\n- database.dev\n- app-server1.prod\n- app-server1.dev\n- billing.internal\n- router.internal\n\n![ACL implementation example](../assets/images/headscale-acl-network.png)\n\nWhen [registering the servers](../usage/getting-started.md#register-a-node) we\nwill need to add the flag `--advertise-tags=tag:<tag1>,tag:<tag2>`, and the user\nthat is registering the server should be allowed to do it. Since anyone can add\ntags to a server they can register, the check of the tags is done on headscale\nserver and only valid tags are applied. A tag is valid if the user that is\nregistering it is allowed to do it.\n\nHere are the ACL's to implement the same permissions as above:\n\n```json title=\"acl.json\"\n{\n  // groups are collections of users having a common scope. 
\n  \"groups\": {\n    \"group:boss\": [\"boss@\"],\n    \"group:dev\": [\"dev1@\", \"dev2@\"],\n    \"group:admin\": [\"admin1@\"],\n    \"group:intern\": [\"intern1@\"]\n  },\n  // tagOwners in tailscale is an association between a TAG and the people allowed to set this TAG on a server.\n  // This is documented [here](https://tailscale.com/kb/1068/acl-tags#defining-a-tag)\n  // and explained [here](https://tailscale.com/blog/rbac-like-it-was-meant-to-be/)\n  \"tagOwners\": {\n    // the administrators can add servers in production\n    \"tag:prod-databases\": [\"group:admin\"],\n    \"tag:prod-app-servers\": [\"group:admin\"],\n\n    // the boss can tag any server as internal\n    \"tag:internal\": [\"group:boss\"],\n\n    // dev can add servers for dev purposes as well as admins\n    \"tag:dev-databases\": [\"group:admin\", \"group:dev\"],\n    \"tag:dev-app-servers\": [\"group:admin\", \"group:dev\"]\n\n    // interns cannot add servers\n  },\n  // hosts should be defined using their IP addresses and a subnet mask.\n  // to define a single host, use a /32 mask. You cannot use DNS entries here,\n  // as they're prone to be hijacked by replacing their IP addresses.\n  // see https://github.com/tailscale/tailscale/issues/3800 for more information.\n  \"hosts\": {\n    \"postgresql.internal\": \"10.20.0.2/32\",\n    \"webservers.internal\": \"10.20.10.1/29\"\n  },\n  \"acls\": [\n    // the boss has access to all servers\n    {\n      \"action\": \"accept\",\n      \"src\": [\"group:boss\"],\n      \"dst\": [\n        \"tag:prod-databases:*\",\n        \"tag:prod-app-servers:*\",\n        \"tag:internal:*\",\n        \"tag:dev-databases:*\",\n        \"tag:dev-app-servers:*\"\n      ]\n    },\n\n    // admins only have access to the administrative port of the servers, tcp/22\n    {\n      \"action\": \"accept\",\n      \"src\": [\"group:admin\"],\n      \"proto\": \"tcp\",\n      \"dst\": [\n        \"tag:prod-databases:22\",\n        \"tag:prod-app-servers:22\",\n        \"tag:internal:22\",\n        \"tag:dev-databases:22\",\n        \"tag:dev-app-servers:22\"\n      ]\n    },\n\n    // we also allow admins to ping the servers\n    {\n      \"action\": \"accept\",\n      \"src\": [\"group:admin\"],\n      \"proto\": \"icmp\",\n      \"dst\": [\n        \"tag:prod-databases:*\",\n        \"tag:prod-app-servers:*\",\n        \"tag:internal:*\",\n        \"tag:dev-databases:*\",\n        \"tag:dev-app-servers:*\"\n      ]\n    },\n\n    // developers have access to database servers and application servers on all ports\n    // they can only view the application servers in prod and have no access to database servers in production\n    {\n      \"action\": \"accept\",\n      \"src\": [\"group:dev\"],\n      \"dst\": [\n        \"tag:dev-databases:*\",\n        \"tag:dev-app-servers:*\",\n        \"tag:prod-app-servers:80,443\"\n      ]\n    },\n    // developers have access to the internal network through the router.\n    // the internal network is composed of HTTPS endpoints and PostgreSQL\n    // database servers.\n    {\n      \"action\": \"accept\",\n      \"src\": [\"group:dev\"],\n      \"dst\": [\"10.20.0.0/16:443,5432\"]\n    },\n\n    // servers should be able to talk to databases on tcp/5432. Databases should not\n    // be able to initiate connections to application servers.
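\n    // (ACL rules control who may initiate a connection; reply traffic of an\n    // established connection is allowed automatically)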
\n    {\n      \"action\": \"accept\",\n      \"src\": [\"tag:dev-app-servers\"],\n      \"proto\": \"tcp\",\n      \"dst\": [\"tag:dev-databases:5432\"]\n    },\n    {\n      \"action\": \"accept\",\n      \"src\": [\"tag:prod-app-servers\"],\n      \"dst\": [\"tag:prod-databases:5432\"]\n    },\n\n    // interns have read-only (http/https) access to dev-app-servers\n    {\n      \"action\": \"accept\",\n      \"src\": [\"group:intern\"],\n      \"dst\": [\"tag:dev-app-servers:80,443\"]\n    },\n\n    // Allow users to access their own devices using autogroup:self (see below for more details about performance impact)\n    {\n      \"action\": \"accept\",\n      \"src\": [\"autogroup:member\"],\n      \"dst\": [\"autogroup:self:*\"]\n    }\n  ]\n}\n```\n\n## Autogroups\n\nHeadscale supports several autogroups that automatically include users, destinations, or devices with specific properties. Autogroups provide a convenient way to write ACL rules without manually listing individual users or devices.\n\n### `autogroup:internet`\n\nAllows access to the internet through [exit nodes](routes.md#exit-node). Can only be used in ACL destinations.\n\n```json\n{\n  \"action\": \"accept\",\n  \"src\": [\"group:users\"],\n  \"dst\": [\"autogroup:internet:*\"]\n}\n```\n\n### `autogroup:member`\n\nIncludes all [personal (untagged) devices](registration.md#identity-model).\n\n```json\n{\n  \"action\": \"accept\",\n  \"src\": [\"autogroup:member\"],\n  \"dst\": [\"tag:prod-app-servers:80,443\"]\n}\n```\n\n### `autogroup:tagged`\n\nIncludes all devices that [have at least one tag](registration.md#identity-model).\n\n```json\n{\n  \"action\": \"accept\",\n  \"src\": [\"autogroup:tagged\"],\n  \"dst\": [\"tag:monitoring:9090\"]\n}\n```\n\n### `autogroup:self`\n\n!!! warning \"The current implementation of `autogroup:self` is inefficient\"\n\nIncludes devices where the same user is authenticated on both the source and destination. Does not include tagged devices. Can only be used in ACL destinations.\n\n```json\n{\n  \"action\": \"accept\",\n  \"src\": [\"autogroup:member\"],\n  \"dst\": [\"autogroup:self:*\"]\n}\n```\n\n*Using `autogroup:self` may cause performance degradation on the Headscale coordinator server in large deployments, as filter rules must be compiled per-node rather than globally and the current implementation is not very efficient.*\n\nIf you experience performance issues, consider using more specific ACL rules or limiting the use of `autogroup:self`.\n\n```json\n[\n  // The following rules allow internal users to communicate with their\n  // own nodes in case autogroup:self is causing performance issues.\n  { \"action\": \"accept\", \"src\": [\"boss@\"], \"dst\": [\"boss@:*\"] },\n  { \"action\": \"accept\", \"src\": [\"dev1@\"], \"dst\": [\"dev1@:*\"] },\n  { \"action\": \"accept\", \"src\": [\"dev2@\"], \"dst\": [\"dev2@:*\"] },\n  { \"action\": \"accept\", \"src\": [\"admin1@\"], \"dst\": [\"admin1@:*\"] },\n  { \"action\": \"accept\", \"src\": [\"intern1@\"], \"dst\": [\"intern1@:*\"] }\n]\n```\n\n### `autogroup:nonroot`\n\nUsed in Tailscale SSH rules to allow access to any user except root. Can only be used in the `users` field of SSH rules.\n\n```json\n{\n  \"action\": \"accept\",\n  \"src\": [\"autogroup:member\"],\n  \"dst\": [\"autogroup:self\"],\n  \"users\": [\"autogroup:nonroot\"]\n}\n```\n"
  },
  {
    "path": "docs/ref/api.md",
    "content": "# API\n\nHeadscale provides a [HTTP REST API](#rest-api) and a [gRPC interface](#grpc) which may be used to integrate a [web\ninterface](integration/web-ui.md), [remote control Headscale](#setup-remote-control) or provide a base for custom\nintegration and tooling.\n\nBoth interfaces require a valid API key before use. To create an API key, log into your Headscale server and generate\none with the default expiration of 90 days:\n\n```shell\nheadscale apikeys create\n```\n\nCopy the output of the command and save it for later. Please note that you can not retrieve an API key again. If the API\nkey is lost, expire the old one, and create a new one.\n\nTo list the API keys currently associated with the server:\n\n```shell\nheadscale apikeys list\n```\n\nand to expire an API key:\n\n```shell\nheadscale apikeys expire --prefix <PREFIX>\n```\n\n## REST API\n\n- API endpoint: `/api/v1`, e.g. `https://headscale.example.com/api/v1`\n- Documentation: `/swagger`, e.g. `https://headscale.example.com/swagger`\n- Headscale Version: `/version`, e.g. `https://headscale.example.com/version`\n- Authenticate using HTTP Bearer authentication by sending the [API key](#api) with the HTTP `Authorization: Bearer <API_KEY>` header.\n\nStart by [creating an API key](#api) and test it with the examples below. Read the API documentation provided by your\nHeadscale server at `/swagger` for details.\n\n=== \"Get details for all users\"\n\n    ```console\n    curl -H \"Authorization: Bearer <API_KEY>\" \\\n        https://headscale.example.com/api/v1/user\n    ```\n\n=== \"Get details for user 'bob'\"\n\n    ```console\n    curl -H \"Authorization: Bearer <API_KEY>\" \\\n        https://headscale.example.com/api/v1/user?name=bob\n    ```\n\n=== \"Register a node\"\n\n    ```console\n    curl -H \"Authorization: Bearer <API_KEY>\" \\\n        --json '{\"user\": \"<USER>\", \"authId\": \"AUTH_ID>\"}' \\\n        https://headscale.example.com/api/v1/auth/register\n    ```\n\n## gRPC\n\nThe gRPC interface can be used to control a Headscale instance from a remote machine with the `headscale` binary.\n\n### Prerequisite\n\n- A workstation to run `headscale` (any supported platform, e.g. Linux).\n- A Headscale server with gRPC enabled.\n- Connections to the gRPC port (default: `50443`) are allowed.\n- Remote access requires an encrypted connection via TLS.\n- An [API key](#api) to authenticate with the Headscale server.\n\n### Setup remote control\n\n1. Download the [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases). Make\n   sure to use the same version as on the server.\n\n1. Put the binary somewhere in your `PATH`, e.g. `/usr/local/bin/headscale`\n\n1. Make `headscale` executable: `chmod +x /usr/local/bin/headscale`\n\n1. [Create an API key](#api) on the Headscale server.\n\n1. Provide the connection parameters for the remote Headscale server either via a minimal YAML configuration file or\n   via environment variables:\n\n    === \"Minimal YAML configuration file\"\n\n        ```yaml title=\"config.yaml\"\n        cli:\n            address: <HEADSCALE_ADDRESS>:<PORT>\n            api_key: <API_KEY>\n        ```\n\n    === \"Environment variables\"\n\n        ```shell\n        export HEADSCALE_CLI_ADDRESS=\"<HEADSCALE_ADDRESS>:<PORT>\"\n        export HEADSCALE_CLI_API_KEY=\"<API_KEY>\"\n        ```\n\n    This instructs the `headscale` binary to connect to a remote instance at `<HEADSCALE_ADDRESS>:<PORT>`, instead of\n    connecting to the local instance.\n\n1. 
\n\n## gRPC\n\nThe gRPC interface can be used to control a Headscale instance from a remote machine with the `headscale` binary.\n\n### Prerequisite\n\n- A workstation to run `headscale` (any supported platform, e.g. Linux).\n- A Headscale server with gRPC enabled.\n- Connections to the gRPC port (default: `50443`) are allowed.\n- Remote access requires an encrypted connection via TLS.\n- An [API key](#api) to authenticate with the Headscale server.\n\n### Setup remote control\n\n1. Download the [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases). Make\n   sure to use the same version as on the server.\n\n1. Put the binary somewhere in your `PATH`, e.g. `/usr/local/bin/headscale`\n\n1. Make `headscale` executable: `chmod +x /usr/local/bin/headscale`\n\n1. [Create an API key](#api) on the Headscale server.\n\n1. Provide the connection parameters for the remote Headscale server either via a minimal YAML configuration file or\n   via environment variables:\n\n    === \"Minimal YAML configuration file\"\n\n        ```yaml title=\"config.yaml\"\n        cli:\n            address: <HEADSCALE_ADDRESS>:<PORT>\n            api_key: <API_KEY>\n        ```\n\n    === \"Environment variables\"\n\n        ```shell\n        export HEADSCALE_CLI_ADDRESS=\"<HEADSCALE_ADDRESS>:<PORT>\"\n        export HEADSCALE_CLI_API_KEY=\"<API_KEY>\"\n        ```\n\n    This instructs the `headscale` binary to connect to a remote instance at `<HEADSCALE_ADDRESS>:<PORT>`, instead of\n    connecting to the local instance.\n\n1. Test the connection by listing all nodes:\n\n    ```shell\n    headscale nodes list\n    ```\n\n    You should now see a list of your nodes and be able to control the Headscale server from your workstation.\n\n### Behind a proxy\n\nIt's possible to run the gRPC remote endpoint behind a reverse proxy, like Nginx, and have it run on the _same_ port as Headscale.\n\nWhile this is _not a supported_ feature, an example on how this can be set up on\n[NixOS is shown here](https://github.com/kradalby/dotfiles/blob/4489cdbb19cddfbfae82cd70448a38fde5a76711/machines/headscale.oracldn/headscale.nix#L61-L91).\n\n### Troubleshooting\n\n- Make sure you have the _same_ Headscale version on your server and workstation.\n- Ensure that connections to the gRPC port are allowed.\n- Verify that your TLS certificate is valid and trusted.\n- If you don't have access to a trusted certificate (e.g. from Let's Encrypt), either:\n    - Add your self-signed certificate to the trust store of your OS _or_\n    - Disable certificate verification by either setting `cli.insecure: true` in the configuration file or by setting\n      `HEADSCALE_CLI_INSECURE=1` via an environment variable. We do **not** recommend disabling certificate validation.\n"
  },
  {
    "path": "docs/ref/configuration.md",
    "content": "# Configuration\n\n- Headscale loads its configuration from a YAML file\n- It searches for `config.yaml` in the following paths:\n    - `/etc/headscale`\n    - `$HOME/.headscale`\n    - the current working directory\n- To load the configuration from a different path, use:\n    - the command line flag `-c`, `--config`\n    - the environment variable `HEADSCALE_CONFIG`\n- Validate the configuration file with: `headscale configtest`\n\n!!! example \"Get the [example configuration from the GitHub repository](https://github.com/juanfont/headscale/blob/main/config-example.yaml)\"\n\n    Always select the [same GitHub tag](https://github.com/juanfont/headscale/tags) as the released version you use to\n    ensure you have the correct example configuration. The `main` branch might contain unreleased changes.\n\n    === \"View on GitHub\"\n\n        - Development version: <https://github.com/juanfont/headscale/blob/main/config-example.yaml>\n        - Version {{ headscale.version }}: https://github.com/juanfont/headscale/blob/v{{ headscale.version }}/config-example.yaml\n\n    === \"Download with `wget`\"\n\n        ```shell\n        # Development version\n        wget -O config.yaml https://raw.githubusercontent.com/juanfont/headscale/main/config-example.yaml\n\n        # Version {{ headscale.version }}\n        wget -O config.yaml https://raw.githubusercontent.com/juanfont/headscale/v{{ headscale.version }}/config-example.yaml\n        ```\n\n    === \"Download with `curl`\"\n\n        ```shell\n        # Development version\n        curl -o config.yaml https://raw.githubusercontent.com/juanfont/headscale/main/config-example.yaml\n\n        # Version {{ headscale.version }}\n        curl -o config.yaml https://raw.githubusercontent.com/juanfont/headscale/v{{ headscale.version }}/config-example.yaml\n        ```\n"
  },
  {
    "path": "docs/ref/debug.md",
    "content": "# Debugging and troubleshooting\n\nHeadscale and Tailscale provide debug and introspection capabilities that can be helpful when things don't work as\nexpected. This page explains some debugging techniques to help pinpoint problems.\n\nPlease also have a look at [Tailscale's Troubleshooting guide](https://tailscale.com/kb/1023/troubleshooting). It offers\na many tips and suggestions to troubleshoot common issues.\n\n## Tailscale\n\nThe Tailscale client itself offers many commands to introspect its state as well as the state of the network:\n\n- [Check local network conditions](https://tailscale.com/kb/1080/cli#netcheck): `tailscale netcheck`\n- [Get the client status](https://tailscale.com/kb/1080/cli#status): `tailscale status --json`\n- [Get DNS status](https://tailscale.com/kb/1080/cli#dns): `tailscale dns status --all`\n- Client logs: `tailscale debug daemon-logs`\n- Client netmap: `tailscale debug netmap`\n- Test DERP connection: `tailscale debug derp headscale`\n- And many more, see: `tailscale debug --help`\n\nMany of the commands are helpful when trying to understand differences between Headscale and Tailscale SaaS.\n\n## Headscale\n\n### Application logging\n\nThe log levels `debug` and `trace` can be useful to get more information from Headscale.\n\n```yaml hl_lines=\"3\"\nlog:\n  # Valid log levels: panic, fatal, error, warn, info, debug, trace\n  level: debug\n```\n\n### Database logging\n\nThe database debug mode logs all database queries. Enable it to see how Headscale interacts with its database. This also\nrequires the application log level to be set to either `debug` or `trace`.\n\n```yaml hl_lines=\"3 7\"\ndatabase:\n  # Enable debug mode. This setting requires the log.level to be set to \"debug\" or \"trace\".\n  debug: false\n\nlog:\n  # Valid log levels: panic, fatal, error, warn, info, debug, trace\n  level: debug\n```\n\n### Metrics and debug endpoint\n\nHeadscale provides a metrics and debug endpoint. It allows to introspect different aspects such as:\n\n- Information about the Go runtime, memory usage and statistics\n- Connected nodes and pending registrations\n- Active ACLs, filters and SSH policy\n- Current DERPMap\n- Prometheus metrics\n\n!!! warning \"Keep the metrics and debug endpoint private\"\n\n    The listen address and port can be configured with the `metrics_listen_addr` variable in the [configuration\n    file](./configuration.md). By default it listens on localhost, port 9090.\n\n    Keep the metrics and debug endpoint private to your internal network and don't expose it to the Internet.\n\n    The metrics and debug interface can be disabled completely by setting `metrics_listen_addr: null` in the\n    [configuration file](./configuration.md).\n\nQuery metrics via <http://localhost:9090/metrics> and get an overview of available debug information via\n<http://localhost:9090/debug/>. 
\n\nWhen Headscale is configured to listen on all interfaces, metrics may be queried from outside localhost, but the debug\ninterface remains subject to additional protection.\n\n=== \"Direct access\"\n\n    Access the debug interface directly on the server where Headscale is installed.\n\n    ```console\n    curl http://localhost:9090/debug/\n    ```\n\n=== \"SSH port forwarding\"\n\n    Use SSH port forwarding to forward Headscale's metrics and debug port to your device.\n\n    ```console\n    ssh <HEADSCALE_SERVER> -L 9090:localhost:9090\n    ```\n\n    Access the debug interface on your device by opening <http://localhost:9090/debug/> in your web browser.\n\n=== \"Via debug key\"\n\n    The access control of the debug interface supports the use of a debug key. Traffic is accepted if the path to a\n    debug key is set via the environment variable `TS_DEBUG_KEY_PATH` and the debug key is sent as the value of the\n    `debugkey` parameter with each request.\n\n    ```console\n    openssl rand -hex 32 | tee debugkey.txt\n    export TS_DEBUG_KEY_PATH=debugkey.txt\n    headscale serve\n    ```\n\n    Access the debug interface on your device by opening `http://<IP_OF_HEADSCALE>:9090/debug/?debugkey=<DEBUG_KEY>` in\n    your web browser. The `debugkey` parameter must be sent with every request.\n\n=== \"Via debug IP address\"\n\n    The debug endpoint expects traffic from localhost. A different debug IP address may be configured by setting the\n    `TS_ALLOW_DEBUG_IP` environment variable before starting Headscale. The debug IP address is ignored when the HTTP\n    header `X-Forwarded-For` is present.\n\n    ```console\n    export TS_ALLOW_DEBUG_IP=192.168.0.10       # IP address of your device\n    headscale serve\n    ```\n\n    Access the debug interface on your device by opening `http://<IP_OF_HEADSCALE>:9090/debug/` in your web browser.\n"
  },
  {
    "path": "docs/ref/derp.md",
    "content": "# DERP\n\nA [DERP (Designated Encrypted Relay for Packets) server](https://tailscale.com/kb/1232/derp-servers) is mainly used to\nrelay traffic between two nodes in case a direct connection can't be established. Headscale provides an embedded DERP\nserver to ensure seamless connectivity between nodes.\n\n## Configuration\n\nDERP related settings are configured within the `derp` section of the [configuration file](./configuration.md). The\nfollowing sections only use a few of the available settings, check the [example configuration](./configuration.md) for\nall available configuration options.\n\n### Enable embedded DERP\n\nHeadscale ships with an embedded DERP server which allows to run your own self-hosted DERP server easily. The embedded\nDERP server is disabled by default and needs to be enabled. In addition, you should configure the public IPv4 and public\nIPv6 address of your Headscale server for improved connection stability:\n\n```yaml title=\"config.yaml\" hl_lines=\"3-5\"\nderp:\n  server:\n    enabled: true\n    ipv4: 198.51.100.1\n    ipv6: 2001:db8::1\n```\n\nKeep in mind that [additional ports are needed to run a DERP server](../setup/requirements.md#ports-in-use). Besides\nrelaying traffic, it also uses STUN (udp/3478) to help clients discover their public IP addresses and perform NAT\ntraversal. [Check DERP server connectivity](#check-derp-server-connectivity) to see if everything works.\n\n### Remove Tailscale's DERP servers\n\nOnce enabled, Headscale's embedded DERP is added to the list of free-to-use [DERP\nservers](https://tailscale.com/kb/1232/derp-servers) offered by Tailscale Inc. To only use Headscale's embedded DERP\nserver, disable the loading of the default DERP map:\n\n```yaml title=\"config.yaml\" hl_lines=\"6\"\nderp:\n  server:\n    enabled: true\n    ipv4: 198.51.100.1\n    ipv6: 2001:db8::1\n  urls: []\n```\n\n!!! warning \"Single point of failure\"\n\n    Removing Tailscale's DERP servers means that there is now just a single DERP server available for clients. This is a\n    single point of failure and could hamper connectivity.\n\n    [Check DERP server connectivity](#check-derp-server-connectivity) with your embedded DERP server before removing\n    Tailscale's DERP servers.\n\n### Customize DERP map\n\nThe DERP map offered to clients can be customized with a [dedicated YAML-configuration\nfile](https://github.com/juanfont/headscale/blob/main/derp-example.yaml). This allows to modify previously loaded DERP\nmaps fetched via URL or to offer your own, custom DERP servers to nodes.\n\n=== \"Remove specific DERP regions\"\n\n    The free-to-use [DERP servers](https://tailscale.com/kb/1232/derp-servers) are organized into regions via a region\n    ID. You can explicitly disable a specific region by setting its region ID to `null`. The following sample\n    `derp.yaml` disables the New York DERP region (which has the region ID 1):\n\n    ```yaml title=\"derp.yaml\"\n    regions:\n      1: null\n    ```\n\n    Use the following configuration to serve the default DERP map (excluding New York) to nodes:\n\n    ```yaml title=\"config.yaml\" hl_lines=\"6 7\"\n    derp:\n      server:\n        enabled: false\n      urls:\n        - https://controlplane.tailscale.com/derpmap/default\n      paths:\n        - /etc/headscale/derp.yaml\n    ```\n\n=== \"Provide custom DERP servers\"\n\n    The following sample `derp.yaml` references two custom regions (`custom-east` with ID 900 and `custom-west` with ID 901)\n    with one custom DERP server in each region. 
\n\n=== \"Remove specific DERP regions\"\n\n    The free-to-use [DERP servers](https://tailscale.com/kb/1232/derp-servers) are organized into regions via a region\n    ID. You can explicitly disable a specific region by setting its region ID to `null`. The following sample\n    `derp.yaml` disables the New York DERP region (which has the region ID 1):\n\n    ```yaml title=\"derp.yaml\"\n    regions:\n      1: null\n    ```\n\n    Use the following configuration to serve the default DERP map (excluding New York) to nodes:\n\n    ```yaml title=\"config.yaml\" hl_lines=\"6 7\"\n    derp:\n      server:\n        enabled: false\n      urls:\n        - https://controlplane.tailscale.com/derpmap/default\n      paths:\n        - /etc/headscale/derp.yaml\n    ```\n\n=== \"Provide custom DERP servers\"\n\n    The following sample `derp.yaml` references two custom regions (`custom-east` with ID 900 and `custom-west` with ID 901)\n    with one custom DERP server in each region. Each DERP server offers DERP relay via HTTPS on tcp/443, support for captive\n    portal checks via HTTP on tcp/80 and STUN on udp/3478. See the definitions of\n    [DERPMap](https://pkg.go.dev/tailscale.com/tailcfg#DERPMap),\n    [DERPRegion](https://pkg.go.dev/tailscale.com/tailcfg#DERPRegion) and\n    [DERPNode](https://pkg.go.dev/tailscale.com/tailcfg#DERPNode) for all available options.\n\n    ```yaml title=\"derp.yaml\"\n    regions:\n      900:\n        regionid: 900\n        regioncode: custom-east\n        regionname: My region (east)\n        nodes:\n          - name: 900a\n            regionid: 900\n            hostname: derp900a.example.com\n            ipv4: 198.51.100.1\n            ipv6: 2001:db8::1\n            canport80: true\n      901:\n        regionid: 901\n        regioncode: custom-west\n        regionname: My Region (west)\n        nodes:\n          - name: 901a\n            regionid: 901\n            hostname: derp901a.example.com\n            ipv4: 198.51.100.2\n            ipv6: 2001:db8::2\n            canport80: true\n    ```\n\n    Use the following configuration to only serve the two DERP servers from the above `derp.yaml`:\n\n    ```yaml title=\"config.yaml\" hl_lines=\"5 6\"\n    derp:\n      server:\n        enabled: false\n      urls: []\n      paths:\n        - /etc/headscale/derp.yaml\n    ```\n\nIndependent of the custom DERP map, you may choose to [enable the embedded DERP server and have it automatically added\nto the custom DERP map](#enable-embedded-derp).\n\n### Verify clients\n\nAccess to DERP servers can be restricted to nodes that are members of your tailnet. Relay access is denied for unknown\nclients.\n\n=== \"Embedded DERP\"\n\n    Client verification is enabled by default.\n\n    ```yaml title=\"config.yaml\" hl_lines=\"3\"\n    derp:\n      server:\n        verify_clients: true\n    ```\n\n=== \"3rd-party DERP\"\n\n    Tailscale's `derper` provides two parameters to configure client verification:\n\n    - Use the `-verify-client-url` parameter of the `derper` and point it towards the `/verify` endpoint of your\n      Headscale server (e.g. `https://headscale.example.com/verify`). The DERP server will query your Headscale instance\n      as soon as a client connects to it to ask whether access should be allowed or denied. Access is allowed if\n      Headscale knows about the connecting client and denied otherwise.\n    - The parameter `-verify-client-url-fail-open` controls what should happen when the DERP server can't reach the\n      Headscale instance. By default, it will allow access if Headscale is unreachable.\n\n## Check DERP server connectivity\n\nAny Tailscale client may be used to introspect the DERP map and to check for connectivity issues with DERP servers.\n\n- Display DERP map: `tailscale debug derp-map`\n- Check connectivity with the embedded DERP[^1]: `tailscale debug derp headscale`\n\nAdditional DERP-related metrics and information are available via the [metrics and debug\nendpoint](./debug.md#metrics-and-debug-endpoint).\n\n## Limitations\n\n- The embedded DERP server can't be used for Tailscale's captive portal checks as it doesn't support the `/generate_204`\n  endpoint via HTTP on port tcp/80.\n- There are no speed or throughput optimisations; the main purpose is to assist in node connectivity.\n\n[^1]: This assumes that the default region code of the [configuration file](./configuration.md) is used.\n"
  },
  {
    "path": "docs/ref/dns.md",
    "content": "# DNS\n\nHeadscale supports [most DNS features](../about/features.md) from Tailscale. DNS related settings can be configured\nwithin the `dns` section of the [configuration file](./configuration.md).\n\n## Setting extra DNS records\n\nHeadscale allows to set extra DNS records which are made available via\n[MagicDNS](https://tailscale.com/kb/1081/magicdns). Extra DNS records can be configured either via static entries in the\n[configuration file](./configuration.md) or from a JSON file that Headscale continuously watches for changes:\n\n- Use the `dns.extra_records` option in the [configuration file](./configuration.md) for entries that are static and\n  don't change while Headscale is running. Those entries are processed when Headscale is starting up and changes to the\n  configuration require a restart of Headscale.\n- For dynamic DNS records that may be added, updated or removed while Headscale is running or DNS records that are\n  generated by scripts the option `dns.extra_records_path` in the [configuration file](./configuration.md) is useful.\n  Set it to the absolute path of the JSON file containing DNS records and Headscale processes this file as it detects\n  changes.\n\nAn example use case is to serve multiple apps on the same host via a reverse proxy like NGINX, in this case a Prometheus\nmonitoring stack. This allows to nicely access the service with \"http://grafana.myvpn.example.com\" instead of the\nhostname and port combination \"http://hostname-in-magic-dns.myvpn.example.com:3000\".\n\n!!! warning \"Limitations\"\n\n    Currently, [only A and AAAA records are processed by Tailscale](https://github.com/tailscale/tailscale/blob/v1.86.5/ipn/ipnlocal/node_backend.go#L662).\n\n1. Configure extra DNS records using one of the available configuration options:\n\n    === \"Static entries, via `dns.extra_records`\"\n\n        ```yaml title=\"config.yaml\"\n        dns:\n          ...\n          extra_records:\n            - name: \"grafana.myvpn.example.com\"\n              type: \"A\"\n              value: \"100.64.0.3\"\n\n            - name: \"prometheus.myvpn.example.com\"\n              type: \"A\"\n              value: \"100.64.0.3\"\n          ...\n        ```\n\n        Restart your headscale instance.\n\n    === \"Dynamic entries, via `dns.extra_records_path`\"\n\n        ```json title=\"extra-records.json\"\n        [\n          {\n            \"name\": \"grafana.myvpn.example.com\",\n            \"type\": \"A\",\n            \"value\": \"100.64.0.3\"\n          },\n          {\n            \"name\": \"prometheus.myvpn.example.com\",\n            \"type\": \"A\",\n            \"value\": \"100.64.0.3\"\n          }\n        ]\n        ```\n\n        Headscale picks up changes to the above JSON file automatically.\n\n        !!! tip \"Good to know\"\n\n            - The `dns.extra_records_path` option in the [configuration file](./configuration.md) needs to reference the\n              JSON file containing extra DNS records.\n            - Be sure to \"sort keys\" and produce a stable output in case you generate the JSON file with a script.\n              Headscale uses a checksum to detect changes to the file and a stable output avoids unnecessary processing.\n\n1. 
\n\n1. Verify that DNS records are properly set using the DNS querying tool of your choice:\n\n    === \"Query with dig\"\n\n        ```console\n        dig +short grafana.myvpn.example.com\n        100.64.0.3\n        ```\n\n    === \"Query with drill\"\n\n        ```console\n        drill -Q grafana.myvpn.example.com\n        100.64.0.3\n        ```\n\n1. Optional: Set up the reverse proxy\n\n    The motivating example here was to be able to access internal monitoring services on the same host without\n    specifying a port, shown here as an NGINX configuration snippet:\n\n    ```nginx title=\"nginx.conf\"\n    server {\n        listen 80;\n        listen [::]:80;\n\n        server_name grafana.myvpn.example.com;\n\n        location / {\n            proxy_pass http://localhost:3000;\n            proxy_set_header Host $http_host;\n            proxy_set_header X-Real-IP $remote_addr;\n            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n            proxy_set_header X-Forwarded-Proto $scheme;\n        }\n    }\n    ```\n"
  },
  {
    "path": "docs/ref/integration/reverse-proxy.md",
    "content": "# Running headscale behind a reverse proxy\n\n!!! warning \"Community documentation\"\n\n    This page is not actively maintained by the headscale authors and is\n    written by community members. It is _not_ verified by headscale developers.\n\n    **It might be outdated and it might miss necessary steps**.\n\nRunning headscale behind a reverse proxy is useful when running multiple applications on the same server, and you want to reuse the same external IP and port - usually tcp/443 for HTTPS.\n\n### WebSockets\n\nThe reverse proxy MUST be configured to support WebSockets to communicate with Tailscale clients.\n\nWebSockets support is also required when using the Headscale [embedded DERP server](../derp.md). In this case, you will also need to expose the UDP port used for STUN (by default, udp/3478). Please check our [config-example.yaml](https://github.com/juanfont/headscale/blob/main/config-example.yaml).\n\n### Cloudflare\n\nRunning headscale behind a cloudflare proxy or cloudflare tunnel is not supported and will not work as Cloudflare does not support WebSocket POSTs as required by the Tailscale protocol. See [this issue](https://github.com/juanfont/headscale/issues/1468)\n\n### TLS\n\nHeadscale can be configured not to use TLS, leaving it to the reverse proxy to handle. Add the following configuration values to your headscale config file.\n\n```yaml title=\"config.yaml\"\nserver_url: https://<YOUR_SERVER_NAME> # This should be the FQDN at which headscale will be served\nlisten_addr: 0.0.0.0:8080\nmetrics_listen_addr: 0.0.0.0:9090\ntls_cert_path: \"\"\ntls_key_path: \"\"\n```\n\n## nginx\n\nThe following example configuration can be used in your nginx setup, substituting values as necessary. `<IP:PORT>` should be the IP address and port where headscale is running. In most cases, this will be `http://localhost:8080`.\n\n```nginx title=\"nginx.conf\"\nmap $http_upgrade $connection_upgrade {\n    default      upgrade;\n    ''           close;\n}\n\nserver {\n    listen 80;\n\tlisten [::]:80;\n\n\tlisten 443      ssl http2;\n\tlisten [::]:443 ssl http2;\n\n    server_name <YOUR_SERVER_NAME>;\n\n    ssl_certificate <PATH_TO_CERT>;\n    ssl_certificate_key <PATH_CERT_KEY>;\n    ssl_protocols TLSv1.2 TLSv1.3;\n\n    location / {\n        proxy_pass http://<IP:PORT>;\n        proxy_http_version 1.1;\n        proxy_set_header Upgrade $http_upgrade;\n        proxy_set_header Connection $connection_upgrade;\n        proxy_set_header Host $server_name;\n        proxy_redirect http:// https://;\n        proxy_buffering off;\n        proxy_set_header X-Real-IP $remote_addr;\n        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n        proxy_set_header X-Forwarded-Proto $scheme;\n        add_header Strict-Transport-Security \"max-age=15552000; includeSubDomains\" always;\n    }\n}\n```\n\n## istio/envoy\n\nIf you using [Istio](https://istio.io/) ingressgateway or [Envoy](https://www.envoyproxy.io/) as reverse proxy, there are some tips for you. If not set, you may see some debug log in proxy as below:\n\n```log\nSending local reply with details upgrade_failed\n```\n\n### Envoy\n\nYou need to add a new upgrade_type named `tailscale-control-protocol`. 
\n\n### Istio\n\nAs with Envoy, we can use an `EnvoyFilter` to add the upgrade type.\n\n```yaml\napiVersion: networking.istio.io/v1alpha3\nkind: EnvoyFilter\nmetadata:\n  name: headscale-behind-istio-ingress\n  namespace: istio-system\nspec:\n  configPatches:\n    - applyTo: NETWORK_FILTER\n      match:\n        listener:\n          filterChain:\n            filter:\n              name: envoy.filters.network.http_connection_manager\n      patch:\n        operation: MERGE\n        value:\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n            upgrade_configs:\n              - upgrade_type: tailscale-control-protocol\n```\n\n## Caddy\n\nThe following Caddyfile is all that is necessary to use Caddy as a reverse proxy for headscale, in combination with the `config.yaml` specifications above to disable headscale's built-in TLS. Replace values as necessary - `<YOUR_SERVER_NAME>` should be the FQDN at which headscale will be served, and `<IP:PORT>` should be the IP address and port where headscale is running. In most cases, this will be `localhost:8080`.\n\n```none title=\"Caddyfile\"\n<YOUR_SERVER_NAME> {\n    reverse_proxy <IP:PORT>\n}\n```\n\nCaddy v2 will [automatically](https://caddyserver.com/docs/automatic-https) provision a certificate for your domain/subdomain, force HTTPS, and proxy WebSockets - no further configuration is necessary.\n\nFor a slightly more complex configuration which utilizes Docker containers to manage Caddy, headscale, and Headscale-UI, [Guru Computing's guide](https://blog.gurucomputing.com.au/smart-vpns-with-headscale/) is an excellent reference.\n\n## Apache\n\nThe following minimal Apache config will proxy traffic to the headscale instance on `<IP:PORT>`. Note that `upgrade=any` is required as a parameter for `ProxyPass` so that WebSockets traffic whose `Upgrade` header value is not equal to `WebSocket` (i.e. the Tailscale control protocol) is forwarded correctly. See the [Apache docs](https://httpd.apache.org/docs/2.4/mod/mod_proxy_wstunnel.html) for more information on this.\n\n```apache title=\"apache.conf\"\n<VirtualHost *:443>\n\tServerName <YOUR_SERVER_NAME>\n\n\tProxyPreserveHost On\n\tProxyPass / http://<IP:PORT>/ upgrade=any\n\n\tSSLEngine On\n\tSSLCertificateFile <PATH_TO_CERT>\n\tSSLCertificateKeyFile <PATH_CERT_KEY>\n</VirtualHost>\n```\n"
  },
  {
    "path": "docs/ref/integration/tools.md",
    "content": "# Tools related to headscale\n\n!!! warning \"Community contributions\"\n\n    This page contains community contributions. The projects listed here are not\n    maintained by the headscale authors and are written by community members.\n\nThis page collects third-party tools, client libraries, and scripts related to headscale.\n\n- [headscale-operator](https://github.com/infradohq/headscale-operator) - Headscale Kubernetes Operator\n- [tailscale-manager](https://github.com/singlestore-labs/tailscale-manager) - Dynamically manage Tailscale route\n  advertisements\n- [headscalebacktosqlite](https://github.com/bigbozza/headscalebacktosqlite) - Migrate headscale from PostgreSQL back to\n  SQLite\n- [headscale-pf](https://github.com/YouSysAdmin/headscale-pf) - Populates user groups based on user groups in Jumpcloud\n  or Authentik\n- [headscale-client-go](https://github.com/hibare/headscale-client-go) - A Go client implementation for the Headscale\n  HTTP API.\n- [headscale-zabbix](https://github.com/dblanque/headscale-zabbix) - A Zabbix Monitoring Template for the Headscale\n  Service.\n- [tailscale-exporter](https://github.com/adinhodovic/tailscale-exporter) - A Prometheus exporter for Headscale that\n  provides network-level metrics using the Headscale API.\n"
  },
  {
    "path": "docs/ref/integration/web-ui.md",
    "content": "# Web interfaces for headscale\n\n!!! warning \"Community contributions\"\n\n    This page contains community contributions. The projects listed here are not\n    maintained by the headscale authors and are written by community members.\n\nHeadscale doesn't provide a built-in web interface but users may pick one from the available options.\n\n- [headscale-ui](https://github.com/gurucomputing/headscale-ui) - A web frontend for the headscale Tailscale-compatible\n  coordination server\n- [HeadscaleUi](https://github.com/simcu/headscale-ui) - A static headscale admin ui, no backend environment required\n- [Headplane](https://github.com/tale/headplane) - An advanced Tailscale inspired frontend for headscale\n- [headscale-admin](https://github.com/GoodiesHQ/headscale-admin) - Headscale-Admin is meant to be a simple, modern web\n  interface for headscale\n- [ouroboros](https://github.com/yellowsink/ouroboros) - Ouroboros is designed for users to manage their own devices,\n  rather than for admins\n- [unraid-headscale-admin](https://github.com/ich777/unraid-headscale-admin) - A simple headscale admin UI for Unraid,\n  it offers Local (`docker exec`) and API Mode\n- [headscale-console](https://github.com/rickli-cloud/headscale-console) - WebAssembly-based client supporting SSH, VNC\n  and RDP with optional self-service capabilities\n- [headscale-piying](https://github.com/wszgrcy/headscale-piying) - headscale web ui,support visual ACL configuration\n- [HeadControl](https://github.com/ahmadzip/HeadControl) - Minimal Headscale admin dashboard, built with Go and HTMX\n- [Headscale Manager](https://github.com/hkdone/headscalemanager) - Headscale UI for Android\n\nYou can ask for support on our [Discord server](https://discord.gg/c84AZQhmpx) in the \"web-interfaces\" channel.\n"
  },
  {
    "path": "docs/ref/oidc.md",
    "content": "# OpenID Connect\n\nHeadscale supports authentication via external identity providers using OpenID Connect (OIDC). It features:\n\n- Auto configuration via OpenID Connect Discovery Protocol\n- [Proof Key for Code Exchange (PKCE) code verification](#enable-pkce-recommended)\n- [Authorization based on a user's domain, email address or group membership](#authorize-users-with-filters)\n- Synchronization of [standard OIDC claims](#supported-oidc-claims)\n\nPlease see [limitations](#limitations) for known issues and limitations.\n\n## Configuration\n\nOpenID requires configuration in Headscale and your identity provider:\n\n- Headscale: The `oidc` section of the Headscale [configuration](configuration.md) contains all available configuration\n  options along with a description and their default values.\n- Identity provider: Please refer to the official documentation of your identity provider for specific instructions.\n  Additionally, there might be some useful hints in the [Identity provider specific\n  configuration](#identity-provider-specific-configuration) section below.\n\n### Basic configuration\n\nA basic configuration connects Headscale to an identity provider and typically requires:\n\n- OpenID Connect Issuer URL from the identity provider. Headscale uses the OpenID Connect Discovery Protocol 1.0 to\n  automatically obtain OpenID configuration parameters (example: `https://sso.example.com`).\n- Client ID from the identity provider (example: `headscale`).\n- Client secret generated by the identity provider (example: `generated-secret`).\n- Redirect URI for your identity provider (example: `https://headscale.example.com/oidc/callback`).\n\n=== \"Headscale\"\n\n    ```yaml\n    oidc:\n      issuer: \"https://sso.example.com\"\n      client_id: \"headscale\"\n      client_secret: \"generated-secret\"\n    ```\n\n=== \"Identity provider\"\n\n    - Create a new confidential client (`Client ID`, `Client secret`)\n    - Add Headscale's OIDC callback URL as valid redirect URL: `https://headscale.example.com/oidc/callback`\n    - Configure additional parameters to improve user experience such as: name, description, logo, …\n\n### Enable PKCE (recommended)\n\nProof Key for Code Exchange (PKCE) adds an additional layer of security to the OAuth 2.0 authorization code flow by\npreventing authorization code interception attacks, see: <https://datatracker.ietf.org/doc/html/rfc7636>. PKCE is\nrecommended and needs to be configured for Headscale and the identity provider alike:\n\n=== \"Headscale\"\n\n    ```yaml hl_lines=\"5-6\"\n    oidc:\n      issuer: \"https://sso.example.com\"\n      client_id: \"headscale\"\n      client_secret: \"generated-secret\"\n      pkce:\n        enabled: true\n    ```\n\n=== \"Identity provider\"\n\n    - Enable PKCE for the headscale client\n    - Set the PKCE challenge method to \"S256\"\n\n### Authorize users with filters\n\nHeadscale allows to filter for allowed users based on their domain, email address or group membership. These filters can\nbe helpful to apply additional restrictions and control which users are allowed to join. Filters are disabled by\ndefault, users are allowed to join once the authentication with the identity provider succeeds. 
\n\n=== \"Allowed domains\"\n\n    - Check the email domain of each authenticating user against the list of allowed domains and only authorize users\n      whose email domain matches `example.com`.\n    - A verified email address is required [unless email verification is disabled](#control-email-verification).\n    - Access allowed: `alice@example.com`\n    - Access denied: `bob@example.net`\n\n    ```yaml hl_lines=\"5-6\"\n    oidc:\n      issuer: \"https://sso.example.com\"\n      client_id: \"headscale\"\n      client_secret: \"generated-secret\"\n      allowed_domains:\n        - \"example.com\"\n    ```\n\n=== \"Allowed users/emails\"\n\n    - Check the email address of each authenticating user against the list of allowed email addresses and only authorize\n      users whose email is part of the `allowed_users` list.\n    - A verified email address is required [unless email verification is disabled](#control-email-verification).\n    - Access allowed: `alice@example.com`, `bob@example.net`\n    - Access denied: `mallory@example.net`\n\n    ```yaml hl_lines=\"5-7\"\n    oidc:\n      issuer: \"https://sso.example.com\"\n      client_id: \"headscale\"\n      client_secret: \"generated-secret\"\n      allowed_users:\n        - \"alice@example.com\"\n        - \"bob@example.net\"\n    ```\n\n=== \"Allowed groups\"\n\n    - Use the OIDC `groups` claim of each authenticating user to get their group membership and only authorize users\n      which are members in at least one of the referenced groups.\n    - Access allowed: users in the `headscale_users` group\n    - Access denied: users without groups, users with other groups\n\n    ```yaml hl_lines=\"5-7\"\n    oidc:\n      issuer: \"https://sso.example.com\"\n      client_id: \"headscale\"\n      client_secret: \"generated-secret\"\n      scope: [\"openid\", \"profile\", \"email\", \"groups\"]\n      allowed_groups:\n        - \"headscale_users\"\n    ```\n\n### Control email verification\n\nHeadscale uses the `email` claim from the identity provider to synchronize the email address to its user profile. By\ndefault, a user's email address is only synchronized when the identity provider reports the email address as verified\nvia the `email_verified: true` claim.\n\nUnverified emails may be allowed in case an identity provider does not send the `email_verified` claim or if email\nverification is not required. In that case, a user's email address is always synchronized to the user profile.\n\n```yaml hl_lines=\"5\"\noidc:\n  issuer: \"https://sso.example.com\"\n  client_id: \"headscale\"\n  client_secret: \"generated-secret\"\n  email_verified_required: false\n```\n\n### Customize node expiration\n\nThe node expiration is the amount of time a node is authenticated with OpenID Connect until it expires and needs to\nreauthenticate. The default node expiration is 180 days. This can either be customized or set to the expiration from the\nAccess Token.\n\n=== \"Customize node expiration\"\n\n    ```yaml hl_lines=\"5\"\n    oidc:\n      issuer: \"https://sso.example.com\"\n      client_id: \"headscale\"\n      client_secret: \"generated-secret\"\n      expiry: 30d   # Use 0 to disable node expiration\n    ```\n\n=== \"Use expiration from Access Token\"\n\n    Please keep in mind that the Access Token is typically a short-lived token that expires within a few minutes. You\n    will have to configure token expiration in your identity provider to avoid frequent re-authentication.
\n\n    ```yaml hl_lines=\"5\"\n    oidc:\n      issuer: \"https://sso.example.com\"\n      client_id: \"headscale\"\n      client_secret: \"generated-secret\"\n      use_expiry_from_token: true\n    ```\n\n!!! tip \"Expire a node and force re-authentication\"\n\n    A node can be expired immediately via:\n\n    ```console\n    headscale node expire -i <NODE_ID>\n    ```\n\n### Reference a user in the policy\n\nYou may refer to users in the Headscale policy via:\n\n- Email address\n- Username\n- Provider identifier (this value is currently only available from the [API](api.md), database or directly from your\n  identity provider)\n\n!!! note \"A user identifier in the policy must contain a single `@`\"\n\n    The Headscale policy requires a single `@` to reference a user. If the username or provider identifier doesn't\n    already contain a single `@`, it needs to be appended at the end. For example: the username `ssmith` has to be\n    written as `ssmith@` to be correctly identified as user within the policy.\n\n!!! warning \"Email address or username might be updated by users\"\n\n    Many identity providers allow users to update their own profile. Depending on the identity provider and its\n    configuration, the values for username or email address might change over time. This might have unexpected\n    consequences for Headscale where a policy might no longer work or a user might obtain more access by hijacking an\n    existing username or email address.\n\n!!! tip \"How to use the provider identifier in the policy\"\n\n    The provider identifier uniquely identifies an OIDC user and a well-behaving identity provider guarantees that this\n    value never changes for a particular user. It is usually an opaque and long string and its value is currently only\n    available from the [API](api.md), database or directly from your identity provider.\n\n    Use the [API](api.md) with the `/api/v1/user` endpoint to fetch the provider identifier (`providerId`). The value\n    (be sure to append an `@` in case the provider identifier doesn't already contain an `@` somewhere) can be used\n    directly to reference a user in the policy. To improve readability of the policy, one may use the `groups` section\n    as an alias:\n\n    ```json\n    {\n      \"groups\": {\n        \"group:alice\": [\n          \"https://sso.example.com/oauth2/openid/59ac9125-c31b-46c5-814e-06242908cf57@\"\n        ]\n      },\n      \"acls\": [\n        {\n          \"action\": \"accept\",\n          \"src\": [\"group:alice\"],\n          \"dst\": [\"*:*\"]\n        }\n      ]\n    }\n    ```\n\n## Supported OIDC claims\n\nHeadscale uses [the standard OIDC claims](https://openid.net/specs/openid-connect-core-1_0.html#StandardClaims) to\npopulate and update its local user profile on each login. OIDC claims are read from the ID Token and from the UserInfo\nendpoint.
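\n\nTo see which values were synchronized to a user profile after a login, you can query the user endpoint of the\n[API](api.md):\n\n```console\ncurl -H \"Authorization: Bearer <API_KEY>\" \\\n    https://headscale.example.com/api/v1/user\n```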
\n\n| Headscale profile   | OIDC claim           | Notes / examples                                                                                  |\n| ------------------- | -------------------- | ------------------------------------------------------------------------------------------------- |\n| email address       | `email`              | Only verified emails are synchronized, unless `email_verified_required: false` is configured      |\n| display name        | `name`               | e.g. `Sam Smith`                                                                                  |\n| username            | `preferred_username` | Depends on identity provider, e.g. `ssmith`, `ssmith@idp.example.com`, `\\\\example.com\\ssmith`     |\n| profile picture     | `picture`            | URL to a profile picture or avatar                                                                |\n| provider identifier | `iss`, `sub`         | A stable and unique identifier for a user, typically a combination of `iss` and `sub` OIDC claims |\n|                     | `groups`             | [Only used to filter for allowed groups](#authorize-users-with-filters)                           |\n\n## Limitations\n\n- Support for OpenID Connect aims to be generic and vendor independent. It offers only limited support for quirks of\n  specific identity providers.\n- OIDC groups cannot be used in ACLs.\n- The username provided by the identity provider needs to adhere to this pattern:\n    - The username must be at least two characters long.\n    - It must only contain letters, digits, hyphens, dots, underscores, and up to a single `@`.\n    - The username must start with a letter.\n\nPlease see the [GitHub label \"OIDC\"](https://github.com/juanfont/headscale/labels/OIDC) for OIDC-related issues.\n\n## Identity provider specific configuration\n\n!!! warning \"Third-party software and services\"\n\n    This section of the documentation is specific to third-party software and services. We recommend users read the\n    third-party documentation on how to configure and integrate an OIDC client. Please see the [Configuration\n    section](#configuration) for a description of Headscale's OIDC-related configuration settings.\n\nAny identity provider with OpenID Connect support should \"just work\" with Headscale. The following identity providers\nare known to work:\n\n- [Authelia](#authelia)\n- [Authentik](#authentik)\n- [Kanidm](#kanidm)\n- [Keycloak](#keycloak)\n\n### Authelia\n\nAuthelia is fully supported by Headscale.\n\n### Authentik\n\n- Authentik is fully supported by Headscale.\n- [Headscale does not support JSON Web Encryption](https://github.com/juanfont/headscale/issues/2446). Leave the field\n  `Encryption Key` in the providers section unset.\n- See Authentik's [Integrate with Headscale](https://integrations.goauthentik.io/networking/headscale/) guide.\n\n### Google OAuth\n\n!!! warning \"No username due to missing preferred_username\"\n\n    Google OAuth does not send the `preferred_username` claim when the scope `profile` is requested. The username in\n    Headscale will be blank/not set.\n\nIn order to integrate Headscale with Google, you'll need to have a [Google Cloud\nConsole](https://console.cloud.google.com) account.\n\nGoogle OAuth has a [verification process](https://support.google.com/cloud/answer/9110914?hl=en) if you need to have\nusers authenticate who are outside of your domain.
\nIf you only need to authenticate users from your domain name (i.e.\n`@example.com`), you don't need to go through the verification process.\n\nHowever, if you don't have a domain, or need to add users outside of your domain, you can manually add emails via Google\nConsole.\n\n#### Steps\n\n1. Go to [Google Console](https://console.cloud.google.com) and log in or create an account if you don't have one.\n1. Create a project (if you don't already have one).\n1. On the left hand menu, go to `APIs and services` -> `Credentials`\n1. Click `Create Credentials` -> `OAuth client ID`\n1. Under `Application Type`, choose `Web Application`\n1. For `Name`, enter whatever you like\n1. Under `Authorised redirect URIs`, add Headscale's OIDC callback URL: `https://headscale.example.com/oidc/callback`\n1. Click `Save` at the bottom of the form\n1. Take note of the `Client ID` and `Client secret`; you can also download them for reference if needed.\n1. [Configure Headscale following the \"Basic configuration\" steps](#basic-configuration). The issuer URL for Google\n   OAuth is: `https://accounts.google.com`.\n\n### Kanidm\n\n- Kanidm is fully supported by Headscale.\n- Groups for the [allowed groups filter](#authorize-users-with-filters) need to be specified with their full SPN, for\n  example: `headscale_users@sso.example.com`.\n- Kanidm sends the full SPN (`alice@sso.example.com`) as `preferred_username` by default. Headscale stores this value as\n  username, which might be confusing as the username and email fields now contain values that look like an email address.\n  [Kanidm can be configured to send the short username as the `preferred_username` attribute\n  instead](https://kanidm.github.io/kanidm/stable/integrations/oauth2.html#short-names):\n    ```console\n    kanidm system oauth2 prefer-short-username <client name>\n    ```\n    Once configured, the short username in Headscale will be `alice` and can be referred to as `alice@` in the policy.\n\n### Keycloak\n\nKeycloak is fully supported by Headscale.\n\n#### Additional configuration to use the allowed groups filter\n\nKeycloak has no built-in client scope for the OIDC `groups` claim. This extra configuration step is **only** needed if\nyou need to [authorize access based on group membership](#authorize-users-with-filters).\n\n- Create a new client scope `groups` for OpenID Connect:\n    - Configure a `Group Membership` mapper with name `groups` and the token claim name `groups`.\n    - Add the mapper to at least the UserInfo endpoint.\n- Configure the new client scope for your Headscale client:\n    - Edit the Headscale client.\n    - Search for the client scope `groups`.\n    - Add it with assigned type `Default`.\n- [Configure the allowed groups in Headscale](#authorize-users-with-filters). How groups need to be specified depends on\n  Keycloak's `Full group path` option:\n    - `Full group path` is enabled: groups contain their full path, e.g. `/top/group1`\n    - `Full group path` is disabled: only the name of the group is used, e.g. `group1`\n\n### Microsoft Entra ID\n\nIn order to integrate Headscale with Microsoft Entra ID, you'll need to provision an App Registration with the correct\nscopes and redirect URI.\n\n[Configure Headscale following the \"Basic configuration\" steps](#basic-configuration). The issuer URL for Microsoft\nEntra ID is: `https://login.microsoftonline.com/<tenant-UUID>/v2.0`. The following `extra_params` might be useful:\n\n- `domain_hint: example.com` to use your own domain\n- `prompt: select_account` to force an account picker during login
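\n\nAs a rough sketch, these parameters might be set in Headscale's `oidc` section like this (the client values repeat the\n[basic configuration](#basic-configuration); adjust them to your tenant):\n\n```yaml\noidc:\n  issuer: \"https://login.microsoftonline.com/<tenant-UUID>/v2.0\"\n  client_id: \"headscale\"\n  client_secret: \"generated-secret\"\n  extra_params:\n    domain_hint: example.com\n    prompt: select_account\n```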
\n\nWhen using Microsoft Entra ID together with the [allowed groups filter](#authorize-users-with-filters), configure the\nHeadscale OIDC scope without the `groups` claim, for example:\n\n```yaml\noidc:\n  scope: [\"openid\", \"profile\", \"email\"]\n```\n\nGroups for the [allowed groups filter](#authorize-users-with-filters) need to be specified with their group ID (UUID)\ninstead of the group name.\n\n## Switching OIDC providers\n\nHeadscale only supports a single OIDC provider in its configuration, but it does store the provider identifier of each user. When switching providers, this might lead to issues with existing users: all user details (name, email, groups) might be identical with the new provider, but the identifier will differ. Headscale will be unable to create a new user as the name and email will already be in use for the existing users.\n\nAt this time, you will need to manually update the `provider_identifier` column in the `users` table for each user with the appropriate value for the new provider. The identifier is built from the `iss` and `sub` claims of the OIDC ID token, for example `https://id.example.com/12340987`.\n"
  },
  {
    "path": "docs/ref/registration.md",
    "content": "# Registration methods\n\nHeadscale supports multiple ways to register a node. The preferred registration method depends on the identity of a node\nand your use case.\n\n## Identity model\n\nTailscale's identity model distinguishes between personal and tagged nodes:\n\n- A personal node (or user-owned node) is owned by a human and typically refers to end-user devices such as laptops,\n  workstations or mobile phones. End-user devices are managed by a single user.\n- A tagged node (or service-based node or non-human node) provides services to the network. Common examples include web-\n  and database servers. Those nodes are typically managed by a team of users. Some additional restrictions apply for\n  tagged nodes, e.g. a tagged node is not allowed to [Tailscale SSH](https://tailscale.com/kb/1193/tailscale-ssh) into a\n  personal node.\n\nHeadscale implements Tailscale's identity model and distinguishes between personal and tagged nodes where a personal\nnode is owned by a Headscale user and a tagged node is owned by a tag. Tagged devices are grouped under the special user\n`tagged-devices`.\n\n## Registration methods\n\nThere are two main ways to register new nodes, [web authentication](#web-authentication) and [registration with a pre\nauthenticated key](#pre-authenticated-key). Both methods can be used to register personal and tagged nodes.\n\n### Web authentication\n\nWeb authentication is the default method to register a new node. It's interactive, where the client initiates the\nregistration and the Headscale administrator needs to approve the new node before it is allowed to join the network. A\nnode can be approved with:\n\n- Headscale CLI (described in this documentation)\n- [Headscale API](api.md)\n- Or delegated to an identity provider via [OpenID Connect](oidc.md)\n\nWeb authentication relies on the presence of a Headscale user. Use the `headscale users` command to create a new user:\n\n```console\nheadscale users create <USER>\n```\n\n=== \"Personal devices\"\n\n    Run `tailscale up` to login your personal device:\n\n    ```console\n    tailscale up --login-server <YOUR_HEADSCALE_URL>\n    ```\n\n    Usually, a browser window with further instructions is opened. This page explains how to complete the registration\n    on your Headscale server and it also prints the Auth ID required to approve the node:\n\n    ```console\n    headscale auth register --user <USER> --auth-id <AUTH_ID>\n    ```\n\n    Congrations, the registration of your personal node is complete and it should be listed as \"online\" in the output of\n    `headscale nodes list`. The \"User\" column displays `<USER>` as the owner of the node.\n\n=== \"Tagged devices\"\n\n    Your Headscale user needs to be authorized to register tagged devices. This authorization is specified in the\n    [`tagOwners`](https://tailscale.com/kb/1337/policy-syntax#tag-owners) section of the [ACL](acls.md). A simple\n    example looks like this:\n\n    ```json title=\"The user alice can register nodes tagged with tag:server\"\n    {\n      \"tagOwners\": {\n        \"tag:server\": [\"alice@\"]\n      },\n      // more rules\n    }\n    ```\n\n    Run `tailscale up` and provide at least one tag to login a tagged device:\n\n    ```console\n    tailscale up --login-server <YOUR_HEADSCALE_URL> --advertise-tags tag:<TAG>\n    ```\n\n    Usually, a browser window with further instructions is opened. 
\n    This page explains how to complete the registration on your Headscale server and it also prints the Auth ID\n    required to approve the node:\n\n    ```console\n    headscale auth register --user <USER> --auth-id <AUTH_ID>\n    ```\n\n    Headscale checks that `<USER>` is allowed to register a node with the specified tag(s) and then transfers ownership\n    of the new node to the special user `tagged-devices`. The registration of a tagged node is complete and it should be\n    listed as \"online\" in the output of `headscale nodes list`. The \"User\" column displays `tagged-devices` as the owner\n    of the node. See the \"Tags\" column for the list of assigned tags.\n\n### Pre authenticated key\n\nRegistration with a pre authenticated key (or auth key) is a non-interactive way to register a new node. The Headscale\nadministrator creates a preauthkey upfront and this preauthkey can then be used to register a node non-interactively.\nIt's best suited for automation.\n\n=== \"Personal devices\"\n\n    A personal node is always assigned to a Headscale user. Use the `headscale users` command to create a new user:\n\n    ```console\n    headscale users create <USER>\n    ```\n\n    Use the `headscale users list` command to learn its `<USER_ID>` and create a new pre authenticated key for your user:\n\n    ```console\n    headscale preauthkeys create --user <USER_ID>\n    ```\n\n    The above prints a pre authenticated key with the default settings (can be used once and is valid for one hour). Use\n    this auth key to register a node non-interactively:\n\n    ```console\n    tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>\n    ```\n\n    Congratulations, the registration of your personal node is complete and it should be listed as \"online\" in the output of\n    `headscale nodes list`. The \"User\" column displays `<USER>` as the owner of the node.\n\n=== \"Tagged devices\"\n\n    Create a new pre authenticated key and provide at least one tag:\n\n    ```console\n    headscale preauthkeys create --tags tag:<TAG>\n    ```\n\n    The above prints a pre authenticated key with the default settings (can be used once and is valid for one hour). Use\n    this auth key to register a node non-interactively. You don't need to provide the `--advertise-tags` parameter as\n    the tags are automatically read from the pre authenticated key:\n\n    ```console\n    tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>\n    ```\n\n    The registration of a tagged node is complete and it should be listed as \"online\" in the output of\n    `headscale nodes list`. The \"User\" column displays `tagged-devices` as the owner of the node. See the \"Tags\" column for the list of\n    assigned tags.\n"
  },
  {
    "path": "docs/ref/routes.md",
    "content": "# Routes\n\nHeadscale supports route advertising and can be used to manage [subnet routers](https://tailscale.com/kb/1019/subnets)\nand [exit nodes](https://tailscale.com/kb/1103/exit-nodes) for a tailnet.\n\n- [Subnet routers](#subnet-router) may be used to connect an existing network such as a virtual\n  private cloud or an on-premise network with your tailnet. Use a subnet router to access devices where Tailscale can't\n  be installed or to gradually rollout Tailscale.\n- [Exit nodes](#exit-node) can be used to route all Internet traffic for another Tailscale\n  node. Use it to securely access the Internet on an untrusted Wi-Fi or to access online services that expect traffic\n  from a specific IP address.\n\n## Subnet router\n\nThe setup of a subnet router requires double opt-in, once from a subnet router and once on the control server to allow\nits use within the tailnet. Optionally, use [`autoApprovers` to automatically approve routes from a subnet\nrouter](#automatically-approve-routes-of-a-subnet-router).\n\n### Setup a subnet router\n\n#### Configure a node as subnet router\n\nRegister a node and advertise the routes it should handle as comma separated list:\n\n```console\n$ sudo tailscale up --login-server <YOUR_HEADSCALE_URL> --advertise-routes=10.0.0.0/8,192.168.0.0/24\n```\n\nIf the node is already registered, it can advertise new routes or update previously announced routes with:\n\n```console\n$ sudo tailscale set --advertise-routes=10.0.0.0/8,192.168.0.0/24\n```\n\nFinally, [enable IP forwarding](#enable-ip-forwarding) to route traffic.\n\n#### Enable the subnet router on the control server\n\nThe routes of a tailnet can be displayed with the `headscale nodes list-routes` command. A subnet router with the\nhostname `myrouter` announced the IPv4 networks `10.0.0.0/8` and `192.168.0.0/24`. Those need to be approved before they\ncan be used.\n\n```console\n$ headscale nodes list-routes\nID | Hostname | Approved | Available      | Serving (Primary)\n1  | myrouter |          | 10.0.0.0/8     |\n   |          |          | 192.168.0.0/24 |\n```\n\nApprove all desired routes of a subnet router by specifying them as comma separated list:\n\n```console\n$ headscale nodes approve-routes --identifier 1 --routes 10.0.0.0/8,192.168.0.0/24\nNode updated\n```\n\nThe node `myrouter` can now route the IPv4 networks `10.0.0.0/8` and `192.168.0.0/24` for the tailnet.\n\n```console\n$ headscale nodes list-routes\nID | Hostname | Approved       | Available      | Serving (Primary)\n1  | myrouter | 10.0.0.0/8     | 10.0.0.0/8     | 10.0.0.0/8\n   |          | 192.168.0.0/24 | 192.168.0.0/24 | 192.168.0.0/24\n```\n\n#### Use the subnet router\n\nTo accept routes advertised by a subnet router on a node:\n\n```console\n$ sudo tailscale set --accept-routes\n```\n\nPlease refer to the official [Tailscale\ndocumentation](https://tailscale.com/kb/1019/subnets#use-your-subnet-routes-from-other-devices) for how to use a subnet\nrouter on different operating systems.\n\n### Restrict the use of a subnet router with ACL\n\nThe routes announced by subnet routers are available to the nodes in a tailnet. By default, without an ACL enabled, all\nnodes can accept and use such routes. Configure an ACL to explicitly manage who can use routes.\n\nThe ACL snippet below defines three hosts, a subnet router `router`, a regular node `node` and `service.example.net` as\ninternal service that can be reached via a route on the subnet router `router`. 
\nIt allows the node `node` to access `service.example.net` on ports 80 and 443, which are reachable via the subnet\nrouter. Access to the subnet router itself is denied.\n\n```json title=\"Access the routes of a subnet router without the subnet router itself\"\n{\n  \"hosts\": {\n    // the router is not referenced but announces 192.168.0.0/24\n    \"router\": \"100.64.0.1/32\",\n    \"node\": \"100.64.0.2/32\",\n    \"service.example.net\": \"192.168.0.1/32\"\n  },\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\"node\"],\n      \"dst\": [\"service.example.net:80,443\"]\n    }\n  ]\n}\n```\n\n### Automatically approve routes of a subnet router\n\nThe initial setup of a subnet router usually requires manual approval of its announced routes on the control server\nbefore they can be used by a node in a tailnet. Headscale supports the `autoApprovers` section of an ACL to automate the\napproval of routes served by a subnet router.\n\nThe ACL snippet below defines the tag `tag:router` owned by the user `alice`. This tag is used for `routes` in the\n`autoApprovers` section. The IPv4 route `192.168.0.0/24` is automatically approved once announced by a subnet router\nthat advertises the tag `tag:router`.\n\n```json title=\"Subnet routers tagged with tag:router are automatically approved\"\n{\n  \"tagOwners\": {\n    \"tag:router\": [\"alice@\"]\n  },\n  \"autoApprovers\": {\n    \"routes\": {\n      \"192.168.0.0/24\": [\"tag:router\"]\n    }\n  },\n  \"acls\": [\n    // more rules\n  ]\n}\n```\n\nAdvertise the route `192.168.0.0/24` from a subnet router that also advertises the tag `tag:router` when joining the tailnet:\n\n```console\n$ sudo tailscale up --login-server <YOUR_HEADSCALE_URL> --advertise-tags tag:router --advertise-routes 192.168.0.0/24\n```\n\nPlease see the [official Tailscale documentation](https://tailscale.com/kb/1337/acl-syntax#autoapprovers) for more\ninformation on auto approvers.\n\n## Exit node\n\nThe setup of an exit node requires double opt-in, once from an exit node and once on the control server to allow its use\nwithin the tailnet. Optionally, use [`autoApprovers` to automatically approve an exit\nnode](#automatically-approve-an-exit-node-with-auto-approvers).\n\n### Setup an exit node\n\n#### Configure a node as exit node\n\nRegister a node and make it advertise itself as an exit node:\n\n```console\n$ sudo tailscale up --login-server <YOUR_HEADSCALE_URL> --advertise-exit-node\n```\n\nIf the node is already registered, it can advertise exit capabilities like this:\n\n```console\n$ sudo tailscale set --advertise-exit-node\n```\n\nFinally, [enable IP forwarding](#enable-ip-forwarding) to route traffic.\n\n#### Enable the exit node on the control server\n\nThe routes of a tailnet can be displayed with the `headscale nodes list-routes` command. An exit node can be recognized\nby its announced routes: `0.0.0.0/0` for IPv4 and `::/0` for IPv6. The exit node with the hostname `myexit` is already\navailable, but needs to be approved:\n\n```console\n$ headscale nodes list-routes\nID | Hostname | Approved | Available | Serving (Primary)\n1  | myexit   |          | 0.0.0.0/0 |\n   |          |          | ::/0      |\n```\n\nFor exit nodes, it is sufficient to approve either the IPv4 or IPv6 route.
\nThe other will be approved automatically.\n\n```console\n$ headscale nodes approve-routes --identifier 1 --routes 0.0.0.0/0\nNode updated\n```\n\nThe node `myexit` is now approved as an exit node for the tailnet:\n\n```console\n$ headscale nodes list-routes\nID | Hostname | Approved  | Available | Serving (Primary)\n1  | myexit   | 0.0.0.0/0 | 0.0.0.0/0 | 0.0.0.0/0\n   |          | ::/0      | ::/0      | ::/0\n```\n\n#### Use the exit node\n\nThe exit node can now be used on a node with:\n\n```console\n$ sudo tailscale set --exit-node myexit\n```\n\nPlease refer to the official [Tailscale documentation](https://tailscale.com/kb/1103/exit-nodes#use-the-exit-node) for\nhow to use an exit node on different operating systems.\n\n### Restrict the use of an exit node with ACL\n\nAn exit node is offered to all nodes in a tailnet. By default, without an ACL enabled, all nodes in a tailnet can select\nand use an exit node. Configure `autogroup:internet` in an ACL rule to restrict who can use _any_ of the available exit\nnodes.\n\n```json title=\"Example use of autogroup:internet\"\n{\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\"...\"],\n      \"dst\": [\"autogroup:internet:*\"]\n    }\n  ]\n}\n```\n\n### Restrict access to exit nodes per user or group\n\nA user can use _any_ of the available exit nodes with `autogroup:internet`. Alternatively, the ACL snippet below assigns\neach user a specific exit node while hiding all other exit nodes. The user `alice` can only use the exit node `exit1`,\nwhile user `bob` can only use the exit node `exit2`.\n\n```json title=\"Assign each user a dedicated exit node\"\n{\n  \"hosts\": {\n    \"exit1\": \"100.64.0.1/32\",\n    \"exit2\": \"100.64.0.2/32\"\n  },\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\"alice@\"],\n      \"dst\": [\"exit1:*\"]\n    },\n    {\n      \"action\": \"accept\",\n      \"src\": [\"bob@\"],\n      \"dst\": [\"exit2:*\"]\n    }\n  ]\n}\n```\n\n!!! warning\n\n    - The above implementation is Headscale-specific and will likely be removed once [support for\n      `via`](https://github.com/juanfont/headscale/issues/2409) is available.\n    - Beware that a user can also connect to any port of the exit node itself.\n\n### Automatically approve an exit node with auto approvers\n\nThe initial setup of an exit node usually requires manual approval on the control server before it can be used by a node\nin a tailnet. Headscale supports the `autoApprovers` section of an ACL to automate the approval of a new exit node as\nsoon as it joins the tailnet.\n\nThe ACL snippet below defines the tag `tag:exit` owned by the user `alice`. This tag is used for `exitNode` in the\n`autoApprovers` section. A new exit node that advertises the tag `tag:exit` is automatically approved:\n\n```json title=\"Exit nodes tagged with tag:exit are automatically approved\"\n{\n  \"tagOwners\": {\n    \"tag:exit\": [\"alice@\"]\n  },\n  \"autoApprovers\": {\n    \"exitNode\": [\"tag:exit\"]\n  },\n  \"acls\": [\n    // more rules\n  ]\n}\n```\n\nAdvertise a node as an exit node and also advertise the tag `tag:exit` when joining the tailnet:\n\n```console\n$ sudo tailscale up --login-server <YOUR_HEADSCALE_URL> --advertise-tags tag:exit --advertise-exit-node\n```\n\nPlease see the [official Tailscale documentation](https://tailscale.com/kb/1337/acl-syntax#autoapprovers) for more\ninformation on auto approvers.\n\n## High availability\n\nHeadscale has limited support for high availability routing.
\nMultiple subnet routers with overlapping routes or multiple exit nodes can be used to provide high availability for\nusers. If one router node goes offline, another one can serve the same routes to clients. Please see the official\n[Tailscale documentation on high\navailability](https://tailscale.com/kb/1115/high-availability#subnet-router-high-availability) for details.\n\n!!! bug\n\n    In certain situations it might take up to 16 minutes for Headscale to detect a node as offline. If such a node is\n    used as a subnet router or exit node, a failover node might not be selected fast enough, causing service\n    interruptions for clients. See [issue 2129](https://github.com/juanfont/headscale/issues/2129) for more information.\n\n## Troubleshooting\n\n### Enable IP forwarding\n\nA subnet router or exit node is routing traffic on behalf of other nodes and thus requires IP forwarding. Check the\nofficial [Tailscale documentation](https://tailscale.com/kb/1019/subnets/?tab=linux#enable-ip-forwarding) for how to\nenable IP forwarding.\n"
  },
  {
    "path": "docs/ref/tags.md",
    "content": "# Tags\n\nHeadscale supports Tailscale tags. Please read [Tailscale's tag documentation](https://tailscale.com/kb/1068/tags) to\nlearn how tags work and how to use them.\n\nTags can be applied during [node registration](registration.md):\n\n- using the `--advertise-tags` flag, see [web authentication for tagged devices](registration.md#__tabbed_1_2)\n- using a tagged pre authenticated key, see [how to create and use it](registration.md#__tabbed_2_2)\n\nAdministrators can manage tags with:\n\n- Headscale CLI\n- [Headscale API](api.md)\n\n## Common operations\n\n### Manage tags for a node\n\nRun `headscale nodes list` to list the tags for a node.\n\nUse the `headscale nodes tag` command to modify the tags for a node. At least one tag is required and multiple tags can\nbe provided as comma separated list. The following command sets the tags `tag:server` and `tag:prod` on node with ID 1:\n\n```console\nheadscale nodes tag -i 1 -t tag:server,tag:prod\n```\n\n### Convert from personal to tagged node\n\nUse the `headscale nodes tag` command to convert a personal (user-owned) node to a tagged node:\n\n```console\nheadscale nodes tag -i <NODE_ID> -t <TAG>\n```\n\nThe node is now owned by the special user `tagged-devices` and has the specified tags assigned to it.\n\n### Convert from tagged to personal node\n\nTagged nodes can return to personal (user-owned) nodes by re-authenticating with:\n\n```console\ntailscale up --login-server <YOUR_HEADSCALE_URL> --advertise-tags= --force-reauth\n```\n\nUsually, a browser window with further instructions is opened. This page explains how to complete the registration on\nyour Headscale server and it also prints the Auth ID required to approve the node:\n\n```console\nheadscale auth register --user <USER> --auth-id <AUTH_ID>\n```\n\nAll previously assigned tags get removed and the node is now owned by the user specified in the above command.\n"
  },
  {
    "path": "docs/ref/tls.md",
    "content": "# Running the service via TLS (optional)\n\n## Bring your own certificate\n\nHeadscale can be configured to expose its web service via TLS. To configure the certificate and key file manually, set the `tls_cert_path` and `tls_key_path` configuration parameters. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from.\n\n```yaml title=\"config.yaml\"\ntls_cert_path: \"\"\ntls_key_path: \"\"\n```\n\nThe certificate should contain the full chain, else some clients, like the Tailscale Android client, will reject it.\n\n## Let's Encrypt / ACME\n\nTo get a certificate automatically via [Let's Encrypt](https://letsencrypt.org/), set `tls_letsencrypt_hostname` to the desired certificate hostname. This name must resolve to the IP address(es) headscale is reachable on (i.e., it must correspond to the `server_url` configuration parameter). The certificate and Let's Encrypt account credentials will be stored in the directory configured in `tls_letsencrypt_cache_dir`. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from.\n\n```yaml title=\"config.yaml\"\ntls_letsencrypt_hostname: \"\"\ntls_letsencrypt_listen: \":http\"\ntls_letsencrypt_cache_dir: \".cache\"\ntls_letsencrypt_challenge_type: HTTP-01\n```\n\n### Challenge types\n\nHeadscale only supports two values for `tls_letsencrypt_challenge_type`: `HTTP-01` (default) and `TLS-ALPN-01`.\n\n#### HTTP-01\n\nFor `HTTP-01`, headscale must be reachable on port 80 for the Let's Encrypt automated validation, in addition to whatever port is configured in `listen_addr`. By default, headscale listens on port 80 on all local IPs for Let's Encrypt automated validation.\n\nIf you need to change the ip and/or port used by headscale for the Let's Encrypt validation process, set `tls_letsencrypt_listen` to the appropriate value. This can be handy if you are running headscale as a non-root user (or can't run `setcap`). Keep in mind, however, that Let's Encrypt will _only_ connect to port 80 for the validation callback, so if you change `tls_letsencrypt_listen` you will also need to configure something else (e.g. a firewall rule) to forward the traffic from port 80 to the ip:port combination specified in `tls_letsencrypt_listen`.\n\n#### TLS-ALPN-01\n\nFor `TLS-ALPN-01`, headscale listens on the ip:port combination defined in `listen_addr`. Let's Encrypt will _only_ connect to port 443 for the validation callback, so if `listen_addr` is not set to port 443, something else (e.g. a firewall rule) will be required to forward the traffic from port 443 to the ip:port combination specified in `listen_addr`.\n\n### Technical description\n\nHeadscale uses [autocert](https://pkg.go.dev/golang.org/x/crypto/acme/autocert), a Golang library providing [ACME protocol](https://en.wikipedia.org/wiki/Automatic_Certificate_Management_Environment) verification, to facilitate certificate renewals via [Let's Encrypt](https://letsencrypt.org/about/). 
\nCertificates will be renewed automatically, and the following can be expected:\n\n- Certificates provided by Let's Encrypt have a validity of 3 months from the date issued.\n- Renewals are only attempted by headscale when 30 days or less remain until certificate expiry.\n- Renewal attempts by autocert are triggered at a random interval of 30-60 minutes.\n- No log output is generated when renewals are skipped or successful.\n\n#### Checking certificate expiry\n\nIf you want to validate that certificate renewal completed successfully, this can be done either manually or through external monitoring software. Two examples of doing this manually:\n\n1. Open the URL for your headscale server in your browser of choice and manually inspect the expiry date of the certificate you receive.\n1. Or, check remotely from the CLI using `openssl`:\n\n```console\n$ openssl s_client -servername [hostname] -connect [hostname]:443 | openssl x509 -noout -dates\n(...)\nnotBefore=Feb  8 09:48:26 2024 GMT\nnotAfter=May  8 09:48:25 2024 GMT\n```\n\n#### Log output from the autocert library\n\nAs these log lines are from the autocert library, they are not strictly generated by headscale itself.\n\n```plaintext\nacme/autocert: missing server name\n```\n\nLikely caused by an incoming connection that does not specify a hostname, for example a `curl` request directly against the IP of the server, or an unexpected hostname.\n\n```plaintext\nacme/autocert: host \"[foo]\" not configured in HostWhitelist\n```\n\nSimilar to the above, this likely indicates an invalid incoming request for an incorrect hostname, commonly just the IP itself.\n\nThe source code for autocert can be found [here](https://cs.opensource.google/go/x/crypto/+/refs/tags/v0.19.0:acme/autocert/autocert.go).\n"
  },
  {
    "path": "docs/requirements.txt",
    "content": "mike~=2.1\nmkdocs-include-markdown-plugin~=7.1\nmkdocs-macros-plugin~=1.3\nmkdocs-material[imaging]~=9.5\nmkdocs-minify-plugin~=0.7\nmkdocs-redirects~=1.2\n"
  },
  {
    "path": "docs/setup/install/community.md",
    "content": "# Community packages\n\nSeveral Linux distributions and community members provide packages for headscale. Those packages may be used instead of\nthe [official releases](./official.md) provided by the headscale maintainers. Such packages offer improved integration\nfor their targeted operating system and usually:\n\n- setup a dedicated local user account to run headscale\n- provide a default configuration\n- install headscale as system service\n\n!!! warning \"Community packages might be outdated\"\n\n    The packages mentioned on this page might be outdated or unmaintained. Use the [official releases](./official.md) to\n    get the current stable version or to test pre-releases.\n\n    [![Packaging status](https://repology.org/badge/vertical-allrepos/headscale.svg)](https://repology.org/project/headscale/versions)\n\n## Arch Linux\n\nArch Linux offers a package for headscale, install via:\n\n```shell\npacman -S headscale\n```\n\nThe [AUR package `headscale-git`](https://aur.archlinux.org/packages/headscale-git) can be used to build the current\ndevelopment version.\n\n## Fedora, RHEL, CentOS\n\nA third-party repository for various RPM based distributions is available at:\n<https://copr.fedorainfracloud.org/coprs/jonathanspw/headscale/>. The site provides detailed setup and installation\ninstructions.\n\n## Nix, NixOS\n\nA Nix package is available as: `headscale`. See the [NixOS package site for installation\ndetails](https://search.nixos.org/packages?show=headscale).\n\n## Gentoo\n\n```shell\nemerge --ask net-vpn/headscale\n```\n\nGentoo specific documentation is available [here](https://wiki.gentoo.org/wiki/User:Maffblaster/Drafts/Headscale).\n\n## OpenBSD\n\nHeadscale is available in ports. The port installs headscale as system service with `rc.d` and provides usage\ninstructions upon installation.\n\n```shell\npkg_add headscale\n```\n"
  },
  {
    "path": "docs/setup/install/container.md",
    "content": "# Running headscale in a container\n\n!!! warning \"Community documentation\"\n\n    This page is not actively maintained by the headscale authors and is\n    written by community members. It is _not_ verified by headscale developers.\n\n    **It might be outdated and it might miss necessary steps**.\n\nThis documentation has the goal of showing a user how-to set up and run headscale in a container. A container runtime\nsuch as [Docker](https://www.docker.com) or [Podman](https://podman.io) is required. The container image can be found on\n[Docker Hub](https://hub.docker.com/r/headscale/headscale) and [GitHub Container\nRegistry](https://github.com/juanfont/headscale/pkgs/container/headscale). The container image URLs are:\n\n- [Docker Hub](https://hub.docker.com/r/headscale/headscale): `docker.io/headscale/headscale:<VERSION>`\n- [GitHub Container Registry](https://github.com/juanfont/headscale/pkgs/container/headscale):\n  `ghcr.io/juanfont/headscale:<VERSION>`\n\n## Configure and run headscale\n\n1. Create a directory on the container host to store headscale's [configuration](../../ref/configuration.md) and the [SQLite](https://www.sqlite.org/) database:\n\n    ```shell\n    mkdir -p ./headscale/{config,lib}\n    cd ./headscale\n    ```\n\n1. Download the example configuration for your chosen version and save it as: `$(pwd)/config/config.yaml`. Adjust the\n   configuration to suit your local environment. See [Configuration](../../ref/configuration.md) for details.\n\n1. Start headscale from within the previously created `./headscale` directory:\n\n    ```shell\n    docker run \\\n      --name headscale \\\n      --detach \\\n      --read-only \\\n      --tmpfs /var/run/headscale \\\n      --volume \"$(pwd)/config:/etc/headscale:ro\" \\\n      --volume \"$(pwd)/lib:/var/lib/headscale\" \\\n      --publish 127.0.0.1:8080:8080 \\\n      --publish 127.0.0.1:9090:9090 \\\n      --health-cmd \"CMD headscale health\" \\\n      docker.io/headscale/headscale:<VERSION> \\\n      serve\n    ```\n\n    Note: use `0.0.0.0:8080:8080` instead of `127.0.0.1:8080:8080` if you want to expose the container externally.\n\n    This command mounts the local directories inside the container, forwards port 8080 and 9090 out of the container so\n    the headscale instance becomes available and then detaches so headscale runs in the background.\n\n    A similar configuration for `docker-compose`:\n\n    ```yaml title=\"docker-compose.yaml\"\n    services:\n      headscale:\n        image: docker.io/headscale/headscale:<VERSION>\n        restart: unless-stopped\n        container_name: headscale\n        read_only: true\n        tmpfs:\n          - /var/run/headscale\n        ports:\n          - \"127.0.0.1:8080:8080\"\n          - \"127.0.0.1:9090:9090\"\n        volumes:\n          # Please set <HEADSCALE_PATH> to the absolute path\n          # of the previously created headscale directory.\n          - <HEADSCALE_PATH>/config:/etc/headscale:ro\n          - <HEADSCALE_PATH>/lib:/var/lib/headscale\n        command: serve\n        healthcheck:\n            test: [\"CMD\", \"headscale\", \"health\"]\n    ```\n\n1. 
\n\n    Follow the container logs:\n\n    ```shell\n    docker logs --follow headscale\n    ```\n\n    Verify running containers:\n\n    ```shell\n    docker ps\n    ```\n\n    Verify headscale is available:\n\n    ```shell\n    curl http://127.0.0.1:8080/health\n    ```\n\nContinue on the [getting started page](../../usage/getting-started.md) to register your first machine.\n\n## Debugging headscale running in Docker\n\nThe headscale container image is based on a \"distroless\" image that does not contain a shell or any other debug tools. If you need to debug headscale running in the Docker container, you can use the `-debug` variant, for example `docker.io/headscale/headscale:x.x.x-debug`.\n\n### Running the debug Docker container\n\nTo run the debug Docker container, use the exact same commands as above, but replace `docker.io/headscale/headscale:x.x.x` with `docker.io/headscale/headscale:x.x.x-debug` (`x.x.x` is the version of headscale). The two containers are compatible with each other, so you can alternate between them.\n\n### Executing commands in the debug container\n\nThe default command in the debug container is to run `headscale`, which is located at `/ko-app/headscale` inside the container.\n\nAdditionally, the debug container includes a minimalist Busybox shell.\n\nTo launch a shell in the container, use:\n\n```shell\ndocker run -it docker.io/headscale/headscale:x.x.x-debug sh\n```\n\nYou can also execute commands directly, such as `ls /ko-app` in this example:\n\n```shell\ndocker run docker.io/headscale/headscale:x.x.x-debug ls /ko-app\n```\n\nUsing `docker exec -it` allows you to run commands in an existing container.\n"
  },
  {
    "path": "docs/setup/install/official.md",
    "content": "# Official releases\n\nOfficial releases for headscale are available as binaries for various platforms and DEB packages for Debian and Ubuntu.\nBoth are available on the [GitHub releases page](https://github.com/juanfont/headscale/releases).\n\n## Using packages for Debian/Ubuntu (recommended)\n\nIt is recommended to use our DEB packages to install headscale on a Debian based system as those packages configure a\nlocal user to run headscale, provide a default configuration and ship with a systemd service file. Supported\ndistributions are Ubuntu 22.04 or newer, Debian 12 or newer.\n\n1. Download the [latest headscale package](https://github.com/juanfont/headscale/releases/latest) for your platform (`.deb` for Ubuntu and Debian).\n\n    ```shell\n    HEADSCALE_VERSION=\"\" # See above URL for latest version, e.g. \"X.Y.Z\" (NOTE: do not add the \"v\" prefix!)\n    HEADSCALE_ARCH=\"\" # Your system architecture, e.g. \"amd64\"\n    wget --output-document=headscale.deb \\\n     \"https://github.com/juanfont/headscale/releases/download/v${HEADSCALE_VERSION}/headscale_${HEADSCALE_VERSION}_linux_${HEADSCALE_ARCH}.deb\"\n    ```\n\n1. Install headscale:\n\n    ```shell\n    sudo apt install ./headscale.deb\n    ```\n\n1. [Configure headscale by editing the configuration file](../../ref/configuration.md):\n\n    ```shell\n    sudo nano /etc/headscale/config.yaml\n    ```\n\n1. Enable and start the headscale service:\n\n    ```shell\n    sudo systemctl enable --now headscale\n    ```\n\n1. Verify that headscale is running as intended:\n\n    ```shell\n    sudo systemctl status headscale\n    ```\n\nContinue on the [getting started page](../../usage/getting-started.md) to register your first machine.\n\n## Using standalone binaries (advanced)\n\n!!! warning \"Advanced\"\n\n    This installation method is considered advanced as one needs to take care of the local user and the systemd\n    service themselves. If possible, use the [DEB packages](#using-packages-for-debianubuntu-recommended) or a\n    [community package](./community.md) instead.\n\nThis section describes the installation of headscale according to the [Requirements and\nassumptions](../requirements.md#assumptions). Headscale is run by a dedicated local user and the service itself is\nmanaged by systemd.\n\n1. Download the latest [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases):\n\n    ```shell\n    sudo wget --output-document=/usr/bin/headscale \\\n    https://github.com/juanfont/headscale/releases/download/v<HEADSCALE VERSION>/headscale_<HEADSCALE VERSION>_linux_<ARCH>\n    ```\n\n1. Make `headscale` executable:\n\n    ```shell\n    sudo chmod +x /usr/bin/headscale\n    ```\n\n1. Add a dedicated local user to run headscale:\n\n    ```shell\n    sudo useradd \\\n     --create-home \\\n     --home-dir /var/lib/headscale/ \\\n     --system \\\n     --user-group \\\n     --shell /usr/sbin/nologin \\\n     headscale\n    ```\n\n1. Download the example configuration for your chosen version and save it as: `/etc/headscale/config.yaml`. Adjust the\n   configuration to suit your local environment. See [Configuration](../../ref/configuration.md) for details.\n\n    ```shell\n    sudo mkdir -p /etc/headscale\n    sudo nano /etc/headscale/config.yaml\n    ```\n\n1. Copy [headscale's systemd service file](https://github.com/juanfont/headscale/blob/main/packaging/systemd/headscale.service)\n   to `/etc/systemd/system/headscale.service` and adjust it to suit your local setup. 
\n\n1. In `/etc/headscale/config.yaml`, override the default `headscale` unix socket with a path that is writable by the\n   `headscale` user or group:\n\n    ```yaml title=\"config.yaml\"\n    unix_socket: /var/run/headscale/headscale.sock\n    ```\n\n1. Reload systemd to load the new configuration file:\n\n    ```shell\n    sudo systemctl daemon-reload\n    ```\n\n1. Enable and start the new headscale service:\n\n    ```shell\n    sudo systemctl enable --now headscale\n    ```\n\n1. Verify that headscale is running as intended:\n\n    ```shell\n    sudo systemctl status headscale\n    ```\n\nContinue on the [getting started page](../../usage/getting-started.md) to register your first machine.\n"
  },
  {
    "path": "docs/setup/install/source.md",
    "content": "# Build from source\n\n!!! warning \"Community documentation\"\n\n    This page is not actively maintained by the headscale authors and is\n    written by community members. It is _not_ verified by headscale developers.\n\n    **It might be outdated and it might miss necessary steps**.\n\nHeadscale can be built from source using the latest version of [Go](https://golang.org) and [Buf](https://buf.build)\n(Protobuf generator). See the [Contributing section in the GitHub\nREADME](https://github.com/juanfont/headscale#contributing) for more information.\n\n## OpenBSD\n\n### Install from source\n\n```shell\n# Install prerequisites\npkg_add go git\n\ngit clone https://github.com/juanfont/headscale.git\n\ncd headscale\n\n# optionally checkout a release\n# option a. you can find official release at https://github.com/juanfont/headscale/releases/latest\n# option b. get latest tag, this may be a beta release\nlatestTag=$(git describe --tags `git rev-list --tags --max-count=1`)\n\ngit checkout $latestTag\n\ngo build -ldflags=\"-s -w -X github.com/juanfont/headscale/hscontrol/types.Version=$latestTag\" -X github.com/juanfont/headscale/hscontrol/types.GitCommitHash=HASH\" github.com/juanfont/headscale\n\n# make it executable\nchmod a+x headscale\n\n# copy it to /usr/local/sbin\ncp headscale /usr/local/sbin\n```\n\n### Install from source via cross compile\n\n```shell\n# Install prerequisites\n# 1. go v1.20+: headscale newer than 0.21 needs go 1.20+ to compile\n# 2. gmake: Makefile in the headscale repo is written in GNU make syntax\n\ngit clone https://github.com/juanfont/headscale.git\n\ncd headscale\n\n# optionally checkout a release\n# option a. you can find official release at https://github.com/juanfont/headscale/releases/latest\n# option b. get latest tag, this may be a beta release\nlatestTag=$(git describe --tags `git rev-list --tags --max-count=1`)\n\ngit checkout $latestTag\n\nmake build GOOS=openbsd\n\n# copy headscale to openbsd machine and put it in /usr/local/sbin\n```\n"
  },
  {
    "path": "docs/setup/requirements.md",
    "content": "# Requirements\n\nHeadscale should just work as long as the following requirements are met:\n\n- A server with a public IP address for headscale. A dual-stack setup with a public IPv4 and a public IPv6 address is\n  recommended.\n- Headscale is served via HTTPS on port 443[^1] and [may use additional ports](#ports-in-use).\n- A reasonably modern Linux or BSD based operating system.\n- A dedicated local user account to run headscale.\n- A little bit of command line knowledge to configure and operate headscale.\n\n## Ports in use\n\nThe ports in use vary with the intended scenario and enabled features. Some of the listed ports may be changed via the\n[configuration file](../ref/configuration.md) but we recommend to stick with the default values.\n\n- tcp/80\n    - Expose publicly: yes\n    - HTTP, used by Let's Encrypt to verify ownership via the HTTP-01 challenge.\n    - Only required if the built-in Let's Enrypt client with the HTTP-01 challenge is used. See [TLS](../ref/tls.md) for\n      details.\n- tcp/443\n    - Expose publicly: yes\n    - HTTPS, required to make Headscale available to Tailscale clients[^1]\n    - Required if the [embedded DERP server](../ref/derp.md) is enabled\n- udp/3478\n    - Expose publicly: yes\n    - STUN, required if the [embedded DERP server](../ref/derp.md) is enabled\n- tcp/50443\n    - Expose publicly: yes\n    - Only required if the gRPC interface is used to [remote-control Headscale](../ref/api.md#grpc).\n- tcp/9090\n    - Expose publicly: no\n    - [Metrics and debug endpoint](../ref/debug.md#metrics-and-debug-endpoint)\n\n## Assumptions\n\nThe headscale documentation and the provided examples are written with a few assumptions in mind:\n\n- Headscale is running as system service via a dedicated local user `headscale`.\n- The [configuration](../ref/configuration.md) is loaded from `/etc/headscale/config.yaml`.\n- SQLite is used as database.\n- The data directory for headscale (used for private keys, ACLs, SQLite database, …) is located in `/var/lib/headscale`.\n- URLs and values that need to be replaced by the user are either denoted as `<VALUE_TO_CHANGE>` or use placeholder\n  values such as `headscale.example.com`.\n\nPlease adjust to your local environment accordingly.\n\n[^1]: The Tailscale client assumes HTTPS on port 443 in certain situations. Serving headscale either via HTTP or via\n    HTTPS on a port other than 443 is possible but sticking with HTTPS on port 443 is strongly recommended for\n    production setups. See [issue 2164](https://github.com/juanfont/headscale/issues/2164) for more information.\n"
  },
  {
    "path": "docs/setup/upgrade.md",
    "content": "# Upgrade an existing installation\n\n!!! tip \"Required update path\"\n\n    Its required to update from one stable version to the next (e.g. 0.26.0 → 0.27.1 → 0.28.0) without skipping minor\n    versions in between. You should always pick the latest available patch release.\n\nUpdate an existing Headscale installation to a new version:\n\n- Read the announcement on the [GitHub releases](https://github.com/juanfont/headscale/releases) page for the new\n  version. It lists the changes of the release along with possible breaking changes and version-specific upgrade\n  instructions.\n- Stop Headscale\n- **[Create a backup of your installation](#backup)**\n- Update Headscale to the new version, preferably by following the same installation method.\n- Compare and update the [configuration](../ref/configuration.md) file.\n- Start Headscale\n\n## Backup\n\nHeadscale applies database migrations during upgrades and we highly recommend to create a backup of your database before\nupgrading. A full backup of Headscale depends on your individual setup, but below are some typical setup scenarios.\n\n=== \"Standard installation\"\n\n    A installation that follows our [official releases](install/official.md) setup guide uses the following paths:\n\n    - [Configuration file](../ref/configuration.md): `/etc/headscale/config.yaml`\n    - Data directory: `/var/lib/headscale`\n    - SQLite as database: `/var/lib/headscale/db.sqlite`\n\n    ```console\n    TIMESTAMP=$(date +%Y%m%d%H%M%S)\n    cp -aR /etc/headscale /etc/headscale.backup-$TIMESTAMP\n    cp -aR /var/lib/headscale /var/lib/headscale.backup-$TIMESTAMP\n    ```\n\n=== \"Container\"\n\n    A installation that follows our [container](install/container.md) setup guide uses a single source volume directory\n    that contains the configuration file, data directory and the SQLite database.\n\n    ```console\n    cp -aR /path/to/headscale /path/to/headscale.backup-$(date +%Y%m%d%H%M%S)\n    ```\n\n=== \"PostgreSQL\"\n\n    Please follow PostgreSQL's [Backup and Restore](https://www.postgresql.org/docs/current/backup.html) documentation\n    to create a backup of your PostgreSQL database.\n"
  },
  {
    "path": "docs/usage/connect/android.md",
    "content": "# Connecting an Android client\n\nThis documentation has the goal of showing how a user can use the official Android [Tailscale](https://tailscale.com) client with headscale.\n\n## Installation\n\nInstall the official Tailscale Android client from the [Google Play Store](https://play.google.com/store/apps/details?id=com.tailscale.ipn) or [F-Droid](https://f-droid.org/packages/com.tailscale.ipn/).\n\n## Connect via web authentication\n\n- Open the app and select the settings menu in the upper-right corner\n- Tap on `Accounts`\n- In the kebab menu icon (three dots) in the upper-right corner select `Use an alternate server`\n- Enter your server URL (e.g `https://headscale.example.com`) and follow the instructions\n- The client connects automatically as soon as the node registration is complete on headscale. Until then, nothing is\n  visible in the server logs.\n\n## Connect using a pre authenticated key\n\n- Open the app and select the settings menu in the upper-right corner\n- Tap on `Accounts`\n- In the kebab menu icon (three dots) in the upper-right corner select `Use an alternate server`\n- Enter your server URL (e.g `https://headscale.example.com`). If login prompts open, close it and continue\n- Open the settings menu in the upper-right corner\n- Tap on `Accounts`\n- In the kebab menu icon (three dots) in the upper-right corner select `Use an auth key`\n- Enter your [preauthkey generated from headscale](../../ref/registration.md#pre-authenticated-key)\n- If needed, tap `Log in` on the main screen. You should now be connected to your headscale.\n"
  },
  {
    "path": "docs/usage/connect/apple.md",
    "content": "# Connecting an Apple client\n\nThis documentation has the goal of showing how a user can use the official iOS and macOS [Tailscale](https://tailscale.com) clients with headscale.\n\n!!! info \"Instructions on your headscale instance\"\n\n    An endpoint with information on how to connect your Apple device\n    is also available at `/apple` on your running instance.\n\n## iOS\n\n### Installation\n\nInstall the official Tailscale iOS client from the [App Store](https://apps.apple.com/app/tailscale/id1470499037).\n\n### Configuring the headscale URL\n\n- Open the Tailscale app\n- Click the account icon in the top-right corner and select `Log in…`.\n- Tap the top-right options menu button and select `Use custom coordination server`.\n- Enter your instance url (e.g `https://headscale.example.com`)\n- Enter your credentials and log in. Headscale should now be working on your iOS device.\n\n## macOS\n\n### Installation\n\nChoose one of the available [Tailscale clients for macOS](https://tailscale.com/kb/1065/macos-variants) and install it.\n\n### Configuring the headscale URL\n\n#### Command line\n\nUse Tailscale's login command to connect with your headscale instance (e.g `https://headscale.example.com`):\n\n```\ntailscale login --login-server <YOUR_HEADSCALE_URL>\n```\n\n#### GUI\n\n- Option + Click the Tailscale icon in the menu and hover over the Debug menu\n- Under `Custom Login Server`, select `Add Account...`\n- Enter the URL of your headscale instance (e.g `https://headscale.example.com`) and press `Add Account`\n- Follow the login procedure in the browser\n\n## tvOS\n\n### Installation\n\nInstall the official Tailscale tvOS client from the [App Store](https://apps.apple.com/app/tailscale/id1470499037).\n\n!!! danger\n\n    **Don't** open the Tailscale App after installation!\n\n### Configuring the headscale URL\n\n- Open Settings (the Apple tvOS settings) > Apps > Tailscale\n- Under `ALTERNATE COORDINATION SERVER URL`, select `URL`\n- Enter the URL of your headscale instance (e.g `https://headscale.example.com`) and press `OK`\n- Return to the tvOS Home screen\n- Open Tailscale\n- Click the button `Install VPN configuration` and confirm the appearing popup by clicking the `Allow` button\n- Scan the QR code and follow the login procedure\n"
  },
  {
    "path": "docs/usage/connect/windows.md",
    "content": "# Connecting a Windows client\n\nThis documentation has the goal of showing how a user can use the official Windows [Tailscale](https://tailscale.com) client with headscale.\n\n!!! info \"Instructions on your headscale instance\"\n\n    An endpoint with information on how to connect your Windows device\n    is also available at `/windows` on your running instance.\n\n## Installation\n\nDownload the [Official Windows Client](https://tailscale.com/download/windows) and install it.\n\n## Configuring the headscale URL\n\nOpen a Command Prompt or Powershell and use Tailscale's login command to connect with your headscale instance (e.g\n`https://headscale.example.com`):\n\n```\ntailscale login --login-server <YOUR_HEADSCALE_URL>\n```\n\nFollow the instructions in the opened browser window to finish the configuration.\n\n## Troubleshooting\n\n### Unattended mode\n\nBy default, Tailscale's Windows client is only running when the user is logged in. If you want to keep Tailscale running\nall the time, please enable \"Unattended mode\":\n\n- Click on the Tailscale tray icon and select `Preferences`\n- Enable `Run unattended`\n- Confirm the \"Unattended mode\" message\n\nSee also [Keep Tailscale running when I'm not logged in to my computer](https://tailscale.com/kb/1088/run-unattended)\n\n### Failing node registration\n\nIf you are seeing repeated messages like:\n\n```\n[GIN] 2022/02/10 - 16:39:34 | 200 |    1.105306ms |       127.0.0.1 | POST     \"/machine/redacted\"\n```\n\nin your headscale output, turn on `DEBUG` logging and look for:\n\n```\n2022-02-11T00:59:29Z DBG Machine registration has expired. Sending a authurl to register machine=redacted\n```\n\nThis typically means that the registry keys above was not set appropriately.\n\nTo reset and try again, it is important to do the following:\n\n1. Shut down the Tailscale service (or the client running in the tray)\n1. Delete Tailscale Application data folder, located at `C:\\Users\\<USERNAME>\\AppData\\Local\\Tailscale` and try to connect again.\n1. Ensure the Windows node is deleted from headscale (to ensure fresh setup)\n1. Start Tailscale on the Windows machine and retry the login.\n"
  },
  {
    "path": "docs/usage/getting-started.md",
    "content": "# Getting started\n\nThis page helps you get started with headscale and provides a few usage examples for the headscale command line tool\n`headscale`.\n\n!!! note \"Prerequisites\"\n\n    - Headscale is installed and running as system service. Read the [setup section](../setup/requirements.md) for\n      installation instructions.\n    - The configuration file exists and is adjusted to suit your environment, see\n      [Configuration](../ref/configuration.md) for details.\n    - Headscale is reachable from the Internet. Verify this by visiting the health endpoint:\n      https://headscale.example.com/health\n    - The Tailscale client is installed, see [Client and operating system support](../about/clients.md) for more\n      information.\n\n## Getting help\n\nThe `headscale` command line tool provides built-in help. To show available commands along with their arguments and\noptions, run:\n\n=== \"Native\"\n\n    ```shell\n    # Show help\n    headscale help\n\n    # Show help for a specific command\n    headscale <COMMAND> --help\n    ```\n\n=== \"Container\"\n\n    ```shell\n    # Show help\n    docker exec -it headscale \\\n      headscale help\n\n    # Show help for a specific command\n    docker exec -it headscale \\\n      headscale <COMMAND> --help\n    ```\n\n!!! note \"Manage headscale from another local user\"\n\n    By default only the user `headscale` or `root` will have the necessary permissions to access the unix socket\n    (`/var/run/headscale/headscale.sock`) that is used to communicate with the service. In order to be able to\n    communicate with the headscale service you have to make sure the unix socket is accessible by the user that runs\n    the commands. In general you can achieve this by any of the following methods:\n\n    - using `sudo`\n    - run the commands as user `headscale`\n    - add your user to the `headscale` group\n\n    To verify you can run the following command using your preferred method:\n\n    ```shell\n    headscale users list\n    ```\n\n## Manage headscale users\n\nIn headscale, a node (also known as machine or device) is [typically assigned to a headscale\nuser](../ref/registration.md#identity-model). Such a headscale user may have many nodes assigned to them and can be\nmanaged with the `headscale users` command. Invoke the built-in help for more information: `headscale users --help`.\n\n### Create a headscale user\n\n=== \"Native\"\n\n    ```shell\n    headscale users create <USER>\n    ```\n\n=== \"Container\"\n\n    ```shell\n    docker exec -it headscale \\\n      headscale users create <USER>\n    ```\n\n### List existing headscale users\n\n=== \"Native\"\n\n    ```shell\n    headscale users list\n    ```\n\n=== \"Container\"\n\n    ```shell\n    docker exec -it headscale \\\n      headscale users list\n    ```\n\n## Register a node\n\nOne has to [register a node](../ref/registration.md) first to use headscale as coordination server with Tailscale. The\nfollowing examples work for the Tailscale client on Linux/BSD operating systems. Alternatively, follow the instructions\nto connect [Android](connect/android.md), [Apple](connect/apple.md) or [Windows](connect/windows.md) devices. 
Read\n[registration methods](../ref/registration.md) for an overview of available registration methods.\n\n### [Web authentication](../ref/registration.md#web-authentication)\n\nOn a client machine, run the `tailscale up` command and provide the FQDN of your headscale instance as argument:\n\n```shell\ntailscale up --login-server <YOUR_HEADSCALE_URL>\n```\n\nUsually, a browser window with further instructions is opened. This page explains how to complete the registration on\nyour headscale server and it also prints the Auth ID required to approve the node:\n\n=== \"Native\"\n\n    ```shell\n    headscale auth register --user <USER> --auth-id <AUTH_ID>\n    ```\n\n=== \"Container\"\n\n    ```shell\n    docker exec -it headscale \\\n      headscale auth register --user <USER> --auth-id <AUTH_ID>\n    ```\n\n### [Pre authenticated key](../ref/registration.md#pre-authenticated-key)\n\nIt is also possible to generate a preauthkey and register a node non-interactively. First, generate a preauthkey on the\nheadscale instance. By default, the key is valid for one hour and can only be used once (see `headscale preauthkeys --help` for other options):\n\n=== \"Native\"\n\n    ```shell\n    headscale preauthkeys create --user <USER_ID>\n    ```\n\n=== \"Container\"\n\n    ```shell\n    docker exec -it headscale \\\n      headscale preauthkeys create --user <USER_ID>\n    ```\n\nThe command returns the preauthkey on success which is used to connect a node to the headscale instance via the\n`tailscale up` command:\n\n```shell\ntailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>\n```\n"
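\nBoth the validity period and the single-use restriction can be changed when the key is created. As an illustration, a reusable preauthkey that stays valid for 24 hours might be created with (flag names as listed by `headscale preauthkeys --help`):\n\n```shell\nheadscale preauthkeys create --user <USER_ID> --reusable --expiration 24h\n```\n"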
  },
  {
    "path": "flake.nix",
    "content": "{\n  description = \"headscale - Open Source Tailscale Control server\";\n\n  inputs = {\n    nixpkgs.url = \"github:NixOS/nixpkgs/nixpkgs-unstable\";\n    flake-utils.url = \"github:numtide/flake-utils\";\n  };\n\n  outputs =\n    { self\n    , nixpkgs\n    , flake-utils\n    , ...\n    }:\n    let\n      headscaleVersion = self.shortRev or self.dirtyShortRev;\n      commitHash = self.rev or self.dirtyRev;\n    in\n    {\n      # NixOS module\n      nixosModules = rec {\n        headscale = import ./nix/module.nix;\n        default = headscale;\n      };\n\n      overlays.default = _: prev:\n        let\n          pkgs = nixpkgs.legacyPackages.${prev.stdenv.hostPlatform.system};\n          buildGo = pkgs.buildGo126Module;\n          vendorHash = \"sha256-jom1279Lx2Knff93rfoEgGeBBk+EjJO7GAkaQYlchgY=\";\n        in\n        {\n          headscale = buildGo {\n            pname = \"headscale\";\n            version = headscaleVersion;\n            src = pkgs.lib.cleanSource self;\n\n            # Only run unit tests when testing a build\n            checkFlags = [ \"-short\" ];\n\n            # When updating go.mod or go.sum, a new sha will need to be calculated,\n            # update this if you have a mismatch after doing a change to those files.\n            inherit vendorHash;\n\n            subPackages = [ \"cmd/headscale\" ];\n\n            meta = {\n              mainProgram = \"headscale\";\n            };\n          };\n\n          hi = buildGo {\n            pname = \"hi\";\n            version = headscaleVersion;\n            src = pkgs.lib.cleanSource self;\n\n            checkFlags = [ \"-short\" ];\n            inherit vendorHash;\n\n            subPackages = [ \"cmd/hi\" ];\n          };\n\n          protoc-gen-grpc-gateway = buildGo rec {\n            pname = \"grpc-gateway\";\n            version = \"2.27.7\";\n\n            src = pkgs.fetchFromGitHub {\n              owner = \"grpc-ecosystem\";\n              repo = \"grpc-gateway\";\n              rev = \"v${version}\";\n              sha256 = \"sha256-6R0EhNnOBEISJddjkbVTcBvUuU5U3r9Hu2UPfAZDep4=\";\n            };\n\n            vendorHash = \"sha256-SOAbRrzMf2rbKaG9PGSnPSLY/qZVgbHcNjOLmVonycY=\";\n\n            nativeBuildInputs = [ pkgs.installShellFiles ];\n\n            subPackages = [ \"protoc-gen-grpc-gateway\" \"protoc-gen-openapiv2\" ];\n          };\n\n          protobuf-language-server = buildGo rec {\n            pname = \"protobuf-language-server\";\n            version = \"1cf777d\";\n\n            src = pkgs.fetchFromGitHub {\n              owner = \"lasorda\";\n              repo = \"protobuf-language-server\";\n              rev = \"1cf777de4d35a6e493a689e3ca1a6183ce3206b6\";\n              sha256 = \"sha256-9MkBQPxr/TDr/sNz/Sk7eoZwZwzdVbE5u6RugXXk5iY=\";\n            };\n\n            vendorHash = \"sha256-4nTpKBe7ekJsfQf+P6edT/9Vp2SBYbKz1ITawD3bhkI=\";\n\n            subPackages = [ \".\" ];\n          };\n\n          # Build golangci-lint with Go 1.26 (upstream uses hardcoded Go version)\n          golangci-lint = buildGo rec {\n            pname = \"golangci-lint\";\n            version = \"2.9.0\";\n\n            src = pkgs.fetchFromGitHub {\n              owner = \"golangci\";\n              repo = \"golangci-lint\";\n              rev = \"v${version}\";\n              hash = \"sha256-8LEtm1v0slKwdLBtS41OilKJLXytSxcI9fUlZbj5Gfw=\";\n            };\n\n            vendorHash = \"sha256-w8JfF6n1ylrU652HEv/cYdsOdDZz9J2uRQDqxObyhkY=\";\n\n            subPackages = [ \"cmd/golangci-lint\" 
];\n\n            nativeBuildInputs = [ pkgs.installShellFiles ];\n\n            ldflags = [\n              \"-s\"\n              \"-w\"\n              \"-X main.version=${version}\"\n              \"-X main.commit=v${version}\"\n              \"-X main.date=1970-01-01T00:00:00Z\"\n            ];\n\n            postInstall = ''\n              for shell in bash zsh fish; do\n                HOME=$TMPDIR $out/bin/golangci-lint completion $shell > golangci-lint.$shell\n                installShellCompletion golangci-lint.$shell\n              done\n            '';\n\n            meta = {\n              description = \"Fast linters runner for Go\";\n              homepage = \"https://golangci-lint.run/\";\n              changelog = \"https://github.com/golangci/golangci-lint/blob/v${version}/CHANGELOG.md\";\n              mainProgram = \"golangci-lint\";\n            };\n          };\n\n          gotestsum = prev.gotestsum.override {\n            buildGoModule = buildGo;\n          };\n\n          gotests = prev.gotests.override {\n            buildGoModule = buildGo;\n          };\n\n          gofumpt = prev.gofumpt.override {\n            buildGoModule = buildGo;\n          };\n\n          gopls = prev.gopls.override {\n            buildGoLatestModule = buildGo;\n          };\n        };\n    }\n    // flake-utils.lib.eachDefaultSystem\n      (system:\n      let\n        pkgs = import nixpkgs {\n          overlays = [ self.overlays.default ];\n          inherit system;\n        };\n        buildDeps = with pkgs; [ git go_1_26 gnumake ];\n        devDeps = with pkgs;\n          buildDeps\n          ++ [\n            golangci-lint\n            golangci-lint-langserver\n            golines\n            nodePackages.prettier\n            nixpkgs-fmt\n            goreleaser\n            nfpm\n            gotestsum\n            gotests\n            gofumpt\n            gopls\n            ksh\n            ko\n            yq-go\n            ripgrep\n            postgresql\n            python314Packages.mdformat\n            python314Packages.mdformat-footnote\n            python314Packages.mdformat-frontmatter\n            python314Packages.mdformat-mkdocs\n            prek\n\n            # 'dot' is needed for pprof graphs\n            # go tool pprof -http=: <source>\n            graphviz\n\n            # Protobuf dependencies\n            protobuf\n            protoc-gen-go\n            protoc-gen-go-grpc\n            protoc-gen-grpc-gateway\n            buf\n            clang-tools # clang-format\n            protobuf-language-server\n          ]\n          ++ lib.optional pkgs.stdenv.isLinux [ traceroute ];\n\n        # Add entry to build a docker image with headscale\n        # caveat: only works on Linux\n        #\n        # Usage:\n        # nix build .#headscale-docker\n        # docker load < result\n        headscale-docker = pkgs.dockerTools.buildLayeredImage {\n          name = \"headscale\";\n          tag = headscaleVersion;\n          contents = [ pkgs.headscale ];\n          config.Entrypoint = [ (pkgs.headscale + \"/bin/headscale\") ];\n        };\n      in\n      {\n        # `nix develop`\n        devShells.default = pkgs.mkShell {\n          buildInputs =\n            devDeps\n            ++ [\n              (pkgs.writeShellScriptBin\n                \"nix-vendor-sri\"\n                ''\n                  set -eu\n\n                  OUT=$(mktemp -d -t nar-hash-XXXXXX)\n                  rm -rf \"$OUT\"\n\n                  go mod vendor -o \"$OUT\"\n                  go run 
tailscale.com/cmd/nardump --sri \"$OUT\"\n                  rm -rf \"$OUT\"\n                '')\n\n              (pkgs.writeShellScriptBin\n                \"go-mod-update-all\"\n                ''\n                  cat go.mod | ${pkgs.silver-searcher}/bin/ag \"\\t\" | ${pkgs.silver-searcher}/bin/ag -v indirect | ${pkgs.gawk}/bin/awk '{print $1}' | ${pkgs.findutils}/bin/xargs go get -u\n                  go mod tidy\n                '')\n            ];\n\n          shellHook = ''\n            export PATH=\"$PWD/result/bin:$PATH\"\n            export CGO_ENABLED=0\n          '';\n        };\n\n        # `nix build`\n        packages = with pkgs; {\n          inherit headscale;\n          inherit headscale-docker;\n          default = headscale;\n        };\n\n        # `nix run`\n        apps.headscale = flake-utils.lib.mkApp {\n          drv = pkgs.headscale;\n        };\n        apps.default = flake-utils.lib.mkApp {\n          drv = pkgs.headscale;\n        };\n\n        checks = {\n          headscale = pkgs.testers.nixosTest (import ./nix/tests/headscale.nix);\n        };\n      });\n}\n"
  },
  {
    "path": "gen/go/headscale/v1/apikey.pb.go",
    "content": "// Code generated by protoc-gen-go. DO NOT EDIT.\n// versions:\n// \tprotoc-gen-go v1.36.11\n// \tprotoc        (unknown)\n// source: headscale/v1/apikey.proto\n\npackage v1\n\nimport (\n\tprotoreflect \"google.golang.org/protobuf/reflect/protoreflect\"\n\tprotoimpl \"google.golang.org/protobuf/runtime/protoimpl\"\n\ttimestamppb \"google.golang.org/protobuf/types/known/timestamppb\"\n\treflect \"reflect\"\n\tsync \"sync\"\n\tunsafe \"unsafe\"\n)\n\nconst (\n\t// Verify that this generated code is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)\n\t// Verify that runtime/protoimpl is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)\n)\n\ntype ApiKey struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tId            uint64                 `protobuf:\"varint,1,opt,name=id,proto3\" json:\"id,omitempty\"`\n\tPrefix        string                 `protobuf:\"bytes,2,opt,name=prefix,proto3\" json:\"prefix,omitempty\"`\n\tExpiration    *timestamppb.Timestamp `protobuf:\"bytes,3,opt,name=expiration,proto3\" json:\"expiration,omitempty\"`\n\tCreatedAt     *timestamppb.Timestamp `protobuf:\"bytes,4,opt,name=created_at,json=createdAt,proto3\" json:\"created_at,omitempty\"`\n\tLastSeen      *timestamppb.Timestamp `protobuf:\"bytes,5,opt,name=last_seen,json=lastSeen,proto3\" json:\"last_seen,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *ApiKey) Reset() {\n\t*x = ApiKey{}\n\tmi := &file_headscale_v1_apikey_proto_msgTypes[0]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *ApiKey) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*ApiKey) ProtoMessage() {}\n\nfunc (x *ApiKey) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_apikey_proto_msgTypes[0]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use ApiKey.ProtoReflect.Descriptor instead.\nfunc (*ApiKey) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_apikey_proto_rawDescGZIP(), []int{0}\n}\n\nfunc (x *ApiKey) GetId() uint64 {\n\tif x != nil {\n\t\treturn x.Id\n\t}\n\treturn 0\n}\n\nfunc (x *ApiKey) GetPrefix() string {\n\tif x != nil {\n\t\treturn x.Prefix\n\t}\n\treturn \"\"\n}\n\nfunc (x *ApiKey) GetExpiration() *timestamppb.Timestamp {\n\tif x != nil {\n\t\treturn x.Expiration\n\t}\n\treturn nil\n}\n\nfunc (x *ApiKey) GetCreatedAt() *timestamppb.Timestamp {\n\tif x != nil {\n\t\treturn x.CreatedAt\n\t}\n\treturn nil\n}\n\nfunc (x *ApiKey) GetLastSeen() *timestamppb.Timestamp {\n\tif x != nil {\n\t\treturn x.LastSeen\n\t}\n\treturn nil\n}\n\ntype CreateApiKeyRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tExpiration    *timestamppb.Timestamp `protobuf:\"bytes,1,opt,name=expiration,proto3\" json:\"expiration,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *CreateApiKeyRequest) Reset() {\n\t*x = CreateApiKeyRequest{}\n\tmi := &file_headscale_v1_apikey_proto_msgTypes[1]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *CreateApiKeyRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*CreateApiKeyRequest) ProtoMessage() {}\n\nfunc (x *CreateApiKeyRequest) 
ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_apikey_proto_msgTypes[1]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use CreateApiKeyRequest.ProtoReflect.Descriptor instead.\nfunc (*CreateApiKeyRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_apikey_proto_rawDescGZIP(), []int{1}\n}\n\nfunc (x *CreateApiKeyRequest) GetExpiration() *timestamppb.Timestamp {\n\tif x != nil {\n\t\treturn x.Expiration\n\t}\n\treturn nil\n}\n\ntype CreateApiKeyResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tApiKey        string                 `protobuf:\"bytes,1,opt,name=api_key,json=apiKey,proto3\" json:\"api_key,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *CreateApiKeyResponse) Reset() {\n\t*x = CreateApiKeyResponse{}\n\tmi := &file_headscale_v1_apikey_proto_msgTypes[2]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *CreateApiKeyResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*CreateApiKeyResponse) ProtoMessage() {}\n\nfunc (x *CreateApiKeyResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_apikey_proto_msgTypes[2]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use CreateApiKeyResponse.ProtoReflect.Descriptor instead.\nfunc (*CreateApiKeyResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_apikey_proto_rawDescGZIP(), []int{2}\n}\n\nfunc (x *CreateApiKeyResponse) GetApiKey() string {\n\tif x != nil {\n\t\treturn x.ApiKey\n\t}\n\treturn \"\"\n}\n\ntype ExpireApiKeyRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tPrefix        string                 `protobuf:\"bytes,1,opt,name=prefix,proto3\" json:\"prefix,omitempty\"`\n\tId            uint64                 `protobuf:\"varint,2,opt,name=id,proto3\" json:\"id,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *ExpireApiKeyRequest) Reset() {\n\t*x = ExpireApiKeyRequest{}\n\tmi := &file_headscale_v1_apikey_proto_msgTypes[3]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *ExpireApiKeyRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*ExpireApiKeyRequest) ProtoMessage() {}\n\nfunc (x *ExpireApiKeyRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_apikey_proto_msgTypes[3]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use ExpireApiKeyRequest.ProtoReflect.Descriptor instead.\nfunc (*ExpireApiKeyRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_apikey_proto_rawDescGZIP(), []int{3}\n}\n\nfunc (x *ExpireApiKeyRequest) GetPrefix() string {\n\tif x != nil {\n\t\treturn x.Prefix\n\t}\n\treturn \"\"\n}\n\nfunc (x *ExpireApiKeyRequest) GetId() uint64 {\n\tif x != nil {\n\t\treturn x.Id\n\t}\n\treturn 0\n}\n\ntype ExpireApiKeyResponse struct {\n\tstate         protoimpl.MessageState 
`protogen:\"open.v1\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *ExpireApiKeyResponse) Reset() {\n\t*x = ExpireApiKeyResponse{}\n\tmi := &file_headscale_v1_apikey_proto_msgTypes[4]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *ExpireApiKeyResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*ExpireApiKeyResponse) ProtoMessage() {}\n\nfunc (x *ExpireApiKeyResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_apikey_proto_msgTypes[4]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use ExpireApiKeyResponse.ProtoReflect.Descriptor instead.\nfunc (*ExpireApiKeyResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_apikey_proto_rawDescGZIP(), []int{4}\n}\n\ntype ListApiKeysRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *ListApiKeysRequest) Reset() {\n\t*x = ListApiKeysRequest{}\n\tmi := &file_headscale_v1_apikey_proto_msgTypes[5]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *ListApiKeysRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*ListApiKeysRequest) ProtoMessage() {}\n\nfunc (x *ListApiKeysRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_apikey_proto_msgTypes[5]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use ListApiKeysRequest.ProtoReflect.Descriptor instead.\nfunc (*ListApiKeysRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_apikey_proto_rawDescGZIP(), []int{5}\n}\n\ntype ListApiKeysResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tApiKeys       []*ApiKey              `protobuf:\"bytes,1,rep,name=api_keys,json=apiKeys,proto3\" json:\"api_keys,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *ListApiKeysResponse) Reset() {\n\t*x = ListApiKeysResponse{}\n\tmi := &file_headscale_v1_apikey_proto_msgTypes[6]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *ListApiKeysResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*ListApiKeysResponse) ProtoMessage() {}\n\nfunc (x *ListApiKeysResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_apikey_proto_msgTypes[6]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use ListApiKeysResponse.ProtoReflect.Descriptor instead.\nfunc (*ListApiKeysResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_apikey_proto_rawDescGZIP(), []int{6}\n}\n\nfunc (x *ListApiKeysResponse) GetApiKeys() []*ApiKey {\n\tif x != nil {\n\t\treturn x.ApiKeys\n\t}\n\treturn nil\n}\n\ntype DeleteApiKeyRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tPrefix        string                 
`protobuf:\"bytes,1,opt,name=prefix,proto3\" json:\"prefix,omitempty\"`\n\tId            uint64                 `protobuf:\"varint,2,opt,name=id,proto3\" json:\"id,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *DeleteApiKeyRequest) Reset() {\n\t*x = DeleteApiKeyRequest{}\n\tmi := &file_headscale_v1_apikey_proto_msgTypes[7]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *DeleteApiKeyRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*DeleteApiKeyRequest) ProtoMessage() {}\n\nfunc (x *DeleteApiKeyRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_apikey_proto_msgTypes[7]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use DeleteApiKeyRequest.ProtoReflect.Descriptor instead.\nfunc (*DeleteApiKeyRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_apikey_proto_rawDescGZIP(), []int{7}\n}\n\nfunc (x *DeleteApiKeyRequest) GetPrefix() string {\n\tif x != nil {\n\t\treturn x.Prefix\n\t}\n\treturn \"\"\n}\n\nfunc (x *DeleteApiKeyRequest) GetId() uint64 {\n\tif x != nil {\n\t\treturn x.Id\n\t}\n\treturn 0\n}\n\ntype DeleteApiKeyResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *DeleteApiKeyResponse) Reset() {\n\t*x = DeleteApiKeyResponse{}\n\tmi := &file_headscale_v1_apikey_proto_msgTypes[8]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *DeleteApiKeyResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*DeleteApiKeyResponse) ProtoMessage() {}\n\nfunc (x *DeleteApiKeyResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_apikey_proto_msgTypes[8]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use DeleteApiKeyResponse.ProtoReflect.Descriptor instead.\nfunc (*DeleteApiKeyResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_apikey_proto_rawDescGZIP(), []int{8}\n}\n\nvar File_headscale_v1_apikey_proto protoreflect.FileDescriptor\n\nconst file_headscale_v1_apikey_proto_rawDesc = \"\" +\n\t\"\\n\" +\n\t\"\\x19headscale/v1/apikey.proto\\x12\\fheadscale.v1\\x1a\\x1fgoogle/protobuf/timestamp.proto\\\"\\xe0\\x01\\n\" +\n\t\"\\x06ApiKey\\x12\\x0e\\n\" +\n\t\"\\x02id\\x18\\x01 \\x01(\\x04R\\x02id\\x12\\x16\\n\" +\n\t\"\\x06prefix\\x18\\x02 \\x01(\\tR\\x06prefix\\x12:\\n\" +\n\t\"\\n\" +\n\t\"expiration\\x18\\x03 \\x01(\\v2\\x1a.google.protobuf.TimestampR\\n\" +\n\t\"expiration\\x129\\n\" +\n\t\"\\n\" +\n\t\"created_at\\x18\\x04 \\x01(\\v2\\x1a.google.protobuf.TimestampR\\tcreatedAt\\x127\\n\" +\n\t\"\\tlast_seen\\x18\\x05 \\x01(\\v2\\x1a.google.protobuf.TimestampR\\blastSeen\\\"Q\\n\" +\n\t\"\\x13CreateApiKeyRequest\\x12:\\n\" +\n\t\"\\n\" +\n\t\"expiration\\x18\\x01 \\x01(\\v2\\x1a.google.protobuf.TimestampR\\n\" +\n\t\"expiration\\\"/\\n\" +\n\t\"\\x14CreateApiKeyResponse\\x12\\x17\\n\" +\n\t\"\\aapi_key\\x18\\x01 \\x01(\\tR\\x06apiKey\\\"=\\n\" +\n\t\"\\x13ExpireApiKeyRequest\\x12\\x16\\n\" +\n\t\"\\x06prefix\\x18\\x01 \\x01(\\tR\\x06prefix\\x12\\x0e\\n\" 
+\n\t\"\\x02id\\x18\\x02 \\x01(\\x04R\\x02id\\\"\\x16\\n\" +\n\t\"\\x14ExpireApiKeyResponse\\\"\\x14\\n\" +\n\t\"\\x12ListApiKeysRequest\\\"F\\n\" +\n\t\"\\x13ListApiKeysResponse\\x12/\\n\" +\n\t\"\\bapi_keys\\x18\\x01 \\x03(\\v2\\x14.headscale.v1.ApiKeyR\\aapiKeys\\\"=\\n\" +\n\t\"\\x13DeleteApiKeyRequest\\x12\\x16\\n\" +\n\t\"\\x06prefix\\x18\\x01 \\x01(\\tR\\x06prefix\\x12\\x0e\\n\" +\n\t\"\\x02id\\x18\\x02 \\x01(\\x04R\\x02id\\\"\\x16\\n\" +\n\t\"\\x14DeleteApiKeyResponseB)Z'github.com/juanfont/headscale/gen/go/v1b\\x06proto3\"\n\nvar (\n\tfile_headscale_v1_apikey_proto_rawDescOnce sync.Once\n\tfile_headscale_v1_apikey_proto_rawDescData []byte\n)\n\nfunc file_headscale_v1_apikey_proto_rawDescGZIP() []byte {\n\tfile_headscale_v1_apikey_proto_rawDescOnce.Do(func() {\n\t\tfile_headscale_v1_apikey_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_apikey_proto_rawDesc), len(file_headscale_v1_apikey_proto_rawDesc)))\n\t})\n\treturn file_headscale_v1_apikey_proto_rawDescData\n}\n\nvar file_headscale_v1_apikey_proto_msgTypes = make([]protoimpl.MessageInfo, 9)\nvar file_headscale_v1_apikey_proto_goTypes = []any{\n\t(*ApiKey)(nil),                // 0: headscale.v1.ApiKey\n\t(*CreateApiKeyRequest)(nil),   // 1: headscale.v1.CreateApiKeyRequest\n\t(*CreateApiKeyResponse)(nil),  // 2: headscale.v1.CreateApiKeyResponse\n\t(*ExpireApiKeyRequest)(nil),   // 3: headscale.v1.ExpireApiKeyRequest\n\t(*ExpireApiKeyResponse)(nil),  // 4: headscale.v1.ExpireApiKeyResponse\n\t(*ListApiKeysRequest)(nil),    // 5: headscale.v1.ListApiKeysRequest\n\t(*ListApiKeysResponse)(nil),   // 6: headscale.v1.ListApiKeysResponse\n\t(*DeleteApiKeyRequest)(nil),   // 7: headscale.v1.DeleteApiKeyRequest\n\t(*DeleteApiKeyResponse)(nil),  // 8: headscale.v1.DeleteApiKeyResponse\n\t(*timestamppb.Timestamp)(nil), // 9: google.protobuf.Timestamp\n}\nvar file_headscale_v1_apikey_proto_depIdxs = []int32{\n\t9, // 0: headscale.v1.ApiKey.expiration:type_name -> google.protobuf.Timestamp\n\t9, // 1: headscale.v1.ApiKey.created_at:type_name -> google.protobuf.Timestamp\n\t9, // 2: headscale.v1.ApiKey.last_seen:type_name -> google.protobuf.Timestamp\n\t9, // 3: headscale.v1.CreateApiKeyRequest.expiration:type_name -> google.protobuf.Timestamp\n\t0, // 4: headscale.v1.ListApiKeysResponse.api_keys:type_name -> headscale.v1.ApiKey\n\t5, // [5:5] is the sub-list for method output_type\n\t5, // [5:5] is the sub-list for method input_type\n\t5, // [5:5] is the sub-list for extension type_name\n\t5, // [5:5] is the sub-list for extension extendee\n\t0, // [0:5] is the sub-list for field type_name\n}\n\nfunc init() { file_headscale_v1_apikey_proto_init() }\nfunc file_headscale_v1_apikey_proto_init() {\n\tif File_headscale_v1_apikey_proto != nil {\n\t\treturn\n\t}\n\ttype x struct{}\n\tout := protoimpl.TypeBuilder{\n\t\tFile: protoimpl.DescBuilder{\n\t\t\tGoPackagePath: reflect.TypeOf(x{}).PkgPath(),\n\t\t\tRawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_apikey_proto_rawDesc), len(file_headscale_v1_apikey_proto_rawDesc)),\n\t\t\tNumEnums:      0,\n\t\t\tNumMessages:   9,\n\t\t\tNumExtensions: 0,\n\t\t\tNumServices:   0,\n\t\t},\n\t\tGoTypes:           file_headscale_v1_apikey_proto_goTypes,\n\t\tDependencyIndexes: file_headscale_v1_apikey_proto_depIdxs,\n\t\tMessageInfos:      file_headscale_v1_apikey_proto_msgTypes,\n\t}.Build()\n\tFile_headscale_v1_apikey_proto = out.File\n\tfile_headscale_v1_apikey_proto_goTypes = nil\n\tfile_headscale_v1_apikey_proto_depIdxs = nil\n}\n"
  },
  {
    "path": "gen/go/headscale/v1/auth.pb.go",
    "content": "// Code generated by protoc-gen-go. DO NOT EDIT.\n// versions:\n// \tprotoc-gen-go v1.36.11\n// \tprotoc        (unknown)\n// source: headscale/v1/auth.proto\n\npackage v1\n\nimport (\n\tprotoreflect \"google.golang.org/protobuf/reflect/protoreflect\"\n\tprotoimpl \"google.golang.org/protobuf/runtime/protoimpl\"\n\treflect \"reflect\"\n\tsync \"sync\"\n\tunsafe \"unsafe\"\n)\n\nconst (\n\t// Verify that this generated code is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)\n\t// Verify that runtime/protoimpl is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)\n)\n\ntype AuthRegisterRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tUser          string                 `protobuf:\"bytes,1,opt,name=user,proto3\" json:\"user,omitempty\"`\n\tAuthId        string                 `protobuf:\"bytes,2,opt,name=auth_id,json=authId,proto3\" json:\"auth_id,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *AuthRegisterRequest) Reset() {\n\t*x = AuthRegisterRequest{}\n\tmi := &file_headscale_v1_auth_proto_msgTypes[0]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *AuthRegisterRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*AuthRegisterRequest) ProtoMessage() {}\n\nfunc (x *AuthRegisterRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_auth_proto_msgTypes[0]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use AuthRegisterRequest.ProtoReflect.Descriptor instead.\nfunc (*AuthRegisterRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_auth_proto_rawDescGZIP(), []int{0}\n}\n\nfunc (x *AuthRegisterRequest) GetUser() string {\n\tif x != nil {\n\t\treturn x.User\n\t}\n\treturn \"\"\n}\n\nfunc (x *AuthRegisterRequest) GetAuthId() string {\n\tif x != nil {\n\t\treturn x.AuthId\n\t}\n\treturn \"\"\n}\n\ntype AuthRegisterResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tNode          *Node                  `protobuf:\"bytes,1,opt,name=node,proto3\" json:\"node,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *AuthRegisterResponse) Reset() {\n\t*x = AuthRegisterResponse{}\n\tmi := &file_headscale_v1_auth_proto_msgTypes[1]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *AuthRegisterResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*AuthRegisterResponse) ProtoMessage() {}\n\nfunc (x *AuthRegisterResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_auth_proto_msgTypes[1]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use AuthRegisterResponse.ProtoReflect.Descriptor instead.\nfunc (*AuthRegisterResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_auth_proto_rawDescGZIP(), []int{1}\n}\n\nfunc (x *AuthRegisterResponse) GetNode() *Node {\n\tif x != nil {\n\t\treturn x.Node\n\t}\n\treturn nil\n}\n\ntype AuthApproveRequest struct {\n\tstate         protoimpl.MessageState 
`protogen:\"open.v1\"`\n\tAuthId        string                 `protobuf:\"bytes,1,opt,name=auth_id,json=authId,proto3\" json:\"auth_id,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *AuthApproveRequest) Reset() {\n\t*x = AuthApproveRequest{}\n\tmi := &file_headscale_v1_auth_proto_msgTypes[2]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *AuthApproveRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*AuthApproveRequest) ProtoMessage() {}\n\nfunc (x *AuthApproveRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_auth_proto_msgTypes[2]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use AuthApproveRequest.ProtoReflect.Descriptor instead.\nfunc (*AuthApproveRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_auth_proto_rawDescGZIP(), []int{2}\n}\n\nfunc (x *AuthApproveRequest) GetAuthId() string {\n\tif x != nil {\n\t\treturn x.AuthId\n\t}\n\treturn \"\"\n}\n\ntype AuthApproveResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *AuthApproveResponse) Reset() {\n\t*x = AuthApproveResponse{}\n\tmi := &file_headscale_v1_auth_proto_msgTypes[3]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *AuthApproveResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*AuthApproveResponse) ProtoMessage() {}\n\nfunc (x *AuthApproveResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_auth_proto_msgTypes[3]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use AuthApproveResponse.ProtoReflect.Descriptor instead.\nfunc (*AuthApproveResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_auth_proto_rawDescGZIP(), []int{3}\n}\n\ntype AuthRejectRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tAuthId        string                 `protobuf:\"bytes,1,opt,name=auth_id,json=authId,proto3\" json:\"auth_id,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *AuthRejectRequest) Reset() {\n\t*x = AuthRejectRequest{}\n\tmi := &file_headscale_v1_auth_proto_msgTypes[4]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *AuthRejectRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*AuthRejectRequest) ProtoMessage() {}\n\nfunc (x *AuthRejectRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_auth_proto_msgTypes[4]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use AuthRejectRequest.ProtoReflect.Descriptor instead.\nfunc (*AuthRejectRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_auth_proto_rawDescGZIP(), []int{4}\n}\n\nfunc (x *AuthRejectRequest) GetAuthId() string {\n\tif x != nil {\n\t\treturn 
x.AuthId\n\t}\n\treturn \"\"\n}\n\ntype AuthRejectResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *AuthRejectResponse) Reset() {\n\t*x = AuthRejectResponse{}\n\tmi := &file_headscale_v1_auth_proto_msgTypes[5]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *AuthRejectResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*AuthRejectResponse) ProtoMessage() {}\n\nfunc (x *AuthRejectResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_auth_proto_msgTypes[5]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use AuthRejectResponse.ProtoReflect.Descriptor instead.\nfunc (*AuthRejectResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_auth_proto_rawDescGZIP(), []int{5}\n}\n\nvar File_headscale_v1_auth_proto protoreflect.FileDescriptor\n\nconst file_headscale_v1_auth_proto_rawDesc = \"\" +\n\t\"\\n\" +\n\t\"\\x17headscale/v1/auth.proto\\x12\\fheadscale.v1\\x1a\\x17headscale/v1/node.proto\\\"B\\n\" +\n\t\"\\x13AuthRegisterRequest\\x12\\x12\\n\" +\n\t\"\\x04user\\x18\\x01 \\x01(\\tR\\x04user\\x12\\x17\\n\" +\n\t\"\\aauth_id\\x18\\x02 \\x01(\\tR\\x06authId\\\">\\n\" +\n\t\"\\x14AuthRegisterResponse\\x12&\\n\" +\n\t\"\\x04node\\x18\\x01 \\x01(\\v2\\x12.headscale.v1.NodeR\\x04node\\\"-\\n\" +\n\t\"\\x12AuthApproveRequest\\x12\\x17\\n\" +\n\t\"\\aauth_id\\x18\\x01 \\x01(\\tR\\x06authId\\\"\\x15\\n\" +\n\t\"\\x13AuthApproveResponse\\\",\\n\" +\n\t\"\\x11AuthRejectRequest\\x12\\x17\\n\" +\n\t\"\\aauth_id\\x18\\x01 \\x01(\\tR\\x06authId\\\"\\x14\\n\" +\n\t\"\\x12AuthRejectResponseB)Z'github.com/juanfont/headscale/gen/go/v1b\\x06proto3\"\n\nvar (\n\tfile_headscale_v1_auth_proto_rawDescOnce sync.Once\n\tfile_headscale_v1_auth_proto_rawDescData []byte\n)\n\nfunc file_headscale_v1_auth_proto_rawDescGZIP() []byte {\n\tfile_headscale_v1_auth_proto_rawDescOnce.Do(func() {\n\t\tfile_headscale_v1_auth_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_auth_proto_rawDesc), len(file_headscale_v1_auth_proto_rawDesc)))\n\t})\n\treturn file_headscale_v1_auth_proto_rawDescData\n}\n\nvar file_headscale_v1_auth_proto_msgTypes = make([]protoimpl.MessageInfo, 6)\nvar file_headscale_v1_auth_proto_goTypes = []any{\n\t(*AuthRegisterRequest)(nil),  // 0: headscale.v1.AuthRegisterRequest\n\t(*AuthRegisterResponse)(nil), // 1: headscale.v1.AuthRegisterResponse\n\t(*AuthApproveRequest)(nil),   // 2: headscale.v1.AuthApproveRequest\n\t(*AuthApproveResponse)(nil),  // 3: headscale.v1.AuthApproveResponse\n\t(*AuthRejectRequest)(nil),    // 4: headscale.v1.AuthRejectRequest\n\t(*AuthRejectResponse)(nil),   // 5: headscale.v1.AuthRejectResponse\n\t(*Node)(nil),                 // 6: headscale.v1.Node\n}\nvar file_headscale_v1_auth_proto_depIdxs = []int32{\n\t6, // 0: headscale.v1.AuthRegisterResponse.node:type_name -> headscale.v1.Node\n\t1, // [1:1] is the sub-list for method output_type\n\t1, // [1:1] is the sub-list for method input_type\n\t1, // [1:1] is the sub-list for extension type_name\n\t1, // [1:1] is the sub-list for extension extendee\n\t0, // [0:1] is the sub-list for field type_name\n}\n\nfunc init() { file_headscale_v1_auth_proto_init() }\nfunc 
file_headscale_v1_auth_proto_init() {\n\tif File_headscale_v1_auth_proto != nil {\n\t\treturn\n\t}\n\tfile_headscale_v1_node_proto_init()\n\ttype x struct{}\n\tout := protoimpl.TypeBuilder{\n\t\tFile: protoimpl.DescBuilder{\n\t\t\tGoPackagePath: reflect.TypeOf(x{}).PkgPath(),\n\t\t\tRawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_auth_proto_rawDesc), len(file_headscale_v1_auth_proto_rawDesc)),\n\t\t\tNumEnums:      0,\n\t\t\tNumMessages:   6,\n\t\t\tNumExtensions: 0,\n\t\t\tNumServices:   0,\n\t\t},\n\t\tGoTypes:           file_headscale_v1_auth_proto_goTypes,\n\t\tDependencyIndexes: file_headscale_v1_auth_proto_depIdxs,\n\t\tMessageInfos:      file_headscale_v1_auth_proto_msgTypes,\n\t}.Build()\n\tFile_headscale_v1_auth_proto = out.File\n\tfile_headscale_v1_auth_proto_goTypes = nil\n\tfile_headscale_v1_auth_proto_depIdxs = nil\n}\n"
  },
  {
    "path": "gen/go/headscale/v1/device.pb.go",
    "content": "// Code generated by protoc-gen-go. DO NOT EDIT.\n// versions:\n// \tprotoc-gen-go v1.36.11\n// \tprotoc        (unknown)\n// source: headscale/v1/device.proto\n\npackage v1\n\nimport (\n\tprotoreflect \"google.golang.org/protobuf/reflect/protoreflect\"\n\tprotoimpl \"google.golang.org/protobuf/runtime/protoimpl\"\n\ttimestamppb \"google.golang.org/protobuf/types/known/timestamppb\"\n\treflect \"reflect\"\n\tsync \"sync\"\n\tunsafe \"unsafe\"\n)\n\nconst (\n\t// Verify that this generated code is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)\n\t// Verify that runtime/protoimpl is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)\n)\n\ntype Latency struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tLatencyMs     float32                `protobuf:\"fixed32,1,opt,name=latency_ms,json=latencyMs,proto3\" json:\"latency_ms,omitempty\"`\n\tPreferred     bool                   `protobuf:\"varint,2,opt,name=preferred,proto3\" json:\"preferred,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *Latency) Reset() {\n\t*x = Latency{}\n\tmi := &file_headscale_v1_device_proto_msgTypes[0]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *Latency) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*Latency) ProtoMessage() {}\n\nfunc (x *Latency) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_device_proto_msgTypes[0]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use Latency.ProtoReflect.Descriptor instead.\nfunc (*Latency) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_device_proto_rawDescGZIP(), []int{0}\n}\n\nfunc (x *Latency) GetLatencyMs() float32 {\n\tif x != nil {\n\t\treturn x.LatencyMs\n\t}\n\treturn 0\n}\n\nfunc (x *Latency) GetPreferred() bool {\n\tif x != nil {\n\t\treturn x.Preferred\n\t}\n\treturn false\n}\n\ntype ClientSupports struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tHairPinning   bool                   `protobuf:\"varint,1,opt,name=hair_pinning,json=hairPinning,proto3\" json:\"hair_pinning,omitempty\"`\n\tIpv6          bool                   `protobuf:\"varint,2,opt,name=ipv6,proto3\" json:\"ipv6,omitempty\"`\n\tPcp           bool                   `protobuf:\"varint,3,opt,name=pcp,proto3\" json:\"pcp,omitempty\"`\n\tPmp           bool                   `protobuf:\"varint,4,opt,name=pmp,proto3\" json:\"pmp,omitempty\"`\n\tUdp           bool                   `protobuf:\"varint,5,opt,name=udp,proto3\" json:\"udp,omitempty\"`\n\tUpnp          bool                   `protobuf:\"varint,6,opt,name=upnp,proto3\" json:\"upnp,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *ClientSupports) Reset() {\n\t*x = ClientSupports{}\n\tmi := &file_headscale_v1_device_proto_msgTypes[1]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *ClientSupports) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*ClientSupports) ProtoMessage() {}\n\nfunc (x *ClientSupports) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_device_proto_msgTypes[1]\n\tif x != nil {\n\t\tms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use ClientSupports.ProtoReflect.Descriptor instead.\nfunc (*ClientSupports) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_device_proto_rawDescGZIP(), []int{1}\n}\n\nfunc (x *ClientSupports) GetHairPinning() bool {\n\tif x != nil {\n\t\treturn x.HairPinning\n\t}\n\treturn false\n}\n\nfunc (x *ClientSupports) GetIpv6() bool {\n\tif x != nil {\n\t\treturn x.Ipv6\n\t}\n\treturn false\n}\n\nfunc (x *ClientSupports) GetPcp() bool {\n\tif x != nil {\n\t\treturn x.Pcp\n\t}\n\treturn false\n}\n\nfunc (x *ClientSupports) GetPmp() bool {\n\tif x != nil {\n\t\treturn x.Pmp\n\t}\n\treturn false\n}\n\nfunc (x *ClientSupports) GetUdp() bool {\n\tif x != nil {\n\t\treturn x.Udp\n\t}\n\treturn false\n}\n\nfunc (x *ClientSupports) GetUpnp() bool {\n\tif x != nil {\n\t\treturn x.Upnp\n\t}\n\treturn false\n}\n\ntype ClientConnectivity struct {\n\tstate                 protoimpl.MessageState `protogen:\"open.v1\"`\n\tEndpoints             []string               `protobuf:\"bytes,1,rep,name=endpoints,proto3\" json:\"endpoints,omitempty\"`\n\tDerp                  string                 `protobuf:\"bytes,2,opt,name=derp,proto3\" json:\"derp,omitempty\"`\n\tMappingVariesByDestIp bool                   `protobuf:\"varint,3,opt,name=mapping_varies_by_dest_ip,json=mappingVariesByDestIp,proto3\" json:\"mapping_varies_by_dest_ip,omitempty\"`\n\tLatency               map[string]*Latency    `protobuf:\"bytes,4,rep,name=latency,proto3\" json:\"latency,omitempty\" protobuf_key:\"bytes,1,opt,name=key\" protobuf_val:\"bytes,2,opt,name=value\"`\n\tClientSupports        *ClientSupports        `protobuf:\"bytes,5,opt,name=client_supports,json=clientSupports,proto3\" json:\"client_supports,omitempty\"`\n\tunknownFields         protoimpl.UnknownFields\n\tsizeCache             protoimpl.SizeCache\n}\n\nfunc (x *ClientConnectivity) Reset() {\n\t*x = ClientConnectivity{}\n\tmi := &file_headscale_v1_device_proto_msgTypes[2]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *ClientConnectivity) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*ClientConnectivity) ProtoMessage() {}\n\nfunc (x *ClientConnectivity) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_device_proto_msgTypes[2]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use ClientConnectivity.ProtoReflect.Descriptor instead.\nfunc (*ClientConnectivity) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_device_proto_rawDescGZIP(), []int{2}\n}\n\nfunc (x *ClientConnectivity) GetEndpoints() []string {\n\tif x != nil {\n\t\treturn x.Endpoints\n\t}\n\treturn nil\n}\n\nfunc (x *ClientConnectivity) GetDerp() string {\n\tif x != nil {\n\t\treturn x.Derp\n\t}\n\treturn \"\"\n}\n\nfunc (x *ClientConnectivity) GetMappingVariesByDestIp() bool {\n\tif x != nil {\n\t\treturn x.MappingVariesByDestIp\n\t}\n\treturn false\n}\n\nfunc (x *ClientConnectivity) GetLatency() map[string]*Latency {\n\tif x != nil {\n\t\treturn x.Latency\n\t}\n\treturn nil\n}\n\nfunc (x *ClientConnectivity) GetClientSupports() *ClientSupports {\n\tif x != nil {\n\t\treturn x.ClientSupports\n\t}\n\treturn nil\n}\n\ntype GetDeviceRequest struct 
{\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tId            string                 `protobuf:\"bytes,1,opt,name=id,proto3\" json:\"id,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *GetDeviceRequest) Reset() {\n\t*x = GetDeviceRequest{}\n\tmi := &file_headscale_v1_device_proto_msgTypes[3]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *GetDeviceRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*GetDeviceRequest) ProtoMessage() {}\n\nfunc (x *GetDeviceRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_device_proto_msgTypes[3]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use GetDeviceRequest.ProtoReflect.Descriptor instead.\nfunc (*GetDeviceRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_device_proto_rawDescGZIP(), []int{3}\n}\n\nfunc (x *GetDeviceRequest) GetId() string {\n\tif x != nil {\n\t\treturn x.Id\n\t}\n\treturn \"\"\n}\n\ntype GetDeviceResponse struct {\n\tstate                     protoimpl.MessageState `protogen:\"open.v1\"`\n\tAddresses                 []string               `protobuf:\"bytes,1,rep,name=addresses,proto3\" json:\"addresses,omitempty\"`\n\tId                        string                 `protobuf:\"bytes,2,opt,name=id,proto3\" json:\"id,omitempty\"`\n\tUser                      string                 `protobuf:\"bytes,3,opt,name=user,proto3\" json:\"user,omitempty\"`\n\tName                      string                 `protobuf:\"bytes,4,opt,name=name,proto3\" json:\"name,omitempty\"`\n\tHostname                  string                 `protobuf:\"bytes,5,opt,name=hostname,proto3\" json:\"hostname,omitempty\"`\n\tClientVersion             string                 `protobuf:\"bytes,6,opt,name=client_version,json=clientVersion,proto3\" json:\"client_version,omitempty\"`\n\tUpdateAvailable           bool                   `protobuf:\"varint,7,opt,name=update_available,json=updateAvailable,proto3\" json:\"update_available,omitempty\"`\n\tOs                        string                 `protobuf:\"bytes,8,opt,name=os,proto3\" json:\"os,omitempty\"`\n\tCreated                   *timestamppb.Timestamp `protobuf:\"bytes,9,opt,name=created,proto3\" json:\"created,omitempty\"`\n\tLastSeen                  *timestamppb.Timestamp `protobuf:\"bytes,10,opt,name=last_seen,json=lastSeen,proto3\" json:\"last_seen,omitempty\"`\n\tKeyExpiryDisabled         bool                   `protobuf:\"varint,11,opt,name=key_expiry_disabled,json=keyExpiryDisabled,proto3\" json:\"key_expiry_disabled,omitempty\"`\n\tExpires                   *timestamppb.Timestamp `protobuf:\"bytes,12,opt,name=expires,proto3\" json:\"expires,omitempty\"`\n\tAuthorized                bool                   `protobuf:\"varint,13,opt,name=authorized,proto3\" json:\"authorized,omitempty\"`\n\tIsExternal                bool                   `protobuf:\"varint,14,opt,name=is_external,json=isExternal,proto3\" json:\"is_external,omitempty\"`\n\tMachineKey                string                 `protobuf:\"bytes,15,opt,name=machine_key,json=machineKey,proto3\" json:\"machine_key,omitempty\"`\n\tNodeKey                   string                 `protobuf:\"bytes,16,opt,name=node_key,json=nodeKey,proto3\" 
json:\"node_key,omitempty\"`\n\tBlocksIncomingConnections bool                   `protobuf:\"varint,17,opt,name=blocks_incoming_connections,json=blocksIncomingConnections,proto3\" json:\"blocks_incoming_connections,omitempty\"`\n\tEnabledRoutes             []string               `protobuf:\"bytes,18,rep,name=enabled_routes,json=enabledRoutes,proto3\" json:\"enabled_routes,omitempty\"`\n\tAdvertisedRoutes          []string               `protobuf:\"bytes,19,rep,name=advertised_routes,json=advertisedRoutes,proto3\" json:\"advertised_routes,omitempty\"`\n\tClientConnectivity        *ClientConnectivity    `protobuf:\"bytes,20,opt,name=client_connectivity,json=clientConnectivity,proto3\" json:\"client_connectivity,omitempty\"`\n\tunknownFields             protoimpl.UnknownFields\n\tsizeCache                 protoimpl.SizeCache\n}\n\nfunc (x *GetDeviceResponse) Reset() {\n\t*x = GetDeviceResponse{}\n\tmi := &file_headscale_v1_device_proto_msgTypes[4]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *GetDeviceResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*GetDeviceResponse) ProtoMessage() {}\n\nfunc (x *GetDeviceResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_device_proto_msgTypes[4]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use GetDeviceResponse.ProtoReflect.Descriptor instead.\nfunc (*GetDeviceResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_device_proto_rawDescGZIP(), []int{4}\n}\n\nfunc (x *GetDeviceResponse) GetAddresses() []string {\n\tif x != nil {\n\t\treturn x.Addresses\n\t}\n\treturn nil\n}\n\nfunc (x *GetDeviceResponse) GetId() string {\n\tif x != nil {\n\t\treturn x.Id\n\t}\n\treturn \"\"\n}\n\nfunc (x *GetDeviceResponse) GetUser() string {\n\tif x != nil {\n\t\treturn x.User\n\t}\n\treturn \"\"\n}\n\nfunc (x *GetDeviceResponse) GetName() string {\n\tif x != nil {\n\t\treturn x.Name\n\t}\n\treturn \"\"\n}\n\nfunc (x *GetDeviceResponse) GetHostname() string {\n\tif x != nil {\n\t\treturn x.Hostname\n\t}\n\treturn \"\"\n}\n\nfunc (x *GetDeviceResponse) GetClientVersion() string {\n\tif x != nil {\n\t\treturn x.ClientVersion\n\t}\n\treturn \"\"\n}\n\nfunc (x *GetDeviceResponse) GetUpdateAvailable() bool {\n\tif x != nil {\n\t\treturn x.UpdateAvailable\n\t}\n\treturn false\n}\n\nfunc (x *GetDeviceResponse) GetOs() string {\n\tif x != nil {\n\t\treturn x.Os\n\t}\n\treturn \"\"\n}\n\nfunc (x *GetDeviceResponse) GetCreated() *timestamppb.Timestamp {\n\tif x != nil {\n\t\treturn x.Created\n\t}\n\treturn nil\n}\n\nfunc (x *GetDeviceResponse) GetLastSeen() *timestamppb.Timestamp {\n\tif x != nil {\n\t\treturn x.LastSeen\n\t}\n\treturn nil\n}\n\nfunc (x *GetDeviceResponse) GetKeyExpiryDisabled() bool {\n\tif x != nil {\n\t\treturn x.KeyExpiryDisabled\n\t}\n\treturn false\n}\n\nfunc (x *GetDeviceResponse) GetExpires() *timestamppb.Timestamp {\n\tif x != nil {\n\t\treturn x.Expires\n\t}\n\treturn nil\n}\n\nfunc (x *GetDeviceResponse) GetAuthorized() bool {\n\tif x != nil {\n\t\treturn x.Authorized\n\t}\n\treturn false\n}\n\nfunc (x *GetDeviceResponse) GetIsExternal() bool {\n\tif x != nil {\n\t\treturn x.IsExternal\n\t}\n\treturn false\n}\n\nfunc (x *GetDeviceResponse) GetMachineKey() string {\n\tif x != nil {\n\t\treturn x.MachineKey\n\t}\n\treturn \"\"\n}\n\nfunc (x *GetDeviceResponse) 
GetNodeKey() string {\n\tif x != nil {\n\t\treturn x.NodeKey\n\t}\n\treturn \"\"\n}\n\nfunc (x *GetDeviceResponse) GetBlocksIncomingConnections() bool {\n\tif x != nil {\n\t\treturn x.BlocksIncomingConnections\n\t}\n\treturn false\n}\n\nfunc (x *GetDeviceResponse) GetEnabledRoutes() []string {\n\tif x != nil {\n\t\treturn x.EnabledRoutes\n\t}\n\treturn nil\n}\n\nfunc (x *GetDeviceResponse) GetAdvertisedRoutes() []string {\n\tif x != nil {\n\t\treturn x.AdvertisedRoutes\n\t}\n\treturn nil\n}\n\nfunc (x *GetDeviceResponse) GetClientConnectivity() *ClientConnectivity {\n\tif x != nil {\n\t\treturn x.ClientConnectivity\n\t}\n\treturn nil\n}\n\ntype DeleteDeviceRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tId            string                 `protobuf:\"bytes,1,opt,name=id,proto3\" json:\"id,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *DeleteDeviceRequest) Reset() {\n\t*x = DeleteDeviceRequest{}\n\tmi := &file_headscale_v1_device_proto_msgTypes[5]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *DeleteDeviceRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*DeleteDeviceRequest) ProtoMessage() {}\n\nfunc (x *DeleteDeviceRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_device_proto_msgTypes[5]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use DeleteDeviceRequest.ProtoReflect.Descriptor instead.\nfunc (*DeleteDeviceRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_device_proto_rawDescGZIP(), []int{5}\n}\n\nfunc (x *DeleteDeviceRequest) GetId() string {\n\tif x != nil {\n\t\treturn x.Id\n\t}\n\treturn \"\"\n}\n\ntype DeleteDeviceResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *DeleteDeviceResponse) Reset() {\n\t*x = DeleteDeviceResponse{}\n\tmi := &file_headscale_v1_device_proto_msgTypes[6]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *DeleteDeviceResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*DeleteDeviceResponse) ProtoMessage() {}\n\nfunc (x *DeleteDeviceResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_device_proto_msgTypes[6]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use DeleteDeviceResponse.ProtoReflect.Descriptor instead.\nfunc (*DeleteDeviceResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_device_proto_rawDescGZIP(), []int{6}\n}\n\ntype GetDeviceRoutesRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tId            string                 `protobuf:\"bytes,1,opt,name=id,proto3\" json:\"id,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *GetDeviceRoutesRequest) Reset() {\n\t*x = GetDeviceRoutesRequest{}\n\tmi := &file_headscale_v1_device_proto_msgTypes[7]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *GetDeviceRoutesRequest) String() 
string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*GetDeviceRoutesRequest) ProtoMessage() {}\n\nfunc (x *GetDeviceRoutesRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_device_proto_msgTypes[7]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use GetDeviceRoutesRequest.ProtoReflect.Descriptor instead.\nfunc (*GetDeviceRoutesRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_device_proto_rawDescGZIP(), []int{7}\n}\n\nfunc (x *GetDeviceRoutesRequest) GetId() string {\n\tif x != nil {\n\t\treturn x.Id\n\t}\n\treturn \"\"\n}\n\ntype GetDeviceRoutesResponse struct {\n\tstate            protoimpl.MessageState `protogen:\"open.v1\"`\n\tEnabledRoutes    []string               `protobuf:\"bytes,1,rep,name=enabled_routes,json=enabledRoutes,proto3\" json:\"enabled_routes,omitempty\"`\n\tAdvertisedRoutes []string               `protobuf:\"bytes,2,rep,name=advertised_routes,json=advertisedRoutes,proto3\" json:\"advertised_routes,omitempty\"`\n\tunknownFields    protoimpl.UnknownFields\n\tsizeCache        protoimpl.SizeCache\n}\n\nfunc (x *GetDeviceRoutesResponse) Reset() {\n\t*x = GetDeviceRoutesResponse{}\n\tmi := &file_headscale_v1_device_proto_msgTypes[8]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *GetDeviceRoutesResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*GetDeviceRoutesResponse) ProtoMessage() {}\n\nfunc (x *GetDeviceRoutesResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_device_proto_msgTypes[8]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use GetDeviceRoutesResponse.ProtoReflect.Descriptor instead.\nfunc (*GetDeviceRoutesResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_device_proto_rawDescGZIP(), []int{8}\n}\n\nfunc (x *GetDeviceRoutesResponse) GetEnabledRoutes() []string {\n\tif x != nil {\n\t\treturn x.EnabledRoutes\n\t}\n\treturn nil\n}\n\nfunc (x *GetDeviceRoutesResponse) GetAdvertisedRoutes() []string {\n\tif x != nil {\n\t\treturn x.AdvertisedRoutes\n\t}\n\treturn nil\n}\n\ntype EnableDeviceRoutesRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tId            string                 `protobuf:\"bytes,1,opt,name=id,proto3\" json:\"id,omitempty\"`\n\tRoutes        []string               `protobuf:\"bytes,2,rep,name=routes,proto3\" json:\"routes,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *EnableDeviceRoutesRequest) Reset() {\n\t*x = EnableDeviceRoutesRequest{}\n\tmi := &file_headscale_v1_device_proto_msgTypes[9]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *EnableDeviceRoutesRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*EnableDeviceRoutesRequest) ProtoMessage() {}\n\nfunc (x *EnableDeviceRoutesRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_device_proto_msgTypes[9]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn 
mi.MessageOf(x)\n}\n\n// Deprecated: Use EnableDeviceRoutesRequest.ProtoReflect.Descriptor instead.\nfunc (*EnableDeviceRoutesRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_device_proto_rawDescGZIP(), []int{9}\n}\n\nfunc (x *EnableDeviceRoutesRequest) GetId() string {\n\tif x != nil {\n\t\treturn x.Id\n\t}\n\treturn \"\"\n}\n\nfunc (x *EnableDeviceRoutesRequest) GetRoutes() []string {\n\tif x != nil {\n\t\treturn x.Routes\n\t}\n\treturn nil\n}\n\ntype EnableDeviceRoutesResponse struct {\n\tstate            protoimpl.MessageState `protogen:\"open.v1\"`\n\tEnabledRoutes    []string               `protobuf:\"bytes,1,rep,name=enabled_routes,json=enabledRoutes,proto3\" json:\"enabled_routes,omitempty\"`\n\tAdvertisedRoutes []string               `protobuf:\"bytes,2,rep,name=advertised_routes,json=advertisedRoutes,proto3\" json:\"advertised_routes,omitempty\"`\n\tunknownFields    protoimpl.UnknownFields\n\tsizeCache        protoimpl.SizeCache\n}\n\nfunc (x *EnableDeviceRoutesResponse) Reset() {\n\t*x = EnableDeviceRoutesResponse{}\n\tmi := &file_headscale_v1_device_proto_msgTypes[10]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *EnableDeviceRoutesResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*EnableDeviceRoutesResponse) ProtoMessage() {}\n\nfunc (x *EnableDeviceRoutesResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_device_proto_msgTypes[10]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use EnableDeviceRoutesResponse.ProtoReflect.Descriptor instead.\nfunc (*EnableDeviceRoutesResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_device_proto_rawDescGZIP(), []int{10}\n}\n\nfunc (x *EnableDeviceRoutesResponse) GetEnabledRoutes() []string {\n\tif x != nil {\n\t\treturn x.EnabledRoutes\n\t}\n\treturn nil\n}\n\nfunc (x *EnableDeviceRoutesResponse) GetAdvertisedRoutes() []string {\n\tif x != nil {\n\t\treturn x.AdvertisedRoutes\n\t}\n\treturn nil\n}\n\nvar File_headscale_v1_device_proto protoreflect.FileDescriptor\n\nconst file_headscale_v1_device_proto_rawDesc = \"\" +\n\t\"\\n\" +\n\t\"\\x19headscale/v1/device.proto\\x12\\fheadscale.v1\\x1a\\x1fgoogle/protobuf/timestamp.proto\\\"F\\n\" +\n\t\"\\aLatency\\x12\\x1d\\n\" +\n\t\"\\n\" +\n\t\"latency_ms\\x18\\x01 \\x01(\\x02R\\tlatencyMs\\x12\\x1c\\n\" +\n\t\"\\tpreferred\\x18\\x02 \\x01(\\bR\\tpreferred\\\"\\x91\\x01\\n\" +\n\t\"\\x0eClientSupports\\x12!\\n\" +\n\t\"\\fhair_pinning\\x18\\x01 \\x01(\\bR\\vhairPinning\\x12\\x12\\n\" +\n\t\"\\x04ipv6\\x18\\x02 \\x01(\\bR\\x04ipv6\\x12\\x10\\n\" +\n\t\"\\x03pcp\\x18\\x03 \\x01(\\bR\\x03pcp\\x12\\x10\\n\" +\n\t\"\\x03pmp\\x18\\x04 \\x01(\\bR\\x03pmp\\x12\\x10\\n\" +\n\t\"\\x03udp\\x18\\x05 \\x01(\\bR\\x03udp\\x12\\x12\\n\" +\n\t\"\\x04upnp\\x18\\x06 \\x01(\\bR\\x04upnp\\\"\\xe3\\x02\\n\" +\n\t\"\\x12ClientConnectivity\\x12\\x1c\\n\" +\n\t\"\\tendpoints\\x18\\x01 \\x03(\\tR\\tendpoints\\x12\\x12\\n\" +\n\t\"\\x04derp\\x18\\x02 \\x01(\\tR\\x04derp\\x128\\n\" +\n\t\"\\x19mapping_varies_by_dest_ip\\x18\\x03 \\x01(\\bR\\x15mappingVariesByDestIp\\x12G\\n\" +\n\t\"\\alatency\\x18\\x04 \\x03(\\v2-.headscale.v1.ClientConnectivity.LatencyEntryR\\alatency\\x12E\\n\" +\n\t\"\\x0fclient_supports\\x18\\x05 \\x01(\\v2\\x1c.headscale.v1.ClientSupportsR\\x0eclientSupports\\x1aQ\\n\" 
+\n\t\"\\fLatencyEntry\\x12\\x10\\n\" +\n\t\"\\x03key\\x18\\x01 \\x01(\\tR\\x03key\\x12+\\n\" +\n\t\"\\x05value\\x18\\x02 \\x01(\\v2\\x15.headscale.v1.LatencyR\\x05value:\\x028\\x01\\\"\\\"\\n\" +\n\t\"\\x10GetDeviceRequest\\x12\\x0e\\n\" +\n\t\"\\x02id\\x18\\x01 \\x01(\\tR\\x02id\\\"\\xa0\\x06\\n\" +\n\t\"\\x11GetDeviceResponse\\x12\\x1c\\n\" +\n\t\"\\taddresses\\x18\\x01 \\x03(\\tR\\taddresses\\x12\\x0e\\n\" +\n\t\"\\x02id\\x18\\x02 \\x01(\\tR\\x02id\\x12\\x12\\n\" +\n\t\"\\x04user\\x18\\x03 \\x01(\\tR\\x04user\\x12\\x12\\n\" +\n\t\"\\x04name\\x18\\x04 \\x01(\\tR\\x04name\\x12\\x1a\\n\" +\n\t\"\\bhostname\\x18\\x05 \\x01(\\tR\\bhostname\\x12%\\n\" +\n\t\"\\x0eclient_version\\x18\\x06 \\x01(\\tR\\rclientVersion\\x12)\\n\" +\n\t\"\\x10update_available\\x18\\a \\x01(\\bR\\x0fupdateAvailable\\x12\\x0e\\n\" +\n\t\"\\x02os\\x18\\b \\x01(\\tR\\x02os\\x124\\n\" +\n\t\"\\acreated\\x18\\t \\x01(\\v2\\x1a.google.protobuf.TimestampR\\acreated\\x127\\n\" +\n\t\"\\tlast_seen\\x18\\n\" +\n\t\" \\x01(\\v2\\x1a.google.protobuf.TimestampR\\blastSeen\\x12.\\n\" +\n\t\"\\x13key_expiry_disabled\\x18\\v \\x01(\\bR\\x11keyExpiryDisabled\\x124\\n\" +\n\t\"\\aexpires\\x18\\f \\x01(\\v2\\x1a.google.protobuf.TimestampR\\aexpires\\x12\\x1e\\n\" +\n\t\"\\n\" +\n\t\"authorized\\x18\\r \\x01(\\bR\\n\" +\n\t\"authorized\\x12\\x1f\\n\" +\n\t\"\\vis_external\\x18\\x0e \\x01(\\bR\\n\" +\n\t\"isExternal\\x12\\x1f\\n\" +\n\t\"\\vmachine_key\\x18\\x0f \\x01(\\tR\\n\" +\n\t\"machineKey\\x12\\x19\\n\" +\n\t\"\\bnode_key\\x18\\x10 \\x01(\\tR\\anodeKey\\x12>\\n\" +\n\t\"\\x1bblocks_incoming_connections\\x18\\x11 \\x01(\\bR\\x19blocksIncomingConnections\\x12%\\n\" +\n\t\"\\x0eenabled_routes\\x18\\x12 \\x03(\\tR\\renabledRoutes\\x12+\\n\" +\n\t\"\\x11advertised_routes\\x18\\x13 \\x03(\\tR\\x10advertisedRoutes\\x12Q\\n\" +\n\t\"\\x13client_connectivity\\x18\\x14 \\x01(\\v2 .headscale.v1.ClientConnectivityR\\x12clientConnectivity\\\"%\\n\" +\n\t\"\\x13DeleteDeviceRequest\\x12\\x0e\\n\" +\n\t\"\\x02id\\x18\\x01 \\x01(\\tR\\x02id\\\"\\x16\\n\" +\n\t\"\\x14DeleteDeviceResponse\\\"(\\n\" +\n\t\"\\x16GetDeviceRoutesRequest\\x12\\x0e\\n\" +\n\t\"\\x02id\\x18\\x01 \\x01(\\tR\\x02id\\\"m\\n\" +\n\t\"\\x17GetDeviceRoutesResponse\\x12%\\n\" +\n\t\"\\x0eenabled_routes\\x18\\x01 \\x03(\\tR\\renabledRoutes\\x12+\\n\" +\n\t\"\\x11advertised_routes\\x18\\x02 \\x03(\\tR\\x10advertisedRoutes\\\"C\\n\" +\n\t\"\\x19EnableDeviceRoutesRequest\\x12\\x0e\\n\" +\n\t\"\\x02id\\x18\\x01 \\x01(\\tR\\x02id\\x12\\x16\\n\" +\n\t\"\\x06routes\\x18\\x02 \\x03(\\tR\\x06routes\\\"p\\n\" +\n\t\"\\x1aEnableDeviceRoutesResponse\\x12%\\n\" +\n\t\"\\x0eenabled_routes\\x18\\x01 \\x03(\\tR\\renabledRoutes\\x12+\\n\" +\n\t\"\\x11advertised_routes\\x18\\x02 \\x03(\\tR\\x10advertisedRoutesB)Z'github.com/juanfont/headscale/gen/go/v1b\\x06proto3\"\n\nvar (\n\tfile_headscale_v1_device_proto_rawDescOnce sync.Once\n\tfile_headscale_v1_device_proto_rawDescData []byte\n)\n\nfunc file_headscale_v1_device_proto_rawDescGZIP() []byte {\n\tfile_headscale_v1_device_proto_rawDescOnce.Do(func() {\n\t\tfile_headscale_v1_device_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_device_proto_rawDesc), len(file_headscale_v1_device_proto_rawDesc)))\n\t})\n\treturn file_headscale_v1_device_proto_rawDescData\n}\n\nvar file_headscale_v1_device_proto_msgTypes = make([]protoimpl.MessageInfo, 12)\nvar file_headscale_v1_device_proto_goTypes = []any{\n\t(*Latency)(nil),                    // 0: headscale.v1.Latency\n\t(*ClientSupports)(nil),             // 1: 
headscale.v1.ClientSupports\n\t(*ClientConnectivity)(nil),         // 2: headscale.v1.ClientConnectivity\n\t(*GetDeviceRequest)(nil),           // 3: headscale.v1.GetDeviceRequest\n\t(*GetDeviceResponse)(nil),          // 4: headscale.v1.GetDeviceResponse\n\t(*DeleteDeviceRequest)(nil),        // 5: headscale.v1.DeleteDeviceRequest\n\t(*DeleteDeviceResponse)(nil),       // 6: headscale.v1.DeleteDeviceResponse\n\t(*GetDeviceRoutesRequest)(nil),     // 7: headscale.v1.GetDeviceRoutesRequest\n\t(*GetDeviceRoutesResponse)(nil),    // 8: headscale.v1.GetDeviceRoutesResponse\n\t(*EnableDeviceRoutesRequest)(nil),  // 9: headscale.v1.EnableDeviceRoutesRequest\n\t(*EnableDeviceRoutesResponse)(nil), // 10: headscale.v1.EnableDeviceRoutesResponse\n\tnil,                                // 11: headscale.v1.ClientConnectivity.LatencyEntry\n\t(*timestamppb.Timestamp)(nil),      // 12: google.protobuf.Timestamp\n}\nvar file_headscale_v1_device_proto_depIdxs = []int32{\n\t11, // 0: headscale.v1.ClientConnectivity.latency:type_name -> headscale.v1.ClientConnectivity.LatencyEntry\n\t1,  // 1: headscale.v1.ClientConnectivity.client_supports:type_name -> headscale.v1.ClientSupports\n\t12, // 2: headscale.v1.GetDeviceResponse.created:type_name -> google.protobuf.Timestamp\n\t12, // 3: headscale.v1.GetDeviceResponse.last_seen:type_name -> google.protobuf.Timestamp\n\t12, // 4: headscale.v1.GetDeviceResponse.expires:type_name -> google.protobuf.Timestamp\n\t2,  // 5: headscale.v1.GetDeviceResponse.client_connectivity:type_name -> headscale.v1.ClientConnectivity\n\t0,  // 6: headscale.v1.ClientConnectivity.LatencyEntry.value:type_name -> headscale.v1.Latency\n\t7,  // [7:7] is the sub-list for method output_type\n\t7,  // [7:7] is the sub-list for method input_type\n\t7,  // [7:7] is the sub-list for extension type_name\n\t7,  // [7:7] is the sub-list for extension extendee\n\t0,  // [0:7] is the sub-list for field type_name\n}\n\nfunc init() { file_headscale_v1_device_proto_init() }\nfunc file_headscale_v1_device_proto_init() {\n\tif File_headscale_v1_device_proto != nil {\n\t\treturn\n\t}\n\ttype x struct{}\n\tout := protoimpl.TypeBuilder{\n\t\tFile: protoimpl.DescBuilder{\n\t\t\tGoPackagePath: reflect.TypeOf(x{}).PkgPath(),\n\t\t\tRawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_device_proto_rawDesc), len(file_headscale_v1_device_proto_rawDesc)),\n\t\t\tNumEnums:      0,\n\t\t\tNumMessages:   12,\n\t\t\tNumExtensions: 0,\n\t\t\tNumServices:   0,\n\t\t},\n\t\tGoTypes:           file_headscale_v1_device_proto_goTypes,\n\t\tDependencyIndexes: file_headscale_v1_device_proto_depIdxs,\n\t\tMessageInfos:      file_headscale_v1_device_proto_msgTypes,\n\t}.Build()\n\tFile_headscale_v1_device_proto = out.File\n\tfile_headscale_v1_device_proto_goTypes = nil\n\tfile_headscale_v1_device_proto_depIdxs = nil\n}\n"
  },
  {
    "path": "gen/go/headscale/v1/headscale.pb.go",
    "content": "// Code generated by protoc-gen-go. DO NOT EDIT.\n// versions:\n// \tprotoc-gen-go v1.36.11\n// \tprotoc        (unknown)\n// source: headscale/v1/headscale.proto\n\npackage v1\n\nimport (\n\t_ \"google.golang.org/genproto/googleapis/api/annotations\"\n\tprotoreflect \"google.golang.org/protobuf/reflect/protoreflect\"\n\tprotoimpl \"google.golang.org/protobuf/runtime/protoimpl\"\n\treflect \"reflect\"\n\tsync \"sync\"\n\tunsafe \"unsafe\"\n)\n\nconst (\n\t// Verify that this generated code is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)\n\t// Verify that runtime/protoimpl is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)\n)\n\ntype HealthRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *HealthRequest) Reset() {\n\t*x = HealthRequest{}\n\tmi := &file_headscale_v1_headscale_proto_msgTypes[0]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *HealthRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*HealthRequest) ProtoMessage() {}\n\nfunc (x *HealthRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_headscale_proto_msgTypes[0]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use HealthRequest.ProtoReflect.Descriptor instead.\nfunc (*HealthRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_headscale_proto_rawDescGZIP(), []int{0}\n}\n\ntype HealthResponse struct {\n\tstate                protoimpl.MessageState `protogen:\"open.v1\"`\n\tDatabaseConnectivity bool                   `protobuf:\"varint,1,opt,name=database_connectivity,json=databaseConnectivity,proto3\" json:\"database_connectivity,omitempty\"`\n\tunknownFields        protoimpl.UnknownFields\n\tsizeCache            protoimpl.SizeCache\n}\n\nfunc (x *HealthResponse) Reset() {\n\t*x = HealthResponse{}\n\tmi := &file_headscale_v1_headscale_proto_msgTypes[1]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *HealthResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*HealthResponse) ProtoMessage() {}\n\nfunc (x *HealthResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_headscale_proto_msgTypes[1]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use HealthResponse.ProtoReflect.Descriptor instead.\nfunc (*HealthResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_headscale_proto_rawDescGZIP(), []int{1}\n}\n\nfunc (x *HealthResponse) GetDatabaseConnectivity() bool {\n\tif x != nil {\n\t\treturn x.DatabaseConnectivity\n\t}\n\treturn false\n}\n\nvar File_headscale_v1_headscale_proto protoreflect.FileDescriptor\n\nconst file_headscale_v1_headscale_proto_rawDesc = \"\" +\n\t\"\\n\" 
+\n\t\"\\x1cheadscale/v1/headscale.proto\\x12\\fheadscale.v1\\x1a\\x1cgoogle/api/annotations.proto\\x1a\\x17headscale/v1/user.proto\\x1a\\x1dheadscale/v1/preauthkey.proto\\x1a\\x17headscale/v1/node.proto\\x1a\\x19headscale/v1/apikey.proto\\x1a\\x17headscale/v1/auth.proto\\x1a\\x19headscale/v1/policy.proto\\\"\\x0f\\n\" +\n\t\"\\rHealthRequest\\\"E\\n\" +\n\t\"\\x0eHealthResponse\\x123\\n\" +\n\t\"\\x15database_connectivity\\x18\\x01 \\x01(\\bR\\x14databaseConnectivity2\\xeb\\x19\\n\" +\n\t\"\\x10HeadscaleService\\x12h\\n\" +\n\t\"\\n\" +\n\t\"CreateUser\\x12\\x1f.headscale.v1.CreateUserRequest\\x1a .headscale.v1.CreateUserResponse\\\"\\x17\\x82\\xd3\\xe4\\x93\\x02\\x11:\\x01*\\\"\\f/api/v1/user\\x12\\x80\\x01\\n\" +\n\t\"\\n\" +\n\t\"RenameUser\\x12\\x1f.headscale.v1.RenameUserRequest\\x1a .headscale.v1.RenameUserResponse\\\"/\\x82\\xd3\\xe4\\x93\\x02)\\\"'/api/v1/user/{old_id}/rename/{new_name}\\x12j\\n\" +\n\t\"\\n\" +\n\t\"DeleteUser\\x12\\x1f.headscale.v1.DeleteUserRequest\\x1a .headscale.v1.DeleteUserResponse\\\"\\x19\\x82\\xd3\\xe4\\x93\\x02\\x13*\\x11/api/v1/user/{id}\\x12b\\n\" +\n\t\"\\tListUsers\\x12\\x1e.headscale.v1.ListUsersRequest\\x1a\\x1f.headscale.v1.ListUsersResponse\\\"\\x14\\x82\\xd3\\xe4\\x93\\x02\\x0e\\x12\\f/api/v1/user\\x12\\x80\\x01\\n\" +\n\t\"\\x10CreatePreAuthKey\\x12%.headscale.v1.CreatePreAuthKeyRequest\\x1a&.headscale.v1.CreatePreAuthKeyResponse\\\"\\x1d\\x82\\xd3\\xe4\\x93\\x02\\x17:\\x01*\\\"\\x12/api/v1/preauthkey\\x12\\x87\\x01\\n\" +\n\t\"\\x10ExpirePreAuthKey\\x12%.headscale.v1.ExpirePreAuthKeyRequest\\x1a&.headscale.v1.ExpirePreAuthKeyResponse\\\"$\\x82\\xd3\\xe4\\x93\\x02\\x1e:\\x01*\\\"\\x19/api/v1/preauthkey/expire\\x12}\\n\" +\n\t\"\\x10DeletePreAuthKey\\x12%.headscale.v1.DeletePreAuthKeyRequest\\x1a&.headscale.v1.DeletePreAuthKeyResponse\\\"\\x1a\\x82\\xd3\\xe4\\x93\\x02\\x14*\\x12/api/v1/preauthkey\\x12z\\n\" +\n\t\"\\x0fListPreAuthKeys\\x12$.headscale.v1.ListPreAuthKeysRequest\\x1a%.headscale.v1.ListPreAuthKeysResponse\\\"\\x1a\\x82\\xd3\\xe4\\x93\\x02\\x14\\x12\\x12/api/v1/preauthkey\\x12}\\n\" +\n\t\"\\x0fDebugCreateNode\\x12$.headscale.v1.DebugCreateNodeRequest\\x1a%.headscale.v1.DebugCreateNodeResponse\\\"\\x1d\\x82\\xd3\\xe4\\x93\\x02\\x17:\\x01*\\\"\\x12/api/v1/debug/node\\x12f\\n\" +\n\t\"\\aGetNode\\x12\\x1c.headscale.v1.GetNodeRequest\\x1a\\x1d.headscale.v1.GetNodeResponse\\\"\\x1e\\x82\\xd3\\xe4\\x93\\x02\\x18\\x12\\x16/api/v1/node/{node_id}\\x12n\\n\" +\n\t\"\\aSetTags\\x12\\x1c.headscale.v1.SetTagsRequest\\x1a\\x1d.headscale.v1.SetTagsResponse\\\"&\\x82\\xd3\\xe4\\x93\\x02 :\\x01*\\\"\\x1b/api/v1/node/{node_id}/tags\\x12\\x96\\x01\\n\" +\n\t\"\\x11SetApprovedRoutes\\x12&.headscale.v1.SetApprovedRoutesRequest\\x1a'.headscale.v1.SetApprovedRoutesResponse\\\"0\\x82\\xd3\\xe4\\x93\\x02*:\\x01*\\\"%/api/v1/node/{node_id}/approve_routes\\x12t\\n\" +\n\t\"\\fRegisterNode\\x12!.headscale.v1.RegisterNodeRequest\\x1a\\\".headscale.v1.RegisterNodeResponse\\\"\\x1d\\x82\\xd3\\xe4\\x93\\x02\\x17\\\"\\x15/api/v1/node/register\\x12o\\n\" +\n\t\"\\n\" +\n\t\"DeleteNode\\x12\\x1f.headscale.v1.DeleteNodeRequest\\x1a .headscale.v1.DeleteNodeResponse\\\"\\x1e\\x82\\xd3\\xe4\\x93\\x02\\x18*\\x16/api/v1/node/{node_id}\\x12v\\n\" +\n\t\"\\n\" +\n\t\"ExpireNode\\x12\\x1f.headscale.v1.ExpireNodeRequest\\x1a .headscale.v1.ExpireNodeResponse\\\"%\\x82\\xd3\\xe4\\x93\\x02\\x1f\\\"\\x1d/api/v1/node/{node_id}/expire\\x12\\x81\\x01\\n\" +\n\t\"\\n\" +\n\t\"RenameNode\\x12\\x1f.headscale.v1.RenameNodeRequest\\x1a 
.headscale.v1.RenameNodeResponse\\\"0\\x82\\xd3\\xe4\\x93\\x02*\\\"(/api/v1/node/{node_id}/rename/{new_name}\\x12b\\n\" +\n\t\"\\tListNodes\\x12\\x1e.headscale.v1.ListNodesRequest\\x1a\\x1f.headscale.v1.ListNodesResponse\\\"\\x14\\x82\\xd3\\xe4\\x93\\x02\\x0e\\x12\\f/api/v1/node\\x12\\x80\\x01\\n\" +\n\t\"\\x0fBackfillNodeIPs\\x12$.headscale.v1.BackfillNodeIPsRequest\\x1a%.headscale.v1.BackfillNodeIPsResponse\\\" \\x82\\xd3\\xe4\\x93\\x02\\x1a\\\"\\x18/api/v1/node/backfillips\\x12w\\n\" +\n\t\"\\fAuthRegister\\x12!.headscale.v1.AuthRegisterRequest\\x1a\\\".headscale.v1.AuthRegisterResponse\\\" \\x82\\xd3\\xe4\\x93\\x02\\x1a:\\x01*\\\"\\x15/api/v1/auth/register\\x12s\\n\" +\n\t\"\\vAuthApprove\\x12 .headscale.v1.AuthApproveRequest\\x1a!.headscale.v1.AuthApproveResponse\\\"\\x1f\\x82\\xd3\\xe4\\x93\\x02\\x19:\\x01*\\\"\\x14/api/v1/auth/approve\\x12o\\n\" +\n\t\"\\n\" +\n\t\"AuthReject\\x12\\x1f.headscale.v1.AuthRejectRequest\\x1a .headscale.v1.AuthRejectResponse\\\"\\x1e\\x82\\xd3\\xe4\\x93\\x02\\x18:\\x01*\\\"\\x13/api/v1/auth/reject\\x12p\\n\" +\n\t\"\\fCreateApiKey\\x12!.headscale.v1.CreateApiKeyRequest\\x1a\\\".headscale.v1.CreateApiKeyResponse\\\"\\x19\\x82\\xd3\\xe4\\x93\\x02\\x13:\\x01*\\\"\\x0e/api/v1/apikey\\x12w\\n\" +\n\t\"\\fExpireApiKey\\x12!.headscale.v1.ExpireApiKeyRequest\\x1a\\\".headscale.v1.ExpireApiKeyResponse\\\" \\x82\\xd3\\xe4\\x93\\x02\\x1a:\\x01*\\\"\\x15/api/v1/apikey/expire\\x12j\\n\" +\n\t\"\\vListApiKeys\\x12 .headscale.v1.ListApiKeysRequest\\x1a!.headscale.v1.ListApiKeysResponse\\\"\\x16\\x82\\xd3\\xe4\\x93\\x02\\x10\\x12\\x0e/api/v1/apikey\\x12v\\n\" +\n\t\"\\fDeleteApiKey\\x12!.headscale.v1.DeleteApiKeyRequest\\x1a\\\".headscale.v1.DeleteApiKeyResponse\\\"\\x1f\\x82\\xd3\\xe4\\x93\\x02\\x19*\\x17/api/v1/apikey/{prefix}\\x12d\\n\" +\n\t\"\\tGetPolicy\\x12\\x1e.headscale.v1.GetPolicyRequest\\x1a\\x1f.headscale.v1.GetPolicyResponse\\\"\\x16\\x82\\xd3\\xe4\\x93\\x02\\x10\\x12\\x0e/api/v1/policy\\x12g\\n\" +\n\t\"\\tSetPolicy\\x12\\x1e.headscale.v1.SetPolicyRequest\\x1a\\x1f.headscale.v1.SetPolicyResponse\\\"\\x19\\x82\\xd3\\xe4\\x93\\x02\\x13:\\x01*\\x1a\\x0e/api/v1/policy\\x12[\\n\" +\n\t\"\\x06Health\\x12\\x1b.headscale.v1.HealthRequest\\x1a\\x1c.headscale.v1.HealthResponse\\\"\\x16\\x82\\xd3\\xe4\\x93\\x02\\x10\\x12\\x0e/api/v1/healthB)Z'github.com/juanfont/headscale/gen/go/v1b\\x06proto3\"\n\nvar (\n\tfile_headscale_v1_headscale_proto_rawDescOnce sync.Once\n\tfile_headscale_v1_headscale_proto_rawDescData []byte\n)\n\nfunc file_headscale_v1_headscale_proto_rawDescGZIP() []byte {\n\tfile_headscale_v1_headscale_proto_rawDescOnce.Do(func() {\n\t\tfile_headscale_v1_headscale_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_headscale_proto_rawDesc), len(file_headscale_v1_headscale_proto_rawDesc)))\n\t})\n\treturn file_headscale_v1_headscale_proto_rawDescData\n}\n\nvar file_headscale_v1_headscale_proto_msgTypes = make([]protoimpl.MessageInfo, 2)\nvar file_headscale_v1_headscale_proto_goTypes = []any{\n\t(*HealthRequest)(nil),             // 0: headscale.v1.HealthRequest\n\t(*HealthResponse)(nil),            // 1: headscale.v1.HealthResponse\n\t(*CreateUserRequest)(nil),         // 2: headscale.v1.CreateUserRequest\n\t(*RenameUserRequest)(nil),         // 3: headscale.v1.RenameUserRequest\n\t(*DeleteUserRequest)(nil),         // 4: headscale.v1.DeleteUserRequest\n\t(*ListUsersRequest)(nil),          // 5: headscale.v1.ListUsersRequest\n\t(*CreatePreAuthKeyRequest)(nil),   // 6: 
headscale.v1.CreatePreAuthKeyRequest\n\t(*ExpirePreAuthKeyRequest)(nil),   // 7: headscale.v1.ExpirePreAuthKeyRequest\n\t(*DeletePreAuthKeyRequest)(nil),   // 8: headscale.v1.DeletePreAuthKeyRequest\n\t(*ListPreAuthKeysRequest)(nil),    // 9: headscale.v1.ListPreAuthKeysRequest\n\t(*DebugCreateNodeRequest)(nil),    // 10: headscale.v1.DebugCreateNodeRequest\n\t(*GetNodeRequest)(nil),            // 11: headscale.v1.GetNodeRequest\n\t(*SetTagsRequest)(nil),            // 12: headscale.v1.SetTagsRequest\n\t(*SetApprovedRoutesRequest)(nil),  // 13: headscale.v1.SetApprovedRoutesRequest\n\t(*RegisterNodeRequest)(nil),       // 14: headscale.v1.RegisterNodeRequest\n\t(*DeleteNodeRequest)(nil),         // 15: headscale.v1.DeleteNodeRequest\n\t(*ExpireNodeRequest)(nil),         // 16: headscale.v1.ExpireNodeRequest\n\t(*RenameNodeRequest)(nil),         // 17: headscale.v1.RenameNodeRequest\n\t(*ListNodesRequest)(nil),          // 18: headscale.v1.ListNodesRequest\n\t(*BackfillNodeIPsRequest)(nil),    // 19: headscale.v1.BackfillNodeIPsRequest\n\t(*AuthRegisterRequest)(nil),       // 20: headscale.v1.AuthRegisterRequest\n\t(*AuthApproveRequest)(nil),        // 21: headscale.v1.AuthApproveRequest\n\t(*AuthRejectRequest)(nil),         // 22: headscale.v1.AuthRejectRequest\n\t(*CreateApiKeyRequest)(nil),       // 23: headscale.v1.CreateApiKeyRequest\n\t(*ExpireApiKeyRequest)(nil),       // 24: headscale.v1.ExpireApiKeyRequest\n\t(*ListApiKeysRequest)(nil),        // 25: headscale.v1.ListApiKeysRequest\n\t(*DeleteApiKeyRequest)(nil),       // 26: headscale.v1.DeleteApiKeyRequest\n\t(*GetPolicyRequest)(nil),          // 27: headscale.v1.GetPolicyRequest\n\t(*SetPolicyRequest)(nil),          // 28: headscale.v1.SetPolicyRequest\n\t(*CreateUserResponse)(nil),        // 29: headscale.v1.CreateUserResponse\n\t(*RenameUserResponse)(nil),        // 30: headscale.v1.RenameUserResponse\n\t(*DeleteUserResponse)(nil),        // 31: headscale.v1.DeleteUserResponse\n\t(*ListUsersResponse)(nil),         // 32: headscale.v1.ListUsersResponse\n\t(*CreatePreAuthKeyResponse)(nil),  // 33: headscale.v1.CreatePreAuthKeyResponse\n\t(*ExpirePreAuthKeyResponse)(nil),  // 34: headscale.v1.ExpirePreAuthKeyResponse\n\t(*DeletePreAuthKeyResponse)(nil),  // 35: headscale.v1.DeletePreAuthKeyResponse\n\t(*ListPreAuthKeysResponse)(nil),   // 36: headscale.v1.ListPreAuthKeysResponse\n\t(*DebugCreateNodeResponse)(nil),   // 37: headscale.v1.DebugCreateNodeResponse\n\t(*GetNodeResponse)(nil),           // 38: headscale.v1.GetNodeResponse\n\t(*SetTagsResponse)(nil),           // 39: headscale.v1.SetTagsResponse\n\t(*SetApprovedRoutesResponse)(nil), // 40: headscale.v1.SetApprovedRoutesResponse\n\t(*RegisterNodeResponse)(nil),      // 41: headscale.v1.RegisterNodeResponse\n\t(*DeleteNodeResponse)(nil),        // 42: headscale.v1.DeleteNodeResponse\n\t(*ExpireNodeResponse)(nil),        // 43: headscale.v1.ExpireNodeResponse\n\t(*RenameNodeResponse)(nil),        // 44: headscale.v1.RenameNodeResponse\n\t(*ListNodesResponse)(nil),         // 45: headscale.v1.ListNodesResponse\n\t(*BackfillNodeIPsResponse)(nil),   // 46: headscale.v1.BackfillNodeIPsResponse\n\t(*AuthRegisterResponse)(nil),      // 47: headscale.v1.AuthRegisterResponse\n\t(*AuthApproveResponse)(nil),       // 48: headscale.v1.AuthApproveResponse\n\t(*AuthRejectResponse)(nil),        // 49: headscale.v1.AuthRejectResponse\n\t(*CreateApiKeyResponse)(nil),      // 50: headscale.v1.CreateApiKeyResponse\n\t(*ExpireApiKeyResponse)(nil),      // 51: 
headscale.v1.ExpireApiKeyResponse\n\t(*ListApiKeysResponse)(nil),       // 52: headscale.v1.ListApiKeysResponse\n\t(*DeleteApiKeyResponse)(nil),      // 53: headscale.v1.DeleteApiKeyResponse\n\t(*GetPolicyResponse)(nil),         // 54: headscale.v1.GetPolicyResponse\n\t(*SetPolicyResponse)(nil),         // 55: headscale.v1.SetPolicyResponse\n}\nvar file_headscale_v1_headscale_proto_depIdxs = []int32{\n\t2,  // 0: headscale.v1.HeadscaleService.CreateUser:input_type -> headscale.v1.CreateUserRequest\n\t3,  // 1: headscale.v1.HeadscaleService.RenameUser:input_type -> headscale.v1.RenameUserRequest\n\t4,  // 2: headscale.v1.HeadscaleService.DeleteUser:input_type -> headscale.v1.DeleteUserRequest\n\t5,  // 3: headscale.v1.HeadscaleService.ListUsers:input_type -> headscale.v1.ListUsersRequest\n\t6,  // 4: headscale.v1.HeadscaleService.CreatePreAuthKey:input_type -> headscale.v1.CreatePreAuthKeyRequest\n\t7,  // 5: headscale.v1.HeadscaleService.ExpirePreAuthKey:input_type -> headscale.v1.ExpirePreAuthKeyRequest\n\t8,  // 6: headscale.v1.HeadscaleService.DeletePreAuthKey:input_type -> headscale.v1.DeletePreAuthKeyRequest\n\t9,  // 7: headscale.v1.HeadscaleService.ListPreAuthKeys:input_type -> headscale.v1.ListPreAuthKeysRequest\n\t10, // 8: headscale.v1.HeadscaleService.DebugCreateNode:input_type -> headscale.v1.DebugCreateNodeRequest\n\t11, // 9: headscale.v1.HeadscaleService.GetNode:input_type -> headscale.v1.GetNodeRequest\n\t12, // 10: headscale.v1.HeadscaleService.SetTags:input_type -> headscale.v1.SetTagsRequest\n\t13, // 11: headscale.v1.HeadscaleService.SetApprovedRoutes:input_type -> headscale.v1.SetApprovedRoutesRequest\n\t14, // 12: headscale.v1.HeadscaleService.RegisterNode:input_type -> headscale.v1.RegisterNodeRequest\n\t15, // 13: headscale.v1.HeadscaleService.DeleteNode:input_type -> headscale.v1.DeleteNodeRequest\n\t16, // 14: headscale.v1.HeadscaleService.ExpireNode:input_type -> headscale.v1.ExpireNodeRequest\n\t17, // 15: headscale.v1.HeadscaleService.RenameNode:input_type -> headscale.v1.RenameNodeRequest\n\t18, // 16: headscale.v1.HeadscaleService.ListNodes:input_type -> headscale.v1.ListNodesRequest\n\t19, // 17: headscale.v1.HeadscaleService.BackfillNodeIPs:input_type -> headscale.v1.BackfillNodeIPsRequest\n\t20, // 18: headscale.v1.HeadscaleService.AuthRegister:input_type -> headscale.v1.AuthRegisterRequest\n\t21, // 19: headscale.v1.HeadscaleService.AuthApprove:input_type -> headscale.v1.AuthApproveRequest\n\t22, // 20: headscale.v1.HeadscaleService.AuthReject:input_type -> headscale.v1.AuthRejectRequest\n\t23, // 21: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest\n\t24, // 22: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest\n\t25, // 23: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest\n\t26, // 24: headscale.v1.HeadscaleService.DeleteApiKey:input_type -> headscale.v1.DeleteApiKeyRequest\n\t27, // 25: headscale.v1.HeadscaleService.GetPolicy:input_type -> headscale.v1.GetPolicyRequest\n\t28, // 26: headscale.v1.HeadscaleService.SetPolicy:input_type -> headscale.v1.SetPolicyRequest\n\t0,  // 27: headscale.v1.HeadscaleService.Health:input_type -> headscale.v1.HealthRequest\n\t29, // 28: headscale.v1.HeadscaleService.CreateUser:output_type -> headscale.v1.CreateUserResponse\n\t30, // 29: headscale.v1.HeadscaleService.RenameUser:output_type -> headscale.v1.RenameUserResponse\n\t31, // 30: headscale.v1.HeadscaleService.DeleteUser:output_type -> 
headscale.v1.DeleteUserResponse\n\t32, // 31: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse\n\t33, // 32: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse\n\t34, // 33: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse\n\t35, // 34: headscale.v1.HeadscaleService.DeletePreAuthKey:output_type -> headscale.v1.DeletePreAuthKeyResponse\n\t36, // 35: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse\n\t37, // 36: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse\n\t38, // 37: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse\n\t39, // 38: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse\n\t40, // 39: headscale.v1.HeadscaleService.SetApprovedRoutes:output_type -> headscale.v1.SetApprovedRoutesResponse\n\t41, // 40: headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse\n\t42, // 41: headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse\n\t43, // 42: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse\n\t44, // 43: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse\n\t45, // 44: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse\n\t46, // 45: headscale.v1.HeadscaleService.BackfillNodeIPs:output_type -> headscale.v1.BackfillNodeIPsResponse\n\t47, // 46: headscale.v1.HeadscaleService.AuthRegister:output_type -> headscale.v1.AuthRegisterResponse\n\t48, // 47: headscale.v1.HeadscaleService.AuthApprove:output_type -> headscale.v1.AuthApproveResponse\n\t49, // 48: headscale.v1.HeadscaleService.AuthReject:output_type -> headscale.v1.AuthRejectResponse\n\t50, // 49: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse\n\t51, // 50: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse\n\t52, // 51: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse\n\t53, // 52: headscale.v1.HeadscaleService.DeleteApiKey:output_type -> headscale.v1.DeleteApiKeyResponse\n\t54, // 53: headscale.v1.HeadscaleService.GetPolicy:output_type -> headscale.v1.GetPolicyResponse\n\t55, // 54: headscale.v1.HeadscaleService.SetPolicy:output_type -> headscale.v1.SetPolicyResponse\n\t1,  // 55: headscale.v1.HeadscaleService.Health:output_type -> headscale.v1.HealthResponse\n\t28, // [28:56] is the sub-list for method output_type\n\t0,  // [0:28] is the sub-list for method input_type\n\t0,  // [0:0] is the sub-list for extension type_name\n\t0,  // [0:0] is the sub-list for extension extendee\n\t0,  // [0:0] is the sub-list for field type_name\n}\n\nfunc init() { file_headscale_v1_headscale_proto_init() }\nfunc file_headscale_v1_headscale_proto_init() {\n\tif File_headscale_v1_headscale_proto != nil {\n\t\treturn\n\t}\n\tfile_headscale_v1_user_proto_init()\n\tfile_headscale_v1_preauthkey_proto_init()\n\tfile_headscale_v1_node_proto_init()\n\tfile_headscale_v1_apikey_proto_init()\n\tfile_headscale_v1_auth_proto_init()\n\tfile_headscale_v1_policy_proto_init()\n\ttype x struct{}\n\tout := protoimpl.TypeBuilder{\n\t\tFile: protoimpl.DescBuilder{\n\t\t\tGoPackagePath: reflect.TypeOf(x{}).PkgPath(),\n\t\t\tRawDescriptor: 
unsafe.Slice(unsafe.StringData(file_headscale_v1_headscale_proto_rawDesc), len(file_headscale_v1_headscale_proto_rawDesc)),\n\t\t\tNumEnums:      0,\n\t\t\tNumMessages:   2,\n\t\t\tNumExtensions: 0,\n\t\t\tNumServices:   1,\n\t\t},\n\t\tGoTypes:           file_headscale_v1_headscale_proto_goTypes,\n\t\tDependencyIndexes: file_headscale_v1_headscale_proto_depIdxs,\n\t\tMessageInfos:      file_headscale_v1_headscale_proto_msgTypes,\n\t}.Build()\n\tFile_headscale_v1_headscale_proto = out.File\n\tfile_headscale_v1_headscale_proto_goTypes = nil\n\tfile_headscale_v1_headscale_proto_depIdxs = nil\n}\n"
  },
  {
    "path": "gen/go/headscale/v1/headscale.pb.gw.go",
    "content": "// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.\n// source: headscale/v1/headscale.proto\n\n/*\nPackage v1 is a reverse proxy.\n\nIt translates gRPC into RESTful JSON APIs.\n*/\npackage v1\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"io\"\n\t\"net/http\"\n\n\t\"github.com/grpc-ecosystem/grpc-gateway/v2/runtime\"\n\t\"github.com/grpc-ecosystem/grpc-gateway/v2/utilities\"\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/codes\"\n\t\"google.golang.org/grpc/grpclog\"\n\t\"google.golang.org/grpc/metadata\"\n\t\"google.golang.org/grpc/status\"\n\t\"google.golang.org/protobuf/proto\"\n)\n\n// Suppress \"imported and not used\" errors\nvar (\n\t_ codes.Code\n\t_ io.Reader\n\t_ status.Status\n\t_ = errors.New\n\t_ = runtime.String\n\t_ = utilities.NewDoubleArray\n\t_ = metadata.Join\n)\n\nfunc request_HeadscaleService_CreateUser_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq CreateUserRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tmsg, err := client.CreateUser(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_CreateUser_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq CreateUserRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tmsg, err := server.CreateUser(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nfunc request_HeadscaleService_RenameUser_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq RenameUserRequest\n\t\tmetadata runtime.ServerMetadata\n\t\terr      error\n\t)\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tval, ok := pathParams[\"old_id\"]\n\tif !ok {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"missing parameter %s\", \"old_id\")\n\t}\n\tprotoReq.OldId, err = runtime.Uint64(val)\n\tif err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"type mismatch, parameter: %s, error: %v\", \"old_id\", err)\n\t}\n\tval, ok = pathParams[\"new_name\"]\n\tif !ok {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"missing parameter %s\", \"new_name\")\n\t}\n\tprotoReq.NewName, err = runtime.String(val)\n\tif err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"type mismatch, parameter: %s, error: %v\", \"new_name\", err)\n\t}\n\tmsg, err := client.RenameUser(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_RenameUser_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams 
map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq RenameUserRequest\n\t\tmetadata runtime.ServerMetadata\n\t\terr      error\n\t)\n\tval, ok := pathParams[\"old_id\"]\n\tif !ok {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"missing parameter %s\", \"old_id\")\n\t}\n\tprotoReq.OldId, err = runtime.Uint64(val)\n\tif err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"type mismatch, parameter: %s, error: %v\", \"old_id\", err)\n\t}\n\tval, ok = pathParams[\"new_name\"]\n\tif !ok {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"missing parameter %s\", \"new_name\")\n\t}\n\tprotoReq.NewName, err = runtime.String(val)\n\tif err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"type mismatch, parameter: %s, error: %v\", \"new_name\", err)\n\t}\n\tmsg, err := server.RenameUser(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nfunc request_HeadscaleService_DeleteUser_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq DeleteUserRequest\n\t\tmetadata runtime.ServerMetadata\n\t\terr      error\n\t)\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tval, ok := pathParams[\"id\"]\n\tif !ok {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"missing parameter %s\", \"id\")\n\t}\n\tprotoReq.Id, err = runtime.Uint64(val)\n\tif err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"type mismatch, parameter: %s, error: %v\", \"id\", err)\n\t}\n\tmsg, err := client.DeleteUser(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_DeleteUser_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq DeleteUserRequest\n\t\tmetadata runtime.ServerMetadata\n\t\terr      error\n\t)\n\tval, ok := pathParams[\"id\"]\n\tif !ok {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"missing parameter %s\", \"id\")\n\t}\n\tprotoReq.Id, err = runtime.Uint64(val)\n\tif err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"type mismatch, parameter: %s, error: %v\", \"id\", err)\n\t}\n\tmsg, err := server.DeleteUser(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nvar filter_HeadscaleService_ListUsers_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}\n\nfunc request_HeadscaleService_ListUsers_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq ListUsersRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tif err := req.ParseForm(); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tif err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_ListUsers_0); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tmsg, err := client.ListUsers(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), 
grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_ListUsers_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq ListUsersRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif err := req.ParseForm(); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tif err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_ListUsers_0); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tmsg, err := server.ListUsers(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nfunc request_HeadscaleService_CreatePreAuthKey_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq CreatePreAuthKeyRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tmsg, err := client.CreatePreAuthKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_CreatePreAuthKey_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq CreatePreAuthKeyRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tmsg, err := server.CreatePreAuthKey(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nfunc request_HeadscaleService_ExpirePreAuthKey_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq ExpirePreAuthKeyRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tmsg, err := client.ExpirePreAuthKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_ExpirePreAuthKey_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq ExpirePreAuthKeyRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tmsg, err := server.ExpirePreAuthKey(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nvar filter_HeadscaleService_DeletePreAuthKey_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: 
[]int(nil), Check: []int(nil)}\n\nfunc request_HeadscaleService_DeletePreAuthKey_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq DeletePreAuthKeyRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tif err := req.ParseForm(); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tif err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_DeletePreAuthKey_0); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tmsg, err := client.DeletePreAuthKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_DeletePreAuthKey_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq DeletePreAuthKeyRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif err := req.ParseForm(); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tif err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_DeletePreAuthKey_0); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tmsg, err := server.DeletePreAuthKey(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nfunc request_HeadscaleService_ListPreAuthKeys_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq ListPreAuthKeysRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tmsg, err := client.ListPreAuthKeys(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_ListPreAuthKeys_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq ListPreAuthKeysRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tmsg, err := server.ListPreAuthKeys(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nfunc request_HeadscaleService_DebugCreateNode_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq DebugCreateNodeRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tmsg, err := client.DebugCreateNode(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_DebugCreateNode_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, 
runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq DebugCreateNodeRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tmsg, err := server.DebugCreateNode(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nfunc request_HeadscaleService_GetNode_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq GetNodeRequest\n\t\tmetadata runtime.ServerMetadata\n\t\terr      error\n\t)\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tval, ok := pathParams[\"node_id\"]\n\tif !ok {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"missing parameter %s\", \"node_id\")\n\t}\n\tprotoReq.NodeId, err = runtime.Uint64(val)\n\tif err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"type mismatch, parameter: %s, error: %v\", \"node_id\", err)\n\t}\n\tmsg, err := client.GetNode(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_GetNode_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq GetNodeRequest\n\t\tmetadata runtime.ServerMetadata\n\t\terr      error\n\t)\n\tval, ok := pathParams[\"node_id\"]\n\tif !ok {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"missing parameter %s\", \"node_id\")\n\t}\n\tprotoReq.NodeId, err = runtime.Uint64(val)\n\tif err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"type mismatch, parameter: %s, error: %v\", \"node_id\", err)\n\t}\n\tmsg, err := server.GetNode(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nfunc request_HeadscaleService_SetTags_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq SetTagsRequest\n\t\tmetadata runtime.ServerMetadata\n\t\terr      error\n\t)\n\tif err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tval, ok := pathParams[\"node_id\"]\n\tif !ok {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"missing parameter %s\", \"node_id\")\n\t}\n\tprotoReq.NodeId, err = runtime.Uint64(val)\n\tif err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"type mismatch, parameter: %s, error: %v\", \"node_id\", err)\n\t}\n\tmsg, err := client.SetTags(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_SetTags_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq SetTagsRequest\n\t\tmetadata runtime.ServerMetadata\n\t\terr      error\n\t)\n\tif err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) 
{\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tval, ok := pathParams[\"node_id\"]\n\tif !ok {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"missing parameter %s\", \"node_id\")\n\t}\n\tprotoReq.NodeId, err = runtime.Uint64(val)\n\tif err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"type mismatch, parameter: %s, error: %v\", \"node_id\", err)\n\t}\n\tmsg, err := server.SetTags(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nfunc request_HeadscaleService_SetApprovedRoutes_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq SetApprovedRoutesRequest\n\t\tmetadata runtime.ServerMetadata\n\t\terr      error\n\t)\n\tif err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tval, ok := pathParams[\"node_id\"]\n\tif !ok {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"missing parameter %s\", \"node_id\")\n\t}\n\tprotoReq.NodeId, err = runtime.Uint64(val)\n\tif err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"type mismatch, parameter: %s, error: %v\", \"node_id\", err)\n\t}\n\tmsg, err := client.SetApprovedRoutes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_SetApprovedRoutes_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq SetApprovedRoutesRequest\n\t\tmetadata runtime.ServerMetadata\n\t\terr      error\n\t)\n\tif err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tval, ok := pathParams[\"node_id\"]\n\tif !ok {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"missing parameter %s\", \"node_id\")\n\t}\n\tprotoReq.NodeId, err = runtime.Uint64(val)\n\tif err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"type mismatch, parameter: %s, error: %v\", \"node_id\", err)\n\t}\n\tmsg, err := server.SetApprovedRoutes(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nvar filter_HeadscaleService_RegisterNode_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}\n\nfunc request_HeadscaleService_RegisterNode_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq RegisterNodeRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tif err := req.ParseForm(); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tif err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_RegisterNode_0); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tmsg, err := client.RegisterNode(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), 
grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_RegisterNode_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq RegisterNodeRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif err := req.ParseForm(); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tif err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_RegisterNode_0); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tmsg, err := server.RegisterNode(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nfunc request_HeadscaleService_DeleteNode_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq DeleteNodeRequest\n\t\tmetadata runtime.ServerMetadata\n\t\terr      error\n\t)\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tval, ok := pathParams[\"node_id\"]\n\tif !ok {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"missing parameter %s\", \"node_id\")\n\t}\n\tprotoReq.NodeId, err = runtime.Uint64(val)\n\tif err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"type mismatch, parameter: %s, error: %v\", \"node_id\", err)\n\t}\n\tmsg, err := client.DeleteNode(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_DeleteNode_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq DeleteNodeRequest\n\t\tmetadata runtime.ServerMetadata\n\t\terr      error\n\t)\n\tval, ok := pathParams[\"node_id\"]\n\tif !ok {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"missing parameter %s\", \"node_id\")\n\t}\n\tprotoReq.NodeId, err = runtime.Uint64(val)\n\tif err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"type mismatch, parameter: %s, error: %v\", \"node_id\", err)\n\t}\n\tmsg, err := server.DeleteNode(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nvar filter_HeadscaleService_ExpireNode_0 = &utilities.DoubleArray{Encoding: map[string]int{\"node_id\": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}\n\nfunc request_HeadscaleService_ExpireNode_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq ExpireNodeRequest\n\t\tmetadata runtime.ServerMetadata\n\t\terr      error\n\t)\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tval, ok := pathParams[\"node_id\"]\n\tif !ok {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"missing parameter %s\", \"node_id\")\n\t}\n\tprotoReq.NodeId, err = runtime.Uint64(val)\n\tif err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"type mismatch, parameter: %s, error: %v\", \"node_id\", err)\n\t}\n\tif err := req.ParseForm(); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tif err := 
runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_ExpireNode_0); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tmsg, err := client.ExpireNode(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_ExpireNode_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq ExpireNodeRequest\n\t\tmetadata runtime.ServerMetadata\n\t\terr      error\n\t)\n\tval, ok := pathParams[\"node_id\"]\n\tif !ok {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"missing parameter %s\", \"node_id\")\n\t}\n\tprotoReq.NodeId, err = runtime.Uint64(val)\n\tif err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"type mismatch, parameter: %s, error: %v\", \"node_id\", err)\n\t}\n\tif err := req.ParseForm(); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tif err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_ExpireNode_0); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tmsg, err := server.ExpireNode(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nfunc request_HeadscaleService_RenameNode_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq RenameNodeRequest\n\t\tmetadata runtime.ServerMetadata\n\t\terr      error\n\t)\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tval, ok := pathParams[\"node_id\"]\n\tif !ok {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"missing parameter %s\", \"node_id\")\n\t}\n\tprotoReq.NodeId, err = runtime.Uint64(val)\n\tif err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"type mismatch, parameter: %s, error: %v\", \"node_id\", err)\n\t}\n\tval, ok = pathParams[\"new_name\"]\n\tif !ok {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"missing parameter %s\", \"new_name\")\n\t}\n\tprotoReq.NewName, err = runtime.String(val)\n\tif err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"type mismatch, parameter: %s, error: %v\", \"new_name\", err)\n\t}\n\tmsg, err := client.RenameNode(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_RenameNode_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq RenameNodeRequest\n\t\tmetadata runtime.ServerMetadata\n\t\terr      error\n\t)\n\tval, ok := pathParams[\"node_id\"]\n\tif !ok {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"missing parameter %s\", \"node_id\")\n\t}\n\tprotoReq.NodeId, err = runtime.Uint64(val)\n\tif err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"type mismatch, parameter: %s, error: %v\", \"node_id\", err)\n\t}\n\tval, ok = pathParams[\"new_name\"]\n\tif !ok {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"missing parameter %s\", 
\"new_name\")\n\t}\n\tprotoReq.NewName, err = runtime.String(val)\n\tif err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"type mismatch, parameter: %s, error: %v\", \"new_name\", err)\n\t}\n\tmsg, err := server.RenameNode(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nvar filter_HeadscaleService_ListNodes_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}\n\nfunc request_HeadscaleService_ListNodes_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq ListNodesRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tif err := req.ParseForm(); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tif err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_ListNodes_0); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tmsg, err := client.ListNodes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_ListNodes_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq ListNodesRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif err := req.ParseForm(); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tif err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_ListNodes_0); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tmsg, err := server.ListNodes(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nvar filter_HeadscaleService_BackfillNodeIPs_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}\n\nfunc request_HeadscaleService_BackfillNodeIPs_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq BackfillNodeIPsRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tif err := req.ParseForm(); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tif err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_BackfillNodeIPs_0); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tmsg, err := client.BackfillNodeIPs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_BackfillNodeIPs_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq BackfillNodeIPsRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif err := req.ParseForm(); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tif err := runtime.PopulateQueryParameters(&protoReq, req.Form, 
filter_HeadscaleService_BackfillNodeIPs_0); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tmsg, err := server.BackfillNodeIPs(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nfunc request_HeadscaleService_AuthRegister_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq AuthRegisterRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tmsg, err := client.AuthRegister(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_AuthRegister_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq AuthRegisterRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tmsg, err := server.AuthRegister(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nfunc request_HeadscaleService_AuthApprove_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq AuthApproveRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tmsg, err := client.AuthApprove(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_AuthApprove_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq AuthApproveRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tmsg, err := server.AuthApprove(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nfunc request_HeadscaleService_AuthReject_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq AuthRejectRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tmsg, err := client.AuthReject(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc 
local_request_HeadscaleService_AuthReject_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq AuthRejectRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tmsg, err := server.AuthReject(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nfunc request_HeadscaleService_CreateApiKey_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq CreateApiKeyRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tmsg, err := client.CreateApiKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_CreateApiKey_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq CreateApiKeyRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tmsg, err := server.CreateApiKey(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nfunc request_HeadscaleService_ExpireApiKey_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq ExpireApiKeyRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tmsg, err := client.ExpireApiKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_ExpireApiKey_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq ExpireApiKeyRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tmsg, err := server.ExpireApiKey(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nfunc request_HeadscaleService_ListApiKeys_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq ListApiKeysRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, 
req.Body)\n\t}\n\tmsg, err := client.ListApiKeys(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_ListApiKeys_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq ListApiKeysRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tmsg, err := server.ListApiKeys(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nvar filter_HeadscaleService_DeleteApiKey_0 = &utilities.DoubleArray{Encoding: map[string]int{\"prefix\": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}\n\nfunc request_HeadscaleService_DeleteApiKey_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq DeleteApiKeyRequest\n\t\tmetadata runtime.ServerMetadata\n\t\terr      error\n\t)\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tval, ok := pathParams[\"prefix\"]\n\tif !ok {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"missing parameter %s\", \"prefix\")\n\t}\n\tprotoReq.Prefix, err = runtime.String(val)\n\tif err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"type mismatch, parameter: %s, error: %v\", \"prefix\", err)\n\t}\n\tif err := req.ParseForm(); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tif err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_DeleteApiKey_0); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tmsg, err := client.DeleteApiKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_DeleteApiKey_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq DeleteApiKeyRequest\n\t\tmetadata runtime.ServerMetadata\n\t\terr      error\n\t)\n\tval, ok := pathParams[\"prefix\"]\n\tif !ok {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"missing parameter %s\", \"prefix\")\n\t}\n\tprotoReq.Prefix, err = runtime.String(val)\n\tif err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"type mismatch, parameter: %s, error: %v\", \"prefix\", err)\n\t}\n\tif err := req.ParseForm(); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tif err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_DeleteApiKey_0); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tmsg, err := server.DeleteApiKey(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nfunc request_HeadscaleService_GetPolicy_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq GetPolicyRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tmsg, err := client.GetPolicy(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), 
grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_GetPolicy_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq GetPolicyRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tmsg, err := server.GetPolicy(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nfunc request_HeadscaleService_SetPolicy_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq SetPolicyRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tmsg, err := client.SetPolicy(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_SetPolicy_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq SetPolicyRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\tmsg, err := server.SetPolicy(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\nfunc request_HeadscaleService_Health_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq HealthRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tif req.Body != nil {\n\t\t_, _ = io.Copy(io.Discard, req.Body)\n\t}\n\tmsg, err := client.Health(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n}\n\nfunc local_request_HeadscaleService_Health_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar (\n\t\tprotoReq HealthRequest\n\t\tmetadata runtime.ServerMetadata\n\t)\n\tmsg, err := server.Health(ctx, &protoReq)\n\treturn msg, metadata, err\n}\n\n// RegisterHeadscaleServiceHandlerServer registers the http handlers for service HeadscaleService to \"mux\".\n// UnaryRPC     :call HeadscaleServiceServer directly.\n// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.\n// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterHeadscaleServiceHandlerFromEndpoint instead.\n// GRPC interceptors will not work for this type of registration. 
To use interceptors, you must use the \"runtime.WithMiddlewares\" option in the \"runtime.NewServeMux\" call.\nfunc RegisterHeadscaleServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server HeadscaleServiceServer) error {\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_CreateUser_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/CreateUser\", runtime.WithHTTPPathPattern(\"/api/v1/user\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_CreateUser_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_CreateUser_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_RenameUser_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/RenameUser\", runtime.WithHTTPPathPattern(\"/api/v1/user/{old_id}/rename/{new_name}\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_RenameUser_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_RenameUser_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodDelete, pattern_HeadscaleService_DeleteUser_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/DeleteUser\", runtime.WithHTTPPathPattern(\"/api/v1/user/{id}\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, 
err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_DeleteUser_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_DeleteUser_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodGet, pattern_HeadscaleService_ListUsers_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/ListUsers\", runtime.WithHTTPPathPattern(\"/api/v1/user\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_ListUsers_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_ListUsers_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_CreatePreAuthKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/CreatePreAuthKey\", runtime.WithHTTPPathPattern(\"/api/v1/preauthkey\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_CreatePreAuthKey_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_CreatePreAuthKey_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_ExpirePreAuthKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = 
grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/ExpirePreAuthKey\", runtime.WithHTTPPathPattern(\"/api/v1/preauthkey/expire\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_ExpirePreAuthKey_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_ExpirePreAuthKey_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodDelete, pattern_HeadscaleService_DeletePreAuthKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/DeletePreAuthKey\", runtime.WithHTTPPathPattern(\"/api/v1/preauthkey\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_DeletePreAuthKey_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_DeletePreAuthKey_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodGet, pattern_HeadscaleService_ListPreAuthKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/ListPreAuthKeys\", runtime.WithHTTPPathPattern(\"/api/v1/preauthkey\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_ListPreAuthKeys_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, 
err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_ListPreAuthKeys_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_DebugCreateNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/DebugCreateNode\", runtime.WithHTTPPathPattern(\"/api/v1/debug/node\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_DebugCreateNode_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_DebugCreateNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodGet, pattern_HeadscaleService_GetNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/GetNode\", runtime.WithHTTPPathPattern(\"/api/v1/node/{node_id}\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_GetNode_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_GetNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_SetTags_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/SetTags\", runtime.WithHTTPPathPattern(\"/api/v1/node/{node_id}/tags\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := 
local_request_HeadscaleService_SetTags_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_SetTags_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_SetApprovedRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/SetApprovedRoutes\", runtime.WithHTTPPathPattern(\"/api/v1/node/{node_id}/approve_routes\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_SetApprovedRoutes_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_SetApprovedRoutes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_RegisterNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/RegisterNode\", runtime.WithHTTPPathPattern(\"/api/v1/node/register\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_RegisterNode_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_RegisterNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodDelete, pattern_HeadscaleService_DeleteNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, 
&stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/DeleteNode\", runtime.WithHTTPPathPattern(\"/api/v1/node/{node_id}\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_DeleteNode_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_DeleteNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_ExpireNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/ExpireNode\", runtime.WithHTTPPathPattern(\"/api/v1/node/{node_id}/expire\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_ExpireNode_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_ExpireNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_RenameNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/RenameNode\", runtime.WithHTTPPathPattern(\"/api/v1/node/{node_id}/rename/{new_name}\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_RenameNode_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_RenameNode_0(annotatedContext, mux, outboundMarshaler, w, req, 
resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodGet, pattern_HeadscaleService_ListNodes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/ListNodes\", runtime.WithHTTPPathPattern(\"/api/v1/node\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_ListNodes_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_ListNodes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_BackfillNodeIPs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/BackfillNodeIPs\", runtime.WithHTTPPathPattern(\"/api/v1/node/backfillips\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_BackfillNodeIPs_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_BackfillNodeIPs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_AuthRegister_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/AuthRegister\", runtime.WithHTTPPathPattern(\"/api/v1/auth/register\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_AuthRegister_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, 
stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_AuthRegister_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_AuthApprove_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/AuthApprove\", runtime.WithHTTPPathPattern(\"/api/v1/auth/approve\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_AuthApprove_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_AuthApprove_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_AuthReject_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/AuthReject\", runtime.WithHTTPPathPattern(\"/api/v1/auth/reject\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_AuthReject_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_AuthReject_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_CreateApiKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/CreateApiKey\", 
runtime.WithHTTPPathPattern(\"/api/v1/apikey\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_CreateApiKey_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_CreateApiKey_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_ExpireApiKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/ExpireApiKey\", runtime.WithHTTPPathPattern(\"/api/v1/apikey/expire\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_ExpireApiKey_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_ExpireApiKey_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodGet, pattern_HeadscaleService_ListApiKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/ListApiKeys\", runtime.WithHTTPPathPattern(\"/api/v1/apikey\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_ListApiKeys_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_ListApiKeys_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodDelete, pattern_HeadscaleService_DeleteApiKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := 
context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/DeleteApiKey\", runtime.WithHTTPPathPattern(\"/api/v1/apikey/{prefix}\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_DeleteApiKey_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_DeleteApiKey_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodGet, pattern_HeadscaleService_GetPolicy_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/GetPolicy\", runtime.WithHTTPPathPattern(\"/api/v1/policy\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_GetPolicy_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_GetPolicy_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPut, pattern_HeadscaleService_SetPolicy_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/SetPolicy\", runtime.WithHTTPPathPattern(\"/api/v1/policy\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_SetPolicy_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, 
err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_SetPolicy_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodGet, pattern_HeadscaleService_Health_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/Health\", runtime.WithHTTPPathPattern(\"/api/v1/health\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_HeadscaleService_Health_0(annotatedContext, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_Health_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\n\treturn nil\n}\n\n// RegisterHeadscaleServiceHandlerFromEndpoint is same as RegisterHeadscaleServiceHandler but\n// automatically dials to \"endpoint\" and closes the connection when \"ctx\" gets done.\nfunc RegisterHeadscaleServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {\n\tconn, err := grpc.NewClient(endpoint, opts...)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tif cerr := conn.Close(); cerr != nil {\n\t\t\t\tgrpclog.Errorf(\"Failed to close conn to %s: %v\", endpoint, cerr)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tgo func() {\n\t\t\t<-ctx.Done()\n\t\t\tif cerr := conn.Close(); cerr != nil {\n\t\t\t\tgrpclog.Errorf(\"Failed to close conn to %s: %v\", endpoint, cerr)\n\t\t\t}\n\t\t}()\n\t}()\n\treturn RegisterHeadscaleServiceHandler(ctx, mux, conn)\n}\n\n// RegisterHeadscaleServiceHandler registers the http handlers for service HeadscaleService to \"mux\".\n// The handlers forward requests to the grpc endpoint over \"conn\".\nfunc RegisterHeadscaleServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {\n\treturn RegisterHeadscaleServiceHandlerClient(ctx, mux, NewHeadscaleServiceClient(conn))\n}\n\n// RegisterHeadscaleServiceHandlerClient registers the http handlers for service HeadscaleService\n// to \"mux\". The handlers forward requests to the grpc endpoint over the given implementation of \"HeadscaleServiceClient\".\n// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in \"HeadscaleServiceClient\"\n// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in\n// \"HeadscaleServiceClient\" to call the correct interceptors. 
This client ignores the HTTP middlewares.\nfunc RegisterHeadscaleServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client HeadscaleServiceClient) error {\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_CreateUser_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/CreateUser\", runtime.WithHTTPPathPattern(\"/api/v1/user\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_CreateUser_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_CreateUser_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_RenameUser_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/RenameUser\", runtime.WithHTTPPathPattern(\"/api/v1/user/{old_id}/rename/{new_name}\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_RenameUser_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_RenameUser_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodDelete, pattern_HeadscaleService_DeleteUser_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/DeleteUser\", runtime.WithHTTPPathPattern(\"/api/v1/user/{id}\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_DeleteUser_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_DeleteUser_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodGet, pattern_HeadscaleService_ListUsers_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer 
cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/ListUsers\", runtime.WithHTTPPathPattern(\"/api/v1/user\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_ListUsers_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_ListUsers_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_CreatePreAuthKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/CreatePreAuthKey\", runtime.WithHTTPPathPattern(\"/api/v1/preauthkey\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_CreatePreAuthKey_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_CreatePreAuthKey_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_ExpirePreAuthKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/ExpirePreAuthKey\", runtime.WithHTTPPathPattern(\"/api/v1/preauthkey/expire\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_ExpirePreAuthKey_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_ExpirePreAuthKey_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodDelete, pattern_HeadscaleService_DeletePreAuthKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/DeletePreAuthKey\", runtime.WithHTTPPathPattern(\"/api/v1/preauthkey\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, 
err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_DeletePreAuthKey_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_DeletePreAuthKey_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodGet, pattern_HeadscaleService_ListPreAuthKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/ListPreAuthKeys\", runtime.WithHTTPPathPattern(\"/api/v1/preauthkey\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_ListPreAuthKeys_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_ListPreAuthKeys_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_DebugCreateNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/DebugCreateNode\", runtime.WithHTTPPathPattern(\"/api/v1/debug/node\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_DebugCreateNode_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_DebugCreateNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodGet, pattern_HeadscaleService_GetNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/GetNode\", runtime.WithHTTPPathPattern(\"/api/v1/node/{node_id}\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_GetNode_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, 
err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_GetNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_SetTags_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/SetTags\", runtime.WithHTTPPathPattern(\"/api/v1/node/{node_id}/tags\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_SetTags_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_SetTags_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_SetApprovedRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/SetApprovedRoutes\", runtime.WithHTTPPathPattern(\"/api/v1/node/{node_id}/approve_routes\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_SetApprovedRoutes_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_SetApprovedRoutes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_RegisterNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/RegisterNode\", runtime.WithHTTPPathPattern(\"/api/v1/node/register\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_RegisterNode_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_RegisterNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodDelete, pattern_HeadscaleService_DeleteNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := 
context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/DeleteNode\", runtime.WithHTTPPathPattern(\"/api/v1/node/{node_id}\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_DeleteNode_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_DeleteNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_ExpireNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/ExpireNode\", runtime.WithHTTPPathPattern(\"/api/v1/node/{node_id}/expire\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_ExpireNode_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_ExpireNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_RenameNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/RenameNode\", runtime.WithHTTPPathPattern(\"/api/v1/node/{node_id}/rename/{new_name}\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_RenameNode_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_RenameNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodGet, pattern_HeadscaleService_ListNodes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/ListNodes\", runtime.WithHTTPPathPattern(\"/api/v1/node\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, 
err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_ListNodes_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_ListNodes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_BackfillNodeIPs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/BackfillNodeIPs\", runtime.WithHTTPPathPattern(\"/api/v1/node/backfillips\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_BackfillNodeIPs_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_BackfillNodeIPs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_AuthRegister_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/AuthRegister\", runtime.WithHTTPPathPattern(\"/api/v1/auth/register\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_AuthRegister_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_AuthRegister_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_AuthApprove_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/AuthApprove\", runtime.WithHTTPPathPattern(\"/api/v1/auth/approve\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_AuthApprove_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, 
err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_AuthApprove_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_AuthReject_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/AuthReject\", runtime.WithHTTPPathPattern(\"/api/v1/auth/reject\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_AuthReject_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_AuthReject_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_CreateApiKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/CreateApiKey\", runtime.WithHTTPPathPattern(\"/api/v1/apikey\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_CreateApiKey_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_CreateApiKey_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPost, pattern_HeadscaleService_ExpireApiKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/ExpireApiKey\", runtime.WithHTTPPathPattern(\"/api/v1/apikey/expire\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_ExpireApiKey_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_ExpireApiKey_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodGet, pattern_HeadscaleService_ListApiKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer 
cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/ListApiKeys\", runtime.WithHTTPPathPattern(\"/api/v1/apikey\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_ListApiKeys_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_ListApiKeys_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodDelete, pattern_HeadscaleService_DeleteApiKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/DeleteApiKey\", runtime.WithHTTPPathPattern(\"/api/v1/apikey/{prefix}\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_DeleteApiKey_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_DeleteApiKey_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodGet, pattern_HeadscaleService_GetPolicy_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/GetPolicy\", runtime.WithHTTPPathPattern(\"/api/v1/policy\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_GetPolicy_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_GetPolicy_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodPut, pattern_HeadscaleService_SetPolicy_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/SetPolicy\", runtime.WithHTTPPathPattern(\"/api/v1/policy\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := 
request_HeadscaleService_SetPolicy_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_SetPolicy_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\tmux.Handle(http.MethodGet, pattern_HeadscaleService_Health_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\tannotatedContext, err := runtime.AnnotateContext(ctx, mux, req, \"/headscale.v1.HeadscaleService/Health\", runtime.WithHTTPPathPattern(\"/api/v1/health\"))\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_HeadscaleService_Health_0(annotatedContext, inboundMarshaler, client, req, pathParams)\n\t\tannotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tforward_HeadscaleService_Health_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\t})\n\treturn nil\n}\n\nvar (\n\tpattern_HeadscaleService_CreateUser_0        = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{\"api\", \"v1\", \"user\"}, \"\"))\n\tpattern_HeadscaleService_RenameUser_0        = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{\"api\", \"v1\", \"user\", \"old_id\", \"rename\", \"new_name\"}, \"\"))\n\tpattern_HeadscaleService_DeleteUser_0        = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{\"api\", \"v1\", \"user\", \"id\"}, \"\"))\n\tpattern_HeadscaleService_ListUsers_0         = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{\"api\", \"v1\", \"user\"}, \"\"))\n\tpattern_HeadscaleService_CreatePreAuthKey_0  = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{\"api\", \"v1\", \"preauthkey\"}, \"\"))\n\tpattern_HeadscaleService_ExpirePreAuthKey_0  = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{\"api\", \"v1\", \"preauthkey\", \"expire\"}, \"\"))\n\tpattern_HeadscaleService_DeletePreAuthKey_0  = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{\"api\", \"v1\", \"preauthkey\"}, \"\"))\n\tpattern_HeadscaleService_ListPreAuthKeys_0   = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{\"api\", \"v1\", \"preauthkey\"}, \"\"))\n\tpattern_HeadscaleService_DebugCreateNode_0   = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{\"api\", \"v1\", \"debug\", \"node\"}, \"\"))\n\tpattern_HeadscaleService_GetNode_0           = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{\"api\", \"v1\", \"node\", \"node_id\"}, \"\"))\n\tpattern_HeadscaleService_SetTags_0           = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{\"api\", \"v1\", \"node\", \"node_id\", \"tags\"}, \"\"))\n\tpattern_HeadscaleService_SetApprovedRoutes_0 = 
runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{\"api\", \"v1\", \"node\", \"node_id\", \"approve_routes\"}, \"\"))\n\tpattern_HeadscaleService_RegisterNode_0      = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{\"api\", \"v1\", \"node\", \"register\"}, \"\"))\n\tpattern_HeadscaleService_DeleteNode_0        = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{\"api\", \"v1\", \"node\", \"node_id\"}, \"\"))\n\tpattern_HeadscaleService_ExpireNode_0        = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{\"api\", \"v1\", \"node\", \"node_id\", \"expire\"}, \"\"))\n\tpattern_HeadscaleService_RenameNode_0        = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{\"api\", \"v1\", \"node\", \"node_id\", \"rename\", \"new_name\"}, \"\"))\n\tpattern_HeadscaleService_ListNodes_0         = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{\"api\", \"v1\", \"node\"}, \"\"))\n\tpattern_HeadscaleService_BackfillNodeIPs_0   = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{\"api\", \"v1\", \"node\", \"backfillips\"}, \"\"))\n\tpattern_HeadscaleService_AuthRegister_0      = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{\"api\", \"v1\", \"auth\", \"register\"}, \"\"))\n\tpattern_HeadscaleService_AuthApprove_0       = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{\"api\", \"v1\", \"auth\", \"approve\"}, \"\"))\n\tpattern_HeadscaleService_AuthReject_0        = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{\"api\", \"v1\", \"auth\", \"reject\"}, \"\"))\n\tpattern_HeadscaleService_CreateApiKey_0      = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{\"api\", \"v1\", \"apikey\"}, \"\"))\n\tpattern_HeadscaleService_ExpireApiKey_0      = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{\"api\", \"v1\", \"apikey\", \"expire\"}, \"\"))\n\tpattern_HeadscaleService_ListApiKeys_0       = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{\"api\", \"v1\", \"apikey\"}, \"\"))\n\tpattern_HeadscaleService_DeleteApiKey_0      = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{\"api\", \"v1\", \"apikey\", \"prefix\"}, \"\"))\n\tpattern_HeadscaleService_GetPolicy_0         = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{\"api\", \"v1\", \"policy\"}, \"\"))\n\tpattern_HeadscaleService_SetPolicy_0         = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{\"api\", \"v1\", \"policy\"}, \"\"))\n\tpattern_HeadscaleService_Health_0            = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{\"api\", \"v1\", \"health\"}, \"\"))\n)\n\nvar (\n\tforward_HeadscaleService_CreateUser_0        = runtime.ForwardResponseMessage\n\tforward_HeadscaleService_RenameUser_0        = runtime.ForwardResponseMessage\n\tforward_HeadscaleService_DeleteUser_0        = runtime.ForwardResponseMessage\n\tforward_HeadscaleService_ListUsers_0         = runtime.ForwardResponseMessage\n\tforward_HeadscaleService_CreatePreAuthKey_0  = runtime.ForwardResponseMessage\n\tforward_HeadscaleService_ExpirePreAuthKey_0  = 
runtime.ForwardResponseMessage\n\tforward_HeadscaleService_DeletePreAuthKey_0  = runtime.ForwardResponseMessage\n\tforward_HeadscaleService_ListPreAuthKeys_0   = runtime.ForwardResponseMessage\n\tforward_HeadscaleService_DebugCreateNode_0   = runtime.ForwardResponseMessage\n\tforward_HeadscaleService_GetNode_0           = runtime.ForwardResponseMessage\n\tforward_HeadscaleService_SetTags_0           = runtime.ForwardResponseMessage\n\tforward_HeadscaleService_SetApprovedRoutes_0 = runtime.ForwardResponseMessage\n\tforward_HeadscaleService_RegisterNode_0      = runtime.ForwardResponseMessage\n\tforward_HeadscaleService_DeleteNode_0        = runtime.ForwardResponseMessage\n\tforward_HeadscaleService_ExpireNode_0        = runtime.ForwardResponseMessage\n\tforward_HeadscaleService_RenameNode_0        = runtime.ForwardResponseMessage\n\tforward_HeadscaleService_ListNodes_0         = runtime.ForwardResponseMessage\n\tforward_HeadscaleService_BackfillNodeIPs_0   = runtime.ForwardResponseMessage\n\tforward_HeadscaleService_AuthRegister_0      = runtime.ForwardResponseMessage\n\tforward_HeadscaleService_AuthApprove_0       = runtime.ForwardResponseMessage\n\tforward_HeadscaleService_AuthReject_0        = runtime.ForwardResponseMessage\n\tforward_HeadscaleService_CreateApiKey_0      = runtime.ForwardResponseMessage\n\tforward_HeadscaleService_ExpireApiKey_0      = runtime.ForwardResponseMessage\n\tforward_HeadscaleService_ListApiKeys_0       = runtime.ForwardResponseMessage\n\tforward_HeadscaleService_DeleteApiKey_0      = runtime.ForwardResponseMessage\n\tforward_HeadscaleService_GetPolicy_0         = runtime.ForwardResponseMessage\n\tforward_HeadscaleService_SetPolicy_0         = runtime.ForwardResponseMessage\n\tforward_HeadscaleService_Health_0            = runtime.ForwardResponseMessage\n)\n"
  },
  {
    "path": "gen/go/headscale/v1/headscale_grpc.pb.go",
    "content": "// Code generated by protoc-gen-go-grpc. DO NOT EDIT.\n// versions:\n// - protoc-gen-go-grpc v1.6.0\n// - protoc             (unknown)\n// source: headscale/v1/headscale.proto\n\npackage v1\n\nimport (\n\tcontext \"context\"\n\tgrpc \"google.golang.org/grpc\"\n\tcodes \"google.golang.org/grpc/codes\"\n\tstatus \"google.golang.org/grpc/status\"\n)\n\n// This is a compile-time assertion to ensure that this generated file\n// is compatible with the grpc package it is being compiled against.\n// Requires gRPC-Go v1.64.0 or later.\nconst _ = grpc.SupportPackageIsVersion9\n\nconst (\n\tHeadscaleService_CreateUser_FullMethodName        = \"/headscale.v1.HeadscaleService/CreateUser\"\n\tHeadscaleService_RenameUser_FullMethodName        = \"/headscale.v1.HeadscaleService/RenameUser\"\n\tHeadscaleService_DeleteUser_FullMethodName        = \"/headscale.v1.HeadscaleService/DeleteUser\"\n\tHeadscaleService_ListUsers_FullMethodName         = \"/headscale.v1.HeadscaleService/ListUsers\"\n\tHeadscaleService_CreatePreAuthKey_FullMethodName  = \"/headscale.v1.HeadscaleService/CreatePreAuthKey\"\n\tHeadscaleService_ExpirePreAuthKey_FullMethodName  = \"/headscale.v1.HeadscaleService/ExpirePreAuthKey\"\n\tHeadscaleService_DeletePreAuthKey_FullMethodName  = \"/headscale.v1.HeadscaleService/DeletePreAuthKey\"\n\tHeadscaleService_ListPreAuthKeys_FullMethodName   = \"/headscale.v1.HeadscaleService/ListPreAuthKeys\"\n\tHeadscaleService_DebugCreateNode_FullMethodName   = \"/headscale.v1.HeadscaleService/DebugCreateNode\"\n\tHeadscaleService_GetNode_FullMethodName           = \"/headscale.v1.HeadscaleService/GetNode\"\n\tHeadscaleService_SetTags_FullMethodName           = \"/headscale.v1.HeadscaleService/SetTags\"\n\tHeadscaleService_SetApprovedRoutes_FullMethodName = \"/headscale.v1.HeadscaleService/SetApprovedRoutes\"\n\tHeadscaleService_RegisterNode_FullMethodName      = \"/headscale.v1.HeadscaleService/RegisterNode\"\n\tHeadscaleService_DeleteNode_FullMethodName        = \"/headscale.v1.HeadscaleService/DeleteNode\"\n\tHeadscaleService_ExpireNode_FullMethodName        = \"/headscale.v1.HeadscaleService/ExpireNode\"\n\tHeadscaleService_RenameNode_FullMethodName        = \"/headscale.v1.HeadscaleService/RenameNode\"\n\tHeadscaleService_ListNodes_FullMethodName         = \"/headscale.v1.HeadscaleService/ListNodes\"\n\tHeadscaleService_BackfillNodeIPs_FullMethodName   = \"/headscale.v1.HeadscaleService/BackfillNodeIPs\"\n\tHeadscaleService_AuthRegister_FullMethodName      = \"/headscale.v1.HeadscaleService/AuthRegister\"\n\tHeadscaleService_AuthApprove_FullMethodName       = \"/headscale.v1.HeadscaleService/AuthApprove\"\n\tHeadscaleService_AuthReject_FullMethodName        = \"/headscale.v1.HeadscaleService/AuthReject\"\n\tHeadscaleService_CreateApiKey_FullMethodName      = \"/headscale.v1.HeadscaleService/CreateApiKey\"\n\tHeadscaleService_ExpireApiKey_FullMethodName      = \"/headscale.v1.HeadscaleService/ExpireApiKey\"\n\tHeadscaleService_ListApiKeys_FullMethodName       = \"/headscale.v1.HeadscaleService/ListApiKeys\"\n\tHeadscaleService_DeleteApiKey_FullMethodName      = \"/headscale.v1.HeadscaleService/DeleteApiKey\"\n\tHeadscaleService_GetPolicy_FullMethodName         = \"/headscale.v1.HeadscaleService/GetPolicy\"\n\tHeadscaleService_SetPolicy_FullMethodName         = \"/headscale.v1.HeadscaleService/SetPolicy\"\n\tHeadscaleService_Health_FullMethodName            = \"/headscale.v1.HeadscaleService/Health\"\n)\n\n// HeadscaleServiceClient is the client API for HeadscaleService service.\n//\n// 
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.\ntype HeadscaleServiceClient interface {\n\t// --- User start ---\n\tCreateUser(ctx context.Context, in *CreateUserRequest, opts ...grpc.CallOption) (*CreateUserResponse, error)\n\tRenameUser(ctx context.Context, in *RenameUserRequest, opts ...grpc.CallOption) (*RenameUserResponse, error)\n\tDeleteUser(ctx context.Context, in *DeleteUserRequest, opts ...grpc.CallOption) (*DeleteUserResponse, error)\n\tListUsers(ctx context.Context, in *ListUsersRequest, opts ...grpc.CallOption) (*ListUsersResponse, error)\n\t// --- PreAuthKeys start ---\n\tCreatePreAuthKey(ctx context.Context, in *CreatePreAuthKeyRequest, opts ...grpc.CallOption) (*CreatePreAuthKeyResponse, error)\n\tExpirePreAuthKey(ctx context.Context, in *ExpirePreAuthKeyRequest, opts ...grpc.CallOption) (*ExpirePreAuthKeyResponse, error)\n\tDeletePreAuthKey(ctx context.Context, in *DeletePreAuthKeyRequest, opts ...grpc.CallOption) (*DeletePreAuthKeyResponse, error)\n\tListPreAuthKeys(ctx context.Context, in *ListPreAuthKeysRequest, opts ...grpc.CallOption) (*ListPreAuthKeysResponse, error)\n\t// --- Node start ---\n\tDebugCreateNode(ctx context.Context, in *DebugCreateNodeRequest, opts ...grpc.CallOption) (*DebugCreateNodeResponse, error)\n\tGetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error)\n\tSetTags(ctx context.Context, in *SetTagsRequest, opts ...grpc.CallOption) (*SetTagsResponse, error)\n\tSetApprovedRoutes(ctx context.Context, in *SetApprovedRoutesRequest, opts ...grpc.CallOption) (*SetApprovedRoutesResponse, error)\n\tRegisterNode(ctx context.Context, in *RegisterNodeRequest, opts ...grpc.CallOption) (*RegisterNodeResponse, error)\n\tDeleteNode(ctx context.Context, in *DeleteNodeRequest, opts ...grpc.CallOption) (*DeleteNodeResponse, error)\n\tExpireNode(ctx context.Context, in *ExpireNodeRequest, opts ...grpc.CallOption) (*ExpireNodeResponse, error)\n\tRenameNode(ctx context.Context, in *RenameNodeRequest, opts ...grpc.CallOption) (*RenameNodeResponse, error)\n\tListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) (*ListNodesResponse, error)\n\tBackfillNodeIPs(ctx context.Context, in *BackfillNodeIPsRequest, opts ...grpc.CallOption) (*BackfillNodeIPsResponse, error)\n\t// --- Auth start ---\n\tAuthRegister(ctx context.Context, in *AuthRegisterRequest, opts ...grpc.CallOption) (*AuthRegisterResponse, error)\n\tAuthApprove(ctx context.Context, in *AuthApproveRequest, opts ...grpc.CallOption) (*AuthApproveResponse, error)\n\tAuthReject(ctx context.Context, in *AuthRejectRequest, opts ...grpc.CallOption) (*AuthRejectResponse, error)\n\t// --- ApiKeys start ---\n\tCreateApiKey(ctx context.Context, in *CreateApiKeyRequest, opts ...grpc.CallOption) (*CreateApiKeyResponse, error)\n\tExpireApiKey(ctx context.Context, in *ExpireApiKeyRequest, opts ...grpc.CallOption) (*ExpireApiKeyResponse, error)\n\tListApiKeys(ctx context.Context, in *ListApiKeysRequest, opts ...grpc.CallOption) (*ListApiKeysResponse, error)\n\tDeleteApiKey(ctx context.Context, in *DeleteApiKeyRequest, opts ...grpc.CallOption) (*DeleteApiKeyResponse, error)\n\t// --- Policy start ---\n\tGetPolicy(ctx context.Context, in *GetPolicyRequest, opts ...grpc.CallOption) (*GetPolicyResponse, error)\n\tSetPolicy(ctx context.Context, in *SetPolicyRequest, opts ...grpc.CallOption) (*SetPolicyResponse, error)\n\t// --- Health start ---\n\tHealth(ctx 
context.Context, in *HealthRequest, opts ...grpc.CallOption) (*HealthResponse, error)\n}\n\ntype headscaleServiceClient struct {\n\tcc grpc.ClientConnInterface\n}\n\nfunc NewHeadscaleServiceClient(cc grpc.ClientConnInterface) HeadscaleServiceClient {\n\treturn &headscaleServiceClient{cc}\n}\n\nfunc (c *headscaleServiceClient) CreateUser(ctx context.Context, in *CreateUserRequest, opts ...grpc.CallOption) (*CreateUserResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(CreateUserResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_CreateUser_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) RenameUser(ctx context.Context, in *RenameUserRequest, opts ...grpc.CallOption) (*RenameUserResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(RenameUserResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_RenameUser_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) DeleteUser(ctx context.Context, in *DeleteUserRequest, opts ...grpc.CallOption) (*DeleteUserResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(DeleteUserResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_DeleteUser_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) ListUsers(ctx context.Context, in *ListUsersRequest, opts ...grpc.CallOption) (*ListUsersResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(ListUsersResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_ListUsers_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) CreatePreAuthKey(ctx context.Context, in *CreatePreAuthKeyRequest, opts ...grpc.CallOption) (*CreatePreAuthKeyResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(CreatePreAuthKeyResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_CreatePreAuthKey_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) ExpirePreAuthKey(ctx context.Context, in *ExpirePreAuthKeyRequest, opts ...grpc.CallOption) (*ExpirePreAuthKeyResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(ExpirePreAuthKeyResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_ExpirePreAuthKey_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) DeletePreAuthKey(ctx context.Context, in *DeletePreAuthKeyRequest, opts ...grpc.CallOption) (*DeletePreAuthKeyResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(DeletePreAuthKeyResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_DeletePreAuthKey_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) ListPreAuthKeys(ctx context.Context, in *ListPreAuthKeysRequest, opts ...grpc.CallOption) (*ListPreAuthKeysResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(ListPreAuthKeysResponse)\n\terr := c.cc.Invoke(ctx, 
HeadscaleService_ListPreAuthKeys_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) DebugCreateNode(ctx context.Context, in *DebugCreateNodeRequest, opts ...grpc.CallOption) (*DebugCreateNodeResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(DebugCreateNodeResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_DebugCreateNode_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(GetNodeResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_GetNode_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) SetTags(ctx context.Context, in *SetTagsRequest, opts ...grpc.CallOption) (*SetTagsResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(SetTagsResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_SetTags_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) SetApprovedRoutes(ctx context.Context, in *SetApprovedRoutesRequest, opts ...grpc.CallOption) (*SetApprovedRoutesResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(SetApprovedRoutesResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_SetApprovedRoutes_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) RegisterNode(ctx context.Context, in *RegisterNodeRequest, opts ...grpc.CallOption) (*RegisterNodeResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(RegisterNodeResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_RegisterNode_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) DeleteNode(ctx context.Context, in *DeleteNodeRequest, opts ...grpc.CallOption) (*DeleteNodeResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(DeleteNodeResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_DeleteNode_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) ExpireNode(ctx context.Context, in *ExpireNodeRequest, opts ...grpc.CallOption) (*ExpireNodeResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(ExpireNodeResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_ExpireNode_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) RenameNode(ctx context.Context, in *RenameNodeRequest, opts ...grpc.CallOption) (*RenameNodeResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(RenameNodeResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_RenameNode_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) ListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) 
(*ListNodesResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(ListNodesResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_ListNodes_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) BackfillNodeIPs(ctx context.Context, in *BackfillNodeIPsRequest, opts ...grpc.CallOption) (*BackfillNodeIPsResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(BackfillNodeIPsResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_BackfillNodeIPs_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) AuthRegister(ctx context.Context, in *AuthRegisterRequest, opts ...grpc.CallOption) (*AuthRegisterResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(AuthRegisterResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_AuthRegister_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) AuthApprove(ctx context.Context, in *AuthApproveRequest, opts ...grpc.CallOption) (*AuthApproveResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(AuthApproveResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_AuthApprove_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) AuthReject(ctx context.Context, in *AuthRejectRequest, opts ...grpc.CallOption) (*AuthRejectResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(AuthRejectResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_AuthReject_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) CreateApiKey(ctx context.Context, in *CreateApiKeyRequest, opts ...grpc.CallOption) (*CreateApiKeyResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(CreateApiKeyResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_CreateApiKey_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) ExpireApiKey(ctx context.Context, in *ExpireApiKeyRequest, opts ...grpc.CallOption) (*ExpireApiKeyResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(ExpireApiKeyResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_ExpireApiKey_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) ListApiKeys(ctx context.Context, in *ListApiKeysRequest, opts ...grpc.CallOption) (*ListApiKeysResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(ListApiKeysResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_ListApiKeys_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) DeleteApiKey(ctx context.Context, in *DeleteApiKeyRequest, opts ...grpc.CallOption) (*DeleteApiKeyResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(DeleteApiKeyResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_DeleteApiKey_FullMethodName, in, out, cOpts...)\n\tif err 
!= nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) GetPolicy(ctx context.Context, in *GetPolicyRequest, opts ...grpc.CallOption) (*GetPolicyResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(GetPolicyResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_GetPolicy_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) SetPolicy(ctx context.Context, in *SetPolicyRequest, opts ...grpc.CallOption) (*SetPolicyResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(SetPolicyResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_SetPolicy_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *headscaleServiceClient) Health(ctx context.Context, in *HealthRequest, opts ...grpc.CallOption) (*HealthResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(HealthResponse)\n\terr := c.cc.Invoke(ctx, HeadscaleService_Health_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\n// HeadscaleServiceServer is the server API for HeadscaleService service.\n// All implementations must embed UnimplementedHeadscaleServiceServer\n// for forward compatibility.\ntype HeadscaleServiceServer interface {\n\t// --- User start ---\n\tCreateUser(context.Context, *CreateUserRequest) (*CreateUserResponse, error)\n\tRenameUser(context.Context, *RenameUserRequest) (*RenameUserResponse, error)\n\tDeleteUser(context.Context, *DeleteUserRequest) (*DeleteUserResponse, error)\n\tListUsers(context.Context, *ListUsersRequest) (*ListUsersResponse, error)\n\t// --- PreAuthKeys start ---\n\tCreatePreAuthKey(context.Context, *CreatePreAuthKeyRequest) (*CreatePreAuthKeyResponse, error)\n\tExpirePreAuthKey(context.Context, *ExpirePreAuthKeyRequest) (*ExpirePreAuthKeyResponse, error)\n\tDeletePreAuthKey(context.Context, *DeletePreAuthKeyRequest) (*DeletePreAuthKeyResponse, error)\n\tListPreAuthKeys(context.Context, *ListPreAuthKeysRequest) (*ListPreAuthKeysResponse, error)\n\t// --- Node start ---\n\tDebugCreateNode(context.Context, *DebugCreateNodeRequest) (*DebugCreateNodeResponse, error)\n\tGetNode(context.Context, *GetNodeRequest) (*GetNodeResponse, error)\n\tSetTags(context.Context, *SetTagsRequest) (*SetTagsResponse, error)\n\tSetApprovedRoutes(context.Context, *SetApprovedRoutesRequest) (*SetApprovedRoutesResponse, error)\n\tRegisterNode(context.Context, *RegisterNodeRequest) (*RegisterNodeResponse, error)\n\tDeleteNode(context.Context, *DeleteNodeRequest) (*DeleteNodeResponse, error)\n\tExpireNode(context.Context, *ExpireNodeRequest) (*ExpireNodeResponse, error)\n\tRenameNode(context.Context, *RenameNodeRequest) (*RenameNodeResponse, error)\n\tListNodes(context.Context, *ListNodesRequest) (*ListNodesResponse, error)\n\tBackfillNodeIPs(context.Context, *BackfillNodeIPsRequest) (*BackfillNodeIPsResponse, error)\n\t// --- Auth start ---\n\tAuthRegister(context.Context, *AuthRegisterRequest) (*AuthRegisterResponse, error)\n\tAuthApprove(context.Context, *AuthApproveRequest) (*AuthApproveResponse, error)\n\tAuthReject(context.Context, *AuthRejectRequest) (*AuthRejectResponse, error)\n\t// --- ApiKeys start ---\n\tCreateApiKey(context.Context, *CreateApiKeyRequest) (*CreateApiKeyResponse, error)\n\tExpireApiKey(context.Context, *ExpireApiKeyRequest) 
(*ExpireApiKeyResponse, error)\n\tListApiKeys(context.Context, *ListApiKeysRequest) (*ListApiKeysResponse, error)\n\tDeleteApiKey(context.Context, *DeleteApiKeyRequest) (*DeleteApiKeyResponse, error)\n\t// --- Policy start ---\n\tGetPolicy(context.Context, *GetPolicyRequest) (*GetPolicyResponse, error)\n\tSetPolicy(context.Context, *SetPolicyRequest) (*SetPolicyResponse, error)\n\t// --- Health start ---\n\tHealth(context.Context, *HealthRequest) (*HealthResponse, error)\n\tmustEmbedUnimplementedHeadscaleServiceServer()\n}\n\n// UnimplementedHeadscaleServiceServer must be embedded to have\n// forward compatible implementations.\n//\n// NOTE: this should be embedded by value instead of pointer to avoid a nil\n// pointer dereference when methods are called.\ntype UnimplementedHeadscaleServiceServer struct{}\n\nfunc (UnimplementedHeadscaleServiceServer) CreateUser(context.Context, *CreateUserRequest) (*CreateUserResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method CreateUser not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) RenameUser(context.Context, *RenameUserRequest) (*RenameUserResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method RenameUser not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) DeleteUser(context.Context, *DeleteUserRequest) (*DeleteUserResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method DeleteUser not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) ListUsers(context.Context, *ListUsersRequest) (*ListUsersResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method ListUsers not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) CreatePreAuthKey(context.Context, *CreatePreAuthKeyRequest) (*CreatePreAuthKeyResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method CreatePreAuthKey not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) ExpirePreAuthKey(context.Context, *ExpirePreAuthKeyRequest) (*ExpirePreAuthKeyResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method ExpirePreAuthKey not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) DeletePreAuthKey(context.Context, *DeletePreAuthKeyRequest) (*DeletePreAuthKeyResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method DeletePreAuthKey not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) ListPreAuthKeys(context.Context, *ListPreAuthKeysRequest) (*ListPreAuthKeysResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method ListPreAuthKeys not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) DebugCreateNode(context.Context, *DebugCreateNodeRequest) (*DebugCreateNodeResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method DebugCreateNode not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) GetNode(context.Context, *GetNodeRequest) (*GetNodeResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method GetNode not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) SetTags(context.Context, *SetTagsRequest) (*SetTagsResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method SetTags not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) SetApprovedRoutes(context.Context, *SetApprovedRoutesRequest) (*SetApprovedRoutesResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method SetApprovedRoutes not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) 
RegisterNode(context.Context, *RegisterNodeRequest) (*RegisterNodeResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method RegisterNode not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) DeleteNode(context.Context, *DeleteNodeRequest) (*DeleteNodeResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method DeleteNode not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) ExpireNode(context.Context, *ExpireNodeRequest) (*ExpireNodeResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method ExpireNode not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) RenameNode(context.Context, *RenameNodeRequest) (*RenameNodeResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method RenameNode not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) ListNodes(context.Context, *ListNodesRequest) (*ListNodesResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method ListNodes not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) BackfillNodeIPs(context.Context, *BackfillNodeIPsRequest) (*BackfillNodeIPsResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method BackfillNodeIPs not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) AuthRegister(context.Context, *AuthRegisterRequest) (*AuthRegisterResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method AuthRegister not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) AuthApprove(context.Context, *AuthApproveRequest) (*AuthApproveResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method AuthApprove not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) AuthReject(context.Context, *AuthRejectRequest) (*AuthRejectResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method AuthReject not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) CreateApiKey(context.Context, *CreateApiKeyRequest) (*CreateApiKeyResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method CreateApiKey not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) ExpireApiKey(context.Context, *ExpireApiKeyRequest) (*ExpireApiKeyResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method ExpireApiKey not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) ListApiKeys(context.Context, *ListApiKeysRequest) (*ListApiKeysResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method ListApiKeys not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) DeleteApiKey(context.Context, *DeleteApiKeyRequest) (*DeleteApiKeyResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method DeleteApiKey not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) GetPolicy(context.Context, *GetPolicyRequest) (*GetPolicyResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method GetPolicy not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) SetPolicy(context.Context, *SetPolicyRequest) (*SetPolicyResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method SetPolicy not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) Health(context.Context, *HealthRequest) (*HealthResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method Health not implemented\")\n}\nfunc (UnimplementedHeadscaleServiceServer) mustEmbedUnimplementedHeadscaleServiceServer() {}\nfunc 
(UnimplementedHeadscaleServiceServer) testEmbeddedByValue()                          {}\n\n// UnsafeHeadscaleServiceServer may be embedded to opt out of forward compatibility for this service.\n// Use of this interface is not recommended, as added methods to HeadscaleServiceServer will\n// result in compilation errors.\ntype UnsafeHeadscaleServiceServer interface {\n\tmustEmbedUnimplementedHeadscaleServiceServer()\n}\n\nfunc RegisterHeadscaleServiceServer(s grpc.ServiceRegistrar, srv HeadscaleServiceServer) {\n\t// If the following call panics, it indicates UnimplementedHeadscaleServiceServer was\n\t// embedded by pointer and is nil.  This will cause panics if an\n\t// unimplemented method is ever invoked, so we test this at initialization\n\t// time to prevent it from happening at runtime later due to I/O.\n\tif t, ok := srv.(interface{ testEmbeddedByValue() }); ok {\n\t\tt.testEmbeddedByValue()\n\t}\n\ts.RegisterService(&HeadscaleService_ServiceDesc, srv)\n}\n\nfunc _HeadscaleService_CreateUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(CreateUserRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(HeadscaleServiceServer).CreateUser(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_CreateUser_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).CreateUser(ctx, req.(*CreateUserRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_RenameUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(RenameUserRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(HeadscaleServiceServer).RenameUser(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_RenameUser_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).RenameUser(ctx, req.(*RenameUserRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_DeleteUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(DeleteUserRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(HeadscaleServiceServer).DeleteUser(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_DeleteUser_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).DeleteUser(ctx, req.(*DeleteUserRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_ListUsers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(ListUsersRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(HeadscaleServiceServer).ListUsers(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: 
HeadscaleService_ListUsers_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).ListUsers(ctx, req.(*ListUsersRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_CreatePreAuthKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(CreatePreAuthKeyRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(HeadscaleServiceServer).CreatePreAuthKey(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_CreatePreAuthKey_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).CreatePreAuthKey(ctx, req.(*CreatePreAuthKeyRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_ExpirePreAuthKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(ExpirePreAuthKeyRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(HeadscaleServiceServer).ExpirePreAuthKey(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_ExpirePreAuthKey_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).ExpirePreAuthKey(ctx, req.(*ExpirePreAuthKeyRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_DeletePreAuthKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(DeletePreAuthKeyRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(HeadscaleServiceServer).DeletePreAuthKey(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_DeletePreAuthKey_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).DeletePreAuthKey(ctx, req.(*DeletePreAuthKeyRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_ListPreAuthKeys_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(ListPreAuthKeysRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(HeadscaleServiceServer).ListPreAuthKeys(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_ListPreAuthKeys_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).ListPreAuthKeys(ctx, req.(*ListPreAuthKeysRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_DebugCreateNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(DebugCreateNodeRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn 
srv.(HeadscaleServiceServer).DebugCreateNode(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_DebugCreateNode_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).DebugCreateNode(ctx, req.(*DebugCreateNodeRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_GetNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(GetNodeRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(HeadscaleServiceServer).GetNode(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_GetNode_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).GetNode(ctx, req.(*GetNodeRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_SetTags_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(SetTagsRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(HeadscaleServiceServer).SetTags(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_SetTags_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).SetTags(ctx, req.(*SetTagsRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_SetApprovedRoutes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(SetApprovedRoutesRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(HeadscaleServiceServer).SetApprovedRoutes(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_SetApprovedRoutes_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).SetApprovedRoutes(ctx, req.(*SetApprovedRoutesRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_RegisterNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(RegisterNodeRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(HeadscaleServiceServer).RegisterNode(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_RegisterNode_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).RegisterNode(ctx, req.(*RegisterNodeRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_DeleteNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(DeleteNodeRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn 
srv.(HeadscaleServiceServer).DeleteNode(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_DeleteNode_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).DeleteNode(ctx, req.(*DeleteNodeRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_ExpireNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(ExpireNodeRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(HeadscaleServiceServer).ExpireNode(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_ExpireNode_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).ExpireNode(ctx, req.(*ExpireNodeRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_RenameNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(RenameNodeRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(HeadscaleServiceServer).RenameNode(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_RenameNode_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).RenameNode(ctx, req.(*RenameNodeRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_ListNodes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(ListNodesRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(HeadscaleServiceServer).ListNodes(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_ListNodes_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).ListNodes(ctx, req.(*ListNodesRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_BackfillNodeIPs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(BackfillNodeIPsRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(HeadscaleServiceServer).BackfillNodeIPs(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_BackfillNodeIPs_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).BackfillNodeIPs(ctx, req.(*BackfillNodeIPsRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_AuthRegister_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(AuthRegisterRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn 
srv.(HeadscaleServiceServer).AuthRegister(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_AuthRegister_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).AuthRegister(ctx, req.(*AuthRegisterRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_AuthApprove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(AuthApproveRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(HeadscaleServiceServer).AuthApprove(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_AuthApprove_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).AuthApprove(ctx, req.(*AuthApproveRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_AuthReject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(AuthRejectRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(HeadscaleServiceServer).AuthReject(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_AuthReject_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).AuthReject(ctx, req.(*AuthRejectRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_CreateApiKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(CreateApiKeyRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(HeadscaleServiceServer).CreateApiKey(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_CreateApiKey_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).CreateApiKey(ctx, req.(*CreateApiKeyRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_ExpireApiKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(ExpireApiKeyRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(HeadscaleServiceServer).ExpireApiKey(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_ExpireApiKey_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).ExpireApiKey(ctx, req.(*ExpireApiKeyRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_ListApiKeys_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(ListApiKeysRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn 
srv.(HeadscaleServiceServer).ListApiKeys(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_ListApiKeys_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).ListApiKeys(ctx, req.(*ListApiKeysRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_DeleteApiKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(DeleteApiKeyRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(HeadscaleServiceServer).DeleteApiKey(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_DeleteApiKey_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).DeleteApiKey(ctx, req.(*DeleteApiKeyRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_GetPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(GetPolicyRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(HeadscaleServiceServer).GetPolicy(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_GetPolicy_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).GetPolicy(ctx, req.(*GetPolicyRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_SetPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(SetPolicyRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(HeadscaleServiceServer).SetPolicy(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_SetPolicy_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).SetPolicy(ctx, req.(*SetPolicyRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _HeadscaleService_Health_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(HealthRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(HeadscaleServiceServer).Health(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: HeadscaleService_Health_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(HeadscaleServiceServer).Health(ctx, req.(*HealthRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\n// HeadscaleService_ServiceDesc is the grpc.ServiceDesc for HeadscaleService service.\n// It's only intended for direct use with grpc.RegisterService,\n// and not to be introspected or modified (even as a copy)\nvar HeadscaleService_ServiceDesc = grpc.ServiceDesc{\n\tServiceName: \"headscale.v1.HeadscaleService\",\n\tHandlerType: 
(*HeadscaleServiceServer)(nil),\n\tMethods: []grpc.MethodDesc{\n\t\t{\n\t\t\tMethodName: \"CreateUser\",\n\t\t\tHandler:    _HeadscaleService_CreateUser_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"RenameUser\",\n\t\t\tHandler:    _HeadscaleService_RenameUser_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"DeleteUser\",\n\t\t\tHandler:    _HeadscaleService_DeleteUser_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"ListUsers\",\n\t\t\tHandler:    _HeadscaleService_ListUsers_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"CreatePreAuthKey\",\n\t\t\tHandler:    _HeadscaleService_CreatePreAuthKey_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"ExpirePreAuthKey\",\n\t\t\tHandler:    _HeadscaleService_ExpirePreAuthKey_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"DeletePreAuthKey\",\n\t\t\tHandler:    _HeadscaleService_DeletePreAuthKey_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"ListPreAuthKeys\",\n\t\t\tHandler:    _HeadscaleService_ListPreAuthKeys_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"DebugCreateNode\",\n\t\t\tHandler:    _HeadscaleService_DebugCreateNode_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"GetNode\",\n\t\t\tHandler:    _HeadscaleService_GetNode_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"SetTags\",\n\t\t\tHandler:    _HeadscaleService_SetTags_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"SetApprovedRoutes\",\n\t\t\tHandler:    _HeadscaleService_SetApprovedRoutes_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"RegisterNode\",\n\t\t\tHandler:    _HeadscaleService_RegisterNode_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"DeleteNode\",\n\t\t\tHandler:    _HeadscaleService_DeleteNode_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"ExpireNode\",\n\t\t\tHandler:    _HeadscaleService_ExpireNode_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"RenameNode\",\n\t\t\tHandler:    _HeadscaleService_RenameNode_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"ListNodes\",\n\t\t\tHandler:    _HeadscaleService_ListNodes_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"BackfillNodeIPs\",\n\t\t\tHandler:    _HeadscaleService_BackfillNodeIPs_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"AuthRegister\",\n\t\t\tHandler:    _HeadscaleService_AuthRegister_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"AuthApprove\",\n\t\t\tHandler:    _HeadscaleService_AuthApprove_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"AuthReject\",\n\t\t\tHandler:    _HeadscaleService_AuthReject_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"CreateApiKey\",\n\t\t\tHandler:    _HeadscaleService_CreateApiKey_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"ExpireApiKey\",\n\t\t\tHandler:    _HeadscaleService_ExpireApiKey_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"ListApiKeys\",\n\t\t\tHandler:    _HeadscaleService_ListApiKeys_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"DeleteApiKey\",\n\t\t\tHandler:    _HeadscaleService_DeleteApiKey_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"GetPolicy\",\n\t\t\tHandler:    _HeadscaleService_GetPolicy_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"SetPolicy\",\n\t\t\tHandler:    _HeadscaleService_SetPolicy_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"Health\",\n\t\t\tHandler:    _HeadscaleService_Health_Handler,\n\t\t},\n\t},\n\tStreams:  []grpc.StreamDesc{},\n\tMetadata: \"headscale/v1/headscale.proto\",\n}\n"
  },
  {
    "path": "gen/go/headscale/v1/node.pb.go",
    "content": "// Code generated by protoc-gen-go. DO NOT EDIT.\n// versions:\n// \tprotoc-gen-go v1.36.11\n// \tprotoc        (unknown)\n// source: headscale/v1/node.proto\n\npackage v1\n\nimport (\n\tprotoreflect \"google.golang.org/protobuf/reflect/protoreflect\"\n\tprotoimpl \"google.golang.org/protobuf/runtime/protoimpl\"\n\ttimestamppb \"google.golang.org/protobuf/types/known/timestamppb\"\n\treflect \"reflect\"\n\tsync \"sync\"\n\tunsafe \"unsafe\"\n)\n\nconst (\n\t// Verify that this generated code is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)\n\t// Verify that runtime/protoimpl is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)\n)\n\ntype RegisterMethod int32\n\nconst (\n\tRegisterMethod_REGISTER_METHOD_UNSPECIFIED RegisterMethod = 0\n\tRegisterMethod_REGISTER_METHOD_AUTH_KEY    RegisterMethod = 1\n\tRegisterMethod_REGISTER_METHOD_CLI         RegisterMethod = 2\n\tRegisterMethod_REGISTER_METHOD_OIDC        RegisterMethod = 3\n)\n\n// Enum value maps for RegisterMethod.\nvar (\n\tRegisterMethod_name = map[int32]string{\n\t\t0: \"REGISTER_METHOD_UNSPECIFIED\",\n\t\t1: \"REGISTER_METHOD_AUTH_KEY\",\n\t\t2: \"REGISTER_METHOD_CLI\",\n\t\t3: \"REGISTER_METHOD_OIDC\",\n\t}\n\tRegisterMethod_value = map[string]int32{\n\t\t\"REGISTER_METHOD_UNSPECIFIED\": 0,\n\t\t\"REGISTER_METHOD_AUTH_KEY\":    1,\n\t\t\"REGISTER_METHOD_CLI\":         2,\n\t\t\"REGISTER_METHOD_OIDC\":        3,\n\t}\n)\n\nfunc (x RegisterMethod) Enum() *RegisterMethod {\n\tp := new(RegisterMethod)\n\t*p = x\n\treturn p\n}\n\nfunc (x RegisterMethod) String() string {\n\treturn protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))\n}\n\nfunc (RegisterMethod) Descriptor() protoreflect.EnumDescriptor {\n\treturn file_headscale_v1_node_proto_enumTypes[0].Descriptor()\n}\n\nfunc (RegisterMethod) Type() protoreflect.EnumType {\n\treturn &file_headscale_v1_node_proto_enumTypes[0]\n}\n\nfunc (x RegisterMethod) Number() protoreflect.EnumNumber {\n\treturn protoreflect.EnumNumber(x)\n}\n\n// Deprecated: Use RegisterMethod.Descriptor instead.\nfunc (RegisterMethod) EnumDescriptor() ([]byte, []int) {\n\treturn file_headscale_v1_node_proto_rawDescGZIP(), []int{0}\n}\n\ntype Node struct {\n\tstate          protoimpl.MessageState `protogen:\"open.v1\"`\n\tId             uint64                 `protobuf:\"varint,1,opt,name=id,proto3\" json:\"id,omitempty\"`\n\tMachineKey     string                 `protobuf:\"bytes,2,opt,name=machine_key,json=machineKey,proto3\" json:\"machine_key,omitempty\"`\n\tNodeKey        string                 `protobuf:\"bytes,3,opt,name=node_key,json=nodeKey,proto3\" json:\"node_key,omitempty\"`\n\tDiscoKey       string                 `protobuf:\"bytes,4,opt,name=disco_key,json=discoKey,proto3\" json:\"disco_key,omitempty\"`\n\tIpAddresses    []string               `protobuf:\"bytes,5,rep,name=ip_addresses,json=ipAddresses,proto3\" json:\"ip_addresses,omitempty\"`\n\tName           string                 `protobuf:\"bytes,6,opt,name=name,proto3\" json:\"name,omitempty\"`\n\tUser           *User                  `protobuf:\"bytes,7,opt,name=user,proto3\" json:\"user,omitempty\"`\n\tLastSeen       *timestamppb.Timestamp `protobuf:\"bytes,8,opt,name=last_seen,json=lastSeen,proto3\" json:\"last_seen,omitempty\"`\n\tExpiry         *timestamppb.Timestamp `protobuf:\"bytes,10,opt,name=expiry,proto3\" json:\"expiry,omitempty\"`\n\tPreAuthKey     *PreAuthKey            
`protobuf:\"bytes,11,opt,name=pre_auth_key,json=preAuthKey,proto3\" json:\"pre_auth_key,omitempty\"`\n\tCreatedAt      *timestamppb.Timestamp `protobuf:\"bytes,12,opt,name=created_at,json=createdAt,proto3\" json:\"created_at,omitempty\"`\n\tRegisterMethod RegisterMethod         `protobuf:\"varint,13,opt,name=register_method,json=registerMethod,proto3,enum=headscale.v1.RegisterMethod\" json:\"register_method,omitempty\"`\n\t// Deprecated\n\t// repeated string forced_tags = 18;\n\t// repeated string invalid_tags = 19;\n\t// repeated string valid_tags = 20;\n\tGivenName       string   `protobuf:\"bytes,21,opt,name=given_name,json=givenName,proto3\" json:\"given_name,omitempty\"`\n\tOnline          bool     `protobuf:\"varint,22,opt,name=online,proto3\" json:\"online,omitempty\"`\n\tApprovedRoutes  []string `protobuf:\"bytes,23,rep,name=approved_routes,json=approvedRoutes,proto3\" json:\"approved_routes,omitempty\"`\n\tAvailableRoutes []string `protobuf:\"bytes,24,rep,name=available_routes,json=availableRoutes,proto3\" json:\"available_routes,omitempty\"`\n\tSubnetRoutes    []string `protobuf:\"bytes,25,rep,name=subnet_routes,json=subnetRoutes,proto3\" json:\"subnet_routes,omitempty\"`\n\tTags            []string `protobuf:\"bytes,26,rep,name=tags,proto3\" json:\"tags,omitempty\"`\n\tunknownFields   protoimpl.UnknownFields\n\tsizeCache       protoimpl.SizeCache\n}\n\nfunc (x *Node) Reset() {\n\t*x = Node{}\n\tmi := &file_headscale_v1_node_proto_msgTypes[0]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *Node) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*Node) ProtoMessage() {}\n\nfunc (x *Node) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_node_proto_msgTypes[0]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use Node.ProtoReflect.Descriptor instead.\nfunc (*Node) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_node_proto_rawDescGZIP(), []int{0}\n}\n\nfunc (x *Node) GetId() uint64 {\n\tif x != nil {\n\t\treturn x.Id\n\t}\n\treturn 0\n}\n\nfunc (x *Node) GetMachineKey() string {\n\tif x != nil {\n\t\treturn x.MachineKey\n\t}\n\treturn \"\"\n}\n\nfunc (x *Node) GetNodeKey() string {\n\tif x != nil {\n\t\treturn x.NodeKey\n\t}\n\treturn \"\"\n}\n\nfunc (x *Node) GetDiscoKey() string {\n\tif x != nil {\n\t\treturn x.DiscoKey\n\t}\n\treturn \"\"\n}\n\nfunc (x *Node) GetIpAddresses() []string {\n\tif x != nil {\n\t\treturn x.IpAddresses\n\t}\n\treturn nil\n}\n\nfunc (x *Node) GetName() string {\n\tif x != nil {\n\t\treturn x.Name\n\t}\n\treturn \"\"\n}\n\nfunc (x *Node) GetUser() *User {\n\tif x != nil {\n\t\treturn x.User\n\t}\n\treturn nil\n}\n\nfunc (x *Node) GetLastSeen() *timestamppb.Timestamp {\n\tif x != nil {\n\t\treturn x.LastSeen\n\t}\n\treturn nil\n}\n\nfunc (x *Node) GetExpiry() *timestamppb.Timestamp {\n\tif x != nil {\n\t\treturn x.Expiry\n\t}\n\treturn nil\n}\n\nfunc (x *Node) GetPreAuthKey() *PreAuthKey {\n\tif x != nil {\n\t\treturn x.PreAuthKey\n\t}\n\treturn nil\n}\n\nfunc (x *Node) GetCreatedAt() *timestamppb.Timestamp {\n\tif x != nil {\n\t\treturn x.CreatedAt\n\t}\n\treturn nil\n}\n\nfunc (x *Node) GetRegisterMethod() RegisterMethod {\n\tif x != nil {\n\t\treturn x.RegisterMethod\n\t}\n\treturn RegisterMethod_REGISTER_METHOD_UNSPECIFIED\n}\n\nfunc (x *Node) GetGivenName() string {\n\tif x != nil 
{\n\t\treturn x.GivenName\n\t}\n\treturn \"\"\n}\n\nfunc (x *Node) GetOnline() bool {\n\tif x != nil {\n\t\treturn x.Online\n\t}\n\treturn false\n}\n\nfunc (x *Node) GetApprovedRoutes() []string {\n\tif x != nil {\n\t\treturn x.ApprovedRoutes\n\t}\n\treturn nil\n}\n\nfunc (x *Node) GetAvailableRoutes() []string {\n\tif x != nil {\n\t\treturn x.AvailableRoutes\n\t}\n\treturn nil\n}\n\nfunc (x *Node) GetSubnetRoutes() []string {\n\tif x != nil {\n\t\treturn x.SubnetRoutes\n\t}\n\treturn nil\n}\n\nfunc (x *Node) GetTags() []string {\n\tif x != nil {\n\t\treturn x.Tags\n\t}\n\treturn nil\n}\n\ntype RegisterNodeRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tUser          string                 `protobuf:\"bytes,1,opt,name=user,proto3\" json:\"user,omitempty\"`\n\tKey           string                 `protobuf:\"bytes,2,opt,name=key,proto3\" json:\"key,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *RegisterNodeRequest) Reset() {\n\t*x = RegisterNodeRequest{}\n\tmi := &file_headscale_v1_node_proto_msgTypes[1]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *RegisterNodeRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*RegisterNodeRequest) ProtoMessage() {}\n\nfunc (x *RegisterNodeRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_node_proto_msgTypes[1]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use RegisterNodeRequest.ProtoReflect.Descriptor instead.\nfunc (*RegisterNodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_node_proto_rawDescGZIP(), []int{1}\n}\n\nfunc (x *RegisterNodeRequest) GetUser() string {\n\tif x != nil {\n\t\treturn x.User\n\t}\n\treturn \"\"\n}\n\nfunc (x *RegisterNodeRequest) GetKey() string {\n\tif x != nil {\n\t\treturn x.Key\n\t}\n\treturn \"\"\n}\n\ntype RegisterNodeResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tNode          *Node                  `protobuf:\"bytes,1,opt,name=node,proto3\" json:\"node,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *RegisterNodeResponse) Reset() {\n\t*x = RegisterNodeResponse{}\n\tmi := &file_headscale_v1_node_proto_msgTypes[2]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *RegisterNodeResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*RegisterNodeResponse) ProtoMessage() {}\n\nfunc (x *RegisterNodeResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_node_proto_msgTypes[2]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use RegisterNodeResponse.ProtoReflect.Descriptor instead.\nfunc (*RegisterNodeResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_node_proto_rawDescGZIP(), []int{2}\n}\n\nfunc (x *RegisterNodeResponse) GetNode() *Node {\n\tif x != nil {\n\t\treturn x.Node\n\t}\n\treturn nil\n}\n\ntype GetNodeRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tNodeId        uint64                 
`protobuf:\"varint,1,opt,name=node_id,json=nodeId,proto3\" json:\"node_id,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *GetNodeRequest) Reset() {\n\t*x = GetNodeRequest{}\n\tmi := &file_headscale_v1_node_proto_msgTypes[3]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *GetNodeRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*GetNodeRequest) ProtoMessage() {}\n\nfunc (x *GetNodeRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_node_proto_msgTypes[3]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use GetNodeRequest.ProtoReflect.Descriptor instead.\nfunc (*GetNodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_node_proto_rawDescGZIP(), []int{3}\n}\n\nfunc (x *GetNodeRequest) GetNodeId() uint64 {\n\tif x != nil {\n\t\treturn x.NodeId\n\t}\n\treturn 0\n}\n\ntype GetNodeResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tNode          *Node                  `protobuf:\"bytes,1,opt,name=node,proto3\" json:\"node,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *GetNodeResponse) Reset() {\n\t*x = GetNodeResponse{}\n\tmi := &file_headscale_v1_node_proto_msgTypes[4]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *GetNodeResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*GetNodeResponse) ProtoMessage() {}\n\nfunc (x *GetNodeResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_node_proto_msgTypes[4]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use GetNodeResponse.ProtoReflect.Descriptor instead.\nfunc (*GetNodeResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_node_proto_rawDescGZIP(), []int{4}\n}\n\nfunc (x *GetNodeResponse) GetNode() *Node {\n\tif x != nil {\n\t\treturn x.Node\n\t}\n\treturn nil\n}\n\ntype SetTagsRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tNodeId        uint64                 `protobuf:\"varint,1,opt,name=node_id,json=nodeId,proto3\" json:\"node_id,omitempty\"`\n\tTags          []string               `protobuf:\"bytes,2,rep,name=tags,proto3\" json:\"tags,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *SetTagsRequest) Reset() {\n\t*x = SetTagsRequest{}\n\tmi := &file_headscale_v1_node_proto_msgTypes[5]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *SetTagsRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*SetTagsRequest) ProtoMessage() {}\n\nfunc (x *SetTagsRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_node_proto_msgTypes[5]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use SetTagsRequest.ProtoReflect.Descriptor instead.\nfunc (*SetTagsRequest) Descriptor() ([]byte, []int) 
{\n\treturn file_headscale_v1_node_proto_rawDescGZIP(), []int{5}\n}\n\nfunc (x *SetTagsRequest) GetNodeId() uint64 {\n\tif x != nil {\n\t\treturn x.NodeId\n\t}\n\treturn 0\n}\n\nfunc (x *SetTagsRequest) GetTags() []string {\n\tif x != nil {\n\t\treturn x.Tags\n\t}\n\treturn nil\n}\n\ntype SetTagsResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tNode          *Node                  `protobuf:\"bytes,1,opt,name=node,proto3\" json:\"node,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *SetTagsResponse) Reset() {\n\t*x = SetTagsResponse{}\n\tmi := &file_headscale_v1_node_proto_msgTypes[6]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *SetTagsResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*SetTagsResponse) ProtoMessage() {}\n\nfunc (x *SetTagsResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_node_proto_msgTypes[6]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use SetTagsResponse.ProtoReflect.Descriptor instead.\nfunc (*SetTagsResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_node_proto_rawDescGZIP(), []int{6}\n}\n\nfunc (x *SetTagsResponse) GetNode() *Node {\n\tif x != nil {\n\t\treturn x.Node\n\t}\n\treturn nil\n}\n\ntype SetApprovedRoutesRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tNodeId        uint64                 `protobuf:\"varint,1,opt,name=node_id,json=nodeId,proto3\" json:\"node_id,omitempty\"`\n\tRoutes        []string               `protobuf:\"bytes,2,rep,name=routes,proto3\" json:\"routes,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *SetApprovedRoutesRequest) Reset() {\n\t*x = SetApprovedRoutesRequest{}\n\tmi := &file_headscale_v1_node_proto_msgTypes[7]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *SetApprovedRoutesRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*SetApprovedRoutesRequest) ProtoMessage() {}\n\nfunc (x *SetApprovedRoutesRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_node_proto_msgTypes[7]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use SetApprovedRoutesRequest.ProtoReflect.Descriptor instead.\nfunc (*SetApprovedRoutesRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_node_proto_rawDescGZIP(), []int{7}\n}\n\nfunc (x *SetApprovedRoutesRequest) GetNodeId() uint64 {\n\tif x != nil {\n\t\treturn x.NodeId\n\t}\n\treturn 0\n}\n\nfunc (x *SetApprovedRoutesRequest) GetRoutes() []string {\n\tif x != nil {\n\t\treturn x.Routes\n\t}\n\treturn nil\n}\n\ntype SetApprovedRoutesResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tNode          *Node                  `protobuf:\"bytes,1,opt,name=node,proto3\" json:\"node,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *SetApprovedRoutesResponse) Reset() {\n\t*x = SetApprovedRoutesResponse{}\n\tmi := &file_headscale_v1_node_proto_msgTypes[8]\n\tms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *SetApprovedRoutesResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*SetApprovedRoutesResponse) ProtoMessage() {}\n\nfunc (x *SetApprovedRoutesResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_node_proto_msgTypes[8]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use SetApprovedRoutesResponse.ProtoReflect.Descriptor instead.\nfunc (*SetApprovedRoutesResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_node_proto_rawDescGZIP(), []int{8}\n}\n\nfunc (x *SetApprovedRoutesResponse) GetNode() *Node {\n\tif x != nil {\n\t\treturn x.Node\n\t}\n\treturn nil\n}\n\ntype DeleteNodeRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tNodeId        uint64                 `protobuf:\"varint,1,opt,name=node_id,json=nodeId,proto3\" json:\"node_id,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *DeleteNodeRequest) Reset() {\n\t*x = DeleteNodeRequest{}\n\tmi := &file_headscale_v1_node_proto_msgTypes[9]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *DeleteNodeRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*DeleteNodeRequest) ProtoMessage() {}\n\nfunc (x *DeleteNodeRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_node_proto_msgTypes[9]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use DeleteNodeRequest.ProtoReflect.Descriptor instead.\nfunc (*DeleteNodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_node_proto_rawDescGZIP(), []int{9}\n}\n\nfunc (x *DeleteNodeRequest) GetNodeId() uint64 {\n\tif x != nil {\n\t\treturn x.NodeId\n\t}\n\treturn 0\n}\n\ntype DeleteNodeResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *DeleteNodeResponse) Reset() {\n\t*x = DeleteNodeResponse{}\n\tmi := &file_headscale_v1_node_proto_msgTypes[10]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *DeleteNodeResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*DeleteNodeResponse) ProtoMessage() {}\n\nfunc (x *DeleteNodeResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_node_proto_msgTypes[10]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use DeleteNodeResponse.ProtoReflect.Descriptor instead.\nfunc (*DeleteNodeResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_node_proto_rawDescGZIP(), []int{10}\n}\n\ntype ExpireNodeRequest struct {\n\tstate  protoimpl.MessageState `protogen:\"open.v1\"`\n\tNodeId uint64                 `protobuf:\"varint,1,opt,name=node_id,json=nodeId,proto3\" json:\"node_id,omitempty\"`\n\tExpiry *timestamppb.Timestamp `protobuf:\"bytes,2,opt,name=expiry,proto3\" json:\"expiry,omitempty\"`\n\t// 
When true, sets expiry to null (node will never expire).\n\tDisableExpiry bool `protobuf:\"varint,3,opt,name=disable_expiry,json=disableExpiry,proto3\" json:\"disable_expiry,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *ExpireNodeRequest) Reset() {\n\t*x = ExpireNodeRequest{}\n\tmi := &file_headscale_v1_node_proto_msgTypes[11]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *ExpireNodeRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*ExpireNodeRequest) ProtoMessage() {}\n\nfunc (x *ExpireNodeRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_node_proto_msgTypes[11]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use ExpireNodeRequest.ProtoReflect.Descriptor instead.\nfunc (*ExpireNodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_node_proto_rawDescGZIP(), []int{11}\n}\n\nfunc (x *ExpireNodeRequest) GetNodeId() uint64 {\n\tif x != nil {\n\t\treturn x.NodeId\n\t}\n\treturn 0\n}\n\nfunc (x *ExpireNodeRequest) GetExpiry() *timestamppb.Timestamp {\n\tif x != nil {\n\t\treturn x.Expiry\n\t}\n\treturn nil\n}\n\nfunc (x *ExpireNodeRequest) GetDisableExpiry() bool {\n\tif x != nil {\n\t\treturn x.DisableExpiry\n\t}\n\treturn false\n}\n\ntype ExpireNodeResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tNode          *Node                  `protobuf:\"bytes,1,opt,name=node,proto3\" json:\"node,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *ExpireNodeResponse) Reset() {\n\t*x = ExpireNodeResponse{}\n\tmi := &file_headscale_v1_node_proto_msgTypes[12]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *ExpireNodeResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*ExpireNodeResponse) ProtoMessage() {}\n\nfunc (x *ExpireNodeResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_node_proto_msgTypes[12]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use ExpireNodeResponse.ProtoReflect.Descriptor instead.\nfunc (*ExpireNodeResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_node_proto_rawDescGZIP(), []int{12}\n}\n\nfunc (x *ExpireNodeResponse) GetNode() *Node {\n\tif x != nil {\n\t\treturn x.Node\n\t}\n\treturn nil\n}\n\ntype RenameNodeRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tNodeId        uint64                 `protobuf:\"varint,1,opt,name=node_id,json=nodeId,proto3\" json:\"node_id,omitempty\"`\n\tNewName       string                 `protobuf:\"bytes,2,opt,name=new_name,json=newName,proto3\" json:\"new_name,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *RenameNodeRequest) Reset() {\n\t*x = RenameNodeRequest{}\n\tmi := &file_headscale_v1_node_proto_msgTypes[13]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *RenameNodeRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*RenameNodeRequest) 
ProtoMessage() {}\n\nfunc (x *RenameNodeRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_node_proto_msgTypes[13]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use RenameNodeRequest.ProtoReflect.Descriptor instead.\nfunc (*RenameNodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_node_proto_rawDescGZIP(), []int{13}\n}\n\nfunc (x *RenameNodeRequest) GetNodeId() uint64 {\n\tif x != nil {\n\t\treturn x.NodeId\n\t}\n\treturn 0\n}\n\nfunc (x *RenameNodeRequest) GetNewName() string {\n\tif x != nil {\n\t\treturn x.NewName\n\t}\n\treturn \"\"\n}\n\ntype RenameNodeResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tNode          *Node                  `protobuf:\"bytes,1,opt,name=node,proto3\" json:\"node,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *RenameNodeResponse) Reset() {\n\t*x = RenameNodeResponse{}\n\tmi := &file_headscale_v1_node_proto_msgTypes[14]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *RenameNodeResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*RenameNodeResponse) ProtoMessage() {}\n\nfunc (x *RenameNodeResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_node_proto_msgTypes[14]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use RenameNodeResponse.ProtoReflect.Descriptor instead.\nfunc (*RenameNodeResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_node_proto_rawDescGZIP(), []int{14}\n}\n\nfunc (x *RenameNodeResponse) GetNode() *Node {\n\tif x != nil {\n\t\treturn x.Node\n\t}\n\treturn nil\n}\n\ntype ListNodesRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tUser          string                 `protobuf:\"bytes,1,opt,name=user,proto3\" json:\"user,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *ListNodesRequest) Reset() {\n\t*x = ListNodesRequest{}\n\tmi := &file_headscale_v1_node_proto_msgTypes[15]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *ListNodesRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*ListNodesRequest) ProtoMessage() {}\n\nfunc (x *ListNodesRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_node_proto_msgTypes[15]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use ListNodesRequest.ProtoReflect.Descriptor instead.\nfunc (*ListNodesRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_node_proto_rawDescGZIP(), []int{15}\n}\n\nfunc (x *ListNodesRequest) GetUser() string {\n\tif x != nil {\n\t\treturn x.User\n\t}\n\treturn \"\"\n}\n\ntype ListNodesResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tNodes         []*Node                `protobuf:\"bytes,1,rep,name=nodes,proto3\" json:\"nodes,omitempty\"`\n\tunknownFields 
protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *ListNodesResponse) Reset() {\n\t*x = ListNodesResponse{}\n\tmi := &file_headscale_v1_node_proto_msgTypes[16]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *ListNodesResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*ListNodesResponse) ProtoMessage() {}\n\nfunc (x *ListNodesResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_node_proto_msgTypes[16]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use ListNodesResponse.ProtoReflect.Descriptor instead.\nfunc (*ListNodesResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_node_proto_rawDescGZIP(), []int{16}\n}\n\nfunc (x *ListNodesResponse) GetNodes() []*Node {\n\tif x != nil {\n\t\treturn x.Nodes\n\t}\n\treturn nil\n}\n\ntype DebugCreateNodeRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tUser          string                 `protobuf:\"bytes,1,opt,name=user,proto3\" json:\"user,omitempty\"`\n\tKey           string                 `protobuf:\"bytes,2,opt,name=key,proto3\" json:\"key,omitempty\"`\n\tName          string                 `protobuf:\"bytes,3,opt,name=name,proto3\" json:\"name,omitempty\"`\n\tRoutes        []string               `protobuf:\"bytes,4,rep,name=routes,proto3\" json:\"routes,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *DebugCreateNodeRequest) Reset() {\n\t*x = DebugCreateNodeRequest{}\n\tmi := &file_headscale_v1_node_proto_msgTypes[17]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *DebugCreateNodeRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*DebugCreateNodeRequest) ProtoMessage() {}\n\nfunc (x *DebugCreateNodeRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_node_proto_msgTypes[17]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use DebugCreateNodeRequest.ProtoReflect.Descriptor instead.\nfunc (*DebugCreateNodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_node_proto_rawDescGZIP(), []int{17}\n}\n\nfunc (x *DebugCreateNodeRequest) GetUser() string {\n\tif x != nil {\n\t\treturn x.User\n\t}\n\treturn \"\"\n}\n\nfunc (x *DebugCreateNodeRequest) GetKey() string {\n\tif x != nil {\n\t\treturn x.Key\n\t}\n\treturn \"\"\n}\n\nfunc (x *DebugCreateNodeRequest) GetName() string {\n\tif x != nil {\n\t\treturn x.Name\n\t}\n\treturn \"\"\n}\n\nfunc (x *DebugCreateNodeRequest) GetRoutes() []string {\n\tif x != nil {\n\t\treturn x.Routes\n\t}\n\treturn nil\n}\n\ntype DebugCreateNodeResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tNode          *Node                  `protobuf:\"bytes,1,opt,name=node,proto3\" json:\"node,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *DebugCreateNodeResponse) Reset() {\n\t*x = DebugCreateNodeResponse{}\n\tmi := &file_headscale_v1_node_proto_msgTypes[18]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x 
*DebugCreateNodeResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*DebugCreateNodeResponse) ProtoMessage() {}\n\nfunc (x *DebugCreateNodeResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_node_proto_msgTypes[18]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use DebugCreateNodeResponse.ProtoReflect.Descriptor instead.\nfunc (*DebugCreateNodeResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_node_proto_rawDescGZIP(), []int{18}\n}\n\nfunc (x *DebugCreateNodeResponse) GetNode() *Node {\n\tif x != nil {\n\t\treturn x.Node\n\t}\n\treturn nil\n}\n\ntype BackfillNodeIPsRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tConfirmed     bool                   `protobuf:\"varint,1,opt,name=confirmed,proto3\" json:\"confirmed,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *BackfillNodeIPsRequest) Reset() {\n\t*x = BackfillNodeIPsRequest{}\n\tmi := &file_headscale_v1_node_proto_msgTypes[19]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *BackfillNodeIPsRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*BackfillNodeIPsRequest) ProtoMessage() {}\n\nfunc (x *BackfillNodeIPsRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_node_proto_msgTypes[19]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use BackfillNodeIPsRequest.ProtoReflect.Descriptor instead.\nfunc (*BackfillNodeIPsRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_node_proto_rawDescGZIP(), []int{19}\n}\n\nfunc (x *BackfillNodeIPsRequest) GetConfirmed() bool {\n\tif x != nil {\n\t\treturn x.Confirmed\n\t}\n\treturn false\n}\n\ntype BackfillNodeIPsResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tChanges       []string               `protobuf:\"bytes,1,rep,name=changes,proto3\" json:\"changes,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *BackfillNodeIPsResponse) Reset() {\n\t*x = BackfillNodeIPsResponse{}\n\tmi := &file_headscale_v1_node_proto_msgTypes[20]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *BackfillNodeIPsResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*BackfillNodeIPsResponse) ProtoMessage() {}\n\nfunc (x *BackfillNodeIPsResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_node_proto_msgTypes[20]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use BackfillNodeIPsResponse.ProtoReflect.Descriptor instead.\nfunc (*BackfillNodeIPsResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_node_proto_rawDescGZIP(), []int{20}\n}\n\nfunc (x *BackfillNodeIPsResponse) GetChanges() []string {\n\tif x != nil {\n\t\treturn x.Changes\n\t}\n\treturn nil\n}\n\nvar File_headscale_v1_node_proto protoreflect.FileDescriptor\n\nconst 
file_headscale_v1_node_proto_rawDesc = \"\" +\n\t\"\\n\" +\n\t\"\\x17headscale/v1/node.proto\\x12\\fheadscale.v1\\x1a\\x1fgoogle/protobuf/timestamp.proto\\x1a\\x1dheadscale/v1/preauthkey.proto\\x1a\\x17headscale/v1/user.proto\\\"\\xc9\\x05\\n\" +\n\t\"\\x04Node\\x12\\x0e\\n\" +\n\t\"\\x02id\\x18\\x01 \\x01(\\x04R\\x02id\\x12\\x1f\\n\" +\n\t\"\\vmachine_key\\x18\\x02 \\x01(\\tR\\n\" +\n\t\"machineKey\\x12\\x19\\n\" +\n\t\"\\bnode_key\\x18\\x03 \\x01(\\tR\\anodeKey\\x12\\x1b\\n\" +\n\t\"\\tdisco_key\\x18\\x04 \\x01(\\tR\\bdiscoKey\\x12!\\n\" +\n\t\"\\fip_addresses\\x18\\x05 \\x03(\\tR\\vipAddresses\\x12\\x12\\n\" +\n\t\"\\x04name\\x18\\x06 \\x01(\\tR\\x04name\\x12&\\n\" +\n\t\"\\x04user\\x18\\a \\x01(\\v2\\x12.headscale.v1.UserR\\x04user\\x127\\n\" +\n\t\"\\tlast_seen\\x18\\b \\x01(\\v2\\x1a.google.protobuf.TimestampR\\blastSeen\\x122\\n\" +\n\t\"\\x06expiry\\x18\\n\" +\n\t\" \\x01(\\v2\\x1a.google.protobuf.TimestampR\\x06expiry\\x12:\\n\" +\n\t\"\\fpre_auth_key\\x18\\v \\x01(\\v2\\x18.headscale.v1.PreAuthKeyR\\n\" +\n\t\"preAuthKey\\x129\\n\" +\n\t\"\\n\" +\n\t\"created_at\\x18\\f \\x01(\\v2\\x1a.google.protobuf.TimestampR\\tcreatedAt\\x12E\\n\" +\n\t\"\\x0fregister_method\\x18\\r \\x01(\\x0e2\\x1c.headscale.v1.RegisterMethodR\\x0eregisterMethod\\x12\\x1d\\n\" +\n\t\"\\n\" +\n\t\"given_name\\x18\\x15 \\x01(\\tR\\tgivenName\\x12\\x16\\n\" +\n\t\"\\x06online\\x18\\x16 \\x01(\\bR\\x06online\\x12'\\n\" +\n\t\"\\x0fapproved_routes\\x18\\x17 \\x03(\\tR\\x0eapprovedRoutes\\x12)\\n\" +\n\t\"\\x10available_routes\\x18\\x18 \\x03(\\tR\\x0favailableRoutes\\x12#\\n\" +\n\t\"\\rsubnet_routes\\x18\\x19 \\x03(\\tR\\fsubnetRoutes\\x12\\x12\\n\" +\n\t\"\\x04tags\\x18\\x1a \\x03(\\tR\\x04tagsJ\\x04\\b\\t\\x10\\n\" +\n\t\"J\\x04\\b\\x0e\\x10\\x15\\\";\\n\" +\n\t\"\\x13RegisterNodeRequest\\x12\\x12\\n\" +\n\t\"\\x04user\\x18\\x01 \\x01(\\tR\\x04user\\x12\\x10\\n\" +\n\t\"\\x03key\\x18\\x02 \\x01(\\tR\\x03key\\\">\\n\" +\n\t\"\\x14RegisterNodeResponse\\x12&\\n\" +\n\t\"\\x04node\\x18\\x01 \\x01(\\v2\\x12.headscale.v1.NodeR\\x04node\\\")\\n\" +\n\t\"\\x0eGetNodeRequest\\x12\\x17\\n\" +\n\t\"\\anode_id\\x18\\x01 \\x01(\\x04R\\x06nodeId\\\"9\\n\" +\n\t\"\\x0fGetNodeResponse\\x12&\\n\" +\n\t\"\\x04node\\x18\\x01 \\x01(\\v2\\x12.headscale.v1.NodeR\\x04node\\\"=\\n\" +\n\t\"\\x0eSetTagsRequest\\x12\\x17\\n\" +\n\t\"\\anode_id\\x18\\x01 \\x01(\\x04R\\x06nodeId\\x12\\x12\\n\" +\n\t\"\\x04tags\\x18\\x02 \\x03(\\tR\\x04tags\\\"9\\n\" +\n\t\"\\x0fSetTagsResponse\\x12&\\n\" +\n\t\"\\x04node\\x18\\x01 \\x01(\\v2\\x12.headscale.v1.NodeR\\x04node\\\"K\\n\" +\n\t\"\\x18SetApprovedRoutesRequest\\x12\\x17\\n\" +\n\t\"\\anode_id\\x18\\x01 \\x01(\\x04R\\x06nodeId\\x12\\x16\\n\" +\n\t\"\\x06routes\\x18\\x02 \\x03(\\tR\\x06routes\\\"C\\n\" +\n\t\"\\x19SetApprovedRoutesResponse\\x12&\\n\" +\n\t\"\\x04node\\x18\\x01 \\x01(\\v2\\x12.headscale.v1.NodeR\\x04node\\\",\\n\" +\n\t\"\\x11DeleteNodeRequest\\x12\\x17\\n\" +\n\t\"\\anode_id\\x18\\x01 \\x01(\\x04R\\x06nodeId\\\"\\x14\\n\" +\n\t\"\\x12DeleteNodeResponse\\\"\\x87\\x01\\n\" +\n\t\"\\x11ExpireNodeRequest\\x12\\x17\\n\" +\n\t\"\\anode_id\\x18\\x01 \\x01(\\x04R\\x06nodeId\\x122\\n\" +\n\t\"\\x06expiry\\x18\\x02 \\x01(\\v2\\x1a.google.protobuf.TimestampR\\x06expiry\\x12%\\n\" +\n\t\"\\x0edisable_expiry\\x18\\x03 \\x01(\\bR\\rdisableExpiry\\\"<\\n\" +\n\t\"\\x12ExpireNodeResponse\\x12&\\n\" +\n\t\"\\x04node\\x18\\x01 \\x01(\\v2\\x12.headscale.v1.NodeR\\x04node\\\"G\\n\" +\n\t\"\\x11RenameNodeRequest\\x12\\x17\\n\" +\n\t\"\\anode_id\\x18\\x01 \\x01(\\x04R\\x06nodeId\\x12\\x19\\n\" 
+\n\t\"\\bnew_name\\x18\\x02 \\x01(\\tR\\anewName\\\"<\\n\" +\n\t\"\\x12RenameNodeResponse\\x12&\\n\" +\n\t\"\\x04node\\x18\\x01 \\x01(\\v2\\x12.headscale.v1.NodeR\\x04node\\\"&\\n\" +\n\t\"\\x10ListNodesRequest\\x12\\x12\\n\" +\n\t\"\\x04user\\x18\\x01 \\x01(\\tR\\x04user\\\"=\\n\" +\n\t\"\\x11ListNodesResponse\\x12(\\n\" +\n\t\"\\x05nodes\\x18\\x01 \\x03(\\v2\\x12.headscale.v1.NodeR\\x05nodes\\\"j\\n\" +\n\t\"\\x16DebugCreateNodeRequest\\x12\\x12\\n\" +\n\t\"\\x04user\\x18\\x01 \\x01(\\tR\\x04user\\x12\\x10\\n\" +\n\t\"\\x03key\\x18\\x02 \\x01(\\tR\\x03key\\x12\\x12\\n\" +\n\t\"\\x04name\\x18\\x03 \\x01(\\tR\\x04name\\x12\\x16\\n\" +\n\t\"\\x06routes\\x18\\x04 \\x03(\\tR\\x06routes\\\"A\\n\" +\n\t\"\\x17DebugCreateNodeResponse\\x12&\\n\" +\n\t\"\\x04node\\x18\\x01 \\x01(\\v2\\x12.headscale.v1.NodeR\\x04node\\\"6\\n\" +\n\t\"\\x16BackfillNodeIPsRequest\\x12\\x1c\\n\" +\n\t\"\\tconfirmed\\x18\\x01 \\x01(\\bR\\tconfirmed\\\"3\\n\" +\n\t\"\\x17BackfillNodeIPsResponse\\x12\\x18\\n\" +\n\t\"\\achanges\\x18\\x01 \\x03(\\tR\\achanges*\\x82\\x01\\n\" +\n\t\"\\x0eRegisterMethod\\x12\\x1f\\n\" +\n\t\"\\x1bREGISTER_METHOD_UNSPECIFIED\\x10\\x00\\x12\\x1c\\n\" +\n\t\"\\x18REGISTER_METHOD_AUTH_KEY\\x10\\x01\\x12\\x17\\n\" +\n\t\"\\x13REGISTER_METHOD_CLI\\x10\\x02\\x12\\x18\\n\" +\n\t\"\\x14REGISTER_METHOD_OIDC\\x10\\x03B)Z'github.com/juanfont/headscale/gen/go/v1b\\x06proto3\"\n\nvar (\n\tfile_headscale_v1_node_proto_rawDescOnce sync.Once\n\tfile_headscale_v1_node_proto_rawDescData []byte\n)\n\nfunc file_headscale_v1_node_proto_rawDescGZIP() []byte {\n\tfile_headscale_v1_node_proto_rawDescOnce.Do(func() {\n\t\tfile_headscale_v1_node_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_node_proto_rawDesc), len(file_headscale_v1_node_proto_rawDesc)))\n\t})\n\treturn file_headscale_v1_node_proto_rawDescData\n}\n\nvar file_headscale_v1_node_proto_enumTypes = make([]protoimpl.EnumInfo, 1)\nvar file_headscale_v1_node_proto_msgTypes = make([]protoimpl.MessageInfo, 21)\nvar file_headscale_v1_node_proto_goTypes = []any{\n\t(RegisterMethod)(0),               // 0: headscale.v1.RegisterMethod\n\t(*Node)(nil),                      // 1: headscale.v1.Node\n\t(*RegisterNodeRequest)(nil),       // 2: headscale.v1.RegisterNodeRequest\n\t(*RegisterNodeResponse)(nil),      // 3: headscale.v1.RegisterNodeResponse\n\t(*GetNodeRequest)(nil),            // 4: headscale.v1.GetNodeRequest\n\t(*GetNodeResponse)(nil),           // 5: headscale.v1.GetNodeResponse\n\t(*SetTagsRequest)(nil),            // 6: headscale.v1.SetTagsRequest\n\t(*SetTagsResponse)(nil),           // 7: headscale.v1.SetTagsResponse\n\t(*SetApprovedRoutesRequest)(nil),  // 8: headscale.v1.SetApprovedRoutesRequest\n\t(*SetApprovedRoutesResponse)(nil), // 9: headscale.v1.SetApprovedRoutesResponse\n\t(*DeleteNodeRequest)(nil),         // 10: headscale.v1.DeleteNodeRequest\n\t(*DeleteNodeResponse)(nil),        // 11: headscale.v1.DeleteNodeResponse\n\t(*ExpireNodeRequest)(nil),         // 12: headscale.v1.ExpireNodeRequest\n\t(*ExpireNodeResponse)(nil),        // 13: headscale.v1.ExpireNodeResponse\n\t(*RenameNodeRequest)(nil),         // 14: headscale.v1.RenameNodeRequest\n\t(*RenameNodeResponse)(nil),        // 15: headscale.v1.RenameNodeResponse\n\t(*ListNodesRequest)(nil),          // 16: headscale.v1.ListNodesRequest\n\t(*ListNodesResponse)(nil),         // 17: headscale.v1.ListNodesResponse\n\t(*DebugCreateNodeRequest)(nil),    // 18: headscale.v1.DebugCreateNodeRequest\n\t(*DebugCreateNodeResponse)(nil),   // 19: 
headscale.v1.DebugCreateNodeResponse\n\t(*BackfillNodeIPsRequest)(nil),    // 20: headscale.v1.BackfillNodeIPsRequest\n\t(*BackfillNodeIPsResponse)(nil),   // 21: headscale.v1.BackfillNodeIPsResponse\n\t(*User)(nil),                      // 22: headscale.v1.User\n\t(*timestamppb.Timestamp)(nil),     // 23: google.protobuf.Timestamp\n\t(*PreAuthKey)(nil),                // 24: headscale.v1.PreAuthKey\n}\nvar file_headscale_v1_node_proto_depIdxs = []int32{\n\t22, // 0: headscale.v1.Node.user:type_name -> headscale.v1.User\n\t23, // 1: headscale.v1.Node.last_seen:type_name -> google.protobuf.Timestamp\n\t23, // 2: headscale.v1.Node.expiry:type_name -> google.protobuf.Timestamp\n\t24, // 3: headscale.v1.Node.pre_auth_key:type_name -> headscale.v1.PreAuthKey\n\t23, // 4: headscale.v1.Node.created_at:type_name -> google.protobuf.Timestamp\n\t0,  // 5: headscale.v1.Node.register_method:type_name -> headscale.v1.RegisterMethod\n\t1,  // 6: headscale.v1.RegisterNodeResponse.node:type_name -> headscale.v1.Node\n\t1,  // 7: headscale.v1.GetNodeResponse.node:type_name -> headscale.v1.Node\n\t1,  // 8: headscale.v1.SetTagsResponse.node:type_name -> headscale.v1.Node\n\t1,  // 9: headscale.v1.SetApprovedRoutesResponse.node:type_name -> headscale.v1.Node\n\t23, // 10: headscale.v1.ExpireNodeRequest.expiry:type_name -> google.protobuf.Timestamp\n\t1,  // 11: headscale.v1.ExpireNodeResponse.node:type_name -> headscale.v1.Node\n\t1,  // 12: headscale.v1.RenameNodeResponse.node:type_name -> headscale.v1.Node\n\t1,  // 13: headscale.v1.ListNodesResponse.nodes:type_name -> headscale.v1.Node\n\t1,  // 14: headscale.v1.DebugCreateNodeResponse.node:type_name -> headscale.v1.Node\n\t15, // [15:15] is the sub-list for method output_type\n\t15, // [15:15] is the sub-list for method input_type\n\t15, // [15:15] is the sub-list for extension type_name\n\t15, // [15:15] is the sub-list for extension extendee\n\t0,  // [0:15] is the sub-list for field type_name\n}\n\nfunc init() { file_headscale_v1_node_proto_init() }\nfunc file_headscale_v1_node_proto_init() {\n\tif File_headscale_v1_node_proto != nil {\n\t\treturn\n\t}\n\tfile_headscale_v1_preauthkey_proto_init()\n\tfile_headscale_v1_user_proto_init()\n\ttype x struct{}\n\tout := protoimpl.TypeBuilder{\n\t\tFile: protoimpl.DescBuilder{\n\t\t\tGoPackagePath: reflect.TypeOf(x{}).PkgPath(),\n\t\t\tRawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_node_proto_rawDesc), len(file_headscale_v1_node_proto_rawDesc)),\n\t\t\tNumEnums:      1,\n\t\t\tNumMessages:   21,\n\t\t\tNumExtensions: 0,\n\t\t\tNumServices:   0,\n\t\t},\n\t\tGoTypes:           file_headscale_v1_node_proto_goTypes,\n\t\tDependencyIndexes: file_headscale_v1_node_proto_depIdxs,\n\t\tEnumInfos:         file_headscale_v1_node_proto_enumTypes,\n\t\tMessageInfos:      file_headscale_v1_node_proto_msgTypes,\n\t}.Build()\n\tFile_headscale_v1_node_proto = out.File\n\tfile_headscale_v1_node_proto_goTypes = nil\n\tfile_headscale_v1_node_proto_depIdxs = nil\n}\n"
  },
  {
    "path": "gen/go/headscale/v1/policy.pb.go",
    "content": "// Code generated by protoc-gen-go. DO NOT EDIT.\n// versions:\n// \tprotoc-gen-go v1.36.11\n// \tprotoc        (unknown)\n// source: headscale/v1/policy.proto\n\npackage v1\n\nimport (\n\tprotoreflect \"google.golang.org/protobuf/reflect/protoreflect\"\n\tprotoimpl \"google.golang.org/protobuf/runtime/protoimpl\"\n\ttimestamppb \"google.golang.org/protobuf/types/known/timestamppb\"\n\treflect \"reflect\"\n\tsync \"sync\"\n\tunsafe \"unsafe\"\n)\n\nconst (\n\t// Verify that this generated code is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)\n\t// Verify that runtime/protoimpl is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)\n)\n\ntype SetPolicyRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tPolicy        string                 `protobuf:\"bytes,1,opt,name=policy,proto3\" json:\"policy,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *SetPolicyRequest) Reset() {\n\t*x = SetPolicyRequest{}\n\tmi := &file_headscale_v1_policy_proto_msgTypes[0]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *SetPolicyRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*SetPolicyRequest) ProtoMessage() {}\n\nfunc (x *SetPolicyRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_policy_proto_msgTypes[0]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use SetPolicyRequest.ProtoReflect.Descriptor instead.\nfunc (*SetPolicyRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_policy_proto_rawDescGZIP(), []int{0}\n}\n\nfunc (x *SetPolicyRequest) GetPolicy() string {\n\tif x != nil {\n\t\treturn x.Policy\n\t}\n\treturn \"\"\n}\n\ntype SetPolicyResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tPolicy        string                 `protobuf:\"bytes,1,opt,name=policy,proto3\" json:\"policy,omitempty\"`\n\tUpdatedAt     *timestamppb.Timestamp `protobuf:\"bytes,2,opt,name=updated_at,json=updatedAt,proto3\" json:\"updated_at,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *SetPolicyResponse) Reset() {\n\t*x = SetPolicyResponse{}\n\tmi := &file_headscale_v1_policy_proto_msgTypes[1]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *SetPolicyResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*SetPolicyResponse) ProtoMessage() {}\n\nfunc (x *SetPolicyResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_policy_proto_msgTypes[1]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use SetPolicyResponse.ProtoReflect.Descriptor instead.\nfunc (*SetPolicyResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_policy_proto_rawDescGZIP(), []int{1}\n}\n\nfunc (x *SetPolicyResponse) GetPolicy() string {\n\tif x != nil {\n\t\treturn x.Policy\n\t}\n\treturn \"\"\n}\n\nfunc (x *SetPolicyResponse) GetUpdatedAt() *timestamppb.Timestamp {\n\tif x != nil {\n\t\treturn x.UpdatedAt\n\t}\n\treturn nil\n}\n\ntype 
GetPolicyRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *GetPolicyRequest) Reset() {\n\t*x = GetPolicyRequest{}\n\tmi := &file_headscale_v1_policy_proto_msgTypes[2]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *GetPolicyRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*GetPolicyRequest) ProtoMessage() {}\n\nfunc (x *GetPolicyRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_policy_proto_msgTypes[2]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use GetPolicyRequest.ProtoReflect.Descriptor instead.\nfunc (*GetPolicyRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_policy_proto_rawDescGZIP(), []int{2}\n}\n\ntype GetPolicyResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tPolicy        string                 `protobuf:\"bytes,1,opt,name=policy,proto3\" json:\"policy,omitempty\"`\n\tUpdatedAt     *timestamppb.Timestamp `protobuf:\"bytes,2,opt,name=updated_at,json=updatedAt,proto3\" json:\"updated_at,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *GetPolicyResponse) Reset() {\n\t*x = GetPolicyResponse{}\n\tmi := &file_headscale_v1_policy_proto_msgTypes[3]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *GetPolicyResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*GetPolicyResponse) ProtoMessage() {}\n\nfunc (x *GetPolicyResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_policy_proto_msgTypes[3]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use GetPolicyResponse.ProtoReflect.Descriptor instead.\nfunc (*GetPolicyResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_policy_proto_rawDescGZIP(), []int{3}\n}\n\nfunc (x *GetPolicyResponse) GetPolicy() string {\n\tif x != nil {\n\t\treturn x.Policy\n\t}\n\treturn \"\"\n}\n\nfunc (x *GetPolicyResponse) GetUpdatedAt() *timestamppb.Timestamp {\n\tif x != nil {\n\t\treturn x.UpdatedAt\n\t}\n\treturn nil\n}\n\nvar File_headscale_v1_policy_proto protoreflect.FileDescriptor\n\nconst file_headscale_v1_policy_proto_rawDesc = \"\" +\n\t\"\\n\" +\n\t\"\\x19headscale/v1/policy.proto\\x12\\fheadscale.v1\\x1a\\x1fgoogle/protobuf/timestamp.proto\\\"*\\n\" +\n\t\"\\x10SetPolicyRequest\\x12\\x16\\n\" +\n\t\"\\x06policy\\x18\\x01 \\x01(\\tR\\x06policy\\\"f\\n\" +\n\t\"\\x11SetPolicyResponse\\x12\\x16\\n\" +\n\t\"\\x06policy\\x18\\x01 \\x01(\\tR\\x06policy\\x129\\n\" +\n\t\"\\n\" +\n\t\"updated_at\\x18\\x02 \\x01(\\v2\\x1a.google.protobuf.TimestampR\\tupdatedAt\\\"\\x12\\n\" +\n\t\"\\x10GetPolicyRequest\\\"f\\n\" +\n\t\"\\x11GetPolicyResponse\\x12\\x16\\n\" +\n\t\"\\x06policy\\x18\\x01 \\x01(\\tR\\x06policy\\x129\\n\" +\n\t\"\\n\" +\n\t\"updated_at\\x18\\x02 \\x01(\\v2\\x1a.google.protobuf.TimestampR\\tupdatedAtB)Z'github.com/juanfont/headscale/gen/go/v1b\\x06proto3\"\n\nvar (\n\tfile_headscale_v1_policy_proto_rawDescOnce 
sync.Once\n\tfile_headscale_v1_policy_proto_rawDescData []byte\n)\n\nfunc file_headscale_v1_policy_proto_rawDescGZIP() []byte {\n\tfile_headscale_v1_policy_proto_rawDescOnce.Do(func() {\n\t\tfile_headscale_v1_policy_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_policy_proto_rawDesc), len(file_headscale_v1_policy_proto_rawDesc)))\n\t})\n\treturn file_headscale_v1_policy_proto_rawDescData\n}\n\nvar file_headscale_v1_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 4)\nvar file_headscale_v1_policy_proto_goTypes = []any{\n\t(*SetPolicyRequest)(nil),      // 0: headscale.v1.SetPolicyRequest\n\t(*SetPolicyResponse)(nil),     // 1: headscale.v1.SetPolicyResponse\n\t(*GetPolicyRequest)(nil),      // 2: headscale.v1.GetPolicyRequest\n\t(*GetPolicyResponse)(nil),     // 3: headscale.v1.GetPolicyResponse\n\t(*timestamppb.Timestamp)(nil), // 4: google.protobuf.Timestamp\n}\nvar file_headscale_v1_policy_proto_depIdxs = []int32{\n\t4, // 0: headscale.v1.SetPolicyResponse.updated_at:type_name -> google.protobuf.Timestamp\n\t4, // 1: headscale.v1.GetPolicyResponse.updated_at:type_name -> google.protobuf.Timestamp\n\t2, // [2:2] is the sub-list for method output_type\n\t2, // [2:2] is the sub-list for method input_type\n\t2, // [2:2] is the sub-list for extension type_name\n\t2, // [2:2] is the sub-list for extension extendee\n\t0, // [0:2] is the sub-list for field type_name\n}\n\nfunc init() { file_headscale_v1_policy_proto_init() }\nfunc file_headscale_v1_policy_proto_init() {\n\tif File_headscale_v1_policy_proto != nil {\n\t\treturn\n\t}\n\ttype x struct{}\n\tout := protoimpl.TypeBuilder{\n\t\tFile: protoimpl.DescBuilder{\n\t\t\tGoPackagePath: reflect.TypeOf(x{}).PkgPath(),\n\t\t\tRawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_policy_proto_rawDesc), len(file_headscale_v1_policy_proto_rawDesc)),\n\t\t\tNumEnums:      0,\n\t\t\tNumMessages:   4,\n\t\t\tNumExtensions: 0,\n\t\t\tNumServices:   0,\n\t\t},\n\t\tGoTypes:           file_headscale_v1_policy_proto_goTypes,\n\t\tDependencyIndexes: file_headscale_v1_policy_proto_depIdxs,\n\t\tMessageInfos:      file_headscale_v1_policy_proto_msgTypes,\n\t}.Build()\n\tFile_headscale_v1_policy_proto = out.File\n\tfile_headscale_v1_policy_proto_goTypes = nil\n\tfile_headscale_v1_policy_proto_depIdxs = nil\n}\n"
  },
  {
    "path": "gen/go/headscale/v1/preauthkey.pb.go",
    "content": "// Code generated by protoc-gen-go. DO NOT EDIT.\n// versions:\n// \tprotoc-gen-go v1.36.11\n// \tprotoc        (unknown)\n// source: headscale/v1/preauthkey.proto\n\npackage v1\n\nimport (\n\tprotoreflect \"google.golang.org/protobuf/reflect/protoreflect\"\n\tprotoimpl \"google.golang.org/protobuf/runtime/protoimpl\"\n\ttimestamppb \"google.golang.org/protobuf/types/known/timestamppb\"\n\treflect \"reflect\"\n\tsync \"sync\"\n\tunsafe \"unsafe\"\n)\n\nconst (\n\t// Verify that this generated code is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)\n\t// Verify that runtime/protoimpl is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)\n)\n\ntype PreAuthKey struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tUser          *User                  `protobuf:\"bytes,1,opt,name=user,proto3\" json:\"user,omitempty\"`\n\tId            uint64                 `protobuf:\"varint,2,opt,name=id,proto3\" json:\"id,omitempty\"`\n\tKey           string                 `protobuf:\"bytes,3,opt,name=key,proto3\" json:\"key,omitempty\"`\n\tReusable      bool                   `protobuf:\"varint,4,opt,name=reusable,proto3\" json:\"reusable,omitempty\"`\n\tEphemeral     bool                   `protobuf:\"varint,5,opt,name=ephemeral,proto3\" json:\"ephemeral,omitempty\"`\n\tUsed          bool                   `protobuf:\"varint,6,opt,name=used,proto3\" json:\"used,omitempty\"`\n\tExpiration    *timestamppb.Timestamp `protobuf:\"bytes,7,opt,name=expiration,proto3\" json:\"expiration,omitempty\"`\n\tCreatedAt     *timestamppb.Timestamp `protobuf:\"bytes,8,opt,name=created_at,json=createdAt,proto3\" json:\"created_at,omitempty\"`\n\tAclTags       []string               `protobuf:\"bytes,9,rep,name=acl_tags,json=aclTags,proto3\" json:\"acl_tags,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *PreAuthKey) Reset() {\n\t*x = PreAuthKey{}\n\tmi := &file_headscale_v1_preauthkey_proto_msgTypes[0]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *PreAuthKey) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*PreAuthKey) ProtoMessage() {}\n\nfunc (x *PreAuthKey) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_preauthkey_proto_msgTypes[0]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use PreAuthKey.ProtoReflect.Descriptor instead.\nfunc (*PreAuthKey) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{0}\n}\n\nfunc (x *PreAuthKey) GetUser() *User {\n\tif x != nil {\n\t\treturn x.User\n\t}\n\treturn nil\n}\n\nfunc (x *PreAuthKey) GetId() uint64 {\n\tif x != nil {\n\t\treturn x.Id\n\t}\n\treturn 0\n}\n\nfunc (x *PreAuthKey) GetKey() string {\n\tif x != nil {\n\t\treturn x.Key\n\t}\n\treturn \"\"\n}\n\nfunc (x *PreAuthKey) GetReusable() bool {\n\tif x != nil {\n\t\treturn x.Reusable\n\t}\n\treturn false\n}\n\nfunc (x *PreAuthKey) GetEphemeral() bool {\n\tif x != nil {\n\t\treturn x.Ephemeral\n\t}\n\treturn false\n}\n\nfunc (x *PreAuthKey) GetUsed() bool {\n\tif x != nil {\n\t\treturn x.Used\n\t}\n\treturn false\n}\n\nfunc (x *PreAuthKey) GetExpiration() *timestamppb.Timestamp {\n\tif x != nil {\n\t\treturn x.Expiration\n\t}\n\treturn nil\n}\n\nfunc (x 
*PreAuthKey) GetCreatedAt() *timestamppb.Timestamp {\n\tif x != nil {\n\t\treturn x.CreatedAt\n\t}\n\treturn nil\n}\n\nfunc (x *PreAuthKey) GetAclTags() []string {\n\tif x != nil {\n\t\treturn x.AclTags\n\t}\n\treturn nil\n}\n\ntype CreatePreAuthKeyRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tUser          uint64                 `protobuf:\"varint,1,opt,name=user,proto3\" json:\"user,omitempty\"`\n\tReusable      bool                   `protobuf:\"varint,2,opt,name=reusable,proto3\" json:\"reusable,omitempty\"`\n\tEphemeral     bool                   `protobuf:\"varint,3,opt,name=ephemeral,proto3\" json:\"ephemeral,omitempty\"`\n\tExpiration    *timestamppb.Timestamp `protobuf:\"bytes,4,opt,name=expiration,proto3\" json:\"expiration,omitempty\"`\n\tAclTags       []string               `protobuf:\"bytes,5,rep,name=acl_tags,json=aclTags,proto3\" json:\"acl_tags,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *CreatePreAuthKeyRequest) Reset() {\n\t*x = CreatePreAuthKeyRequest{}\n\tmi := &file_headscale_v1_preauthkey_proto_msgTypes[1]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *CreatePreAuthKeyRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*CreatePreAuthKeyRequest) ProtoMessage() {}\n\nfunc (x *CreatePreAuthKeyRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_preauthkey_proto_msgTypes[1]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use CreatePreAuthKeyRequest.ProtoReflect.Descriptor instead.\nfunc (*CreatePreAuthKeyRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{1}\n}\n\nfunc (x *CreatePreAuthKeyRequest) GetUser() uint64 {\n\tif x != nil {\n\t\treturn x.User\n\t}\n\treturn 0\n}\n\nfunc (x *CreatePreAuthKeyRequest) GetReusable() bool {\n\tif x != nil {\n\t\treturn x.Reusable\n\t}\n\treturn false\n}\n\nfunc (x *CreatePreAuthKeyRequest) GetEphemeral() bool {\n\tif x != nil {\n\t\treturn x.Ephemeral\n\t}\n\treturn false\n}\n\nfunc (x *CreatePreAuthKeyRequest) GetExpiration() *timestamppb.Timestamp {\n\tif x != nil {\n\t\treturn x.Expiration\n\t}\n\treturn nil\n}\n\nfunc (x *CreatePreAuthKeyRequest) GetAclTags() []string {\n\tif x != nil {\n\t\treturn x.AclTags\n\t}\n\treturn nil\n}\n\ntype CreatePreAuthKeyResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tPreAuthKey    *PreAuthKey            `protobuf:\"bytes,1,opt,name=pre_auth_key,json=preAuthKey,proto3\" json:\"pre_auth_key,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *CreatePreAuthKeyResponse) Reset() {\n\t*x = CreatePreAuthKeyResponse{}\n\tmi := &file_headscale_v1_preauthkey_proto_msgTypes[2]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *CreatePreAuthKeyResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*CreatePreAuthKeyResponse) ProtoMessage() {}\n\nfunc (x *CreatePreAuthKeyResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_preauthkey_proto_msgTypes[2]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil 
{\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use CreatePreAuthKeyResponse.ProtoReflect.Descriptor instead.\nfunc (*CreatePreAuthKeyResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{2}\n}\n\nfunc (x *CreatePreAuthKeyResponse) GetPreAuthKey() *PreAuthKey {\n\tif x != nil {\n\t\treturn x.PreAuthKey\n\t}\n\treturn nil\n}\n\ntype ExpirePreAuthKeyRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tId            uint64                 `protobuf:\"varint,1,opt,name=id,proto3\" json:\"id,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *ExpirePreAuthKeyRequest) Reset() {\n\t*x = ExpirePreAuthKeyRequest{}\n\tmi := &file_headscale_v1_preauthkey_proto_msgTypes[3]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *ExpirePreAuthKeyRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*ExpirePreAuthKeyRequest) ProtoMessage() {}\n\nfunc (x *ExpirePreAuthKeyRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_preauthkey_proto_msgTypes[3]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use ExpirePreAuthKeyRequest.ProtoReflect.Descriptor instead.\nfunc (*ExpirePreAuthKeyRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{3}\n}\n\nfunc (x *ExpirePreAuthKeyRequest) GetId() uint64 {\n\tif x != nil {\n\t\treturn x.Id\n\t}\n\treturn 0\n}\n\ntype ExpirePreAuthKeyResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *ExpirePreAuthKeyResponse) Reset() {\n\t*x = ExpirePreAuthKeyResponse{}\n\tmi := &file_headscale_v1_preauthkey_proto_msgTypes[4]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *ExpirePreAuthKeyResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*ExpirePreAuthKeyResponse) ProtoMessage() {}\n\nfunc (x *ExpirePreAuthKeyResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_preauthkey_proto_msgTypes[4]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use ExpirePreAuthKeyResponse.ProtoReflect.Descriptor instead.\nfunc (*ExpirePreAuthKeyResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{4}\n}\n\ntype DeletePreAuthKeyRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tId            uint64                 `protobuf:\"varint,1,opt,name=id,proto3\" json:\"id,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *DeletePreAuthKeyRequest) Reset() {\n\t*x = DeletePreAuthKeyRequest{}\n\tmi := &file_headscale_v1_preauthkey_proto_msgTypes[5]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *DeletePreAuthKeyRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*DeletePreAuthKeyRequest) ProtoMessage() 
{}\n\nfunc (x *DeletePreAuthKeyRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_preauthkey_proto_msgTypes[5]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use DeletePreAuthKeyRequest.ProtoReflect.Descriptor instead.\nfunc (*DeletePreAuthKeyRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{5}\n}\n\nfunc (x *DeletePreAuthKeyRequest) GetId() uint64 {\n\tif x != nil {\n\t\treturn x.Id\n\t}\n\treturn 0\n}\n\ntype DeletePreAuthKeyResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *DeletePreAuthKeyResponse) Reset() {\n\t*x = DeletePreAuthKeyResponse{}\n\tmi := &file_headscale_v1_preauthkey_proto_msgTypes[6]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *DeletePreAuthKeyResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*DeletePreAuthKeyResponse) ProtoMessage() {}\n\nfunc (x *DeletePreAuthKeyResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_preauthkey_proto_msgTypes[6]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use DeletePreAuthKeyResponse.ProtoReflect.Descriptor instead.\nfunc (*DeletePreAuthKeyResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{6}\n}\n\ntype ListPreAuthKeysRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *ListPreAuthKeysRequest) Reset() {\n\t*x = ListPreAuthKeysRequest{}\n\tmi := &file_headscale_v1_preauthkey_proto_msgTypes[7]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *ListPreAuthKeysRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*ListPreAuthKeysRequest) ProtoMessage() {}\n\nfunc (x *ListPreAuthKeysRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_preauthkey_proto_msgTypes[7]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use ListPreAuthKeysRequest.ProtoReflect.Descriptor instead.\nfunc (*ListPreAuthKeysRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{7}\n}\n\ntype ListPreAuthKeysResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tPreAuthKeys   []*PreAuthKey          `protobuf:\"bytes,1,rep,name=pre_auth_keys,json=preAuthKeys,proto3\" json:\"pre_auth_keys,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *ListPreAuthKeysResponse) Reset() {\n\t*x = ListPreAuthKeysResponse{}\n\tmi := &file_headscale_v1_preauthkey_proto_msgTypes[8]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *ListPreAuthKeysResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc 
(*ListPreAuthKeysResponse) ProtoMessage() {}\n\nfunc (x *ListPreAuthKeysResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_preauthkey_proto_msgTypes[8]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use ListPreAuthKeysResponse.ProtoReflect.Descriptor instead.\nfunc (*ListPreAuthKeysResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{8}\n}\n\nfunc (x *ListPreAuthKeysResponse) GetPreAuthKeys() []*PreAuthKey {\n\tif x != nil {\n\t\treturn x.PreAuthKeys\n\t}\n\treturn nil\n}\n\nvar File_headscale_v1_preauthkey_proto protoreflect.FileDescriptor\n\nconst file_headscale_v1_preauthkey_proto_rawDesc = \"\" +\n\t\"\\n\" +\n\t\"\\x1dheadscale/v1/preauthkey.proto\\x12\\fheadscale.v1\\x1a\\x1fgoogle/protobuf/timestamp.proto\\x1a\\x17headscale/v1/user.proto\\\"\\xb6\\x02\\n\" +\n\t\"\\n\" +\n\t\"PreAuthKey\\x12&\\n\" +\n\t\"\\x04user\\x18\\x01 \\x01(\\v2\\x12.headscale.v1.UserR\\x04user\\x12\\x0e\\n\" +\n\t\"\\x02id\\x18\\x02 \\x01(\\x04R\\x02id\\x12\\x10\\n\" +\n\t\"\\x03key\\x18\\x03 \\x01(\\tR\\x03key\\x12\\x1a\\n\" +\n\t\"\\breusable\\x18\\x04 \\x01(\\bR\\breusable\\x12\\x1c\\n\" +\n\t\"\\tephemeral\\x18\\x05 \\x01(\\bR\\tephemeral\\x12\\x12\\n\" +\n\t\"\\x04used\\x18\\x06 \\x01(\\bR\\x04used\\x12:\\n\" +\n\t\"\\n\" +\n\t\"expiration\\x18\\a \\x01(\\v2\\x1a.google.protobuf.TimestampR\\n\" +\n\t\"expiration\\x129\\n\" +\n\t\"\\n\" +\n\t\"created_at\\x18\\b \\x01(\\v2\\x1a.google.protobuf.TimestampR\\tcreatedAt\\x12\\x19\\n\" +\n\t\"\\bacl_tags\\x18\\t \\x03(\\tR\\aaclTags\\\"\\xbe\\x01\\n\" +\n\t\"\\x17CreatePreAuthKeyRequest\\x12\\x12\\n\" +\n\t\"\\x04user\\x18\\x01 \\x01(\\x04R\\x04user\\x12\\x1a\\n\" +\n\t\"\\breusable\\x18\\x02 \\x01(\\bR\\breusable\\x12\\x1c\\n\" +\n\t\"\\tephemeral\\x18\\x03 \\x01(\\bR\\tephemeral\\x12:\\n\" +\n\t\"\\n\" +\n\t\"expiration\\x18\\x04 \\x01(\\v2\\x1a.google.protobuf.TimestampR\\n\" +\n\t\"expiration\\x12\\x19\\n\" +\n\t\"\\bacl_tags\\x18\\x05 \\x03(\\tR\\aaclTags\\\"V\\n\" +\n\t\"\\x18CreatePreAuthKeyResponse\\x12:\\n\" +\n\t\"\\fpre_auth_key\\x18\\x01 \\x01(\\v2\\x18.headscale.v1.PreAuthKeyR\\n\" +\n\t\"preAuthKey\\\")\\n\" +\n\t\"\\x17ExpirePreAuthKeyRequest\\x12\\x0e\\n\" +\n\t\"\\x02id\\x18\\x01 \\x01(\\x04R\\x02id\\\"\\x1a\\n\" +\n\t\"\\x18ExpirePreAuthKeyResponse\\\")\\n\" +\n\t\"\\x17DeletePreAuthKeyRequest\\x12\\x0e\\n\" +\n\t\"\\x02id\\x18\\x01 \\x01(\\x04R\\x02id\\\"\\x1a\\n\" +\n\t\"\\x18DeletePreAuthKeyResponse\\\"\\x18\\n\" +\n\t\"\\x16ListPreAuthKeysRequest\\\"W\\n\" +\n\t\"\\x17ListPreAuthKeysResponse\\x12<\\n\" +\n\t\"\\rpre_auth_keys\\x18\\x01 \\x03(\\v2\\x18.headscale.v1.PreAuthKeyR\\vpreAuthKeysB)Z'github.com/juanfont/headscale/gen/go/v1b\\x06proto3\"\n\nvar (\n\tfile_headscale_v1_preauthkey_proto_rawDescOnce sync.Once\n\tfile_headscale_v1_preauthkey_proto_rawDescData []byte\n)\n\nfunc file_headscale_v1_preauthkey_proto_rawDescGZIP() []byte {\n\tfile_headscale_v1_preauthkey_proto_rawDescOnce.Do(func() {\n\t\tfile_headscale_v1_preauthkey_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_preauthkey_proto_rawDesc), len(file_headscale_v1_preauthkey_proto_rawDesc)))\n\t})\n\treturn file_headscale_v1_preauthkey_proto_rawDescData\n}\n\nvar file_headscale_v1_preauthkey_proto_msgTypes = make([]protoimpl.MessageInfo, 9)\nvar 
file_headscale_v1_preauthkey_proto_goTypes = []any{\n\t(*PreAuthKey)(nil),               // 0: headscale.v1.PreAuthKey\n\t(*CreatePreAuthKeyRequest)(nil),  // 1: headscale.v1.CreatePreAuthKeyRequest\n\t(*CreatePreAuthKeyResponse)(nil), // 2: headscale.v1.CreatePreAuthKeyResponse\n\t(*ExpirePreAuthKeyRequest)(nil),  // 3: headscale.v1.ExpirePreAuthKeyRequest\n\t(*ExpirePreAuthKeyResponse)(nil), // 4: headscale.v1.ExpirePreAuthKeyResponse\n\t(*DeletePreAuthKeyRequest)(nil),  // 5: headscale.v1.DeletePreAuthKeyRequest\n\t(*DeletePreAuthKeyResponse)(nil), // 6: headscale.v1.DeletePreAuthKeyResponse\n\t(*ListPreAuthKeysRequest)(nil),   // 7: headscale.v1.ListPreAuthKeysRequest\n\t(*ListPreAuthKeysResponse)(nil),  // 8: headscale.v1.ListPreAuthKeysResponse\n\t(*User)(nil),                     // 9: headscale.v1.User\n\t(*timestamppb.Timestamp)(nil),    // 10: google.protobuf.Timestamp\n}\nvar file_headscale_v1_preauthkey_proto_depIdxs = []int32{\n\t9,  // 0: headscale.v1.PreAuthKey.user:type_name -> headscale.v1.User\n\t10, // 1: headscale.v1.PreAuthKey.expiration:type_name -> google.protobuf.Timestamp\n\t10, // 2: headscale.v1.PreAuthKey.created_at:type_name -> google.protobuf.Timestamp\n\t10, // 3: headscale.v1.CreatePreAuthKeyRequest.expiration:type_name -> google.protobuf.Timestamp\n\t0,  // 4: headscale.v1.CreatePreAuthKeyResponse.pre_auth_key:type_name -> headscale.v1.PreAuthKey\n\t0,  // 5: headscale.v1.ListPreAuthKeysResponse.pre_auth_keys:type_name -> headscale.v1.PreAuthKey\n\t6,  // [6:6] is the sub-list for method output_type\n\t6,  // [6:6] is the sub-list for method input_type\n\t6,  // [6:6] is the sub-list for extension type_name\n\t6,  // [6:6] is the sub-list for extension extendee\n\t0,  // [0:6] is the sub-list for field type_name\n}\n\nfunc init() { file_headscale_v1_preauthkey_proto_init() }\nfunc file_headscale_v1_preauthkey_proto_init() {\n\tif File_headscale_v1_preauthkey_proto != nil {\n\t\treturn\n\t}\n\tfile_headscale_v1_user_proto_init()\n\ttype x struct{}\n\tout := protoimpl.TypeBuilder{\n\t\tFile: protoimpl.DescBuilder{\n\t\t\tGoPackagePath: reflect.TypeOf(x{}).PkgPath(),\n\t\t\tRawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_preauthkey_proto_rawDesc), len(file_headscale_v1_preauthkey_proto_rawDesc)),\n\t\t\tNumEnums:      0,\n\t\t\tNumMessages:   9,\n\t\t\tNumExtensions: 0,\n\t\t\tNumServices:   0,\n\t\t},\n\t\tGoTypes:           file_headscale_v1_preauthkey_proto_goTypes,\n\t\tDependencyIndexes: file_headscale_v1_preauthkey_proto_depIdxs,\n\t\tMessageInfos:      file_headscale_v1_preauthkey_proto_msgTypes,\n\t}.Build()\n\tFile_headscale_v1_preauthkey_proto = out.File\n\tfile_headscale_v1_preauthkey_proto_goTypes = nil\n\tfile_headscale_v1_preauthkey_proto_depIdxs = nil\n}\n"
  },
  {
    "path": "gen/go/headscale/v1/user.pb.go",
    "content": "// Code generated by protoc-gen-go. DO NOT EDIT.\n// versions:\n// \tprotoc-gen-go v1.36.11\n// \tprotoc        (unknown)\n// source: headscale/v1/user.proto\n\npackage v1\n\nimport (\n\tprotoreflect \"google.golang.org/protobuf/reflect/protoreflect\"\n\tprotoimpl \"google.golang.org/protobuf/runtime/protoimpl\"\n\ttimestamppb \"google.golang.org/protobuf/types/known/timestamppb\"\n\treflect \"reflect\"\n\tsync \"sync\"\n\tunsafe \"unsafe\"\n)\n\nconst (\n\t// Verify that this generated code is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)\n\t// Verify that runtime/protoimpl is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)\n)\n\ntype User struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tId            uint64                 `protobuf:\"varint,1,opt,name=id,proto3\" json:\"id,omitempty\"`\n\tName          string                 `protobuf:\"bytes,2,opt,name=name,proto3\" json:\"name,omitempty\"`\n\tCreatedAt     *timestamppb.Timestamp `protobuf:\"bytes,3,opt,name=created_at,json=createdAt,proto3\" json:\"created_at,omitempty\"`\n\tDisplayName   string                 `protobuf:\"bytes,4,opt,name=display_name,json=displayName,proto3\" json:\"display_name,omitempty\"`\n\tEmail         string                 `protobuf:\"bytes,5,opt,name=email,proto3\" json:\"email,omitempty\"`\n\tProviderId    string                 `protobuf:\"bytes,6,opt,name=provider_id,json=providerId,proto3\" json:\"provider_id,omitempty\"`\n\tProvider      string                 `protobuf:\"bytes,7,opt,name=provider,proto3\" json:\"provider,omitempty\"`\n\tProfilePicUrl string                 `protobuf:\"bytes,8,opt,name=profile_pic_url,json=profilePicUrl,proto3\" json:\"profile_pic_url,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *User) Reset() {\n\t*x = User{}\n\tmi := &file_headscale_v1_user_proto_msgTypes[0]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *User) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*User) ProtoMessage() {}\n\nfunc (x *User) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_user_proto_msgTypes[0]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use User.ProtoReflect.Descriptor instead.\nfunc (*User) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_user_proto_rawDescGZIP(), []int{0}\n}\n\nfunc (x *User) GetId() uint64 {\n\tif x != nil {\n\t\treturn x.Id\n\t}\n\treturn 0\n}\n\nfunc (x *User) GetName() string {\n\tif x != nil {\n\t\treturn x.Name\n\t}\n\treturn \"\"\n}\n\nfunc (x *User) GetCreatedAt() *timestamppb.Timestamp {\n\tif x != nil {\n\t\treturn x.CreatedAt\n\t}\n\treturn nil\n}\n\nfunc (x *User) GetDisplayName() string {\n\tif x != nil {\n\t\treturn x.DisplayName\n\t}\n\treturn \"\"\n}\n\nfunc (x *User) GetEmail() string {\n\tif x != nil {\n\t\treturn x.Email\n\t}\n\treturn \"\"\n}\n\nfunc (x *User) GetProviderId() string {\n\tif x != nil {\n\t\treturn x.ProviderId\n\t}\n\treturn \"\"\n}\n\nfunc (x *User) GetProvider() string {\n\tif x != nil {\n\t\treturn x.Provider\n\t}\n\treturn \"\"\n}\n\nfunc (x *User) GetProfilePicUrl() string {\n\tif x != nil {\n\t\treturn x.ProfilePicUrl\n\t}\n\treturn \"\"\n}\n\ntype CreateUserRequest struct {\n\tstate  
       protoimpl.MessageState `protogen:\"open.v1\"`\n\tName          string                 `protobuf:\"bytes,1,opt,name=name,proto3\" json:\"name,omitempty\"`\n\tDisplayName   string                 `protobuf:\"bytes,2,opt,name=display_name,json=displayName,proto3\" json:\"display_name,omitempty\"`\n\tEmail         string                 `protobuf:\"bytes,3,opt,name=email,proto3\" json:\"email,omitempty\"`\n\tPictureUrl    string                 `protobuf:\"bytes,4,opt,name=picture_url,json=pictureUrl,proto3\" json:\"picture_url,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *CreateUserRequest) Reset() {\n\t*x = CreateUserRequest{}\n\tmi := &file_headscale_v1_user_proto_msgTypes[1]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *CreateUserRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*CreateUserRequest) ProtoMessage() {}\n\nfunc (x *CreateUserRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_user_proto_msgTypes[1]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use CreateUserRequest.ProtoReflect.Descriptor instead.\nfunc (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_user_proto_rawDescGZIP(), []int{1}\n}\n\nfunc (x *CreateUserRequest) GetName() string {\n\tif x != nil {\n\t\treturn x.Name\n\t}\n\treturn \"\"\n}\n\nfunc (x *CreateUserRequest) GetDisplayName() string {\n\tif x != nil {\n\t\treturn x.DisplayName\n\t}\n\treturn \"\"\n}\n\nfunc (x *CreateUserRequest) GetEmail() string {\n\tif x != nil {\n\t\treturn x.Email\n\t}\n\treturn \"\"\n}\n\nfunc (x *CreateUserRequest) GetPictureUrl() string {\n\tif x != nil {\n\t\treturn x.PictureUrl\n\t}\n\treturn \"\"\n}\n\ntype CreateUserResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tUser          *User                  `protobuf:\"bytes,1,opt,name=user,proto3\" json:\"user,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *CreateUserResponse) Reset() {\n\t*x = CreateUserResponse{}\n\tmi := &file_headscale_v1_user_proto_msgTypes[2]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *CreateUserResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*CreateUserResponse) ProtoMessage() {}\n\nfunc (x *CreateUserResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_user_proto_msgTypes[2]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use CreateUserResponse.ProtoReflect.Descriptor instead.\nfunc (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_user_proto_rawDescGZIP(), []int{2}\n}\n\nfunc (x *CreateUserResponse) GetUser() *User {\n\tif x != nil {\n\t\treturn x.User\n\t}\n\treturn nil\n}\n\ntype RenameUserRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tOldId         uint64                 `protobuf:\"varint,1,opt,name=old_id,json=oldId,proto3\" json:\"old_id,omitempty\"`\n\tNewName       string                 `protobuf:\"bytes,2,opt,name=new_name,json=newName,proto3\" 
json:\"new_name,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *RenameUserRequest) Reset() {\n\t*x = RenameUserRequest{}\n\tmi := &file_headscale_v1_user_proto_msgTypes[3]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *RenameUserRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*RenameUserRequest) ProtoMessage() {}\n\nfunc (x *RenameUserRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_user_proto_msgTypes[3]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use RenameUserRequest.ProtoReflect.Descriptor instead.\nfunc (*RenameUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_user_proto_rawDescGZIP(), []int{3}\n}\n\nfunc (x *RenameUserRequest) GetOldId() uint64 {\n\tif x != nil {\n\t\treturn x.OldId\n\t}\n\treturn 0\n}\n\nfunc (x *RenameUserRequest) GetNewName() string {\n\tif x != nil {\n\t\treturn x.NewName\n\t}\n\treturn \"\"\n}\n\ntype RenameUserResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tUser          *User                  `protobuf:\"bytes,1,opt,name=user,proto3\" json:\"user,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *RenameUserResponse) Reset() {\n\t*x = RenameUserResponse{}\n\tmi := &file_headscale_v1_user_proto_msgTypes[4]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *RenameUserResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*RenameUserResponse) ProtoMessage() {}\n\nfunc (x *RenameUserResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_user_proto_msgTypes[4]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use RenameUserResponse.ProtoReflect.Descriptor instead.\nfunc (*RenameUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_user_proto_rawDescGZIP(), []int{4}\n}\n\nfunc (x *RenameUserResponse) GetUser() *User {\n\tif x != nil {\n\t\treturn x.User\n\t}\n\treturn nil\n}\n\ntype DeleteUserRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tId            uint64                 `protobuf:\"varint,1,opt,name=id,proto3\" json:\"id,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *DeleteUserRequest) Reset() {\n\t*x = DeleteUserRequest{}\n\tmi := &file_headscale_v1_user_proto_msgTypes[5]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *DeleteUserRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*DeleteUserRequest) ProtoMessage() {}\n\nfunc (x *DeleteUserRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_user_proto_msgTypes[5]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use DeleteUserRequest.ProtoReflect.Descriptor instead.\nfunc (*DeleteUserRequest) Descriptor() ([]byte, []int) 
{\n\treturn file_headscale_v1_user_proto_rawDescGZIP(), []int{5}\n}\n\nfunc (x *DeleteUserRequest) GetId() uint64 {\n\tif x != nil {\n\t\treturn x.Id\n\t}\n\treturn 0\n}\n\ntype DeleteUserResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *DeleteUserResponse) Reset() {\n\t*x = DeleteUserResponse{}\n\tmi := &file_headscale_v1_user_proto_msgTypes[6]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *DeleteUserResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*DeleteUserResponse) ProtoMessage() {}\n\nfunc (x *DeleteUserResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_user_proto_msgTypes[6]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use DeleteUserResponse.ProtoReflect.Descriptor instead.\nfunc (*DeleteUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_user_proto_rawDescGZIP(), []int{6}\n}\n\ntype ListUsersRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tId            uint64                 `protobuf:\"varint,1,opt,name=id,proto3\" json:\"id,omitempty\"`\n\tName          string                 `protobuf:\"bytes,2,opt,name=name,proto3\" json:\"name,omitempty\"`\n\tEmail         string                 `protobuf:\"bytes,3,opt,name=email,proto3\" json:\"email,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *ListUsersRequest) Reset() {\n\t*x = ListUsersRequest{}\n\tmi := &file_headscale_v1_user_proto_msgTypes[7]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *ListUsersRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*ListUsersRequest) ProtoMessage() {}\n\nfunc (x *ListUsersRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_user_proto_msgTypes[7]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use ListUsersRequest.ProtoReflect.Descriptor instead.\nfunc (*ListUsersRequest) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_user_proto_rawDescGZIP(), []int{7}\n}\n\nfunc (x *ListUsersRequest) GetId() uint64 {\n\tif x != nil {\n\t\treturn x.Id\n\t}\n\treturn 0\n}\n\nfunc (x *ListUsersRequest) GetName() string {\n\tif x != nil {\n\t\treturn x.Name\n\t}\n\treturn \"\"\n}\n\nfunc (x *ListUsersRequest) GetEmail() string {\n\tif x != nil {\n\t\treturn x.Email\n\t}\n\treturn \"\"\n}\n\ntype ListUsersResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tUsers         []*User                `protobuf:\"bytes,1,rep,name=users,proto3\" json:\"users,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *ListUsersResponse) Reset() {\n\t*x = ListUsersResponse{}\n\tmi := &file_headscale_v1_user_proto_msgTypes[8]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *ListUsersResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*ListUsersResponse) ProtoMessage() {}\n\nfunc (x 
*ListUsersResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_headscale_v1_user_proto_msgTypes[8]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use ListUsersResponse.ProtoReflect.Descriptor instead.\nfunc (*ListUsersResponse) Descriptor() ([]byte, []int) {\n\treturn file_headscale_v1_user_proto_rawDescGZIP(), []int{8}\n}\n\nfunc (x *ListUsersResponse) GetUsers() []*User {\n\tif x != nil {\n\t\treturn x.Users\n\t}\n\treturn nil\n}\n\nvar File_headscale_v1_user_proto protoreflect.FileDescriptor\n\nconst file_headscale_v1_user_proto_rawDesc = \"\" +\n\t\"\\n\" +\n\t\"\\x17headscale/v1/user.proto\\x12\\fheadscale.v1\\x1a\\x1fgoogle/protobuf/timestamp.proto\\\"\\x83\\x02\\n\" +\n\t\"\\x04User\\x12\\x0e\\n\" +\n\t\"\\x02id\\x18\\x01 \\x01(\\x04R\\x02id\\x12\\x12\\n\" +\n\t\"\\x04name\\x18\\x02 \\x01(\\tR\\x04name\\x129\\n\" +\n\t\"\\n\" +\n\t\"created_at\\x18\\x03 \\x01(\\v2\\x1a.google.protobuf.TimestampR\\tcreatedAt\\x12!\\n\" +\n\t\"\\fdisplay_name\\x18\\x04 \\x01(\\tR\\vdisplayName\\x12\\x14\\n\" +\n\t\"\\x05email\\x18\\x05 \\x01(\\tR\\x05email\\x12\\x1f\\n\" +\n\t\"\\vprovider_id\\x18\\x06 \\x01(\\tR\\n\" +\n\t\"providerId\\x12\\x1a\\n\" +\n\t\"\\bprovider\\x18\\a \\x01(\\tR\\bprovider\\x12&\\n\" +\n\t\"\\x0fprofile_pic_url\\x18\\b \\x01(\\tR\\rprofilePicUrl\\\"\\x81\\x01\\n\" +\n\t\"\\x11CreateUserRequest\\x12\\x12\\n\" +\n\t\"\\x04name\\x18\\x01 \\x01(\\tR\\x04name\\x12!\\n\" +\n\t\"\\fdisplay_name\\x18\\x02 \\x01(\\tR\\vdisplayName\\x12\\x14\\n\" +\n\t\"\\x05email\\x18\\x03 \\x01(\\tR\\x05email\\x12\\x1f\\n\" +\n\t\"\\vpicture_url\\x18\\x04 \\x01(\\tR\\n\" +\n\t\"pictureUrl\\\"<\\n\" +\n\t\"\\x12CreateUserResponse\\x12&\\n\" +\n\t\"\\x04user\\x18\\x01 \\x01(\\v2\\x12.headscale.v1.UserR\\x04user\\\"E\\n\" +\n\t\"\\x11RenameUserRequest\\x12\\x15\\n\" +\n\t\"\\x06old_id\\x18\\x01 \\x01(\\x04R\\x05oldId\\x12\\x19\\n\" +\n\t\"\\bnew_name\\x18\\x02 \\x01(\\tR\\anewName\\\"<\\n\" +\n\t\"\\x12RenameUserResponse\\x12&\\n\" +\n\t\"\\x04user\\x18\\x01 \\x01(\\v2\\x12.headscale.v1.UserR\\x04user\\\"#\\n\" +\n\t\"\\x11DeleteUserRequest\\x12\\x0e\\n\" +\n\t\"\\x02id\\x18\\x01 \\x01(\\x04R\\x02id\\\"\\x14\\n\" +\n\t\"\\x12DeleteUserResponse\\\"L\\n\" +\n\t\"\\x10ListUsersRequest\\x12\\x0e\\n\" +\n\t\"\\x02id\\x18\\x01 \\x01(\\x04R\\x02id\\x12\\x12\\n\" +\n\t\"\\x04name\\x18\\x02 \\x01(\\tR\\x04name\\x12\\x14\\n\" +\n\t\"\\x05email\\x18\\x03 \\x01(\\tR\\x05email\\\"=\\n\" +\n\t\"\\x11ListUsersResponse\\x12(\\n\" +\n\t\"\\x05users\\x18\\x01 \\x03(\\v2\\x12.headscale.v1.UserR\\x05usersB)Z'github.com/juanfont/headscale/gen/go/v1b\\x06proto3\"\n\nvar (\n\tfile_headscale_v1_user_proto_rawDescOnce sync.Once\n\tfile_headscale_v1_user_proto_rawDescData []byte\n)\n\nfunc file_headscale_v1_user_proto_rawDescGZIP() []byte {\n\tfile_headscale_v1_user_proto_rawDescOnce.Do(func() {\n\t\tfile_headscale_v1_user_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_user_proto_rawDesc), len(file_headscale_v1_user_proto_rawDesc)))\n\t})\n\treturn file_headscale_v1_user_proto_rawDescData\n}\n\nvar file_headscale_v1_user_proto_msgTypes = make([]protoimpl.MessageInfo, 9)\nvar file_headscale_v1_user_proto_goTypes = []any{\n\t(*User)(nil),                  // 0: headscale.v1.User\n\t(*CreateUserRequest)(nil),     // 1: headscale.v1.CreateUserRequest\n\t(*CreateUserResponse)(nil),    // 2: 
headscale.v1.CreateUserResponse\n\t(*RenameUserRequest)(nil),     // 3: headscale.v1.RenameUserRequest\n\t(*RenameUserResponse)(nil),    // 4: headscale.v1.RenameUserResponse\n\t(*DeleteUserRequest)(nil),     // 5: headscale.v1.DeleteUserRequest\n\t(*DeleteUserResponse)(nil),    // 6: headscale.v1.DeleteUserResponse\n\t(*ListUsersRequest)(nil),      // 7: headscale.v1.ListUsersRequest\n\t(*ListUsersResponse)(nil),     // 8: headscale.v1.ListUsersResponse\n\t(*timestamppb.Timestamp)(nil), // 9: google.protobuf.Timestamp\n}\nvar file_headscale_v1_user_proto_depIdxs = []int32{\n\t9, // 0: headscale.v1.User.created_at:type_name -> google.protobuf.Timestamp\n\t0, // 1: headscale.v1.CreateUserResponse.user:type_name -> headscale.v1.User\n\t0, // 2: headscale.v1.RenameUserResponse.user:type_name -> headscale.v1.User\n\t0, // 3: headscale.v1.ListUsersResponse.users:type_name -> headscale.v1.User\n\t4, // [4:4] is the sub-list for method output_type\n\t4, // [4:4] is the sub-list for method input_type\n\t4, // [4:4] is the sub-list for extension type_name\n\t4, // [4:4] is the sub-list for extension extendee\n\t0, // [0:4] is the sub-list for field type_name\n}\n\nfunc init() { file_headscale_v1_user_proto_init() }\nfunc file_headscale_v1_user_proto_init() {\n\tif File_headscale_v1_user_proto != nil {\n\t\treturn\n\t}\n\ttype x struct{}\n\tout := protoimpl.TypeBuilder{\n\t\tFile: protoimpl.DescBuilder{\n\t\t\tGoPackagePath: reflect.TypeOf(x{}).PkgPath(),\n\t\t\tRawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_user_proto_rawDesc), len(file_headscale_v1_user_proto_rawDesc)),\n\t\t\tNumEnums:      0,\n\t\t\tNumMessages:   9,\n\t\t\tNumExtensions: 0,\n\t\t\tNumServices:   0,\n\t\t},\n\t\tGoTypes:           file_headscale_v1_user_proto_goTypes,\n\t\tDependencyIndexes: file_headscale_v1_user_proto_depIdxs,\n\t\tMessageInfos:      file_headscale_v1_user_proto_msgTypes,\n\t}.Build()\n\tFile_headscale_v1_user_proto = out.File\n\tfile_headscale_v1_user_proto_goTypes = nil\n\tfile_headscale_v1_user_proto_depIdxs = nil\n}\n"
  },
  {
    "path": "gen/openapiv2/headscale/v1/apikey.swagger.json",
    "content": "{\n  \"swagger\": \"2.0\",\n  \"info\": {\n    \"title\": \"headscale/v1/apikey.proto\",\n    \"version\": \"version not set\"\n  },\n  \"consumes\": [\n    \"application/json\"\n  ],\n  \"produces\": [\n    \"application/json\"\n  ],\n  \"paths\": {},\n  \"definitions\": {\n    \"protobufAny\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"@type\": {\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": {}\n    },\n    \"rpcStatus\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"code\": {\n          \"type\": \"integer\",\n          \"format\": \"int32\"\n        },\n        \"message\": {\n          \"type\": \"string\"\n        },\n        \"details\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"object\",\n            \"$ref\": \"#/definitions/protobufAny\"\n          }\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "gen/openapiv2/headscale/v1/auth.swagger.json",
    "content": "{\n  \"swagger\": \"2.0\",\n  \"info\": {\n    \"title\": \"headscale/v1/auth.proto\",\n    \"version\": \"version not set\"\n  },\n  \"consumes\": [\n    \"application/json\"\n  ],\n  \"produces\": [\n    \"application/json\"\n  ],\n  \"paths\": {},\n  \"definitions\": {\n    \"protobufAny\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"@type\": {\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": {}\n    },\n    \"rpcStatus\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"code\": {\n          \"type\": \"integer\",\n          \"format\": \"int32\"\n        },\n        \"message\": {\n          \"type\": \"string\"\n        },\n        \"details\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"object\",\n            \"$ref\": \"#/definitions/protobufAny\"\n          }\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "gen/openapiv2/headscale/v1/device.swagger.json",
    "content": "{\n  \"swagger\": \"2.0\",\n  \"info\": {\n    \"title\": \"headscale/v1/device.proto\",\n    \"version\": \"version not set\"\n  },\n  \"consumes\": [\n    \"application/json\"\n  ],\n  \"produces\": [\n    \"application/json\"\n  ],\n  \"paths\": {},\n  \"definitions\": {\n    \"protobufAny\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"@type\": {\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": {}\n    },\n    \"rpcStatus\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"code\": {\n          \"type\": \"integer\",\n          \"format\": \"int32\"\n        },\n        \"message\": {\n          \"type\": \"string\"\n        },\n        \"details\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"object\",\n            \"$ref\": \"#/definitions/protobufAny\"\n          }\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "gen/openapiv2/headscale/v1/headscale.swagger.json",
    "content": "{\n  \"swagger\": \"2.0\",\n  \"info\": {\n    \"title\": \"headscale/v1/headscale.proto\",\n    \"version\": \"version not set\"\n  },\n  \"tags\": [\n    {\n      \"name\": \"HeadscaleService\"\n    }\n  ],\n  \"consumes\": [\n    \"application/json\"\n  ],\n  \"produces\": [\n    \"application/json\"\n  ],\n  \"paths\": {\n    \"/api/v1/apikey\": {\n      \"get\": {\n        \"operationId\": \"HeadscaleService_ListApiKeys\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1ListApiKeysResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"tags\": [\n          \"HeadscaleService\"\n        ]\n      },\n      \"post\": {\n        \"summary\": \"--- ApiKeys start ---\",\n        \"operationId\": \"HeadscaleService_CreateApiKey\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1CreateApiKeyResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"parameters\": [\n          {\n            \"name\": \"body\",\n            \"in\": \"body\",\n            \"required\": true,\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1CreateApiKeyRequest\"\n            }\n          }\n        ],\n        \"tags\": [\n          \"HeadscaleService\"\n        ]\n      }\n    },\n    \"/api/v1/apikey/expire\": {\n      \"post\": {\n        \"operationId\": \"HeadscaleService_ExpireApiKey\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1ExpireApiKeyResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"parameters\": [\n          {\n            \"name\": \"body\",\n            \"in\": \"body\",\n            \"required\": true,\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1ExpireApiKeyRequest\"\n            }\n          }\n        ],\n        \"tags\": [\n          \"HeadscaleService\"\n        ]\n      }\n    },\n    \"/api/v1/apikey/{prefix}\": {\n      \"delete\": {\n        \"operationId\": \"HeadscaleService_DeleteApiKey\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1DeleteApiKeyResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"parameters\": [\n          {\n            \"name\": \"prefix\",\n            \"in\": \"path\",\n            \"required\": true,\n            \"type\": \"string\"\n          },\n          {\n            
\"name\": \"id\",\n            \"in\": \"query\",\n            \"required\": false,\n            \"type\": \"string\",\n            \"format\": \"uint64\"\n          }\n        ],\n        \"tags\": [\n          \"HeadscaleService\"\n        ]\n      }\n    },\n    \"/api/v1/auth/approve\": {\n      \"post\": {\n        \"operationId\": \"HeadscaleService_AuthApprove\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1AuthApproveResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"parameters\": [\n          {\n            \"name\": \"body\",\n            \"in\": \"body\",\n            \"required\": true,\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1AuthApproveRequest\"\n            }\n          }\n        ],\n        \"tags\": [\n          \"HeadscaleService\"\n        ]\n      }\n    },\n    \"/api/v1/auth/register\": {\n      \"post\": {\n        \"summary\": \"--- Auth start ---\",\n        \"operationId\": \"HeadscaleService_AuthRegister\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1AuthRegisterResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"parameters\": [\n          {\n            \"name\": \"body\",\n            \"in\": \"body\",\n            \"required\": true,\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1AuthRegisterRequest\"\n            }\n          }\n        ],\n        \"tags\": [\n          \"HeadscaleService\"\n        ]\n      }\n    },\n    \"/api/v1/auth/reject\": {\n      \"post\": {\n        \"operationId\": \"HeadscaleService_AuthReject\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1AuthRejectResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"parameters\": [\n          {\n            \"name\": \"body\",\n            \"in\": \"body\",\n            \"required\": true,\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1AuthRejectRequest\"\n            }\n          }\n        ],\n        \"tags\": [\n          \"HeadscaleService\"\n        ]\n      }\n    },\n    \"/api/v1/debug/node\": {\n      \"post\": {\n        \"summary\": \"--- Node start ---\",\n        \"operationId\": \"HeadscaleService_DebugCreateNode\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1DebugCreateNodeResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": 
\"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"parameters\": [\n          {\n            \"name\": \"body\",\n            \"in\": \"body\",\n            \"required\": true,\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1DebugCreateNodeRequest\"\n            }\n          }\n        ],\n        \"tags\": [\n          \"HeadscaleService\"\n        ]\n      }\n    },\n    \"/api/v1/health\": {\n      \"get\": {\n        \"summary\": \"--- Health start ---\",\n        \"operationId\": \"HeadscaleService_Health\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1HealthResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"tags\": [\n          \"HeadscaleService\"\n        ]\n      }\n    },\n    \"/api/v1/node\": {\n      \"get\": {\n        \"operationId\": \"HeadscaleService_ListNodes\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1ListNodesResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"parameters\": [\n          {\n            \"name\": \"user\",\n            \"in\": \"query\",\n            \"required\": false,\n            \"type\": \"string\"\n          }\n        ],\n        \"tags\": [\n          \"HeadscaleService\"\n        ]\n      }\n    },\n    \"/api/v1/node/backfillips\": {\n      \"post\": {\n        \"operationId\": \"HeadscaleService_BackfillNodeIPs\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1BackfillNodeIPsResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"parameters\": [\n          {\n            \"name\": \"confirmed\",\n            \"in\": \"query\",\n            \"required\": false,\n            \"type\": \"boolean\"\n          }\n        ],\n        \"tags\": [\n          \"HeadscaleService\"\n        ]\n      }\n    },\n    \"/api/v1/node/register\": {\n      \"post\": {\n        \"operationId\": \"HeadscaleService_RegisterNode\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1RegisterNodeResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"parameters\": [\n          {\n            \"name\": \"user\",\n            \"in\": \"query\",\n            \"required\": false,\n            \"type\": \"string\"\n          },\n          {\n            \"name\": \"key\",\n            \"in\": 
\"query\",\n            \"required\": false,\n            \"type\": \"string\"\n          }\n        ],\n        \"tags\": [\n          \"HeadscaleService\"\n        ]\n      }\n    },\n    \"/api/v1/node/{nodeId}\": {\n      \"get\": {\n        \"operationId\": \"HeadscaleService_GetNode\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1GetNodeResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"parameters\": [\n          {\n            \"name\": \"nodeId\",\n            \"in\": \"path\",\n            \"required\": true,\n            \"type\": \"string\",\n            \"format\": \"uint64\"\n          }\n        ],\n        \"tags\": [\n          \"HeadscaleService\"\n        ]\n      },\n      \"delete\": {\n        \"operationId\": \"HeadscaleService_DeleteNode\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1DeleteNodeResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"parameters\": [\n          {\n            \"name\": \"nodeId\",\n            \"in\": \"path\",\n            \"required\": true,\n            \"type\": \"string\",\n            \"format\": \"uint64\"\n          }\n        ],\n        \"tags\": [\n          \"HeadscaleService\"\n        ]\n      }\n    },\n    \"/api/v1/node/{nodeId}/approve_routes\": {\n      \"post\": {\n        \"operationId\": \"HeadscaleService_SetApprovedRoutes\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1SetApprovedRoutesResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"parameters\": [\n          {\n            \"name\": \"nodeId\",\n            \"in\": \"path\",\n            \"required\": true,\n            \"type\": \"string\",\n            \"format\": \"uint64\"\n          },\n          {\n            \"name\": \"body\",\n            \"in\": \"body\",\n            \"required\": true,\n            \"schema\": {\n              \"$ref\": \"#/definitions/HeadscaleServiceSetApprovedRoutesBody\"\n            }\n          }\n        ],\n        \"tags\": [\n          \"HeadscaleService\"\n        ]\n      }\n    },\n    \"/api/v1/node/{nodeId}/expire\": {\n      \"post\": {\n        \"operationId\": \"HeadscaleService_ExpireNode\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1ExpireNodeResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          
}\n        },\n        \"parameters\": [\n          {\n            \"name\": \"nodeId\",\n            \"in\": \"path\",\n            \"required\": true,\n            \"type\": \"string\",\n            \"format\": \"uint64\"\n          },\n          {\n            \"name\": \"expiry\",\n            \"in\": \"query\",\n            \"required\": false,\n            \"type\": \"string\",\n            \"format\": \"date-time\"\n          },\n          {\n            \"name\": \"disableExpiry\",\n            \"description\": \"When true, sets expiry to null (node will never expire).\",\n            \"in\": \"query\",\n            \"required\": false,\n            \"type\": \"boolean\"\n          }\n        ],\n        \"tags\": [\n          \"HeadscaleService\"\n        ]\n      }\n    },\n    \"/api/v1/node/{nodeId}/rename/{newName}\": {\n      \"post\": {\n        \"operationId\": \"HeadscaleService_RenameNode\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1RenameNodeResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"parameters\": [\n          {\n            \"name\": \"nodeId\",\n            \"in\": \"path\",\n            \"required\": true,\n            \"type\": \"string\",\n            \"format\": \"uint64\"\n          },\n          {\n            \"name\": \"newName\",\n            \"in\": \"path\",\n            \"required\": true,\n            \"type\": \"string\"\n          }\n        ],\n        \"tags\": [\n          \"HeadscaleService\"\n        ]\n      }\n    },\n    \"/api/v1/node/{nodeId}/tags\": {\n      \"post\": {\n        \"operationId\": \"HeadscaleService_SetTags\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1SetTagsResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"parameters\": [\n          {\n            \"name\": \"nodeId\",\n            \"in\": \"path\",\n            \"required\": true,\n            \"type\": \"string\",\n            \"format\": \"uint64\"\n          },\n          {\n            \"name\": \"body\",\n            \"in\": \"body\",\n            \"required\": true,\n            \"schema\": {\n              \"$ref\": \"#/definitions/HeadscaleServiceSetTagsBody\"\n            }\n          }\n        ],\n        \"tags\": [\n          \"HeadscaleService\"\n        ]\n      }\n    },\n    \"/api/v1/policy\": {\n      \"get\": {\n        \"summary\": \"--- Policy start ---\",\n        \"operationId\": \"HeadscaleService_GetPolicy\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1GetPolicyResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"tags\": [\n        
  \"HeadscaleService\"\n        ]\n      },\n      \"put\": {\n        \"operationId\": \"HeadscaleService_SetPolicy\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1SetPolicyResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"parameters\": [\n          {\n            \"name\": \"body\",\n            \"in\": \"body\",\n            \"required\": true,\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1SetPolicyRequest\"\n            }\n          }\n        ],\n        \"tags\": [\n          \"HeadscaleService\"\n        ]\n      }\n    },\n    \"/api/v1/preauthkey\": {\n      \"get\": {\n        \"operationId\": \"HeadscaleService_ListPreAuthKeys\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1ListPreAuthKeysResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"tags\": [\n          \"HeadscaleService\"\n        ]\n      },\n      \"delete\": {\n        \"operationId\": \"HeadscaleService_DeletePreAuthKey\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1DeletePreAuthKeyResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"parameters\": [\n          {\n            \"name\": \"id\",\n            \"in\": \"query\",\n            \"required\": false,\n            \"type\": \"string\",\n            \"format\": \"uint64\"\n          }\n        ],\n        \"tags\": [\n          \"HeadscaleService\"\n        ]\n      },\n      \"post\": {\n        \"summary\": \"--- PreAuthKeys start ---\",\n        \"operationId\": \"HeadscaleService_CreatePreAuthKey\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1CreatePreAuthKeyResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"parameters\": [\n          {\n            \"name\": \"body\",\n            \"in\": \"body\",\n            \"required\": true,\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1CreatePreAuthKeyRequest\"\n            }\n          }\n        ],\n        \"tags\": [\n          \"HeadscaleService\"\n        ]\n      }\n    },\n    \"/api/v1/preauthkey/expire\": {\n      \"post\": {\n        \"operationId\": \"HeadscaleService_ExpirePreAuthKey\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": 
{\n              \"$ref\": \"#/definitions/v1ExpirePreAuthKeyResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"parameters\": [\n          {\n            \"name\": \"body\",\n            \"in\": \"body\",\n            \"required\": true,\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1ExpirePreAuthKeyRequest\"\n            }\n          }\n        ],\n        \"tags\": [\n          \"HeadscaleService\"\n        ]\n      }\n    },\n    \"/api/v1/user\": {\n      \"get\": {\n        \"operationId\": \"HeadscaleService_ListUsers\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1ListUsersResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"parameters\": [\n          {\n            \"name\": \"id\",\n            \"in\": \"query\",\n            \"required\": false,\n            \"type\": \"string\",\n            \"format\": \"uint64\"\n          },\n          {\n            \"name\": \"name\",\n            \"in\": \"query\",\n            \"required\": false,\n            \"type\": \"string\"\n          },\n          {\n            \"name\": \"email\",\n            \"in\": \"query\",\n            \"required\": false,\n            \"type\": \"string\"\n          }\n        ],\n        \"tags\": [\n          \"HeadscaleService\"\n        ]\n      },\n      \"post\": {\n        \"summary\": \"--- User start ---\",\n        \"operationId\": \"HeadscaleService_CreateUser\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1CreateUserResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"parameters\": [\n          {\n            \"name\": \"body\",\n            \"in\": \"body\",\n            \"required\": true,\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1CreateUserRequest\"\n            }\n          }\n        ],\n        \"tags\": [\n          \"HeadscaleService\"\n        ]\n      }\n    },\n    \"/api/v1/user/{id}\": {\n      \"delete\": {\n        \"operationId\": \"HeadscaleService_DeleteUser\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1DeleteUserResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"parameters\": [\n          {\n            \"name\": \"id\",\n            \"in\": \"path\",\n            \"required\": true,\n            \"type\": \"string\",\n            \"format\": \"uint64\"\n          }\n        ],\n        \"tags\": [\n          
\"HeadscaleService\"\n        ]\n      }\n    },\n    \"/api/v1/user/{oldId}/rename/{newName}\": {\n      \"post\": {\n        \"operationId\": \"HeadscaleService_RenameUser\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"A successful response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/v1RenameUserResponse\"\n            }\n          },\n          \"default\": {\n            \"description\": \"An unexpected error response.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/rpcStatus\"\n            }\n          }\n        },\n        \"parameters\": [\n          {\n            \"name\": \"oldId\",\n            \"in\": \"path\",\n            \"required\": true,\n            \"type\": \"string\",\n            \"format\": \"uint64\"\n          },\n          {\n            \"name\": \"newName\",\n            \"in\": \"path\",\n            \"required\": true,\n            \"type\": \"string\"\n          }\n        ],\n        \"tags\": [\n          \"HeadscaleService\"\n        ]\n      }\n    }\n  },\n  \"definitions\": {\n    \"HeadscaleServiceSetApprovedRoutesBody\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"routes\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        }\n      }\n    },\n    \"HeadscaleServiceSetTagsBody\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"tags\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        }\n      }\n    },\n    \"protobufAny\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"@type\": {\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": {}\n    },\n    \"rpcStatus\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"code\": {\n          \"type\": \"integer\",\n          \"format\": \"int32\"\n        },\n        \"message\": {\n          \"type\": \"string\"\n        },\n        \"details\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"object\",\n            \"$ref\": \"#/definitions/protobufAny\"\n          }\n        }\n      }\n    },\n    \"v1ApiKey\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"id\": {\n          \"type\": \"string\",\n          \"format\": \"uint64\"\n        },\n        \"prefix\": {\n          \"type\": \"string\"\n        },\n        \"expiration\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"createdAt\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"lastSeen\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        }\n      }\n    },\n    \"v1AuthApproveRequest\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"authId\": {\n          \"type\": \"string\"\n        }\n      }\n    },\n    \"v1AuthApproveResponse\": {\n      \"type\": \"object\"\n    },\n    \"v1AuthRegisterRequest\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"user\": {\n          \"type\": \"string\"\n        },\n        \"authId\": {\n          \"type\": \"string\"\n        }\n      }\n    },\n    \"v1AuthRegisterResponse\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"node\": {\n          \"$ref\": \"#/definitions/v1Node\"\n        }\n      }\n    },\n    \"v1AuthRejectRequest\": {\n      
\"type\": \"object\",\n      \"properties\": {\n        \"authId\": {\n          \"type\": \"string\"\n        }\n      }\n    },\n    \"v1AuthRejectResponse\": {\n      \"type\": \"object\"\n    },\n    \"v1BackfillNodeIPsResponse\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"changes\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        }\n      }\n    },\n    \"v1CreateApiKeyRequest\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"expiration\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        }\n      }\n    },\n    \"v1CreateApiKeyResponse\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"apiKey\": {\n          \"type\": \"string\"\n        }\n      }\n    },\n    \"v1CreatePreAuthKeyRequest\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"user\": {\n          \"type\": \"string\",\n          \"format\": \"uint64\"\n        },\n        \"reusable\": {\n          \"type\": \"boolean\"\n        },\n        \"ephemeral\": {\n          \"type\": \"boolean\"\n        },\n        \"expiration\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"aclTags\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        }\n      }\n    },\n    \"v1CreatePreAuthKeyResponse\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"preAuthKey\": {\n          \"$ref\": \"#/definitions/v1PreAuthKey\"\n        }\n      }\n    },\n    \"v1CreateUserRequest\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"name\": {\n          \"type\": \"string\"\n        },\n        \"displayName\": {\n          \"type\": \"string\"\n        },\n        \"email\": {\n          \"type\": \"string\"\n        },\n        \"pictureUrl\": {\n          \"type\": \"string\"\n        }\n      }\n    },\n    \"v1CreateUserResponse\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"user\": {\n          \"$ref\": \"#/definitions/v1User\"\n        }\n      }\n    },\n    \"v1DebugCreateNodeRequest\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"user\": {\n          \"type\": \"string\"\n        },\n        \"key\": {\n          \"type\": \"string\"\n        },\n        \"name\": {\n          \"type\": \"string\"\n        },\n        \"routes\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        }\n      }\n    },\n    \"v1DebugCreateNodeResponse\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"node\": {\n          \"$ref\": \"#/definitions/v1Node\"\n        }\n      }\n    },\n    \"v1DeleteApiKeyResponse\": {\n      \"type\": \"object\"\n    },\n    \"v1DeleteNodeResponse\": {\n      \"type\": \"object\"\n    },\n    \"v1DeletePreAuthKeyResponse\": {\n      \"type\": \"object\"\n    },\n    \"v1DeleteUserResponse\": {\n      \"type\": \"object\"\n    },\n    \"v1ExpireApiKeyRequest\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"prefix\": {\n          \"type\": \"string\"\n        },\n        \"id\": {\n          \"type\": \"string\",\n          \"format\": \"uint64\"\n        }\n      }\n    },\n    \"v1ExpireApiKeyResponse\": {\n      \"type\": \"object\"\n    },\n    \"v1ExpireNodeResponse\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"node\": {\n          
\"$ref\": \"#/definitions/v1Node\"\n        }\n      }\n    },\n    \"v1ExpirePreAuthKeyRequest\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"id\": {\n          \"type\": \"string\",\n          \"format\": \"uint64\"\n        }\n      }\n    },\n    \"v1ExpirePreAuthKeyResponse\": {\n      \"type\": \"object\"\n    },\n    \"v1GetNodeResponse\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"node\": {\n          \"$ref\": \"#/definitions/v1Node\"\n        }\n      }\n    },\n    \"v1GetPolicyResponse\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"policy\": {\n          \"type\": \"string\"\n        },\n        \"updatedAt\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        }\n      }\n    },\n    \"v1HealthResponse\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"databaseConnectivity\": {\n          \"type\": \"boolean\"\n        }\n      }\n    },\n    \"v1ListApiKeysResponse\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"apiKeys\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"object\",\n            \"$ref\": \"#/definitions/v1ApiKey\"\n          }\n        }\n      }\n    },\n    \"v1ListNodesResponse\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"nodes\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"object\",\n            \"$ref\": \"#/definitions/v1Node\"\n          }\n        }\n      }\n    },\n    \"v1ListPreAuthKeysResponse\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"preAuthKeys\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"object\",\n            \"$ref\": \"#/definitions/v1PreAuthKey\"\n          }\n        }\n      }\n    },\n    \"v1ListUsersResponse\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"users\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"object\",\n            \"$ref\": \"#/definitions/v1User\"\n          }\n        }\n      }\n    },\n    \"v1Node\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"id\": {\n          \"type\": \"string\",\n          \"format\": \"uint64\"\n        },\n        \"machineKey\": {\n          \"type\": \"string\"\n        },\n        \"nodeKey\": {\n          \"type\": \"string\"\n        },\n        \"discoKey\": {\n          \"type\": \"string\"\n        },\n        \"ipAddresses\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        },\n        \"name\": {\n          \"type\": \"string\"\n        },\n        \"user\": {\n          \"$ref\": \"#/definitions/v1User\"\n        },\n        \"lastSeen\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"expiry\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"preAuthKey\": {\n          \"$ref\": \"#/definitions/v1PreAuthKey\"\n        },\n        \"createdAt\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"registerMethod\": {\n          \"$ref\": \"#/definitions/v1RegisterMethod\"\n        },\n        \"givenName\": {\n          \"type\": \"string\",\n          \"title\": \"Deprecated\\nrepeated string forced_tags = 18;\\nrepeated string invalid_tags = 19;\\nrepeated string valid_tags = 20;\"\n        },\n     
   \"online\": {\n          \"type\": \"boolean\"\n        },\n        \"approvedRoutes\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        },\n        \"availableRoutes\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        },\n        \"subnetRoutes\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        },\n        \"tags\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        }\n      }\n    },\n    \"v1PreAuthKey\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"user\": {\n          \"$ref\": \"#/definitions/v1User\"\n        },\n        \"id\": {\n          \"type\": \"string\",\n          \"format\": \"uint64\"\n        },\n        \"key\": {\n          \"type\": \"string\"\n        },\n        \"reusable\": {\n          \"type\": \"boolean\"\n        },\n        \"ephemeral\": {\n          \"type\": \"boolean\"\n        },\n        \"used\": {\n          \"type\": \"boolean\"\n        },\n        \"expiration\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"createdAt\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"aclTags\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        }\n      }\n    },\n    \"v1RegisterMethod\": {\n      \"type\": \"string\",\n      \"enum\": [\n        \"REGISTER_METHOD_UNSPECIFIED\",\n        \"REGISTER_METHOD_AUTH_KEY\",\n        \"REGISTER_METHOD_CLI\",\n        \"REGISTER_METHOD_OIDC\"\n      ],\n      \"default\": \"REGISTER_METHOD_UNSPECIFIED\"\n    },\n    \"v1RegisterNodeResponse\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"node\": {\n          \"$ref\": \"#/definitions/v1Node\"\n        }\n      }\n    },\n    \"v1RenameNodeResponse\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"node\": {\n          \"$ref\": \"#/definitions/v1Node\"\n        }\n      }\n    },\n    \"v1RenameUserResponse\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"user\": {\n          \"$ref\": \"#/definitions/v1User\"\n        }\n      }\n    },\n    \"v1SetApprovedRoutesResponse\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"node\": {\n          \"$ref\": \"#/definitions/v1Node\"\n        }\n      }\n    },\n    \"v1SetPolicyRequest\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"policy\": {\n          \"type\": \"string\"\n        }\n      }\n    },\n    \"v1SetPolicyResponse\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"policy\": {\n          \"type\": \"string\"\n        },\n        \"updatedAt\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        }\n      }\n    },\n    \"v1SetTagsResponse\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"node\": {\n          \"$ref\": \"#/definitions/v1Node\"\n        }\n      }\n    },\n    \"v1User\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"id\": {\n          \"type\": \"string\",\n          \"format\": \"uint64\"\n        },\n        \"name\": {\n          \"type\": \"string\"\n        },\n        \"createdAt\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n   
     \"displayName\": {\n          \"type\": \"string\"\n        },\n        \"email\": {\n          \"type\": \"string\"\n        },\n        \"providerId\": {\n          \"type\": \"string\"\n        },\n        \"provider\": {\n          \"type\": \"string\"\n        },\n        \"profilePicUrl\": {\n          \"type\": \"string\"\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "gen/openapiv2/headscale/v1/node.swagger.json",
    "content": "{\n  \"swagger\": \"2.0\",\n  \"info\": {\n    \"title\": \"headscale/v1/node.proto\",\n    \"version\": \"version not set\"\n  },\n  \"consumes\": [\n    \"application/json\"\n  ],\n  \"produces\": [\n    \"application/json\"\n  ],\n  \"paths\": {},\n  \"definitions\": {\n    \"protobufAny\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"@type\": {\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": {}\n    },\n    \"rpcStatus\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"code\": {\n          \"type\": \"integer\",\n          \"format\": \"int32\"\n        },\n        \"message\": {\n          \"type\": \"string\"\n        },\n        \"details\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"object\",\n            \"$ref\": \"#/definitions/protobufAny\"\n          }\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "gen/openapiv2/headscale/v1/policy.swagger.json",
    "content": "{\n  \"swagger\": \"2.0\",\n  \"info\": {\n    \"title\": \"headscale/v1/policy.proto\",\n    \"version\": \"version not set\"\n  },\n  \"consumes\": [\n    \"application/json\"\n  ],\n  \"produces\": [\n    \"application/json\"\n  ],\n  \"paths\": {},\n  \"definitions\": {\n    \"protobufAny\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"@type\": {\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": {}\n    },\n    \"rpcStatus\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"code\": {\n          \"type\": \"integer\",\n          \"format\": \"int32\"\n        },\n        \"message\": {\n          \"type\": \"string\"\n        },\n        \"details\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"object\",\n            \"$ref\": \"#/definitions/protobufAny\"\n          }\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "gen/openapiv2/headscale/v1/preauthkey.swagger.json",
    "content": "{\n  \"swagger\": \"2.0\",\n  \"info\": {\n    \"title\": \"headscale/v1/preauthkey.proto\",\n    \"version\": \"version not set\"\n  },\n  \"consumes\": [\n    \"application/json\"\n  ],\n  \"produces\": [\n    \"application/json\"\n  ],\n  \"paths\": {},\n  \"definitions\": {\n    \"protobufAny\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"@type\": {\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": {}\n    },\n    \"rpcStatus\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"code\": {\n          \"type\": \"integer\",\n          \"format\": \"int32\"\n        },\n        \"message\": {\n          \"type\": \"string\"\n        },\n        \"details\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"object\",\n            \"$ref\": \"#/definitions/protobufAny\"\n          }\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "gen/openapiv2/headscale/v1/user.swagger.json",
    "content": "{\n  \"swagger\": \"2.0\",\n  \"info\": {\n    \"title\": \"headscale/v1/user.proto\",\n    \"version\": \"version not set\"\n  },\n  \"consumes\": [\n    \"application/json\"\n  ],\n  \"produces\": [\n    \"application/json\"\n  ],\n  \"paths\": {},\n  \"definitions\": {\n    \"protobufAny\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"@type\": {\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": {}\n    },\n    \"rpcStatus\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"code\": {\n          \"type\": \"integer\",\n          \"format\": \"int32\"\n        },\n        \"message\": {\n          \"type\": \"string\"\n        },\n        \"details\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"object\",\n            \"$ref\": \"#/definitions/protobufAny\"\n          }\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "go.mod",
    "content": "module github.com/juanfont/headscale\n\ngo 1.26.1\n\nrequire (\n\tgithub.com/arl/statsviz v0.8.0\n\tgithub.com/cenkalti/backoff/v5 v5.0.3\n\tgithub.com/chasefleming/elem-go v0.31.0\n\tgithub.com/coder/websocket v1.8.14\n\tgithub.com/coreos/go-oidc/v3 v3.17.0\n\tgithub.com/creachadair/command v0.2.0\n\tgithub.com/creachadair/flax v0.0.5\n\tgithub.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc\n\tgithub.com/docker/docker v28.5.2+incompatible\n\tgithub.com/fsnotify/fsnotify v1.9.0\n\tgithub.com/glebarez/sqlite v1.11.0\n\tgithub.com/go-chi/chi/v5 v5.2.5\n\tgithub.com/go-chi/metrics v0.1.1\n\tgithub.com/go-gormigrate/gormigrate/v2 v2.1.5\n\tgithub.com/go-json-experiment/json v0.0.0-20251027170946-4849db3c2f7e\n\tgithub.com/gofrs/uuid/v5 v5.4.0\n\tgithub.com/google/go-cmp v0.7.0\n\tgithub.com/gorilla/mux v1.8.1\n\tgithub.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7\n\tgithub.com/jagottsicher/termcolor v1.0.2\n\tgithub.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25\n\tgithub.com/ory/dockertest/v3 v3.12.0\n\tgithub.com/philip-bui/grpc-zerolog v1.0.1\n\tgithub.com/pkg/profile v1.7.0\n\tgithub.com/prometheus/client_golang v1.23.2\n\tgithub.com/prometheus/common v0.67.5\n\tgithub.com/pterm/pterm v0.12.82\n\tgithub.com/puzpuzpuz/xsync/v4 v4.4.0\n\tgithub.com/rs/zerolog v1.34.0\n\tgithub.com/samber/lo v1.52.0\n\tgithub.com/sasha-s/go-deadlock v0.3.6\n\tgithub.com/spf13/cobra v1.10.2\n\tgithub.com/spf13/viper v1.21.0\n\tgithub.com/stretchr/testify v1.11.1\n\tgithub.com/tailscale/hujson v0.0.0-20250605163823-992244df8c5a\n\tgithub.com/tailscale/squibble v0.0.0-20251104223530-a961feffb67f\n\tgithub.com/tailscale/tailsql v0.0.0-20260105194658-001575c3ca09\n\tgithub.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e\n\tgo4.org/netipx v0.0.0-20231129151722-fdeea329fbba\n\tgolang.org/x/crypto v0.48.0\n\tgolang.org/x/exp v0.0.0-20260112195511-716be5621a96\n\tgolang.org/x/net v0.50.0\n\tgolang.org/x/oauth2 v0.34.0\n\tgolang.org/x/sync v0.19.0\n\tgoogle.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20\n\tgoogle.golang.org/grpc v1.78.0\n\tgoogle.golang.org/protobuf v1.36.11\n\tgopkg.in/yaml.v3 v3.0.1\n\tgorm.io/driver/postgres v1.6.0\n\tgorm.io/gorm v1.31.1\n\ttailscale.com v1.94.1\n\tzgo.at/zcache/v2 v2.4.1\n\tzombiezen.com/go/postgrestest v1.0.1\n)\n\n// NOTE: modernc sqlite has a fragile dependency\n// chain and it is important that they are updated\n// in lockstep to ensure that they do not break\n// some architectures and similar at runtime:\n// https://github.com/juanfont/headscale/issues/2188\n//\n// Fragile libc dependency:\n// https://pkg.go.dev/modernc.org/sqlite#hdr-Fragile_modernc_org_libc_dependency\n// https://gitlab.com/cznic/sqlite/-/issues/177\n//\n// To upgrade, determine the new SQLite version to\n// be used, and consult the `go.mod` file:\n// https://gitlab.com/cznic/sqlite/-/blob/master/go.mod\n// to find\n// the appropriate `libc` version, then upgrade them\n// together, e.g:\n// go get modernc.org/libc@v1.55.3 modernc.org/sqlite@v1.33.1\nrequire (\n\tmodernc.org/libc v1.67.6 // indirect\n\tmodernc.org/mathutil v1.7.1 // indirect\n\tmodernc.org/memory v1.11.0 // indirect\n\tmodernc.org/sqlite v1.44.3\n)\n\n// NOTE: gvisor must be updated in lockstep with\n// tailscale.com. 
The version used here should match\n// the version required by the tailscale.com dependency.\n// To find the correct version, check tailscale.com's\n// go.mod file for the gvisor.dev/gvisor version:\n// https://github.com/tailscale/tailscale/blob/main/go.mod\nrequire gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 // indirect\n\nrequire (\n\tatomicgo.dev/cursor v0.2.0 // indirect\n\tatomicgo.dev/keyboard v0.2.9 // indirect\n\tatomicgo.dev/schedule v0.1.0 // indirect\n\tdario.cat/mergo v1.0.2 // indirect\n\tfilippo.io/edwards25519 v1.1.0 // indirect\n\tgithub.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect\n\tgithub.com/Microsoft/go-winio v0.6.2 // indirect\n\tgithub.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect\n\tgithub.com/akutz/memconn v0.1.0 // indirect\n\tgithub.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e // indirect\n\tgithub.com/aws/aws-sdk-go-v2 v1.41.1 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/config v1.32.7 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/credentials v1.19.7 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect\n\tgithub.com/aws/smithy-go v1.24.0 // indirect\n\tgithub.com/axiomhq/hyperloglog v0.2.6 // indirect\n\tgithub.com/beorn7/perks v1.0.1 // indirect\n\tgithub.com/cenkalti/backoff/v4 v4.3.0 // indirect\n\tgithub.com/cespare/xxhash/v2 v2.3.0 // indirect\n\tgithub.com/clipperhouse/stringish v0.1.1 // indirect\n\tgithub.com/clipperhouse/uax29/v2 v2.5.0 // indirect\n\tgithub.com/containerd/console v1.0.5 // indirect\n\tgithub.com/containerd/continuity v0.4.5 // indirect\n\tgithub.com/containerd/errdefs v1.0.0 // indirect\n\tgithub.com/containerd/errdefs/pkg v0.3.0 // indirect\n\tgithub.com/creachadair/mds v0.25.15 // indirect\n\tgithub.com/creachadair/msync v0.8.2 // indirect\n\tgithub.com/dblohm7/wingoes v0.0.0-20250822163801-6d8e6105c62d // indirect\n\tgithub.com/dgryski/go-metro v0.0.0-20250106013310-edb8663e5e33 // indirect\n\tgithub.com/distribution/reference v0.6.0 // indirect\n\tgithub.com/docker/cli v29.2.1+incompatible // indirect\n\tgithub.com/docker/go-connections v0.6.0 // indirect\n\tgithub.com/docker/go-units v0.5.0 // indirect\n\tgithub.com/dustin/go-humanize v1.0.1 // indirect\n\tgithub.com/felixge/fgprof v0.9.5 // indirect\n\tgithub.com/felixge/httpsnoop v1.0.4 // indirect\n\tgithub.com/fxamacker/cbor/v2 v2.9.0 // indirect\n\tgithub.com/gaissmai/bart v0.26.1 // indirect\n\tgithub.com/glebarez/go-sqlite v1.22.0 // indirect\n\tgithub.com/go-jose/go-jose/v3 v3.0.4 // indirect\n\tgithub.com/go-jose/go-jose/v4 v4.1.3 // indirect\n\tgithub.com/go-logr/logr v1.4.3 // indirect\n\tgithub.com/go-logr/stdr v1.2.2 // indirect\n\tgithub.com/go-viper/mapstructure/v2 v2.5.0 // indirect\n\tgithub.com/godbus/dbus/v5 v5.2.2 // indirect\n\tgithub.com/golang-jwt/jwt/v5 v5.3.1 // indirect\n\tgithub.com/golang/groupcache 
v0.0.0-20241129210726-2c02b8208cf8 // indirect\n\tgithub.com/golang/protobuf v1.5.4 // indirect\n\tgithub.com/google/btree v1.1.3 // indirect\n\tgithub.com/google/go-github v17.0.0+incompatible // indirect\n\tgithub.com/google/go-querystring v1.2.0 // indirect\n\tgithub.com/google/pprof v0.0.0-20260202012954-cb029daf43ef // indirect\n\tgithub.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect\n\tgithub.com/google/uuid v1.6.0 // indirect\n\tgithub.com/gookit/color v1.6.0 // indirect\n\tgithub.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect\n\tgithub.com/hashicorp/go-version v1.8.0 // indirect\n\tgithub.com/hdevalence/ed25519consensus v0.2.0 // indirect\n\tgithub.com/huin/goupnp v1.3.0 // indirect\n\tgithub.com/inconshreveable/mousetrap v1.1.0 // indirect\n\tgithub.com/jackc/pgpassfile v1.0.0 // indirect\n\tgithub.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect\n\tgithub.com/jackc/pgx/v5 v5.8.0 // indirect\n\tgithub.com/jackc/puddle/v2 v2.2.2 // indirect\n\tgithub.com/jinzhu/inflection v1.0.0 // indirect\n\tgithub.com/jinzhu/now v1.1.5 // indirect\n\tgithub.com/jsimonetti/rtnetlink v1.4.2 // indirect\n\tgithub.com/kamstrup/intmap v0.5.2 // indirect\n\tgithub.com/klauspost/compress v1.18.3 // indirect\n\tgithub.com/lib/pq v1.11.1 // indirect\n\tgithub.com/lithammer/fuzzysearch v1.1.8 // indirect\n\tgithub.com/mattn/go-colorable v0.1.14 // indirect\n\tgithub.com/mattn/go-isatty v0.0.20 // indirect\n\tgithub.com/mattn/go-runewidth v0.0.19 // indirect\n\tgithub.com/mdlayher/netlink v1.8.0 // indirect\n\tgithub.com/mdlayher/socket v0.5.1 // indirect\n\tgithub.com/mitchellh/go-ps v1.0.0 // indirect\n\tgithub.com/moby/docker-image-spec v1.3.1 // indirect\n\tgithub.com/moby/moby/api v1.53.0 // indirect\n\tgithub.com/moby/moby/client v0.2.2 // indirect\n\tgithub.com/moby/sys/atomicwriter v0.1.0 // indirect\n\tgithub.com/moby/sys/user v0.4.0 // indirect\n\tgithub.com/moby/term v0.5.2 // indirect\n\tgithub.com/morikuni/aec v1.1.0 // indirect\n\tgithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect\n\tgithub.com/ncruces/go-strftime v1.0.0 // indirect\n\tgithub.com/opencontainers/go-digest v1.0.0 // indirect\n\tgithub.com/opencontainers/image-spec v1.1.1 // indirect\n\tgithub.com/opencontainers/runc v1.3.2 // indirect\n\tgithub.com/pelletier/go-toml/v2 v2.2.4 // indirect\n\tgithub.com/petermattis/goid v0.0.0-20260113132338-7c7de50cc741 // indirect\n\tgithub.com/pires/go-proxyproto v0.9.2 // indirect\n\tgithub.com/pkg/errors v0.9.1 // indirect\n\tgithub.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect\n\tgithub.com/prometheus-community/pro-bing v0.7.0 // indirect\n\tgithub.com/prometheus/client_model v0.6.2 // indirect\n\tgithub.com/prometheus/procfs v0.19.2 // indirect\n\tgithub.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect\n\tgithub.com/safchain/ethtool v0.7.0 // indirect\n\tgithub.com/sagikazarmark/locafero v0.12.0 // indirect\n\tgithub.com/sirupsen/logrus v1.9.4 // indirect\n\tgithub.com/spf13/afero v1.15.0 // indirect\n\tgithub.com/spf13/cast v1.10.0 // indirect\n\tgithub.com/spf13/pflag v1.0.10 // indirect\n\tgithub.com/subosito/gotenv v1.6.0 // indirect\n\tgithub.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e // indirect\n\tgithub.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 // indirect\n\tgithub.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc // indirect\n\tgithub.com/tailscale/setec v0.0.0-20260115174028-19d190c5556d // 
indirect\n\tgithub.com/tailscale/web-client-prebuilt v0.0.0-20251127225136-f19339b67368 // indirect\n\tgithub.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da // indirect\n\tgithub.com/x448/float16 v0.8.4 // indirect\n\tgithub.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect\n\tgithub.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect\n\tgithub.com/xeipuuv/gojsonschema v1.2.0 // indirect\n\tgithub.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect\n\tgo.opentelemetry.io/auto/sdk v1.2.1 // indirect\n\tgo.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect\n\tgo.opentelemetry.io/otel v1.40.0 // indirect\n\tgo.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 // indirect\n\tgo.opentelemetry.io/otel/metric v1.40.0 // indirect\n\tgo.opentelemetry.io/otel/trace v1.40.0 // indirect\n\tgo.yaml.in/yaml/v2 v2.4.3 // indirect\n\tgo.yaml.in/yaml/v3 v3.0.4 // indirect\n\tgo4.org/mem v0.0.0-20240501181205-ae6ca9944745 // indirect\n\tgolang.org/x/mod v0.33.0 // indirect\n\tgolang.org/x/sys v0.41.0 // indirect\n\tgolang.org/x/term v0.40.0 // indirect\n\tgolang.org/x/text v0.34.0 // indirect\n\tgolang.org/x/time v0.14.0 // indirect\n\tgolang.org/x/tools v0.42.0 // indirect\n\tgolang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect\n\tgolang.zx2c4.com/wireguard/windows v0.5.3 // indirect\n\tgoogle.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20 // indirect\n)\n\ntool (\n\tgolang.org/x/tools/cmd/stress\n\tgolang.org/x/tools/cmd/stringer\n\ttailscale.com/cmd/viewer\n)\n"
  },
  {
    "path": "go.sum",
    "content": "9fans.net/go v0.0.8-0.20250307142834-96bdba94b63f h1:1C7nZuxUMNz7eiQALRfiqNOm04+m3edWlRff/BYHf0Q=\n9fans.net/go v0.0.8-0.20250307142834-96bdba94b63f/go.mod h1:hHyrZRryGqVdqrknjq5OWDLGCTJ2NeEvtrpR96mjraM=\natomicgo.dev/assert v0.0.2 h1:FiKeMiZSgRrZsPo9qn/7vmr7mCsh5SZyXY4YGYiYwrg=\natomicgo.dev/assert v0.0.2/go.mod h1:ut4NcI3QDdJtlmAxQULOmA13Gz6e2DWbSAS8RUOmNYQ=\natomicgo.dev/cursor v0.2.0 h1:H6XN5alUJ52FZZUkI7AlJbUc1aW38GWZalpYRPpoPOw=\natomicgo.dev/cursor v0.2.0/go.mod h1:Lr4ZJB3U7DfPPOkbH7/6TOtJ4vFGHlgj1nc+n900IpU=\natomicgo.dev/keyboard v0.2.9 h1:tOsIid3nlPLZ3lwgG8KZMp/SFmr7P0ssEN5JUsm78K8=\natomicgo.dev/keyboard v0.2.9/go.mod h1:BC4w9g00XkxH/f1HXhW2sXmJFOCWbKn9xrOunSFtExQ=\natomicgo.dev/schedule v0.1.0 h1:nTthAbhZS5YZmgYbb2+DH8uQIZcTlIrd4eYr3UQxEjs=\natomicgo.dev/schedule v0.1.0/go.mod h1:xeUa3oAkiuHYh8bKiQBRojqAMq3PXXbJujjb0hw8pEU=\ndario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=\ndario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=\nfilippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=\nfilippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=\nfilippo.io/mkcert v1.4.4 h1:8eVbbwfVlaqUM7OwuftKc2nuYOoTDQWqsoXmzoXZdbc=\nfilippo.io/mkcert v1.4.4/go.mod h1:VyvOchVuAye3BoUsPUOOofKygVwLV2KQMVFJNRq+1dA=\ngithub.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=\ngithub.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=\ngithub.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=\ngithub.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=\ngithub.com/MarvinJWendt/testza v0.1.0/go.mod h1:7AxNvlfeHP7Z/hDQ5JtE3OKYT3XFUeLCDE2DQninSqs=\ngithub.com/MarvinJWendt/testza v0.2.1/go.mod h1:God7bhG8n6uQxwdScay+gjm9/LnO4D3kkcZX4hv9Rp8=\ngithub.com/MarvinJWendt/testza v0.2.8/go.mod h1:nwIcjmr0Zz+Rcwfh3/4UhBp7ePKVhuBExvZqnKYWlII=\ngithub.com/MarvinJWendt/testza v0.2.10/go.mod h1:pd+VWsoGUiFtq+hRKSU1Bktnn+DMCSrDrXDpX2bG66k=\ngithub.com/MarvinJWendt/testza v0.2.12/go.mod h1:JOIegYyV7rX+7VZ9r77L/eH6CfJHHzXjB69adAhzZkI=\ngithub.com/MarvinJWendt/testza v0.3.0/go.mod h1:eFcL4I0idjtIx8P9C6KkAuLgATNKpX4/2oUqKc6bF2c=\ngithub.com/MarvinJWendt/testza v0.4.2/go.mod h1:mSdhXiKH8sg/gQehJ63bINcCKp7RtYewEjXsvsVUPbE=\ngithub.com/MarvinJWendt/testza v0.5.2 h1:53KDo64C1z/h/d/stCYCPY69bt/OSwjq5KpFNwi+zB4=\ngithub.com/MarvinJWendt/testza v0.5.2/go.mod h1:xu53QFE5sCdjtMCKk8YMQ2MnymimEctc4n3EjyIYvEY=\ngithub.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=\ngithub.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=\ngithub.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=\ngithub.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=\ngithub.com/akutz/memconn v0.1.0 h1:NawI0TORU4hcOMsMr11g7vwlCdkYeLKXBcxWu2W/P8A=\ngithub.com/akutz/memconn v0.1.0/go.mod h1:Jo8rI7m0NieZyLI5e2CDlRdRqRRB4S7Xp77ukDjH+Fw=\ngithub.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e h1:4dAU9FXIyQktpoUAgOJK3OTFc/xug0PCXYCqU0FgDKI=\ngithub.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=\ngithub.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=\ngithub.com/anmitsu/go-shlex 
v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=\ngithub.com/arl/statsviz v0.8.0 h1:O6GjjVxEDxcByAucOSl29HaGYLXsuwA3ujJw8H9E7/U=\ngithub.com/arl/statsviz v0.8.0/go.mod h1:XlrbiT7xYT03xaW9JMMfD8KFUhBOESJwfyNJu83PbB0=\ngithub.com/atomicgo/cursor v0.0.1/go.mod h1:cBON2QmmrysudxNBFthvMtN32r3jxVRIvzkUiF/RuIk=\ngithub.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=\ngithub.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=\ngithub.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU=\ngithub.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4=\ngithub.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY=\ngithub.com/aws/aws-sdk-go-v2/config v1.32.7/go.mod h1:2/Qm5vKUU/r7Y+zUk/Ptt2MDAEKAfUtKc1+3U1Mo3oY=\ngithub.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8=\ngithub.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw=\ngithub.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY=\ngithub.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA=\ngithub.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=\ngithub.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ=\ngithub.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik=\ngithub.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=\ngithub.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=\ngithub.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=\ngithub.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16 h1:CjMzUs78RDDv4ROu3JnJn/Ig1r6ZD7/T2DXLLRpejic=\ngithub.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16/go.mod h1:uVW4OLBqbJXSHJYA9svT9BluSvvwbzLQ2Crf6UPzR3c=\ngithub.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=\ngithub.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=\ngithub.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7 h1:DIBqIrJ7hv+e4CmIk2z3pyKT+3B6qVMgRsawHiR3qso=\ngithub.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7/go.mod h1:vLm00xmBke75UmpNvOcZQ/Q30ZFjbczeLFqGx5urmGo=\ngithub.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY=\ngithub.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU=\ngithub.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16 h1:NSbvS17MlI2lurYgXnCOLvCFX38sBW4eiVER7+kkgsU=\ngithub.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16/go.mod h1:SwT8Tmqd4sA6G1qaGdzWCJN99bUmPGHfRwwq3G5Qb+A=\ngithub.com/aws/aws-sdk-go-v2/service/s3 v1.93.2 h1:U3ygWUhCpiSPYSHOrRhb3gOl9T5Y3kB8k5Vjs//57bE=\ngithub.com/aws/aws-sdk-go-v2/service/s3 v1.93.2/go.mod h1:79S2BdqCJpScXZA2y+cpZuocWsjGjJINyXnOsf5DTz8=\ngithub.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y=\ngithub.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod 
h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M=\ngithub.com/aws/aws-sdk-go-v2/service/ssm v1.45.0 h1:IOdss+igJDFdic9w3WKwxGCmHqUxydvIhJOm9LJ32Dk=\ngithub.com/aws/aws-sdk-go-v2/service/ssm v1.45.0/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM=\ngithub.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk=\ngithub.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=\ngithub.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds=\ngithub.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo=\ngithub.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ=\ngithub.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ=\ngithub.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=\ngithub.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=\ngithub.com/axiomhq/hyperloglog v0.2.6 h1:sRhvvF3RIXWQgAXaTphLp4yJiX4S0IN3MWTaAgZoRJw=\ngithub.com/axiomhq/hyperloglog v0.2.6/go.mod h1:YjX/dQqCR/7QYX0g8mu8UZAjpIenz1FKM71UEsjFoTo=\ngithub.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=\ngithub.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=\ngithub.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=\ngithub.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=\ngithub.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=\ngithub.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=\ngithub.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=\ngithub.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=\ngithub.com/chasefleming/elem-go v0.31.0 h1:vZsuKmKdv6idnUbu3awMruxTiFqZ/ertFJFAyBCkVhI=\ngithub.com/chasefleming/elem-go v0.31.0/go.mod h1:UBmmZfso2LkXA0HZInbcwsmhE/LXFClEcBPNCGeARtA=\ngithub.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=\ngithub.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=\ngithub.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=\ngithub.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=\ngithub.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=\ngithub.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=\ngithub.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=\ngithub.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=\ngithub.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=\ngithub.com/cilium/ebpf v0.17.3 h1:FnP4r16PWYSE4ux6zN+//jMcW4nMVRvuTLVTvCjyyjg=\ngithub.com/cilium/ebpf v0.17.3/go.mod h1:G5EDHij8yiLzaqn0WjyfJHvRa+3aDlReIaLVRMvOyJk=\ngithub.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs=\ngithub.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA=\ngithub.com/clipperhouse/uax29/v2 v2.5.0 h1:x7T0T4eTHDONxFJsL94uKNKPHrclyFI0lm7+w94cO8U=\ngithub.com/clipperhouse/uax29/v2 v2.5.0/go.mod 
h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=\ngithub.com/coder/websocket v1.8.14 h1:9L0p0iKiNOibykf283eHkKUHHrpG7f65OE3BhhO7v9g=\ngithub.com/coder/websocket v1.8.14/go.mod h1:NX3SzP+inril6yawo5CQXx8+fk145lPDC6pumgx0mVg=\ngithub.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=\ngithub.com/containerd/console v1.0.5 h1:R0ymNeydRqH2DmakFNdmjR2k0t7UPuiOV/N/27/qqsc=\ngithub.com/containerd/console v1.0.5/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=\ngithub.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4=\ngithub.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE=\ngithub.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=\ngithub.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=\ngithub.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=\ngithub.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=\ngithub.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=\ngithub.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=\ngithub.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0=\ngithub.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=\ngithub.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc=\ngithub.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=\ngithub.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=\ngithub.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=\ngithub.com/creachadair/command v0.2.0 h1:qTA9cMMhZePAxFoNdnk6F6nn94s1qPndIg9hJbqI9cA=\ngithub.com/creachadair/command v0.2.0/go.mod h1:j+Ar+uYnFsHpkMeV9kGj6lJ45y9u2xqtg8FYy6cm+0o=\ngithub.com/creachadair/flax v0.0.5 h1:zt+CRuXQASxwQ68e9GHAOnEgAU29nF0zYMHOCrL5wzE=\ngithub.com/creachadair/flax v0.0.5/go.mod h1:F1PML0JZLXSNDMNiRGK2yjm5f+L9QCHchyHBldFymj8=\ngithub.com/creachadair/mds v0.25.15 h1:i8CUqtfgbCqbvZ++L7lm8No3cOeic9YKF4vHEvEoj+Y=\ngithub.com/creachadair/mds v0.25.15/go.mod h1:XtMfRW15sjd1iOi1Z1k+dq0pRsR5xPbulpoTrpyhk8w=\ngithub.com/creachadair/msync v0.8.2 h1:ujvc/SVJPn+bFwmjUHucXNTTn3opVe2YbQ46mBCnP08=\ngithub.com/creachadair/msync v0.8.2/go.mod h1:LzxqD9kfIl/O3DczkwOgJplLPqwrTbIhINlf9bHIsEY=\ngithub.com/creachadair/taskgroup v0.13.2 h1:3KyqakBuFsm3KkXi/9XIb0QcA8tEzLHLgaoidf0MdVc=\ngithub.com/creachadair/taskgroup v0.13.2/go.mod h1:i3V1Zx7H8RjwljUEeUWYT30Lmb9poewSb2XI1yTwD0g=\ngithub.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=\ngithub.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=\ngithub.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=\ngithub.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/dblohm7/wingoes v0.0.0-20250822163801-6d8e6105c62d h1:QRKpU+9ZBDs62LyBfwhZkJdB5DJX2Sm3p4kUh7l1aA0=\ngithub.com/dblohm7/wingoes v0.0.0-20250822163801-6d8e6105c62d/go.mod 
h1:SUxUaAK/0UG5lYyZR1L1nC4AaYYvSSYTWQSH3FPcxKU=\ngithub.com/dgryski/go-metro v0.0.0-20250106013310-edb8663e5e33 h1:ucRHb6/lvW/+mTEIGbvhcYU3S8+uSNkuMjx/qZFfhtM=\ngithub.com/dgryski/go-metro v0.0.0-20250106013310-edb8663e5e33/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw=\ngithub.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q=\ngithub.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A=\ngithub.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=\ngithub.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=\ngithub.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=\ngithub.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=\ngithub.com/docker/cli v29.2.1+incompatible h1:n3Jt0QVCN65eiVBoUTZQM9mcQICCJt3akW4pKAbKdJg=\ngithub.com/docker/cli v29.2.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=\ngithub.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM=\ngithub.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=\ngithub.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=\ngithub.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=\ngithub.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=\ngithub.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=\ngithub.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=\ngithub.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=\ngithub.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=\ngithub.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=\ngithub.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=\ngithub.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=\ngithub.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=\ngithub.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=\ngithub.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=\ngithub.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=\ngithub.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=\ngithub.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=\ngithub.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=\ngithub.com/gaissmai/bart v0.26.1 h1:+w4rnLGNlA2GDVn382Tfe3jOsK5vOr5n4KmigJ9lbTo=\ngithub.com/gaissmai/bart v0.26.1/go.mod h1:GREWQfTLRWz/c5FTOsIw+KkscuFkIV5t8Rp7Nd1Td5c=\ngithub.com/github/fakeca v0.1.0 h1:Km/MVOFvclqxPM9dZBC4+QE564nU4gz4iZ0D9pMw28I=\ngithub.com/github/fakeca v0.1.0/go.mod h1:+bormgoGMMuamOscx7N91aOuUST7wdaJ2rNjeohylyo=\ngithub.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ=\ngithub.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc=\ngithub.com/glebarez/sqlite v1.11.0 h1:wSG0irqzP6VurnMEpFGer5Li19RpIRi2qvQz++w0GMw=\ngithub.com/glebarez/sqlite v1.11.0/go.mod h1:h8/o8j5wiAsqSPoWELDUdJXhjAhsVliSn7bWZjOhrgQ=\ngithub.com/go-chi/chi/v5 v5.2.5 
h1:Eg4myHZBjyvJmAFjFvWgrqDTXFyOzjj7YIm3L3mu6Ug=\ngithub.com/go-chi/chi/v5 v5.2.5/go.mod h1:X7Gx4mteadT3eDOMTsXzmI4/rwUpOwBHLpAfupzFJP0=\ngithub.com/go-chi/metrics v0.1.1 h1:CXhbnkAVVjb0k73EBRQ6Z2YdWFnbXZgNtg1Mboguibk=\ngithub.com/go-chi/metrics v0.1.1/go.mod h1:mcGTM1pPalP7WCtb+akNYFO/lwNwBBLCuedepqjoPn4=\ngithub.com/go-gormigrate/gormigrate/v2 v2.1.5 h1:1OyorA5LtdQw12cyJDEHuTrEV3GiXiIhS4/QTTa/SM8=\ngithub.com/go-gormigrate/gormigrate/v2 v2.1.5/go.mod h1:mj9ekk/7CPF3VjopaFvWKN2v7fN3D9d3eEOAXRhi/+M=\ngithub.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFSaNY=\ngithub.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=\ngithub.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=\ngithub.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=\ngithub.com/go-json-experiment/json v0.0.0-20251027170946-4849db3c2f7e h1:Lf/gRkoycfOBPa42vU2bbgPurFong6zXeFtPoxholzU=\ngithub.com/go-json-experiment/json v0.0.0-20251027170946-4849db3c2f7e/go.mod h1:uNVvRXArCGbZ508SxYYTC5v1JWoz2voff5pm25jU1Ok=\ngithub.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=\ngithub.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=\ngithub.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=\ngithub.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=\ngithub.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=\ngithub.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=\ngithub.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=\ngithub.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=\ngithub.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=\ngithub.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro=\ngithub.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=\ngithub.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737 h1:cf60tHxREO3g1nroKr2osU3JWZsJzkfi7rEg+oAB0Lo=\ngithub.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737/go.mod h1:MIS0jDzbU/vuM9MC4YnBITCv+RYuTRq8dJzmCrFsK9g=\ngithub.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=\ngithub.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=\ngithub.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=\ngithub.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=\ngithub.com/godbus/dbus/v5 v5.2.2 h1:TUR3TgtSVDmjiXOgAAyaZbYmIeP3DPkld3jgKGV8mXQ=\ngithub.com/godbus/dbus/v5 v5.2.2/go.mod h1:3AAv2+hPq5rdnr5txxxRwiGjPXamgoIHgz9FPBfOp3c=\ngithub.com/gofrs/uuid/v5 v5.4.0 h1:EfbpCTjqMuGyq5ZJwxqzn3Cbr2d0rUZU7v5ycAk/e/0=\ngithub.com/gofrs/uuid/v5 v5.4.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=\ngithub.com/golang-jwt/jwt/v5 v5.3.1 h1:kYf81DTWFe7t+1VvL7eS+jKFVWaUnK9cB1qbwn63YCY=\ngithub.com/golang-jwt/jwt/v5 v5.3.1/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=\ngithub.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=\ngithub.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=\ngithub.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=\ngithub.com/golang/protobuf v1.5.4/go.mod 
h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=\ngithub.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=\ngithub.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=\ngithub.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=\ngithub.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=\ngithub.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=\ngithub.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=\ngithub.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=\ngithub.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=\ngithub.com/google/go-querystring v1.2.0 h1:yhqkPbu2/OH+V9BfpCVPZkNmUXhb2gBxJArfhIxNtP0=\ngithub.com/google/go-querystring v1.2.0/go.mod h1:8IFJqpSRITyJ8QhQ13bmbeMBDfmeEJZD5A0egEOmkqU=\ngithub.com/google/go-tpm v0.9.4 h1:awZRf9FwOeTunQmHoDYSHJps3ie6f1UlhS1fOdPEt1I=\ngithub.com/google/go-tpm v0.9.4/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY=\ngithub.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdFk6Vl1yPGtSRtwGpVkWyZww1OCil2MI=\ngithub.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4=\ngithub.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=\ngithub.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=\ngithub.com/google/pprof v0.0.0-20260202012954-cb029daf43ef h1:xpF9fUHpoIrrjX24DURVKiwHcFpw19ndIs+FwTSMbno=\ngithub.com/google/pprof v0.0.0-20260202012954-cb029daf43ef/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI=\ngithub.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=\ngithub.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=\ngithub.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=\ngithub.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/gookit/assert v0.1.1 h1:lh3GcawXe/p+cU7ESTZ5Ui3Sm/x8JWpIis4/1aF0mY0=\ngithub.com/gookit/assert v0.1.1/go.mod h1:jS5bmIVQZTIwk42uXl4lyj4iaaxx32tqH16CFj0VX2E=\ngithub.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ=\ngithub.com/gookit/color v1.5.0/go.mod h1:43aQb+Zerm/BWh2GnrgOQm7ffz7tvQXEKV6BFMl7wAo=\ngithub.com/gookit/color v1.6.0 h1:JjJXBTk1ETNyqyilJhkTXJYYigHG24TM9Xa2M1xAhRA=\ngithub.com/gookit/color v1.6.0/go.mod h1:9ACFc7/1IpHGBW8RwuDm/0YEnhg3dwwXpoMsmtyHfjs=\ngithub.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=\ngithub.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=\ngithub.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=\ngithub.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=\ngithub.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 h1:X+2YciYSxvMQK0UZ7sg45ZVabVZBeBuvMkmuI2V3Fak=\ngithub.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7/go.mod h1:lW34nIZuQ8UDPdkon5fmfp2l3+ZkQ2me/+oecHYLOII=\ngithub.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4=\ngithub.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=\ngithub.com/hashicorp/golang-lru v0.6.0 
h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=\ngithub.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=\ngithub.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=\ngithub.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU=\ngithub.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo=\ngithub.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=\ngithub.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=\ngithub.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=\ngithub.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=\ngithub.com/illarion/gonotify/v3 v3.0.2 h1:O7S6vcopHexutmpObkeWsnzMJt/r1hONIEogeVNmJMk=\ngithub.com/illarion/gonotify/v3 v3.0.2/go.mod h1:HWGPdPe817GfvY3w7cx6zkbzNZfi3QjcBm/wgVvEL1U=\ngithub.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=\ngithub.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=\ngithub.com/insomniacslk/dhcp v0.0.0-20240129002554-15c9b8791914 h1:kD8PseueGeYiid/Mmcv17Q0Qqicc4F46jcX22L/e/Hs=\ngithub.com/insomniacslk/dhcp v0.0.0-20240129002554-15c9b8791914/go.mod h1:3A9PQ1cunSDF/1rbTq99Ts4pVnycWg+vlPkfeD2NLFI=\ngithub.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=\ngithub.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=\ngithub.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=\ngithub.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=\ngithub.com/jackc/pgx/v5 v5.8.0 h1:TYPDoleBBme0xGSAX3/+NujXXtpZn9HBONkQC7IEZSo=\ngithub.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw=\ngithub.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=\ngithub.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=\ngithub.com/jagottsicher/termcolor v1.0.2 h1:fo0c51pQSuLBN1+yVX2ZE+hE+P7ULb/TY8eRowJnrsM=\ngithub.com/jagottsicher/termcolor v1.0.2/go.mod h1:RcH8uFwF/0wbEdQmi83rjmlJ+QOKdMSE9Rc1BEB7zFo=\ngithub.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g=\ngithub.com/jellydator/ttlcache/v3 v3.1.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4=\ngithub.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=\ngithub.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=\ngithub.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=\ngithub.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=\ngithub.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=\ngithub.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=\ngithub.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=\ngithub.com/jsimonetti/rtnetlink v1.4.2 h1:Df9w9TZ3npHTyDn0Ev9e1uzmN2odmXd0QX+J5GTEn90=\ngithub.com/jsimonetti/rtnetlink v1.4.2/go.mod h1:92s6LJdE+1iOrw+F2/RO7LYI2Qd8pPpFNNUYW06gcoM=\ngithub.com/kamstrup/intmap v0.5.2 h1:qnwBm1mh4XAnW9W9Ue9tZtTff8pS6+s6iKF6JRIV2Dk=\ngithub.com/kamstrup/intmap v0.5.2/go.mod 
h1:gWUVWHKzWj8xpJVFf5GC0O26bWmv3GqdnIX/LMT6Aq4=\ngithub.com/klauspost/compress v1.18.3 h1:9PJRvfbmTabkOX8moIpXPbMMbYN60bWImDDU7L+/6zw=\ngithub.com/klauspost/compress v1.18.3/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=\ngithub.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=\ngithub.com/klauspost/cpuid/v2 v2.0.10/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=\ngithub.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=\ngithub.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU=\ngithub.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=\ngithub.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a h1:+RR6SqnTkDLWyICxS1xpjCi/3dhyV+TgZwA6Ww3KncQ=\ngithub.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryyfiu4F7t8HQ1mxvp1UBdWM2r6Xa+nGWvDk=\ngithub.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=\ngithub.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=\ngithub.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=\ngithub.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=\ngithub.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=\ngithub.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=\ngithub.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=\ngithub.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=\ngithub.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=\ngithub.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=\ngithub.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=\ngithub.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=\ngithub.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=\ngithub.com/lib/pq v1.11.1 h1:wuChtj2hfsGmmx3nf1m7xC2XpK6OtelS2shMY+bGMtI=\ngithub.com/lib/pq v1.11.1/go.mod h1:/p+8NSbOcwzAEI7wiMXFlgydTwcgTr3OSKMsD2BitpA=\ngithub.com/lithammer/fuzzysearch v1.1.8 h1:/HIuJnjHuXS8bKaiTMeeDlW2/AyIWk2brx1V8LFgLN4=\ngithub.com/lithammer/fuzzysearch v1.1.8/go.mod h1:IdqeyBClc3FFqSzYq/MXESsS4S0FsZ5ajtkr5xPLts4=\ngithub.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=\ngithub.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=\ngithub.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=\ngithub.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=\ngithub.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=\ngithub.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=\ngithub.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=\ngithub.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=\ngithub.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=\ngithub.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw=\ngithub.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=\ngithub.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw=\ngithub.com/mdlayher/genetlink v1.3.2/go.mod 
h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o=\ngithub.com/mdlayher/netlink v1.8.0 h1:e7XNIYJKD7hUct3Px04RuIGJbBxy1/c4nX7D5YyvvlM=\ngithub.com/mdlayher/netlink v1.8.0/go.mod h1:UhgKXUlDQhzb09DrCl2GuRNEglHmhYoWAHid9HK3594=\ngithub.com/mdlayher/sdnotify v1.0.0 h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ3c=\ngithub.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE=\ngithub.com/mdlayher/socket v0.5.1 h1:VZaqt6RkGkt2OE9l3GcC6nZkqD3xKeQLyfleW/uBcos=\ngithub.com/mdlayher/socket v0.5.1/go.mod h1:TjPLHI1UgwEv5J1B5q0zTZq12A/6H7nKmtTanQE37IQ=\ngithub.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=\ngithub.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=\ngithub.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc=\ngithub.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=\ngithub.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=\ngithub.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=\ngithub.com/moby/moby/api v1.53.0 h1:PihqG1ncw4W+8mZs69jlwGXdaYBeb5brF6BL7mPIS/w=\ngithub.com/moby/moby/api v1.53.0/go.mod h1:8mb+ReTlisw4pS6BRzCMts5M49W5M7bKt1cJy/YbAqc=\ngithub.com/moby/moby/client v0.2.2 h1:Pt4hRMCAIlyjL3cr8M5TrXCwKzguebPAc2do2ur7dEM=\ngithub.com/moby/moby/client v0.2.2/go.mod h1:2EkIPVNCqR05CMIzL1mfA07t0HvVUUOl85pasRz/GmQ=\ngithub.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=\ngithub.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=\ngithub.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=\ngithub.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=\ngithub.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=\ngithub.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=\ngithub.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=\ngithub.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=\ngithub.com/morikuni/aec v1.1.0 h1:vBBl0pUnvi/Je71dsRrhMBtreIqNMYErSAbEeb8jrXQ=\ngithub.com/morikuni/aec v1.1.0/go.mod h1:xDRgiq/iw5l+zkao76YTKzKttOp2cwPEne25HDkJnBw=\ngithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=\ngithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=\ngithub.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=\ngithub.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=\ngithub.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ=\ngithub.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=\ngithub.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25 h1:9bCMuD3TcnjeqjPT2gSlha4asp8NvgcFRYExCaikCxk=\ngithub.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25/go.mod h1:eDjgYHYDJbPLBLsyZ6qRaugP0mX8vePOhZ5id1fdzJw=\ngithub.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=\ngithub.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=\ngithub.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=\ngithub.com/opencontainers/image-spec v1.1.1/go.mod 
h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=\ngithub.com/opencontainers/runc v1.3.2 h1:GUwgo0Fx9M/pl2utaSYlJfdBcXAB/CZXDxe322lvJ3Y=\ngithub.com/opencontainers/runc v1.3.2/go.mod h1:F7UQQEsxcjUNnFpT1qPLHZBKYP7yWwk6hq8suLy9cl0=\ngithub.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=\ngithub.com/ory/dockertest/v3 v3.12.0 h1:3oV9d0sDzlSQfHtIaB5k6ghUCVMVLpAY8hwrqoCyRCw=\ngithub.com/ory/dockertest/v3 v3.12.0/go.mod h1:aKNDTva3cp8dwOWwb9cWuX84aH5akkxXRvO7KCwWVjE=\ngithub.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=\ngithub.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=\ngithub.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=\ngithub.com/petermattis/goid v0.0.0-20260113132338-7c7de50cc741 h1:KPpdlQLZcHfTMQRi6bFQ7ogNO0ltFT4PmtwTLW4W+14=\ngithub.com/petermattis/goid v0.0.0-20260113132338-7c7de50cc741/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=\ngithub.com/philip-bui/grpc-zerolog v1.0.1 h1:EMacvLRUd2O1K0eWod27ZP5CY1iTNkhBDLSN+Q4JEvA=\ngithub.com/philip-bui/grpc-zerolog v1.0.1/go.mod h1:qXbiq/2X4ZUMMshsqlWyTHOcw7ns+GZmlqZZN05ZHcQ=\ngithub.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=\ngithub.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=\ngithub.com/pires/go-proxyproto v0.9.2 h1:H1UdHn695zUVVmB0lQ354lOWHOy6TZSpzBl3tgN0s1U=\ngithub.com/pires/go-proxyproto v0.9.2/go.mod h1:ZKAAyp3cgy5Y5Mo4n9AlScrkCZwUy0g3Jf+slqQVcuU=\ngithub.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=\ngithub.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=\ngithub.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=\ngithub.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo=\ngithub.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=\ngithub.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/prometheus-community/pro-bing v0.7.0 h1:KFYFbxC2f2Fp6c+TyxbCOEarf7rbnzr9Gw8eIb0RfZA=\ngithub.com/prometheus-community/pro-bing v0.7.0/go.mod h1:Moob9dvlY50Bfq6i88xIwfyw7xLFHH69LUgx9n5zqCE=\ngithub.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=\ngithub.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=\ngithub.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=\ngithub.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=\ngithub.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=\ngithub.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=\ngithub.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws=\ngithub.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw=\ngithub.com/pterm/pterm v0.12.27/go.mod h1:PhQ89w4i95rhgE+xedAoqous6K9X+r6aSOI2eFF7DZI=\ngithub.com/pterm/pterm v0.12.29/go.mod 
h1:WI3qxgvoQFFGKGjGnJR849gU0TsEOvKn5Q8LlY1U7lg=\ngithub.com/pterm/pterm v0.12.30/go.mod h1:MOqLIyMOgmTDz9yorcYbcw+HsgoZo3BQfg2wtl3HEFE=\ngithub.com/pterm/pterm v0.12.31/go.mod h1:32ZAWZVXD7ZfG0s8qqHXePte42kdz8ECtRyEejaWgXU=\ngithub.com/pterm/pterm v0.12.33/go.mod h1:x+h2uL+n7CP/rel9+bImHD5lF3nM9vJj80k9ybiiTTE=\ngithub.com/pterm/pterm v0.12.36/go.mod h1:NjiL09hFhT/vWjQHSj1athJpx6H8cjpHXNAK5bUw8T8=\ngithub.com/pterm/pterm v0.12.40/go.mod h1:ffwPLwlbXxP+rxT0GsgDTzS3y3rmpAO1NMjUkGTYf8s=\ngithub.com/pterm/pterm v0.12.82 h1:+D9wYhCaeaK0FIQoZtqbNQuNpe2lB2tajKKsTd5paVQ=\ngithub.com/pterm/pterm v0.12.82/go.mod h1:TyuyrPjnxfwP+ccJdBTeWHtd/e0ybQHkOS/TakajZCw=\ngithub.com/puzpuzpuz/xsync/v4 v4.4.0 h1:vlSN6/CkEY0pY8KaB0yqo/pCLZvp9nhdbBdjipT4gWo=\ngithub.com/puzpuzpuz/xsync/v4 v4.4.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=\ngithub.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=\ngithub.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=\ngithub.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=\ngithub.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=\ngithub.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=\ngithub.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=\ngithub.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=\ngithub.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=\ngithub.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=\ngithub.com/safchain/ethtool v0.7.0 h1:rlJzfDetsVvT61uz8x1YIcFn12akMfuPulHtZjtb7Is=\ngithub.com/safchain/ethtool v0.7.0/go.mod h1:MenQKEjXdfkjD3mp2QdCk8B/hwvkrlOTm/FD4gTpFxQ=\ngithub.com/sagikazarmark/locafero v0.12.0 h1:/NQhBAkUb4+fH1jivKHWusDYFjMOOKU88eegjfxfHb4=\ngithub.com/sagikazarmark/locafero v0.12.0/go.mod h1:sZh36u/YSZ918v0Io+U9ogLYQJ9tLLBmM4eneO6WwsI=\ngithub.com/samber/lo v1.52.0 h1:Rvi+3BFHES3A8meP33VPAxiBZX/Aws5RxrschYGjomw=\ngithub.com/samber/lo v1.52.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0=\ngithub.com/sasha-s/go-deadlock v0.3.6 h1:TR7sfOnZ7x00tWPfD397Peodt57KzMDo+9Ae9rMiUmw=\ngithub.com/sasha-s/go-deadlock v0.3.6/go.mod h1:CUqNyyvMxTyjFqDT7MRg9mb4Dv/btmGTqSR+rky/UXo=\ngithub.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=\ngithub.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=\ngithub.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=\ngithub.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w=\ngithub.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g=\ngithub.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=\ngithub.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=\ngithub.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=\ngithub.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=\ngithub.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=\ngithub.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=\ngithub.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=\ngithub.com/spf13/pflag v1.0.10 
h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=\ngithub.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=\ngithub.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU=\ngithub.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=\ngithub.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=\ngithub.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=\ngithub.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=\ngithub.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=\ngithub.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=\ngithub.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=\ngithub.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=\ngithub.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=\ngithub.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=\ngithub.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=\ngithub.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e h1:PtWT87weP5LWHEY//SWsYkSO3RWRZo4OSWagh3YD2vQ=\ngithub.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e/go.mod h1:XrBNfAFN+pwoWuksbFS9Ccxnopa15zJGgXRFN90l3K4=\ngithub.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 h1:Gzfnfk2TWrk8Jj4P4c1a3CtQyMaTVCznlkLZI++hok4=\ngithub.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55/go.mod h1:4k4QO+dQ3R5FofL+SanAUZe+/QfeK0+OIuwDIRu2vSg=\ngithub.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869 h1:SRL6irQkKGQKKLzvQP/ke/2ZuB7Py5+XuqtOgSj+iMM=\ngithub.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ=\ngithub.com/tailscale/hujson v0.0.0-20250605163823-992244df8c5a h1:a6TNDN9CgG+cYjaeN8l2mc4kSz2iMiCDQxPEyltUV/I=\ngithub.com/tailscale/hujson v0.0.0-20250605163823-992244df8c5a/go.mod h1:EbW0wDK/qEUYI0A5bqq0C2kF8JTQwWONmGDBbzsxxHo=\ngithub.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU=\ngithub.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0=\ngithub.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA=\ngithub.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc=\ngithub.com/tailscale/setec v0.0.0-20260115174028-19d190c5556d h1:N+TtzIaGYREbLbKZB0WU0vVnMSfaqUkSf3qMEi03hwE=\ngithub.com/tailscale/setec v0.0.0-20260115174028-19d190c5556d/go.mod h1:6NU8H/GLPVX2TnXAY1duyy9ylLaHwFpr0X93UPiYmNI=\ngithub.com/tailscale/squibble v0.0.0-20251104223530-a961feffb67f h1:CL6gu95Y1o2ko4XiWPvWkJka0QmQWcUyPywWVWDPQbQ=\ngithub.com/tailscale/squibble v0.0.0-20251104223530-a961feffb67f/go.mod h1:xJkMmR3t+thnUQhA3Q4m2VSlS5pcOq+CIjmU/xfKKx4=\ngithub.com/tailscale/tailsql v0.0.0-20260105194658-001575c3ca09 
h1:Fc9lE2cDYJbBLpCqnVmoLdf7McPqoHZiDxDPPpkJM04=\ngithub.com/tailscale/tailsql v0.0.0-20260105194658-001575c3ca09/go.mod h1:QMNhC4XGFiXKngHVLXE+ERDmQoH0s5fD7AUxupykocQ=\ngithub.com/tailscale/web-client-prebuilt v0.0.0-20251127225136-f19339b67368 h1:0tpDdAj9sSfSZg4gMwNTdqMP592sBrq2Sm0w6ipnh7k=\ngithub.com/tailscale/web-client-prebuilt v0.0.0-20251127225136-f19339b67368/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ=\ngithub.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M=\ngithub.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y=\ngithub.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da h1:jVRUZPRs9sqyKlYHHzHjAqKN+6e/Vog6NpHYeNPJqOw=\ngithub.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4=\ngithub.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek=\ngithub.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg=\ngithub.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA=\ngithub.com/tc-hib/winres v0.2.1/go.mod h1:C/JaNhH3KBvhNKVbvdlDWkbMDO9H4fKKDaN7/07SSuk=\ngithub.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e h1:IWllFTiDjjLIf2oeKxpIUmtiDV5sn71VgeQgg6vcE7k=\ngithub.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e/go.mod h1:d7u6HkTYKSv5m6MCKkOQlHwaShTMl3HjqSGW3XtVhXM=\ngithub.com/tink-crypto/tink-go/v2 v2.6.0 h1:+KHNBHhWH33Vn+igZWcsgdEPUxKwBMEe0QC60t388v4=\ngithub.com/tink-crypto/tink-go/v2 v2.6.0/go.mod h1:2WbBA6pfNsAfBwDCggboaHeB2X29wkU8XHtGwh2YIk8=\ngithub.com/u-root/u-root v0.14.0 h1:Ka4T10EEML7dQ5XDvO9c3MBN8z4nuSnGjcd1jmU2ivg=\ngithub.com/u-root/u-root v0.14.0/go.mod h1:hAyZorapJe4qzbLWlAkmSVCJGbfoU9Pu4jpJ1WMluqE=\ngithub.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM=\ngithub.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA=\ngithub.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY=\ngithub.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=\ngithub.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=\ngithub.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=\ngithub.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=\ngithub.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=\ngithub.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=\ngithub.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=\ngithub.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=\ngithub.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=\ngithub.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=\ngithub.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs=\ngithub.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=\ngithub.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod 
h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=\ngithub.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=\ngo.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=\ngo.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=\ngo.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8=\ngo.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0=\ngo.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=\ngo.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=\ngo.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs=\ngo.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI=\ngo.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 h1:wVZXIWjQSeSmMoxF74LzAnpVQOAFDo3pPji9Y4SOFKc=\ngo.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0/go.mod h1:khvBS2IggMFNwZK/6lEeHg/W57h/IX6J4URh57fuI40=\ngo.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=\ngo.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=\ngo.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=\ngo.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=\ngo.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=\ngo.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=\ngo.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=\ngo.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=\ngo.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=\ngo.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=\ngo.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=\ngo.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=\ngo.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=\ngo.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=\ngo.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=\ngo.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=\ngo4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek=\ngo4.org/mem v0.0.0-20240501181205-ae6ca9944745/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g=\ngo4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M=\ngo4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y=\ngolang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=\ngolang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=\ngolang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=\ngolang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=\ngolang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=\ngolang.org/x/exp v0.0.0-20260112195511-716be5621a96 
h1:Z/6YuSHTLOHfNFdb8zVZomZr7cqNgTJvA8+Qz75D8gU=\ngolang.org/x/exp v0.0.0-20260112195511-716be5621a96/go.mod h1:nzimsREAkjBCIEFtHiYkrJyT+2uy9YZJB7H1k68CXZU=\ngolang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8=\ngolang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=\ngolang.org/x/image v0.27.0 h1:C8gA4oWU/tKkdCfYT6T2u4faJu3MeNS5O8UPWlPF61w=\ngolang.org/x/image v0.27.0/go.mod h1:xbdrClrAUway1MUTEZDq9mz/UpRwYAkFFNUslZtcB+g=\ngolang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=\ngolang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=\ngolang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8=\ngolang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w=\ngolang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=\ngolang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=\ngolang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=\ngolang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=\ngolang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60=\ngolang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM=\ngolang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=\ngolang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=\ngolang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=\ngolang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=\ngolang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=\ngolang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=\ngolang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=\ngolang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=\ngolang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=\ngolang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=\ngolang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=\ngolang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=\ngolang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=\ngolang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=\ngolang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg=\ngolang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM=\ngolang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=\ngolang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=\ngolang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=\ngolang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=\ngolang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=\ngolang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=\ngolang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=\ngolang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=\ngolang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=\ngolang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=\ngolang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k=\ngolang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0=\ngolang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeunTOisW56dUokqW/FOteYJJ/yg=\ngolang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=\ngolang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE=\ngolang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=\ngonum.org/v1/gonum v0.16.0 
h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=\ngonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=\ngoogle.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20 h1:7ei4lp52gK1uSejlA8AZl5AJjeLUOHBQscRQZUgAcu0=\ngoogle.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20/go.mod h1:ZdbssH/1SOVnjnDlXzxDHK2MCidiqXtbYccJNzNYPEE=\ngoogle.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20 h1:Jr5R2J6F6qWyzINc+4AM8t5pfUz6beZpHp678GNrMbE=\ngoogle.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=\ngoogle.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=\ngoogle.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=\ngoogle.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=\ngoogle.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=\ngopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=\ngopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngorm.io/driver/postgres v1.6.0 h1:2dxzU8xJ+ivvqTRph34QX+WrRaJlmfyPqXmoGVjMBa4=\ngorm.io/driver/postgres v1.6.0/go.mod h1:vUw0mrGgrTK+uPHEhAdV4sfFELrByKVGnaVRkXDhtWo=\ngorm.io/gorm v1.31.1 h1:7CA8FTFz/gRfgqgpeKIBcervUn3xSyPUmr6B2WXJ7kg=\ngorm.io/gorm v1.31.1/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs=\ngotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=\ngotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=\ngvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k=\ngvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633/go.mod h1:5DMfjtclAbTIjbXqO1qCe2K5GKKxWz2JHvCChuTcJEM=\nhonnef.co/go/tools v0.7.0-0.dev.0.20251022135355-8273271481d0 h1:5SXjd4ET5dYijLaf0O3aOenC0Z4ZafIWSpjUzsQaNho=\nhonnef.co/go/tools v0.7.0-0.dev.0.20251022135355-8273271481d0/go.mod h1:EPDDhEZqVHhWuPI5zPAsjU0U7v9xNIWjoOVyZ5ZcniQ=\nhowett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM=\nhowett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=\nmodernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=\nmodernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=\nmodernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc=\nmodernc.org/ccgo/v4 v4.30.1/go.mod h1:bIOeI1JL54Utlxn+LwrFyjCx2n2RDiYEaJVSrgdrRfM=\nmodernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA=\nmodernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=\nmodernc.org/gc/v2 v2.6.5 
h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=\nmodernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=\nmodernc.org/gc/v3 v3.1.1 h1:k8T3gkXWY9sEiytKhcgyiZ2L0DTyCQ/nvX+LoCljoRE=\nmodernc.org/gc/v3 v3.1.1/go.mod h1:HFK/6AGESC7Ex+EZJhJ2Gni6cTaYpSMmU/cT9RmlfYY=\nmodernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=\nmodernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=\nmodernc.org/libc v1.67.6 h1:eVOQvpModVLKOdT+LvBPjdQqfrZq+pC39BygcT+E7OI=\nmodernc.org/libc v1.67.6/go.mod h1:JAhxUVlolfYDErnwiqaLvUqc8nfb2r6S6slAgZOnaiE=\nmodernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=\nmodernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=\nmodernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=\nmodernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=\nmodernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=\nmodernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=\nmodernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=\nmodernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=\nmodernc.org/sqlite v1.44.3 h1:+39JvV/HWMcYslAwRxHb8067w+2zowvFOUrOWIy9PjY=\nmodernc.org/sqlite v1.44.3/go.mod h1:CzbrU2lSB1DKUusvwGz7rqEKIq+NUd8GWuBBZDs9/nA=\nmodernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=\nmodernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=\nmodernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=\nmodernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=\npgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk=\npgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04=\nsoftware.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k=\nsoftware.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=\ntailscale.com v1.94.1 h1:0dAst/ozTuFkgmxZULc3oNwR9+qPIt5ucvzH7kaM0Jw=\ntailscale.com v1.94.1/go.mod h1:gLnVrEOP32GWvroaAHHGhjSGMPJ1i4DvqNwEg+Yuov4=\nzgo.at/zcache/v2 v2.4.1 h1:Dfjoi8yI0Uq7NCc4lo2kaQJJmp9Mijo21gef+oJstbY=\nzgo.at/zcache/v2 v2.4.1/go.mod h1:gyCeoLVo01QjDZynjime8xUGHHMbsLiPyUTBpDGd4Gk=\nzombiezen.com/go/postgrestest v1.0.1 h1:aXoADQAJmZDU3+xilYVut0pHhgc0sF8ZspPW9gFNwP4=\nzombiezen.com/go/postgrestest v1.0.1/go.mod h1:marlZezr+k2oSJrvXHnZUs1olHqpE9czlz8ZYkVxliQ=\n"
  },
  {
    "path": "hscontrol/app.go",
    "content": "package hscontrol\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net/http\"\n\t_ \"net/http/pprof\" // nolint\n\t\"os\"\n\t\"os/signal\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/cenkalti/backoff/v5\"\n\t\"github.com/davecgh/go-spew/spew\"\n\t\"github.com/go-chi/chi/v5\"\n\t\"github.com/go-chi/chi/v5/middleware\"\n\t\"github.com/go-chi/metrics\"\n\tgrpcRuntime \"github.com/grpc-ecosystem/grpc-gateway/v2/runtime\"\n\t\"github.com/juanfont/headscale\"\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\t\"github.com/juanfont/headscale/hscontrol/capver\"\n\t\"github.com/juanfont/headscale/hscontrol/db\"\n\t\"github.com/juanfont/headscale/hscontrol/derp\"\n\tderpServer \"github.com/juanfont/headscale/hscontrol/derp/server\"\n\t\"github.com/juanfont/headscale/hscontrol/dns\"\n\t\"github.com/juanfont/headscale/hscontrol/mapper\"\n\t\"github.com/juanfont/headscale/hscontrol/state\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/types/change\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\tzerolog \"github.com/philip-bui/grpc-zerolog\"\n\t\"github.com/pkg/profile\"\n\tzl \"github.com/rs/zerolog\"\n\t\"github.com/rs/zerolog/log\"\n\t\"github.com/sasha-s/go-deadlock\"\n\t\"golang.org/x/crypto/acme\"\n\t\"golang.org/x/crypto/acme/autocert\"\n\t\"golang.org/x/sync/errgroup\"\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/codes\"\n\t\"google.golang.org/grpc/credentials\"\n\t\"google.golang.org/grpc/credentials/insecure\"\n\t\"google.golang.org/grpc/metadata\"\n\t\"google.golang.org/grpc/peer\"\n\t\"google.golang.org/grpc/reflection\"\n\t\"google.golang.org/grpc/status\"\n\t\"tailscale.com/envknob\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/dnstype\"\n\t\"tailscale.com/types/key\"\n\t\"tailscale.com/util/dnsname\"\n)\n\nvar (\n\terrSTUNAddressNotSet                   = errors.New(\"STUN address not set\")\n\terrUnsupportedLetsEncryptChallengeType = errors.New(\n\t\t\"unknown value for Lets Encrypt challenge type\",\n\t)\n\terrEmptyInitialDERPMap = errors.New(\n\t\t\"initial DERPMap is empty, Headscale requires at least one entry\",\n\t)\n)\n\nvar (\n\tdebugDeadlock        = envknob.Bool(\"HEADSCALE_DEBUG_DEADLOCK\")\n\tdebugDeadlockTimeout = envknob.RegisterDuration(\"HEADSCALE_DEBUG_DEADLOCK_TIMEOUT\")\n)\n\nfunc init() {\n\tdeadlock.Opts.Disable = !debugDeadlock\n\tif debugDeadlock {\n\t\tdeadlock.Opts.DeadlockTimeout = debugDeadlockTimeout()\n\t\tdeadlock.Opts.PrintAllCurrentGoroutines = true\n\t}\n}\n\nconst (\n\tAuthPrefix         = \"Bearer \"\n\tupdateInterval     = 5 * time.Second\n\tprivateKeyFileMode = 0o600\n\theadscaleDirPerm   = 0o700\n)\n\n// Headscale represents the base app of the service.\ntype Headscale struct {\n\tcfg             *types.Config\n\tstate           *state.State\n\tnoisePrivateKey *key.MachinePrivate\n\tephemeralGC     *db.EphemeralGarbageCollector\n\n\tDERPServer *derpServer.DERPServer\n\n\t// Things that generate changes\n\textraRecordMan *dns.ExtraRecordsMan\n\tauthProvider   AuthProvider\n\tmapBatcher     *mapper.Batcher\n\n\tclientStreamsOpen sync.WaitGroup\n}\n\nvar (\n\tprofilingEnabled = envknob.Bool(\"HEADSCALE_DEBUG_PROFILING_ENABLED\")\n\tprofilingPath    = envknob.String(\"HEADSCALE_DEBUG_PROFILING_PATH\")\n\ttailsqlEnabled   = envknob.Bool(\"HEADSCALE_DEBUG_TAILSQL_ENABLED\")\n\ttailsqlStateDir  = 
envknob.String(\"HEADSCALE_DEBUG_TAILSQL_STATE_DIR\")\n\ttailsqlTSKey     = envknob.String(\"TS_AUTHKEY\")\n\tdumpConfig       = envknob.Bool(\"HEADSCALE_DEBUG_DUMP_CONFIG\")\n)\n\nfunc NewHeadscale(cfg *types.Config) (*Headscale, error) {\n\tvar err error\n\n\tif profilingEnabled {\n\t\truntime.SetBlockProfileRate(1)\n\t}\n\n\tnoisePrivateKey, err := readOrCreatePrivateKey(cfg.NoisePrivateKeyPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"reading or creating Noise protocol private key: %w\", err)\n\t}\n\n\ts, err := state.NewState(cfg)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"init state: %w\", err)\n\t}\n\n\tapp := Headscale{\n\t\tcfg:               cfg,\n\t\tnoisePrivateKey:   noisePrivateKey,\n\t\tclientStreamsOpen: sync.WaitGroup{},\n\t\tstate:             s,\n\t}\n\n\t// Initialize ephemeral garbage collector\n\tephemeralGC := db.NewEphemeralGarbageCollector(func(ni types.NodeID) {\n\t\tnode, ok := app.state.GetNodeByID(ni)\n\t\tif !ok {\n\t\t\tlog.Error().Uint64(\"node.id\", ni.Uint64()).Msg(\"ephemeral node deletion failed\")\n\t\t\tlog.Debug().Caller().Uint64(\"node.id\", ni.Uint64()).Msg(\"ephemeral node deletion failed because node not found in NodeStore\")\n\n\t\t\treturn\n\t\t}\n\n\t\tpolicyChanged, err := app.state.DeleteNode(node)\n\t\tif err != nil {\n\t\t\tlog.Error().Err(err).EmbedObject(node).Msg(\"ephemeral node deletion failed\")\n\t\t\treturn\n\t\t}\n\n\t\tapp.Change(policyChanged)\n\t\tlog.Debug().Caller().EmbedObject(node).Msg(\"ephemeral node deleted because garbage collection timeout reached\")\n\t})\n\tapp.ephemeralGC = ephemeralGC\n\n\tvar authProvider AuthProvider\n\n\tauthProvider = NewAuthProviderWeb(cfg.ServerURL)\n\tif cfg.OIDC.Issuer != \"\" {\n\t\tctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)\n\t\tdefer cancel()\n\n\t\toidcProvider, err := NewAuthProviderOIDC(\n\t\t\tctx,\n\t\t\t&app,\n\t\t\tcfg.ServerURL,\n\t\t\t&cfg.OIDC,\n\t\t)\n\t\tif err != nil {\n\t\t\tif cfg.OIDC.OnlyStartIfOIDCIsAvailable {\n\t\t\t\treturn nil, err\n\t\t\t} else {\n\t\t\t\tlog.Warn().Err(err).Msg(\"failed to set up OIDC provider, falling back to CLI based authentication\")\n\t\t\t}\n\t\t} else {\n\t\t\tauthProvider = oidcProvider\n\t\t}\n\t}\n\n\tapp.authProvider = authProvider\n\n\tif app.cfg.TailcfgDNSConfig != nil && app.cfg.TailcfgDNSConfig.Proxied { // if MagicDNS\n\t\t// TODO(kradalby): revisit why this takes a list.\n\t\tvar magicDNSDomains []dnsname.FQDN\n\t\tif cfg.PrefixV4 != nil {\n\t\t\tmagicDNSDomains = append(\n\t\t\t\tmagicDNSDomains,\n\t\t\t\tutil.GenerateIPv4DNSRootDomain(*cfg.PrefixV4)...)\n\t\t}\n\n\t\tif cfg.PrefixV6 != nil {\n\t\t\tmagicDNSDomains = append(\n\t\t\t\tmagicDNSDomains,\n\t\t\t\tutil.GenerateIPv6DNSRootDomain(*cfg.PrefixV6)...)\n\t\t}\n\n\t\t// we might have routes already from Split DNS\n\t\tif app.cfg.TailcfgDNSConfig.Routes == nil {\n\t\t\tapp.cfg.TailcfgDNSConfig.Routes = make(map[string][]*dnstype.Resolver)\n\t\t}\n\n\t\tfor _, d := range magicDNSDomains {\n\t\t\tapp.cfg.TailcfgDNSConfig.Routes[d.WithoutTrailingDot()] = nil\n\t\t}\n\t}\n\n\tif cfg.DERP.ServerEnabled {\n\t\tderpServerKey, err := readOrCreatePrivateKey(cfg.DERP.ServerPrivateKeyPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"reading or creating DERP server private key: %w\", err)\n\t\t}\n\n\t\tif derpServerKey.Equal(*noisePrivateKey) {\n\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\"DERP server private key and noise private key are the same: %w\",\n\t\t\t\terr,\n\t\t\t)\n\t\t}\n\n\t\tif cfg.DERP.ServerVerifyClients {\n\t\t\tt := 
http.DefaultTransport.(*http.Transport) //nolint:forcetypeassert\n\t\t\tt.RegisterProtocol(\n\t\t\t\tderpServer.DerpVerifyScheme,\n\t\t\t\tderpServer.NewDERPVerifyTransport(app.handleVerifyRequest),\n\t\t\t)\n\t\t}\n\n\t\tembeddedDERPServer, err := derpServer.NewDERPServer(\n\t\t\tcfg.ServerURL,\n\t\t\tkey.NodePrivate(*derpServerKey),\n\t\t\t&cfg.DERP,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tapp.DERPServer = embeddedDERPServer\n\t}\n\n\treturn &app, nil\n}\n\n// Redirect to our TLS url.\nfunc (h *Headscale) redirect(w http.ResponseWriter, req *http.Request) {\n\ttarget := h.cfg.ServerURL + req.URL.RequestURI()\n\thttp.Redirect(w, req, target, http.StatusFound)\n}\n\nfunc (h *Headscale) scheduledTasks(ctx context.Context) {\n\texpireTicker := time.NewTicker(updateInterval)\n\tdefer expireTicker.Stop()\n\n\tlastExpiryCheck := time.Unix(0, 0)\n\n\tderpTickerChan := make(<-chan time.Time)\n\n\tif h.cfg.DERP.AutoUpdate && h.cfg.DERP.UpdateFrequency != 0 {\n\t\tderpTicker := time.NewTicker(h.cfg.DERP.UpdateFrequency)\n\t\tdefer derpTicker.Stop()\n\n\t\tderpTickerChan = derpTicker.C\n\t}\n\n\tvar extraRecordsUpdate <-chan []tailcfg.DNSRecord\n\tif h.extraRecordMan != nil {\n\t\textraRecordsUpdate = h.extraRecordMan.UpdateCh()\n\t} else {\n\t\textraRecordsUpdate = make(chan []tailcfg.DNSRecord)\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tlog.Info().Caller().Msg(\"scheduled task worker is shutting down.\")\n\t\t\treturn\n\n\t\tcase <-expireTicker.C:\n\t\t\tvar (\n\t\t\t\texpiredNodeChanges []change.Change\n\t\t\t\tchanged            bool\n\t\t\t)\n\n\t\t\tlastExpiryCheck, expiredNodeChanges, changed = h.state.ExpireExpiredNodes(lastExpiryCheck)\n\n\t\t\tif changed {\n\t\t\t\tlog.Trace().Interface(\"changes\", expiredNodeChanges).Msgf(\"expiring nodes\")\n\n\t\t\t\t// Send the changes directly since they're already in the new format\n\t\t\t\tfor _, nodeChange := range expiredNodeChanges {\n\t\t\t\t\th.Change(nodeChange)\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase <-derpTickerChan:\n\t\t\tlog.Info().Msg(\"fetching DERPMap updates\")\n\n\t\t\tderpMap, err := backoff.Retry(ctx, func() (*tailcfg.DERPMap, error) { //nolint:contextcheck\n\t\t\t\tderpMap, err := derp.GetDERPMap(h.cfg.DERP)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\tif h.cfg.DERP.ServerEnabled && h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {\n\t\t\t\t\tregion, _ := h.DERPServer.GenerateRegion()\n\t\t\t\t\tderpMap.Regions[region.RegionID] = &region\n\t\t\t\t}\n\n\t\t\t\treturn derpMap, nil\n\t\t\t}, backoff.WithBackOff(backoff.NewExponentialBackOff()))\n\t\t\tif err != nil {\n\t\t\t\tlog.Error().Err(err).Msg(\"failed to build new DERPMap, retrying later\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\th.state.SetDERPMap(derpMap)\n\n\t\t\th.Change(change.DERPMap())\n\n\t\tcase records, ok := <-extraRecordsUpdate:\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\th.cfg.TailcfgDNSConfig.ExtraRecords = records\n\n\t\t\th.Change(change.ExtraRecords())\n\t\t}\n\t}\n}\n\nfunc (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,\n\treq any,\n\tinfo *grpc.UnaryServerInfo,\n\thandler grpc.UnaryHandler,\n) (any, error) {\n\t// Check if the request is coming from the on-server client.\n\t// This is not secure, but it is to maintain maintainability\n\t// with the \"legacy\" database-based client\n\t// It is also needed for grpc-gateway to be able to connect to\n\t// the server\n\tclient, _ := peer.FromContext(ctx)\n\n\tlog.Trace().\n\t\tCaller().\n\t\tStr(\"client_address\", 
client.Addr.String()).\n\t\tMsg(\"Client is trying to authenticate\")\n\n\tmeta, ok := metadata.FromIncomingContext(ctx)\n\tif !ok {\n\t\treturn ctx, status.Errorf(\n\t\t\tcodes.InvalidArgument,\n\t\t\t\"retrieving metadata\",\n\t\t)\n\t}\n\n\tauthHeader, ok := meta[\"authorization\"]\n\tif !ok {\n\t\treturn ctx, status.Errorf(\n\t\t\tcodes.Unauthenticated,\n\t\t\t\"authorization token not supplied\",\n\t\t)\n\t}\n\n\ttoken := authHeader[0]\n\n\tif !strings.HasPrefix(token, AuthPrefix) {\n\t\treturn ctx, status.Error(\n\t\t\tcodes.Unauthenticated,\n\t\t\t`missing \"Bearer \" prefix in \"Authorization\" header`,\n\t\t)\n\t}\n\n\tvalid, err := h.state.ValidateAPIKey(strings.TrimPrefix(token, AuthPrefix))\n\tif err != nil {\n\t\treturn ctx, status.Error(codes.Internal, \"validating token\")\n\t}\n\n\tif !valid {\n\t\tlog.Info().\n\t\t\tStr(\"client_address\", client.Addr.String()).\n\t\t\tMsg(\"invalid token\")\n\n\t\treturn ctx, status.Error(codes.Unauthenticated, \"invalid token\")\n\t}\n\n\treturn handler(ctx, req)\n}\n\nfunc (h *Headscale) httpAuthenticationMiddleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(\n\t\twriter http.ResponseWriter,\n\t\treq *http.Request,\n\t) {\n\t\tlog.Trace().\n\t\t\tCaller().\n\t\t\tStr(\"client_address\", req.RemoteAddr).\n\t\t\tMsg(\"HTTP authentication invoked\")\n\n\t\tauthHeader := req.Header.Get(\"Authorization\")\n\n\t\twriteUnauthorized := func(statusCode int) {\n\t\t\twriter.WriteHeader(statusCode)\n\n\t\t\tif _, err := writer.Write([]byte(\"Unauthorized\")); err != nil { //nolint:noinlineerr\n\t\t\t\tlog.Error().Err(err).Msg(\"writing HTTP response failed\")\n\t\t\t}\n\t\t}\n\n\t\tif !strings.HasPrefix(authHeader, AuthPrefix) {\n\t\t\tlog.Error().\n\t\t\t\tCaller().\n\t\t\t\tStr(\"client_address\", req.RemoteAddr).\n\t\t\t\tMsg(`missing \"Bearer \" prefix in \"Authorization\" header`)\n\t\t\twriteUnauthorized(http.StatusUnauthorized)\n\n\t\t\treturn\n\t\t}\n\n\t\tvalid, err := h.state.ValidateAPIKey(strings.TrimPrefix(authHeader, AuthPrefix))\n\t\tif err != nil {\n\t\t\tlog.Info().\n\t\t\t\tCaller().\n\t\t\t\tErr(err).\n\t\t\t\tStr(\"client_address\", req.RemoteAddr).\n\t\t\t\tMsg(\"failed to validate token\")\n\t\t\twriteUnauthorized(http.StatusUnauthorized)\n\n\t\t\treturn\n\t\t}\n\n\t\tif !valid {\n\t\t\tlog.Info().\n\t\t\t\tStr(\"client_address\", req.RemoteAddr).\n\t\t\t\tMsg(\"invalid token\")\n\t\t\twriteUnauthorized(http.StatusUnauthorized)\n\n\t\t\treturn\n\t\t}\n\n\t\tnext.ServeHTTP(writer, req)\n\t})\n}\n\n// ensureUnixSocketIsAbsent will check if the given path for headscales unix socket is clear\n// and will remove it if it is not.\nfunc (h *Headscale) ensureUnixSocketIsAbsent() error {\n\t// File does not exist, all fine\n\tif _, err := os.Stat(h.cfg.UnixSocket); errors.Is(err, os.ErrNotExist) { //nolint:noinlineerr\n\t\treturn nil\n\t}\n\n\treturn os.Remove(h.cfg.UnixSocket)\n}\n\nfunc (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *chi.Mux {\n\tr := chi.NewRouter()\n\tr.Use(metrics.Collector(metrics.CollectorOpts{\n\t\tHost:  false,\n\t\tProto: true,\n\t\tSkip: func(r *http.Request) bool {\n\t\t\treturn r.Method != http.MethodOptions\n\t\t},\n\t}))\n\tr.Use(middleware.RequestID)\n\tr.Use(middleware.RealIP)\n\tr.Use(middleware.RequestLogger(&zerologRequestLogger{}))\n\tr.Use(middleware.Recoverer)\n\n\tr.Post(ts2021UpgradePath, h.NoiseUpgradeHandler)\n\n\tr.Get(\"/robots.txt\", h.RobotsHandler)\n\tr.Get(\"/health\", h.HealthHandler)\n\tr.Get(\"/version\", h.VersionHandler)\n\tr.Get(\"/key\", 
h.KeyHandler)\n\tr.Get(\"/register/{auth_id}\", h.authProvider.RegisterHandler)\n\tr.Get(\"/auth/{auth_id}\", h.authProvider.AuthHandler)\n\n\tif provider, ok := h.authProvider.(*AuthProviderOIDC); ok {\n\t\tr.Get(\"/oidc/callback\", provider.OIDCCallbackHandler)\n\t}\n\n\tr.Get(\"/apple\", h.AppleConfigMessage)\n\tr.Get(\"/apple/{platform}\", h.ApplePlatformConfig)\n\tr.Get(\"/windows\", h.WindowsConfigMessage)\n\n\t// TODO(kristoffer): move swagger into a package\n\tr.Get(\"/swagger\", headscale.SwaggerUI)\n\tr.Get(\"/swagger/v1/openapiv2.json\", headscale.SwaggerAPIv1)\n\n\tr.Post(\"/verify\", h.VerifyHandler)\n\n\tif h.cfg.DERP.ServerEnabled {\n\t\tr.HandleFunc(\"/derp\", h.DERPServer.DERPHandler)\n\t\tr.HandleFunc(\"/derp/probe\", derpServer.DERPProbeHandler)\n\t\tr.HandleFunc(\"/derp/latency-check\", derpServer.DERPProbeHandler)\n\t\tr.HandleFunc(\"/bootstrap-dns\", derpServer.DERPBootstrapDNSHandler(h.state.DERPMap()))\n\t}\n\n\tr.Route(\"/api\", func(r chi.Router) {\n\t\tr.Use(h.httpAuthenticationMiddleware)\n\t\tr.HandleFunc(\"/v1/*\", grpcMux.ServeHTTP)\n\t})\n\tr.Get(\"/favicon.ico\", FaviconHandler)\n\tr.Get(\"/\", BlankHandler)\n\n\treturn r\n}\n\n// Serve launches the HTTP and gRPC servers that serve Headscale and the API.\n//\n//nolint:gocyclo // complex server startup function\nfunc (h *Headscale) Serve() error {\n\tvar err error\n\n\tcapver.CanOldCodeBeCleanedUp()\n\n\tif profilingEnabled {\n\t\tif profilingPath != \"\" {\n\t\t\terr = os.MkdirAll(profilingPath, os.ModePerm)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal().Err(err).Msg(\"failed to create profiling directory\")\n\t\t\t}\n\n\t\t\tdefer profile.Start(profile.ProfilePath(profilingPath)).Stop()\n\t\t} else {\n\t\t\tdefer profile.Start().Stop()\n\t\t}\n\t}\n\n\tif dumpConfig {\n\t\tspew.Dump(h.cfg)\n\t}\n\n\tversionInfo := types.GetVersionInfo()\n\tlog.Info().Str(\"version\", versionInfo.Version).Str(\"commit\", versionInfo.Commit).Msg(\"starting headscale\")\n\tlog.Info().\n\t\tStr(\"minimum_version\", capver.TailscaleVersion(capver.MinSupportedCapabilityVersion)).\n\t\tMsg(\"Clients with a lower minimum version will be rejected\")\n\n\th.mapBatcher = mapper.NewBatcherAndMapper(h.cfg, h.state)\n\n\th.mapBatcher.Start()\n\tdefer h.mapBatcher.Close()\n\n\tif h.cfg.DERP.ServerEnabled {\n\t\t// When embedded DERP is enabled we always need a STUN server\n\t\tif h.cfg.DERP.STUNAddr == \"\" {\n\t\t\treturn errSTUNAddressNotSet\n\t\t}\n\n\t\tgo h.DERPServer.ServeSTUN()\n\t}\n\n\tderpMap, err := derp.GetDERPMap(h.cfg.DERP)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting DERPMap: %w\", err)\n\t}\n\n\tif h.cfg.DERP.ServerEnabled && h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {\n\t\tregion, _ := h.DERPServer.GenerateRegion()\n\t\tderpMap.Regions[region.RegionID] = &region\n\t}\n\n\tif len(derpMap.Regions) == 0 {\n\t\treturn errEmptyInitialDERPMap\n\t}\n\n\th.state.SetDERPMap(derpMap)\n\n\t// Start ephemeral node garbage collector and schedule all nodes\n\t// that are already in the database and ephemeral.
If they are still\n\t// around between restarts, they will reconnect and the GC will\n\t// be cancelled.\n\tgo h.ephemeralGC.Start()\n\n\tephmNodes := h.state.ListEphemeralNodes()\n\tfor _, node := range ephmNodes.All() {\n\t\th.ephemeralGC.Schedule(node.ID(), h.cfg.EphemeralNodeInactivityTimeout)\n\t}\n\n\tif h.cfg.DNSConfig.ExtraRecordsPath != \"\" {\n\t\th.extraRecordMan, err = dns.NewExtraRecordsManager(h.cfg.DNSConfig.ExtraRecordsPath)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"setting up extrarecord manager: %w\", err)\n\t\t}\n\n\t\th.cfg.TailcfgDNSConfig.ExtraRecords = h.extraRecordMan.Records()\n\n\t\tgo h.extraRecordMan.Run()\n\t\tdefer h.extraRecordMan.Close()\n\t}\n\n\t// Start all scheduled tasks, e.g. expiring nodes, derp updates and\n\t// records updates\n\tscheduleCtx, scheduleCancel := context.WithCancel(context.Background())\n\tdefer scheduleCancel()\n\n\tgo h.scheduledTasks(scheduleCtx)\n\n\tif zl.GlobalLevel() == zl.TraceLevel {\n\t\tzerolog.RespLog = true\n\t} else {\n\t\tzerolog.RespLog = false\n\t}\n\n\t// Prepare group for running listeners\n\terrorGroup := new(errgroup.Group)\n\n\tctx := context.Background()\n\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\t//\n\t//\n\t// Set up LOCAL listeners\n\t//\n\n\terr = h.ensureUnixSocketIsAbsent()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"removing old socket file: %w\", err)\n\t}\n\n\tsocketDir := filepath.Dir(h.cfg.UnixSocket)\n\n\terr = util.EnsureDir(socketDir)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"setting up unix socket: %w\", err)\n\t}\n\n\tsocketListener, err := new(net.ListenConfig).Listen(context.Background(), \"unix\", h.cfg.UnixSocket)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"setting up gRPC socket: %w\", err)\n\t}\n\n\t// Change socket permissions\n\tif err := os.Chmod(h.cfg.UnixSocket, h.cfg.UnixSocketPermission); err != nil { //nolint:noinlineerr\n\t\treturn fmt.Errorf(\"changing gRPC socket permission: %w\", err)\n\t}\n\n\tgrpcGatewayMux := grpcRuntime.NewServeMux()\n\n\t// Make the grpc-gateway connect to grpc over socket\n\tgrpcGatewayConn, err := grpc.Dial( //nolint:staticcheck // SA1019: deprecated but supported in 1.x\n\t\th.cfg.UnixSocket,\n\t\t[]grpc.DialOption{\n\t\t\tgrpc.WithTransportCredentials(insecure.NewCredentials()),\n\t\t\tgrpc.WithContextDialer(util.GrpcSocketDialer),\n\t\t}...,\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"setting up gRPC gateway via socket: %w\", err)\n\t}\n\n\t// Connect to the gRPC server over localhost to skip\n\t// the authentication.\n\terr = v1.RegisterHeadscaleServiceHandler(ctx, grpcGatewayMux, grpcGatewayConn)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"registering Headscale API service to gRPC: %w\", err)\n\t}\n\n\t// Start the local gRPC server without TLS and without authentication\n\tgrpcSocket := grpc.NewServer(\n\t// Uncomment to debug grpc communication.\n\t// zerolog.UnaryInterceptor(),\n\t)\n\n\tv1.RegisterHeadscaleServiceServer(grpcSocket, newHeadscaleV1APIServer(h))\n\treflection.Register(grpcSocket)\n\n\terrorGroup.Go(func() error { return grpcSocket.Serve(socketListener) })\n\n\t//\n\t//\n\t// Set up REMOTE listeners\n\t//\n\n\ttlsConfig, err := h.getTLSSettings()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"configuring TLS settings: %w\", err)\n\t}\n\n\t//\n\t//\n\t// gRPC setup\n\t//\n\n\t// We are sadly not able to run gRPC and HTTPS (2.0) on the same\n\t// port because the connection mux does not support matching them\n\t// since they are so similar. 
There are multiple open issues and we\n\t// can revisit this if that changes:\n\t// https://github.com/soheilhy/cmux/issues/68\n\t// https://github.com/soheilhy/cmux/issues/91\n\n\tvar (\n\t\tgrpcServer   *grpc.Server\n\t\tgrpcListener net.Listener\n\t)\n\n\tif tlsConfig != nil || h.cfg.GRPCAllowInsecure {\n\t\tlog.Info().Msgf(\"enabling remote gRPC at %s\", h.cfg.GRPCAddr)\n\n\t\tgrpcOptions := []grpc.ServerOption{\n\t\t\tgrpc.ChainUnaryInterceptor(\n\t\t\t\th.grpcAuthenticationInterceptor,\n\t\t\t\t// Uncomment to debug grpc communication.\n\t\t\t\t// zerolog.NewUnaryServerInterceptor(),\n\t\t\t),\n\t\t}\n\n\t\tif tlsConfig != nil {\n\t\t\tgrpcOptions = append(grpcOptions,\n\t\t\t\tgrpc.Creds(credentials.NewTLS(tlsConfig)),\n\t\t\t)\n\t\t} else {\n\t\t\tlog.Warn().Msg(\"gRPC is running without security\")\n\t\t}\n\n\t\tgrpcServer = grpc.NewServer(grpcOptions...)\n\n\t\tv1.RegisterHeadscaleServiceServer(grpcServer, newHeadscaleV1APIServer(h))\n\t\treflection.Register(grpcServer)\n\n\t\tgrpcListener, err = new(net.ListenConfig).Listen(context.Background(), \"tcp\", h.cfg.GRPCAddr)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"binding to TCP address: %w\", err)\n\t\t}\n\n\t\terrorGroup.Go(func() error { return grpcServer.Serve(grpcListener) })\n\n\t\tlog.Info().\n\t\t\tMsgf(\"listening and serving gRPC on: %s\", h.cfg.GRPCAddr)\n\t}\n\n\t//\n\t//\n\t// HTTP setup\n\t//\n\t// This is the regular router that we expose\n\t// over our main Addr\n\trouter := h.createRouter(grpcGatewayMux)\n\n\thttpServer := &http.Server{\n\t\tAddr:        h.cfg.Addr,\n\t\tHandler:     router,\n\t\tReadTimeout: types.HTTPTimeout,\n\n\t\t// Long polling should not have any timeout, this is overridden\n\t\t// further down the chain\n\t\tWriteTimeout: types.HTTPTimeout,\n\t}\n\n\tvar httpListener net.Listener\n\n\tif tlsConfig != nil {\n\t\thttpServer.TLSConfig = tlsConfig\n\t\thttpListener, err = tls.Listen(\"tcp\", h.cfg.Addr, tlsConfig)\n\t} else {\n\t\thttpListener, err = new(net.ListenConfig).Listen(context.Background(), \"tcp\", h.cfg.Addr)\n\t}\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"binding to TCP address: %w\", err)\n\t}\n\n\terrorGroup.Go(func() error { return httpServer.Serve(httpListener) })\n\n\tlog.Info().\n\t\tMsgf(\"listening and serving HTTP on: %s\", h.cfg.Addr)\n\n\t// Only start debug/metrics server if address is configured\n\tvar debugHTTPServer *http.Server\n\n\tvar debugHTTPListener net.Listener\n\n\tif h.cfg.MetricsAddr != \"\" {\n\t\tdebugHTTPListener, err = (&net.ListenConfig{}).Listen(ctx, \"tcp\", h.cfg.MetricsAddr)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"binding to TCP address: %w\", err)\n\t\t}\n\n\t\tdebugHTTPServer = h.debugHTTPServer()\n\n\t\terrorGroup.Go(func() error { return debugHTTPServer.Serve(debugHTTPListener) })\n\n\t\tlog.Info().\n\t\t\tMsgf(\"listening and serving debug and metrics on: %s\", h.cfg.MetricsAddr)\n\t} else {\n\t\tlog.Info().Msg(\"metrics server disabled (metrics_listen_addr is empty)\")\n\t}\n\n\tvar tailsqlContext context.Context\n\n\tif tailsqlEnabled {\n\t\tif h.cfg.Database.Type != types.DatabaseSqlite {\n\t\t\t//nolint:gocritic // exitAfterDefer: Fatal exits during initialization before servers start\n\t\t\tlog.Fatal().\n\t\t\t\tStr(\"type\", h.cfg.Database.Type).\n\t\t\t\tMsgf(\"tailsql only supports %q\", types.DatabaseSqlite)\n\t\t}\n\n\t\tif tailsqlTSKey == \"\" {\n\t\t\t//nolint:gocritic // exitAfterDefer: Fatal exits during initialization before servers start\n\t\t\tlog.Fatal().Msg(\"tailsql requires TS_AUTHKEY to be
set\")\n\t\t}\n\n\t\ttailsqlContext = context.Background()\n\n\t\tgo runTailSQLService(ctx, util.TSLogfWrapper(), tailsqlStateDir, h.cfg.Database.Sqlite.Path) //nolint:errcheck\n\t}\n\n\t// Handle common process-killing signals so we can gracefully shut down:\n\tsigc := make(chan os.Signal, 1)\n\tsignal.Notify(sigc,\n\t\tsyscall.SIGHUP,\n\t\tsyscall.SIGINT,\n\t\tsyscall.SIGTERM,\n\t\tsyscall.SIGQUIT,\n\t\tsyscall.SIGHUP)\n\n\tsigFunc := func(c chan os.Signal) {\n\t\t// Wait for a SIGINT or SIGKILL:\n\t\tfor {\n\t\t\tsig := <-c\n\t\t\tswitch sig {\n\t\t\tcase syscall.SIGHUP:\n\t\t\t\tlog.Info().\n\t\t\t\t\tStr(\"signal\", sig.String()).\n\t\t\t\t\tMsg(\"Received SIGHUP, reloading ACL policy\")\n\n\t\t\t\tif h.cfg.Policy.IsEmpty() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tchanges, err := h.state.ReloadPolicy()\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error().Err(err).Msgf(\"reloading policy\")\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\th.Change(changes...)\n\n\t\t\tdefault:\n\t\t\t\tinfo := func(msg string) { log.Info().Msg(msg) }\n\n\t\t\t\tlog.Info().\n\t\t\t\t\tStr(\"signal\", sig.String()).\n\t\t\t\t\tMsg(\"Received signal to stop, shutting down gracefully\")\n\n\t\t\t\tscheduleCancel()\n\t\t\t\th.ephemeralGC.Close()\n\n\t\t\t\t// Gracefully shut down servers\n\t\t\t\tshutdownCtx, cancel := context.WithTimeout(\n\t\t\t\t\tcontext.WithoutCancel(ctx),\n\t\t\t\t\ttypes.HTTPShutdownTimeout,\n\t\t\t\t)\n\t\t\t\tdefer cancel()\n\n\t\t\t\tif debugHTTPServer != nil {\n\t\t\t\t\tinfo(\"shutting down debug http server\")\n\n\t\t\t\t\terr := debugHTTPServer.Shutdown(shutdownCtx)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Error().Err(err).Msg(\"failed to shutdown prometheus http\")\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tinfo(\"shutting down main http server\")\n\n\t\t\t\terr := httpServer.Shutdown(shutdownCtx)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error().Err(err).Msg(\"failed to shutdown http\")\n\t\t\t\t}\n\n\t\t\t\tinfo(\"closing batcher\")\n\t\t\t\th.mapBatcher.Close()\n\n\t\t\t\tinfo(\"waiting for netmap stream to close\")\n\t\t\t\th.clientStreamsOpen.Wait()\n\n\t\t\t\tinfo(\"shutting down grpc server (socket)\")\n\t\t\t\tgrpcSocket.GracefulStop()\n\n\t\t\t\tif grpcServer != nil {\n\t\t\t\t\tinfo(\"shutting down grpc server (external)\")\n\t\t\t\t\tgrpcServer.GracefulStop()\n\t\t\t\t\tgrpcListener.Close()\n\t\t\t\t}\n\n\t\t\t\tif tailsqlContext != nil {\n\t\t\t\t\tinfo(\"shutting down tailsql\")\n\t\t\t\t\ttailsqlContext.Done()\n\t\t\t\t}\n\n\t\t\t\t// Close network listeners\n\t\t\t\tinfo(\"closing network listeners\")\n\n\t\t\t\tif debugHTTPListener != nil {\n\t\t\t\t\tdebugHTTPListener.Close()\n\t\t\t\t}\n\n\t\t\t\thttpListener.Close()\n\t\t\t\tgrpcGatewayConn.Close()\n\n\t\t\t\t// Stop listening (and unlink the socket if unix type):\n\t\t\t\tinfo(\"closing socket listener\")\n\t\t\t\tsocketListener.Close()\n\n\t\t\t\t// Close state connections\n\t\t\t\tinfo(\"closing state and database\")\n\n\t\t\t\terr = h.state.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error().Err(err).Msg(\"failed to close state\")\n\t\t\t\t}\n\n\t\t\t\tlog.Info().\n\t\t\t\t\tMsg(\"Headscale stopped\")\n\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\terrorGroup.Go(func() error {\n\t\tsigFunc(sigc)\n\n\t\treturn nil\n\t})\n\n\treturn errorGroup.Wait()\n}\n\nfunc (h *Headscale) getTLSSettings() (*tls.Config, error) {\n\tvar err error\n\n\tif h.cfg.TLS.LetsEncrypt.Hostname != \"\" {\n\t\tif !strings.HasPrefix(h.cfg.ServerURL, \"https://\") {\n\t\t\tlog.Warn().\n\t\t\t\tMsg(\"Listening with TLS but ServerURL does not start 
with https://\")\n\t\t}\n\n\t\tcertManager := autocert.Manager{\n\t\t\tPrompt:     autocert.AcceptTOS,\n\t\t\tHostPolicy: autocert.HostWhitelist(h.cfg.TLS.LetsEncrypt.Hostname),\n\t\t\tCache:      autocert.DirCache(h.cfg.TLS.LetsEncrypt.CacheDir),\n\t\t\tClient: &acme.Client{\n\t\t\t\tDirectoryURL: h.cfg.ACMEURL,\n\t\t\t\tHTTPClient: &http.Client{\n\t\t\t\t\tTransport: &acmeLogger{\n\t\t\t\t\t\trt: http.DefaultTransport,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tEmail: h.cfg.ACMEEmail,\n\t\t}\n\n\t\tswitch h.cfg.TLS.LetsEncrypt.ChallengeType {\n\t\tcase types.TLSALPN01ChallengeType:\n\t\t\t// Configuration via autocert with TLS-ALPN-01 (https://tools.ietf.org/html/rfc8737)\n\t\t\t// The RFC requires that the validation is done on port 443; in other words, headscale\n\t\t\t// must be reachable on port 443.\n\t\t\treturn certManager.TLSConfig(), nil\n\n\t\tcase types.HTTP01ChallengeType:\n\t\t\t// Configuration via autocert with HTTP-01. This requires listening on\n\t\t\t// port 80 for the certificate validation in addition to the headscale\n\t\t\t// service, which can be configured to run on any other port.\n\t\t\tserver := &http.Server{\n\t\t\t\tAddr:        h.cfg.TLS.LetsEncrypt.Listen,\n\t\t\t\tHandler:     certManager.HTTPHandler(http.HandlerFunc(h.redirect)),\n\t\t\t\tReadTimeout: types.HTTPTimeout,\n\t\t\t}\n\n\t\t\tgo func() {\n\t\t\t\terr := server.ListenAndServe()\n\t\t\t\tlog.Fatal().\n\t\t\t\t\tCaller().\n\t\t\t\t\tErr(err).\n\t\t\t\t\tMsg(\"failed to set up a HTTP server\")\n\t\t\t}()\n\n\t\t\treturn certManager.TLSConfig(), nil\n\n\t\tdefault:\n\t\t\treturn nil, errUnsupportedLetsEncryptChallengeType\n\t\t}\n\t} else if h.cfg.TLS.CertPath == \"\" {\n\t\tif !strings.HasPrefix(h.cfg.ServerURL, \"http://\") {\n\t\t\tlog.Warn().Msg(\"listening without TLS but ServerURL does not start with http://\")\n\t\t}\n\n\t\treturn nil, err\n\t} else {\n\t\tif !strings.HasPrefix(h.cfg.ServerURL, \"https://\") {\n\t\t\tlog.Warn().Msg(\"listening with TLS but ServerURL does not start with https://\")\n\t\t}\n\n\t\ttlsConfig := &tls.Config{\n\t\t\tNextProtos:   []string{\"http/1.1\"},\n\t\t\tCertificates: make([]tls.Certificate, 1),\n\t\t\tMinVersion:   tls.VersionTLS12,\n\t\t}\n\n\t\ttlsConfig.Certificates[0], err = tls.LoadX509KeyPair(h.cfg.TLS.CertPath, h.cfg.TLS.KeyPath)\n\n\t\treturn tlsConfig, err\n\t}\n}\n\nfunc readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {\n\tdir := filepath.Dir(path)\n\n\terr := util.EnsureDir(dir)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ensuring private key directory: %w\", err)\n\t}\n\n\tprivateKey, err := os.ReadFile(path)\n\tif errors.Is(err, os.ErrNotExist) {\n\t\tlog.Info().Str(\"path\", path).Msg(\"no private key file at path, creating...\")\n\n\t\tmachineKey := key.NewMachine()\n\n\t\tmachineKeyStr, err := machineKey.MarshalText()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\"converting private key to string for saving: %w\",\n\t\t\t\terr,\n\t\t\t)\n\t\t}\n\n\t\terr = os.WriteFile(path, machineKeyStr, privateKeyFileMode)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\"saving private key to disk at path %q: %w\",\n\t\t\t\tpath,\n\t\t\t\terr,\n\t\t\t)\n\t\t}\n\n\t\treturn &machineKey, nil\n\t} else if err != nil {\n\t\treturn nil, fmt.Errorf(\"reading private key file: %w\", err)\n\t}\n\n\ttrimmedPrivateKey := strings.TrimSpace(string(privateKey))\n\n\tvar machineKey key.MachinePrivate\n\tif err = machineKey.UnmarshalText([]byte(trimmedPrivateKey)); err != nil { //nolint:noinlineerr\n\t\treturn nil, 
fmt.Errorf(\"parsing private key: %w\", err)\n\t}\n\n\treturn &machineKey, nil\n}\n\n// Change is used to send changes to nodes.\n// All change should be enqueued here and empty will be automatically\n// ignored.\nfunc (h *Headscale) Change(cs ...change.Change) {\n\th.mapBatcher.AddWork(cs...)\n}\n\n// HTTPHandler returns an http.Handler for the Headscale control server.\n// The handler serves the Tailscale control protocol including the /key\n// endpoint and /ts2021 Noise upgrade path.\nfunc (h *Headscale) HTTPHandler() http.Handler {\n\treturn h.createRouter(grpcRuntime.NewServeMux())\n}\n\n// NoisePublicKey returns the server's Noise protocol public key.\nfunc (h *Headscale) NoisePublicKey() key.MachinePublic {\n\treturn h.noisePrivateKey.Public()\n}\n\n// GetState returns the server's state manager for programmatic access\n// to users, nodes, policies, and other server state.\nfunc (h *Headscale) GetState() *state.State {\n\treturn h.state\n}\n\n// SetServerURLForTest updates the server URL in the configuration.\n// This is needed for test servers where the URL is not known until\n// the HTTP test server starts.\n// It panics when called outside of tests.\nfunc (h *Headscale) SetServerURLForTest(tb testing.TB, url string) {\n\ttb.Helper()\n\n\th.cfg.ServerURL = url\n}\n\n// StartBatcherForTest initialises and starts the map response batcher.\n// It registers a cleanup function on tb to stop the batcher.\n// It panics when called outside of tests.\nfunc (h *Headscale) StartBatcherForTest(tb testing.TB) {\n\ttb.Helper()\n\n\th.mapBatcher = mapper.NewBatcherAndMapper(h.cfg, h.state)\n\th.mapBatcher.Start()\n\ttb.Cleanup(func() { h.mapBatcher.Close() })\n}\n\n// StartEphemeralGCForTest starts the ephemeral node garbage collector.\n// It registers a cleanup function on tb to stop the collector.\n// It panics when called outside of tests.\nfunc (h *Headscale) StartEphemeralGCForTest(tb testing.TB) {\n\ttb.Helper()\n\n\tgo h.ephemeralGC.Start()\n\n\ttb.Cleanup(func() { h.ephemeralGC.Close() })\n}\n\n// Provide some middleware that can inspect the ACME/autocert https calls\n// and log when things are failing.\ntype acmeLogger struct {\n\trt http.RoundTripper\n}\n\n// RoundTrip will log when ACME/autocert failures happen either when err != nil OR\n// when http status codes indicate a failure has occurred.\nfunc (l *acmeLogger) RoundTrip(req *http.Request) (*http.Response, error) {\n\tresp, err := l.rt.RoundTrip(req)\n\tif err != nil {\n\t\tlog.Error().Err(err).Str(\"url\", req.URL.String()).Msg(\"acme request failed\")\n\t\treturn nil, err\n\t}\n\n\tif resp.StatusCode >= http.StatusBadRequest {\n\t\tdefer resp.Body.Close()\n\n\t\tbody, _ := io.ReadAll(resp.Body)\n\t\tlog.Error().Int(\"status_code\", resp.StatusCode).Str(\"url\", req.URL.String()).Bytes(\"body\", body).Msg(\"acme request returned error\")\n\t}\n\n\treturn resp, nil\n}\n\n// zerologRequestLogger implements chi's middleware.LogFormatter\n// to route HTTP request logs through zerolog.\ntype zerologRequestLogger struct{}\n\nfunc (z *zerologRequestLogger) NewLogEntry(\n\tr *http.Request,\n) middleware.LogEntry {\n\treturn &zerologLogEntry{\n\t\tmethod: r.Method,\n\t\tpath:   r.URL.Path,\n\t\tproto:  r.Proto,\n\t\tremote: r.RemoteAddr,\n\t}\n}\n\ntype zerologLogEntry struct {\n\tmethod string\n\tpath   string\n\tproto  string\n\tremote string\n}\n\nfunc (e *zerologLogEntry) Write(\n\tstatus, bytes int,\n\theader http.Header,\n\telapsed time.Duration,\n\textra any,\n) {\n\tlog.Info().\n\t\tStr(\"method\", e.method).\n\t\tStr(\"path\", 
e.path).\n\t\tStr(\"proto\", e.proto).\n\t\tStr(\"remote\", e.remote).\n\t\tInt(\"status\", status).\n\t\tInt(\"bytes\", bytes).\n\t\tDur(\"elapsed\", elapsed).\n\t\tMsg(\"http request\")\n}\n\nfunc (e *zerologLogEntry) Panic(\n\tv any,\n\tstack []byte,\n) {\n\tlog.Error().\n\t\tInterface(\"panic\", v).\n\t\tBytes(\"stack\", stack).\n\t\tMsg(\"http handler panic\")\n}\n"
  },
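  {
    "path": "hscontrol/app_embed_sketch_test.go",
    "content": "// Package hscontrol_test holds a minimal, illustrative sketch of how the\n// programmatic hooks exported by app.go (NewHeadscale, HTTPHandler,\n// SetServerURLForTest, StartBatcherForTest, StartEphemeralGCForTest) fit\n// together. This file is a hypothetical example rather than part of the\n// headscale source tree, and newTestConfig below is an assumed placeholder\n// for real test-config construction.\npackage hscontrol_test\n\nimport (\n\t\"net/http/httptest\"\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n)\n\n// newTestConfig is a hypothetical placeholder; a real implementation would\n// return a *types.Config with a throwaway SQLite database and key paths.\nfunc newTestConfig(t *testing.T) *types.Config {\n\tt.Helper()\n\tt.Skip(\"sketch only: construct a real *types.Config here\")\n\treturn nil\n}\n\nfunc TestEmbeddedControlServerSketch(t *testing.T) {\n\tcfg := newTestConfig(t)\n\n\tapp, err := hscontrol.NewHeadscale(cfg)\n\tif err != nil {\n\t\tt.Fatalf(\"creating headscale: %v\", err)\n\t}\n\n\t// HTTPHandler returns the control-protocol router without calling\n\t// Serve(), so the control server can be hosted by httptest instead.\n\tsrv := httptest.NewServer(app.HTTPHandler())\n\tt.Cleanup(srv.Close)\n\n\t// The final URL is only known once httptest has picked a port.\n\tapp.SetServerURLForTest(t, srv.URL)\n\n\t// Start the background workers that Serve() would normally manage;\n\t// both register their own cleanup on t.\n\tapp.StartBatcherForTest(t)\n\tapp.StartEphemeralGCForTest(t)\n\n\tif app.NoisePublicKey().IsZero() {\n\t\tt.Fatal(\"expected a non-zero noise public key\")\n\t}\n}\n"
  },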
  {
    "path": "hscontrol/assets/assets.go",
    "content": "// Package assets provides embedded static assets for Headscale.\n// All static files (favicon, CSS, SVG) are embedded here for\n// centralized asset management.\npackage assets\n\nimport (\n\t_ \"embed\"\n)\n\n// Favicon is the embedded favicon.png file served at /favicon.ico\n//\n//go:embed favicon.png\nvar Favicon []byte\n\n// CSS is the embedded style.css stylesheet used in HTML templates.\n// Contains Material for MkDocs design system styles.\n//\n//go:embed style.css\nvar CSS string\n\n// SVG is the embedded headscale.svg logo used in HTML templates.\n//\n//go:embed headscale.svg\nvar SVG string\n"
  },
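  {
    "path": "hscontrol/assets/serve_sketch_test.go",
    "content": "// Package assets_test carries a small, hypothetical sketch (not part of the\n// headscale source tree) showing how the embedded assets exported by\n// assets.go can be wired into an http.ServeMux. The route paths are\n// illustrative; headscale's real handlers live in the hscontrol package.\npackage assets_test\n\nimport (\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/assets\"\n)\n\nfunc TestServeEmbeddedAssetsSketch(t *testing.T) {\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"/favicon.ico\", func(w http.ResponseWriter, _ *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"image/png\")\n\t\t_, _ = w.Write(assets.Favicon)\n\t})\n\tmux.HandleFunc(\"/style.css\", func(w http.ResponseWriter, _ *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"text/css\")\n\t\t_, _ = w.Write([]byte(assets.CSS))\n\t})\n\n\tsrv := httptest.NewServer(mux)\n\tt.Cleanup(srv.Close)\n\n\tresp, err := http.Get(srv.URL + \"/favicon.ico\")\n\tif err != nil {\n\t\tt.Fatalf(\"fetching favicon: %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif ct := resp.Header.Get(\"Content-Type\"); ct != \"image/png\" {\n\t\tt.Fatalf(\"unexpected content type: %q\", ct)\n\t}\n}\n"
  },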
  {
    "path": "hscontrol/assets/style.css",
    "content": "/* CSS Variables from Material for MkDocs */\n:root {\n  --md-default-fg-color: rgba(0, 0, 0, 0.87);\n  --md-default-fg-color--light: rgba(0, 0, 0, 0.54);\n  --md-default-fg-color--lighter: rgba(0, 0, 0, 0.32);\n  --md-default-fg-color--lightest: rgba(0, 0, 0, 0.07);\n  --md-code-fg-color: #36464e;\n  --md-code-bg-color: #f5f5f5;\n  --md-primary-fg-color: #4051b5;\n  --md-accent-fg-color: #526cfe;\n  --md-typeset-a-color: var(--md-primary-fg-color);\n  --md-text-font: \"Roboto\", -apple-system, BlinkMacSystemFont, \"Segoe UI\", \"Helvetica Neue\", Arial, sans-serif;\n  --md-code-font: \"Roboto Mono\", \"SF Mono\", Monaco, \"Cascadia Code\", Consolas, \"Courier New\", monospace;\n}\n\n/* Base Typography */\n.md-typeset {\n  font-size: 0.8rem;\n  line-height: 1.6;\n  color: var(--md-default-fg-color);\n  font-family: var(--md-text-font);\n  overflow-wrap: break-word;\n  text-align: left;\n}\n\n/* Headings */\n.md-typeset h1 {\n  color: var(--md-default-fg-color--light);\n  font-size: 2em;\n  line-height: 1.3;\n  margin: 0 0 1.25em;\n  font-weight: 300;\n  letter-spacing: -0.01em;\n}\n\n.md-typeset h1:not(:first-child) {\n  margin-top: 2em;\n}\n\n.md-typeset h2 {\n  font-size: 1.5625em;\n  line-height: 1.4;\n  margin: 2.4em 0 0.64em;\n  font-weight: 300;\n  letter-spacing: -0.01em;\n  color: var(--md-default-fg-color--light);\n}\n\n.md-typeset h3 {\n  font-size: 1.25em;\n  line-height: 1.5;\n  margin: 2em 0 0.8em;\n  font-weight: 400;\n  letter-spacing: -0.01em;\n  color: var(--md-default-fg-color--light);\n}\n\n/* Paragraphs and block elements */\n.md-typeset p {\n  margin: 1em 0;\n}\n\n.md-typeset blockquote,\n.md-typeset dl,\n.md-typeset figure,\n.md-typeset ol,\n.md-typeset pre,\n.md-typeset ul {\n  margin-bottom: 1em;\n  margin-top: 1em;\n}\n\n/* Lists */\n.md-typeset ol,\n.md-typeset ul {\n  padding-left: 2em;\n}\n\n/* Links */\n.md-typeset a {\n  color: var(--md-typeset-a-color);\n  text-decoration: none;\n  word-break: break-word;\n}\n\n.md-typeset a:hover,\n.md-typeset a:focus {\n  color: var(--md-accent-fg-color);\n}\n\n/* Code (inline) */\n.md-typeset code {\n  background-color: var(--md-code-bg-color);\n  color: var(--md-code-fg-color);\n  border-radius: 0.1rem;\n  font-size: 0.85em;\n  font-family: var(--md-code-font);\n  padding: 0 0.2941176471em;\n  word-break: break-word;\n}\n\n/* Code blocks (pre) */\n.md-typeset pre {\n  display: block;\n  line-height: 1.4;\n  margin: 1em 0;\n  overflow-x: auto;\n}\n\n.md-typeset pre > code {\n  background-color: var(--md-code-bg-color);\n  color: var(--md-code-fg-color);\n  display: block;\n  padding: 0.7720588235em 1.1764705882em;\n  font-family: var(--md-code-font);\n  font-size: 0.85em;\n  line-height: 1.4;\n  overflow-wrap: break-word;\n  word-wrap: break-word;\n  white-space: pre-wrap;\n}\n\n/* Links in code */\n.md-typeset a code {\n  color: currentcolor;\n}\n\n/* Logo */\n.headscale-logo {\n  display: block;\n  width: 400px;\n  max-width: 100%;\n  height: auto;\n  margin: 0 0 3rem 0;\n  padding: 0;\n}\n\n@media (max-width: 768px) {\n  .headscale-logo {\n    width: 200px;\n    margin-left: 0;\n  }\n}\n"
  },
  {
    "path": "hscontrol/auth.go",
    "content": "package hscontrol\n\nimport (\n\t\"cmp\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/rs/zerolog/log\"\n\t\"gorm.io/gorm\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/key\"\n)\n\ntype AuthProvider interface {\n\tRegisterHandler(w http.ResponseWriter, r *http.Request)\n\tAuthHandler(w http.ResponseWriter, r *http.Request)\n\tRegisterURL(authID types.AuthID) string\n\tAuthURL(authID types.AuthID) string\n}\n\nfunc (h *Headscale) handleRegister(\n\tctx context.Context,\n\treq tailcfg.RegisterRequest,\n\tmachineKey key.MachinePublic,\n) (*tailcfg.RegisterResponse, error) {\n\t// Check for logout/expiry FIRST, before checking auth key.\n\t// Tailscale clients may send logout requests with BOTH a past expiry AND an auth key.\n\t// A past expiry takes precedence - it's a logout regardless of other fields.\n\tif !req.Expiry.IsZero() && req.Expiry.Before(time.Now()) {\n\t\tlog.Debug().\n\t\t\tStr(\"node.key\", req.NodeKey.ShortString()).\n\t\t\tTime(\"expiry\", req.Expiry).\n\t\t\tBool(\"has_auth\", req.Auth != nil).\n\t\t\tMsg(\"Detected logout attempt with past expiry\")\n\n\t\t// This is a logout attempt (expiry in the past)\n\t\tif node, ok := h.state.GetNodeByNodeKey(req.NodeKey); ok {\n\t\t\tlog.Debug().\n\t\t\t\tEmbedObject(node).\n\t\t\t\tBool(\"is_ephemeral\", node.IsEphemeral()).\n\t\t\t\tBool(\"has_authkey\", node.AuthKey().Valid()).\n\t\t\t\tMsg(\"Found existing node for logout, calling handleLogout\")\n\n\t\t\tresp, err := h.handleLogout(node, req, machineKey)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"handling logout: %w\", err)\n\t\t\t}\n\n\t\t\tif resp != nil {\n\t\t\t\treturn resp, nil\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Warn().\n\t\t\t\tStr(\"node.key\", req.NodeKey.ShortString()).\n\t\t\t\tMsg(\"Logout attempt but node not found in NodeStore\")\n\t\t}\n\t}\n\n\t// If the register request does not contain a Auth struct, it means we are logging\n\t// out an existing node (legacy logout path for clients that send Auth=nil).\n\tif req.Auth == nil {\n\t\t// If the register request present a NodeKey that is currently in use, we will\n\t\t// check if the node needs to be sent to re-auth, or if the node is logging out.\n\t\t// We do not look up nodes by [key.MachinePublic] as it might belong to multiple\n\t\t// nodes, separated by users and this path is handling expiring/logout paths.\n\t\tif node, ok := h.state.GetNodeByNodeKey(req.NodeKey); ok {\n\t\t\t// When tailscaled restarts, it sends RegisterRequest with Auth=nil and Expiry=zero.\n\t\t\t// Return the current node state without modification.\n\t\t\t// See: https://github.com/juanfont/headscale/issues/2862\n\t\t\tif req.Expiry.IsZero() && node.Expiry().Valid() && !node.IsExpired() {\n\t\t\t\treturn nodeToRegisterResponse(node), nil\n\t\t\t}\n\n\t\t\tresp, err := h.handleLogout(node, req, machineKey)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"handling existing node: %w\", err)\n\t\t\t}\n\n\t\t\t// If resp is not nil, we have a response to return to the node.\n\t\t\t// If resp is nil, we should proceed and see if the node is trying to re-auth.\n\t\t\tif resp != nil {\n\t\t\t\treturn resp, nil\n\t\t\t}\n\t\t} else {\n\t\t\t// If the register request is not attempting to register a node, and\n\t\t\t// we cannot match it with an existing node, we consider that unexpected\n\t\t\t// as only register nodes should 
attempt to log out.\n\t\t\tlog.Debug().\n\t\t\t\tStr(\"node.key\", req.NodeKey.ShortString()).\n\t\t\t\tStr(\"machine.key\", machineKey.ShortString()).\n\t\t\t\tBool(\"unexpected\", true).\n\t\t\t\tMsg(\"received register request with no auth, and no existing node\")\n\t\t}\n\t}\n\n\t// If the [tailcfg.RegisterRequest] has a Followup URL, it means that the\n\t// node has already started the registration process and we should wait for\n\t// it to finish the original registration.\n\tif req.Followup != \"\" {\n\t\treturn h.waitForFollowup(ctx, req, machineKey)\n\t}\n\n\t// Pre-authenticated keys are handled slightly differently from interactive\n\t// logins, as they can be processed fully synchronously and we can respond to\n\t// the node with the result while it is waiting.\n\tif isAuthKey(req) {\n\t\tresp, err := h.handleRegisterWithAuthKey(req, machineKey)\n\t\tif err != nil {\n\t\t\t// Preserve HTTPError types so they can be handled properly by the HTTP layer\n\t\t\tif httpErr, ok := errors.AsType[HTTPError](err); ok {\n\t\t\t\treturn nil, httpErr\n\t\t\t}\n\n\t\t\treturn nil, fmt.Errorf(\"handling register with auth key: %w\", err)\n\t\t}\n\n\t\treturn resp, nil\n\t}\n\n\tresp, err := h.handleRegisterInteractive(req, machineKey)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"handling register interactive: %w\", err)\n\t}\n\n\treturn resp, nil\n}\n\n// handleLogout checks if the [tailcfg.RegisterRequest] is a\n// logout attempt from a node. If the node is not attempting to log out,\n// it returns a nil response so the caller can continue and check whether\n// the node is trying to re-authenticate.\nfunc (h *Headscale) handleLogout(\n\tnode types.NodeView,\n\treq tailcfg.RegisterRequest,\n\tmachineKey key.MachinePublic,\n) (*tailcfg.RegisterResponse, error) {\n\t// Fail closed if it looks like this is an attempt to modify a node where\n\t// the node key and the machine key the noise session was started with do\n\t// not align.\n\tif node.MachineKey() != machineKey {\n\t\treturn nil, NewHTTPError(http.StatusUnauthorized, \"node exists with different machine key\", nil)\n\t}\n\n\t// Note: We do NOT return early if req.Auth is set, because Tailscale clients\n\t// may send logout requests with BOTH a past expiry AND an auth key.\n\t// A past expiry indicates logout, regardless of whether Auth is present.\n\t// The expiry check below will handle the logout logic.\n\n\t// If the node is expired and this is not a re-authentication attempt,\n\t// force the client to re-authenticate.\n\t// TODO(kradalby): I wonder if this is a path we ever hit?\n\tif node.IsExpired() {\n\t\tlog.Trace().\n\t\t\tEmbedObject(node).\n\t\t\tInterface(\"reg.req\", req).\n\t\t\tBool(\"unexpected\", true).\n\t\t\tMsg(\"Node key expired, forcing re-authentication\")\n\n\t\treturn &tailcfg.RegisterResponse{\n\t\t\tNodeKeyExpired:    true,\n\t\t\tMachineAuthorized: false,\n\t\t\tAuthURL:           \"\", // Client will need to re-authenticate\n\t\t}, nil\n\t}\n\n\t// If we get here, the node is not currently expired and is not trying to\n\t// re-authenticate.\n\t// The node is likely logging out, but before we run that logic, we will validate\n\t// that the node is not attempting to tamper with/extend its expiry.\n\t// If it is not, we will expire the node or, in the case of an ephemeral node, delete it.\n\n\t// The client is trying to extend its key; this is not allowed.\n\tif req.Expiry.After(time.Now()) {\n\t\treturn nil, NewHTTPError(http.StatusBadRequest, \"extending key is not allowed\", nil)\n\t}\n\n\t// If the request expiry is in the past, we consider it a logout.\n\t// Zero expiry is handled in handleRegister() before calling this function.\n
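\t// For illustration, a logout request typically looks roughly like the\n\t// following (simplified sketch; per the notes above, Auth may also be set\n\t// alongside the past expiry):\n\t//\n\t//\ttailcfg.RegisterRequest{\n\t//\t\tNodeKey: nodeKey.Public(),\n\t//\t\tExpiry:  time.Now().Add(-1 * time.Minute), // past expiry signals logout\n\t//\t}\n\tif 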
req.Expiry.Before(time.Now()) {\n\t\tlog.Debug().\n\t\t\tEmbedObject(node).\n\t\t\tBool(\"is_ephemeral\", node.IsEphemeral()).\n\t\t\tBool(\"has_authkey\", node.AuthKey().Valid()).\n\t\t\tTime(\"req.expiry\", req.Expiry).\n\t\t\tMsg(\"Processing logout request with past expiry\")\n\n\t\tif node.IsEphemeral() {\n\t\t\tlog.Info().\n\t\t\t\tEmbedObject(node).\n\t\t\t\tMsg(\"Deleting ephemeral node during logout\")\n\n\t\t\tc, err := h.state.DeleteNode(node)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"deleting ephemeral node: %w\", err)\n\t\t\t}\n\n\t\t\th.Change(c)\n\n\t\t\treturn &tailcfg.RegisterResponse{\n\t\t\t\tNodeKeyExpired:    true,\n\t\t\t\tMachineAuthorized: false,\n\t\t\t}, nil\n\t\t}\n\n\t\tlog.Debug().\n\t\t\tEmbedObject(node).\n\t\t\tMsg(\"Node is not ephemeral, setting expiry instead of deleting\")\n\t}\n\n\t// Update the internal state with the node's new expiry, marking it as\n\t// logged out.\n\texpiry := req.Expiry\n\n\tupdatedNode, c, err := h.state.SetNodeExpiry(node.ID(), &expiry)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"setting node expiry: %w\", err)\n\t}\n\n\th.Change(c)\n\n\treturn nodeToRegisterResponse(updatedNode), nil\n}\n\n// isAuthKey reports whether the register request is a registration request\n// using a pre auth key.\nfunc isAuthKey(req tailcfg.RegisterRequest) bool {\n\treturn req.Auth != nil && req.Auth.AuthKey != \"\"\n}\n\nfunc nodeToRegisterResponse(node types.NodeView) *tailcfg.RegisterResponse {\n\tresp := &tailcfg.RegisterResponse{\n\t\tNodeKeyExpired: node.IsExpired(),\n\n\t\t// Headscale does not implement the concept of machine authorization\n\t\t// so we always return true here.\n\t\t// Revisit this if #2176 gets implemented.\n\t\tMachineAuthorized: true,\n\t}\n\n\t// For tagged nodes, use the TaggedDevices special user.\n\t// For user-owned nodes, include User and Login information from the actual user.\n\tif node.IsTagged() {\n\t\tresp.User = types.TaggedDevices.View().TailscaleUser()\n\t\tresp.Login = types.TaggedDevices.View().TailscaleLogin()\n\t} else if node.Owner().Valid() {\n\t\tresp.User = node.Owner().TailscaleUser()\n\t\tresp.Login = node.Owner().TailscaleLogin()\n\t}\n\n\treturn resp\n}\n\nfunc (h *Headscale) waitForFollowup(\n\tctx context.Context,\n\treq tailcfg.RegisterRequest,\n\tmachineKey key.MachinePublic,\n) (*tailcfg.RegisterResponse, error) {\n\tfu, err := url.Parse(req.Followup)\n\tif err != nil {\n\t\treturn nil, NewHTTPError(http.StatusUnauthorized, \"invalid followup URL\", err)\n\t}\n\n\tfollowupReg, err := types.AuthIDFromString(strings.ReplaceAll(fu.Path, \"/register/\", \"\"))\n\tif err != nil {\n\t\treturn nil, NewHTTPError(http.StatusUnauthorized, \"invalid registration ID\", err)\n\t}\n\n\tif reg, ok := h.state.GetAuthCacheEntry(followupReg); ok {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil, NewHTTPError(http.StatusUnauthorized, \"registration timed out\", err)\n\t\tcase verdict := <-reg.WaitForAuth():\n\t\t\tif verdict.Accept() {\n\t\t\t\tif !verdict.Node.Valid() {\n\t\t\t\t\t// registration is expired in the cache, instruct the client to try a new registration\n\t\t\t\t\treturn h.reqToNewRegisterResponse(req, machineKey)\n\t\t\t\t}\n\n\t\t\t\treturn nodeToRegisterResponse(verdict.Node), nil\n\t\t\t}\n\t\t}\n\t}\n\n\t// if the follow-up registration isn't found anymore, instruct the client to try a new registration\n\treturn h.reqToNewRegisterResponse(req, machineKey)\n}\n\n
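// A client Followup URL is expected to have the shape\n// https://<server>/register/<auth-id> (host shown generically here for\n// illustration); waitForFollowup extracts the trailing auth ID from the path\n// and blocks until that registration is resolved or the request context ends.\n\n// reqToNewRegisterResponse refreshes the registration flow by creating a new\n// registration ID and returning 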
the corresponding AuthURL so the client can\n// restart the authentication process.\nfunc (h *Headscale) reqToNewRegisterResponse(\n\treq tailcfg.RegisterRequest,\n\tmachineKey key.MachinePublic,\n) (*tailcfg.RegisterResponse, error) {\n\tnewAuthID, err := types.NewAuthID()\n\tif err != nil {\n\t\treturn nil, NewHTTPError(http.StatusInternalServerError, \"failed to generate registration ID\", err)\n\t}\n\n\t// Ensure we have a valid hostname\n\thostname := util.EnsureHostname(\n\t\treq.Hostinfo.View(),\n\t\tmachineKey.String(),\n\t\treq.NodeKey.String(),\n\t)\n\n\t// Ensure we have valid hostinfo\n\thostinfo := cmp.Or(req.Hostinfo, &tailcfg.Hostinfo{})\n\thostinfo.Hostname = hostname\n\n\tnodeToRegister := types.Node{\n\t\tHostname:   hostname,\n\t\tMachineKey: machineKey,\n\t\tNodeKey:    req.NodeKey,\n\t\tHostinfo:   hostinfo,\n\t\tLastSeen:   new(time.Now()),\n\t}\n\n\tif !req.Expiry.IsZero() {\n\t\tnodeToRegister.Expiry = &req.Expiry\n\t}\n\n\tauthRegReq := types.NewRegisterAuthRequest(nodeToRegister)\n\n\tlog.Info().Msgf(\"new followup node registration using auth id: %s\", newAuthID)\n\th.state.SetAuthCacheEntry(newAuthID, authRegReq)\n\n\treturn &tailcfg.RegisterResponse{\n\t\tAuthURL: h.authProvider.RegisterURL(newAuthID),\n\t}, nil\n}\n\nfunc (h *Headscale) handleRegisterWithAuthKey(\n\treq tailcfg.RegisterRequest,\n\tmachineKey key.MachinePublic,\n) (*tailcfg.RegisterResponse, error) {\n\tnode, changed, err := h.state.HandleNodeFromPreAuthKey(\n\t\treq,\n\t\tmachineKey,\n\t)\n\tif err != nil {\n\t\tif errors.Is(err, gorm.ErrRecordNotFound) {\n\t\t\treturn nil, NewHTTPError(http.StatusUnauthorized, \"invalid pre auth key\", nil)\n\t\t}\n\n\t\tif perr, ok := errors.AsType[types.PAKError](err); ok {\n\t\t\treturn nil, NewHTTPError(http.StatusUnauthorized, perr.Error(), nil)\n\t\t}\n\n\t\treturn nil, err\n\t}\n\n\t// If node is not valid, it means an ephemeral node was deleted during logout\n\tif !node.Valid() {\n\t\th.Change(changed)\n\t\treturn nil, nil //nolint:nilnil // intentional: no node to return when ephemeral deleted\n\t}\n\n\t// This is a bit of back and forth, but we have a chicken-and-egg\n\t// dependency here.\n\t// Because of the way the policy manager works, we need to have the node\n\t// in the database, then add it to the policy manager, and only then can we\n\t// approve the route. This means we get this dance where the node is\n\t// first added to the database, then we add it to the policy manager via\n\t// nodesChangedHook, and then we can auto approve the routes.\n\t// As that only approves the struct object, we need to save it again and\n\t// ensure we send an update.\n\t// This works, but might be another good candidate for doing some sort of\n\t// eventbus.\n\t// TODO(kradalby): This needs to be run as part of the batcher maybe,\n\t// now since we don't update the node/pol here anymore.\n\troutesChange, err := h.state.AutoApproveRoutes(node)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"auto approving routes: %w\", err)\n\t}\n\n
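\t// In short, the dance described above (informal summary):\n\t//\n\t//\t1. the node is persisted via HandleNodeFromPreAuthKey\n\t//\t2. the node is added to the policy manager (nodesChangedHook)\n\t//\t3. AutoApproveRoutes approves whatever the policy allows\n\t//\t4. the resulting changes are distributed below\n\n\t// Send both changes. 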
Empty changes are ignored by Change().\n\th.Change(changed, routesChange)\n\n\tresp := &tailcfg.RegisterResponse{\n\t\tMachineAuthorized: true,\n\t\tNodeKeyExpired:    node.IsExpired(),\n\t\tUser:              node.Owner().TailscaleUser(),\n\t\tLogin:             node.Owner().TailscaleLogin(),\n\t}\n\n\tlog.Trace().\n\t\tCaller().\n\t\tInterface(\"reg.resp\", resp).\n\t\tInterface(\"reg.req\", req).\n\t\tEmbedObject(node).\n\t\tMsg(\"RegisterResponse\")\n\n\treturn resp, nil\n}\n\nfunc (h *Headscale) handleRegisterInteractive(\n\treq tailcfg.RegisterRequest,\n\tmachineKey key.MachinePublic,\n) (*tailcfg.RegisterResponse, error) {\n\tauthID, err := types.NewAuthID()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"generating registration ID: %w\", err)\n\t}\n\n\t// Ensure we have a valid hostname\n\thostname := util.EnsureHostname(\n\t\treq.Hostinfo.View(),\n\t\tmachineKey.String(),\n\t\treq.NodeKey.String(),\n\t)\n\n\t// Ensure we have valid hostinfo\n\thostinfo := cmp.Or(req.Hostinfo, &tailcfg.Hostinfo{})\n\tif req.Hostinfo == nil {\n\t\tlog.Warn().\n\t\t\tStr(\"machine.key\", machineKey.ShortString()).\n\t\t\tStr(\"node.key\", req.NodeKey.ShortString()).\n\t\t\tStr(\"generated.hostname\", hostname).\n\t\t\tMsg(\"Received registration request with nil hostinfo, generated default hostname\")\n\t} else if req.Hostinfo.Hostname == \"\" {\n\t\tlog.Warn().\n\t\t\tStr(\"machine.key\", machineKey.ShortString()).\n\t\t\tStr(\"node.key\", req.NodeKey.ShortString()).\n\t\t\tStr(\"generated.hostname\", hostname).\n\t\t\tMsg(\"Received registration request with empty hostname, generated default\")\n\t}\n\n\thostinfo.Hostname = hostname\n\n\tnodeToRegister := types.Node{\n\t\tHostname:   hostname,\n\t\tMachineKey: machineKey,\n\t\tNodeKey:    req.NodeKey,\n\t\tHostinfo:   hostinfo,\n\t\tLastSeen:   new(time.Now()),\n\t}\n\n\tif !req.Expiry.IsZero() {\n\t\tnodeToRegister.Expiry = &req.Expiry\n\t}\n\n\tauthRegReq := types.NewRegisterAuthRequest(nodeToRegister)\n\n\th.state.SetAuthCacheEntry(\n\t\tauthID,\n\t\tauthRegReq,\n\t)\n\n\tlog.Info().Msgf(\"starting node registration using auth id: %s\", authID)\n\n\treturn &tailcfg.RegisterResponse{\n\t\tAuthURL: h.authProvider.RegisterURL(authID),\n\t}, nil\n}\n"
  },
  {
    "path": "hscontrol/auth_tags_test.go",
    "content": "package hscontrol\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/key\"\n)\n\n// TestTaggedPreAuthKeyCreatesTaggedNode tests that a PreAuthKey with tags creates\n// a tagged node with:\n// - Tags from the PreAuthKey\n// - Nil UserID (tagged nodes are owned by tags, not a user)\n// - IsTagged() returns true.\nfunc TestTaggedPreAuthKeyCreatesTaggedNode(t *testing.T) {\n\tapp := createTestApp(t)\n\n\tuser := app.state.CreateUserForTest(\"tag-creator\")\n\ttags := []string{\"tag:server\", \"tag:prod\"}\n\n\t// Create a tagged PreAuthKey\n\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, tags)\n\trequire.NoError(t, err)\n\trequire.NotEmpty(t, pak.Tags, \"PreAuthKey should have tags\")\n\trequire.ElementsMatch(t, tags, pak.Tags, \"PreAuthKey should have specified tags\")\n\n\t// Register a node using the tagged key\n\tmachineKey := key.NewMachine()\n\tnodeKey := key.NewNode()\n\n\tregReq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak.Key,\n\t\t},\n\t\tNodeKey: nodeKey.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"tagged-node\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\tresp, err := app.handleRegisterWithAuthKey(regReq, machineKey.Public())\n\trequire.NoError(t, err)\n\trequire.True(t, resp.MachineAuthorized)\n\n\t// Verify the node was created with tags\n\tnode, found := app.state.GetNodeByNodeKey(nodeKey.Public())\n\trequire.True(t, found)\n\n\t// Tagged nodes are owned by their tags, not a user.\n\tassert.True(t, node.IsTagged(), \"Node should be tagged\")\n\tassert.ElementsMatch(t, tags, node.Tags().AsSlice(), \"Node should have tags from PreAuthKey\")\n\tassert.False(t, node.UserID().Valid(), \"Tagged node should not have UserID\")\n\n\t// Verify node is identified correctly\n\tassert.True(t, node.IsTagged(), \"Tagged node is not user-owned\")\n\tassert.True(t, node.HasTag(\"tag:server\"), \"Node should have tag:server\")\n\tassert.True(t, node.HasTag(\"tag:prod\"), \"Node should have tag:prod\")\n\tassert.False(t, node.HasTag(\"tag:other\"), \"Node should not have tag:other\")\n}\n\n// TestReAuthDoesNotReapplyTags tests that when a node re-authenticates using the\n// same PreAuthKey, the tags are NOT re-applied. Tags are only set during initial\n// authentication. 
This is critical for the container restart scenario (#2830).\n//\n// NOTE: This test verifies that re-authentication preserves the node's current tags\n// without testing tag modification via SetNodeTags (which requires ACL policy setup).\nfunc TestReAuthDoesNotReapplyTags(t *testing.T) {\n\tapp := createTestApp(t)\n\n\tuser := app.state.CreateUserForTest(\"tag-creator\")\n\tinitialTags := []string{\"tag:server\", \"tag:dev\"}\n\n\t// Create a tagged PreAuthKey with reusable=true for re-auth\n\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, initialTags)\n\trequire.NoError(t, err)\n\n\t// Initial registration\n\tmachineKey := key.NewMachine()\n\tnodeKey := key.NewNode()\n\n\tregReq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak.Key,\n\t\t},\n\t\tNodeKey: nodeKey.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"reauth-test-node\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\tresp, err := app.handleRegisterWithAuthKey(regReq, machineKey.Public())\n\trequire.NoError(t, err)\n\trequire.True(t, resp.MachineAuthorized)\n\n\t// Verify initial tags\n\tnode, found := app.state.GetNodeByNodeKey(nodeKey.Public())\n\trequire.True(t, found)\n\trequire.True(t, node.IsTagged())\n\trequire.ElementsMatch(t, initialTags, node.Tags().AsSlice())\n\n\t// Re-authenticate with the SAME PreAuthKey (container restart scenario)\n\t// Key behavior: Tags should NOT be re-applied during re-auth\n\treAuthReq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak.Key, // Same key\n\t\t},\n\t\tNodeKey: nodeKey.Public(), // Same node key\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"reauth-test-node\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\treAuthResp, err := app.handleRegisterWithAuthKey(reAuthReq, machineKey.Public())\n\trequire.NoError(t, err)\n\trequire.True(t, reAuthResp.MachineAuthorized)\n\n\t// CRITICAL: Tags should remain unchanged after re-auth\n\t// They should match the original tags, proving they weren't re-applied\n\tnodeAfterReauth, found := app.state.GetNodeByNodeKey(nodeKey.Public())\n\trequire.True(t, found)\n\tassert.True(t, nodeAfterReauth.IsTagged(), \"Node should still be tagged\")\n\tassert.ElementsMatch(t, initialTags, nodeAfterReauth.Tags().AsSlice(), \"Tags should remain unchanged on re-auth\")\n\n\t// Verify only one node was created (no duplicates).\n\t// Tagged nodes are not indexed by user, so check the global list.\n\tallNodes := app.state.ListNodes()\n\tassert.Equal(t, 1, allNodes.Len(), \"Should have exactly one node\")\n}\n\n// NOTE: TestSetTagsOnUserOwnedNode functionality is covered by gRPC tests in grpcv1_test.go\n// which properly handle ACL policy setup. The test verifies that SetTags can convert\n// user-owned nodes to tagged nodes while preserving UserID.\n\n// TestCannotRemoveAllTags tests that attempting to remove all tags from a\n// tagged node fails with ErrCannotRemoveAllTags. 
Once a node is tagged,\n// it must always have at least one tag (Tailscale requirement).\nfunc TestCannotRemoveAllTags(t *testing.T) {\n\tapp := createTestApp(t)\n\n\tuser := app.state.CreateUserForTest(\"tag-creator\")\n\ttags := []string{\"tag:server\"}\n\n\t// Create a tagged node\n\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, tags)\n\trequire.NoError(t, err)\n\n\tmachineKey := key.NewMachine()\n\tnodeKey := key.NewNode()\n\n\tregReq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak.Key,\n\t\t},\n\t\tNodeKey: nodeKey.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"tagged-node\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\tresp, err := app.handleRegisterWithAuthKey(regReq, machineKey.Public())\n\trequire.NoError(t, err)\n\trequire.True(t, resp.MachineAuthorized)\n\n\t// Verify node is tagged\n\tnode, found := app.state.GetNodeByNodeKey(nodeKey.Public())\n\trequire.True(t, found)\n\trequire.True(t, node.IsTagged())\n\n\t// Attempt to remove all tags by setting empty array\n\t_, _, err = app.state.SetNodeTags(node.ID(), []string{})\n\trequire.Error(t, err, \"Should not be able to remove all tags\")\n\trequire.ErrorIs(t, err, types.ErrCannotRemoveAllTags, \"Error should be ErrCannotRemoveAllTags\")\n\n\t// Verify node still has original tags\n\tnodeAfter, found := app.state.GetNodeByNodeKey(nodeKey.Public())\n\trequire.True(t, found)\n\tassert.True(t, nodeAfter.IsTagged(), \"Node should still be tagged\")\n\tassert.ElementsMatch(t, tags, nodeAfter.Tags().AsSlice(), \"Tags should be unchanged\")\n}\n\n// TestUserOwnedNodeCreatedWithUntaggedPreAuthKey tests that using a PreAuthKey\n// without tags creates a user-owned node (no tags, UserID is the owner).\nfunc TestUserOwnedNodeCreatedWithUntaggedPreAuthKey(t *testing.T) {\n\tapp := createTestApp(t)\n\n\tuser := app.state.CreateUserForTest(\"node-owner\")\n\n\t// Create an untagged PreAuthKey\n\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)\n\trequire.NoError(t, err)\n\trequire.Empty(t, pak.Tags, \"PreAuthKey should have no tags\")\n\n\t// Register a node\n\tmachineKey := key.NewMachine()\n\tnodeKey := key.NewNode()\n\n\tregReq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak.Key,\n\t\t},\n\t\tNodeKey: nodeKey.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"user-owned-node\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\tresp, err := app.handleRegisterWithAuthKey(regReq, machineKey.Public())\n\trequire.NoError(t, err)\n\trequire.True(t, resp.MachineAuthorized)\n\n\t// Verify node is user-owned\n\tnode, found := app.state.GetNodeByNodeKey(nodeKey.Public())\n\trequire.True(t, found)\n\n\t// Critical assertions for user-owned node\n\tassert.False(t, node.IsTagged(), \"Node should be user-owned (not tagged)\")\n\tassert.Empty(t, node.Tags().AsSlice(), \"Node should have no tags\")\n\tassert.True(t, node.UserID().Valid(), \"Node should have UserID\")\n\tassert.Equal(t, user.ID, node.UserID().Get(), \"UserID should be the PreAuthKey owner\")\n}\n\n
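// In these tests the tags come straight from the PreAuthKey. In a real\n// deployment the same tags would typically also be declared in the ACL\n// policy via tagOwners, e.g. (illustrative snippet, mirroring the policies\n// used later in this package):\n//\n//\t{\n//\t  \"tagOwners\": {\n//\t    \"tag:server\": [\"tag-creator@\"],\n//\t    \"tag:prod\":   [\"tag-creator@\"]\n//\t  }\n//\t}\n\n// TestMultipleNodesWithSameReusableTaggedPreAuthKey tests that a reusable\n// PreAuthKey with tags can be used to register multiple nodes, and all nodes\n// receive the same tags from the key.\nfunc TestMultipleNodesWithSameReusableTaggedPreAuthKey(t *testing.T) 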
{\n\tapp := createTestApp(t)\n\n\tuser := app.state.CreateUserForTest(\"tag-creator\")\n\ttags := []string{\"tag:server\", \"tag:prod\"}\n\n\t// Create a REUSABLE tagged PreAuthKey\n\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, tags)\n\trequire.NoError(t, err)\n\trequire.ElementsMatch(t, tags, pak.Tags)\n\n\t// Register first node\n\tmachineKey1 := key.NewMachine()\n\tnodeKey1 := key.NewNode()\n\n\tregReq1 := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak.Key,\n\t\t},\n\t\tNodeKey: nodeKey1.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"tagged-node-1\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\tresp1, err := app.handleRegisterWithAuthKey(regReq1, machineKey1.Public())\n\trequire.NoError(t, err)\n\trequire.True(t, resp1.MachineAuthorized)\n\n\t// Register second node with SAME PreAuthKey\n\tmachineKey2 := key.NewMachine()\n\tnodeKey2 := key.NewNode()\n\n\tregReq2 := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak.Key, // Same key\n\t\t},\n\t\tNodeKey: nodeKey2.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"tagged-node-2\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\tresp2, err := app.handleRegisterWithAuthKey(regReq2, machineKey2.Public())\n\trequire.NoError(t, err)\n\trequire.True(t, resp2.MachineAuthorized)\n\n\t// Verify both nodes exist and have the same tags\n\tnode1, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\trequire.True(t, found)\n\tnode2, found := app.state.GetNodeByNodeKey(nodeKey2.Public())\n\trequire.True(t, found)\n\n\t// Both nodes should be tagged with the same tags\n\tassert.True(t, node1.IsTagged(), \"First node should be tagged\")\n\tassert.True(t, node2.IsTagged(), \"Second node should be tagged\")\n\tassert.ElementsMatch(t, tags, node1.Tags().AsSlice(), \"First node should have PreAuthKey tags\")\n\tassert.ElementsMatch(t, tags, node2.Tags().AsSlice(), \"Second node should have PreAuthKey tags\")\n\n\t// Tagged nodes should not have UserID set.\n\tassert.False(t, node1.UserID().Valid(), \"First node should not have UserID\")\n\tassert.False(t, node2.UserID().Valid(), \"Second node should not have UserID\")\n\n\t// Verify we have exactly 2 nodes.\n\tallNodes := app.state.ListNodes()\n\tassert.Equal(t, 2, allNodes.Len(), \"Should have exactly two nodes\")\n}\n\n// TestNonReusableTaggedPreAuthKey tests that a non-reusable PreAuthKey with tags\n// can only be used once. 
The second attempt should fail.\nfunc TestNonReusableTaggedPreAuthKey(t *testing.T) {\n\tapp := createTestApp(t)\n\n\tuser := app.state.CreateUserForTest(\"tag-creator\")\n\ttags := []string{\"tag:server\"}\n\n\t// Create a NON-REUSABLE tagged PreAuthKey\n\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), false, false, nil, tags)\n\trequire.NoError(t, err)\n\trequire.ElementsMatch(t, tags, pak.Tags)\n\n\t// Register first node - should succeed\n\tmachineKey1 := key.NewMachine()\n\tnodeKey1 := key.NewNode()\n\n\tregReq1 := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak.Key,\n\t\t},\n\t\tNodeKey: nodeKey1.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"tagged-node-1\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\tresp1, err := app.handleRegisterWithAuthKey(regReq1, machineKey1.Public())\n\trequire.NoError(t, err)\n\trequire.True(t, resp1.MachineAuthorized)\n\n\t// Verify first node was created with tags\n\tnode1, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\trequire.True(t, found)\n\tassert.True(t, node1.IsTagged())\n\tassert.ElementsMatch(t, tags, node1.Tags().AsSlice())\n\n\t// Attempt to register second node with SAME non-reusable key - should fail\n\tmachineKey2 := key.NewMachine()\n\tnodeKey2 := key.NewNode()\n\n\tregReq2 := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak.Key, // Same non-reusable key\n\t\t},\n\t\tNodeKey: nodeKey2.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"tagged-node-2\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\t_, err = app.handleRegisterWithAuthKey(regReq2, machineKey2.Public())\n\trequire.Error(t, err, \"Should not be able to reuse non-reusable PreAuthKey\")\n\n\t// Verify only one node was created.\n\tallNodes := app.state.ListNodes()\n\tassert.Equal(t, 1, allNodes.Len(), \"Should have exactly one node\")\n}\n\n// TestExpiredTaggedPreAuthKey tests that an expired PreAuthKey with tags\n// cannot be used to register a node.\nfunc TestExpiredTaggedPreAuthKey(t *testing.T) {\n\tapp := createTestApp(t)\n\n\tuser := app.state.CreateUserForTest(\"tag-creator\")\n\ttags := []string{\"tag:server\"}\n\n\t// Create a PreAuthKey that expires immediately\n\texpiration := time.Now().Add(-1 * time.Hour) // Already expired\n\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), false, false, &expiration, tags)\n\trequire.NoError(t, err)\n\trequire.ElementsMatch(t, tags, pak.Tags)\n\n\t// Attempt to register with expired key\n\tmachineKey := key.NewMachine()\n\tnodeKey := key.NewNode()\n\n\tregReq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak.Key,\n\t\t},\n\t\tNodeKey: nodeKey.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"tagged-node\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\t_, err = app.handleRegisterWithAuthKey(regReq, machineKey.Public())\n\trequire.Error(t, err, \"Should not be able to use expired PreAuthKey\")\n\n\t// Verify no node was created\n\t_, found := app.state.GetNodeByNodeKey(nodeKey.Public())\n\tassert.False(t, found, \"No node should be created with expired key\")\n}\n\n// TestSingleVsMultipleTags tests that PreAuthKeys work correctly with both\n// a single tag and multiple tags.\nfunc TestSingleVsMultipleTags(t *testing.T) {\n\tapp := createTestApp(t)\n\n\tuser := app.state.CreateUserForTest(\"tag-creator\")\n\n\t// Test with single tag\n\tsingleTag := []string{\"tag:server\"}\n\tpak1, err := 
app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, singleTag)\n\trequire.NoError(t, err)\n\n\tmachineKey1 := key.NewMachine()\n\tnodeKey1 := key.NewNode()\n\n\tregReq1 := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak1.Key,\n\t\t},\n\t\tNodeKey: nodeKey1.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"single-tag-node\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\tresp1, err := app.handleRegisterWithAuthKey(regReq1, machineKey1.Public())\n\trequire.NoError(t, err)\n\trequire.True(t, resp1.MachineAuthorized)\n\n\tnode1, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\trequire.True(t, found)\n\tassert.True(t, node1.IsTagged())\n\tassert.ElementsMatch(t, singleTag, node1.Tags().AsSlice())\n\n\t// Test with multiple tags\n\tmultipleTags := []string{\"tag:server\", \"tag:prod\", \"tag:database\"}\n\tpak2, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, multipleTags)\n\trequire.NoError(t, err)\n\n\tmachineKey2 := key.NewMachine()\n\tnodeKey2 := key.NewNode()\n\n\tregReq2 := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak2.Key,\n\t\t},\n\t\tNodeKey: nodeKey2.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"multi-tag-node\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\tresp2, err := app.handleRegisterWithAuthKey(regReq2, machineKey2.Public())\n\trequire.NoError(t, err)\n\trequire.True(t, resp2.MachineAuthorized)\n\n\tnode2, found := app.state.GetNodeByNodeKey(nodeKey2.Public())\n\trequire.True(t, found)\n\tassert.True(t, node2.IsTagged())\n\tassert.ElementsMatch(t, multipleTags, node2.Tags().AsSlice())\n\n\t// Verify HasTag works for all tags\n\tassert.True(t, node2.HasTag(\"tag:server\"))\n\tassert.True(t, node2.HasTag(\"tag:prod\"))\n\tassert.True(t, node2.HasTag(\"tag:database\"))\n\tassert.False(t, node2.HasTag(\"tag:other\"))\n}\n\n// TestTaggedPreAuthKeyDisablesKeyExpiry tests that nodes registered with\n// a tagged PreAuthKey have key expiry disabled (expiry is nil).\nfunc TestTaggedPreAuthKeyDisablesKeyExpiry(t *testing.T) {\n\tapp := createTestApp(t)\n\n\tuser := app.state.CreateUserForTest(\"tag-creator\")\n\ttags := []string{\"tag:server\", \"tag:prod\"}\n\n\t// Create a tagged PreAuthKey\n\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, tags)\n\trequire.NoError(t, err)\n\trequire.ElementsMatch(t, tags, pak.Tags)\n\n\t// Register a node using the tagged key\n\tmachineKey := key.NewMachine()\n\tnodeKey := key.NewNode()\n\n\t// Client requests an expiry time, but for tagged nodes it should be ignored\n\tclientRequestedExpiry := time.Now().Add(24 * time.Hour)\n\n\tregReq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak.Key,\n\t\t},\n\t\tNodeKey: nodeKey.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"tagged-expiry-test\",\n\t\t},\n\t\tExpiry: clientRequestedExpiry,\n\t}\n\n\tresp, err := app.handleRegisterWithAuthKey(regReq, machineKey.Public())\n\trequire.NoError(t, err)\n\trequire.True(t, resp.MachineAuthorized)\n\n\t// Verify the node has key expiry DISABLED (expiry is nil/zero)\n\tnode, found := app.state.GetNodeByNodeKey(nodeKey.Public())\n\trequire.True(t, found)\n\n\t// Critical assertion: Tagged nodes should have expiry disabled\n\tassert.True(t, node.IsTagged(), \"Node should be tagged\")\n\tassert.False(t, node.Expiry().Valid(), \"Tagged node should have expiry disabled (nil)\")\n}\n\n// 
TestUntaggedPreAuthKeyPreservesKeyExpiry tests that nodes registered with\n// an untagged PreAuthKey preserve the client's requested key expiry.\nfunc TestUntaggedPreAuthKeyPreservesKeyExpiry(t *testing.T) {\n\tapp := createTestApp(t)\n\n\tuser := app.state.CreateUserForTest(\"node-owner\")\n\n\t// Create an untagged PreAuthKey\n\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)\n\trequire.NoError(t, err)\n\trequire.Empty(t, pak.Tags, \"PreAuthKey should not be tagged\")\n\n\t// Register a node\n\tmachineKey := key.NewMachine()\n\tnodeKey := key.NewNode()\n\n\t// Client requests an expiry time\n\tclientRequestedExpiry := time.Now().Add(24 * time.Hour)\n\n\tregReq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak.Key,\n\t\t},\n\t\tNodeKey: nodeKey.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"untagged-expiry-test\",\n\t\t},\n\t\tExpiry: clientRequestedExpiry,\n\t}\n\n\tresp, err := app.handleRegisterWithAuthKey(regReq, machineKey.Public())\n\trequire.NoError(t, err)\n\trequire.True(t, resp.MachineAuthorized)\n\n\t// Verify the node has the client's requested expiry\n\tnode, found := app.state.GetNodeByNodeKey(nodeKey.Public())\n\trequire.True(t, found)\n\n\t// Critical assertion: User-owned nodes should preserve client expiry\n\tassert.False(t, node.IsTagged(), \"Node should not be tagged\")\n\tassert.True(t, node.Expiry().Valid(), \"User-owned node should have expiry set\")\n\t// Allow some tolerance for test execution time\n\tassert.WithinDuration(t, clientRequestedExpiry, node.Expiry().Get(), 5*time.Second,\n\t\t\"User-owned node should have the client's requested expiry\")\n}\n\n// TestTaggedNodeReauthPreservesDisabledExpiry tests that when a tagged node\n// re-authenticates, the disabled expiry is preserved (not updated from client request).\nfunc TestTaggedNodeReauthPreservesDisabledExpiry(t *testing.T) {\n\tapp := createTestApp(t)\n\n\tuser := app.state.CreateUserForTest(\"tag-creator\")\n\ttags := []string{\"tag:server\"}\n\n\t// Create a reusable tagged PreAuthKey\n\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, tags)\n\trequire.NoError(t, err)\n\n\t// Initial registration\n\tmachineKey := key.NewMachine()\n\tnodeKey := key.NewNode()\n\n\tregReq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak.Key,\n\t\t},\n\t\tNodeKey: nodeKey.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"tagged-reauth-test\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\tresp, err := app.handleRegisterWithAuthKey(regReq, machineKey.Public())\n\trequire.NoError(t, err)\n\trequire.True(t, resp.MachineAuthorized)\n\n\t// Verify initial registration has expiry disabled\n\tnode, found := app.state.GetNodeByNodeKey(nodeKey.Public())\n\trequire.True(t, found)\n\trequire.True(t, node.IsTagged())\n\trequire.False(t, node.Expiry().Valid(), \"Initial registration should have expiry disabled\")\n\n\t// Re-authenticate with a NEW expiry request (should be ignored for tagged nodes)\n\tnewRequestedExpiry := time.Now().Add(48 * time.Hour)\n\treAuthReq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak.Key,\n\t\t},\n\t\tNodeKey: nodeKey.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"tagged-reauth-test\",\n\t\t},\n\t\tExpiry: newRequestedExpiry, // Client requests new expiry\n\t}\n\n\treAuthResp, err := app.handleRegisterWithAuthKey(reAuthReq, machineKey.Public())\n\trequire.NoError(t, 
err)\n\trequire.True(t, reAuthResp.MachineAuthorized)\n\n\t// Verify expiry is STILL disabled after re-auth\n\tnodeAfterReauth, found := app.state.GetNodeByNodeKey(nodeKey.Public())\n\trequire.True(t, found)\n\n\t// Critical assertion: Tagged node should preserve disabled expiry on re-auth\n\tassert.True(t, nodeAfterReauth.IsTagged(), \"Node should still be tagged\")\n\tassert.False(t, nodeAfterReauth.Expiry().Valid(),\n\t\t\"Tagged node should have expiry PRESERVED as disabled after re-auth\")\n}\n\n// TestExpiryDuringPersonalToTaggedConversion tests that when a personal node\n// is converted to tagged via reauth with RequestTags, the expiry is cleared to nil.\n// BUG #3048: Previously expiry was NOT cleared because expiry handling ran\n// BEFORE processReauthTags.\nfunc TestExpiryDuringPersonalToTaggedConversion(t *testing.T) {\n\tapp := createTestApp(t)\n\tuser := app.state.CreateUserForTest(\"expiry-test-user\")\n\n\t// Update policy to allow user to own tags\n\terr := app.state.UpdatePolicyManagerUsersForTest()\n\trequire.NoError(t, err)\n\n\tpolicy := `{\n\t\t\"tagOwners\": {\n\t\t\t\"tag:server\": [\"expiry-test-user@\"]\n\t\t},\n\t\t\"acls\": [{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}]\n\t}`\n\t_, err = app.state.SetPolicy([]byte(policy))\n\trequire.NoError(t, err)\n\n\tmachineKey := key.NewMachine()\n\tnodeKey1 := key.NewNode()\n\n\t// Step 1: Create user-owned node WITH expiry set\n\tclientExpiry := time.Now().Add(24 * time.Hour)\n\tregistrationID1 := types.MustAuthID()\n\tregEntry1 := types.NewRegisterAuthRequest(types.Node{\n\t\tMachineKey: machineKey.Public(),\n\t\tNodeKey:    nodeKey1.Public(),\n\t\tHostname:   \"personal-to-tagged\",\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname:    \"personal-to-tagged\",\n\t\t\tRequestTags: []string{}, // No tags - user-owned\n\t\t},\n\t\tExpiry: &clientExpiry,\n\t})\n\tapp.state.SetAuthCacheEntry(registrationID1, regEntry1)\n\n\tnode, _, err := app.state.HandleNodeFromAuthPath(\n\t\tregistrationID1, types.UserID(user.ID), nil, \"webauth\",\n\t)\n\trequire.NoError(t, err)\n\trequire.False(t, node.IsTagged(), \"Node should be user-owned initially\")\n\trequire.True(t, node.Expiry().Valid(), \"User-owned node should have expiry set\")\n\n\t// Step 2: Re-auth with tags (Personal → Tagged conversion)\n\tnodeKey2 := key.NewNode()\n\tregistrationID2 := types.MustAuthID()\n\tregEntry2 := types.NewRegisterAuthRequest(types.Node{\n\t\tMachineKey: machineKey.Public(),\n\t\tNodeKey:    nodeKey2.Public(),\n\t\tHostname:   \"personal-to-tagged\",\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname:    \"personal-to-tagged\",\n\t\t\tRequestTags: []string{\"tag:server\"}, // Adding tags\n\t\t},\n\t\tExpiry: &clientExpiry, // Client still sends expiry\n\t})\n\tapp.state.SetAuthCacheEntry(registrationID2, regEntry2)\n\n\tnodeAfter, _, err := app.state.HandleNodeFromAuthPath(\n\t\tregistrationID2, types.UserID(user.ID), nil, \"webauth\",\n\t)\n\trequire.NoError(t, err)\n\trequire.True(t, nodeAfter.IsTagged(), \"Node should be tagged after conversion\")\n\n\t// CRITICAL ASSERTION: Tagged nodes should NOT have expiry\n\tassert.False(t, nodeAfter.Expiry().Valid(),\n\t\t\"Tagged node should have expiry cleared to nil\")\n}\n\n// TestExpiryDuringTaggedToPersonalConversion tests that when a tagged node\n// is converted to personal via reauth with empty RequestTags, expiry is set\n// from the client request.\n// BUG #3048: Previously expiry was NOT set because expiry handling ran\n// BEFORE processReauthTags (node was still tagged at check 
time).\nfunc TestExpiryDuringTaggedToPersonalConversion(t *testing.T) {\n\tapp := createTestApp(t)\n\tuser := app.state.CreateUserForTest(\"expiry-test-user2\")\n\n\t// Update policy to allow user to own tags\n\terr := app.state.UpdatePolicyManagerUsersForTest()\n\trequire.NoError(t, err)\n\n\tpolicy := `{\n\t\t\"tagOwners\": {\n\t\t\t\"tag:server\": [\"expiry-test-user2@\"]\n\t\t},\n\t\t\"acls\": [{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}]\n\t}`\n\t_, err = app.state.SetPolicy([]byte(policy))\n\trequire.NoError(t, err)\n\n\tmachineKey := key.NewMachine()\n\tnodeKey1 := key.NewNode()\n\n\t// Step 1: Create tagged node (expiry should be nil)\n\tregistrationID1 := types.MustAuthID()\n\tregEntry1 := types.NewRegisterAuthRequest(types.Node{\n\t\tMachineKey: machineKey.Public(),\n\t\tNodeKey:    nodeKey1.Public(),\n\t\tHostname:   \"tagged-to-personal\",\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname:    \"tagged-to-personal\",\n\t\t\tRequestTags: []string{\"tag:server\"}, // Tagged node\n\t\t},\n\t})\n\tapp.state.SetAuthCacheEntry(registrationID1, regEntry1)\n\n\tnode, _, err := app.state.HandleNodeFromAuthPath(\n\t\tregistrationID1, types.UserID(user.ID), nil, \"webauth\",\n\t)\n\trequire.NoError(t, err)\n\trequire.True(t, node.IsTagged(), \"Node should be tagged initially\")\n\trequire.False(t, node.Expiry().Valid(), \"Tagged node should have nil expiry\")\n\n\t// Step 2: Re-auth with empty tags (Tagged → Personal conversion)\n\tnodeKey2 := key.NewNode()\n\tclientExpiry := time.Now().Add(48 * time.Hour)\n\tregistrationID2 := types.MustAuthID()\n\tregEntry2 := types.NewRegisterAuthRequest(types.Node{\n\t\tMachineKey: machineKey.Public(),\n\t\tNodeKey:    nodeKey2.Public(),\n\t\tHostname:   \"tagged-to-personal\",\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname:    \"tagged-to-personal\",\n\t\t\tRequestTags: []string{}, // Empty tags - convert to user-owned\n\t\t},\n\t\tExpiry: &clientExpiry, // Client requests expiry\n\t})\n\tapp.state.SetAuthCacheEntry(registrationID2, regEntry2)\n\n\tnodeAfter, _, err := app.state.HandleNodeFromAuthPath(\n\t\tregistrationID2, types.UserID(user.ID), nil, \"webauth\",\n\t)\n\trequire.NoError(t, err)\n\trequire.False(t, nodeAfter.IsTagged(), \"Node should be user-owned after conversion\")\n\n\t// CRITICAL ASSERTION: User-owned nodes should have expiry from client\n\tassert.True(t, nodeAfter.Expiry().Valid(),\n\t\t\"User-owned node should have expiry set\")\n\tassert.WithinDuration(t, clientExpiry, nodeAfter.Expiry().Get(), 5*time.Second,\n\t\t\"Expiry should match client request\")\n}\n\n// TestReAuthWithDifferentMachineKey tests the edge case where a node attempts\n// to re-authenticate with the same NodeKey but a DIFFERENT MachineKey.\n// This scenario should be handled gracefully (currently creates a new node).\nfunc TestReAuthWithDifferentMachineKey(t *testing.T) {\n\tapp := createTestApp(t)\n\n\tuser := app.state.CreateUserForTest(\"tag-creator\")\n\ttags := []string{\"tag:server\"}\n\n\t// Create a reusable tagged PreAuthKey\n\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, tags)\n\trequire.NoError(t, err)\n\n\t// Initial registration\n\tmachineKey1 := key.NewMachine()\n\tnodeKey := key.NewNode() // Same NodeKey for both attempts\n\n\tregReq1 := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak.Key,\n\t\t},\n\t\tNodeKey: nodeKey.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"test-node\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * 
time.Hour),\n\t}\n\n\tresp1, err := app.handleRegisterWithAuthKey(regReq1, machineKey1.Public())\n\trequire.NoError(t, err)\n\trequire.True(t, resp1.MachineAuthorized)\n\n\t// Verify initial node\n\tnode1, found := app.state.GetNodeByNodeKey(nodeKey.Public())\n\trequire.True(t, found)\n\tassert.True(t, node1.IsTagged())\n\n\t// Re-authenticate with DIFFERENT MachineKey but SAME NodeKey\n\tmachineKey2 := key.NewMachine() // Different machine key\n\n\tregReq2 := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak.Key,\n\t\t},\n\t\tNodeKey: nodeKey.Public(), // Same NodeKey\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"test-node\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\tresp2, err := app.handleRegisterWithAuthKey(regReq2, machineKey2.Public())\n\trequire.NoError(t, err)\n\trequire.True(t, resp2.MachineAuthorized)\n\n\t// Verify the node still exists and has tags\n\t// Note: Depending on implementation, this might be the same node or a new node\n\tnode2, found := app.state.GetNodeByNodeKey(nodeKey.Public())\n\trequire.True(t, found)\n\tassert.True(t, node2.IsTagged())\n\tassert.ElementsMatch(t, tags, node2.Tags().AsSlice())\n}\n"
  },
  {
    "path": "hscontrol/auth_test.go",
    "content": "package hscontrol\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/url\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/mapper\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/key\"\n)\n\n// Interactive step type constants.\nconst (\n\tstepTypeInitialRequest  = \"initial_request\"\n\tstepTypeAuthCompletion  = \"auth_completion\"\n\tstepTypeFollowupRequest = \"followup_request\"\n)\n\nvar errNodeNotFoundAfterSetup = errors.New(\"node not found after setup\")\n\n// interactiveStep defines a step in the interactive authentication workflow.\ntype interactiveStep struct {\n\tstepType         string // stepTypeInitialRequest, stepTypeAuthCompletion, or stepTypeFollowupRequest\n\texpectAuthURL    bool\n\texpectCacheEntry bool\n\tcallAuthPath     bool // Real call to HandleNodeFromAuthPath, not mocked\n}\n\n//nolint:gocyclo // comprehensive test function with many scenarios\nfunc TestAuthenticationFlows(t *testing.T) {\n\t// Shared test keys for consistent behavior across test cases\n\tmachineKey1 := key.NewMachine()\n\tmachineKey2 := key.NewMachine()\n\tnodeKey1 := key.NewNode()\n\tnodeKey2 := key.NewNode()\n\n\ttests := []struct {\n\t\tname        string\n\t\tsetupFunc   func(*testing.T, *Headscale) (string, error) // Returns dynamic values like auth keys\n\t\trequest     func(dynamicValue string) tailcfg.RegisterRequest\n\t\tmachineKey  func() key.MachinePublic\n\t\twantAuth    bool\n\t\twantError   bool\n\t\twantAuthURL bool\n\t\twantExpired bool\n\t\tvalidate    func(*testing.T, *tailcfg.RegisterResponse, *Headscale)\n\n\t\t// Interactive workflow support\n\t\trequiresInteractiveFlow   bool\n\t\tinteractiveSteps          []interactiveStep\n\t\tvalidateRegistrationCache bool\n\t\texpectedAuthURLPattern    string\n\t\tsimulateAuthCompletion    bool\n\t\tvalidateCompleteResponse  bool\n\t}{\n\t\t// === PRE-AUTH KEY SCENARIOS ===\n\t\t// Tests authentication using pre-authorization keys for automated node registration.\n\t\t// Pre-auth keys allow nodes to join without interactive authentication.\n\n\t\t// TEST: Valid pre-auth key registers a new node\n\t\t// WHAT: Tests successful node registration using a valid pre-auth key\n\t\t// INPUT: Register request with valid pre-auth key, node key, and hostinfo\n\t\t// EXPECTED: Node is authorized immediately, registered in database\n\t\t// WHY: Pre-auth keys enable automated/headless node registration without user interaction\n\t\t{\n\t\t\tname: \"preauth_key_valid_new_node\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper //nolint:thelper // not a test helper, inline closure\n\t\t\t\tuser := app.state.CreateUserForTest(\"preauth-user\")\n\n\t\t\t\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\treturn pak.Key, nil\n\t\t\t},\n\t\t\trequest: func(authKey string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: authKey,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"preauth-node-1\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\twantAuth:   
true,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper // not a test helper, inline closure\n\t\t\t\tassert.True(t, resp.MachineAuthorized)\n\t\t\t\tassert.False(t, resp.NodeKeyExpired)\n\t\t\t\tassert.NotEmpty(t, resp.User.DisplayName)\n\n\t\t\t\t// Verify node was created in database\n\t\t\t\tnode, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\tassert.True(t, found)\n\t\t\t\tassert.Equal(t, \"preauth-node-1\", node.Hostname())\n\t\t\t},\n\t\t},\n\n\t\t// TEST: Reusable pre-auth key can register multiple nodes\n\t\t// WHAT: Tests that a reusable pre-auth key can be used for multiple node registrations\n\t\t// INPUT: Same reusable pre-auth key used to register two different nodes\n\t\t// EXPECTED: Both nodes successfully register with the same key\n\t\t// WHY: Reusable keys allow multiple machines to join using one key (useful for fleet deployments)\n\t\t{\n\t\t\tname: \"preauth_key_reusable_multiple_nodes\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\tuser := app.state.CreateUserForTest(\"reusable-user\")\n\n\t\t\t\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\t// Use the key for first node\n\t\t\t\tfirstReq := tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: pak.Key,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"reusable-node-1\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\n\t\t\t\t_, err = app.handleRegisterWithAuthKey(firstReq, machineKey1.Public())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\t// Wait for node to be available in NodeStore\n\t\t\t\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t_, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\t\tassert.True(c, found, \"node should be available in NodeStore\")\n\t\t\t\t}, 1*time.Second, 50*time.Millisecond, \"waiting for node to be available in NodeStore\")\n\n\t\t\t\treturn pak.Key, nil\n\t\t\t},\n\t\t\trequest: func(authKey string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: authKey,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey2.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"reusable-node-2\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey2.Public,\n\t\t\twantAuth:   true,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\tassert.True(t, resp.MachineAuthorized)\n\t\t\t\tassert.False(t, resp.NodeKeyExpired)\n\n\t\t\t\t// Verify both nodes exist\n\t\t\t\tnode1, found1 := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\tnode2, found2 := app.state.GetNodeByNodeKey(nodeKey2.Public())\n\n\t\t\t\tassert.True(t, found1)\n\t\t\t\tassert.True(t, found2)\n\t\t\t\tassert.Equal(t, \"reusable-node-1\", node1.Hostname())\n\t\t\t\tassert.Equal(t, \"reusable-node-2\", node2.Hostname())\n\t\t\t},\n\t\t},\n\n\t\t// TEST: Single-use pre-auth key cannot be reused\n\t\t// WHAT: Tests that a single-use pre-auth key fails on second use\n\t\t// INPUT: Single-use key used for first node (succeeds), then attempted for 
second node\n\t\t// EXPECTED: First node registers successfully, second node fails with error\n\t\t// WHY: Single-use keys provide security by preventing key reuse after initial registration\n\t\t{\n\t\t\tname: \"preauth_key_single_use_exhausted\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\tuser := app.state.CreateUserForTest(\"single-use-user\")\n\n\t\t\t\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), false, false, nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\t// Use the key for first node (should work)\n\t\t\t\tfirstReq := tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: pak.Key,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"single-use-node-1\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\n\t\t\t\t_, err = app.handleRegisterWithAuthKey(firstReq, machineKey1.Public())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\t// Wait for node to be available in NodeStore\n\t\t\t\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t_, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\t\tassert.True(c, found, \"node should be available in NodeStore\")\n\t\t\t\t}, 1*time.Second, 50*time.Millisecond, \"waiting for node to be available in NodeStore\")\n\n\t\t\t\treturn pak.Key, nil\n\t\t\t},\n\t\t\trequest: func(authKey string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: authKey,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey2.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"single-use-node-2\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey2.Public,\n\t\t\twantError:  true,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\t// First node should exist, second should not\n\t\t\t\t_, found1 := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\t_, found2 := app.state.GetNodeByNodeKey(nodeKey2.Public())\n\n\t\t\t\tassert.True(t, found1)\n\t\t\t\tassert.False(t, found2)\n\t\t\t},\n\t\t},\n\n\t\t// TEST: Invalid pre-auth key is rejected\n\t\t// WHAT: Tests that an invalid/non-existent pre-auth key is rejected\n\t\t// INPUT: Register request with invalid auth key string\n\t\t// EXPECTED: Registration fails with error\n\t\t// WHY: Invalid keys must be rejected to prevent unauthorized node registration\n\t\t{\n\t\t\tname: \"preauth_key_invalid\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\treturn \"invalid-key-12345\", nil\n\t\t\t},\n\t\t\trequest: func(authKey string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: authKey,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"invalid-key-node\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\twantError:  true,\n\t\t},\n\n\t\t// TEST: Ephemeral pre-auth key creates ephemeral node\n\t\t// WHAT: Tests that a node registered with ephemeral key is marked as ephemeral\n\t\t// INPUT: 
Pre-auth key with ephemeral=true, standard register request\n\t\t// EXPECTED: Node registers and is marked as ephemeral (will be deleted on logout)\n\t\t// WHY: Ephemeral nodes auto-cleanup when disconnected, useful for temporary/CI environments\n\t\t{\n\t\t\tname: \"preauth_key_ephemeral_node\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\tuser := app.state.CreateUserForTest(\"ephemeral-user\")\n\n\t\t\t\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), false, true, nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\treturn pak.Key, nil\n\t\t\t},\n\t\t\trequest: func(authKey string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: authKey,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"ephemeral-node\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\twantAuth:   true,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\tassert.True(t, resp.MachineAuthorized)\n\t\t\t\tassert.False(t, resp.NodeKeyExpired)\n\n\t\t\t\t// Verify ephemeral node was created with a valid (ephemeral) auth key\n\t\t\t\tnode, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\tassert.True(t, found)\n\t\t\t\tassert.True(t, node.AuthKey().Valid())\n\t\t\t\tassert.True(t, node.AuthKey().Ephemeral())\n\t\t\t},\n\t\t},\n\n\t\t// === INTERACTIVE REGISTRATION SCENARIOS ===\n\t\t// Tests interactive authentication flow where user completes registration via web UI.\n\t\t// Interactive flow: node requests registration → receives AuthURL → user authenticates → node gets registered\n\n\t\t// TEST: Complete interactive workflow for new node\n\t\t// WHAT: Tests full interactive registration flow from initial request to completion\n\t\t// INPUT: Register request with no auth → user completes auth → followup request\n\t\t// EXPECTED: Initial request returns AuthURL, after auth completion node is registered\n\t\t// WHY: Interactive flow is the standard user-facing authentication method for new nodes\n\t\t{\n\t\t\tname: \"full_interactive_workflow_new_node\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\treturn \"\", nil\n\t\t\t},\n\t\t\trequest: func(_ string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"interactive-flow-node\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey:              machineKey1.Public,\n\t\t\trequiresInteractiveFlow: true,\n\t\t\tinteractiveSteps: []interactiveStep{\n\t\t\t\t{stepType: stepTypeInitialRequest, expectAuthURL: true, expectCacheEntry: true},\n\t\t\t\t{stepType: stepTypeAuthCompletion, callAuthPath: true, expectCacheEntry: false}, // cleaned up after completion\n\t\t\t},\n\t\t\tvalidateCompleteResponse: true,\n\t\t\texpectedAuthURLPattern:   \"/register/\",\n\t\t},\n\t\t// TEST: Interactive workflow with no Auth struct in request\n\t\t// WHAT: Tests interactive flow when request has no Auth field (nil)\n\t\t// INPUT: Register request with Auth field set to nil\n\t\t// EXPECTED: Node receives AuthURL and can complete registration via interactive flow\n\t\t// WHY: 
\t\t// TEST: Interactive workflow with no Auth struct in request\n\t\t// WHAT: Tests interactive flow when request has no Auth field (nil)\n\t\t// INPUT: Register request with Auth field set to nil\n\t\t// EXPECTED: Node receives AuthURL and can complete registration via interactive flow\n\t\t// WHY: Validates handling of requests without Auth field, same as empty auth\n\t\t{\n\t\t\tname: \"interactive_workflow_no_auth_struct\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\treturn \"\", nil\n\t\t\t},\n\t\t\trequest: func(_ string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\t// No Auth field at all\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"interactive-no-auth-node\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey:              machineKey1.Public,\n\t\t\trequiresInteractiveFlow: true,\n\t\t\tinteractiveSteps: []interactiveStep{\n\t\t\t\t{stepType: stepTypeInitialRequest, expectAuthURL: true, expectCacheEntry: true},\n\t\t\t\t{stepType: stepTypeAuthCompletion, callAuthPath: true, expectCacheEntry: false}, // cleaned up after completion\n\t\t\t},\n\t\t\tvalidateCompleteResponse: true,\n\t\t\texpectedAuthURLPattern:   \"/register/\",\n\t\t},\n\n\t\t// === EXISTING NODE SCENARIOS ===\n\t\t// Tests behavior when existing registered nodes send requests (logout, re-auth, expiry, etc.)\n\n\t\t// TEST: Existing node logout with past expiry\n\t\t// WHAT: Tests node logout by sending request with expiry in the past\n\t\t// INPUT: Previously registered node sends request with Auth=nil and past expiry time\n\t\t// EXPECTED: Node expiry is updated, NodeKeyExpired=true, MachineAuthorized=true (for compatibility)\n\t\t// WHY: Nodes signal logout by setting expiry to past time; system updates node state accordingly\n\t\t{\n\t\t\tname: \"existing_node_logout\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\tuser := app.state.CreateUserForTest(\"logout-user\")\n\n\t\t\t\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\t// Register the node first\n\t\t\t\tregReq := tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: pak.Key,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"logout-node\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\n\t\t\t\tresp, err := app.handleRegisterWithAuthKey(regReq, machineKey1.Public())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\tt.Logf(\"Setup registered node: %+v\", resp)\n\n\t\t\t\t// Wait for node to be available in NodeStore with debug info\n\t\t\t\tvar attemptCount int\n\n\t\t\t\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\tattemptCount++\n\n\t\t\t\t\t_, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\t\tif assert.True(c, found, \"node should be available in NodeStore\") {\n\t\t\t\t\t\tt.Logf(\"Node found in NodeStore after %d attempts\", attemptCount)\n\t\t\t\t\t}\n\t\t\t\t}, 1*time.Second, 100*time.Millisecond, \"waiting for node to be available in NodeStore\")\n\n\t\t\t\treturn \"\", nil\n\t\t\t},\n\t\t\trequest: func(_ string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth:    nil,\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tExpiry:  time.Now().Add(-1 * time.Hour), // Past expiry = logout\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey:  machineKey1.Public,\n\t\t\twantAuth:    true,\n\t\t\twantExpired: true,\n
\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\tassert.True(t, resp.MachineAuthorized)\n\t\t\t\tassert.True(t, resp.NodeKeyExpired)\n\t\t\t},\n\t\t},\n\t\t// TEST: Existing node with different machine key is rejected\n\t\t// WHAT: Tests that requests for existing node with wrong machine key are rejected\n\t\t// INPUT: Node key matches existing node, but machine key is different\n\t\t// EXPECTED: Request fails with unauthorized error (machine key mismatch)\n\t\t// WHY: Machine key must match to prevent node hijacking/impersonation\n\t\t{\n\t\t\tname: \"existing_node_machine_key_mismatch\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\tuser := app.state.CreateUserForTest(\"mismatch-user\")\n\n\t\t\t\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\t// Register with machineKey1\n\t\t\t\tregReq := tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: pak.Key,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"mismatch-node\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\n\t\t\t\t_, err = app.handleRegisterWithAuthKey(regReq, machineKey1.Public())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\t// Wait for node to be available in NodeStore\n\t\t\t\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t_, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\t\tassert.True(c, found, \"node should be available in NodeStore\")\n\t\t\t\t}, 1*time.Second, 50*time.Millisecond, \"waiting for node to be available in NodeStore\")\n\n\t\t\t\treturn \"\", nil\n\t\t\t},\n\t\t\trequest: func(_ string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth:    nil,\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tExpiry:  time.Now().Add(-1 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey2.Public, // Different machine key\n\t\t\twantError:  true,\n\t\t},\n\t\t// TEST: Existing node cannot extend expiry without re-auth\n\t\t// WHAT: Tests that nodes cannot extend their expiry time without authentication\n\t\t// INPUT: Existing node sends request with Auth=nil and future expiry (extension attempt)\n\t\t// EXPECTED: Request fails with error (extending key not allowed)\n\t\t// WHY: Prevents nodes from extending their own lifetime; must re-authenticate\n\t\t{\n\t\t\tname: \"existing_node_key_extension_not_allowed\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\tuser := app.state.CreateUserForTest(\"extend-user\")\n\n\t\t\t\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\t// Register the node first\n\t\t\t\tregReq := tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: pak.Key,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"extend-node\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\n\t\t\t\t_, err = app.handleRegisterWithAuthKey(regReq, machineKey1.Public())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n
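\n\t\t\t\t// NOTE: registration results propagate to the NodeStore\n\t\t\t\t// asynchronously, which is why the setups in this table poll with\n\t\t\t\t// require.EventuallyWithT instead of reading the store back\n\t\t\t\t// immediately after handleRegisterWithAuthKey returns.\n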
\n\t\t\t\t// Wait for node to be available in NodeStore\n\t\t\t\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t_, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\t\tassert.True(c, found, \"node should be available in NodeStore\")\n\t\t\t\t}, 1*time.Second, 50*time.Millisecond, \"waiting for node to be available in NodeStore\")\n\n\t\t\t\treturn \"\", nil\n\t\t\t},\n\t\t\trequest: func(_ string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth:    nil,\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tExpiry:  time.Now().Add(48 * time.Hour), // Future time = extend attempt\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\twantError:  true,\n\t\t},\n\t\t// TEST: Expired node must re-authenticate\n\t\t// WHAT: Tests that expired nodes receive NodeKeyExpired=true and must re-auth\n\t\t// INPUT: Previously expired node sends request with no auth\n\t\t// EXPECTED: Response has NodeKeyExpired=true, node must re-authenticate\n\t\t// WHY: Expired nodes must go through authentication again for security\n\t\t{\n\t\t\tname: \"existing_node_expired_forces_reauth\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\tuser := app.state.CreateUserForTest(\"reauth-user\")\n\n\t\t\t\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\t// Register the node first\n\t\t\t\tregReq := tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: pak.Key,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"reauth-node\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\n\t\t\t\t_, err = app.handleRegisterWithAuthKey(regReq, machineKey1.Public())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\t// Wait for node to be available in NodeStore\n\t\t\t\tvar (\n\t\t\t\t\tnode  types.NodeView\n\t\t\t\t\tfound bool\n\t\t\t\t)\n\n\t\t\t\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\tnode, found = app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\t\tassert.True(c, found, \"node should be available in NodeStore\")\n\t\t\t\t}, 1*time.Second, 50*time.Millisecond, \"waiting for node to be available in NodeStore\")\n\n\t\t\t\tif !found {\n\t\t\t\t\treturn \"\", errNodeNotFoundAfterSetup\n\t\t\t\t}\n\n\t\t\t\t// Expire the node\n\t\t\t\texpiredTime := time.Now().Add(-1 * time.Hour)\n\t\t\t\t_, _, err = app.state.SetNodeExpiry(node.ID(), &expiredTime)\n\n\t\t\t\treturn \"\", err\n\t\t\t},\n\t\t\trequest: func(_ string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth:    nil,\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tExpiry:  time.Now().Add(24 * time.Hour), // Future expiry\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey:  machineKey1.Public,\n\t\t\twantExpired: true,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\tassert.True(t, resp.NodeKeyExpired)\n\t\t\t\tassert.False(t, resp.MachineAuthorized)\n\t\t\t},\n\t\t},\n
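\t\t// NOTE: \"logout\" in this flow is simply a RegisterRequest with no Auth\n\t\t// and an Expiry in the past, roughly:\n\t\t//\n\t\t//\ttailcfg.RegisterRequest{Auth: nil, NodeKey: nk, Expiry: time.Now().Add(-time.Hour)}\n\t\t//\n\t\t// For regular nodes this only expires the node key; the ephemeral case\n\t\t// below asserts the stronger behavior of full deletion.\n\n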
\t\t// TEST: Ephemeral node is deleted on logout\n\t\t// WHAT: Tests that ephemeral nodes are deleted (not just expired) on logout\n\t\t// INPUT: Ephemeral node sends logout request (past expiry)\n\t\t// EXPECTED: Node is completely deleted from database, not just marked expired\n\t\t// WHY: Ephemeral nodes should not persist after logout; auto-cleanup\n\t\t{\n\t\t\tname: \"ephemeral_node_logout_deletion\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\tuser := app.state.CreateUserForTest(\"ephemeral-logout-user\")\n\n\t\t\t\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), false, true, nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\t// Register ephemeral node\n\t\t\t\tregReq := tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: pak.Key,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"ephemeral-logout-node\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\n\t\t\t\t_, err = app.handleRegisterWithAuthKey(regReq, machineKey1.Public())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\t// Wait for node to be available in NodeStore\n\t\t\t\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t_, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\t\tassert.True(c, found, \"node should be available in NodeStore\")\n\t\t\t\t}, 1*time.Second, 50*time.Millisecond, \"waiting for node to be available in NodeStore\")\n\n\t\t\t\treturn \"\", nil\n\t\t\t},\n\t\t\trequest: func(_ string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth:    nil,\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tExpiry:  time.Now().Add(-1 * time.Hour), // Logout\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey:  machineKey1.Public,\n\t\t\twantExpired: true,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\tassert.True(t, resp.NodeKeyExpired)\n\t\t\t\tassert.False(t, resp.MachineAuthorized)\n\n\t\t\t\t// Ephemeral node should be deleted, not just marked expired\n\t\t\t\t_, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\tassert.False(t, found, \"ephemeral node should be deleted on logout\")\n\t\t\t},\n\t\t},\n\n\t\t// === FOLLOWUP REGISTRATION SCENARIOS ===\n\t\t// Tests followup request handling after interactive registration is initiated.\n\t\t// Followup requests are sent by nodes waiting for auth completion.\n\n\t\t// TEST: Successful followup registration after auth completion\n\t\t// WHAT: Tests node successfully completes registration via followup URL\n\t\t// INPUT: Register request with followup URL after auth completion\n\t\t// EXPECTED: Node receives successful registration response with user info\n\t\t// WHY: Followup mechanism allows nodes to poll/wait for auth completion\n\t\t{\n\t\t\tname: \"followup_registration_success\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\tregID, err := types.NewAuthID()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\tnodeToRegister := types.NewRegisterAuthRequest(types.Node{\n\t\t\t\t\tHostname: \"followup-success-node\",\n\t\t\t\t})\n\t\t\t\tapp.state.SetAuthCacheEntry(regID, nodeToRegister)\n\n\t\t\t\t// Simulate successful registration\n\t\t\t\t// handleRegister will receive the value when it starts waiting\n\t\t\t\tgo func() {\n\t\t\t\t\tuser := app.state.CreateUserForTest(\"followup-user\")\n\n\t\t\t\t\tnode := app.state.CreateNodeForTest(user, \"followup-success-node\")\n\t\t\t\t\tnodeToRegister.FinishAuth(types.AuthVerdict{Node: node.View()})\n\t\t\t\t}()\n
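\n\t\t\t\t// The returned followup URL hands the registration ID back to the\n\t\t\t\t// server; handleRegister parses it out of the /register/<id> path\n\t\t\t\t// to look the parked request up in the auth cache.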
fmt.Sprintf(\"http://localhost:8080/register/%s\", regID), nil\n\t\t\t},\n\t\t\trequest: func(followupURL string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tFollowup: followupURL,\n\t\t\t\t\tNodeKey:  nodeKey1.Public(),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\twantAuth:   true,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper //nolint:thelper\n\t\t\t\tassert.True(t, resp.MachineAuthorized)\n\t\t\t\tassert.False(t, resp.NodeKeyExpired)\n\t\t\t},\n\t\t},\n\t\t// TEST: Followup registration times out when auth not completed\n\t\t// WHAT: Tests that followup request times out if auth is not completed in time\n\t\t// INPUT: Followup request with short timeout, no auth completion\n\t\t// EXPECTED: Request times out with unauthorized error\n\t\t// WHY: Prevents indefinite waiting; nodes must retry if auth takes too long\n\t\t{\n\t\t\tname: \"followup_registration_timeout\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper //nolint:thelper\n\t\t\t\tregID, err := types.NewAuthID()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\tnodeToRegister := types.NewRegisterAuthRequest(types.Node{\n\t\t\t\t\tHostname: \"followup-timeout-node\",\n\t\t\t\t})\n\t\t\t\tapp.state.SetAuthCacheEntry(regID, nodeToRegister)\n\t\t\t\t// Don't call FinishRegistration - will timeout\n\n\t\t\t\treturn fmt.Sprintf(\"http://localhost:8080/register/%s\", regID), nil\n\t\t\t},\n\t\t\trequest: func(followupURL string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tFollowup: followupURL,\n\t\t\t\t\tNodeKey:  nodeKey1.Public(),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\twantError:  true,\n\t\t},\n\t\t// TEST: Invalid followup URL is rejected\n\t\t// WHAT: Tests that malformed/invalid followup URLs are rejected\n\t\t// INPUT: Register request with invalid URL in Followup field\n\t\t// EXPECTED: Request fails with error (invalid followup URL)\n\t\t// WHY: Validates URL format to prevent errors and potential exploits\n\t\t{\n\t\t\tname: \"followup_invalid_url\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper //nolint:thelper\n\t\t\t\treturn \"invalid://url[malformed\", nil\n\t\t\t},\n\t\t\trequest: func(followupURL string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tFollowup: followupURL,\n\t\t\t\t\tNodeKey:  nodeKey1.Public(),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\twantError:  true,\n\t\t},\n\t\t// TEST: Non-existent registration ID is rejected\n\t\t// WHAT: Tests that followup with non-existent registration ID fails\n\t\t// INPUT: Valid followup URL but registration ID not in cache\n\t\t// EXPECTED: Request fails with unauthorized error\n\t\t// WHY: Registration must exist in cache; prevents invalid/expired registrations\n\t\t{\n\t\t\tname: \"followup_registration_not_found\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper //nolint:thelper\n\t\t\t\treturn \"http://localhost:8080/register/nonexistent-id\", nil\n\t\t\t},\n\t\t\trequest: func(followupURL string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tFollowup: followupURL,\n\t\t\t\t\tNodeKey:  nodeKey1.Public(),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\twantError:  true,\n\t\t},\n\n\t\t// === EDGE CASES ===\n\t\t// Tests handling of malformed, 
\n\t\t// === EDGE CASES ===\n\t\t// Tests handling of malformed, invalid, or unusual input data\n\n\t\t// TEST: Empty hostname is handled with defensive code\n\t\t// WHAT: Tests that empty hostname in hostinfo generates a default hostname\n\t\t// INPUT: Register request with hostinfo containing empty hostname string\n\t\t// EXPECTED: Node registers successfully with generated hostname (node-MACHINEKEY)\n\t\t// WHY: Defensive code prevents errors from missing hostnames; generates sensible default\n\t\t{\n\t\t\tname: \"empty_hostname\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\tuser := app.state.CreateUserForTest(\"empty-hostname-user\")\n\n\t\t\t\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\treturn pak.Key, nil\n\t\t\t},\n\t\t\trequest: func(authKey string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: authKey,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"\", // Empty hostname should be handled gracefully\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\twantAuth:   true,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\tassert.True(t, resp.MachineAuthorized)\n\n\t\t\t\t// Node should be created with generated hostname\n\t\t\t\tnode, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\tassert.True(t, found)\n\t\t\t\tassert.NotEmpty(t, node.Hostname())\n\t\t\t},\n\t\t},\n\t\t// TEST: Nil hostinfo is handled with defensive code\n\t\t// WHAT: Tests that nil hostinfo in register request is handled gracefully\n\t\t// INPUT: Register request with Hostinfo field set to nil\n\t\t// EXPECTED: Node registers successfully with generated hostname starting with \"node-\"\n\t\t// WHY: Defensive code prevents nil pointer panics; creates valid default hostinfo\n\t\t{\n\t\t\tname: \"nil_hostinfo\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\tuser := app.state.CreateUserForTest(\"nil-hostinfo-user\")\n\n\t\t\t\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\treturn pak.Key, nil\n\t\t\t},\n\t\t\trequest: func(authKey string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: authKey,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey:  nodeKey1.Public(),\n\t\t\t\t\tHostinfo: nil, // Nil hostinfo should be handled with defensive code\n\t\t\t\t\tExpiry:   time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\twantAuth:   true,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\tassert.True(t, resp.MachineAuthorized)\n\n\t\t\t\t// Node should be created with generated hostname from defensive code\n\t\t\t\tnode, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\tassert.True(t, found)\n\t\t\t\tassert.NotEmpty(t, node.Hostname())\n\t\t\t\t// Hostname should start with \"node-\" (generated from machine key)\n\t\t\t\tassert.True(t, strings.HasPrefix(node.Hostname(), \"node-\"))\n\t\t\t},\n\t\t},\n
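\n\t\t// NOTE: the fallback hostname asserted above is derived from the\n\t\t// machine key (hence the \"node-\" prefix); the exact suffix format is\n\t\t// an implementation detail and deliberately not asserted here.\n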
\n\t\t// === PRE-AUTH KEY WITH EXPIRY SCENARIOS ===\n\t\t// Tests pre-auth key expiration handling\n\n\t\t// TEST: Expired pre-auth key is rejected\n\t\t// WHAT: Tests that a pre-auth key with past expiration date cannot be used\n\t\t// INPUT: Pre-auth key with expiry 1 hour in the past\n\t\t// EXPECTED: Registration fails with error\n\t\t// WHY: Expired keys must be rejected to maintain security and key lifecycle management\n\t\t{\n\t\t\tname: \"preauth_key_expired\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\tuser := app.state.CreateUserForTest(\"expired-pak-user\")\n\t\t\t\texpiry := time.Now().Add(-1 * time.Hour) // Expired 1 hour ago\n\n\t\t\t\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, &expiry, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\treturn pak.Key, nil\n\t\t\t},\n\t\t\trequest: func(authKey string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: authKey,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"expired-pak-node\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\twantError:  true,\n\t\t},\n\n\t\t// TEST: Pre-auth key with ACL tags applies tags to node\n\t\t// WHAT: Tests that ACL tags from pre-auth key are applied to registered node\n\t\t// INPUT: Pre-auth key with ACL tags [\"tag:server\", \"tag:database\"], register request\n\t\t// EXPECTED: Node registers with specified ACL tags applied as ForcedTags\n\t\t// WHY: Pre-auth keys can enforce ACL policies on nodes during registration\n\t\t{\n\t\t\tname: \"preauth_key_with_acl_tags\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\tuser := app.state.CreateUserForTest(\"tagged-pak-user\")\n\t\t\t\ttags := []string{\"tag:server\", \"tag:database\"}\n\n\t\t\t\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, tags)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\treturn pak.Key, nil\n\t\t\t},\n\t\t\trequest: func(authKey string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: authKey,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"tagged-pak-node\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\twantAuth:   true,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\tassert.True(t, resp.MachineAuthorized)\n\t\t\t\tassert.False(t, resp.NodeKeyExpired)\n\n\t\t\t\t// Verify node was created with tags\n\t\t\t\tnode, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\tassert.True(t, found)\n\t\t\t\tassert.Equal(t, \"tagged-pak-node\", node.Hostname())\n\n\t\t\t\tif node.AuthKey().Valid() {\n\t\t\t\t\tassert.NotEmpty(t, node.AuthKey().Tags())\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\n\t\t// === ADVERTISE-TAGS (RequestTags) SCENARIOS ===\n\t\t// Tests for client-provided tags via --advertise-tags flag\n\n\t\t// TEST: PreAuthKey registration rejects client-provided RequestTags\n\t\t// WHAT: Tests that PreAuthKey registrations cannot use client-provided tags\n\t\t// 
INPUT: PreAuthKey registration with RequestTags in Hostinfo\n\t\t// EXPECTED: Registration fails with \"requested tags [...] are invalid or not permitted\" error\n\t\t// WHY: PreAuthKey nodes get their tags from the key itself, not from client requests\n\t\t{\n\t\t\tname: \"preauth_key_rejects_request_tags\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\tt.Helper()\n\n\t\t\t\tuser := app.state.CreateUserForTest(\"pak-requesttags-user\")\n\n\t\t\t\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\treturn pak.Key, nil\n\t\t\t},\n\t\t\trequest: func(authKey string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: authKey,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname:    \"pak-requesttags-node\",\n\t\t\t\t\t\tRequestTags: []string{\"tag:unauthorized\"},\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\twantError:  true,\n\t\t},\n\n\t\t// TEST: Tagged PreAuthKey ignores client-provided RequestTags\n\t\t// WHAT: Tests that tagged PreAuthKey uses key tags, not client RequestTags\n\t\t// INPUT: Tagged PreAuthKey registration with different RequestTags\n\t\t// EXPECTED: Registration fails because RequestTags are rejected for PreAuthKey\n\t\t// WHY: Tags-as-identity: PreAuthKey tags are authoritative, client cannot override\n\t\t{\n\t\t\tname: \"tagged_preauth_key_rejects_client_request_tags\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\tt.Helper()\n\n\t\t\t\tuser := app.state.CreateUserForTest(\"tagged-pak-clienttags-user\")\n\t\t\t\tkeyTags := []string{\"tag:authorized\"}\n\n\t\t\t\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, keyTags)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\treturn pak.Key, nil\n\t\t\t},\n\t\t\trequest: func(authKey string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: authKey,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname:    \"tagged-pak-clienttags-node\",\n\t\t\t\t\t\tRequestTags: []string{\"tag:client-wants-this\"}, // Should be rejected\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\twantError:  true, // RequestTags rejected for PreAuthKey registrations\n\t\t},\n\n\t\t// === RE-AUTHENTICATION SCENARIOS ===\n\t\t// TEST: Existing node re-authenticates with new pre-auth key\n\t\t// WHAT: Tests that existing node can re-authenticate using new pre-auth key\n\t\t// INPUT: Existing node sends request with new valid pre-auth key\n\t\t// EXPECTED: Node successfully re-authenticates, stays authorized\n\t\t// WHY: Allows nodes to refresh authentication using pre-auth keys\n\t\t{\n\t\t\tname: \"existing_node_reauth_with_new_authkey\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\tuser := app.state.CreateUserForTest(\"reauth-user\")\n\n\t\t\t\t// First, register with initial auth key\n\t\t\t\tpak1, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)\n\t\t\t\tif err != nil 
{\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\tregReq := tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: pak1.Key,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"reauth-node\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\n\t\t\t\t_, err = app.handleRegisterWithAuthKey(regReq, machineKey1.Public())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\t// Wait for node to be available\n\t\t\t\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t_, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\t\tassert.True(c, found, \"node should be available in NodeStore\")\n\t\t\t\t}, 1*time.Second, 50*time.Millisecond, \"waiting for node to be available in NodeStore\")\n\n\t\t\t\t// Create new auth key for re-authentication\n\t\t\t\tpak2, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\treturn pak2.Key, nil\n\t\t\t},\n\t\t\trequest: func(newAuthKey string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: newAuthKey,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"reauth-node-updated\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(48 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\twantAuth:   true,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\tassert.True(t, resp.MachineAuthorized)\n\t\t\t\tassert.False(t, resp.NodeKeyExpired)\n\n\t\t\t\t// Verify node was updated, not duplicated\n\t\t\t\tnode, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\tassert.True(t, found)\n\t\t\t\tassert.Equal(t, \"reauth-node-updated\", node.Hostname())\n\t\t\t},\n\t\t},\n\t\t// TEST: Existing node re-authenticates via interactive flow\n\t\t// WHAT: Tests that an existing node can re-authenticate interactively\n\t\t// INPUT: Registered node initiates interactive re-authentication with an empty auth key\n\t\t// EXPECTED: Node receives AuthURL and can complete re-authentication\n\t\t// WHY: Allows nodes to re-authenticate without a pre-auth key\n\t\t{\n\t\t\tname: \"existing_node_reauth_interactive_flow\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\tuser := app.state.CreateUserForTest(\"interactive-reauth-user\")\n\n\t\t\t\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\t// Register initially with auth key\n\t\t\t\tregReq := tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: pak.Key,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"interactive-reauth-node\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\n\t\t\t\t_, err = app.handleRegisterWithAuthKey(regReq, machineKey1.Public())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\t// Wait for node to be available\n\t\t\t\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t_, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\t\tassert.True(c, found, \"node should be available in 
NodeStore\")\n\t\t\t\t}, 1*time.Second, 50*time.Millisecond, \"waiting for node to be available in NodeStore\")\n\n\t\t\t\treturn \"\", nil\n\t\t\t},\n\t\t\trequest: func(_ string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: \"\", // Empty auth key triggers interactive flow\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"interactive-reauth-node-updated\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(48 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey:  machineKey1.Public,\n\t\t\twantAuthURL: true,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\tassert.Contains(t, resp.AuthURL, \"register/\")\n\t\t\t\tassert.False(t, resp.MachineAuthorized)\n\t\t\t},\n\t\t},\n\n\t\t// === NODE KEY ROTATION SCENARIOS ===\n\t\t// Tests node key rotation where node changes its node key while keeping same machine key\n\n\t\t// TEST: Node key rotation with same machine key updates in place\n\t\t// WHAT: Tests that registering with new node key and same machine key updates existing node\n\t\t// INPUT: Register node with nodeKey1, then register again with nodeKey2 but same machineKey\n\t\t// EXPECTED: Node is updated in place; nodeKey2 exists, nodeKey1 no longer exists\n\t\t// WHY: Same machine key means same physical device; node key rotation updates, doesn't duplicate\n\t\t{\n\t\t\tname: \"node_key_rotation_same_machine\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\tuser := app.state.CreateUserForTest(\"rotation-user\")\n\n\t\t\t\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\t// Register with initial node key\n\t\t\t\tregReq := tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: pak.Key,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"rotation-node\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\n\t\t\t\t_, err = app.handleRegisterWithAuthKey(regReq, machineKey1.Public())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\t// Wait for node to be available\n\t\t\t\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t_, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\t\tassert.True(c, found, \"node should be available in NodeStore\")\n\t\t\t\t}, 1*time.Second, 50*time.Millisecond, \"waiting for node to be available in NodeStore\")\n\n\t\t\t\t// Create new auth key for rotation\n\t\t\t\tpakRotation, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\treturn pakRotation.Key, nil\n\t\t\t},\n\t\t\trequest: func(authKey string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: authKey,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey2.Public(), // Different node key, same machine\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"rotation-node\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\twantAuth:   true,\n\t\t\tvalidate: func(t *testing.T, resp 
*tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\tassert.True(t, resp.MachineAuthorized)\n\t\t\t\tassert.False(t, resp.NodeKeyExpired)\n\n\t\t\t\t// When same machine key is used, node is updated in place (not duplicated)\n\t\t\t\t// The old nodeKey1 should no longer exist\n\t\t\t\t_, found1 := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\tassert.False(t, found1, \"old node key should not exist after rotation\")\n\n\t\t\t\t// The new nodeKey2 should exist with the same machine key\n\t\t\t\tnode2, found2 := app.state.GetNodeByNodeKey(nodeKey2.Public())\n\t\t\t\tassert.True(t, found2, \"new node key should exist after rotation\")\n\t\t\t\tassert.Equal(t, machineKey1.Public(), node2.MachineKey(), \"machine key should remain the same\")\n\t\t\t},\n\t\t},\n\n\t\t// === MALFORMED REQUEST SCENARIOS ===\n\t\t// Tests handling of requests with malformed or unusual field values\n\n\t\t// TEST: Zero-time expiry is handled correctly\n\t\t// WHAT: Tests registration with expiry set to zero time value\n\t\t// INPUT: Register request with Expiry set to time.Time{} (zero value)\n\t\t// EXPECTED: Node registers successfully; zero time treated as no expiry\n\t\t// WHY: Zero time is valid Go default; should be handled gracefully\n\t\t{\n\t\t\tname: \"malformed_expiry_zero_time\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\tuser := app.state.CreateUserForTest(\"zero-expiry-user\")\n\n\t\t\t\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\treturn pak.Key, nil\n\t\t\t},\n\t\t\trequest: func(authKey string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: authKey,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"zero-expiry-node\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Time{}, // Zero time\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\twantAuth:   true,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\tassert.True(t, resp.MachineAuthorized)\n\n\t\t\t\t// Node should be created with default expiry handling\n\t\t\t\tnode, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\tassert.True(t, found)\n\t\t\t\tassert.Equal(t, \"zero-expiry-node\", node.Hostname())\n\t\t\t},\n\t\t},\n\t\t// TEST: Malformed hostinfo with very long hostname is handled gracefully\n\t\t// WHAT: Tests that a hostname well past the DNS label limit is sanitized\n\t\t// INPUT: Hostinfo with a ~100-character hostname containing special characters (exceeds the 63-char DNS label limit)\n\t\t// EXPECTED: Node registers successfully with a non-empty, sanitized hostname\n\t\t// WHY: Defensive code must tolerate hostnames violating the DNS label limit (RFC 1123)\n\t\t{\n\t\t\tname: \"malformed_hostinfo_invalid_data\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\tuser := app.state.CreateUserForTest(\"invalid-hostinfo-user\")\n\n\t\t\t\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\treturn pak.Key, nil\n\t\t\t},\n\t\t\trequest: func(authKey string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: authKey,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: 
nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname:     \"test-node-with-very-long-hostname-that-might-exceed-normal-limits-and-contain-special-chars-!@#$%\",\n\t\t\t\t\t\tBackendLogID: \"invalid-log-id\",\n\t\t\t\t\t\tOS:           \"unknown-os\",\n\t\t\t\t\t\tOSVersion:    \"999.999.999\",\n\t\t\t\t\t\tDeviceModel:  \"test-device-model\",\n\t\t\t\t\t\t// Note: RequestTags are not included for PreAuthKey registrations\n\t\t\t\t\t\t// since tags come from the key itself, not client requests.\n\t\t\t\t\t\tServices: []tailcfg.Service{{Proto: \"tcp\", Port: 65535}},\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\twantAuth:   true,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\tassert.True(t, resp.MachineAuthorized)\n\n\t\t\t\t// Node should be created even with malformed hostinfo\n\t\t\t\tnode, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\tassert.True(t, found)\n\t\t\t\t// Hostname should be sanitized or handled gracefully\n\t\t\t\tassert.NotEmpty(t, node.Hostname())\n\t\t\t},\n\t\t},\n\n\t\t// === REGISTRATION CACHE EDGE CASES ===\n\t\t// Tests edge cases in registration cache handling during interactive flow\n\n\t\t// TEST: Followup registration with nil response (cache expired during auth)\n\t\t// WHAT: Tests that followup request handles nil node response (cache expired/cleared)\n\t\t// INPUT: Followup request where auth completion sends nil (cache was cleared)\n\t\t// EXPECTED: Returns new AuthURL so client can retry authentication\n\t\t// WHY: Nil response means cache expired - give client new AuthURL instead of error\n\t\t{\n\t\t\tname: \"followup_registration_node_nil_response\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\tregID, err := types.NewAuthID()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\tnodeToRegister := types.NewRegisterAuthRequest(types.Node{\n\t\t\t\t\tHostname: \"nil-response-node\",\n\t\t\t\t})\n\t\t\t\tapp.state.SetAuthCacheEntry(regID, nodeToRegister)\n\n\t\t\t\t// Simulate registration that returns empty NodeView (cache expired during auth)\n\t\t\t\tgo func() {\n\t\t\t\t\tnodeToRegister.FinishAuth(types.AuthVerdict{Node: types.NodeView{}}) // Empty view indicates cache expiry\n\t\t\t\t}()\n\n\t\t\t\treturn fmt.Sprintf(\"http://localhost:8080/register/%s\", regID), nil\n\t\t\t},\n\t\t\trequest: func(followupURL string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tFollowup: followupURL,\n\t\t\t\t\tNodeKey:  nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"nil-response-node\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\twantAuth:   false, // Should not be authorized yet - needs to use new AuthURL\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\t// Should get a new AuthURL, not an error\n\t\t\t\tassert.NotEmpty(t, resp.AuthURL, \"should receive new AuthURL when cache returns nil\")\n\t\t\t\tassert.Contains(t, resp.AuthURL, \"/register/\", \"AuthURL should contain registration path\")\n\t\t\t\tassert.False(t, resp.MachineAuthorized, \"machine should not be authorized yet\")\n\t\t\t},\n\t\t},\n\t\t// TEST: Malformed followup path is rejected\n\t\t// WHAT: Tests that followup URL 
with malformed path is rejected\n\t\t// INPUT: Followup URL with path that doesn't match expected format\n\t\t// EXPECTED: Request fails with error (invalid followup URL)\n\t\t// WHY: Path validation prevents processing of corrupted/invalid URLs\n\t\t{\n\t\t\tname: \"followup_registration_malformed_path\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\treturn \"http://localhost:8080/register/\", nil // Missing registration ID\n\t\t\t},\n\t\t\trequest: func(followupURL string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tFollowup: followupURL,\n\t\t\t\t\tNodeKey:  nodeKey1.Public(),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\twantError:  true,\n\t\t},\n\t\t// TEST: Wrong followup path format is rejected\n\t\t// WHAT: Tests that followup URL with incorrect path structure fails\n\t\t// INPUT: Valid URL but path doesn't start with \"/register/\"\n\t\t// EXPECTED: Request fails with error (invalid path format)\n\t\t// WHY: Strict path validation ensures only valid registration URLs accepted\n\t\t{\n\t\t\tname: \"followup_registration_wrong_path_format\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\treturn \"http://localhost:8080/wrong/path/format\", nil\n\t\t\t},\n\t\t\trequest: func(followupURL string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tFollowup: followupURL,\n\t\t\t\t\tNodeKey:  nodeKey1.Public(),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\twantError:  true,\n\t\t},\n\n\t\t// === AUTH PROVIDER EDGE CASES ===\n\t\t// TEST: Interactive workflow preserves custom hostinfo\n\t\t// WHAT: Tests that custom hostinfo fields are preserved through interactive flow\n\t\t// INPUT: Interactive registration with detailed hostinfo (OS, version, model)\n\t\t// EXPECTED: Node registers with all hostinfo fields preserved\n\t\t// WHY: Ensures interactive flow doesn't lose custom hostinfo data\n\t\t// NOTE: RequestTags are NOT tested here because tag authorization via\n\t\t// advertise-tags requires the user to have existing nodes (for IP-based\n\t\t// ownership verification). 
New users registering their first node cannot\n\t\t// claim tags via RequestTags - they must use a tagged PreAuthKey instead.\n\t\t{\n\t\t\tname: \"interactive_workflow_with_custom_hostinfo\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\treturn \"\", nil\n\t\t\t},\n\t\t\trequest: func(_ string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname:    \"custom-interactive-node\",\n\t\t\t\t\t\tOS:          \"linux\",\n\t\t\t\t\t\tOSVersion:   \"20.04\",\n\t\t\t\t\t\tDeviceModel: \"server\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey:              machineKey1.Public,\n\t\t\trequiresInteractiveFlow: true,\n\t\t\tinteractiveSteps: []interactiveStep{\n\t\t\t\t{stepType: stepTypeInitialRequest, expectAuthURL: true, expectCacheEntry: true},\n\t\t\t\t{stepType: stepTypeAuthCompletion, callAuthPath: true, expectCacheEntry: false}, // cleaned up after completion\n\t\t\t},\n\t\t\tvalidateCompleteResponse: true,\n\t\t\texpectedAuthURLPattern:   \"/register/\",\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\t// Verify custom hostinfo was preserved through interactive workflow\n\t\t\t\tnode, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\tassert.True(t, found, \"node should be found after interactive registration\")\n\n\t\t\t\tif found {\n\t\t\t\t\tassert.Equal(t, \"custom-interactive-node\", node.Hostname())\n\t\t\t\t\tassert.Equal(t, \"linux\", node.Hostinfo().OS())\n\t\t\t\t\tassert.Equal(t, \"20.04\", node.Hostinfo().OSVersion())\n\t\t\t\t\tassert.Equal(t, \"server\", node.Hostinfo().DeviceModel())\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\n\t\t// === PRE-AUTH KEY USAGE TRACKING ===\n\t\t// Tests tracking of pre-auth key usage\n\n\t\t// TEST: Pre-auth key usage is tracked correctly\n\t\t// WHAT: Tests that using a single-use pre-auth key marks it as consumed\n\t\t// INPUT: Single-use pre-auth key used to register one node\n\t\t// EXPECTED: Node registers successfully and remains linked to the spent single-use key\n\t\t// WHY: Usage tracking enables monitoring and auditing of pre-auth key usage\n\t\t{\n\t\t\tname: \"preauth_key_usage_count_tracking\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\tuser := app.state.CreateUserForTest(\"usage-count-user\")\n\n\t\t\t\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), false, false, nil, nil) // Single use\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\treturn pak.Key, nil\n\t\t\t},\n\t\t\trequest: func(authKey string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: authKey,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"usage-count-node\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\twantAuth:   true,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\tassert.True(t, resp.MachineAuthorized)\n\t\t\t\tassert.False(t, resp.NodeKeyExpired)\n\n\t\t\t\t// Verify auth key usage was tracked\n\t\t\t\tnode, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\tassert.True(t, found)\n\t\t\t\tassert.Equal(t, \"usage-count-node\", node.Hostname())\n\n\t\t\t\t// Key should now be used up (single use)\n\t\t\t\tif node.AuthKey().Valid() {\n\t\t\t\t\tassert.False(t, node.AuthKey().Reusable())\n\t\t\t\t}\n\t\t\t},\n\t\t},\n
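\n\t\t// NOTE: the CreatePreAuthKey boolean pair used throughout this table is\n\t\t// (reusable, ephemeral):\n\t\t//\n\t\t//\t(true, false)  -> reusable key\n\t\t//\t(false, false) -> single-use key (as above)\n\t\t//\t(false, true)  -> single-use ephemeral key (node deleted on logout)\n\t\t//\n\t\t// The trailing parameters carry an optional expiry and optional ACL tags.\n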
\n\t\t// === REGISTRATION ID GENERATION AND ADVANCED EDGE CASES ===\n\t\t// TEST: Interactive workflow generates valid registration IDs\n\t\t// WHAT: Tests that interactive flow generates unique, valid registration IDs\n\t\t// INPUT: Interactive registration request\n\t\t// EXPECTED: AuthURL contains valid registration ID that can be extracted\n\t\t// WHY: Registration IDs must be unique and valid for cache lookup\n\t\t{\n\t\t\tname: \"interactive_workflow_registration_id_generation\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\treturn \"\", nil\n\t\t\t},\n\t\t\trequest: func(_ string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"registration-id-test-node\",\n\t\t\t\t\t\tOS:       \"test-os\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey:              machineKey1.Public,\n\t\t\trequiresInteractiveFlow: true,\n\t\t\tinteractiveSteps: []interactiveStep{\n\t\t\t\t{stepType: stepTypeInitialRequest, expectAuthURL: true, expectCacheEntry: true},\n\t\t\t\t{stepType: stepTypeAuthCompletion, callAuthPath: true, expectCacheEntry: false},\n\t\t\t},\n\t\t\tvalidateCompleteResponse: true,\n\t\t\texpectedAuthURLPattern:   \"/register/\",\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\t// Verify registration ID was properly generated and used\n\t\t\t\tnode, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\tassert.True(t, found, \"node should be registered after interactive workflow\")\n\n\t\t\t\tif found {\n\t\t\t\t\tassert.Equal(t, \"registration-id-test-node\", node.Hostname())\n\t\t\t\t\tassert.Equal(t, \"test-os\", node.Hostinfo().OS())\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t// TEST: Registration with same node key (sequential baseline)\n\t\t// WHAT: Tests a standard auth-key registration for the shared node key\n\t\t// INPUT: Reusable pre-auth key, register request with nodeKey1\n\t\t// EXPECTED: Node registers successfully and is retrievable by node key\n\t\t// WHY: Establishes the baseline behavior for same-node-key registrations\n\t\t{\n\t\t\tname: \"concurrent_registration_same_node_key\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\tuser := app.state.CreateUserForTest(\"concurrent-user\")\n\n\t\t\t\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\treturn pak.Key, nil\n\t\t\t},\n\t\t\trequest: func(authKey string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: authKey,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"concurrent-node\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\twantAuth:   true,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\tassert.True(t, resp.MachineAuthorized)\n\t\t\t\tassert.False(t, resp.NodeKeyExpired)\n\n\t\t\t\t// Verify node was registered\n\t\t\t\tnode, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\tassert.True(t, found)\n\t\t\t\tassert.Equal(t, \"concurrent-node\", node.Hostname())\n\t\t\t},\n\t\t},\n
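\t\t// NOTE: two separate clocks are in play in the next case: the pre-auth\n\t\t// key's own expiry (how long the key can be used to register) and\n\t\t// RegisterRequest.Expiry (how long the resulting node key lives); the\n\t\t// case pins down that the node gets the request expiry even when the\n\t\t// key remains valid for longer.\n\n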
\t\t// TEST: Auth key expiry vs request expiry handling\n\t\t// WHAT: Tests that the node expiry comes from the request, not from the key\n\t\t// INPUT: Pre-auth key expiring in 48 hours, request expiry of 12 hours\n\t\t// EXPECTED: Node registers with the shorter request expiry applied\n\t\t// WHY: The key's own expiry only gates registration; the request expiry governs the node key lifetime\n\t\t{\n\t\t\tname: \"auth_key_with_future_expiry_past_request_expiry\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\tuser := app.state.CreateUserForTest(\"future-expiry-user\")\n\t\t\t\t// Auth key expires in the future\n\t\t\t\texpiry := time.Now().Add(48 * time.Hour)\n\n\t\t\t\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, &expiry, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\treturn pak.Key, nil\n\t\t\t},\n\t\t\trequest: func(authKey string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: authKey,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"future-expiry-node\",\n\t\t\t\t\t},\n\t\t\t\t\t// Request expires before auth key\n\t\t\t\t\tExpiry: time.Now().Add(12 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\twantAuth:   true,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\tassert.True(t, resp.MachineAuthorized)\n\t\t\t\tassert.False(t, resp.NodeKeyExpired)\n\n\t\t\t\t// Node should be created with request expiry (shorter than auth key expiry)\n\t\t\t\tnode, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\tassert.True(t, found)\n\t\t\t\tassert.Equal(t, \"future-expiry-node\", node.Hostname())\n\t\t\t},\n\t\t},\n\t\t// TEST: Re-authentication with a different user's auth key\n\t\t// WHAT: Tests what happens when a node re-authenticates with another user's auth key\n\t\t// INPUT: Node registered with user1's auth key, re-authenticates with user2's auth key\n\t\t// EXPECTED: A new node is created for user2; user1's original node remains\n\t\t// WHY: The same physical machine keeps a separate node identity per user\n\t\t{\n\t\t\tname: \"reauth_existing_node_different_user_auth_key\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\t// Create two users\n\t\t\t\tuser1 := app.state.CreateUserForTest(\"user1-context\")\n\t\t\t\tuser2 := app.state.CreateUserForTest(\"user2-context\")\n\n\t\t\t\t// Register node with user1's auth key\n\t\t\t\tpak1, err := app.state.CreatePreAuthKey(user1.TypedID(), true, false, nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\tregReq := tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: pak1.Key,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"context-node-user1\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\n\t\t\t\t_, err = app.handleRegisterWithAuthKey(regReq, machineKey1.Public())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\t// Wait for node to be available\n\t\t\t\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t_, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\t\tassert.True(c, found, \"node should be available in NodeStore\")\n\t\t\t\t}, 1*time.Second, 
50*time.Millisecond, \"waiting for node to be available in NodeStore\")\n\n\t\t\t\t// Return user2's auth key for re-authentication\n\t\t\t\tpak2, err := app.state.CreatePreAuthKey(user2.TypedID(), true, false, nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\treturn pak2.Key, nil\n\t\t\t},\n\t\t\trequest: func(user2AuthKey string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: user2AuthKey,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"context-node-user2\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\twantAuth:   true,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\tassert.True(t, resp.MachineAuthorized)\n\t\t\t\tassert.False(t, resp.NodeKeyExpired)\n\n\t\t\t\t// Verify NEW node was created for user2\n\t\t\t\tnode2, found := app.state.GetNodeByMachineKey(machineKey1.Public(), types.UserID(2))\n\t\t\t\trequire.True(t, found, \"new node should exist for user2\")\n\t\t\t\tassert.Equal(t, uint(2), node2.UserID().Get(), \"new node should belong to user2\")\n\n\t\t\t\tuser := node2.User()\n\t\t\t\tassert.Equal(t, \"user2-context\", user.Name(), \"new node should show user2 username\")\n\n\t\t\t\t// Verify original node still exists for user1\n\t\t\t\tnode1, found := app.state.GetNodeByMachineKey(machineKey1.Public(), types.UserID(1))\n\t\t\t\trequire.True(t, found, \"original node should still exist for user1\")\n\t\t\t\tassert.Equal(t, uint(1), node1.UserID().Get(), \"original node should still belong to user1\")\n\n\t\t\t\t// Verify they are different nodes (different IDs)\n\t\t\t\tassert.NotEqual(t, node1.ID(), node2.ID(), \"should be different node IDs\")\n\t\t\t},\n\t\t},\n\t\t// TEST: Re-authentication with different user via interactive flow creates new node\n\t\t// WHAT: Tests new node creation when re-authenticating interactively with a different user\n\t\t// INPUT: Node registered with user1, re-authenticates interactively as user2 (same machine key, same node key)\n\t\t// EXPECTED: New node is created for user2, user1's original node remains (no transfer)\n\t\t// WHY: Same physical machine can have separate node identities per user\n\t\t{\n\t\t\tname: \"interactive_reauth_existing_node_different_user_creates_new_node\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\t// Create user1 and register a node with auth key\n\t\t\t\tuser1 := app.state.CreateUserForTest(\"interactive-user-1\")\n\n\t\t\t\tpak1, err := app.state.CreatePreAuthKey(user1.TypedID(), true, false, nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\t// Register node with user1's auth key first\n\t\t\t\tinitialReq := tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: pak1.Key,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"transfer-node-user1\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\n\t\t\t\t_, err = app.handleRegister(context.Background(), initialReq, machineKey1.Public())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\t// Wait for node to be available\n\t\t\t\trequire.EventuallyWithT(t, func(c *assert.CollectT) 
{\n\t\t\t\t\t_, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\t\tassert.True(c, found, \"node should be available in NodeStore\")\n\t\t\t\t}, 1*time.Second, 50*time.Millisecond, \"waiting for node to be available in NodeStore\")\n\n\t\t\t\treturn \"\", nil\n\t\t\t},\n\t\t\trequest: func(_ string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth:    &tailcfg.RegisterResponseAuth{}, // Empty auth triggers interactive flow\n\t\t\t\t\tNodeKey: nodeKey1.Public(),               // Same node key as original registration\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"transfer-node-user2\", // Different hostname\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey:              machineKey1.Public, // Same machine key\n\t\t\trequiresInteractiveFlow: true,\n\t\t\tinteractiveSteps: []interactiveStep{\n\t\t\t\t{stepType: stepTypeInitialRequest, expectAuthURL: true, expectCacheEntry: true},\n\t\t\t\t{stepType: stepTypeAuthCompletion, callAuthPath: true, expectCacheEntry: false},\n\t\t\t},\n\t\t\tvalidateCompleteResponse: true,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\t// User1's original node should STILL exist (not transferred)\n\t\t\t\tnode1, found1 := app.state.GetNodeByMachineKey(machineKey1.Public(), types.UserID(1))\n\t\t\t\trequire.True(t, found1, \"user1's original node should still exist\")\n\t\t\t\tassert.Equal(t, uint(1), node1.UserID().Get(), \"user1's node should still belong to user1\")\n\t\t\t\tassert.Equal(t, nodeKey1.Public(), node1.NodeKey(), \"user1's node should have original node key\")\n\n\t\t\t\t// User2 should have a NEW node created\n\t\t\t\tnode2, found2 := app.state.GetNodeByMachineKey(machineKey1.Public(), types.UserID(2))\n\t\t\t\trequire.True(t, found2, \"user2 should have new node created\")\n\t\t\t\tassert.Equal(t, uint(2), node2.UserID().Get(), \"user2's node should belong to user2\")\n\n\t\t\t\tuser := node2.User()\n\t\t\t\tassert.Equal(t, \"interactive-test-user\", user.Name(), \"user2's node should show correct username\")\n\n\t\t\t\t// Both nodes should have the same machine key but different IDs\n\t\t\t\tassert.NotEqual(t, node1.ID(), node2.ID(), \"should be different nodes (different IDs)\")\n\t\t\t\tassert.Equal(t, machineKey1.Public(), node2.MachineKey(), \"user2's node should have same machine key\")\n\t\t\t},\n\t\t},\n\t\t// TEST: Followup request after registration cache expiry\n\t\t// WHAT: Tests that expired followup requests get a new AuthURL instead of error\n\t\t// INPUT: Followup request for registration ID that has expired/been evicted from cache\n\t\t// EXPECTED: Returns new AuthURL (not error) so client can retry authentication\n\t\t// WHY: Validates new reqToNewRegisterResponse functionality - prevents client getting stuck\n\t\t{\n\t\t\tname: \"followup_request_after_cache_expiry\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\t// Generate a registration ID that doesn't exist in cache\n\t\t\t\t// This simulates an expired/missing cache entry\n\t\t\t\tregID, err := types.NewAuthID()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\t\t\t\t// Don't add it to cache - it's already expired/missing\n\t\t\t\treturn regID.String(), nil\n\t\t\t},\n\t\t\trequest: func(regID string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tFollowup: \"http://localhost:8080/register/\" + 
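\n\t\t\t\t\t\t// The host matches the ServerURL configured in createTestApp; the\n\t\t\t\t\t\t// trailing ID was deliberately never cached, simulating expiry.\n\t\t\t\t\t\t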
regID,\n\t\t\t\t\tNodeKey:  nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"expired-cache-node\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\twantAuth:   false, // Should not be authorized yet - needs to use new AuthURL\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\t// Should get a new AuthURL, not an error\n\t\t\t\tassert.NotEmpty(t, resp.AuthURL, \"should receive new AuthURL when registration expired\")\n\t\t\t\tassert.Contains(t, resp.AuthURL, \"/register/\", \"AuthURL should contain registration path\")\n\t\t\t\tassert.False(t, resp.MachineAuthorized, \"machine should not be authorized yet\")\n\n\t\t\t\t// Verify the response contains a valid registration URL\n\t\t\t\tauthURL, err := url.Parse(resp.AuthURL)\n\t\t\t\tassert.NoError(t, err, \"AuthURL should be a valid URL\") //nolint:testifylint // inside closure, uses assert pattern\n\t\t\t\tassert.True(t, strings.HasPrefix(authURL.Path, \"/register/\"), \"AuthURL path should start with /register/\")\n\n\t\t\t\t// Extract and validate the new registration ID exists in cache\n\t\t\t\tnewRegIDStr := strings.TrimPrefix(authURL.Path, \"/register/\")\n\t\t\t\tnewRegID, err := types.AuthIDFromString(newRegIDStr)\n\t\t\t\tassert.NoError(t, err, \"should be able to parse new registration ID\") //nolint:testifylint // inside closure\n\n\t\t\t\t// Verify new registration entry exists in cache\n\t\t\t\t_, found := app.state.GetAuthCacheEntry(newRegID)\n\t\t\t\tassert.True(t, found, \"new registration should exist in cache\")\n\t\t\t},\n\t\t},\n\t\t// TEST: Logout with expiry exactly at current time\n\t\t// WHAT: Tests logout when expiry is set to exact current time (boundary case)\n\t\t// INPUT: Existing node sends request with expiry=time.Now() (not past, not future)\n\t\t// EXPECTED: Node is logged out (treated as expired)\n\t\t// WHY: Edge case: current time should be treated as expired\n\t\t{\n\t\t\tname: \"logout_with_exactly_now_expiry\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\tuser := app.state.CreateUserForTest(\"exact-now-user\")\n\n\t\t\t\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\t// Register the node first\n\t\t\t\tregReq := tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: pak.Key,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"exact-now-node\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\n\t\t\t\t_, err = app.handleRegisterWithAuthKey(regReq, machineKey1.Public())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\t// Wait for node to be available\n\t\t\t\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t_, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\t\tassert.True(c, found, \"node should be available in NodeStore\")\n\t\t\t\t}, 1*time.Second, 50*time.Millisecond, \"waiting for node to be available in NodeStore\")\n\n\t\t\t\treturn \"\", nil\n\t\t\t},\n\t\t\trequest: func(_ string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth:    nil,\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tExpiry:  time.Now(), // Exactly now (edge case between past 
and future)\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey:  machineKey1.Public,\n\t\t\twantAuth:    true,\n\t\t\twantExpired: true,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\tassert.True(t, resp.MachineAuthorized)\n\t\t\t\tassert.True(t, resp.NodeKeyExpired)\n\n\t\t\t\t// Node should be marked as expired but still exist\n\t\t\t\tnode, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\tassert.True(t, found)\n\t\t\t\tassert.True(t, node.IsExpired())\n\t\t\t},\n\t\t},\n\t\t// TEST: Interactive workflow timeout cleans up cache\n\t\t// WHAT: Tests that timed-out interactive registrations clean up cache entries\n\t\t// INPUT: Interactive registration that times out without completion\n\t\t// EXPECTED: Cache entry should be cleaned up (behavior depends on implementation)\n\t\t// WHY: Prevents cache bloat from abandoned registrations\n\t\t{\n\t\t\tname: \"interactive_workflow_timeout_cleanup\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\treturn \"\", nil\n\t\t\t},\n\t\t\trequest: func(_ string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tNodeKey: nodeKey2.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"interactive-timeout-node\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey:              machineKey2.Public,\n\t\t\trequiresInteractiveFlow: true,\n\t\t\tinteractiveSteps: []interactiveStep{\n\t\t\t\t{stepType: stepTypeInitialRequest, expectAuthURL: true, expectCacheEntry: true},\n\t\t\t\t// NOTE: No auth_completion step - simulates timeout scenario\n\t\t\t},\n\t\t\tvalidateRegistrationCache: true, // should be cleaned up eventually\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\t// Verify AuthURL was generated but registration not completed\n\t\t\t\tassert.Contains(t, resp.AuthURL, \"/register/\")\n\t\t\t\tassert.False(t, resp.MachineAuthorized)\n\t\t\t},\n\t\t},\n\n\t\t// === COMPREHENSIVE INTERACTIVE WORKFLOW EDGE CASES ===\n\t\t// TEST: Interactive workflow with existing node from different user creates new node\n\t\t// WHAT: Tests new node creation when re-authenticating interactively with different user\n\t\t// INPUT: Node already registered with user1, interactive auth with user2 (same machine key, different node key)\n\t\t// EXPECTED: New node is created for user2, user1's original node remains (no transfer)\n\t\t// WHY: Same physical machine can have separate node identities per user\n\t\t{\n\t\t\tname: \"interactive_workflow_with_existing_node_different_user_creates_new_node\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\t// First create a node under user1\n\t\t\t\tuser1 := app.state.CreateUserForTest(\"existing-user-1\")\n\n\t\t\t\tpak1, err := app.state.CreatePreAuthKey(user1.TypedID(), true, false, nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\t// Register the node with user1 first\n\t\t\t\tinitialReq := tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: pak1.Key,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"existing-node-user1\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\n\t\t\t\t_, err = app.handleRegister(context.Background(), initialReq, 
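\n\t\t\t\t\t// Setup registers through the general handleRegister entrypoint, with\n\t\t\t\t\t// the pre-auth key carried in initialReq.Auth.\n\t\t\t\t\t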
machineKey1.Public())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\t// Wait for node to be available\n\t\t\t\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t_, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\t\tassert.True(c, found, \"node should be available in NodeStore\")\n\t\t\t\t}, 1*time.Second, 50*time.Millisecond, \"waiting for node to be available in NodeStore\")\n\n\t\t\t\treturn \"\", nil\n\t\t\t},\n\t\t\trequest: func(_ string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tAuth:    &tailcfg.RegisterResponseAuth{}, // Empty auth triggers interactive flow\n\t\t\t\t\tNodeKey: nodeKey2.Public(),               // Different node key for different user\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"existing-node-user2\", // Different hostname\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey:              machineKey1.Public,\n\t\t\trequiresInteractiveFlow: true,\n\t\t\tinteractiveSteps: []interactiveStep{\n\t\t\t\t{stepType: stepTypeInitialRequest, expectAuthURL: true, expectCacheEntry: true},\n\t\t\t\t{stepType: stepTypeAuthCompletion, callAuthPath: true, expectCacheEntry: false},\n\t\t\t},\n\t\t\tvalidateCompleteResponse: true,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\t// User1's original node with nodeKey1 should STILL exist\n\t\t\t\tnode1, found1 := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\trequire.True(t, found1, \"user1's original node with nodeKey1 should still exist\")\n\t\t\t\tassert.Equal(t, uint(1), node1.UserID().Get(), \"user1's node should still belong to user1\")\n\t\t\t\tassert.Equal(t, uint64(1), node1.ID().Uint64(), \"user1's node should be ID=1\")\n\n\t\t\t\t// User2 should have a NEW node with nodeKey2\n\t\t\t\tnode2, found2 := app.state.GetNodeByNodeKey(nodeKey2.Public())\n\t\t\t\trequire.True(t, found2, \"user2 should have new node with nodeKey2\")\n\n\t\t\t\tassert.Equal(t, \"existing-node-user2\", node2.Hostname(), \"hostname should be from new registration\")\n\t\t\t\tuser := node2.User()\n\t\t\t\tassert.Equal(t, \"interactive-test-user\", user.Name(), \"user2's node should belong to user2\")\n\t\t\t\tassert.Equal(t, machineKey1.Public(), node2.MachineKey(), \"machine key should be the same\")\n\n\t\t\t\t// Verify it's a NEW node, not transferred\n\t\t\t\tassert.NotEqual(t, uint64(1), node2.ID().Uint64(), \"should be a NEW node (different ID)\")\n\t\t\t},\n\t\t},\n\t\t// TEST: Interactive workflow with malformed followup URL\n\t\t// WHAT: Tests that malformed followup URLs in interactive flow are rejected\n\t\t// INPUT: Interactive registration with invalid followup URL format\n\t\t// EXPECTED: Request fails with error (invalid URL)\n\t\t// WHY: Validates followup URLs to prevent errors\n\t\t{\n\t\t\tname: \"interactive_workflow_malformed_followup_url\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\treturn \"\", nil\n\t\t\t},\n\t\t\trequest: func(_ string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"malformed-followup-node\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey:              machineKey1.Public,\n\t\t\trequiresInteractiveFlow: true,\n\t\t\tinteractiveSteps: 
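\n\t\t\t// Two-step flow: the initial request must produce an AuthURL and a cache\n\t\t\t// entry; completing auth must consume that entry (expectCacheEntry: false).\n\t\t\t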
[]interactiveStep{\n\t\t\t\t{stepType: stepTypeInitialRequest, expectAuthURL: true, expectCacheEntry: true},\n\t\t\t},\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\t// Test malformed followup URLs after getting initial AuthURL\n\t\t\t\tauthURL := resp.AuthURL\n\t\t\t\tassert.Contains(t, authURL, \"/register/\")\n\n\t\t\t\t// Test various malformed followup URLs - use completely invalid IDs to avoid blocking\n\t\t\t\tmalformedURLs := []string{\n\t\t\t\t\t\"invalid-url\",\n\t\t\t\t\t\"/register/\",\n\t\t\t\t\t\"/register/invalid-id-that-does-not-exist\",\n\t\t\t\t\t\"/register/00000000-0000-0000-0000-000000000000\",\n\t\t\t\t\t\"http://malicious-site.com/register/invalid-id\",\n\t\t\t\t}\n\n\t\t\t\tfor _, malformedURL := range malformedURLs {\n\t\t\t\t\tfollowupReq := tailcfg.RegisterRequest{\n\t\t\t\t\t\tNodeKey:  nodeKey1.Public(),\n\t\t\t\t\t\tFollowup: malformedURL,\n\t\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\t\tHostname: \"malformed-followup-node\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t\t}\n\n\t\t\t\t\t// These should all fail gracefully\n\t\t\t\t\t_, err := app.handleRegister(context.Background(), followupReq, machineKey1.Public())\n\t\t\t\t\tassert.Error(t, err, \"malformed followup URL should be rejected: %s\", malformedURL)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t// TEST: Concurrent interactive workflow registrations\n\t\t// WHAT: Tests multiple simultaneous interactive registrations\n\t\t// INPUT: Two nodes initiate interactive registration concurrently\n\t\t// EXPECTED: Both registrations succeed independently\n\t\t// WHY: System should handle concurrent interactive flows without conflicts\n\t\t{\n\t\t\tname: \"interactive_workflow_concurrent_registrations\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\treturn \"\", nil\n\t\t\t},\n\t\t\trequest: func(_ string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"concurrent-registration-node\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\t// This test validates concurrent interactive registration attempts\n\t\t\t\tassert.Contains(t, resp.AuthURL, \"/register/\")\n\n\t\t\t\t// Start multiple concurrent followup requests\n\t\t\t\tauthURL := resp.AuthURL\n\t\t\t\tnumConcurrent := 3\n\t\t\t\tresults := make(chan error, numConcurrent)\n\n\t\t\t\tfor i := range numConcurrent {\n\t\t\t\t\tgo func(index int) {\n\t\t\t\t\t\tfollowupReq := tailcfg.RegisterRequest{\n\t\t\t\t\t\t\tNodeKey:  nodeKey1.Public(),\n\t\t\t\t\t\t\tFollowup: authURL,\n\t\t\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\t\t\tHostname: fmt.Sprintf(\"concurrent-node-%d\", index),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t_, err := app.handleRegister(context.Background(), followupReq, machineKey1.Public())\n\t\t\t\t\t\tresults <- err\n\t\t\t\t\t}(i)\n\t\t\t\t}\n\n\t\t\t\t// Complete the authentication to signal the waiting goroutines\n\t\t\t\t// The goroutines will receive from the buffered channel when ready\n\t\t\t\tregistrationID, err := extractRegistrationIDFromAuthURL(authURL)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tuser := 
app.state.CreateUserForTest(\"concurrent-test-user\")\n\t\t\t\t_, _, err = app.state.HandleNodeFromAuthPath(\n\t\t\t\t\tregistrationID,\n\t\t\t\t\ttypes.UserID(user.ID),\n\t\t\t\t\tnil,\n\t\t\t\t\t\"concurrent-test-method\",\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// Collect results - at least one should succeed\n\t\t\t\tsuccessCount := 0\n\n\t\t\t\tfor range numConcurrent {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase err := <-results:\n\t\t\t\t\t\tif err == nil {\n\t\t\t\t\t\t\tsuccessCount++\n\t\t\t\t\t\t}\n\t\t\t\t\tcase <-time.After(2 * time.Second):\n\t\t\t\t\t\t// Some may timeout, which is expected\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// At least one concurrent request should have succeeded\n\t\t\t\tassert.GreaterOrEqual(t, successCount, 1, \"at least one concurrent registration should succeed\")\n\t\t\t},\n\t\t},\n\t\t// TEST: Interactive workflow with node key rotation attempt\n\t\t// WHAT: Tests interactive registration with different node key (appears as rotation)\n\t\t// INPUT: Node registered with nodeKey1, then interactive registration with nodeKey2\n\t\t// EXPECTED: Creates new node for different user (not true rotation)\n\t\t// WHY: Interactive flow creates new nodes with new users; doesn't rotate existing nodes\n\t\t{\n\t\t\tname: \"interactive_workflow_node_key_rotation\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\t// Register initial node\n\t\t\t\tuser := app.state.CreateUserForTest(\"rotation-user\")\n\n\t\t\t\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\tinitialReq := tailcfg.RegisterRequest{\n\t\t\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\t\t\tAuthKey: pak.Key,\n\t\t\t\t\t},\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"rotation-node-initial\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\n\t\t\t\t_, err = app.handleRegister(context.Background(), initialReq, machineKey1.Public())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\t// Wait for node to be available\n\t\t\t\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t_, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\t\tassert.True(c, found, \"node should be available in NodeStore\")\n\t\t\t\t}, 1*time.Second, 50*time.Millisecond, \"waiting for node to be available in NodeStore\")\n\n\t\t\t\treturn \"\", nil\n\t\t\t},\n\t\t\trequest: func(_ string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tNodeKey:    nodeKey2.Public(), // Different node key (rotation scenario)\n\t\t\t\t\tOldNodeKey: nodeKey1.Public(), // Previous node key\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"rotation-node-updated\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey:              machineKey1.Public,\n\t\t\trequiresInteractiveFlow: true,\n\t\t\tinteractiveSteps: []interactiveStep{\n\t\t\t\t{stepType: stepTypeInitialRequest, expectAuthURL: true, expectCacheEntry: true},\n\t\t\t\t{stepType: stepTypeAuthCompletion, callAuthPath: true, expectCacheEntry: false},\n\t\t\t},\n\t\t\tvalidateCompleteResponse: true,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\t// User1's original node with nodeKey1 should STILL exist\n\t\t\t\toldNode, foundOld := 
app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\trequire.True(t, foundOld, \"user1's original node with nodeKey1 should still exist\")\n\t\t\t\tassert.Equal(t, uint(1), oldNode.UserID().Get(), \"user1's node should still belong to user1\")\n\t\t\t\tassert.Equal(t, uint64(1), oldNode.ID().Uint64(), \"user1's node should be ID=1\")\n\n\t\t\t\t// User2 should have a NEW node with nodeKey2\n\t\t\t\tnewNode, found := app.state.GetNodeByNodeKey(nodeKey2.Public())\n\t\t\t\trequire.True(t, found, \"user2 should have new node with nodeKey2\")\n\t\t\t\tassert.Equal(t, \"rotation-node-updated\", newNode.Hostname())\n\t\t\t\tassert.Equal(t, machineKey1.Public(), newNode.MachineKey())\n\n\t\t\t\tuser := newNode.User()\n\t\t\t\tassert.Equal(t, \"interactive-test-user\", user.Name(), \"user2's node should belong to user2\")\n\n\t\t\t\t// Verify it's a NEW node, not transferred\n\t\t\t\tassert.NotEqual(t, uint64(1), newNode.ID().Uint64(), \"should be a NEW node (different ID)\")\n\t\t\t},\n\t\t},\n\t\t// TEST: Interactive workflow with nil hostinfo\n\t\t// WHAT: Tests interactive registration when request has nil hostinfo\n\t\t// INPUT: Interactive registration request with Hostinfo=nil\n\t\t// EXPECTED: Node registers successfully with generated default hostname\n\t\t// WHY: Defensive code handles nil hostinfo in interactive flow\n\t\t{\n\t\t\tname: \"interactive_workflow_with_nil_hostinfo\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\treturn \"\", nil\n\t\t\t},\n\t\t\trequest: func(_ string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tNodeKey:  nodeKey1.Public(),\n\t\t\t\t\tHostinfo: nil, // Nil hostinfo should be handled gracefully\n\t\t\t\t\tExpiry:   time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey:              machineKey1.Public,\n\t\t\trequiresInteractiveFlow: true,\n\t\t\tinteractiveSteps: []interactiveStep{\n\t\t\t\t{stepType: stepTypeInitialRequest, expectAuthURL: true, expectCacheEntry: true},\n\t\t\t\t{stepType: stepTypeAuthCompletion, callAuthPath: true, expectCacheEntry: false},\n\t\t\t},\n\t\t\tvalidateCompleteResponse: true,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\t// Should handle nil hostinfo gracefully\n\t\t\t\tnode, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\tassert.True(t, found, \"node should be registered despite nil hostinfo\")\n\n\t\t\t\tif found {\n\t\t\t\t\t// Should have some default hostname or handle nil gracefully\n\t\t\t\t\thostname := node.Hostname()\n\t\t\t\t\tassert.NotEmpty(t, hostname, \"should have some hostname even with nil hostinfo\")\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t// TEST: Registration cache cleanup on authentication error\n\t\t// WHAT: Tests that cache is cleaned up when authentication fails\n\t\t// INPUT: Interactive registration that fails during auth completion\n\t\t// EXPECTED: Cache entry removed after error\n\t\t// WHY: Failed registrations should clean up to prevent stale cache entries\n\t\t{\n\t\t\tname: \"interactive_workflow_registration_cache_cleanup_on_error\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\treturn \"\", nil\n\t\t\t},\n\t\t\trequest: func(_ string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"cache-cleanup-test-node\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: 
time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\t// Get initial AuthURL and extract registration ID\n\t\t\t\tauthURL := resp.AuthURL\n\t\t\t\tassert.Contains(t, authURL, \"/register/\")\n\n\t\t\t\tregistrationID, err := extractRegistrationIDFromAuthURL(authURL)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// Verify cache entry exists\n\t\t\t\tcacheEntry, found := app.state.GetAuthCacheEntry(registrationID)\n\t\t\t\tassert.True(t, found, \"registration cache entry should exist initially\")\n\t\t\t\tassert.NotNil(t, cacheEntry)\n\n\t\t\t\t// Try to complete authentication with invalid user ID (should cause error)\n\t\t\t\tinvalidUserID := types.UserID(99999) // Non-existent user\n\t\t\t\t_, _, err = app.state.HandleNodeFromAuthPath(\n\t\t\t\t\tregistrationID,\n\t\t\t\t\tinvalidUserID,\n\t\t\t\t\tnil,\n\t\t\t\t\t\"error-test-method\",\n\t\t\t\t)\n\t\t\t\tassert.Error(t, err, \"should fail with invalid user ID\") //nolint:testifylint // inside closure, uses assert pattern\n\n\t\t\t\t// Cache entry should still exist after auth error (for retry scenarios)\n\t\t\t\t_, stillFound := app.state.GetAuthCacheEntry(registrationID)\n\t\t\t\tassert.True(t, stillFound, \"registration cache entry should still exist after auth error for potential retry\")\n\t\t\t},\n\t\t},\n\t\t// TEST: Interactive workflow with multiple registration attempts for same node\n\t\t// WHAT: Tests that multiple interactive registrations can be created for same node\n\t\t// INPUT: Start two interactive registrations, verify both cache entries exist\n\t\t// EXPECTED: Both registrations get different IDs and can coexist\n\t\t// WHY: Validates that multiple pending registrations don't interfere with each other\n\t\t{\n\t\t\tname: \"interactive_workflow_multiple_steps_same_node\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\treturn \"\", nil\n\t\t\t},\n\t\t\trequest: func(_ string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"multi-step-node\",\n\t\t\t\t\t\tOS:       \"linux\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\t// Test multiple interactive registration attempts for the same node can coexist\n\t\t\t\tauthURL1 := resp.AuthURL\n\t\t\t\tassert.Contains(t, authURL1, \"/register/\")\n\n\t\t\t\t// Start a second interactive registration for the same node\n\t\t\t\tsecondReq := tailcfg.RegisterRequest{\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"multi-step-node-updated\",\n\t\t\t\t\t\tOS:       \"linux-updated\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\n\t\t\t\tresp2, err := app.handleRegister(context.Background(), secondReq, 
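\n\t\t\t\t\t// Same machine key as the first attempt: both pending registrations\n\t\t\t\t\t// describe the same physical device.\n\t\t\t\t\t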
machineKey1.Public())\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tauthURL2 := resp2.AuthURL\n\t\t\t\tassert.Contains(t, authURL2, \"/register/\")\n\n\t\t\t\t// Both should have different registration IDs\n\t\t\t\tregID1, err1 := extractRegistrationIDFromAuthURL(authURL1)\n\t\t\t\tregID2, err2 := extractRegistrationIDFromAuthURL(authURL2)\n\n\t\t\t\trequire.NoError(t, err1)\n\t\t\t\trequire.NoError(t, err2)\n\t\t\t\tassert.NotEqual(t, regID1, regID2, \"different registration attempts should have different IDs\")\n\n\t\t\t\t// Both cache entries should exist simultaneously\n\t\t\t\t_, found1 := app.state.GetAuthCacheEntry(regID1)\n\t\t\t\t_, found2 := app.state.GetAuthCacheEntry(regID2)\n\n\t\t\t\tassert.True(t, found1, \"first registration cache entry should exist\")\n\t\t\t\tassert.True(t, found2, \"second registration cache entry should exist\")\n\n\t\t\t\t// This validates that multiple pending registrations can coexist\n\t\t\t\t// without interfering with each other\n\t\t\t},\n\t\t},\n\t\t// TEST: Complete one of multiple pending registrations\n\t\t// WHAT: Tests completing the second of two pending registrations for same node\n\t\t// INPUT: Create two pending registrations, complete the second one\n\t\t// EXPECTED: Second registration completes successfully, node is created\n\t\t// WHY: Validates that you can complete any pending registration, not just the first\n\t\t{\n\t\t\tname: \"interactive_workflow_complete_second_of_multiple_pending\",\n\t\t\tsetupFunc: func(t *testing.T, app *Headscale) (string, error) { //nolint:thelper\n\t\t\t\treturn \"\", nil\n\t\t\t},\n\t\t\trequest: func(_ string) tailcfg.RegisterRequest {\n\t\t\t\treturn tailcfg.RegisterRequest{\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"pending-node-1\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\t\t\t},\n\t\t\tmachineKey: machineKey1.Public,\n\t\t\tvalidate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { //nolint:thelper\n\t\t\t\tauthURL1 := resp.AuthURL\n\t\t\t\tregID1, err := extractRegistrationIDFromAuthURL(authURL1)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// Start a second interactive registration for the same node\n\t\t\t\tsecondReq := tailcfg.RegisterRequest{\n\t\t\t\t\tNodeKey: nodeKey1.Public(),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"pending-node-2\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\n\t\t\t\tresp2, err := app.handleRegister(context.Background(), secondReq, machineKey1.Public())\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tauthURL2 := resp2.AuthURL\n\t\t\t\tregID2, err := extractRegistrationIDFromAuthURL(authURL2)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// Verify both exist\n\t\t\t\t_, found1 := app.state.GetAuthCacheEntry(regID1)\n\t\t\t\t_, found2 := app.state.GetAuthCacheEntry(regID2)\n\n\t\t\t\tassert.True(t, found1, \"first cache entry should exist\")\n\t\t\t\tassert.True(t, found2, \"second cache entry should exist\")\n\n\t\t\t\t// Complete the SECOND registration (not the first)\n\t\t\t\tuser := app.state.CreateUserForTest(\"second-registration-user\")\n\n\t\t\t\t// Start followup request in goroutine (it will wait for auth completion)\n\t\t\t\tresponseChan := make(chan *tailcfg.RegisterResponse, 1)\n\t\t\t\terrorChan := make(chan error, 1)\n\n\t\t\t\tfollowupReq := tailcfg.RegisterRequest{\n\t\t\t\t\tNodeKey:  nodeKey1.Public(),\n\t\t\t\t\tFollowup: authURL2,\n\t\t\t\t\tHostinfo: 
&tailcfg.Hostinfo{\n\t\t\t\t\t\tHostname: \"pending-node-2\",\n\t\t\t\t\t},\n\t\t\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t\t\t}\n\n\t\t\t\tgo func() {\n\t\t\t\t\tresp, err := app.handleRegister(context.Background(), followupReq, machineKey1.Public())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\terrorChan <- err\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tresponseChan <- resp\n\t\t\t\t}()\n\n\t\t\t\t// Complete authentication for second registration\n\t\t\t\t// The goroutine will receive the node from the buffered channel\n\t\t\t\t_, _, err = app.state.HandleNodeFromAuthPath(\n\t\t\t\t\tregID2,\n\t\t\t\t\ttypes.UserID(user.ID),\n\t\t\t\t\tnil,\n\t\t\t\t\t\"second-registration-method\",\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// Wait for followup to complete\n\t\t\t\tselect {\n\t\t\t\tcase err := <-errorChan:\n\t\t\t\t\tt.Fatalf(\"followup request failed: %v\", err)\n\t\t\t\tcase finalResp := <-responseChan:\n\t\t\t\t\trequire.NotNil(t, finalResp)\n\t\t\t\t\tassert.True(t, finalResp.MachineAuthorized, \"machine should be authorized\")\n\t\t\t\tcase <-time.After(2 * time.Second):\n\t\t\t\t\tt.Fatal(\"followup request timed out\")\n\t\t\t\t}\n\n\t\t\t\t// Verify the node was created with the second registration's data\n\t\t\t\tnode, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\t\t\t\tassert.True(t, found, \"node should be registered\")\n\n\t\t\t\tif found {\n\t\t\t\t\tassert.Equal(t, \"pending-node-2\", node.Hostname())\n\t\t\t\t\tassert.Equal(t, \"second-registration-user\", node.User().Name())\n\t\t\t\t}\n\n\t\t\t\t// First registration should still be in cache (not completed)\n\t\t\t\t_, stillFound := app.state.GetAuthCacheEntry(regID1)\n\t\t\t\tassert.True(t, stillFound, \"first registration should still be pending\")\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\t// Create test app\n\t\t\tapp := createTestApp(t)\n\n\t\t\t// Run setup function\n\t\t\tdynamicValue, err := tt.setupFunc(t, app)\n\t\t\trequire.NoError(t, err, \"setup should not fail\")\n\n\t\t\t// Check if this test requires interactive workflow\n\t\t\tif tt.requiresInteractiveFlow {\n\t\t\t\trunInteractiveWorkflowTest(t, tt, app, dynamicValue)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Build request\n\t\t\treq := tt.request(dynamicValue)\n\t\t\tmachineKey := tt.machineKey()\n\n\t\t\t// Set up context with timeout for followup tests\n\t\t\tctx := context.Background()\n\n\t\t\tif req.Followup != \"\" {\n\t\t\t\tvar cancel context.CancelFunc\n\n\t\t\t\tctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)\n\t\t\t\tdefer cancel()\n\t\t\t}\n\n\t\t\t// Debug: check node availability before test execution\n\t\t\tif req.Auth == nil {\n\t\t\t\tif node, found := app.state.GetNodeByNodeKey(req.NodeKey); found {\n\t\t\t\t\tt.Logf(\"Node found before handleRegister: hostname=%s, expired=%t\", node.Hostname(), node.IsExpired())\n\t\t\t\t} else {\n\t\t\t\t\tt.Logf(\"Node NOT found before handleRegister for key %s\", req.NodeKey.ShortString())\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Execute the test\n\t\t\tresp, err := app.handleRegister(ctx, req, machineKey)\n\n\t\t\t// Validate error expectations\n\t\t\tif tt.wantError {\n\t\t\t\tassert.Error(t, err, \"expected error but got none\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err, \"unexpected error: %v\", err)\n\t\t\trequire.NotNil(t, resp, \"response should not be nil\")\n\n\t\t\t// Validate basic response properties\n\t\t\tif tt.wantAuth {\n\t\t\t\tassert.True(t, resp.MachineAuthorized, 
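\n\t\t\t\t\t// Note: logout-style requests also report MachineAuthorized=true; key\n\t\t\t\t\t// expiry is asserted separately below via wantExpired/NodeKeyExpired.\n\t\t\t\t\t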
\"machine should be authorized\")\n\t\t\t} else {\n\t\t\t\tassert.False(t, resp.MachineAuthorized, \"machine should not be authorized\")\n\t\t\t}\n\n\t\t\tif tt.wantAuthURL {\n\t\t\t\tassert.NotEmpty(t, resp.AuthURL, \"should have AuthURL\")\n\t\t\t\tassert.Contains(t, resp.AuthURL, \"register/\", \"AuthURL should contain registration path\")\n\t\t\t}\n\n\t\t\tif tt.wantExpired {\n\t\t\t\tassert.True(t, resp.NodeKeyExpired, \"node key should be expired\")\n\t\t\t} else {\n\t\t\t\tassert.False(t, resp.NodeKeyExpired, \"node key should not be expired\")\n\t\t\t}\n\n\t\t\t// Run custom validation if provided\n\t\t\tif tt.validate != nil {\n\t\t\t\ttt.validate(t, resp, app)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// runInteractiveWorkflowTest executes a multi-step interactive authentication workflow.\nfunc runInteractiveWorkflowTest(t *testing.T, tt struct {\n\tname                      string\n\tsetupFunc                 func(*testing.T, *Headscale) (string, error)\n\trequest                   func(dynamicValue string) tailcfg.RegisterRequest\n\tmachineKey                func() key.MachinePublic\n\twantAuth                  bool\n\twantError                 bool\n\twantAuthURL               bool\n\twantExpired               bool\n\tvalidate                  func(*testing.T, *tailcfg.RegisterResponse, *Headscale)\n\trequiresInteractiveFlow   bool\n\tinteractiveSteps          []interactiveStep\n\tvalidateRegistrationCache bool\n\texpectedAuthURLPattern    string\n\tsimulateAuthCompletion    bool\n\tvalidateCompleteResponse  bool\n}, app *Headscale, dynamicValue string,\n) {\n\tt.Helper()\n\t// Build initial request\n\treq := tt.request(dynamicValue)\n\tmachineKey := tt.machineKey()\n\tctx := context.Background()\n\n\t// Execute interactive workflow steps\n\tvar (\n\t\tinitialResp    *tailcfg.RegisterResponse\n\t\tauthURL        string\n\t\tregistrationID types.AuthID\n\t\tfinalResp      *tailcfg.RegisterResponse\n\t\terr            error\n\t)\n\n\t// Execute the steps in the correct sequence for interactive workflow\n\tfor i, step := range tt.interactiveSteps {\n\t\tt.Logf(\"Executing interactive step %d: %s\", i+1, step.stepType)\n\n\t\tswitch step.stepType {\n\t\tcase stepTypeInitialRequest:\n\t\t\t// Step 1: Initial request should get AuthURL back\n\t\t\tinitialResp, err = app.handleRegister(ctx, req, machineKey)\n\t\t\trequire.NoError(t, err, \"initial request should not fail\")\n\t\t\trequire.NotNil(t, initialResp, \"initial response should not be nil\")\n\n\t\t\tif step.expectAuthURL {\n\t\t\t\trequire.NotEmpty(t, initialResp.AuthURL, \"should have AuthURL\")\n\t\t\t\trequire.Contains(t, initialResp.AuthURL, \"/register/\", \"AuthURL should contain registration path\")\n\t\t\t\tauthURL = initialResp.AuthURL\n\n\t\t\t\t// Extract registration ID from AuthURL\n\t\t\t\tregistrationID, err = extractRegistrationIDFromAuthURL(authURL)\n\t\t\t\trequire.NoError(t, err, \"should be able to extract registration ID from AuthURL\")\n\t\t\t}\n\n\t\t\tif step.expectCacheEntry {\n\t\t\t\t// Verify registration cache entry was created\n\t\t\t\tcacheEntry, found := app.state.GetAuthCacheEntry(registrationID)\n\t\t\t\trequire.True(t, found, \"registration cache entry should exist\")\n\t\t\t\trequire.NotNil(t, cacheEntry, \"cache entry should not be nil\")\n\t\t\t\trequire.Equal(t, req.NodeKey, cacheEntry.Node().NodeKey(), \"cache entry should have correct node key\")\n\t\t\t}\n\n\t\tcase stepTypeAuthCompletion:\n\t\t\t// Step 2: Start followup request that will wait, then complete authentication\n\t\t\tif 
step.callAuthPath {\n\t\t\t\trequire.NotEmpty(t, registrationID, \"registration ID should be available from previous step\")\n\n\t\t\t\t// Prepare followup request\n\t\t\t\tfollowupReq := tt.request(dynamicValue)\n\t\t\t\tfollowupReq.Followup = authURL\n\n\t\t\t\t// Start the followup request in a goroutine - it will wait for channel signal\n\t\t\t\tresponseChan := make(chan *tailcfg.RegisterResponse, 1)\n\t\t\t\terrorChan := make(chan error, 1)\n\n\t\t\t\tgo func() {\n\t\t\t\t\tresp, err := app.handleRegister(context.Background(), followupReq, machineKey)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\terrorChan <- err\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tresponseChan <- resp\n\t\t\t\t}()\n\n\t\t\t\t// Complete the authentication - the goroutine will receive from the buffered channel\n\t\t\t\tuser := app.state.CreateUserForTest(\"interactive-test-user\")\n\t\t\t\t_, _, err = app.state.HandleNodeFromAuthPath(\n\t\t\t\t\tregistrationID,\n\t\t\t\t\ttypes.UserID(user.ID),\n\t\t\t\t\tnil, // no custom expiry\n\t\t\t\t\t\"test-method\",\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, err, \"HandleNodeFromAuthPath should succeed\")\n\n\t\t\t\t// Wait for the followup request to complete\n\t\t\t\tselect {\n\t\t\t\tcase err := <-errorChan:\n\t\t\t\t\trequire.NoError(t, err, \"followup request should not fail\")\n\t\t\t\tcase finalResp = <-responseChan:\n\t\t\t\t\trequire.NotNil(t, finalResp, \"final response should not be nil\")\n\t\t\t\t\t// Verify machine is now authorized\n\t\t\t\t\trequire.True(t, finalResp.MachineAuthorized, \"machine should be authorized after followup\")\n\t\t\t\tcase <-time.After(5 * time.Second):\n\t\t\t\t\tt.Fatal(\"followup request timed out waiting for authentication completion\")\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase stepTypeFollowupRequest:\n\t\t\t// This step is deprecated - followup is now handled within auth_completion step\n\t\t\tt.Logf(\"followup_request step is deprecated - use expectCacheEntry in auth_completion instead\")\n\n\t\tdefault:\n\t\t\tt.Fatalf(\"unknown interactive step type: %s\", step.stepType)\n\t\t}\n\n\t\t// Check cache cleanup expectation for this step\n\t\tif !step.expectCacheEntry && registrationID != \"\" {\n\t\t\t// Verify cache entry was cleaned up\n\t\t\t_, found := app.state.GetAuthCacheEntry(registrationID)\n\t\t\trequire.False(t, found, \"registration cache entry should be cleaned up after step: %s\", step.stepType)\n\t\t}\n\t}\n\n\t// Validate final response if requested\n\tif tt.validateCompleteResponse && finalResp != nil {\n\t\tvalidateCompleteRegistrationResponse(t, finalResp, req)\n\t}\n\n\t// Run custom validation if provided\n\tif tt.validate != nil {\n\t\tresponseToValidate := finalResp\n\t\tif responseToValidate == nil {\n\t\t\tresponseToValidate = initialResp\n\t\t}\n\n\t\ttt.validate(t, responseToValidate, app)\n\t}\n}\n\n// extractRegistrationIDFromAuthURL extracts the registration ID from an AuthURL.\nfunc extractRegistrationIDFromAuthURL(authURL string) (types.AuthID, error) {\n\t// AuthURL format: \"http://localhost/register/abc123\"\n\tconst registerPrefix = \"/register/\"\n\n\tidx := strings.LastIndex(authURL, registerPrefix)\n\tif idx == -1 {\n\t\treturn \"\", fmt.Errorf(\"invalid AuthURL format: %s\", authURL) //nolint:err113\n\t}\n\n\tidStr := authURL[idx+len(registerPrefix):]\n\n\treturn types.AuthIDFromString(idStr)\n}\n\n// validateCompleteRegistrationResponse performs comprehensive validation of a registration response.\nfunc validateCompleteRegistrationResponse(t *testing.T, resp *tailcfg.RegisterResponse, _ 
tailcfg.RegisterRequest) {\n\tt.Helper()\n\t// Basic response validation\n\trequire.NotNil(t, resp, \"response should not be nil\")\n\trequire.True(t, resp.MachineAuthorized, \"machine should be authorized\")\n\trequire.False(t, resp.NodeKeyExpired, \"node key should not be expired\")\n\trequire.NotEmpty(t, resp.User.DisplayName, \"user should have display name\")\n\n\t// Additional validation can be added here as needed\n\t// Note: NodeKey field may not be present in all response types\n}\n\n// Simple test to validate basic node creation and lookup.\nfunc TestNodeStoreLookup(t *testing.T) {\n\tapp := createTestApp(t)\n\n\tmachineKey := key.NewMachine()\n\tnodeKey := key.NewNode()\n\n\tuser := app.state.CreateUserForTest(\"test-user\")\n\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)\n\trequire.NoError(t, err)\n\n\t// Register a node\n\tregReq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak.Key,\n\t\t},\n\t\tNodeKey: nodeKey.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"test-node\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\tresp, err := app.handleRegisterWithAuthKey(regReq, machineKey.Public())\n\trequire.NoError(t, err)\n\trequire.NotNil(t, resp)\n\trequire.True(t, resp.MachineAuthorized)\n\n\tt.Logf(\"Registered node successfully: %+v\", resp)\n\n\t// Wait for node to be available in NodeStore\n\tvar node types.NodeView\n\n\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tvar found bool\n\n\t\tnode, found = app.state.GetNodeByNodeKey(nodeKey.Public())\n\t\tassert.True(c, found, \"Node should be found in NodeStore\")\n\t}, 1*time.Second, 100*time.Millisecond, \"waiting for node to be available in NodeStore\")\n\n\trequire.Equal(t, \"test-node\", node.Hostname())\n\n\tt.Logf(\"Found node: hostname=%s, id=%d\", node.Hostname(), node.ID().Uint64())\n}\n\n// TestPreAuthKeyLogoutAndReloginDifferentUser tests the scenario where:\n// 1. Multiple nodes register with different users using pre-auth keys\n// 2. All nodes logout\n// 3. 
All nodes re-login using a different user's pre-auth key\n// EXPECTED BEHAVIOR: Should create NEW nodes for the new user, leaving old nodes with the old user.\n// This matches the integration test expectation and web flow behavior.\nfunc TestPreAuthKeyLogoutAndReloginDifferentUser(t *testing.T) {\n\tapp := createTestApp(t)\n\n\t// Create two users\n\tuser1 := app.state.CreateUserForTest(\"user1\")\n\tuser2 := app.state.CreateUserForTest(\"user2\")\n\n\t// Create pre-auth keys for both users\n\tpak1, err := app.state.CreatePreAuthKey(user1.TypedID(), true, false, nil, nil)\n\trequire.NoError(t, err)\n\tpak2, err := app.state.CreatePreAuthKey(user2.TypedID(), true, false, nil, nil)\n\trequire.NoError(t, err)\n\n\t// Create machine and node keys for 4 nodes (2 per user)\n\ttype nodeInfo struct {\n\t\tmachineKey key.MachinePrivate\n\t\tnodeKey    key.NodePrivate\n\t\thostname   string\n\t\tnodeID     types.NodeID\n\t}\n\n\tnodes := []nodeInfo{\n\t\t{machineKey: key.NewMachine(), nodeKey: key.NewNode(), hostname: \"user1-node1\"},\n\t\t{machineKey: key.NewMachine(), nodeKey: key.NewNode(), hostname: \"user1-node2\"},\n\t\t{machineKey: key.NewMachine(), nodeKey: key.NewNode(), hostname: \"user2-node1\"},\n\t\t{machineKey: key.NewMachine(), nodeKey: key.NewNode(), hostname: \"user2-node2\"},\n\t}\n\n\t// Register nodes: first 2 to user1, last 2 to user2\n\tfor i, node := range nodes {\n\t\tauthKey := pak1.Key\n\t\tif i >= 2 {\n\t\t\tauthKey = pak2.Key\n\t\t}\n\n\t\tregReq := tailcfg.RegisterRequest{\n\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\tAuthKey: authKey,\n\t\t\t},\n\t\t\tNodeKey: node.nodeKey.Public(),\n\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: node.hostname,\n\t\t\t},\n\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t}\n\n\t\tresp, err := app.handleRegisterWithAuthKey(regReq, node.machineKey.Public())\n\t\trequire.NoError(t, err)\n\t\trequire.NotNil(t, resp)\n\t\trequire.True(t, resp.MachineAuthorized)\n\n\t\t// Get the node ID\n\t\tvar registeredNode types.NodeView\n\n\t\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tvar found bool\n\n\t\t\tregisteredNode, found = app.state.GetNodeByNodeKey(node.nodeKey.Public())\n\t\t\tassert.True(c, found, \"Node should be found in NodeStore\")\n\t\t}, 1*time.Second, 100*time.Millisecond, \"waiting for node to be available\")\n\n\t\tnodes[i].nodeID = registeredNode.ID()\n\t\tt.Logf(\"Registered node %s with ID %d to user%d\", node.hostname, registeredNode.ID().Uint64(), i/2+1)\n\t}\n\n\t// Verify initial state: user1 has 2 nodes, user2 has 2 nodes\n\tuser1Nodes := app.state.ListNodesByUser(types.UserID(user1.ID))\n\tuser2Nodes := app.state.ListNodesByUser(types.UserID(user2.ID))\n\n\trequire.Equal(t, 2, user1Nodes.Len(), \"user1 should have 2 nodes initially\")\n\trequire.Equal(t, 2, user2Nodes.Len(), \"user2 should have 2 nodes initially\")\n\n\tt.Logf(\"Initial state verified: user1=%d nodes, user2=%d nodes\", user1Nodes.Len(), user2Nodes.Len())\n\n\t// Simulate logout for all nodes\n\tfor _, node := range nodes {\n\t\tlogoutReq := tailcfg.RegisterRequest{\n\t\t\tAuth:    nil, // nil Auth indicates logout\n\t\t\tNodeKey: node.nodeKey.Public(),\n\t\t}\n\n\t\tresp, err := app.handleRegister(context.Background(), logoutReq, node.machineKey.Public())\n\t\trequire.NoError(t, err)\n\t\tt.Logf(\"Logout response for %s: %+v\", node.hostname, resp)\n\t}\n\n\tt.Logf(\"All nodes logged out\")\n\n\t// Create a new pre-auth key for user1 (reusable for all nodes)\n\tnewPak1, err := app.state.CreatePreAuthKey(user1.TypedID(), 
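\n\t\t// reusable=true lets one key re-register all four machines; the nil expiry\n\t\t// keeps it valid throughout the re-login loop below.\n\t\t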
true, false, nil, nil)\n\trequire.NoError(t, err)\n\n\t// Re-login all nodes using user1's new pre-auth key\n\tfor i, node := range nodes {\n\t\tregReq := tailcfg.RegisterRequest{\n\t\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\t\tAuthKey: newPak1.Key,\n\t\t\t},\n\t\t\tNodeKey: node.nodeKey.Public(),\n\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: node.hostname,\n\t\t\t},\n\t\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t\t}\n\n\t\tresp, err := app.handleRegisterWithAuthKey(regReq, node.machineKey.Public())\n\t\trequire.NoError(t, err)\n\t\trequire.NotNil(t, resp)\n\t\trequire.True(t, resp.MachineAuthorized)\n\n\t\tt.Logf(\"Re-registered node %s (originally user%d) with user1's pre-auth key\", node.hostname, i/2+1)\n\t}\n\n\t// Verify final state after re-login\n\t// EXPECTED: New nodes created for user1, old nodes remain with original users\n\tuser1NodesAfter := app.state.ListNodesByUser(types.UserID(user1.ID))\n\tuser2NodesAfter := app.state.ListNodesByUser(types.UserID(user2.ID))\n\n\tt.Logf(\"Final state: user1=%d nodes, user2=%d nodes\", user1NodesAfter.Len(), user2NodesAfter.Len())\n\n\t// CORRECT BEHAVIOR: When re-authenticating with a DIFFERENT user's pre-auth key,\n\t// new nodes should be created (not transferred). This matches:\n\t// 1. The integration test expectation\n\t// 2. The web flow behavior (creates new nodes)\n\t// 3. The principle that each user owns distinct node entries\n\trequire.Equal(t, 4, user1NodesAfter.Len(), \"user1 should have 4 nodes total (2 original + 2 new from user2's machines)\")\n\trequire.Equal(t, 2, user2NodesAfter.Len(), \"user2 should still have 2 nodes (old nodes from original registration)\")\n\n\t// Verify original nodes still exist with original users\n\tfor i := range 2 {\n\t\tnode := nodes[i]\n\t\t// User1's original nodes should still be owned by user1\n\t\tregisteredNode, found := app.state.GetNodeByMachineKey(node.machineKey.Public(), types.UserID(user1.ID))\n\t\trequire.True(t, found, \"User1's original node %s should still exist\", node.hostname)\n\t\trequire.Equal(t, user1.ID, registeredNode.UserID().Get(), \"Node %s should still belong to user1\", node.hostname)\n\t\tt.Logf(\"✓ User1's original node %s (ID=%d) still owned by user1\", node.hostname, registeredNode.ID().Uint64())\n\t}\n\n\tfor i := 2; i < 4; i++ {\n\t\tnode := nodes[i]\n\t\t// User2's original nodes should still be owned by user2\n\t\tregisteredNode, found := app.state.GetNodeByMachineKey(node.machineKey.Public(), types.UserID(user2.ID))\n\t\trequire.True(t, found, \"User2's original node %s should still exist\", node.hostname)\n\t\trequire.Equal(t, user2.ID, registeredNode.UserID().Get(), \"Node %s should still belong to user2\", node.hostname)\n\t\tt.Logf(\"✓ User2's original node %s (ID=%d) still owned by user2\", node.hostname, registeredNode.ID().Uint64())\n\t}\n\n\t// Verify new nodes were created for user1 with the same machine keys\n\tt.Logf(\"Verifying new nodes created for user1 from user2's machine keys...\")\n\n\tfor i := 2; i < 4; i++ {\n\t\tnode := nodes[i]\n\t\t// Should be able to find a node with user1 and this machine key (the new one)\n\t\tnewNode, found := app.state.GetNodeByMachineKey(node.machineKey.Public(), types.UserID(user1.ID))\n\t\trequire.True(t, found, \"Should have created new node for user1 with machine key from %s\", node.hostname)\n\t\trequire.Equal(t, user1.ID, newNode.UserID().Get(), \"New node should belong to user1\")\n\t\tt.Logf(\"✓ New node created for user1 with machine key from %s (ID=%d)\", node.hostname, 
newNode.ID().Uint64())\n\t}\n}\n\n// TestWebFlowReauthDifferentUser validates CLI registration behavior when switching users.\n// This test replicates the TestAuthWebFlowLogoutAndReloginNewUser integration test scenario.\n//\n// IMPORTANT: CLI registration creates NEW nodes, matching the interactive web flow (no transfer).\n//\n// Scenario:\n// 1. Node registers with user1 via pre-auth key\n// 2. Node logs out (expires)\n// 3. Admin runs: headscale auth register --auth-id <id> --user user2\n//\n// Expected behavior:\n// - User1's original node should STILL EXIST (expired)\n// - User2 should get a NEW node created (NOT transfer)\n// - Both nodes share the same machine key (same physical device).\nfunc TestWebFlowReauthDifferentUser(t *testing.T) {\n\tmachineKey := key.NewMachine()\n\tnodeKey1 := key.NewNode()\n\tnodeKey2 := key.NewNode() // Node key rotates on re-auth\n\n\tapp := createTestApp(t)\n\n\t// Step 1: Register node for user1 via pre-auth key (simulating initial web flow registration)\n\tuser1 := app.state.CreateUserForTest(\"user1\")\n\tpak1, err := app.state.CreatePreAuthKey(user1.TypedID(), true, false, nil, nil)\n\trequire.NoError(t, err)\n\n\tregReq1 := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak1.Key,\n\t\t},\n\t\tNodeKey: nodeKey1.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"test-machine\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\tresp1, err := app.handleRegisterWithAuthKey(regReq1, machineKey.Public())\n\trequire.NoError(t, err)\n\trequire.True(t, resp1.MachineAuthorized, \"Should be authorized via pre-auth key\")\n\n\t// Verify node exists for user1\n\tuser1Node, found := app.state.GetNodeByMachineKey(machineKey.Public(), types.UserID(user1.ID))\n\trequire.True(t, found, \"Node should exist for user1\")\n\trequire.Equal(t, user1.ID, user1Node.UserID().Get(), \"Node should belong to user1\")\n\tuser1NodeID := user1Node.ID()\n\tt.Logf(\"✓ User1 node created with ID: %d\", user1NodeID)\n\n\t// Step 2: Simulate logout by expiring the node\n\tpastTime := time.Now().Add(-1 * time.Hour)\n\tlogoutReq := tailcfg.RegisterRequest{\n\t\tNodeKey: nodeKey1.Public(),\n\t\tExpiry:  pastTime, // Expired = logout\n\t}\n\t_, err = app.handleRegister(context.Background(), logoutReq, machineKey.Public())\n\trequire.NoError(t, err)\n\n\t// Verify node is expired\n\tuser1Node, found = app.state.GetNodeByMachineKey(machineKey.Public(), types.UserID(user1.ID))\n\trequire.True(t, found, \"Node should still exist after logout\")\n\trequire.True(t, user1Node.IsExpired(), \"Node should be expired after logout\")\n\tt.Logf(\"✓ User1 node expired (logged out)\")\n\n\t// Step 3: Start interactive re-authentication (simulates \"tailscale up\")\n\tuser2 := app.state.CreateUserForTest(\"user2\")\n\n\treAuthReq := tailcfg.RegisterRequest{\n\t\t// No Auth field - triggers interactive flow\n\t\tNodeKey: nodeKey2.Public(), // New node key (rotated on re-auth)\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"test-machine\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\t// Initial request should return AuthURL\n\tinitialResp, err := app.handleRegister(context.Background(), reAuthReq, machineKey.Public())\n\trequire.NoError(t, err)\n\trequire.NotEmpty(t, initialResp.AuthURL, \"Should receive AuthURL for interactive flow\")\n\tt.Logf(\"✓ Interactive flow started, AuthURL: %s\", initialResp.AuthURL)\n\n\t// Extract registration ID from AuthURL\n\tregID, err := 
extractRegistrationIDFromAuthURL(initialResp.AuthURL)\n\trequire.NoError(t, err, \"Should extract registration ID from AuthURL\")\n\trequire.NotEmpty(t, regID, \"Should have valid registration ID\")\n\n\t// Step 4: Admin completes authentication via CLI\n\t// This simulates: headscale auth register --auth-id <id> --user user2\n\tnode, _, err := app.state.HandleNodeFromAuthPath(\n\t\tregID,\n\t\ttypes.UserID(user2.ID), // Register to user2, not user1!\n\t\tnil,                    // No custom expiry\n\t\t\"cli\",                  // Registration method (CLI register command)\n\t)\n\trequire.NoError(t, err, \"HandleNodeFromAuthPath should succeed\")\n\tt.Logf(\"✓ Admin registered node to user2 via CLI (node ID: %d)\", node.ID())\n\n\tt.Run(\"user1_original_node_still_exists\", func(t *testing.T) {\n\t\t// User1's original node should STILL exist (not transferred to user2)\n\t\tuser1NodeAfter, found1 := app.state.GetNodeByMachineKey(machineKey.Public(), types.UserID(user1.ID))\n\t\tassert.True(t, found1, \"User1's original node should still exist (not transferred)\")\n\n\t\tif !found1 {\n\t\t\tt.Fatal(\"User1's node was transferred or deleted - this breaks the integration test!\")\n\t\t}\n\n\t\tassert.Equal(t, user1.ID, user1NodeAfter.UserID().Get(), \"User1's node should still belong to user1\")\n\t\tassert.Equal(t, user1NodeID, user1NodeAfter.ID(), \"Should be the same node (same ID)\")\n\t\tassert.True(t, user1NodeAfter.IsExpired(), \"User1's node should still be expired\")\n\t\tt.Logf(\"✓ User1's original node still exists (ID: %d, expired: %v)\", user1NodeAfter.ID(), user1NodeAfter.IsExpired())\n\t})\n\n\tt.Run(\"user2_has_new_node_created\", func(t *testing.T) {\n\t\t// User2 should have a NEW node created (not transfer from user1)\n\t\tuser2Node, found2 := app.state.GetNodeByMachineKey(machineKey.Public(), types.UserID(user2.ID))\n\t\tassert.True(t, found2, \"User2 should have a new node created\")\n\n\t\tif !found2 {\n\t\t\tt.Fatal(\"User2 doesn't have a node - registration failed!\")\n\t\t}\n\n\t\tassert.Equal(t, user2.ID, user2Node.UserID().Get(), \"User2's node should belong to user2\")\n\t\tassert.NotEqual(t, user1NodeID, user2Node.ID(), \"Should be a NEW node (different ID), not transfer!\")\n\t\tassert.Equal(t, machineKey.Public(), user2Node.MachineKey(), \"Should have same machine key\")\n\t\tassert.Equal(t, nodeKey2.Public(), user2Node.NodeKey(), \"Should have new node key\")\n\t\tassert.False(t, user2Node.IsExpired(), \"User2's node should NOT be expired (active)\")\n\t\tt.Logf(\"✓ User2's new node created (ID: %d, active)\", user2Node.ID())\n\t})\n\n\tt.Run(\"returned_node_is_user2_new_node\", func(t *testing.T) {\n\t\t// The node returned from HandleNodeFromAuthPath should be user2's NEW node\n\t\tassert.Equal(t, user2.ID, node.UserID().Get(), \"Returned node should belong to user2\")\n\t\tassert.NotEqual(t, user1NodeID, node.ID(), \"Returned node should be NEW, not transferred from user1\")\n\t\tt.Logf(\"✓ HandleNodeFromAuthPath returned user2's new node (ID: %d)\", node.ID())\n\t})\n\n\tt.Run(\"both_nodes_share_machine_key\", func(t *testing.T) {\n\t\t// Both nodes should have the same machine key (same physical device)\n\t\tuser1NodeFinal, found1 := app.state.GetNodeByMachineKey(machineKey.Public(), types.UserID(user1.ID))\n\t\tuser2NodeFinal, found2 := app.state.GetNodeByMachineKey(machineKey.Public(), types.UserID(user2.ID))\n\n\t\trequire.True(t, found1, \"User1 node should exist\")\n\t\trequire.True(t, found2, \"User2 node should exist\")\n\n\t\tassert.Equal(t, 
machineKey.Public(), user1NodeFinal.MachineKey(), \"User1 node should have correct machine key\")\n\t\tassert.Equal(t, machineKey.Public(), user2NodeFinal.MachineKey(), \"User2 node should have same machine key\")\n\t\tt.Logf(\"✓ Both nodes share machine key: %s\", machineKey.Public().ShortString())\n\t})\n\n\tt.Run(\"total_node_count\", func(t *testing.T) {\n\t\t// We should have exactly 2 nodes total: one for user1 (expired), one for user2 (active)\n\t\tallNodesSlice := app.state.ListNodes()\n\t\tassert.Equal(t, 2, allNodesSlice.Len(), \"Should have exactly 2 nodes total\")\n\n\t\t// Count nodes per user\n\t\tuser1Nodes := 0\n\t\tuser2Nodes := 0\n\n\t\tfor i := range allNodesSlice.Len() {\n\t\t\tn := allNodesSlice.At(i)\n\t\t\tif n.UserID().Get() == user1.ID {\n\t\t\t\tuser1Nodes++\n\t\t\t}\n\n\t\t\tif n.UserID().Get() == user2.ID {\n\t\t\t\tuser2Nodes++\n\t\t\t}\n\t\t}\n\n\t\tassert.Equal(t, 1, user1Nodes, \"User1 should have 1 node\")\n\t\tassert.Equal(t, 1, user2Nodes, \"User2 should have 1 node\")\n\t\tt.Logf(\"✓ Total: 2 nodes (user1: 1 expired, user2: 1 active)\")\n\t})\n}\n\n// Helper function to create test app.\nfunc createTestApp(t *testing.T) *Headscale {\n\tt.Helper()\n\n\ttmpDir := t.TempDir()\n\n\tcfg := types.Config{\n\t\tServerURL:           \"http://localhost:8080\",\n\t\tNoisePrivateKeyPath: tmpDir + \"/noise_private.key\",\n\t\tDatabase: types.DatabaseConfig{\n\t\t\tType: \"sqlite3\",\n\t\t\tSqlite: types.SqliteConfig{\n\t\t\t\tPath: tmpDir + \"/headscale_test.db\",\n\t\t\t},\n\t\t},\n\t\tOIDC: types.OIDCConfig{},\n\t\tPolicy: types.PolicyConfig{\n\t\t\tMode: types.PolicyModeDB,\n\t\t},\n\t\tTuning: types.Tuning{\n\t\t\tBatchChangeDelay: 100 * time.Millisecond,\n\t\t\tBatcherWorkers:   1,\n\t\t},\n\t}\n\n\tapp, err := NewHeadscale(&cfg)\n\trequire.NoError(t, err)\n\n\t// Initialize and start the mapBatcher to handle Change() calls\n\tapp.mapBatcher = mapper.NewBatcherAndMapper(&cfg, app.state)\n\tapp.mapBatcher.Start()\n\n\t// Clean up the batcher when the test finishes\n\tt.Cleanup(func() {\n\t\tif app.mapBatcher != nil {\n\t\t\tapp.mapBatcher.Close()\n\t\t}\n\t})\n\n\treturn app\n}\n\n// TestGitHubIssue2830_NodeRestartWithUsedPreAuthKey tests the scenario reported in\n// https://github.com/juanfont/headscale/issues/2830\n//\n// Scenario:\n// 1. Node registers successfully with a single-use pre-auth key\n// 2. Node is running fine\n// 3. Node restarts (e.g., after headscale upgrade or tailscale container restart)\n// 4. Node sends RegisterRequest with the same pre-auth key\n// 5. 
BUG: Headscale rejects the request with \"authkey expired\" or \"authkey already used\"\n//\n// Expected behavior:\n// When an existing node (identified by matching NodeKey + MachineKey) re-registers\n// with a pre-auth key that it previously used, the registration should succeed.\n// The node is not creating a new registration - it's re-authenticating the same device.\nfunc TestGitHubIssue2830_NodeRestartWithUsedPreAuthKey(t *testing.T) {\n\tt.Parallel()\n\n\tapp := createTestApp(t)\n\n\t// Create user and single-use pre-auth key\n\tuser := app.state.CreateUserForTest(\"test-user\")\n\tpakNew, err := app.state.CreatePreAuthKey(user.TypedID(), false, false, nil, nil) // reusable=false\n\trequire.NoError(t, err)\n\n\t// Fetch the full pre-auth key to check Reusable field\n\tpak, err := app.state.GetPreAuthKey(pakNew.Key)\n\trequire.NoError(t, err)\n\trequire.False(t, pak.Reusable, \"key should be single-use for this test\")\n\n\tmachineKey := key.NewMachine()\n\tnodeKey := key.NewNode()\n\n\t// STEP 1: Initial registration with pre-auth key (simulates fresh node joining)\n\tinitialReq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pakNew.Key,\n\t\t},\n\t\tNodeKey: nodeKey.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"test-node\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\tt.Log(\"Step 1: Initial registration with pre-auth key\")\n\n\tinitialResp, err := app.handleRegister(context.Background(), initialReq, machineKey.Public())\n\trequire.NoError(t, err, \"initial registration should succeed\")\n\trequire.NotNil(t, initialResp)\n\tassert.True(t, initialResp.MachineAuthorized, \"node should be authorized\")\n\tassert.False(t, initialResp.NodeKeyExpired, \"node key should not be expired\")\n\n\t// Verify node was created in database\n\tnode, found := app.state.GetNodeByNodeKey(nodeKey.Public())\n\trequire.True(t, found, \"node should exist after initial registration\")\n\tassert.Equal(t, \"test-node\", node.Hostname())\n\tassert.Equal(t, nodeKey.Public(), node.NodeKey())\n\tassert.Equal(t, machineKey.Public(), node.MachineKey())\n\n\t// Verify pre-auth key is now marked as used\n\tusedPak, err := app.state.GetPreAuthKey(pakNew.Key)\n\trequire.NoError(t, err)\n\tassert.True(t, usedPak.Used, \"pre-auth key should be marked as used after initial registration\")\n\n\t// STEP 2: Simulate node restart - node sends RegisterRequest again with same pre-auth key\n\t// This happens when:\n\t// - Tailscale container restarts\n\t// - Tailscaled service restarts\n\t// - System reboots\n\t// The Tailscale client persists the pre-auth key in its state and sends it on every registration\n\tt.Log(\"Step 2: Node restart - re-registration with same (now used) pre-auth key\")\n\n\trestartReq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pakNew.Key, // Same key, now marked as Used=true\n\t\t},\n\t\tNodeKey: nodeKey.Public(), // Same node key\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"test-node\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\t// BUG: This fails with \"authkey already used\" or \"authkey expired\"\n\t// EXPECTED: Should succeed because it's the same node re-registering\n\trestartResp, err := app.handleRegister(context.Background(), restartReq, machineKey.Public())\n\n\t// This is the assertion that currently FAILS in v0.27.0\n\tassert.NoError(t, err, \"BUG: existing node re-registration with its own used pre-auth key should succeed\") //nolint:testifylint // 
intentionally uses assert to show bug\n\n\tif err != nil {\n\t\tt.Logf(\"Error received (this is the bug): %v\", err)\n\t\tt.Logf(\"Expected behavior: Node should be able to re-register with the same pre-auth key it used initially\")\n\n\t\treturn // Stop here to show the bug clearly\n\t}\n\n\trequire.NotNil(t, restartResp)\n\tassert.True(t, restartResp.MachineAuthorized, \"node should remain authorized after restart\")\n\tassert.False(t, restartResp.NodeKeyExpired, \"node key should not be expired after restart\")\n\n\t// Verify it's the same node (not a duplicate)\n\tnodeAfterRestart, found := app.state.GetNodeByNodeKey(nodeKey.Public())\n\trequire.True(t, found, \"node should still exist after restart\")\n\tassert.Equal(t, node.ID(), nodeAfterRestart.ID(), \"should be the same node, not a new one\")\n\tassert.Equal(t, \"test-node\", nodeAfterRestart.Hostname())\n}\n\n// TestNodeReregistrationWithReusablePreAuthKey tests that reusable keys work correctly\n// for node re-registration.\nfunc TestNodeReregistrationWithReusablePreAuthKey(t *testing.T) {\n\tt.Parallel()\n\n\tapp := createTestApp(t)\n\n\tuser := app.state.CreateUserForTest(\"test-user\")\n\tpakNew, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil) // reusable=true\n\trequire.NoError(t, err)\n\n\t// Fetch the full pre-auth key to check Reusable field\n\tpak, err := app.state.GetPreAuthKey(pakNew.Key)\n\trequire.NoError(t, err)\n\trequire.True(t, pak.Reusable)\n\n\tmachineKey := key.NewMachine()\n\tnodeKey := key.NewNode()\n\n\t// Initial registration\n\tinitialReq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pakNew.Key,\n\t\t},\n\t\tNodeKey: nodeKey.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"reusable-test-node\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\tinitialResp, err := app.handleRegister(context.Background(), initialReq, machineKey.Public())\n\trequire.NoError(t, err)\n\trequire.NotNil(t, initialResp)\n\tassert.True(t, initialResp.MachineAuthorized)\n\n\t// Node restart - re-registration with reusable key\n\trestartReq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pakNew.Key, // Reusable key\n\t\t},\n\t\tNodeKey: nodeKey.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"reusable-test-node\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\trestartResp, err := app.handleRegister(context.Background(), restartReq, machineKey.Public())\n\trequire.NoError(t, err, \"reusable key should allow re-registration\")\n\trequire.NotNil(t, restartResp)\n\tassert.True(t, restartResp.MachineAuthorized)\n\tassert.False(t, restartResp.NodeKeyExpired)\n}\n\n// TestNodeReregistrationWithExpiredPreAuthKey tests that truly expired keys\n// are still rejected even for existing nodes.\nfunc TestNodeReregistrationWithExpiredPreAuthKey(t *testing.T) {\n\tt.Parallel()\n\n\tapp := createTestApp(t)\n\n\tuser := app.state.CreateUserForTest(\"test-user\")\n\texpiry := time.Now().Add(-1 * time.Hour) // Already expired\n\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, &expiry, nil)\n\trequire.NoError(t, err)\n\n\tmachineKey := key.NewMachine()\n\tnodeKey := key.NewNode()\n\n\t// Try to register with expired key\n\treq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak.Key,\n\t\t},\n\t\tNodeKey: nodeKey.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"expired-key-node\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * 
time.Hour),\n\t}\n\n\t_, err = app.handleRegister(context.Background(), req, machineKey.Public())\n\trequire.Error(t, err, \"expired pre-auth key should be rejected\")\n\tassert.Contains(t, err.Error(), \"authkey expired\", \"error should mention key expiration\")\n}\n\n// TestIssue2830_ExistingNodeReregistersWithExpiredKey tests the fix for issue #2830.\n// When a node is already registered and the pre-auth key expires, the node should\n// still be able to re-register (e.g., after a container restart) using the same\n// expired key. The key was only needed for initial authentication.\nfunc TestIssue2830_ExistingNodeReregistersWithExpiredKey(t *testing.T) {\n\tt.Parallel()\n\n\tapp := createTestApp(t)\n\n\tuser := app.state.CreateUserForTest(\"test-user\")\n\n\t// Create a valid key (will expire it later)\n\texpiry := time.Now().Add(1 * time.Hour)\n\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), false, false, &expiry, nil)\n\trequire.NoError(t, err)\n\n\tmachineKey := key.NewMachine()\n\tnodeKey := key.NewNode()\n\n\t// Register the node initially (key is still valid)\n\treq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak.Key,\n\t\t},\n\t\tNodeKey: nodeKey.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"issue2830-node\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\tresp, err := app.handleRegister(context.Background(), req, machineKey.Public())\n\trequire.NoError(t, err, \"initial registration should succeed\")\n\trequire.NotNil(t, resp)\n\trequire.True(t, resp.MachineAuthorized, \"node should be authorized after initial registration\")\n\n\t// Verify node was created\n\tallNodes := app.state.ListNodes()\n\trequire.Equal(t, 1, allNodes.Len())\n\tinitialNodeID := allNodes.At(0).ID()\n\n\t// Now expire the key by updating it in the database to have an expiry in the past.\n\t// This simulates the real-world scenario where a key expires after initial registration.\n\tpastExpiry := time.Now().Add(-1 * time.Hour)\n\terr = app.state.DB().DB.Model(&types.PreAuthKey{}).\n\t\tWhere(\"id = ?\", pak.ID).\n\t\tUpdate(\"expiration\", pastExpiry).Error\n\trequire.NoError(t, err, \"should be able to update key expiration\")\n\n\t// Reload the key to verify it's now expired\n\texpiredPak, err := app.state.GetPreAuthKey(pak.Key)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, expiredPak.Expiration)\n\trequire.True(t, expiredPak.Expiration.Before(time.Now()), \"key should be expired\")\n\n\t// Verify the expired key would fail validation\n\terr = expiredPak.Validate()\n\trequire.Error(t, err, \"key should fail validation when expired\")\n\trequire.Contains(t, err.Error(), \"authkey expired\")\n\n\t// Attempt to re-register with the SAME key (now expired).\n\t// This should SUCCEED because:\n\t// - The node already exists with the same MachineKey and User\n\t// - The fix allows existing nodes to re-register even with expired keys\n\t// - The key was only needed for initial authentication\n\treq2 := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak.Key, // Same key as initial registration (now expired)\n\t\t},\n\t\tNodeKey: nodeKey.Public(), // Same NodeKey as initial registration\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"issue2830-node\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\tresp2, err := app.handleRegister(context.Background(), req2, machineKey.Public())\n\trequire.NoError(t, err, \"re-registration should succeed even with expired key for existing 
node\")\n\tassert.NotNil(t, resp2)\n\tassert.True(t, resp2.MachineAuthorized, \"node should remain authorized after re-registration\")\n\n\t// Verify we still have only one node (re-registered, not created new)\n\tallNodes = app.state.ListNodes()\n\trequire.Equal(t, 1, allNodes.Len(), \"should have exactly one node (re-registered)\")\n\tassert.Equal(t, initialNodeID, allNodes.At(0).ID(), \"node ID should not change on re-registration\")\n}\n\n// TestGitHubIssue2830_ExistingNodeCanReregisterWithUsedPreAuthKey tests that an existing node\n// can re-register using a pre-auth key that's already marked as Used=true, as long as:\n// 1. The node is re-registering with the same MachineKey it originally used\n// 2. The node is using the same pre-auth key it was originally registered with (AuthKeyID matches)\n//\n// This is the fix for GitHub issue #2830: https://github.com/juanfont/headscale/issues/2830\n//\n// Background: When Docker/Kubernetes containers restart, they keep their persistent state\n// (including the MachineKey), but container entrypoints unconditionally run:\n//\n//\ttailscale up --authkey=$TS_AUTHKEY\n//\n// This caused nodes to be rejected after restart because the pre-auth key was already\n// marked as Used=true from the initial registration. The fix allows re-registration of\n// existing nodes with their own used keys.\nfunc TestGitHubIssue2830_ExistingNodeCanReregisterWithUsedPreAuthKey(t *testing.T) {\n\tapp := createTestApp(t)\n\n\t// Create a user\n\tuser := app.state.CreateUserForTest(\"testuser\")\n\n\t// Create a SINGLE-USE pre-auth key (reusable=false)\n\t// This is the type of key that triggers the bug in issue #2830\n\tpreAuthKeyNew, err := app.state.CreatePreAuthKey(user.TypedID(), false, false, nil, nil)\n\trequire.NoError(t, err)\n\n\t// Fetch the full pre-auth key to check Reusable and Used fields\n\tpreAuthKey, err := app.state.GetPreAuthKey(preAuthKeyNew.Key)\n\trequire.NoError(t, err)\n\trequire.False(t, preAuthKey.Reusable, \"Pre-auth key must be single-use to test issue #2830\")\n\trequire.False(t, preAuthKey.Used, \"Pre-auth key should not be used yet\")\n\n\t// Generate node keys for the client\n\tmachineKey := key.NewMachine()\n\tnodeKey := key.NewNode()\n\n\t// Step 1: Initial registration with the pre-auth key\n\t// This simulates the first time the container starts and runs 'tailscale up --authkey=...'\n\tinitialReq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: preAuthKeyNew.Key, // Use the full key from creation\n\t\t},\n\t\tNodeKey: nodeKey.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"issue-2830-test-node\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\tinitialResp, err := app.handleRegisterWithAuthKey(initialReq, machineKey.Public())\n\trequire.NoError(t, err, \"Initial registration should succeed\")\n\trequire.True(t, initialResp.MachineAuthorized, \"Node should be authorized after initial registration\")\n\trequire.NotNil(t, initialResp.User, \"User should be set in response\")\n\trequire.Equal(t, \"testuser\", initialResp.User.DisplayName, \"User should match the pre-auth key's user\")\n\n\t// Verify the pre-auth key is now marked as Used\n\tupdatedKey, err := app.state.GetPreAuthKey(preAuthKeyNew.Key)\n\trequire.NoError(t, err)\n\trequire.True(t, updatedKey.Used, \"Pre-auth key should be marked as Used after initial registration\")\n\n\t// Step 2: Container restart scenario\n\t// The container keeps its MachineKey (persistent state), but the entrypoint script\n\t// 
unconditionally runs 'tailscale up --authkey=$TS_AUTHKEY' again\n\t//\n\t// WITHOUT THE FIX: This would fail with \"authkey already used\" error\n\t// WITH THE FIX: This succeeds because it's the same node re-registering with its own key\n\n\t// Simulate sending the same RegisterRequest again (same MachineKey, same AuthKey)\n\t// This is exactly what happens when a container restarts\n\treregisterReq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: preAuthKeyNew.Key, // Same key, now marked as Used=true\n\t\t},\n\t\tNodeKey: nodeKey.Public(), // Same NodeKey\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"issue-2830-test-node\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\treregisterResp, err := app.handleRegisterWithAuthKey(reregisterReq, machineKey.Public()) // Same MachineKey\n\trequire.NoError(t, err, \"Re-registration with same MachineKey and used pre-auth key should succeed (fixes #2830)\")\n\trequire.True(t, reregisterResp.MachineAuthorized, \"Node should remain authorized after re-registration\")\n\trequire.NotNil(t, reregisterResp.User, \"User should be set in re-registration response\")\n\trequire.Equal(t, \"testuser\", reregisterResp.User.DisplayName, \"User should remain the same\")\n\n\t// Verify that only ONE node was created (not a duplicate)\n\tnodes := app.state.ListNodesByUser(types.UserID(user.ID))\n\trequire.Equal(t, 1, nodes.Len(), \"Should have exactly one node (no duplicates created)\")\n\trequire.Equal(t, \"issue-2830-test-node\", nodes.At(0).Hostname(), \"Node hostname should match\")\n\n\t// Step 3: Verify that a DIFFERENT machine cannot use the same used key\n\t// This ensures we didn't break the security model - only the original node can re-register\n\tdifferentMachineKey := key.NewMachine()\n\tdifferentNodeKey := key.NewNode()\n\n\tattackReq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: preAuthKeyNew.Key, // Try to use the same key\n\t\t},\n\t\tNodeKey: differentNodeKey.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"attacker-node\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\t_, err = app.handleRegisterWithAuthKey(attackReq, differentMachineKey.Public())\n\trequire.Error(t, err, \"Different machine should NOT be able to use the same used pre-auth key\")\n\trequire.Contains(t, err.Error(), \"already used\", \"Error should indicate key is already used\")\n\n\t// Verify still only one node (the original one)\n\tnodesAfterAttack := app.state.ListNodesByUser(types.UserID(user.ID))\n\trequire.Equal(t, 1, nodesAfterAttack.Len(), \"Should still have exactly one node (attack prevented)\")\n}\n\n// TestWebAuthRejectsUnauthorizedRequestTags tests that web auth registrations\n// validate RequestTags against policy and reject unauthorized tags.\nfunc TestWebAuthRejectsUnauthorizedRequestTags(t *testing.T) {\n\tt.Parallel()\n\n\tapp := createTestApp(t)\n\n\t// Create a user that will authenticate via web auth\n\tuser := app.state.CreateUserForTest(\"webauth-tags-user\")\n\n\tmachineKey := key.NewMachine()\n\tnodeKey := key.NewNode()\n\n\t// Simulate a registration cache entry (as would be created during web auth)\n\tregistrationID := types.MustAuthID()\n\tregEntry := types.NewRegisterAuthRequest(types.Node{\n\t\tMachineKey: machineKey.Public(),\n\t\tNodeKey:    nodeKey.Public(),\n\t\tHostname:   \"webauth-tags-node\",\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname:    \"webauth-tags-node\",\n\t\t\tRequestTags: 
[]string{\"tag:unauthorized\"}, // This tag is not in policy\n\t\t},\n\t})\n\tapp.state.SetAuthCacheEntry(registrationID, regEntry)\n\n\t// Complete the web auth - should fail because tag is unauthorized\n\t_, _, err := app.state.HandleNodeFromAuthPath(\n\t\tregistrationID,\n\t\ttypes.UserID(user.ID),\n\t\tnil, // no expiry\n\t\t\"webauth\",\n\t)\n\n\t// Expect error due to unauthorized tags\n\trequire.Error(t, err, \"HandleNodeFromAuthPath should reject unauthorized RequestTags\")\n\trequire.Contains(t, err.Error(), \"requested tags\",\n\t\t\"Error should indicate requested tags are invalid or not permitted\")\n\trequire.Contains(t, err.Error(), \"tag:unauthorized\",\n\t\t\"Error should mention the rejected tag\")\n\n\t// Verify no node was created\n\t_, found := app.state.GetNodeByNodeKey(nodeKey.Public())\n\trequire.False(t, found, \"Node should not be created when tags are unauthorized\")\n}\n\n// TestWebAuthReauthWithEmptyTagsRemovesAllTags tests that when an existing tagged node\n// reauths with empty RequestTags, all tags are removed and ownership returns to user.\n// This is the fix for issue #2979.\nfunc TestWebAuthReauthWithEmptyTagsRemovesAllTags(t *testing.T) {\n\tt.Parallel()\n\n\tapp := createTestApp(t)\n\n\t// Create a user\n\tuser := app.state.CreateUserForTest(\"reauth-untag-user\")\n\n\t// Update policy manager to recognize the new user\n\t// This is necessary because CreateUserForTest doesn't update the policy manager\n\terr := app.state.UpdatePolicyManagerUsersForTest()\n\trequire.NoError(t, err, \"Failed to update policy manager users\")\n\n\t// Set up policy that allows the user to own these tags\n\tpolicy := `{\n\t\t\"tagOwners\": {\n\t\t\t\"tag:valid-owned\": [\"reauth-untag-user@\"],\n\t\t\t\"tag:second\": [\"reauth-untag-user@\"]\n\t\t},\n\t\t\"acls\": [{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}]\n\t}`\n\t_, err = app.state.SetPolicy([]byte(policy))\n\trequire.NoError(t, err, \"Failed to set policy\")\n\n\tmachineKey := key.NewMachine()\n\tnodeKey1 := key.NewNode()\n\n\t// Step 1: Initial registration with tags\n\tregistrationID1 := types.MustAuthID()\n\tregEntry1 := types.NewRegisterAuthRequest(types.Node{\n\t\tMachineKey: machineKey.Public(),\n\t\tNodeKey:    nodeKey1.Public(),\n\t\tHostname:   \"reauth-untag-node\",\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname:    \"reauth-untag-node\",\n\t\t\tRequestTags: []string{\"tag:valid-owned\", \"tag:second\"},\n\t\t},\n\t})\n\tapp.state.SetAuthCacheEntry(registrationID1, regEntry1)\n\n\t// Complete initial registration with tags\n\tnode, _, err := app.state.HandleNodeFromAuthPath(\n\t\tregistrationID1,\n\t\ttypes.UserID(user.ID),\n\t\tnil,\n\t\t\"webauth\",\n\t)\n\trequire.NoError(t, err, \"Initial registration should succeed\")\n\trequire.True(t, node.IsTagged(), \"Node should be tagged after initial registration\")\n\trequire.ElementsMatch(t, []string{\"tag:valid-owned\", \"tag:second\"}, node.Tags().AsSlice())\n\tt.Logf(\"Initial registration complete - Node ID: %d, Tags: %v, IsTagged: %t\",\n\t\tnode.ID().Uint64(), node.Tags().AsSlice(), node.IsTagged())\n\n\t// Step 2: Reauth with EMPTY tags to untag\n\tnodeKey2 := key.NewNode() // New node key for reauth\n\tregistrationID2 := types.MustAuthID()\n\tregEntry2 := types.NewRegisterAuthRequest(types.Node{\n\t\tMachineKey: machineKey.Public(), // Same machine key\n\t\tNodeKey:    nodeKey2.Public(),   // Different node key (rotation)\n\t\tHostname:   \"reauth-untag-node\",\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname:    
\"reauth-untag-node\",\n\t\t\tRequestTags: []string{}, // EMPTY - should untag\n\t\t},\n\t})\n\tapp.state.SetAuthCacheEntry(registrationID2, regEntry2)\n\n\t// Complete reauth with empty tags\n\tnodeAfterReauth, _, err := app.state.HandleNodeFromAuthPath(\n\t\tregistrationID2,\n\t\ttypes.UserID(user.ID),\n\t\tnil,\n\t\t\"webauth\",\n\t)\n\trequire.NoError(t, err, \"Reauth should succeed\")\n\n\t// Verify tags were removed\n\trequire.False(t, nodeAfterReauth.IsTagged(), \"Node should NOT be tagged after reauth with empty tags\")\n\trequire.Empty(t, nodeAfterReauth.Tags().AsSlice(), \"Node should have no tags\")\n\n\t// Verify ownership returned to user\n\trequire.True(t, nodeAfterReauth.UserID().Valid(), \"Node should have a user ID\")\n\trequire.Equal(t, user.ID, nodeAfterReauth.UserID().Get(), \"Node should be owned by the user again\")\n\n\t// Verify it's the same node (not a new one)\n\trequire.Equal(t, node.ID(), nodeAfterReauth.ID(), \"Should be the same node after reauth\")\n\n\tt.Logf(\"Reauth complete - Node ID: %d, Tags: %v, IsTagged: %t, UserID: %d\",\n\t\tnodeAfterReauth.ID().Uint64(), nodeAfterReauth.Tags().AsSlice(),\n\t\tnodeAfterReauth.IsTagged(), nodeAfterReauth.UserID().Get())\n}\n\n// TestAuthKeyTaggedToUserOwnedViaReauth tests that a node originally registered\n// with a tagged pre-auth key can transition to user-owned by re-authenticating\n// via web auth with empty RequestTags. This ensures authkey-tagged nodes are\n// not permanently locked to being tagged.\nfunc TestAuthKeyTaggedToUserOwnedViaReauth(t *testing.T) {\n\tt.Parallel()\n\n\tapp := createTestApp(t)\n\n\t// Create a user\n\tuser := app.state.CreateUserForTest(\"authkey-to-user\")\n\n\t// Create a tagged pre-auth key\n\tauthKeyTags := []string{\"tag:server\", \"tag:prod\"}\n\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, authKeyTags)\n\trequire.NoError(t, err, \"Failed to create tagged pre-auth key\")\n\n\tmachineKey := key.NewMachine()\n\tnodeKey1 := key.NewNode()\n\n\t// Step 1: Initial registration with tagged pre-auth key\n\tregReq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak.Key,\n\t\t},\n\t\tNodeKey: nodeKey1.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"authkey-tagged-node\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\tresp, err := app.handleRegisterWithAuthKey(regReq, machineKey.Public())\n\trequire.NoError(t, err, \"Initial registration should succeed\")\n\trequire.True(t, resp.MachineAuthorized, \"Node should be authorized\")\n\n\t// Verify initial state: node is tagged via authkey\n\tnode, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\trequire.True(t, found, \"Node should be found\")\n\trequire.True(t, node.IsTagged(), \"Node should be tagged after authkey registration\")\n\trequire.ElementsMatch(t, authKeyTags, node.Tags().AsSlice(), \"Node should have authkey tags\")\n\trequire.NotNil(t, node.AuthKey(), \"Node should have AuthKey reference\")\n\trequire.Positive(t, node.AuthKey().Tags().Len(), \"AuthKey should have tags\")\n\n\tt.Logf(\"Initial registration complete - Node ID: %d, Tags: %v, IsTagged: %t, AuthKey.Tags.Len: %d\",\n\t\tnode.ID().Uint64(), node.Tags().AsSlice(), node.IsTagged(), node.AuthKey().Tags().Len())\n\n\t// Step 2: Reauth via web auth with EMPTY tags to transition to user-owned\n\tnodeKey2 := key.NewNode() // New node key for reauth\n\tregistrationID := types.MustAuthID()\n\tregEntry := types.NewRegisterAuthRequest(types.Node{\n\t\tMachineKey: 
machineKey.Public(), // Same machine key\n\t\tNodeKey:    nodeKey2.Public(),   // Different node key (rotation)\n\t\tHostname:   \"authkey-tagged-node\",\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname:    \"authkey-tagged-node\",\n\t\t\tRequestTags: []string{}, // EMPTY - should untag\n\t\t},\n\t})\n\tapp.state.SetAuthCacheEntry(registrationID, regEntry)\n\n\t// Complete reauth with empty tags\n\tnodeAfterReauth, _, err := app.state.HandleNodeFromAuthPath(\n\t\tregistrationID,\n\t\ttypes.UserID(user.ID),\n\t\tnil,\n\t\t\"webauth\",\n\t)\n\trequire.NoError(t, err, \"Reauth should succeed\")\n\n\t// Verify tags were removed (authkey-tagged → user-owned transition)\n\trequire.False(t, nodeAfterReauth.IsTagged(), \"Node should NOT be tagged after reauth with empty tags\")\n\trequire.Empty(t, nodeAfterReauth.Tags().AsSlice(), \"Node should have no tags\")\n\n\t// Verify ownership returned to user\n\trequire.True(t, nodeAfterReauth.UserID().Valid(), \"Node should have a user ID\")\n\trequire.Equal(t, user.ID, nodeAfterReauth.UserID().Get(), \"Node should be owned by the user\")\n\n\t// Verify it's the same node (not a new one)\n\trequire.Equal(t, node.ID(), nodeAfterReauth.ID(), \"Should be the same node after reauth\")\n\n\t// AuthKey reference should still exist (for audit purposes)\n\trequire.NotNil(t, nodeAfterReauth.AuthKey(), \"AuthKey reference should be preserved\")\n\n\tt.Logf(\"Reauth complete - Node ID: %d, Tags: %v, IsTagged: %t, UserID: %d\",\n\t\tnodeAfterReauth.ID().Uint64(), nodeAfterReauth.Tags().AsSlice(),\n\t\tnodeAfterReauth.IsTagged(), nodeAfterReauth.UserID().Get())\n}\n\n// TestDeletedPreAuthKeyNotRecreatedOnNodeUpdate tests that when a PreAuthKey is deleted,\n// subsequent node updates (like those triggered by MapRequests) do not recreate the key.\n//\n// This reproduces the bug where:\n//  1. Create a tagged preauthkey and register a node\n//  2. Delete the preauthkey (confirmed gone from pre_auth_keys DB table)\n//  3. Node sends MapRequest (e.g., after tailscaled restart)\n//  4. 
BUG: The preauthkey reappears because GORM's Updates() upserts the stale AuthKey\n//     data that still exists in the NodeStore's in-memory cache.\n//\n// The fix is to use Omit(\"AuthKey\") on all node Updates() calls to prevent GORM\n// from touching the AuthKey association.\nfunc TestDeletedPreAuthKeyNotRecreatedOnNodeUpdate(t *testing.T) {\n\tt.Parallel()\n\n\tapp := createTestApp(t)\n\n\t// Create user and tagged pre-auth key\n\tuser := app.state.CreateUserForTest(\"test-user\")\n\tpakNew, err := app.state.CreatePreAuthKey(user.TypedID(), false, false, nil, []string{\"tag:test\"})\n\trequire.NoError(t, err)\n\n\tpakID := pakNew.ID\n\tt.Logf(\"Created PreAuthKey ID: %d\", pakID)\n\n\t// Register a node with the pre-auth key\n\tmachineKey := key.NewMachine()\n\tnodeKey := key.NewNode()\n\n\tregisterReq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pakNew.Key,\n\t\t},\n\t\tNodeKey: nodeKey.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"test-node\",\n\t\t},\n\t}\n\n\tresp, err := app.handleRegister(context.Background(), registerReq, machineKey.Public())\n\trequire.NoError(t, err, \"registration should succeed\")\n\trequire.True(t, resp.MachineAuthorized, \"node should be authorized\")\n\n\t// Verify node exists and has AuthKey reference\n\tnode, found := app.state.GetNodeByNodeKey(nodeKey.Public())\n\trequire.True(t, found, \"node should exist\")\n\trequire.True(t, node.AuthKeyID().Valid(), \"node should have AuthKeyID set\")\n\trequire.Equal(t, pakID, node.AuthKeyID().Get(), \"node should reference the correct PreAuthKey\")\n\tt.Logf(\"Node ID: %d, AuthKeyID: %d\", node.ID().Uint64(), node.AuthKeyID().Get())\n\n\t// Verify the PreAuthKey exists in the database\n\tvar pakCount int64\n\n\terr = app.state.DB().DB.Model(&types.PreAuthKey{}).Where(\"id = ?\", pakID).Count(&pakCount).Error\n\trequire.NoError(t, err)\n\trequire.Equal(t, int64(1), pakCount, \"PreAuthKey should exist in database\")\n\n\t// Delete the PreAuthKey\n\tt.Log(\"Deleting PreAuthKey...\")\n\n\terr = app.state.DeletePreAuthKey(pakID)\n\trequire.NoError(t, err, \"deleting PreAuthKey should succeed\")\n\n\t// Verify the PreAuthKey is gone from the database\n\terr = app.state.DB().DB.Model(&types.PreAuthKey{}).Where(\"id = ?\", pakID).Count(&pakCount).Error\n\trequire.NoError(t, err)\n\trequire.Equal(t, int64(0), pakCount, \"PreAuthKey should be deleted from database\")\n\tt.Log(\"PreAuthKey deleted from database\")\n\n\t// Verify the node's auth_key_id is now NULL in the database\n\tvar dbNode types.Node\n\n\terr = app.state.DB().DB.First(&dbNode, node.ID().Uint64()).Error\n\trequire.NoError(t, err)\n\trequire.Nil(t, dbNode.AuthKeyID, \"node's AuthKeyID should be NULL after PreAuthKey deletion\")\n\tt.Log(\"Node's AuthKeyID is NULL in database\")\n\n\t// The NodeStore may still have stale AuthKey data in memory.\n\t// Now simulate what happens when the node sends a MapRequest after a tailscaled restart.\n\t// This triggers persistNodeToDB which calls GORM's Updates().\n\n\t// Simulate a MapRequest by updating the node through the state layer\n\t// This mimics what poll.go does when processing MapRequests\n\tmapReq := tailcfg.MapRequest{\n\t\tNodeKey:  nodeKey.Public(),\n\t\tDiscoKey: node.DiscoKey(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname:  \"test-node\",\n\t\t\tGoVersion: \"go1.21\", // Some change to trigger an update\n\t\t},\n\t}\n\n\t// Process the MapRequest-like update\n\t// This calls UpdateNodeFromMapRequest which eventually calls 
persistNodeToDB\n\t_, err = app.state.UpdateNodeFromMapRequest(node.ID(), mapReq)\n\trequire.NoError(t, err, \"UpdateNodeFromMapRequest should succeed\")\n\tt.Log(\"Simulated MapRequest update completed\")\n\n\t// THE CRITICAL CHECK: Verify the PreAuthKey was NOT recreated\n\terr = app.state.DB().DB.Model(&types.PreAuthKey{}).Where(\"id = ?\", pakID).Count(&pakCount).Error\n\trequire.NoError(t, err)\n\trequire.Equal(t, int64(0), pakCount,\n\t\t\"BUG: PreAuthKey was recreated! The deleted PreAuthKey should NOT reappear after node update\")\n\n\tt.Log(\"SUCCESS: PreAuthKey remained deleted after node update\")\n}\n\n// TestTaggedNodeWithoutUserToDifferentUser tests that a node registered with a\n// tags-only PreAuthKey (no user) can be re-registered to a different user\n// without panicking. This reproduces the issue reported in #3038.\nfunc TestTaggedNodeWithoutUserToDifferentUser(t *testing.T) {\n\tt.Parallel()\n\n\tapp := createTestApp(t)\n\n\t// Step 1: Create a tags-only PreAuthKey (no user, only tags)\n\t// This is valid for tagged nodes where ownership is defined by tags, not users\n\ttags := []string{\"tag:server\", \"tag:prod\"}\n\tpak, err := app.state.CreatePreAuthKey(nil, true, false, nil, tags)\n\trequire.NoError(t, err, \"Failed to create tags-only pre-auth key\")\n\trequire.Nil(t, pak.User, \"Tags-only PAK should have nil User\")\n\n\tmachineKey := key.NewMachine()\n\tnodeKey1 := key.NewNode()\n\n\t// Step 2: Register node with tags-only PreAuthKey\n\tregReq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak.Key,\n\t\t},\n\t\tNodeKey: nodeKey1.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"tagged-orphan-node\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\tresp, err := app.handleRegisterWithAuthKey(regReq, machineKey.Public())\n\trequire.NoError(t, err, \"Initial registration should succeed\")\n\trequire.True(t, resp.MachineAuthorized, \"Node should be authorized\")\n\n\t// Verify initial state: node is tagged with no UserID\n\tnode, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\trequire.True(t, found, \"Node should be found\")\n\trequire.True(t, node.IsTagged(), \"Node should be tagged\")\n\trequire.ElementsMatch(t, tags, node.Tags().AsSlice(), \"Node should have tags from PAK\")\n\trequire.False(t, node.UserID().Valid(), \"Node should NOT have a UserID (tags-only PAK)\")\n\trequire.False(t, node.User().Valid(), \"Node should NOT have a User (tags-only PAK)\")\n\n\tt.Logf(\"Initial registration complete - Node ID: %d, Tags: %v, IsTagged: %t, UserID valid: %t\",\n\t\tnode.ID().Uint64(), node.Tags().AsSlice(), node.IsTagged(), node.UserID().Valid())\n\n\t// Step 3: Create a new user (alice) to re-register the node to\n\talice := app.state.CreateUserForTest(\"alice\")\n\trequire.NotNil(t, alice, \"Alice user should be created\")\n\n\t// Step 4: Re-register the node to alice via HandleNodeFromAuthPath\n\t// This is what happens when running: headscale auth register --auth-id <id> --user alice\n\tnodeKey2 := key.NewNode()\n\tregistrationID := types.MustAuthID()\n\tregEntry := types.NewRegisterAuthRequest(types.Node{\n\t\tMachineKey: machineKey.Public(), // Same machine key as the tagged node\n\t\tNodeKey:    nodeKey2.Public(),\n\t\tHostname:   \"tagged-orphan-node\",\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname:    \"tagged-orphan-node\",\n\t\t\tRequestTags: []string{}, // Empty - transition to user-owned\n\t\t},\n\t})\n\tapp.state.SetAuthCacheEntry(registrationID, regEntry)\n\n\t// This should 
NOT panic - before the fix, this would panic with:\n\t// panic: runtime error: invalid memory address or nil pointer dereference\n\t// at UserView.Name() because the existing node has no User\n\tnodeAfterReauth, _, err := app.state.HandleNodeFromAuthPath(\n\t\tregistrationID,\n\t\ttypes.UserID(alice.ID),\n\t\tnil,\n\t\t\"cli\",\n\t)\n\trequire.NoError(t, err, \"Re-registration to alice should succeed without panic\")\n\n\t// Verify the existing tagged node was converted to be owned by alice (same node ID)\n\trequire.True(t, nodeAfterReauth.Valid(), \"Node should be valid\")\n\trequire.True(t, nodeAfterReauth.UserID().Valid(), \"Node should have a UserID\")\n\trequire.Equal(t, alice.ID, nodeAfterReauth.UserID().Get(), \"Node should be owned by alice\")\n\trequire.Equal(t, node.ID(), nodeAfterReauth.ID(), \"Should be the same node (converted, not new)\")\n\trequire.False(t, nodeAfterReauth.IsTagged(), \"Node should no longer be tagged\")\n\trequire.Empty(t, nodeAfterReauth.Tags().AsSlice(), \"Node should have no tags\")\n\n\t// Verify Owner() works without panicking - this is what the mapper's\n\t// generateUserProfiles calls, and it would panic with a nil pointer\n\t// dereference if node.User was not set during the tag→user conversion.\n\towner := nodeAfterReauth.Owner()\n\trequire.True(t, owner.Valid(), \"Owner should be valid after conversion (mapper would panic if nil)\")\n\trequire.Equal(t, alice.ID, owner.Model().ID, \"Owner should be alice\")\n\n\tt.Logf(\"Re-registration complete - Node ID: %d, Tags: %v, IsTagged: %t, UserID: %d\",\n\t\tnodeAfterReauth.ID().Uint64(), nodeAfterReauth.Tags().AsSlice(),\n\t\tnodeAfterReauth.IsTagged(), nodeAfterReauth.UserID().Get())\n}\n"
  },
  {
    "path": "hscontrol/capver/capver.go",
    "content": "package capver\n\n//go:generate go run ../../tools/capver/main.go\n\nimport (\n\t\"slices\"\n\t\"sort\"\n\t\"strings\"\n\n\txmaps \"golang.org/x/exp/maps\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/util/set\"\n)\n\nconst (\n\t// minVersionParts is the minimum number of version parts needed for major.minor.\n\tminVersionParts = 2\n\n\t// legacyDERPCapVer is the capability version when LegacyDERP can be cleaned up.\n\tlegacyDERPCapVer = 111\n)\n\n// CanOldCodeBeCleanedUp is intended to be called on startup to see if\n// there are old code that can ble cleaned up, entries should contain\n// a CapVer where something can be cleaned up and a panic if it can.\n// This is only intended to catch things in tests.\n//\n// All uses of Capability version checks should be listed here.\nfunc CanOldCodeBeCleanedUp() {\n\tif MinSupportedCapabilityVersion >= legacyDERPCapVer {\n\t\tpanic(\"LegacyDERP can be cleaned up in tail.go\")\n\t}\n}\n\nfunc tailscaleVersSorted() []string {\n\tvers := xmaps.Keys(tailscaleToCapVer)\n\tsort.Strings(vers)\n\n\treturn vers\n}\n\nfunc capVersSorted() []tailcfg.CapabilityVersion {\n\tcapVers := xmaps.Keys(capVerToTailscaleVer)\n\tslices.Sort(capVers)\n\n\treturn capVers\n}\n\n// TailscaleVersion returns the Tailscale version for the given CapabilityVersion.\nfunc TailscaleVersion(ver tailcfg.CapabilityVersion) string {\n\treturn capVerToTailscaleVer[ver]\n}\n\n// CapabilityVersion returns the CapabilityVersion for the given Tailscale version.\n// It accepts both full versions (v1.90.1) and minor versions (v1.90).\nfunc CapabilityVersion(ver string) tailcfg.CapabilityVersion {\n\tif !strings.HasPrefix(ver, \"v\") {\n\t\tver = \"v\" + ver\n\t}\n\n\t// Try direct lookup first (works for minor versions like v1.90)\n\tif cv, ok := tailscaleToCapVer[ver]; ok {\n\t\treturn cv\n\t}\n\n\t// Try extracting minor version from full version (v1.90.1 -> v1.90)\n\tparts := strings.Split(strings.TrimPrefix(ver, \"v\"), \".\")\n\tif len(parts) >= minVersionParts {\n\t\tminor := \"v\" + parts[0] + \".\" + parts[1]\n\t\treturn tailscaleToCapVer[minor]\n\t}\n\n\treturn 0\n}\n\n// TailscaleLatest returns the n latest Tailscale versions.\nfunc TailscaleLatest(n int) []string {\n\tif n <= 0 {\n\t\treturn nil\n\t}\n\n\ttsSorted := tailscaleVersSorted()\n\n\tif n > len(tsSorted) {\n\t\treturn tsSorted\n\t}\n\n\treturn tsSorted[len(tsSorted)-n:]\n}\n\n// TailscaleLatestMajorMinor returns the n latest Tailscale versions (e.g. 1.80).\nfunc TailscaleLatestMajorMinor(n int, stripV bool) []string {\n\tif n <= 0 {\n\t\treturn nil\n\t}\n\n\tmajors := set.Set[string]{}\n\n\tfor _, vers := range tailscaleVersSorted() {\n\t\tif stripV {\n\t\t\tvers = strings.TrimPrefix(vers, \"v\")\n\t\t}\n\n\t\tv := strings.Split(vers, \".\")\n\t\tmajors.Add(v[0] + \".\" + v[1])\n\t}\n\n\tmajorSl := majors.Slice()\n\tsort.Strings(majorSl)\n\n\tif n > len(majorSl) {\n\t\treturn majorSl\n\t}\n\n\treturn majorSl[len(majorSl)-n:]\n}\n\n// CapVerLatest returns the n latest CapabilityVersions.\nfunc CapVerLatest(n int) []tailcfg.CapabilityVersion {\n\tif n <= 0 {\n\t\treturn nil\n\t}\n\n\ts := capVersSorted()\n\n\tif n > len(s) {\n\t\treturn s\n\t}\n\n\treturn s[len(s)-n:]\n}\n"
  },
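  {
    "path": "hscontrol/capver/capver_example_test.go",
    "content": "package capver\n\nimport \"fmt\"\n\n// Illustrative sketch, not generated: these testable examples assume the\n// current tables in capver_generated.go (\"v1.90\" -> 130, newest entries\n// v1.92 and v1.94) and must be updated whenever those tables are\n// regenerated.\n\n// ExampleCapabilityVersion shows how lookups fall back from a full version\n// to its major.minor entry, and return 0 for unknown versions.\nfunc ExampleCapabilityVersion() {\n\tfmt.Println(CapabilityVersion(\"v1.90\"))   // direct minor-version lookup\n\tfmt.Println(CapabilityVersion(\"v1.90.1\")) // full version reduced to v1.90\n\tfmt.Println(CapabilityVersion(\"1.90\"))    // leading \"v\" is added automatically\n\tfmt.Println(CapabilityVersion(\"v0.1\"))    // unknown version\n\t// Output:\n\t// 130\n\t// 130\n\t// 130\n\t// 0\n}\n\n// ExampleTailscaleLatest shows the n newest tracked releases.\nfunc ExampleTailscaleLatest() {\n\tfmt.Println(TailscaleLatest(2))\n\t// Output:\n\t// [v1.92 v1.94]\n}\n"
  },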
  {
    "path": "hscontrol/capver/capver_generated.go",
    "content": "package capver\n\n// Generated DO NOT EDIT\n\nimport \"tailscale.com/tailcfg\"\n\nvar tailscaleToCapVer = map[string]tailcfg.CapabilityVersion{\n\t\"v1.24\": 32,\n\t\"v1.26\": 32,\n\t\"v1.28\": 32,\n\t\"v1.30\": 41,\n\t\"v1.32\": 46,\n\t\"v1.34\": 51,\n\t\"v1.36\": 56,\n\t\"v1.38\": 58,\n\t\"v1.40\": 61,\n\t\"v1.42\": 62,\n\t\"v1.44\": 63,\n\t\"v1.46\": 65,\n\t\"v1.48\": 68,\n\t\"v1.50\": 74,\n\t\"v1.52\": 79,\n\t\"v1.54\": 79,\n\t\"v1.56\": 82,\n\t\"v1.58\": 85,\n\t\"v1.60\": 87,\n\t\"v1.62\": 88,\n\t\"v1.64\": 90,\n\t\"v1.66\": 95,\n\t\"v1.68\": 97,\n\t\"v1.70\": 102,\n\t\"v1.72\": 104,\n\t\"v1.74\": 106,\n\t\"v1.76\": 106,\n\t\"v1.78\": 109,\n\t\"v1.80\": 113,\n\t\"v1.82\": 115,\n\t\"v1.84\": 116,\n\t\"v1.86\": 123,\n\t\"v1.88\": 125,\n\t\"v1.90\": 130,\n\t\"v1.92\": 131,\n\t\"v1.94\": 131,\n}\n\nvar capVerToTailscaleVer = map[tailcfg.CapabilityVersion]string{\n\t32:  \"v1.24\",\n\t41:  \"v1.30\",\n\t46:  \"v1.32\",\n\t51:  \"v1.34\",\n\t56:  \"v1.36\",\n\t58:  \"v1.38\",\n\t61:  \"v1.40\",\n\t62:  \"v1.42\",\n\t63:  \"v1.44\",\n\t65:  \"v1.46\",\n\t68:  \"v1.48\",\n\t74:  \"v1.50\",\n\t79:  \"v1.52\",\n\t82:  \"v1.56\",\n\t85:  \"v1.58\",\n\t87:  \"v1.60\",\n\t88:  \"v1.62\",\n\t90:  \"v1.64\",\n\t95:  \"v1.66\",\n\t97:  \"v1.68\",\n\t102: \"v1.70\",\n\t104: \"v1.72\",\n\t106: \"v1.74\",\n\t109: \"v1.78\",\n\t113: \"v1.80\",\n\t115: \"v1.82\",\n\t116: \"v1.84\",\n\t123: \"v1.86\",\n\t125: \"v1.88\",\n\t130: \"v1.90\",\n\t131: \"v1.92\",\n}\n\n// SupportedMajorMinorVersions is the number of major.minor Tailscale versions supported.\nconst SupportedMajorMinorVersions = 10\n\n// MinSupportedCapabilityVersion represents the minimum capability version\n// supported by this Headscale instance (latest 10 minor versions)\nconst MinSupportedCapabilityVersion tailcfg.CapabilityVersion = 106\n"
  },
  {
    "path": "hscontrol/capver/capver_test.go",
    "content": "package capver\n\nimport (\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n)\n\nfunc TestTailscaleLatestMajorMinor(t *testing.T) {\n\tfor _, test := range tailscaleLatestMajorMinorTests {\n\t\tt.Run(\"\", func(t *testing.T) {\n\t\t\toutput := TailscaleLatestMajorMinor(test.n, test.stripV)\n\t\t\tif diff := cmp.Diff(output, test.expected); diff != \"\" {\n\t\t\t\tt.Errorf(\"TailscaleLatestMajorMinor(%d, %v) mismatch (-want +got):\\n%s\", test.n, test.stripV, diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestCapVerMinimumTailscaleVersion(t *testing.T) {\n\tfor _, test := range capVerMinimumTailscaleVersionTests {\n\t\tt.Run(\"\", func(t *testing.T) {\n\t\t\toutput := TailscaleVersion(test.input)\n\t\t\tif output != test.expected {\n\t\t\t\tt.Errorf(\"CapVerFromTailscaleVersion(%d) = %s; want %s\", test.input, output, test.expected)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "hscontrol/capver/capver_test_data.go",
    "content": "package capver\n\n// Generated DO NOT EDIT\n\nimport \"tailscale.com/tailcfg\"\n\nvar tailscaleLatestMajorMinorTests = []struct {\n\tn        int\n\tstripV   bool\n\texpected []string\n}{\n\t{3, false, []string{\"v1.90\", \"v1.92\", \"v1.94\"}},\n\t{2, true, []string{\"1.92\", \"1.94\"}},\n\t{10, true, []string{\n\t\t\"1.76\",\n\t\t\"1.78\",\n\t\t\"1.80\",\n\t\t\"1.82\",\n\t\t\"1.84\",\n\t\t\"1.86\",\n\t\t\"1.88\",\n\t\t\"1.90\",\n\t\t\"1.92\",\n\t\t\"1.94\",\n\t}},\n\t{0, false, nil},\n}\n\nvar capVerMinimumTailscaleVersionTests = []struct {\n\tinput    tailcfg.CapabilityVersion\n\texpected string\n}{\n\t{106, \"v1.74\"},\n\t{32, \"v1.24\"},\n\t{41, \"v1.30\"},\n\t{46, \"v1.32\"},\n\t{51, \"v1.34\"},\n\t{9001, \"\"}, // Test case for a version higher than any in the map\n\t{60, \"\"},   // Test case for a version lower than any in the map\n}\n"
  },
  {
    "path": "hscontrol/db/api_key.go",
    "content": "package db\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"golang.org/x/crypto/bcrypt\"\n\t\"gorm.io/gorm\"\n)\n\nconst (\n\tapiKeyPrefix       = \"hskey-api-\" //nolint:gosec // This is a prefix, not a credential\n\tapiKeyPrefixLength = 12\n\tapiKeyHashLength   = 64\n\n\t// Legacy format constants.\n\tlegacyAPIPrefixLength = 7\n\tlegacyAPIKeyLength    = 32\n)\n\nvar (\n\tErrAPIKeyFailedToParse     = errors.New(\"failed to parse ApiKey\")\n\tErrAPIKeyGenerationFailed  = errors.New(\"failed to generate API key\")\n\tErrAPIKeyInvalidGeneration = errors.New(\"generated API key failed validation\")\n)\n\n// CreateAPIKey creates a new ApiKey in a user, and returns it.\nfunc (hsdb *HSDatabase) CreateAPIKey(\n\texpiration *time.Time,\n) (string, *types.APIKey, error) {\n\t// Generate public prefix (12 chars)\n\tprefix, err := util.GenerateRandomStringURLSafe(apiKeyPrefixLength)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\t// Validate prefix\n\tif len(prefix) != apiKeyPrefixLength {\n\t\treturn \"\", nil, fmt.Errorf(\"%w: generated prefix has invalid length: expected %d, got %d\", ErrAPIKeyInvalidGeneration, apiKeyPrefixLength, len(prefix))\n\t}\n\n\tif !isValidBase64URLSafe(prefix) {\n\t\treturn \"\", nil, fmt.Errorf(\"%w: generated prefix contains invalid characters\", ErrAPIKeyInvalidGeneration)\n\t}\n\n\t// Generate secret (64 chars)\n\tsecret, err := util.GenerateRandomStringURLSafe(apiKeyHashLength)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\t// Validate secret\n\tif len(secret) != apiKeyHashLength {\n\t\treturn \"\", nil, fmt.Errorf(\"%w: generated secret has invalid length: expected %d, got %d\", ErrAPIKeyInvalidGeneration, apiKeyHashLength, len(secret))\n\t}\n\n\tif !isValidBase64URLSafe(secret) {\n\t\treturn \"\", nil, fmt.Errorf(\"%w: generated secret contains invalid characters\", ErrAPIKeyInvalidGeneration)\n\t}\n\n\t// Full key string (shown ONCE to user)\n\tkeyStr := apiKeyPrefix + prefix + \"-\" + secret\n\n\t// bcrypt hash of secret\n\thash, err := bcrypt.GenerateFromPassword([]byte(secret), bcrypt.DefaultCost)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\tkey := types.APIKey{\n\t\tPrefix:     prefix,\n\t\tHash:       hash,\n\t\tExpiration: expiration,\n\t}\n\n\tif err := hsdb.DB.Save(&key).Error; err != nil { //nolint:noinlineerr\n\t\treturn \"\", nil, fmt.Errorf(\"saving API key to database: %w\", err)\n\t}\n\n\treturn keyStr, &key, nil\n}\n\n// ListAPIKeys returns the list of ApiKeys for a user.\nfunc (hsdb *HSDatabase) ListAPIKeys() ([]types.APIKey, error) {\n\tkeys := []types.APIKey{}\n\n\terr := hsdb.DB.Find(&keys).Error\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn keys, nil\n}\n\n// GetAPIKey returns a ApiKey for a given key.\nfunc (hsdb *HSDatabase) GetAPIKey(prefix string) (*types.APIKey, error) {\n\tkey := types.APIKey{}\n\tif result := hsdb.DB.First(&key, \"prefix = ?\", prefix); result.Error != nil {\n\t\treturn nil, result.Error\n\t}\n\n\treturn &key, nil\n}\n\n// GetAPIKeyByID returns a ApiKey for a given id.\nfunc (hsdb *HSDatabase) GetAPIKeyByID(id uint64) (*types.APIKey, error) {\n\tkey := types.APIKey{}\n\tif result := hsdb.DB.Find(&types.APIKey{ID: id}).First(&key); result.Error != nil {\n\t\treturn nil, result.Error\n\t}\n\n\treturn &key, nil\n}\n\n// DestroyAPIKey destroys a ApiKey. 
Returns error if the ApiKey\n// does not exist.\nfunc (hsdb *HSDatabase) DestroyAPIKey(key types.APIKey) error {\n\tif result := hsdb.DB.Unscoped().Delete(key); result.Error != nil {\n\t\treturn result.Error\n\t}\n\n\treturn nil\n}\n\n// ExpireAPIKey marks an ApiKey as expired.\nfunc (hsdb *HSDatabase) ExpireAPIKey(key *types.APIKey) error {\n\terr := hsdb.DB.Model(key).Update(\"Expiration\", time.Now()).Error\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (hsdb *HSDatabase) ValidateAPIKey(keyStr string) (bool, error) {\n\tkey, err := validateAPIKey(hsdb.DB, keyStr)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif key.Expiration != nil && key.Expiration.Before(time.Now()) {\n\t\treturn false, nil\n\t}\n\n\treturn true, nil\n}\n\n// ParseAPIKeyPrefix extracts the database prefix from a display prefix.\n// Handles formats: \"hskey-api-{12chars}-***\", \"hskey-api-{12chars}\", or just \"{12chars}\".\n// Returns the 12-character prefix suitable for database lookup.\nfunc ParseAPIKeyPrefix(displayPrefix string) (string, error) {\n\t// If it's already just the 12-character prefix, return it\n\tif len(displayPrefix) == apiKeyPrefixLength && isValidBase64URLSafe(displayPrefix) {\n\t\treturn displayPrefix, nil\n\t}\n\n\t// If it starts with the API key prefix, parse it\n\tif strings.HasPrefix(displayPrefix, apiKeyPrefix) {\n\t\t// Remove the \"hskey-api-\" prefix\n\t\t_, remainder, found := strings.Cut(displayPrefix, apiKeyPrefix)\n\t\tif !found {\n\t\t\treturn \"\", fmt.Errorf(\"%w: invalid display prefix format\", ErrAPIKeyFailedToParse)\n\t\t}\n\n\t\t// Extract just the first 12 characters (the actual prefix)\n\t\tif len(remainder) < apiKeyPrefixLength {\n\t\t\treturn \"\", fmt.Errorf(\"%w: prefix too short\", ErrAPIKeyFailedToParse)\n\t\t}\n\n\t\tprefix := remainder[:apiKeyPrefixLength]\n\n\t\t// Validate it's base64 URL-safe\n\t\tif !isValidBase64URLSafe(prefix) {\n\t\t\treturn \"\", fmt.Errorf(\"%w: prefix contains invalid characters\", ErrAPIKeyFailedToParse)\n\t\t}\n\n\t\treturn prefix, nil\n\t}\n\n\t// For legacy 7-character prefixes or other formats, return as-is\n\treturn displayPrefix, nil\n}\n\n// validateAPIKey validates an API key and returns the key if valid.\n// Handles both new (hskey-api-{prefix}-{secret}) and legacy (prefix.secret) formats.\nfunc validateAPIKey(db *gorm.DB, keyStr string) (*types.APIKey, error) {\n\t// Validate input is not empty\n\tif keyStr == \"\" {\n\t\treturn nil, ErrAPIKeyFailedToParse\n\t}\n\n\t// Check for new format: hskey-api-{prefix}-{secret}\n\t_, prefixAndSecret, found := strings.Cut(keyStr, apiKeyPrefix)\n\n\tif !found {\n\t\t// Legacy format: prefix.secret\n\t\treturn validateLegacyAPIKey(db, keyStr)\n\t}\n\n\t// New format: parse and verify\n\tconst expectedMinLength = apiKeyPrefixLength + 1 + apiKeyHashLength\n\tif len(prefixAndSecret) < expectedMinLength {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"%w: key too short, expected at least %d chars after prefix, got %d\",\n\t\t\tErrAPIKeyFailedToParse,\n\t\t\texpectedMinLength,\n\t\t\tlen(prefixAndSecret),\n\t\t)\n\t}\n\n\t// Use fixed-length parsing\n\tprefix := prefixAndSecret[:apiKeyPrefixLength]\n\n\t// Validate separator at expected position\n\tif prefixAndSecret[apiKeyPrefixLength] != '-' {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"%w: expected separator '-' at position %d, got '%c'\",\n\t\t\tErrAPIKeyFailedToParse,\n\t\t\tapiKeyPrefixLength,\n\t\t\tprefixAndSecret[apiKeyPrefixLength],\n\t\t)\n\t}\n\n\tsecret := prefixAndSecret[apiKeyPrefixLength+1:]\n\n\t// Validate secret 
length\n\tif len(secret) != apiKeyHashLength {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"%w: secret length mismatch, expected %d chars, got %d\",\n\t\t\tErrAPIKeyFailedToParse,\n\t\t\tapiKeyHashLength,\n\t\t\tlen(secret),\n\t\t)\n\t}\n\n\t// Validate prefix contains only base64 URL-safe characters\n\tif !isValidBase64URLSafe(prefix) {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"%w: prefix contains invalid characters (expected base64 URL-safe: A-Za-z0-9_-)\",\n\t\t\tErrAPIKeyFailedToParse,\n\t\t)\n\t}\n\n\t// Validate secret contains only base64 URL-safe characters\n\tif !isValidBase64URLSafe(secret) {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"%w: secret contains invalid characters (expected base64 URL-safe: A-Za-z0-9_-)\",\n\t\t\tErrAPIKeyFailedToParse,\n\t\t)\n\t}\n\n\t// Look up by prefix (indexed)\n\tvar key types.APIKey\n\n\terr := db.First(&key, \"prefix = ?\", prefix).Error\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"API key not found: %w\", err)\n\t}\n\n\t// Verify bcrypt hash\n\terr = bcrypt.CompareHashAndPassword(key.Hash, []byte(secret))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid API key: %w\", err)\n\t}\n\n\treturn &key, nil\n}\n\n// validateLegacyAPIKey validates a legacy format API key (prefix.secret).\nfunc validateLegacyAPIKey(db *gorm.DB, keyStr string) (*types.APIKey, error) {\n\t// Legacy format uses \".\" as separator\n\tprefix, secret, found := strings.Cut(keyStr, \".\")\n\tif !found {\n\t\treturn nil, ErrAPIKeyFailedToParse\n\t}\n\n\t// Legacy prefix is 7 chars\n\tif len(prefix) != legacyAPIPrefixLength {\n\t\treturn nil, fmt.Errorf(\"%w: legacy prefix length mismatch\", ErrAPIKeyFailedToParse)\n\t}\n\n\tvar key types.APIKey\n\n\terr := db.First(&key, \"prefix = ?\", prefix).Error\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"API key not found: %w\", err)\n\t}\n\n\t// Verify bcrypt (key.Hash stores bcrypt of full secret)\n\terr = bcrypt.CompareHashAndPassword(key.Hash, []byte(secret))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid API key: %w\", err)\n\t}\n\n\treturn &key, nil\n}\n"
  },
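  {
    "path": "hscontrol/db/api_key_example_test.go",
    "content": "package db\n\nimport \"fmt\"\n\n// Illustrative sketch (hand-written, not part of the main implementation):\n// ParseAPIKeyPrefix needs no database, so it can be demonstrated standalone.\n// The 12-character prefix below is an arbitrary base64 URL-safe placeholder.\nfunc ExampleParseAPIKeyPrefix() {\n\t// Full display format: \"hskey-api-{12 chars}-***\".\n\tp1, _ := ParseAPIKeyPrefix(\"hskey-api-ABCDEFGHIJKL-***\")\n\tfmt.Println(p1)\n\n\t// A bare 12-character prefix is returned unchanged.\n\tp2, _ := ParseAPIKeyPrefix(\"ABCDEFGHIJKL\")\n\tfmt.Println(p2)\n\n\t// Legacy 7-character prefixes pass through as-is.\n\tp3, _ := ParseAPIKeyPrefix(\"abcdefg\")\n\tfmt.Println(p3)\n\n\t// Output:\n\t// ABCDEFGHIJKL\n\t// ABCDEFGHIJKL\n\t// abcdefg\n}\n"
  },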
  {
    "path": "hscontrol/db/api_key_test.go",
    "content": "package db\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"golang.org/x/crypto/bcrypt\"\n)\n\nfunc TestCreateAPIKey(t *testing.T) {\n\tdb, err := newSQLiteTestDB()\n\trequire.NoError(t, err)\n\n\tapiKeyStr, apiKey, err := db.CreateAPIKey(nil)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, apiKey)\n\n\t// Did we get a valid key?\n\tassert.NotNil(t, apiKey.Prefix)\n\tassert.NotNil(t, apiKey.Hash)\n\tassert.NotEmpty(t, apiKeyStr)\n\n\t_, err = db.ListAPIKeys()\n\trequire.NoError(t, err)\n\n\tkeys, err := db.ListAPIKeys()\n\trequire.NoError(t, err)\n\tassert.Len(t, keys, 1)\n}\n\nfunc TestAPIKeyDoesNotExist(t *testing.T) {\n\tdb, err := newSQLiteTestDB()\n\trequire.NoError(t, err)\n\n\tkey, err := db.GetAPIKey(\"does-not-exist\")\n\trequire.Error(t, err)\n\tassert.Nil(t, key)\n}\n\nfunc TestValidateAPIKeyOk(t *testing.T) {\n\tdb, err := newSQLiteTestDB()\n\trequire.NoError(t, err)\n\n\tnowPlus2 := time.Now().Add(2 * time.Hour)\n\tapiKeyStr, apiKey, err := db.CreateAPIKey(&nowPlus2)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, apiKey)\n\n\tvalid, err := db.ValidateAPIKey(apiKeyStr)\n\trequire.NoError(t, err)\n\tassert.True(t, valid)\n}\n\nfunc TestValidateAPIKeyNotOk(t *testing.T) {\n\tdb, err := newSQLiteTestDB()\n\trequire.NoError(t, err)\n\n\tnowMinus2 := time.Now().Add(time.Duration(-2) * time.Hour)\n\tapiKeyStr, apiKey, err := db.CreateAPIKey(&nowMinus2)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, apiKey)\n\n\tvalid, err := db.ValidateAPIKey(apiKeyStr)\n\trequire.NoError(t, err)\n\tassert.False(t, valid)\n\n\tnow := time.Now()\n\tapiKeyStrNow, apiKey, err := db.CreateAPIKey(&now)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, apiKey)\n\n\tvalidNow, err := db.ValidateAPIKey(apiKeyStrNow)\n\trequire.NoError(t, err)\n\tassert.False(t, validNow)\n\n\tvalidSilly, err := db.ValidateAPIKey(\"nota.validkey\")\n\trequire.Error(t, err)\n\tassert.False(t, validSilly)\n\n\tvalidWithErr, err := db.ValidateAPIKey(\"produceerrorkey\")\n\trequire.Error(t, err)\n\tassert.False(t, validWithErr)\n}\n\nfunc TestExpireAPIKey(t *testing.T) {\n\tdb, err := newSQLiteTestDB()\n\trequire.NoError(t, err)\n\n\tnowPlus2 := time.Now().Add(2 * time.Hour)\n\tapiKeyStr, apiKey, err := db.CreateAPIKey(&nowPlus2)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, apiKey)\n\n\tvalid, err := db.ValidateAPIKey(apiKeyStr)\n\trequire.NoError(t, err)\n\tassert.True(t, valid)\n\n\terr = db.ExpireAPIKey(apiKey)\n\trequire.NoError(t, err)\n\tassert.NotNil(t, apiKey.Expiration)\n\n\tnotValid, err := db.ValidateAPIKey(apiKeyStr)\n\trequire.NoError(t, err)\n\tassert.False(t, notValid)\n}\n\nfunc TestAPIKeyWithPrefix(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\ttest func(*testing.T, *HSDatabase)\n\t}{\n\t\t{\n\t\t\tname: \"new_key_with_prefix\",\n\t\t\ttest: func(t *testing.T, db *HSDatabase) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tkeyStr, apiKey, err := db.CreateAPIKey(nil)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// Verify format: hskey-api-{12-char-prefix}-{64-char-secret}\n\t\t\t\tassert.True(t, strings.HasPrefix(keyStr, \"hskey-api-\"))\n\n\t\t\t\t_, prefixAndSecret, found := strings.Cut(keyStr, \"hskey-api-\")\n\t\t\t\tassert.True(t, found)\n\t\t\t\tassert.GreaterOrEqual(t, len(prefixAndSecret), 12+1+64)\n\n\t\t\t\tprefix := prefixAndSecret[:12]\n\t\t\t\tassert.Len(t, prefix, 12)\n\t\t\t\tassert.Equal(t, byte('-'), 
prefixAndSecret[12])\n\t\t\t\tsecret := prefixAndSecret[13:]\n\t\t\t\tassert.Len(t, secret, 64)\n\n\t\t\t\t// Verify stored fields\n\t\t\t\tassert.Len(t, apiKey.Prefix, types.NewAPIKeyPrefixLength)\n\t\t\t\tassert.NotNil(t, apiKey.Hash)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"new_key_can_be_retrieved\",\n\t\t\ttest: func(t *testing.T, db *HSDatabase) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tkeyStr, createdKey, err := db.CreateAPIKey(nil)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// Validate the created key\n\t\t\t\tvalid, err := db.ValidateAPIKey(keyStr)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.True(t, valid)\n\n\t\t\t\t// Verify prefix is correct length\n\t\t\t\tassert.Len(t, createdKey.Prefix, types.NewAPIKeyPrefixLength)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"invalid_key_format_rejected\",\n\t\t\ttest: func(t *testing.T, db *HSDatabase) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tinvalidKeys := []string{\n\t\t\t\t\t\"\",\n\t\t\t\t\t\"hskey-api-short\",\n\t\t\t\t\t\"hskey-api-ABCDEFGHIJKL-tooshort\",\n\t\t\t\t\t\"hskey-api-ABC$EFGHIJKL-\" + strings.Repeat(\"a\", 64),\n\t\t\t\t\t\"hskey-api-ABCDEFGHIJKL\" + strings.Repeat(\"a\", 64), // missing separator\n\t\t\t\t}\n\n\t\t\t\tfor _, invalidKey := range invalidKeys {\n\t\t\t\t\tvalid, err := db.ValidateAPIKey(invalidKey)\n\t\t\t\t\trequire.Error(t, err, \"key should be rejected: %s\", invalidKey)\n\t\t\t\t\tassert.False(t, valid)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"legacy_key_still_works\",\n\t\t\ttest: func(t *testing.T, db *HSDatabase) {\n\t\t\t\tt.Helper()\n\n\t\t\t\t// Insert legacy API key directly (7-char prefix + 32-char secret)\n\t\t\t\tlegacyPrefix := \"abcdefg\"\n\t\t\t\tlegacySecret := strings.Repeat(\"x\", 32)\n\t\t\t\tlegacyKey := legacyPrefix + \".\" + legacySecret\n\t\t\t\thash, err := bcrypt.GenerateFromPassword([]byte(legacySecret), bcrypt.DefaultCost)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tnow := time.Now()\n\t\t\t\terr = db.DB.Exec(`\n\t\t\t\t\tINSERT INTO api_keys (prefix, hash, created_at)\n\t\t\t\t\tVALUES (?, ?, ?)\n\t\t\t\t`, legacyPrefix, hash, now).Error\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// Validate legacy key\n\t\t\t\tvalid, err := db.ValidateAPIKey(legacyKey)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.True(t, valid)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"wrong_secret_rejected\",\n\t\t\ttest: func(t *testing.T, db *HSDatabase) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tkeyStr, _, err := db.CreateAPIKey(nil)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// Tamper with the secret\n\t\t\t\t_, prefixAndSecret, _ := strings.Cut(keyStr, \"hskey-api-\")\n\t\t\t\tprefix := prefixAndSecret[:12]\n\t\t\t\ttamperedKey := \"hskey-api-\" + prefix + \"-\" + strings.Repeat(\"x\", 64)\n\n\t\t\t\tvalid, err := db.ValidateAPIKey(tamperedKey)\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tassert.False(t, valid)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"expired_key_rejected\",\n\t\t\ttest: func(t *testing.T, db *HSDatabase) {\n\t\t\t\tt.Helper()\n\n\t\t\t\t// Create expired key\n\t\t\t\texpired := time.Now().Add(-1 * time.Hour)\n\t\t\t\tkeyStr, _, err := db.CreateAPIKey(&expired)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// Should fail validation\n\t\t\t\tvalid, err := db.ValidateAPIKey(keyStr)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.False(t, valid)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tdb, err := newSQLiteTestDB()\n\t\t\trequire.NoError(t, err)\n\n\t\t\ttt.test(t, db)\n\t\t})\n\t}\n}\n\nfunc TestGetAPIKeyByID(t *testing.T) {\n\tdb, err := 
newSQLiteTestDB()\n\trequire.NoError(t, err)\n\n\t// Create an API key\n\t_, apiKey, err := db.CreateAPIKey(nil)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, apiKey)\n\n\t// Retrieve by ID\n\tretrievedKey, err := db.GetAPIKeyByID(apiKey.ID)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, retrievedKey)\n\tassert.Equal(t, apiKey.ID, retrievedKey.ID)\n\tassert.Equal(t, apiKey.Prefix, retrievedKey.Prefix)\n}\n\nfunc TestGetAPIKeyByIDNotFound(t *testing.T) {\n\tdb, err := newSQLiteTestDB()\n\trequire.NoError(t, err)\n\n\t// Try to get a non-existent key by ID\n\tkey, err := db.GetAPIKeyByID(99999)\n\trequire.Error(t, err)\n\tassert.Nil(t, key)\n}\n"
  },
  {
    "path": "hscontrol/db/db.go",
    "content": "package db\n\nimport (\n\t\"context\"\n\t_ \"embed\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/netip\"\n\t\"path/filepath\"\n\t\"slices\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com/glebarez/sqlite\"\n\t\"github.com/go-gormigrate/gormigrate/v2\"\n\t\"github.com/juanfont/headscale/hscontrol/db/sqliteconfig\"\n\t\"github.com/juanfont/headscale/hscontrol/policy\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/rs/zerolog/log\"\n\t\"github.com/tailscale/squibble\"\n\t\"gorm.io/driver/postgres\"\n\t\"gorm.io/gorm\"\n\t\"gorm.io/gorm/logger\"\n\t\"gorm.io/gorm/schema\"\n\t\"zgo.at/zcache/v2\"\n)\n\n//go:embed schema.sql\nvar dbSchema string\n\nfunc init() {\n\tschema.RegisterSerializer(\"text\", TextSerialiser{})\n}\n\nvar errDatabaseNotSupported = errors.New(\"database type not supported\")\n\nvar errForeignKeyConstraintsViolated = errors.New(\"foreign key constraints violated\")\n\nconst (\n\tmaxIdleConns       = 100\n\tmaxOpenConns       = 100\n\tcontextTimeoutSecs = 10\n)\n\ntype HSDatabase struct {\n\tDB       *gorm.DB\n\tcfg      *types.Config\n\tregCache *zcache.Cache[types.AuthID, types.AuthRequest]\n}\n\n// NewHeadscaleDatabase creates a new database connection and runs migrations.\n// It accepts the full configuration to allow migrations access to policy settings.\n//\n//nolint:gocyclo // complex database initialization with many migrations\nfunc NewHeadscaleDatabase(\n\tcfg *types.Config,\n\tregCache *zcache.Cache[types.AuthID, types.AuthRequest],\n) (*HSDatabase, error) {\n\tdbConn, err := openDB(cfg.Database)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = checkVersionUpgradePath(dbConn)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"version check: %w\", err)\n\t}\n\n\tmigrations := gormigrate.New(\n\t\tdbConn,\n\t\tgormigrate.DefaultOptions,\n\t\t[]*gormigrate.Migration{\n\t\t\t// New migrations must be added as transactions at the end of this list.\n\t\t\t// Migrations start from v0.25.0. 
If upgrading from v0.24.x or earlier,\n\t\t\t// you must first upgrade to v0.25.1 before upgrading to this version.\n\n\t\t\t// v0.25.0\n\t\t\t{\n\t\t\t\t// Add a constraint to routes ensuring they cannot exist without a node.\n\t\t\t\tID: \"202501221827\",\n\t\t\t\tMigrate: func(tx *gorm.DB) error {\n\t\t\t\t\t// Remove any invalid routes associated with a node that does not exist.\n\t\t\t\t\tif tx.Migrator().HasTable(&types.Route{}) && tx.Migrator().HasTable(&types.Node{}) { //nolint:staticcheck // SA1019: Route kept for migrations\n\t\t\t\t\t\terr := tx.Exec(\"delete from routes where node_id not in (select id from nodes)\").Error\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t// Remove any invalid routes without a node_id.\n\t\t\t\t\tif tx.Migrator().HasTable(&types.Route{}) { //nolint:staticcheck // SA1019: Route kept for migrations\n\t\t\t\t\t\terr := tx.Exec(\"delete from routes where node_id is null\").Error\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\terr := tx.AutoMigrate(&types.Route{}) //nolint:staticcheck // SA1019: Route kept for migrations\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"automigrating types.Route: %w\", err)\n\t\t\t\t\t}\n\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t\tRollback: func(db *gorm.DB) error { return nil },\n\t\t\t},\n\t\t\t// Add back a constraint so you cannot delete preauth keys that\n\t\t\t// are still in use by a node.\n\t\t\t{\n\t\t\t\tID: \"202501311657\",\n\t\t\t\tMigrate: func(tx *gorm.DB) error {\n\t\t\t\t\terr := tx.AutoMigrate(&types.PreAuthKey{})\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"automigrating types.PreAuthKey: %w\", err)\n\t\t\t\t\t}\n\n\t\t\t\t\terr = tx.AutoMigrate(&types.Node{})\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"automigrating types.Node: %w\", err)\n\t\t\t\t\t}\n\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t\tRollback: func(db *gorm.DB) error { return nil },\n\t\t\t},\n\t\t\t// Ensure there are no nodes referring to a deleted preauthkey.\n\t\t\t{\n\t\t\t\tID: \"202502070949\",\n\t\t\t\tMigrate: func(tx *gorm.DB) error {\n\t\t\t\t\tif tx.Migrator().HasTable(&types.PreAuthKey{}) {\n\t\t\t\t\t\terr := tx.Exec(`\nUPDATE nodes\nSET auth_key_id = NULL\nWHERE auth_key_id IS NOT NULL\nAND auth_key_id NOT IN (\n    SELECT id FROM pre_auth_keys\n);\n\t\t\t\t\t\t\t`).Error\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"setting auth_key to null on nodes with non-existing keys: %w\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t\tRollback: func(db *gorm.DB) error { return nil },\n\t\t\t},\n\t\t\t// v0.26.0\n\t\t\t// Migrate all routes from the Route table to the new field ApprovedRoutes\n\t\t\t// in the Node table. 
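Only routes marked Enabled are carried\n\t\t\t// over, sorted and deduplicated per node. 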
Then drop the Route table.\n\t\t\t{\n\t\t\t\tID: \"202502131714\",\n\t\t\t\tMigrate: func(tx *gorm.DB) error {\n\t\t\t\t\tif !tx.Migrator().HasColumn(&types.Node{}, \"approved_routes\") {\n\t\t\t\t\t\terr := tx.Migrator().AddColumn(&types.Node{}, \"approved_routes\")\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"adding column types.Node: %w\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tnodeRoutes := map[uint64][]netip.Prefix{}\n\n\t\t\t\t\tvar routes []types.Route //nolint:staticcheck // SA1019: Route kept for migrations\n\n\t\t\t\t\terr := tx.Find(&routes).Error\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"fetching routes: %w\", err)\n\t\t\t\t\t}\n\n\t\t\t\t\tfor _, route := range routes {\n\t\t\t\t\t\tif route.Enabled {\n\t\t\t\t\t\t\tnodeRoutes[route.NodeID] = append(nodeRoutes[route.NodeID], route.Prefix)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tfor nodeID, routes := range nodeRoutes {\n\t\t\t\t\t\tslices.SortFunc(routes, netip.Prefix.Compare)\n\t\t\t\t\t\troutes = slices.Compact(routes)\n\n\t\t\t\t\t\tdata, err := json.Marshal(routes)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"marshalling approved routes for node %d: %w\", nodeID, err)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\terr = tx.Model(&types.Node{}).Where(\"id = ?\", nodeID).Update(\"approved_routes\", data).Error\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"saving approved routes to new column: %w\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t// Drop the old table.\n\t\t\t\t\t_ = tx.Migrator().DropTable(&types.Route{}) //nolint:staticcheck // SA1019: Route kept for migrations\n\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t\tRollback: func(db *gorm.DB) error { return nil },\n\t\t\t},\n\t\t\t{\n\t\t\t\tID: \"202502171819\",\n\t\t\t\tMigrate: func(tx *gorm.DB) error {\n\t\t\t\t\t// This migration originally removed the last_seen column\n\t\t\t\t\t// from the node table, but it was added back in\n\t\t\t\t\t// 202505091439.\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t\tRollback: func(db *gorm.DB) error { return nil },\n\t\t\t},\n\t\t\t// Add back last_seen column to node table.\n\t\t\t{\n\t\t\t\tID: \"202505091439\",\n\t\t\t\tMigrate: func(tx *gorm.DB) error {\n\t\t\t\t\t// Add back last_seen column to node table if it does not exist.\n\t\t\t\t\t// This is a workaround for the fact that the last_seen column\n\t\t\t\t\t// was removed in the 202502171819 migration, but only for some\n\t\t\t\t\t// beta testers.\n\t\t\t\t\tif !tx.Migrator().HasColumn(&types.Node{}, \"last_seen\") {\n\t\t\t\t\t\t_ = tx.Migrator().AddColumn(&types.Node{}, \"last_seen\")\n\t\t\t\t\t}\n\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t\tRollback: func(db *gorm.DB) error { return nil },\n\t\t\t},\n\t\t\t// Fix the provider identifier for users that have a double slash in the\n\t\t\t// provider identifier.\n\t\t\t{\n\t\t\t\tID: \"202505141324\",\n\t\t\t\tMigrate: func(tx *gorm.DB) error {\n\t\t\t\t\tusers, err := ListUsers(tx)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"listing users: %w\", err)\n\t\t\t\t\t}\n\n\t\t\t\t\tfor _, user := range users {\n\t\t\t\t\t\tuser.ProviderIdentifier.String = types.CleanIdentifier(user.ProviderIdentifier.String)\n\n\t\t\t\t\t\terr := tx.Save(user).Error\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"saving user: %w\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t\tRollback: func(db *gorm.DB) error { return nil },\n\t\t\t},\n\t\t\t// v0.27.0\n\t\t\t// Schema migration to ensure all tables match the expected schema.\n\t\t\t// This migration recreates all tables to match the exact structure in schema.sql,\n\t\t\t// preserving all data during the 
process.\n\t\t\t// Only SQLite will be migrated for consistency.\n\t\t\t{\n\t\t\t\tID: \"202507021200\",\n\t\t\t\tMigrate: func(tx *gorm.DB) error {\n\t\t\t\t\t// Only run on SQLite\n\t\t\t\t\tif cfg.Database.Type != types.DatabaseSqlite {\n\t\t\t\t\t\tlog.Info().Msg(\"skipping schema migration on non-SQLite database\")\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\n\t\t\t\t\tlog.Info().Msg(\"starting schema recreation with table renaming\")\n\n\t\t\t\t\t// Rename existing tables to _old versions\n\t\t\t\t\ttablesToRename := []string{\"users\", \"pre_auth_keys\", \"api_keys\", \"nodes\", \"policies\"}\n\n\t\t\t\t\t// Check if routes table exists and drop it (should have been migrated already)\n\t\t\t\t\tvar routesExists bool\n\n\t\t\t\t\terr := tx.Raw(\"SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='routes'\").Row().Scan(&routesExists)\n\t\t\t\t\tif err == nil && routesExists {\n\t\t\t\t\t\tlog.Info().Msg(\"dropping leftover routes table\")\n\n\t\t\t\t\t\terr := tx.Exec(\"DROP TABLE routes\").Error\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"dropping routes table: %w\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t// Drop all indexes first to avoid conflicts\n\t\t\t\t\tindexesToDrop := []string{\n\t\t\t\t\t\t\"idx_users_deleted_at\",\n\t\t\t\t\t\t\"idx_provider_identifier\",\n\t\t\t\t\t\t\"idx_name_provider_identifier\",\n\t\t\t\t\t\t\"idx_name_no_provider_identifier\",\n\t\t\t\t\t\t\"idx_api_keys_prefix\",\n\t\t\t\t\t\t\"idx_policies_deleted_at\",\n\t\t\t\t\t}\n\n\t\t\t\t\tfor _, index := range indexesToDrop {\n\t\t\t\t\t\t_ = tx.Exec(\"DROP INDEX IF EXISTS \" + index).Error\n\t\t\t\t\t}\n\n\t\t\t\t\tfor _, table := range tablesToRename {\n\t\t\t\t\t\t// Check if table exists before renaming\n\t\t\t\t\t\tvar exists bool\n\n\t\t\t\t\t\terr := tx.Raw(\"SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name=?\", table).Row().Scan(&exists)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"checking if table %s exists: %w\", table, err)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif exists {\n\t\t\t\t\t\t\t// Drop old table if it exists from previous failed migration\n\t\t\t\t\t\t\t_ = tx.Exec(\"DROP TABLE IF EXISTS \" + table + \"_old\").Error\n\n\t\t\t\t\t\t\t// Rename current table to _old\n\t\t\t\t\t\t\terr := tx.Exec(\"ALTER TABLE \" + table + \" RENAME TO \" + table + \"_old\").Error\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\treturn fmt.Errorf(\"renaming table %s to %s_old: %w\", table, table, err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t// Create new tables with correct schema\n\t\t\t\t\ttableCreationSQL := []string{\n\t\t\t\t\t\t`CREATE TABLE users(\n  id integer PRIMARY KEY AUTOINCREMENT,\n  name text,\n  display_name text,\n  email text,\n  provider_identifier text,\n  provider text,\n  profile_pic_url text,\n  created_at datetime,\n  updated_at datetime,\n  deleted_at datetime\n)`,\n\t\t\t\t\t\t`CREATE TABLE pre_auth_keys(\n  id integer PRIMARY KEY AUTOINCREMENT,\n  key text,\n  user_id integer,\n  reusable numeric,\n  ephemeral numeric DEFAULT false,\n  used numeric DEFAULT false,\n  tags text,\n  expiration datetime,\n  created_at datetime,\n  CONSTRAINT fk_pre_auth_keys_user FOREIGN KEY(user_id) REFERENCES users(id) ON DELETE SET NULL\n)`,\n\t\t\t\t\t\t`CREATE TABLE api_keys(\n  id integer PRIMARY KEY AUTOINCREMENT,\n  prefix text,\n  hash blob,\n  expiration datetime,\n  last_seen datetime,\n  created_at datetime\n)`,\n\t\t\t\t\t\t`CREATE TABLE nodes(\n  id integer PRIMARY KEY AUTOINCREMENT,\n  machine_key text,\n  
node_key text,\n  disco_key text,\n  endpoints text,\n  host_info text,\n  ipv4 text,\n  ipv6 text,\n  hostname text,\n  given_name varchar(63),\n  user_id integer,\n  register_method text,\n  forced_tags text,\n  auth_key_id integer,\n  last_seen datetime,\n  expiry datetime,\n  approved_routes text,\n  created_at datetime,\n  updated_at datetime,\n  deleted_at datetime,\n  CONSTRAINT fk_nodes_user FOREIGN KEY(user_id) REFERENCES users(id) ON DELETE CASCADE,\n  CONSTRAINT fk_nodes_auth_key FOREIGN KEY(auth_key_id) REFERENCES pre_auth_keys(id)\n)`,\n\t\t\t\t\t\t`CREATE TABLE policies(\n  id integer PRIMARY KEY AUTOINCREMENT,\n  data text,\n  created_at datetime,\n  updated_at datetime,\n  deleted_at datetime\n)`,\n\t\t\t\t\t}\n\n\t\t\t\t\tfor _, createSQL := range tableCreationSQL {\n\t\t\t\t\t\terr := tx.Exec(createSQL).Error\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"creating new table: %w\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t// Copy data directly using SQL\n\t\t\t\t\tdataCopySQL := []string{\n\t\t\t\t\t\t`INSERT INTO users (id, name, display_name, email, provider_identifier, provider, profile_pic_url, created_at, updated_at, deleted_at)\n             SELECT id, name, display_name, email, provider_identifier, provider, profile_pic_url, created_at, updated_at, deleted_at\n             FROM users_old`,\n\n\t\t\t\t\t\t`INSERT INTO pre_auth_keys (id, key, user_id, reusable, ephemeral, used, tags, expiration, created_at)\n             SELECT id, key, user_id, reusable, ephemeral, used, tags, expiration, created_at\n             FROM pre_auth_keys_old`,\n\n\t\t\t\t\t\t`INSERT INTO api_keys (id, prefix, hash, expiration, last_seen, created_at)\n             SELECT id, prefix, hash, expiration, last_seen, created_at\n             FROM api_keys_old`,\n\n\t\t\t\t\t\t`INSERT INTO nodes (id, machine_key, node_key, disco_key, endpoints, host_info, ipv4, ipv6, hostname, given_name, user_id, register_method, forced_tags, auth_key_id, last_seen, expiry, approved_routes, created_at, updated_at, deleted_at)\n             SELECT id, machine_key, node_key, disco_key, endpoints, host_info, ipv4, ipv6, hostname, given_name, user_id, register_method, forced_tags, auth_key_id, last_seen, expiry, approved_routes, created_at, updated_at, deleted_at\n             FROM nodes_old`,\n\n\t\t\t\t\t\t`INSERT INTO policies (id, data, created_at, updated_at, deleted_at)\n             SELECT id, data, created_at, updated_at, deleted_at\n             FROM policies_old`,\n\t\t\t\t\t}\n\n\t\t\t\t\tfor _, copySQL := range dataCopySQL {\n\t\t\t\t\t\terr := tx.Exec(copySQL).Error\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"copying data: %w\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t// Create indexes\n\t\t\t\t\tindexes := []string{\n\t\t\t\t\t\t\"CREATE INDEX idx_users_deleted_at ON users(deleted_at)\",\n\t\t\t\t\t\t`CREATE UNIQUE INDEX idx_provider_identifier ON users(\n  provider_identifier\n) WHERE provider_identifier IS NOT NULL`,\n\t\t\t\t\t\t`CREATE UNIQUE INDEX idx_name_provider_identifier ON users(\n  name,\n  provider_identifier\n)`,\n\t\t\t\t\t\t`CREATE UNIQUE INDEX idx_name_no_provider_identifier ON users(\n  name\n) WHERE provider_identifier IS NULL`,\n\t\t\t\t\t\t\"CREATE UNIQUE INDEX idx_api_keys_prefix ON api_keys(prefix)\",\n\t\t\t\t\t\t\"CREATE INDEX idx_policies_deleted_at ON policies(deleted_at)\",\n\t\t\t\t\t}\n\n\t\t\t\t\tfor _, indexSQL := range indexes {\n\t\t\t\t\t\terr := tx.Exec(indexSQL).Error\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn 
fmt.Errorf(\"creating index: %w\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t// Drop old tables only after everything succeeds\n\t\t\t\t\tfor _, table := range tablesToRename {\n\t\t\t\t\t\terr := tx.Exec(\"DROP TABLE IF EXISTS \" + table + \"_old\").Error\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Warn().Str(\"table\", table+\"_old\").Err(err).Msg(\"failed to drop old table, but migration succeeded\")\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tlog.Info().Msg(\"schema recreation completed successfully\")\n\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t\tRollback: func(db *gorm.DB) error { return nil },\n\t\t\t},\n\t\t\t// v0.27.1\n\t\t\t{\n\t\t\t\t// Drop all tables that are no longer in use and has existed.\n\t\t\t\t// They potentially still present from broken migrations in the past.\n\t\t\t\tID: \"202510311551\",\n\t\t\t\tMigrate: func(tx *gorm.DB) error {\n\t\t\t\t\tfor _, oldTable := range []string{\"namespaces\", \"machines\", \"shared_machines\", \"kvs\", \"pre_auth_key_acl_tags\", \"routes\"} {\n\t\t\t\t\t\terr := tx.Migrator().DropTable(oldTable)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Trace().Str(\"table\", oldTable).\n\t\t\t\t\t\t\t\tErr(err).\n\t\t\t\t\t\t\t\tMsg(\"Error dropping old table, continuing...\")\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t\tRollback: func(tx *gorm.DB) error {\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\t// Drop all indices that are no longer in use and has existed.\n\t\t\t\t// They potentially still present from broken migrations in the past.\n\t\t\t\t// They should all be cleaned up by the db engine, but we are a bit\n\t\t\t\t// conservative to ensure all our previous mess is cleaned up.\n\t\t\t\tID: \"202511101554-drop-old-idx\",\n\t\t\t\tMigrate: func(tx *gorm.DB) error {\n\t\t\t\t\tfor _, oldIdx := range []struct{ name, table string }{\n\t\t\t\t\t\t{\"idx_namespaces_deleted_at\", \"namespaces\"},\n\t\t\t\t\t\t{\"idx_routes_deleted_at\", \"routes\"},\n\t\t\t\t\t\t{\"idx_shared_machines_deleted_at\", \"shared_machines\"},\n\t\t\t\t\t} {\n\t\t\t\t\t\terr := tx.Migrator().DropIndex(oldIdx.table, oldIdx.name)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Trace().\n\t\t\t\t\t\t\t\tStr(\"index\", oldIdx.name).\n\t\t\t\t\t\t\t\tStr(\"table\", oldIdx.table).\n\t\t\t\t\t\t\t\tErr(err).\n\t\t\t\t\t\t\t\tMsg(\"Error dropping old index, continuing...\")\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t\tRollback: func(tx *gorm.DB) error {\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t// Migrations **above** this points will be REMOVED in version **0.29.0**\n\t\t\t// This is to clean up a lot of old migrations that is seldom used\n\t\t\t// and carries a lot of technical debt.\n\t\t\t// Any new migrations should be added after the comment below and follow\n\t\t\t// the rules it sets out.\n\n\t\t\t// From this point, the following rules must be followed:\n\t\t\t// - NEVER use gorm.AutoMigrate, write the exact migration steps needed\n\t\t\t// - AutoMigrate depends on the struct staying exactly the same, which it won't over time.\n\t\t\t// - Never write migrations that requires foreign keys to be disabled.\n\t\t\t// - ALL errors in migrations must be handled properly.\n\n\t\t\t{\n\t\t\t\t// Add columns for prefix and hash for pre auth keys, implementing\n\t\t\t\t// them with the same security model as api keys.\n\t\t\t\tID: \"202511011637-preauthkey-bcrypt\",\n\t\t\t\tMigrate: func(tx *gorm.DB) error {\n\t\t\t\t\t// Check and add prefix column if it doesn't exist\n\t\t\t\t\tif 
!tx.Migrator().HasColumn(&types.PreAuthKey{}, \"prefix\") {\n\t\t\t\t\t\terr := tx.Migrator().AddColumn(&types.PreAuthKey{}, \"prefix\")\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"adding prefix column: %w\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t// Check and add hash column if it doesn't exist\n\t\t\t\t\tif !tx.Migrator().HasColumn(&types.PreAuthKey{}, \"hash\") {\n\t\t\t\t\t\terr := tx.Migrator().AddColumn(&types.PreAuthKey{}, \"hash\")\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"adding hash column: %w\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t// Create partial unique index to allow multiple legacy keys (NULL/empty prefix)\n\t\t\t\t\t// while enforcing uniqueness for new bcrypt-based keys\n\t\t\t\t\terr := tx.Exec(\"CREATE UNIQUE INDEX IF NOT EXISTS idx_pre_auth_keys_prefix ON pre_auth_keys(prefix) WHERE prefix IS NOT NULL AND prefix != ''\").Error\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"creating prefix index: %w\", err)\n\t\t\t\t\t}\n\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t\tRollback: func(db *gorm.DB) error { return nil },\n\t\t\t},\n\t\t\t{\n\t\t\t\tID: \"202511122344-remove-newline-index\",\n\t\t\t\tMigrate: func(tx *gorm.DB) error {\n\t\t\t\t\t// Reformat multi-line indexes to single-line for consistency\n\t\t\t\t\t// This migration drops and recreates the three user identity indexes\n\t\t\t\t\t// to match the single-line format expected by schema validation\n\n\t\t\t\t\t// Drop existing multi-line indexes\n\t\t\t\t\tdropIndexes := []string{\n\t\t\t\t\t\t`DROP INDEX IF EXISTS idx_provider_identifier`,\n\t\t\t\t\t\t`DROP INDEX IF EXISTS idx_name_provider_identifier`,\n\t\t\t\t\t\t`DROP INDEX IF EXISTS idx_name_no_provider_identifier`,\n\t\t\t\t\t}\n\n\t\t\t\t\tfor _, dropSQL := range dropIndexes {\n\t\t\t\t\t\terr := tx.Exec(dropSQL).Error\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"dropping index: %w\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t// Recreate indexes in single-line format\n\t\t\t\t\tcreateIndexes := []string{\n\t\t\t\t\t\t`CREATE UNIQUE INDEX idx_provider_identifier ON users(provider_identifier) WHERE provider_identifier IS NOT NULL`,\n\t\t\t\t\t\t`CREATE UNIQUE INDEX idx_name_provider_identifier ON users(name, provider_identifier)`,\n\t\t\t\t\t\t`CREATE UNIQUE INDEX idx_name_no_provider_identifier ON users(name) WHERE provider_identifier IS NULL`,\n\t\t\t\t\t}\n\n\t\t\t\t\tfor _, createSQL := range createIndexes {\n\t\t\t\t\t\terr := tx.Exec(createSQL).Error\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"creating index: %w\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t\tRollback: func(db *gorm.DB) error { return nil },\n\t\t\t},\n\t\t\t{\n\t\t\t\t// Rename forced_tags column to tags in nodes table.\n\t\t\t\t// This must run after migration 202505141324 which creates tables with forced_tags.\n\t\t\t\tID: \"202511131445-node-forced-tags-to-tags\",\n\t\t\t\tMigrate: func(tx *gorm.DB) error {\n\t\t\t\t\t// Rename the column from forced_tags to tags\n\t\t\t\t\terr := tx.Migrator().RenameColumn(&types.Node{}, \"forced_tags\", \"tags\")\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"renaming forced_tags to tags: %w\", err)\n\t\t\t\t\t}\n\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t\tRollback: func(db *gorm.DB) error { return nil },\n\t\t\t},\n\t\t\t{\n\t\t\t\t// Migrate RequestTags from host_info JSON to tags column.\n\t\t\t\t// In 0.27.x, tags from --advertise-tags (ValidTags) were stored only in\n\t\t\t\t// 
host_info.RequestTags, not in the tags column (formerly forced_tags).\n\t\t\t\t// This migration validates RequestTags against the policy's tagOwners\n\t\t\t\t// and merges validated tags into the tags column.\n\t\t\t\t// Fixes: https://github.com/juanfont/headscale/issues/3006\n\t\t\t\tID: \"202601121700-migrate-hostinfo-request-tags\",\n\t\t\t\tMigrate: func(tx *gorm.DB) error {\n\t\t\t\t\t// 1. Load policy from file or database based on configuration\n\t\t\t\t\tpolicyData, err := PolicyBytes(tx, cfg)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Warn().Err(err).Msg(\"failed to load policy, skipping RequestTags migration (tags will be validated on node reconnect)\")\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\n\t\t\t\t\tif len(policyData) == 0 {\n\t\t\t\t\t\tlog.Info().Msg(\"no policy found, skipping RequestTags migration (tags will be validated on node reconnect)\")\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\n\t\t\t\t\t// 2. Load users and nodes to create PolicyManager\n\t\t\t\t\tusers, err := ListUsers(tx)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"loading users for RequestTags migration: %w\", err)\n\t\t\t\t\t}\n\n\t\t\t\t\tnodes, err := ListNodes(tx)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"loading nodes for RequestTags migration: %w\", err)\n\t\t\t\t\t}\n\n\t\t\t\t\t// 3. Create PolicyManager (handles HuJSON parsing, groups, nested tags, etc.)\n\t\t\t\t\tpolMan, err := policy.NewPolicyManager(policyData, users, nodes.ViewSlice())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Warn().Err(err).Msg(\"failed to parse policy, skipping RequestTags migration (tags will be validated on node reconnect)\")\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\n\t\t\t\t\t// 4. Process each node\n\t\t\t\t\tfor _, node := range nodes {\n\t\t\t\t\t\tif node.Hostinfo == nil {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\trequestTags := node.Hostinfo.RequestTags\n\t\t\t\t\t\tif len(requestTags) == 0 {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\texistingTags := node.Tags\n\n\t\t\t\t\t\tvar validatedTags, rejectedTags []string\n\n\t\t\t\t\t\tnodeView := node.View()\n\n\t\t\t\t\t\tfor _, tag := range requestTags {\n\t\t\t\t\t\t\tif polMan.NodeCanHaveTag(nodeView, tag) {\n\t\t\t\t\t\t\t\tif !slices.Contains(existingTags, tag) {\n\t\t\t\t\t\t\t\t\tvalidatedTags = append(validatedTags, tag)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\trejectedTags = append(rejectedTags, tag)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif len(validatedTags) == 0 {\n\t\t\t\t\t\t\tif len(rejectedTags) > 0 {\n\t\t\t\t\t\t\t\tlog.Debug().\n\t\t\t\t\t\t\t\t\tEmbedObject(node).\n\t\t\t\t\t\t\t\t\tStrs(\"rejected_tags\", rejectedTags).\n\t\t\t\t\t\t\t\t\tMsg(\"RequestTags rejected during migration (not authorized)\")\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tmergedTags := append(existingTags, validatedTags...)\n\t\t\t\t\t\tslices.Sort(mergedTags)\n\t\t\t\t\t\tmergedTags = slices.Compact(mergedTags)\n\n\t\t\t\t\t\ttagsJSON, err := json.Marshal(mergedTags)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"serializing merged tags for node %d: %w\", node.ID, err)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\terr = tx.Exec(\"UPDATE nodes SET tags = ? 
WHERE id = ?\", string(tagsJSON), node.ID).Error\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"updating tags for node %d: %w\", node.ID, err)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tlog.Info().\n\t\t\t\t\t\t\tEmbedObject(node).\n\t\t\t\t\t\t\tStrs(\"validated_tags\", validatedTags).\n\t\t\t\t\t\t\tStrs(\"rejected_tags\", rejectedTags).\n\t\t\t\t\t\t\tStrs(\"existing_tags\", existingTags).\n\t\t\t\t\t\t\tStrs(\"merged_tags\", mergedTags).\n\t\t\t\t\t\t\tMsg(\"Migrated validated RequestTags from host_info to tags column\")\n\t\t\t\t\t}\n\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t\tRollback: func(db *gorm.DB) error { return nil },\n\t\t\t},\n\t\t\t{\n\t\t\t\t// Clear user_id on tagged nodes.\n\t\t\t\t// Tagged nodes are owned by their tags, not a user.\n\t\t\t\t// Previously user_id was kept as \"created by\" tracking,\n\t\t\t\t// but this prevents deleting users whose nodes have been\n\t\t\t\t// tagged, and the ON DELETE CASCADE FK would destroy the\n\t\t\t\t// tagged nodes if the user were deleted.\n\t\t\t\t// Fixes: https://github.com/juanfont/headscale/issues/3077\n\t\t\t\tID: \"202602201200-clear-tagged-node-user-id\",\n\t\t\t\tMigrate: func(tx *gorm.DB) error {\n\t\t\t\t\terr := tx.Exec(`\nUPDATE nodes\nSET user_id = NULL\nWHERE tags IS NOT NULL AND tags != '[]' AND tags != '';\n\t\t\t\t\t\t`).Error\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"clearing user_id on tagged nodes: %w\", err)\n\t\t\t\t\t}\n\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t\tRollback: func(db *gorm.DB) error { return nil },\n\t\t\t},\n\t\t},\n\t)\n\n\tmigrations.InitSchema(func(tx *gorm.DB) error {\n\t\t// Create all tables using AutoMigrate\n\t\terr := tx.AutoMigrate(\n\t\t\t&types.User{},\n\t\t\t&types.PreAuthKey{},\n\t\t\t&types.APIKey{},\n\t\t\t&types.Node{},\n\t\t\t&types.Policy{},\n\t\t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Drop all indexes (both GORM-created and potentially pre-existing ones)\n\t\t// to ensure we can recreate them in the correct format\n\t\tdropIndexes := []string{\n\t\t\t`DROP INDEX IF EXISTS \"idx_users_deleted_at\"`,\n\t\t\t`DROP INDEX IF EXISTS \"idx_api_keys_prefix\"`,\n\t\t\t`DROP INDEX IF EXISTS \"idx_policies_deleted_at\"`,\n\t\t\t`DROP INDEX IF EXISTS \"idx_provider_identifier\"`,\n\t\t\t`DROP INDEX IF EXISTS \"idx_name_provider_identifier\"`,\n\t\t\t`DROP INDEX IF EXISTS \"idx_name_no_provider_identifier\"`,\n\t\t\t`DROP INDEX IF EXISTS \"idx_pre_auth_keys_prefix\"`,\n\t\t}\n\n\t\tfor _, dropSQL := range dropIndexes {\n\t\t\terr := tx.Exec(dropSQL).Error\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// Recreate indexes without backticks to match schema.sql format\n\t\tindexes := []string{\n\t\t\t`CREATE INDEX idx_users_deleted_at ON users(deleted_at)`,\n\t\t\t`CREATE UNIQUE INDEX idx_api_keys_prefix ON api_keys(prefix)`,\n\t\t\t`CREATE INDEX idx_policies_deleted_at ON policies(deleted_at)`,\n\t\t\t`CREATE UNIQUE INDEX idx_provider_identifier ON users(provider_identifier) WHERE provider_identifier IS NOT NULL`,\n\t\t\t`CREATE UNIQUE INDEX idx_name_provider_identifier ON users(name, provider_identifier)`,\n\t\t\t`CREATE UNIQUE INDEX idx_name_no_provider_identifier ON users(name) WHERE provider_identifier IS NULL`,\n\t\t\t`CREATE UNIQUE INDEX idx_pre_auth_keys_prefix ON pre_auth_keys(prefix) WHERE prefix IS NOT NULL AND prefix != ''`,\n\t\t}\n\n\t\tfor _, indexSQL := range indexes {\n\t\t\terr := tx.Exec(indexSQL).Error\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n\terr = 
runMigrations(cfg.Database, dbConn, migrations)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"migration failed: %w\", err)\n\t}\n\n\t// Store the current version in the database after migrations succeed.\n\t// Dev builds skip this to preserve the stored version for the next\n\t// real versioned binary.\n\tcurrentVersion := types.GetVersionInfo().Version\n\tif !isDev(currentVersion) {\n\t\terr = setDatabaseVersion(dbConn, currentVersion)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\"storing database version: %w\",\n\t\t\t\terr,\n\t\t\t)\n\t\t}\n\t}\n\n\t// Validate that the schema ends up in the expected state.\n\t// This is currently only done on sqlite as squibble does not\n\t// support Postgres and we use our sqlite schema as our source of\n\t// truth.\n\tif cfg.Database.Type == types.DatabaseSqlite {\n\t\tsqlConn, err := dbConn.DB()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"getting DB from gorm: %w\", err)\n\t\t}\n\n\t\t// Temporarily raise the connection limits, or else the schema\n\t\t// validation below blocks.\n\t\tsqlConn.SetMaxIdleConns(maxIdleConns)\n\t\tsqlConn.SetMaxOpenConns(maxOpenConns)\n\t\tdefer sqlConn.SetMaxIdleConns(1)\n\t\tdefer sqlConn.SetMaxOpenConns(1)\n\n\t\tctx, cancel := context.WithTimeout(context.Background(), contextTimeoutSecs*time.Second)\n\t\tdefer cancel()\n\n\t\topts := squibble.DigestOptions{\n\t\t\tIgnoreTables: []string{\n\t\t\t\t// Litestream tables, these are inserted by\n\t\t\t\t// litestream and not part of our schema\n\t\t\t\t// https://litestream.io/how-it-works\n\t\t\t\t\"_litestream_lock\",\n\t\t\t\t\"_litestream_seq\",\n\t\t\t},\n\t\t}\n\n\t\tif err := squibble.Validate(ctx, sqlConn, dbSchema, &opts); err != nil { //nolint:noinlineerr\n\t\t\treturn nil, fmt.Errorf(\"validating schema: %w\", err)\n\t\t}\n\t}\n\n\tdb := HSDatabase{\n\t\tDB:       dbConn,\n\t\tcfg:      cfg,\n\t\tregCache: regCache,\n\t}\n\n\treturn &db, nil\n}\n\nfunc openDB(cfg types.DatabaseConfig) (*gorm.DB, error) {\n\t// TODO(kradalby): Integrate this with zerolog\n\tvar dbLogger logger.Interface\n\tif cfg.Debug {\n\t\tdbLogger = util.NewDBLogWrapper(&log.Logger, cfg.Gorm.SlowThreshold, cfg.Gorm.SkipErrRecordNotFound, cfg.Gorm.ParameterizedQueries)\n\t} else {\n\t\tdbLogger = logger.Default.LogMode(logger.Silent)\n\t}\n\n\tswitch cfg.Type {\n\tcase types.DatabaseSqlite:\n\t\tdir := filepath.Dir(cfg.Sqlite.Path)\n\n\t\terr := util.EnsureDir(dir)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"creating directory for sqlite: %w\", err)\n\t\t}\n\n\t\tlog.Info().\n\t\t\tStr(\"database\", types.DatabaseSqlite).\n\t\t\tStr(\"path\", cfg.Sqlite.Path).\n\t\t\tMsg(\"Opening database\")\n\n\t\t// Build SQLite configuration with pragmas set at connection time\n\t\tsqliteConfig := sqliteconfig.Default(cfg.Sqlite.Path)\n\t\tif cfg.Sqlite.WriteAheadLog {\n\t\t\tsqliteConfig.JournalMode = sqliteconfig.JournalModeWAL\n\t\t\tsqliteConfig.WALAutocheckpoint = cfg.Sqlite.WALAutoCheckPoint\n\t\t}\n\n\t\tconnectionURL, err := sqliteConfig.ToURL()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"building sqlite connection URL: %w\", err)\n\t\t}\n\n\t\tdb, err := gorm.Open(\n\t\t\tsqlite.Open(connectionURL),\n\t\t\t&gorm.Config{\n\t\t\t\tPrepareStmt: cfg.Gorm.PrepareStmt,\n\t\t\t\tLogger:      dbLogger,\n\t\t\t},\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"opening sqlite: %w\", err)\n\t\t}\n\n\t\t// The pure Go SQLite library does not handle locking in\n\t\t// the same way as the C based one and we can't use the gorm\n\t\t// connection pool as of 2022/02/23.\n\t\tsqlDB, _ := db.DB()\n\t\tsqlDB.SetMaxIdleConns(1)\n\t\tsqlDB.SetMaxOpenConns(1)\n\t\tsqlDB.SetConnMaxIdleTime(time.Hour)\n\n\t\treturn 
db, err\n\n\tcase types.DatabasePostgres:\n\t\tdbString := fmt.Sprintf(\n\t\t\t\"host=%s dbname=%s user=%s\",\n\t\t\tcfg.Postgres.Host,\n\t\t\tcfg.Postgres.Name,\n\t\t\tcfg.Postgres.User,\n\t\t)\n\n\t\tlog.Info().\n\t\t\tStr(\"database\", types.DatabasePostgres).\n\t\t\tStr(\"path\", dbString).\n\t\t\tMsg(\"Opening database\")\n\n\t\tif sslEnabled, err := strconv.ParseBool(cfg.Postgres.Ssl); err == nil { //nolint:noinlineerr\n\t\t\tif !sslEnabled {\n\t\t\t\tdbString += \" sslmode=disable\"\n\t\t\t}\n\t\t} else {\n\t\t\tdbString += \" sslmode=\" + cfg.Postgres.Ssl\n\t\t}\n\n\t\tif cfg.Postgres.Port != 0 {\n\t\t\tdbString += fmt.Sprintf(\" port=%d\", cfg.Postgres.Port)\n\t\t}\n\n\t\tif cfg.Postgres.Pass != \"\" {\n\t\t\tdbString += \" password=\" + cfg.Postgres.Pass\n\t\t}\n\n\t\tdb, err := gorm.Open(postgres.Open(dbString), &gorm.Config{\n\t\t\tLogger: dbLogger,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tsqlDB, _ := db.DB()\n\t\tsqlDB.SetMaxIdleConns(cfg.Postgres.MaxIdleConnections)\n\t\tsqlDB.SetMaxOpenConns(cfg.Postgres.MaxOpenConnections)\n\t\tsqlDB.SetConnMaxIdleTime(\n\t\t\ttime.Duration(cfg.Postgres.ConnMaxIdleTimeSecs) * time.Second,\n\t\t)\n\n\t\treturn db, nil\n\t}\n\n\treturn nil, fmt.Errorf(\n\t\t\"database of type %s is not supported: %w\",\n\t\tcfg.Type,\n\t\terrDatabaseNotSupported,\n\t)\n}\n\nfunc runMigrations(cfg types.DatabaseConfig, dbConn *gorm.DB, migrations *gormigrate.Gormigrate) error {\n\tif cfg.Type == types.DatabaseSqlite {\n\t\t// SQLite: Run migrations step-by-step, only disabling foreign keys when necessary\n\n\t\t// List of migration IDs that require foreign keys to be disabled\n\t\t// These are migrations that perform complex schema changes that GORM cannot handle safely with FK enabled\n\t\t// NO NEW MIGRATIONS SHOULD BE ADDED HERE. 
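(The PRAGMA foreign_key_check at the end of this\n\t\t// function will surface any rows orphaned while FKs were off.)\n\t\t// 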
ALL NEW MIGRATIONS MUST RUN WITH FOREIGN KEYS ENABLED.\n\t\tmigrationsRequiringFKDisabled := map[string]bool{\n\t\t\t\"202501221827\": true, // Route table automigration with FK constraint issues\n\t\t\t\"202501311657\": true, // PreAuthKey table automigration with FK constraint issues\n\t\t\t// Add other migration IDs here as they are identified to need FK disabled\n\t\t}\n\n\t\t// Get the current foreign key status\n\t\tvar fkOriginallyEnabled int\n\t\tif err := dbConn.Raw(\"PRAGMA foreign_keys\").Scan(&fkOriginallyEnabled).Error; err != nil { //nolint:noinlineerr\n\t\t\treturn fmt.Errorf(\"checking foreign key status: %w\", err)\n\t\t}\n\n\t\t// Get all migration IDs in order from the actual migration definitions.\n\t\t// Only IDs that are in the migrationsRequiringFKDisabled map will be processed with FK disabled;\n\t\t// any other new migrations are run after.\n\t\tmigrationIDs := []string{\n\t\t\t// v0.25.0\n\t\t\t\"202501221827\",\n\t\t\t\"202501311657\",\n\t\t\t\"202502070949\",\n\n\t\t\t// v0.26.0\n\t\t\t\"202502131714\",\n\t\t\t\"202502171819\",\n\t\t\t\"202505091439\",\n\t\t\t\"202505141324\",\n\n\t\t\t// As of 2025-07-02, no new IDs should be added here.\n\t\t\t// They will be run by the migrations.Migrate() call below.\n\t\t}\n\n\t\tfor _, migrationID := range migrationIDs {\n\t\t\tlog.Trace().Caller().Str(\"migration_id\", migrationID).Msg(\"running migration\")\n\t\t\tneedsFKDisabled := migrationsRequiringFKDisabled[migrationID]\n\n\t\t\tif needsFKDisabled {\n\t\t\t\t// Disable foreign keys for this migration\n\t\t\t\terr := dbConn.Exec(\"PRAGMA foreign_keys = OFF\").Error\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"disabling foreign keys for migration %s: %w\", migrationID, err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Ensure foreign keys are enabled for this migration\n\t\t\t\terr := dbConn.Exec(\"PRAGMA foreign_keys = ON\").Error\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"enabling foreign keys for migration %s: %w\", migrationID, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Run up to this specific migration (will only run the next pending migration)\n\t\t\terr := migrations.MigrateTo(migrationID)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"running migration %s: %w\", migrationID, err)\n\t\t\t}\n\t\t}\n\n\t\tif err := dbConn.Exec(\"PRAGMA foreign_keys = ON\").Error; err != nil { //nolint:noinlineerr\n\t\t\treturn fmt.Errorf(\"restoring foreign keys: %w\", err)\n\t\t}\n\n\t\t// Run the rest of the migrations\n\t\tif err := migrations.Migrate(); err != nil { //nolint:noinlineerr\n\t\t\treturn err\n\t\t}\n\n\t\t// Check for constraint violations at the end\n\t\ttype constraintViolation struct {\n\t\t\tTable           string\n\t\t\tRowID           int\n\t\t\tParent          string\n\t\t\tConstraintIndex int\n\t\t}\n\n\t\tvar violatedConstraints []constraintViolation\n\n\t\trows, err := dbConn.Raw(\"PRAGMA foreign_key_check\").Rows()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer rows.Close()\n\n\t\tfor rows.Next() {\n\t\t\tvar violation constraintViolation\n\n\t\t\terr := rows.Scan(&violation.Table, &violation.RowID, &violation.Parent, &violation.ConstraintIndex)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tviolatedConstraints = append(violatedConstraints, violation)\n\t\t}\n\n\t\tif err := rows.Err(); err != nil { //nolint:noinlineerr\n\t\t\treturn err\n\t\t}\n\n\t\tif len(violatedConstraints) > 0 {\n\t\t\tfor _, violation := range violatedConstraints {\n\t\t\t\tlog.Error().\n\t\t\t\t\tStr(\"table\", 
violation.Table).\n\t\t\t\t\tInt(\"row_id\", violation.RowID).\n\t\t\t\t\tStr(\"parent\", violation.Parent).\n\t\t\t\t\tMsg(\"Foreign key constraint violated\")\n\t\t\t}\n\n\t\t\treturn errForeignKeyConstraintsViolated\n\t\t}\n\t} else {\n\t\t// PostgreSQL can run all migrations in one block - no foreign key issues\n\t\terr := migrations.Migrate()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// PingDB reports whether the database is reachable, bounded by a one second\n// timeout.\nfunc (hsdb *HSDatabase) PingDB(ctx context.Context) error {\n\tctx, cancel := context.WithTimeout(ctx, time.Second)\n\tdefer cancel()\n\n\tsqlDB, err := hsdb.DB.DB()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn sqlDB.PingContext(ctx)\n}\n\n// Close closes the underlying database connection, running VACUUM first on\n// SQLite databases that use the write-ahead log.\nfunc (hsdb *HSDatabase) Close() error {\n\tdb, err := hsdb.DB.DB()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif hsdb.cfg.Database.Type == types.DatabaseSqlite && hsdb.cfg.Database.Sqlite.WriteAheadLog {\n\t\tdb.Exec(\"VACUUM\") //nolint:errcheck,noctx\n\t}\n\n\treturn db.Close()\n}\n\n// Read runs fn in a transaction that is always rolled back, giving fn a\n// consistent read-only view of the database.\nfunc (hsdb *HSDatabase) Read(fn func(rx *gorm.DB) error) error {\n\trx := hsdb.DB.Begin()\n\tdefer rx.Rollback()\n\n\treturn fn(rx)\n}\n\n// Read runs fn in an always-rolled-back transaction and returns its result.\nfunc Read[T any](db *gorm.DB, fn func(rx *gorm.DB) (T, error)) (T, error) {\n\trx := db.Begin()\n\tdefer rx.Rollback()\n\n\tret, err := fn(rx)\n\tif err != nil {\n\t\tvar no T\n\t\treturn no, err\n\t}\n\n\treturn ret, nil\n}\n\n// Write runs fn in a transaction, committing on success and rolling back on\n// error.\nfunc (hsdb *HSDatabase) Write(fn func(tx *gorm.DB) error) error {\n\ttx := hsdb.DB.Begin()\n\tdefer tx.Rollback()\n\n\terr := fn(tx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn tx.Commit().Error\n}\n\n// Write runs fn in a transaction and returns its result, committing on\n// success and rolling back on error.\nfunc Write[T any](db *gorm.DB, fn func(tx *gorm.DB) (T, error)) (T, error) {\n\ttx := db.Begin()\n\tdefer tx.Rollback()\n\n\tret, err := fn(tx)\n\tif err != nil {\n\t\tvar no T\n\t\treturn no, err\n\t}\n\n\treturn ret, tx.Commit().Error\n}\n"
  },
  {
    "path": "hscontrol/db/db_test.go",
    "content": "package db\n\nimport (\n\t\"context\"\n\t\"database/sql\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gorm.io/gorm\"\n\t\"zgo.at/zcache/v2\"\n)\n\n// TestSQLiteMigrationAndDataValidation tests specific SQLite migration scenarios\n// and validates data integrity after migration. All migrations that require data validation\n// should be added here.\nfunc TestSQLiteMigrationAndDataValidation(t *testing.T) {\n\ttests := []struct {\n\t\tdbPath   string\n\t\twantFunc func(*testing.T, *HSDatabase)\n\t}{\n\t\t// at 14:15:06 ❯ go run ./cmd/headscale preauthkeys list\n\t\t// ID | Key      | Reusable | Ephemeral | Used  | Expiration | Created    | Tags\n\t\t// 1  | 09b28f.. | false    | false     | false | 2024-09-27 | 2024-09-27 | tag:derp\n\t\t// 2  | 3112b9.. | false    | false     | false | 2024-09-27 | 2024-09-27 | tag:derp\n\t\t{\n\t\t\tdbPath: \"testdata/sqlite/failing-node-preauth-constraint_dump.sql\",\n\t\t\twantFunc: func(t *testing.T, hsdb *HSDatabase) {\n\t\t\t\tt.Helper()\n\t\t\t\t// Comprehensive data preservation validation for node-preauth constraint issue\n\t\t\t\t// Expected data from dump: 1 user, 2 api_keys, 6 nodes\n\n\t\t\t\t// Verify users data preservation\n\t\t\t\tusers, err := Read(hsdb.DB, func(rx *gorm.DB) ([]types.User, error) {\n\t\t\t\t\treturn ListUsers(rx)\n\t\t\t\t})\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.Len(t, users, 1, \"should preserve all 1 user from original schema\")\n\n\t\t\t\t// Verify api_keys data preservation\n\t\t\t\tvar apiKeyCount int\n\n\t\t\t\terr = hsdb.DB.Raw(\"SELECT COUNT(*) FROM api_keys\").Scan(&apiKeyCount).Error\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.Equal(t, 2, apiKeyCount, \"should preserve all 2 api_keys from original schema\")\n\n\t\t\t\t// Verify nodes data preservation and field validation\n\t\t\t\tnodes, err := Read(hsdb.DB, func(rx *gorm.DB) (types.Nodes, error) {\n\t\t\t\t\treturn ListNodes(rx)\n\t\t\t\t})\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.Len(t, nodes, 6, \"should preserve all 6 nodes from original schema\")\n\n\t\t\t\tfor _, node := range nodes {\n\t\t\t\t\tassert.Falsef(t, node.MachineKey.IsZero(), \"expected non zero machinekey\")\n\t\t\t\t\tassert.Contains(t, node.MachineKey.String(), \"mkey:\")\n\t\t\t\t\tassert.Falsef(t, node.NodeKey.IsZero(), \"expected non zero nodekey\")\n\t\t\t\t\tassert.Contains(t, node.NodeKey.String(), \"nodekey:\")\n\t\t\t\t\tassert.Falsef(t, node.DiscoKey.IsZero(), \"expected non zero discokey\")\n\t\t\t\t\tassert.Contains(t, node.DiscoKey.String(), \"discokey:\")\n\t\t\t\t\tassert.Nil(t, node.AuthKey)\n\t\t\t\t\tassert.Nil(t, node.AuthKeyID)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t// Test for RequestTags migration (202601121700-migrate-hostinfo-request-tags)\n\t\t// and forced_tags->tags rename migration (202511131445-node-forced-tags-to-tags)\n\t\t//\n\t\t// This test validates that:\n\t\t// 1. The forced_tags column is renamed to tags\n\t\t// 2. RequestTags from host_info are validated against policy tagOwners\n\t\t// 3. Authorized tags are migrated to the tags column\n\t\t// 4. Unauthorized tags are rejected\n\t\t// 5. Existing tags are preserved\n\t\t// 6. 
Group membership is evaluated for tag authorization\n\t\t{\n\t\t\tdbPath: \"testdata/sqlite/request_tags_migration_test.sql\",\n\t\t\twantFunc: func(t *testing.T, hsdb *HSDatabase) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tnodes, err := Read(hsdb.DB, func(rx *gorm.DB) (types.Nodes, error) {\n\t\t\t\t\treturn ListNodes(rx)\n\t\t\t\t})\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.Len(t, nodes, 7, \"should have all 7 nodes\")\n\n\t\t\t\t// Helper to find node by hostname\n\t\t\t\tfindNode := func(hostname string) *types.Node {\n\t\t\t\t\tfor _, n := range nodes {\n\t\t\t\t\t\tif n.Hostname == hostname {\n\t\t\t\t\t\t\treturn n\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\t// Node 1: user1 has RequestTags for tag:server (authorized)\n\t\t\t\t// Expected: tags = [\"tag:server\"]\n\t\t\t\tnode1 := findNode(\"node1\")\n\t\t\t\trequire.NotNil(t, node1, \"node1 should exist\")\n\t\t\t\tassert.Contains(t, node1.Tags, \"tag:server\", \"node1 should have tag:server migrated from RequestTags\")\n\n\t\t\t\t// Node 2: user1 has RequestTags for tag:unauthorized (NOT authorized)\n\t\t\t\t// Expected: tags = [] (unchanged)\n\t\t\t\tnode2 := findNode(\"node2\")\n\t\t\t\trequire.NotNil(t, node2, \"node2 should exist\")\n\t\t\t\tassert.Empty(t, node2.Tags, \"node2 should have empty tags (unauthorized tag rejected)\")\n\n\t\t\t\t// Node 3: user2 has RequestTags for tag:client (authorized) + existing tag:existing\n\t\t\t\t// Expected: tags = [\"tag:client\", \"tag:existing\"]\n\t\t\t\tnode3 := findNode(\"node3\")\n\t\t\t\trequire.NotNil(t, node3, \"node3 should exist\")\n\t\t\t\tassert.Contains(t, node3.Tags, \"tag:client\", \"node3 should have tag:client migrated from RequestTags\")\n\t\t\t\tassert.Contains(t, node3.Tags, \"tag:existing\", \"node3 should preserve existing tag\")\n\n\t\t\t\t// Node 4: user1 has RequestTags for tag:server which already exists\n\t\t\t\t// Expected: tags = [\"tag:server\"] (no duplicates)\n\t\t\t\tnode4 := findNode(\"node4\")\n\t\t\t\trequire.NotNil(t, node4, \"node4 should exist\")\n\t\t\t\tassert.Equal(t, []string{\"tag:server\"}, node4.Tags, \"node4 should have tag:server without duplicates\")\n\n\t\t\t\t// Node 5: user2 has no RequestTags\n\t\t\t\t// Expected: tags = [] (unchanged)\n\t\t\t\tnode5 := findNode(\"node5\")\n\t\t\t\trequire.NotNil(t, node5, \"node5 should exist\")\n\t\t\t\tassert.Empty(t, node5.Tags, \"node5 should have empty tags (no RequestTags)\")\n\n\t\t\t\t// Node 6: admin1 has RequestTags for tag:admin (authorized via group:admins)\n\t\t\t\t// Expected: tags = [\"tag:admin\"]\n\t\t\t\tnode6 := findNode(\"node6\")\n\t\t\t\trequire.NotNil(t, node6, \"node6 should exist\")\n\t\t\t\tassert.Contains(t, node6.Tags, \"tag:admin\", \"node6 should have tag:admin migrated via group membership\")\n\n\t\t\t\t// Node 7: user1 has RequestTags for tag:server (authorized) and tag:forbidden (unauthorized)\n\t\t\t\t// Expected: tags = [\"tag:server\"] (only authorized tag)\n\t\t\t\tnode7 := findNode(\"node7\")\n\t\t\t\trequire.NotNil(t, node7, \"node7 should exist\")\n\t\t\t\tassert.Contains(t, node7.Tags, \"tag:server\", \"node7 should have tag:server migrated\")\n\t\t\t\tassert.NotContains(t, node7.Tags, \"tag:forbidden\", \"node7 should NOT have tag:forbidden (unauthorized)\")\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.dbPath, func(t *testing.T) {\n\t\t\tif !strings.HasSuffix(tt.dbPath, \".sql\") {\n\t\t\t\tt.Fatalf(\"TestSQLiteMigrationAndDataValidation only supports .sql files, got: %s\", tt.dbPath)\n\t\t\t}\n\n\t\t\thsdb 
:= dbForTestWithPath(t, tt.dbPath)\n\t\t\tif tt.wantFunc != nil {\n\t\t\t\ttt.wantFunc(t, hsdb)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc emptyCache() *zcache.Cache[types.AuthID, types.AuthRequest] {\n\treturn zcache.New[types.AuthID, types.AuthRequest](time.Minute, time.Hour)\n}\n\nfunc createSQLiteFromSQLFile(sqlFilePath, dbPath string) error {\n\tdb, err := sql.Open(\"sqlite\", dbPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer db.Close()\n\n\tschemaContent, err := os.ReadFile(sqlFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = db.ExecContext(context.Background(), string(schemaContent))\n\n\treturn err\n}\n\n// requireConstraintFailed checks that the error is a unique constraint\n// failure, matching either the SQLite or the PostgreSQL error message.\nfunc requireConstraintFailed(t *testing.T, err error) {\n\tt.Helper()\n\trequire.Error(t, err)\n\n\tif !strings.Contains(err.Error(), \"UNIQUE constraint failed:\") && !strings.Contains(err.Error(), \"violates unique constraint\") {\n\t\trequire.Failf(t, \"expected error to contain a constraint failure\", \"got: %s\", err.Error())\n\t}\n}\n\nfunc TestConstraints(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\trun  func(*testing.T, *gorm.DB)\n\t}{\n\t\t{\n\t\t\tname: \"no-duplicate-username-if-no-oidc\",\n\t\t\trun: func(t *testing.T, db *gorm.DB) { //nolint:thelper\n\t\t\t\t_, err := CreateUser(db, types.User{Name: \"user1\"})\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\t_, err = CreateUser(db, types.User{Name: \"user1\"})\n\t\t\t\trequireConstraintFailed(t, err)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"no-oidc-duplicate-username-and-id\",\n\t\t\trun: func(t *testing.T, db *gorm.DB) { //nolint:thelper\n\t\t\t\tuser := types.User{\n\t\t\t\t\tModel: gorm.Model{ID: 1},\n\t\t\t\t\tName:  \"user1\",\n\t\t\t\t}\n\t\t\t\tuser.ProviderIdentifier = sql.NullString{String: \"http://test.com/user1\", Valid: true}\n\n\t\t\t\terr := db.Save(&user).Error\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tuser = types.User{\n\t\t\t\t\tModel: gorm.Model{ID: 2},\n\t\t\t\t\tName:  \"user1\",\n\t\t\t\t}\n\t\t\t\tuser.ProviderIdentifier = sql.NullString{String: \"http://test.com/user1\", Valid: true}\n\n\t\t\t\terr = db.Save(&user).Error\n\t\t\t\trequireConstraintFailed(t, err)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"no-oidc-duplicate-id\",\n\t\t\trun: func(t *testing.T, db *gorm.DB) { //nolint:thelper\n\t\t\t\tuser := types.User{\n\t\t\t\t\tModel: gorm.Model{ID: 1},\n\t\t\t\t\tName:  \"user1\",\n\t\t\t\t}\n\t\t\t\tuser.ProviderIdentifier = sql.NullString{String: \"http://test.com/user1\", Valid: true}\n\n\t\t\t\terr := db.Save(&user).Error\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tuser = types.User{\n\t\t\t\t\tModel: gorm.Model{ID: 2},\n\t\t\t\t\tName:  \"user1.1\",\n\t\t\t\t}\n\t\t\t\tuser.ProviderIdentifier = sql.NullString{String: \"http://test.com/user1\", Valid: true}\n\n\t\t\t\terr = db.Save(&user).Error\n\t\t\t\trequireConstraintFailed(t, err)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"allow-duplicate-username-cli-then-oidc\",\n\t\t\trun: func(t *testing.T, db *gorm.DB) { //nolint:thelper\n\t\t\t\t_, err := CreateUser(db, types.User{Name: \"user1\"}) // Create CLI username\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tuser := types.User{\n\t\t\t\t\tName:               \"user1\",\n\t\t\t\t\tProviderIdentifier: sql.NullString{String: \"http://test.com/user1\", Valid: true},\n\t\t\t\t}\n\n\t\t\t\terr = db.Save(&user).Error\n\t\t\t\trequire.NoError(t, err)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"allow-duplicate-username-oidc-then-cli\",\n\t\t\trun: func(t *testing.T, 
db *gorm.DB) { //nolint:thelper\n\t\t\t\tuser := types.User{\n\t\t\t\t\tName:               \"user1\",\n\t\t\t\t\tProviderIdentifier: sql.NullString{String: \"http://test.com/user1\", Valid: true},\n\t\t\t\t}\n\n\t\t\t\terr := db.Save(&user).Error\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t_, err = CreateUser(db, types.User{Name: \"user1\"}) // Create CLI username\n\t\t\t\trequire.NoError(t, err)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name+\"-postgres\", func(t *testing.T) {\n\t\t\tdb := newPostgresTestDB(t)\n\t\t\ttt.run(t, db.DB.Debug())\n\t\t})\n\t\tt.Run(tt.name+\"-sqlite\", func(t *testing.T) {\n\t\t\tdb, err := newSQLiteTestDB()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"creating database: %s\", err)\n\t\t\t}\n\n\t\t\ttt.run(t, db.DB.Debug())\n\t\t})\n\t}\n}\n\n// TestPostgresMigrationAndDataValidation tests specific PostgreSQL migration scenarios\n// and validates data integrity after migration. All migrations that require data validation\n// should be added here.\n//\n// TODO(kradalby): Convert to use plain text SQL dumps instead of binary .pssql dumps for consistency\n// with SQLite tests and easier version control.\nfunc TestPostgresMigrationAndDataValidation(t *testing.T) {\n\ttests := []struct {\n\t\tname     string\n\t\tdbPath   string\n\t\twantFunc func(*testing.T, *HSDatabase)\n\t}{}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tu := newPostgresDBForTest(t)\n\n\t\t\tpgRestorePath, err := exec.LookPath(\"pg_restore\")\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"pg_restore not found in PATH. Please install it and ensure it is accessible.\")\n\t\t\t}\n\n\t\t\t// Construct the pg_restore command\n\t\t\tcmd := exec.CommandContext(context.Background(), pgRestorePath, \"--verbose\", \"--if-exists\", \"--clean\", \"--no-owner\", \"--dbname\", u.String(), tt.dbPath)\n\n\t\t\t// Set the output streams\n\t\t\tcmd.Stdout = os.Stdout\n\t\t\tcmd.Stderr = os.Stderr\n\n\t\t\t// Execute the command\n\t\t\terr = cmd.Run()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"failed to restore postgres database: %s\", err)\n\t\t\t}\n\n\t\t\tdb := newHeadscaleDBFromPostgresURL(t, u)\n\n\t\t\tif tt.wantFunc != nil {\n\t\t\t\ttt.wantFunc(t, db)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc dbForTest(t *testing.T) *HSDatabase {\n\tt.Helper()\n\treturn dbForTestWithPath(t, \"\")\n}\n\nfunc dbForTestWithPath(t *testing.T, sqlFilePath string) *HSDatabase {\n\tt.Helper()\n\n\tdbPath := t.TempDir() + \"/headscale_test.db\"\n\n\t// If SQL file path provided, validate and create database from it\n\tif sqlFilePath != \"\" {\n\t\t// Validate that the file is a SQL text file\n\t\tif !strings.HasSuffix(sqlFilePath, \".sql\") {\n\t\t\tt.Fatalf(\"dbForTestWithPath only accepts .sql files, got: %s\", sqlFilePath)\n\t\t}\n\n\t\terr := createSQLiteFromSQLFile(sqlFilePath, dbPath)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"setting up database from SQL file %s: %s\", sqlFilePath, err)\n\t\t}\n\t}\n\n\tdb, err := NewHeadscaleDatabase(\n\t\t&types.Config{\n\t\t\tDatabase: types.DatabaseConfig{\n\t\t\t\tType: \"sqlite3\",\n\t\t\t\tSqlite: types.SqliteConfig{\n\t\t\t\t\tPath: dbPath,\n\t\t\t\t},\n\t\t\t},\n\t\t\tPolicy: types.PolicyConfig{\n\t\t\t\tMode: types.PolicyModeDB,\n\t\t\t},\n\t\t},\n\t\temptyCache(),\n\t)\n\tif err != nil {\n\t\tt.Fatalf(\"setting up database: %s\", err)\n\t}\n\n\tif sqlFilePath != \"\" {\n\t\tt.Logf(\"database set up from %s at: %s\", sqlFilePath, dbPath)\n\t} else {\n\t\tt.Logf(\"database set up at: %s\", dbPath)\n\t}\n\n\treturn db\n}\n\n// 
TestSQLiteAllTestdataMigrations tests migration compatibility across all SQLite schemas\n// in the testdata directory. It verifies they can be successfully migrated to the current\n// schema version. This test only validates migration success, not data integrity.\n//\n// All test database files are SQL dumps (created with `sqlite3 headscale.db .dump`) generated\n// with old Headscale binaries on empty databases (no user/node data). These dumps include the\n// migration history in the `migrations` table, which allows the migration system to correctly\n// skip already-applied migrations and only run new ones.\nfunc TestSQLiteAllTestdataMigrations(t *testing.T) {\n\tt.Parallel()\n\n\tschemas, err := os.ReadDir(\"testdata/sqlite\")\n\trequire.NoError(t, err)\n\n\tt.Logf(\"loaded %d schemas\", len(schemas))\n\n\tfor _, schema := range schemas {\n\t\tif schema.IsDir() {\n\t\t\tcontinue\n\t\t}\n\n\t\tt.Logf(\"validating: %s\", schema.Name())\n\n\t\tt.Run(schema.Name(), func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tdbPath := t.TempDir() + \"/headscale_test.db\"\n\n\t\t\t// Setup a database with the old schema\n\t\t\tschemaPath := filepath.Join(\"testdata/sqlite\", schema.Name())\n\t\t\terr := createSQLiteFromSQLFile(schemaPath, dbPath)\n\t\t\trequire.NoError(t, err)\n\n\t\t\t_, err = NewHeadscaleDatabase(\n\t\t\t\t&types.Config{\n\t\t\t\t\tDatabase: types.DatabaseConfig{\n\t\t\t\t\t\tType: \"sqlite3\",\n\t\t\t\t\t\tSqlite: types.SqliteConfig{\n\t\t\t\t\t\t\tPath: dbPath,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tPolicy: types.PolicyConfig{\n\t\t\t\t\t\tMode: types.PolicyModeDB,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\temptyCache(),\n\t\t\t)\n\t\t\trequire.NoError(t, err)\n\t\t})\n\t}\n}\n"
  },
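  {
    "path": "hscontrol/db/db_example_test.go",
    "content": "package db\n\nimport (\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/require\"\n)\n\n// TestDBForTestSketch is an illustrative sketch of the dbForTest helper in\n// db_test.go; this file and test are hypothetical additions, not part of the\n// original suite. dbForTest creates a fresh SQLite database in a temporary\n// directory, runs all migrations, and returns a ready-to-use *HSDatabase.\nfunc TestDBForTestSketch(t *testing.T) {\n\tdb := dbForTest(t)\n\n\t// The fully migrated schema accepts basic writes, e.g. creating a user.\n\t_, err := CreateUser(db.DB, types.User{Name: \"example-user\"})\n\trequire.NoError(t, err)\n}\n"
  },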
  {
    "path": "hscontrol/db/ephemeral_garbage_collector_test.go",
    "content": "package db\n\nimport (\n\t\"runtime\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nconst (\n\tfiveHundred = 500 * time.Millisecond\n\toneHundred  = 100 * time.Millisecond\n\tfifty       = 50 * time.Millisecond\n)\n\n// TestEphemeralGarbageCollectorGoRoutineLeak is a test for a goroutine leak in EphemeralGarbageCollector().\n// It creates a new EphemeralGarbageCollector, schedules several nodes for deletion with a short expiry,\n// and verifies that the nodes are deleted when the expiry time passes, and then\n// checks for any leaked goroutines after the garbage collector is closed.\nfunc TestEphemeralGarbageCollectorGoRoutineLeak(t *testing.T) {\n\t// Count goroutines at the start\n\tinitialGoroutines := runtime.NumGoroutine()\n\tt.Logf(\"Initial number of goroutines: %d\", initialGoroutines)\n\n\t// Basic deletion tracking mechanism\n\tvar (\n\t\tdeletedIDs  []types.NodeID\n\t\tdeleteMutex sync.Mutex\n\t\tdeletionWg  sync.WaitGroup\n\t)\n\n\tdeleteFunc := func(nodeID types.NodeID) {\n\t\tdeleteMutex.Lock()\n\n\t\tdeletedIDs = append(deletedIDs, nodeID)\n\n\t\tdeleteMutex.Unlock()\n\t\tdeletionWg.Done()\n\t}\n\n\t// Start the GC\n\tgc := NewEphemeralGarbageCollector(deleteFunc)\n\tgo gc.Start()\n\n\t// Schedule several nodes for deletion with short expiry\n\tconst (\n\t\texpiry   = fifty\n\t\tnumNodes = 100\n\t)\n\n\t// Set up wait group for expected deletions\n\n\tdeletionWg.Add(numNodes)\n\n\tfor i := 1; i <= numNodes; i++ {\n\t\tgc.Schedule(types.NodeID(i), expiry) //nolint:gosec // safe conversion in test\n\t}\n\n\t// Wait for all scheduled deletions to complete\n\tdeletionWg.Wait()\n\n\t// Check nodes are deleted\n\tdeleteMutex.Lock()\n\tassert.Len(t, deletedIDs, numNodes, \"Not all nodes were deleted\")\n\tdeleteMutex.Unlock()\n\n\t// Schedule and immediately cancel to test that part of the code\n\tfor i := numNodes + 1; i <= numNodes*2; i++ {\n\t\tnodeID := types.NodeID(i) //nolint:gosec // safe conversion in test\n\t\tgc.Schedule(nodeID, time.Hour)\n\t\tgc.Cancel(nodeID)\n\t}\n\n\t// Close GC\n\tgc.Close()\n\n\t// Wait for goroutines to clean up and verify no leaks\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tfinalGoroutines := runtime.NumGoroutine()\n\t\t// NB: We have to allow for a small number of extra goroutines created by the test itself\n\t\tassert.LessOrEqual(c, finalGoroutines, initialGoroutines+5,\n\t\t\t\"There are significantly more goroutines after GC usage, which suggests a leak\")\n\t}, time.Second, 10*time.Millisecond, \"goroutines should clean up after GC close\")\n\n\tt.Logf(\"Final number of goroutines: %d\", runtime.NumGoroutine())\n}\n\n// TestEphemeralGarbageCollectorReschedule is a test for the rescheduling of nodes in EphemeralGarbageCollector().\n// It creates a new EphemeralGarbageCollector, schedules a node for deletion with a longer expiry,\n// and then reschedules it with a shorter expiry, and verifies that the node is deleted only once.\nfunc TestEphemeralGarbageCollectorReschedule(t *testing.T) {\n\t// Deletion tracking mechanism\n\tvar (\n\t\tdeletedIDs  []types.NodeID\n\t\tdeleteMutex sync.Mutex\n\t)\n\n\tdeletionNotifier := make(chan types.NodeID, 1)\n\n\tdeleteFunc := func(nodeID types.NodeID) {\n\t\tdeleteMutex.Lock()\n\n\t\tdeletedIDs = append(deletedIDs, nodeID)\n\n\t\tdeleteMutex.Unlock()\n\n\t\tdeletionNotifier <- nodeID\n\t}\n\n\t// Start GC\n\tgc := NewEphemeralGarbageCollector(deleteFunc)\n\n\tgo 
gc.Start()\n\tdefer gc.Close()\n\n\tconst (\n\t\tshortExpiry = fifty\n\t\tlongExpiry  = 1 * time.Hour\n\t)\n\n\tnodeID := types.NodeID(1)\n\n\t// Schedule node for deletion with long expiry\n\tgc.Schedule(nodeID, longExpiry)\n\n\t// Reschedule the same node with a shorter expiry\n\tgc.Schedule(nodeID, shortExpiry)\n\n\t// Wait for deletion notification with timeout\n\tselect {\n\tcase deletedNodeID := <-deletionNotifier:\n\t\tassert.Equal(t, nodeID, deletedNodeID, \"The correct node should be deleted\")\n\tcase <-time.After(time.Second):\n\t\tt.Fatal(\"Timed out waiting for node deletion\")\n\t}\n\n\t// Verify that the node was deleted exactly once\n\tdeleteMutex.Lock()\n\tassert.Len(t, deletedIDs, 1, \"Node should be deleted exactly once\")\n\tassert.Equal(t, nodeID, deletedIDs[0], \"The correct node should be deleted\")\n\tdeleteMutex.Unlock()\n}\n\n// TestEphemeralGarbageCollectorCancelAndReschedule is a test for the cancellation and rescheduling of nodes in EphemeralGarbageCollector().\n// It creates a new EphemeralGarbageCollector, schedules a node for deletion, cancels it, and then reschedules it,\n// and verifies that the node is deleted only once.\nfunc TestEphemeralGarbageCollectorCancelAndReschedule(t *testing.T) {\n\t// Deletion tracking mechanism\n\tvar (\n\t\tdeletedIDs  []types.NodeID\n\t\tdeleteMutex sync.Mutex\n\t)\n\n\tdeletionNotifier := make(chan types.NodeID, 1)\n\n\tdeleteFunc := func(nodeID types.NodeID) {\n\t\tdeleteMutex.Lock()\n\n\t\tdeletedIDs = append(deletedIDs, nodeID)\n\n\t\tdeleteMutex.Unlock()\n\n\t\tdeletionNotifier <- nodeID\n\t}\n\n\t// Start the GC\n\tgc := NewEphemeralGarbageCollector(deleteFunc)\n\n\tgo gc.Start()\n\tdefer gc.Close()\n\n\tnodeID := types.NodeID(1)\n\n\tconst expiry = fifty\n\n\t// Schedule node for deletion\n\tgc.Schedule(nodeID, expiry)\n\n\t// Cancel the scheduled deletion\n\tgc.Cancel(nodeID)\n\n\t// Use a timeout to verify no deletion occurred\n\tselect {\n\tcase <-deletionNotifier:\n\t\tt.Fatal(\"Node was deleted after cancellation\")\n\tcase <-time.After(expiry * 2): // Still need a timeout for negative test\n\t\t// This is expected - no deletion should occur\n\t}\n\n\tdeleteMutex.Lock()\n\tassert.Empty(t, deletedIDs, \"Node should not be deleted after cancellation\")\n\tdeleteMutex.Unlock()\n\n\t// Reschedule the node\n\tgc.Schedule(nodeID, expiry)\n\n\t// Wait for deletion with timeout\n\tselect {\n\tcase deletedNodeID := <-deletionNotifier:\n\t\t// Verify the correct node was deleted\n\t\tassert.Equal(t, nodeID, deletedNodeID, \"The correct node should be deleted\")\n\tcase <-time.After(time.Second): // Longer timeout as a safety net\n\t\tt.Fatal(\"Timed out waiting for node deletion\")\n\t}\n\n\t// Verify final state\n\tdeleteMutex.Lock()\n\tassert.Len(t, deletedIDs, 1, \"Node should be deleted after rescheduling\")\n\tassert.Equal(t, nodeID, deletedIDs[0], \"The correct node should be deleted\")\n\tdeleteMutex.Unlock()\n}\n\n// TestEphemeralGarbageCollectorCloseBeforeTimerFires is a test for the closing of the EphemeralGarbageCollector before the timer fires.\n// It creates a new EphemeralGarbageCollector, schedules a node for deletion, closes the GC, and verifies that the node is not deleted.\nfunc TestEphemeralGarbageCollectorCloseBeforeTimerFires(t *testing.T) {\n\t// Deletion tracking\n\tvar (\n\t\tdeletedIDs  []types.NodeID\n\t\tdeleteMutex sync.Mutex\n\t)\n\n\tdeletionNotifier := make(chan types.NodeID, 1)\n\n\tdeleteFunc := func(nodeID types.NodeID) {\n\t\tdeleteMutex.Lock()\n\n\t\tdeletedIDs = append(deletedIDs, 
nodeID)\n\n\t\tdeleteMutex.Unlock()\n\n\t\tdeletionNotifier <- nodeID\n\t}\n\n\t// Start the GC\n\tgc := NewEphemeralGarbageCollector(deleteFunc)\n\tgo gc.Start()\n\n\tconst (\n\t\tlongExpiry = 1 * time.Hour\n\t\tshortWait  = fifty * 2\n\t)\n\n\t// Schedule node deletion with a long expiry\n\tgc.Schedule(types.NodeID(1), longExpiry)\n\n\t// Close the GC before the timer\n\tgc.Close()\n\n\t// Verify that no deletion occurred within a reasonable time\n\tselect {\n\tcase <-deletionNotifier:\n\t\tt.Fatal(\"Node was deleted after GC was closed, which should not happen\")\n\tcase <-time.After(shortWait):\n\t\t// Expected: no deletion should occur\n\t}\n\n\t// Verify that no deletion occurred\n\tdeleteMutex.Lock()\n\tassert.Empty(t, deletedIDs, \"No node should be deleted when GC is closed before timer fires\")\n\tdeleteMutex.Unlock()\n}\n\n// TestEphemeralGarbageCollectorScheduleAfterClose verifies that calling Schedule after Close\n// is a no-op and doesn't cause any panics, goroutine leaks, or other issues.\nfunc TestEphemeralGarbageCollectorScheduleAfterClose(t *testing.T) {\n\t// Count initial goroutines to check for leaks\n\tinitialGoroutines := runtime.NumGoroutine()\n\tt.Logf(\"Initial number of goroutines: %d\", initialGoroutines)\n\n\t// Deletion tracking\n\tvar (\n\t\tdeletedIDs  []types.NodeID\n\t\tdeleteMutex sync.Mutex\n\t)\n\n\tnodeDeleted := make(chan struct{})\n\n\tdeleteFunc := func(nodeID types.NodeID) {\n\t\tdeleteMutex.Lock()\n\n\t\tdeletedIDs = append(deletedIDs, nodeID)\n\n\t\tdeleteMutex.Unlock()\n\t\tclose(nodeDeleted) // Signal that deletion happened\n\t}\n\n\t// Start new GC\n\tgc := NewEphemeralGarbageCollector(deleteFunc)\n\n\t// Use a WaitGroup to ensure the GC has started\n\tvar startWg sync.WaitGroup\n\tstartWg.Add(1)\n\n\tgo func() {\n\t\tstartWg.Done() // Signal that the goroutine has started\n\t\tgc.Start()\n\t}()\n\n\tstartWg.Wait() // Wait for the GC to start\n\n\t// Close GC right away\n\tgc.Close()\n\n\t// Now try to schedule node for deletion with a very short expiry\n\t// If the Schedule operation incorrectly creates a timer, it would fire quickly\n\tnodeID := types.NodeID(1)\n\tgc.Schedule(nodeID, 1*time.Millisecond)\n\n\t// Check if any node was deleted (which shouldn't happen)\n\t// Use timeout to wait for potential deletion\n\tselect {\n\tcase <-nodeDeleted:\n\t\tt.Fatal(\"Node was deleted after GC was closed, which should not happen\")\n\tcase <-time.After(fiveHundred):\n\t\t// This is the expected path - no deletion should occur\n\t}\n\n\t// Check no node was deleted\n\tdeleteMutex.Lock()\n\n\tnodesDeleted := len(deletedIDs)\n\n\tdeleteMutex.Unlock()\n\tassert.Equal(t, 0, nodesDeleted, \"No nodes should be deleted when Schedule is called after Close\")\n\n\t// Check for goroutine leaks after GC is fully closed\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tfinalGoroutines := runtime.NumGoroutine()\n\t\t// Allow for small fluctuations in goroutine count for testing routines etc\n\t\tassert.LessOrEqual(c, finalGoroutines, initialGoroutines+2,\n\t\t\t\"There should be no significant goroutine leaks when Schedule is called after Close\")\n\t}, time.Second, 10*time.Millisecond, \"goroutines should clean up after GC close\")\n\n\tt.Logf(\"Final number of goroutines: %d\", runtime.NumGoroutine())\n}\n\n// TestEphemeralGarbageCollectorConcurrentScheduleAndClose tests the behavior of the garbage collector\n// when Schedule and Close are called concurrently from multiple goroutines.\nfunc TestEphemeralGarbageCollectorConcurrentScheduleAndClose(t 
*testing.T) {\n\t// Count initial goroutines\n\tinitialGoroutines := runtime.NumGoroutine()\n\tt.Logf(\"Initial number of goroutines: %d\", initialGoroutines)\n\n\t// Deletion tracking mechanism\n\tvar (\n\t\tdeletedIDs  []types.NodeID\n\t\tdeleteMutex sync.Mutex\n\t)\n\n\tdeleteFunc := func(nodeID types.NodeID) {\n\t\tdeleteMutex.Lock()\n\n\t\tdeletedIDs = append(deletedIDs, nodeID)\n\n\t\tdeleteMutex.Unlock()\n\t}\n\n\t// Start the GC\n\tgc := NewEphemeralGarbageCollector(deleteFunc)\n\tgo gc.Start()\n\n\t// Number of concurrent scheduling goroutines\n\tconst (\n\t\tnumSchedulers     = 10\n\t\tnodesPerScheduler = 50\n\t)\n\n\tconst closeAfterNodes = 25 // Close GC after this many nodes per scheduler\n\n\t// Use WaitGroup to wait for all scheduling goroutines to finish\n\tvar wg sync.WaitGroup\n\twg.Add(numSchedulers + 1) // +1 for the closer goroutine\n\n\t// Create a stopper channel to signal scheduling goroutines to stop\n\tstopScheduling := make(chan struct{})\n\n\t// Track how many nodes have been scheduled\n\tvar scheduledCount int64\n\n\t// Launch goroutines that continuously schedule nodes\n\tfor schedulerIndex := range numSchedulers {\n\t\tgo func(schedulerID int) {\n\t\t\tdefer wg.Done()\n\n\t\t\tbaseNodeID := schedulerID * nodesPerScheduler\n\n\t\t\t// Keep scheduling nodes until signaled to stop\n\t\t\tfor j := range nodesPerScheduler {\n\t\t\t\tselect {\n\t\t\t\tcase <-stopScheduling:\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t\tnodeID := types.NodeID(baseNodeID + j + 1) //nolint:gosec // safe conversion in test\n\t\t\t\t\tgc.Schedule(nodeID, 1*time.Hour)           // Long expiry to ensure it doesn't trigger during test\n\t\t\t\t\tatomic.AddInt64(&scheduledCount, 1)\n\n\t\t\t\t\t// Yield to other goroutines to introduce variability\n\t\t\t\t\truntime.Gosched()\n\t\t\t\t}\n\t\t\t}\n\t\t}(schedulerIndex)\n\t}\n\n\t// Close the garbage collector after some nodes have been scheduled\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\t// Wait until enough nodes have been scheduled\n\t\tfor atomic.LoadInt64(&scheduledCount) < int64(numSchedulers*closeAfterNodes) {\n\t\t\truntime.Gosched()\n\t\t}\n\n\t\t// Close GC\n\t\tgc.Close()\n\n\t\t// Signal schedulers to stop\n\t\tclose(stopScheduling)\n\t}()\n\n\t// Wait for all goroutines to complete\n\twg.Wait()\n\n\t// Check for leaks using EventuallyWithT\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tfinalGoroutines := runtime.NumGoroutine()\n\t\t// Allow for a reasonable small variable routine count due to testing\n\t\tassert.LessOrEqual(c, finalGoroutines, initialGoroutines+5,\n\t\t\t\"There should be no significant goroutine leaks during concurrent Schedule and Close operations\")\n\t}, time.Second, 10*time.Millisecond, \"goroutines should clean up\")\n\n\tt.Logf(\"Final number of goroutines: %d\", runtime.NumGoroutine())\n}\n"
  },
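  {
    "path": "hscontrol/db/ephemeral_garbage_collector_example_test.go",
    "content": "package db\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n)\n\n// ExampleNewEphemeralGarbageCollector is an illustrative sketch of the\n// Schedule/Cancel/Close lifecycle of the EphemeralGarbageCollector defined in\n// node.go; this file is a hypothetical addition, not part of the original\n// suite, and relies only on APIs defined in this package.\nfunc ExampleNewEphemeralGarbageCollector() {\n\tdeleted := make(chan types.NodeID, 1)\n\n\tgc := NewEphemeralGarbageCollector(func(nodeID types.NodeID) {\n\t\tdeleted <- nodeID\n\t})\n\tgo gc.Start()\n\tdefer gc.Close()\n\n\t// Node 2 is cancelled before its timer fires, so only node 1 is deleted.\n\tgc.Schedule(types.NodeID(1), 10*time.Millisecond)\n\tgc.Schedule(types.NodeID(2), time.Hour)\n\tgc.Cancel(types.NodeID(2))\n\n\tfmt.Println(uint64(<-deleted))\n\t// Output: 1\n}\n"
  },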
  {
    "path": "hscontrol/db/ip.go",
    "content": "package db\n\nimport (\n\t\"crypto/rand\"\n\t\"database/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"net/netip\"\n\t\"sync\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/rs/zerolog/log\"\n\t\"go4.org/netipx\"\n\t\"gorm.io/gorm\"\n\t\"tailscale.com/net/tsaddr\"\n)\n\nvar (\n\terrGeneratedIPBytesInvalid = errors.New(\"generated ip bytes are invalid ip\")\n\terrGeneratedIPNotInPrefix  = errors.New(\"generated ip not in prefix\")\n\terrIPAllocatorNil          = errors.New(\"ip allocator was nil\")\n)\n\n// IPAllocator is a singleton responsible for allocating\n// IP addresses for nodes and making sure the same\n// address is not handed out twice. There can only be one\n// and it needs to be created before any other database\n// writes occur.\ntype IPAllocator struct {\n\tmu sync.Mutex\n\n\tprefix4 *netip.Prefix\n\tprefix6 *netip.Prefix\n\n\t// Previous IPs handed out\n\tprev4 netip.Addr\n\tprev6 netip.Addr\n\n\t// strategy used for handing out IP addresses.\n\tstrategy types.IPAllocationStrategy\n\n\t// Set of all IPs handed out.\n\t// This might not be in sync with the database,\n\t// but it is more conservative. If saving to the\n\t// database fails, the IP will remain allocated here\n\t// until the next restart of Headscale.\n\tusedIPs netipx.IPSetBuilder\n}\n\n// NewIPAllocator returns a new IPAllocator singleton which\n// can be used to hand out unique IP addresses within the\n// provided IPv4 and IPv6 prefix. It needs to be created\n// when headscale starts and needs to finish its read\n// transaction before any writes to the database occur.\nfunc NewIPAllocator(\n\tdb *HSDatabase,\n\tprefix4, prefix6 *netip.Prefix,\n\tstrategy types.IPAllocationStrategy,\n) (*IPAllocator, error) {\n\tret := IPAllocator{\n\t\tprefix4: prefix4,\n\t\tprefix6: prefix6,\n\n\t\tstrategy: strategy,\n\t}\n\n\tvar (\n\t\tv4s []sql.NullString\n\t\tv6s []sql.NullString\n\t)\n\n\tif db != nil {\n\t\terr := db.Read(func(rx *gorm.DB) error {\n\t\t\treturn rx.Model(&types.Node{}).Pluck(\"ipv4\", &v4s).Error\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"reading IPv4 addresses from database: %w\", err)\n\t\t}\n\n\t\terr = db.Read(func(rx *gorm.DB) error {\n\t\t\treturn rx.Model(&types.Node{}).Pluck(\"ipv6\", &v6s).Error\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"reading IPv6 addresses from database: %w\", err)\n\t\t}\n\t}\n\n\tvar ips netipx.IPSetBuilder\n\n\t// Add network and broadcast addrs to used pool so they\n\t// are not handed out to nodes.\n\tif prefix4 != nil {\n\t\tnetwork4, broadcast4 := util.GetIPPrefixEndpoints(*prefix4)\n\t\tips.Add(network4)\n\t\tips.Add(broadcast4)\n\n\t\t// Use network as starting point; it will be used to call .Next()\n\t\t// TODO(kradalby): Could potentially take all the IPs loaded from\n\t\t// the database into account to start at a more \"educated\" location.\n\t\tret.prev4 = network4\n\t}\n\n\tif prefix6 != nil {\n\t\tnetwork6, broadcast6 := util.GetIPPrefixEndpoints(*prefix6)\n\t\tips.Add(network6)\n\t\tips.Add(broadcast6)\n\n\t\tret.prev6 = network6\n\t}\n\n\t// Fetch all the IP Addresses currently handed out from the Database\n\t// and add them to the used IP set.\n\tfor _, addrStr := range append(v4s, v6s...) 
{\n\t\tif addrStr.Valid {\n\t\t\taddr, err := netip.ParseAddr(addrStr.String)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"parsing IP address from database: %w\", err)\n\t\t\t}\n\n\t\t\tips.Add(addr)\n\t\t}\n\t}\n\n\t// Build the initial IPSet to validate that we can use it.\n\t_, err := ips.IPSet()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"building initial IP Set: %w\",\n\t\t\terr,\n\t\t)\n\t}\n\n\tret.usedIPs = ips\n\n\treturn &ret, nil\n}\n\nfunc (i *IPAllocator) Next() (*netip.Addr, *netip.Addr, error) {\n\ti.mu.Lock()\n\tdefer i.mu.Unlock()\n\n\tvar (\n\t\terr  error\n\t\tret4 *netip.Addr\n\t\tret6 *netip.Addr\n\t)\n\n\tif i.prefix4 != nil {\n\t\tret4, err = i.next(i.prev4, i.prefix4)\n\t\tif err != nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"allocating IPv4 address: %w\", err)\n\t\t}\n\n\t\ti.prev4 = *ret4\n\t}\n\n\tif i.prefix6 != nil {\n\t\tret6, err = i.next(i.prev6, i.prefix6)\n\t\tif err != nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"allocating IPv6 address: %w\", err)\n\t\t}\n\n\t\ti.prev6 = *ret6\n\t}\n\n\treturn ret4, ret6, nil\n}\n\nvar ErrCouldNotAllocateIP = errors.New(\"failed to allocate IP\")\n\nfunc (i *IPAllocator) nextLocked(prev netip.Addr, prefix *netip.Prefix) (*netip.Addr, error) {\n\ti.mu.Lock()\n\tdefer i.mu.Unlock()\n\n\treturn i.next(prev, prefix)\n}\n\nfunc (i *IPAllocator) next(prev netip.Addr, prefix *netip.Prefix) (*netip.Addr, error) {\n\tvar (\n\t\terr error\n\t\tip  netip.Addr\n\t)\n\n\tswitch i.strategy {\n\tcase types.IPAllocationStrategySequential:\n\t\t// Get the IP following the previously allocated one\n\t\tip = prev.Next()\n\tcase types.IPAllocationStrategyRandom:\n\t\tip, err = randomNext(*prefix)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"getting random IP: %w\", err)\n\t\t}\n\t}\n\n\t// TODO(kradalby): maybe this can be done less often.\n\tset, err := i.usedIPs.IPSet()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor {\n\t\tif !prefix.Contains(ip) {\n\t\t\treturn nil, ErrCouldNotAllocateIP\n\t\t}\n\n\t\t// Check if the IP has already been allocated\n\t\t// or if it is an IP reserved by Tailscale.\n\t\tif set.Contains(ip) || isTailscaleReservedIP(ip) {\n\t\t\tswitch i.strategy {\n\t\t\tcase types.IPAllocationStrategySequential:\n\t\t\t\tip = ip.Next()\n\t\t\tcase types.IPAllocationStrategyRandom:\n\t\t\t\tip, err = randomNext(*prefix)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"getting random IP: %w\", err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\ti.usedIPs.Add(ip)\n\n\t\treturn &ip, nil\n\t}\n}\n\nfunc randomNext(pfx netip.Prefix) (netip.Addr, error) {\n\trang := netipx.RangeOfPrefix(pfx)\n\tfromIP, toIP := rang.From(), rang.To()\n\n\tvar from, to big.Int\n\n\tfrom.SetBytes(fromIP.AsSlice())\n\tto.SetBytes(toIP.AsSlice())\n\n\t// Find the max, this is how we can do \"random range\",\n\t// get the \"max\" as 0 -> to - from and then add back from\n\t// after.\n\ttempMax := big.NewInt(0).Sub(&to, &from)\n\n\tout, err := rand.Int(rand.Reader, tempMax)\n\tif err != nil {\n\t\treturn netip.Addr{}, fmt.Errorf(\"generating random IP: %w\", err)\n\t}\n\n\tvalInRange := big.NewInt(0).Add(&from, out)\n\n\tip, ok := netip.AddrFromSlice(valInRange.Bytes())\n\tif !ok {\n\t\treturn netip.Addr{}, errGeneratedIPBytesInvalid\n\t}\n\n\tif !pfx.Contains(ip) {\n\t\treturn netip.Addr{}, fmt.Errorf(\n\t\t\t\"%w: ip(%s) not in prefix(%s)\",\n\t\t\terrGeneratedIPNotInPrefix,\n\t\t\tip.String(),\n\t\t\tpfx.String(),\n\t\t)\n\t}\n\n\treturn ip, nil\n}\n\nfunc isTailscaleReservedIP(ip netip.Addr) bool {\n\treturn 
tsaddr.ChromeOSVMRange().Contains(ip) ||\n\t\ttsaddr.TailscaleServiceIP() == ip ||\n\t\ttsaddr.TailscaleServiceIPv6() == ip\n}\n\n// BackfillNodeIPs will take a database transaction, and\n// iterate through all of the current nodes in headscale\n// and ensure they have IP addresses according to the current\n// configuration.\n// This means that if both IPv4 and IPv6 are set in the\n// config, and some nodes are missing that type of IP,\n// it will be added.\n// If a prefix type has been removed (IPv4 or IPv6), it\n// will remove the IPs in that family from the node.\nfunc (db *HSDatabase) BackfillNodeIPs(i *IPAllocator) ([]string, error) {\n\tvar (\n\t\terr error\n\t\tret []string\n\t)\n\n\terr = db.Write(func(tx *gorm.DB) error {\n\t\tif i == nil {\n\t\t\treturn fmt.Errorf(\"backfilling IPs: %w\", errIPAllocatorNil)\n\t\t}\n\n\t\tlog.Trace().Caller().Msgf(\"starting to backfill IPs\")\n\n\t\tnodes, err := ListNodes(tx)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"listing nodes to backfill IPs: %w\", err)\n\t\t}\n\n\t\tfor _, node := range nodes {\n\t\t\tlog.Trace().Caller().EmbedObject(node).Msg(\"ip backfill check started because node found in database\")\n\n\t\t\tchanged := false\n\t\t\t// IPv4 prefix is set, but node ip is missing, alloc\n\t\t\tif i.prefix4 != nil && node.IPv4 == nil {\n\t\t\t\tret4, err := i.nextLocked(i.prev4, i.prefix4)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"allocating IPv4 for node(%d): %w\", node.ID, err)\n\t\t\t\t}\n\n\t\t\t\tnode.IPv4 = ret4\n\t\t\t\tchanged = true\n\n\t\t\t\tret = append(ret, fmt.Sprintf(\"assigned IPv4 %q to Node(%d) %q\", ret4.String(), node.ID, node.Hostname))\n\t\t\t}\n\n\t\t\t// IPv6 prefix is set, but node ip is missing, alloc\n\t\t\tif i.prefix6 != nil && node.IPv6 == nil {\n\t\t\t\tret6, err := i.nextLocked(i.prev6, i.prefix6)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"allocating IPv6 for node(%d): %w\", node.ID, err)\n\t\t\t\t}\n\n\t\t\t\tnode.IPv6 = ret6\n\t\t\t\tchanged = true\n\n\t\t\t\tret = append(ret, fmt.Sprintf(\"assigned IPv6 %q to Node(%d) %q\", ret6.String(), node.ID, node.Hostname))\n\t\t\t}\n\n\t\t\t// IPv4 prefix is not set, but node has IP, remove\n\t\t\tif i.prefix4 == nil && node.IPv4 != nil {\n\t\t\t\tret = append(ret, fmt.Sprintf(\"removing IPv4 %q from Node(%d) %q\", node.IPv4.String(), node.ID, node.Hostname))\n\t\t\t\tnode.IPv4 = nil\n\t\t\t\tchanged = true\n\t\t\t}\n\n\t\t\t// IPv6 prefix is not set, but node has IP, remove\n\t\t\tif i.prefix6 == nil && node.IPv6 != nil {\n\t\t\t\tret = append(ret, fmt.Sprintf(\"removing IPv6 %q from Node(%d) %q\", node.IPv6.String(), node.ID, node.Hostname))\n\t\t\t\tnode.IPv6 = nil\n\t\t\t\tchanged = true\n\t\t\t}\n\n\t\t\tif changed {\n\t\t\t\t// Use Updates() with Select() to only update IP fields, avoiding overwriting\n\t\t\t\t// other fields like Expiry. We need Select() because Updates() alone skips\n\t\t\t\t// zero values, but we DO want to update IPv4/IPv6 to nil when removing them.\n\t\t\t\t// See issue #2862.\n\t\t\t\terr := tx.Model(node).Select(\"ipv4\", \"ipv6\").Updates(node).Error\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"saving node(%d) after adding IPs: %w\", node.ID, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn ret, err\n}\n\nfunc (i *IPAllocator) FreeIPs(ips []netip.Addr) {\n\ti.mu.Lock()\n\tdefer i.mu.Unlock()\n\n\tfor _, ip := range ips {\n\t\ti.usedIPs.Remove(ip)\n\t}\n}\n"
  },
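  {
    "path": "hscontrol/db/ip_example_test.go",
    "content": "package db\n\nimport (\n\t\"fmt\"\n\t\"net/netip\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n)\n\n// ExampleNewIPAllocator is an illustrative sketch of the sequential\n// allocation strategy in ip.go; this file is a hypothetical addition, not\n// part of the original suite. Passing a nil *HSDatabase is permitted (the\n// tests in ip_test.go do the same) and means the allocator starts from an\n// empty used-IP set.\nfunc ExampleNewIPAllocator() {\n\tprefix4 := netip.MustParsePrefix(\"100.64.0.0/10\")\n\tprefix6 := netip.MustParsePrefix(\"fd7a:115c:a1e0::/48\")\n\n\talloc, err := NewIPAllocator(nil, &prefix4, &prefix6, types.IPAllocationStrategySequential)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// The network address of each prefix is reserved up front, so the first\n\t// allocation is the address immediately after it.\n\tv4, v6, err := alloc.Next()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(v4, v6)\n\t// Output: 100.64.0.1 fd7a:115c:a1e0::1\n}\n"
  },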
  {
    "path": "hscontrol/db/ip_test.go",
    "content": "package db\n\nimport (\n\t\"fmt\"\n\t\"net/netip\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/google/go-cmp/cmp/cmpopts\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/net/tsaddr\"\n)\n\nvar mpp = func(pref string) *netip.Prefix {\n\tp := netip.MustParsePrefix(pref)\n\treturn &p\n}\n\nvar na = netip.MustParseAddr\n\nvar nap = func(pref string) *netip.Addr {\n\tn := na(pref)\n\treturn &n\n}\n\nfunc TestIPAllocatorSequential(t *testing.T) {\n\ttests := []struct {\n\t\tname   string\n\t\tdbFunc func() *HSDatabase\n\n\t\tprefix4  *netip.Prefix\n\t\tprefix6  *netip.Prefix\n\t\tgetCount int\n\t\twant4    []netip.Addr\n\t\twant6    []netip.Addr\n\t}{\n\t\t{\n\t\t\tname: \"simple\",\n\t\t\tdbFunc: func() *HSDatabase {\n\t\t\t\treturn nil\n\t\t\t},\n\n\t\t\tprefix4: mpp(\"100.64.0.0/10\"),\n\t\t\tprefix6: mpp(\"fd7a:115c:a1e0::/48\"),\n\n\t\t\tgetCount: 1,\n\n\t\t\twant4: []netip.Addr{\n\t\t\t\tna(\"100.64.0.1\"),\n\t\t\t},\n\t\t\twant6: []netip.Addr{\n\t\t\t\tna(\"fd7a:115c:a1e0::1\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"simple-v4\",\n\t\t\tdbFunc: func() *HSDatabase {\n\t\t\t\treturn nil\n\t\t\t},\n\n\t\t\tprefix4: mpp(\"100.64.0.0/10\"),\n\n\t\t\tgetCount: 1,\n\n\t\t\twant4: []netip.Addr{\n\t\t\t\tna(\"100.64.0.1\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"simple-v6\",\n\t\t\tdbFunc: func() *HSDatabase {\n\t\t\t\treturn nil\n\t\t\t},\n\n\t\t\tprefix6: mpp(\"fd7a:115c:a1e0::/48\"),\n\n\t\t\tgetCount: 1,\n\n\t\t\twant6: []netip.Addr{\n\t\t\t\tna(\"fd7a:115c:a1e0::1\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"simple-with-db\",\n\t\t\tdbFunc: func() *HSDatabase {\n\t\t\t\tdb := dbForTest(t)\n\t\t\t\tuser := types.User{Name: \"\"}\n\t\t\t\tdb.DB.Save(&user)\n\n\t\t\t\tdb.DB.Save(&types.Node{\n\t\t\t\t\tUser: &user,\n\t\t\t\t\tIPv4: nap(\"100.64.0.1\"),\n\t\t\t\t\tIPv6: nap(\"fd7a:115c:a1e0::1\"),\n\t\t\t\t})\n\n\t\t\t\treturn db\n\t\t\t},\n\n\t\t\tprefix4: mpp(\"100.64.0.0/10\"),\n\t\t\tprefix6: mpp(\"fd7a:115c:a1e0::/48\"),\n\n\t\t\tgetCount: 1,\n\n\t\t\twant4: []netip.Addr{\n\t\t\t\tna(\"100.64.0.2\"),\n\t\t\t},\n\t\t\twant6: []netip.Addr{\n\t\t\t\tna(\"fd7a:115c:a1e0::2\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"before-after-free-middle-in-db\",\n\t\t\tdbFunc: func() *HSDatabase {\n\t\t\t\tdb := dbForTest(t)\n\t\t\t\tuser := types.User{Name: \"\"}\n\t\t\t\tdb.DB.Save(&user)\n\n\t\t\t\tdb.DB.Save(&types.Node{\n\t\t\t\t\tUser: &user,\n\t\t\t\t\tIPv4: nap(\"100.64.0.2\"),\n\t\t\t\t\tIPv6: nap(\"fd7a:115c:a1e0::2\"),\n\t\t\t\t})\n\n\t\t\t\treturn db\n\t\t\t},\n\n\t\t\tprefix4: mpp(\"100.64.0.0/10\"),\n\t\t\tprefix6: mpp(\"fd7a:115c:a1e0::/48\"),\n\n\t\t\tgetCount: 2,\n\n\t\t\twant4: []netip.Addr{\n\t\t\t\tna(\"100.64.0.1\"),\n\t\t\t\tna(\"100.64.0.3\"),\n\t\t\t},\n\t\t\twant6: []netip.Addr{\n\t\t\t\tna(\"fd7a:115c:a1e0::1\"),\n\t\t\t\tna(\"fd7a:115c:a1e0::3\"),\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tdb := tt.dbFunc()\n\n\t\t\talloc, _ := NewIPAllocator(\n\t\t\t\tdb,\n\t\t\t\ttt.prefix4,\n\t\t\t\ttt.prefix6,\n\t\t\t\ttypes.IPAllocationStrategySequential,\n\t\t\t)\n\n\t\t\tvar (\n\t\t\t\tgot4s []netip.Addr\n\t\t\t\tgot6s []netip.Addr\n\t\t\t)\n\n\t\t\tfor range tt.getCount {\n\t\t\t\tgot4, got6, err := alloc.Next()\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"allocating next IP: %s\", err)\n\t\t\t\t}\n\n\t\t\t\tif 
got4 != nil {\n\t\t\t\t\tgot4s = append(got4s, *got4)\n\t\t\t\t}\n\n\t\t\t\tif got6 != nil {\n\t\t\t\t\tgot6s = append(got6s, *got6)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif diff := cmp.Diff(tt.want4, got4s, util.Comparers...); diff != \"\" {\n\t\t\t\tt.Errorf(\"IPAllocator 4s unexpected result (-want +got):\\n%s\", diff)\n\t\t\t}\n\n\t\t\tif diff := cmp.Diff(tt.want6, got6s, util.Comparers...); diff != \"\" {\n\t\t\t\tt.Errorf(\"IPAllocator 6s unexpected result (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestIPAllocatorRandom(t *testing.T) {\n\ttests := []struct {\n\t\tname   string\n\t\tdbFunc func() *HSDatabase\n\n\t\tgetCount int\n\n\t\tprefix4 *netip.Prefix\n\t\tprefix6 *netip.Prefix\n\t\twant4   bool\n\t\twant6   bool\n\t}{\n\t\t{\n\t\t\tname: \"simple\",\n\t\t\tdbFunc: func() *HSDatabase {\n\t\t\t\treturn nil\n\t\t\t},\n\n\t\t\tprefix4: mpp(\"100.64.0.0/10\"),\n\t\t\tprefix6: mpp(\"fd7a:115c:a1e0::/48\"),\n\n\t\t\tgetCount: 1,\n\n\t\t\twant4: true,\n\t\t\twant6: true,\n\t\t},\n\t\t{\n\t\t\tname: \"simple-v4\",\n\t\t\tdbFunc: func() *HSDatabase {\n\t\t\t\treturn nil\n\t\t\t},\n\n\t\t\tprefix4: mpp(\"100.64.0.0/10\"),\n\n\t\t\tgetCount: 1,\n\n\t\t\twant4: true,\n\t\t\twant6: false,\n\t\t},\n\t\t{\n\t\t\tname: \"simple-v6\",\n\t\t\tdbFunc: func() *HSDatabase {\n\t\t\t\treturn nil\n\t\t\t},\n\n\t\t\tprefix6: mpp(\"fd7a:115c:a1e0::/48\"),\n\n\t\t\tgetCount: 1,\n\n\t\t\twant4: false,\n\t\t\twant6: true,\n\t\t},\n\t\t{\n\t\t\tname: \"generate-lots-of-random\",\n\t\t\tdbFunc: func() *HSDatabase {\n\t\t\t\treturn nil\n\t\t\t},\n\n\t\t\tprefix4: mpp(\"100.64.0.0/10\"),\n\t\t\tprefix6: mpp(\"fd7a:115c:a1e0::/48\"),\n\n\t\t\tgetCount: 1000,\n\n\t\t\twant4: true,\n\t\t\twant6: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tdb := tt.dbFunc()\n\n\t\t\talloc, _ := NewIPAllocator(db, tt.prefix4, tt.prefix6, types.IPAllocationStrategyRandom)\n\n\t\t\tfor range tt.getCount {\n\t\t\t\tgot4, got6, err := alloc.Next()\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"allocating next IP: %s\", err)\n\t\t\t\t}\n\n\t\t\t\tt.Logf(\"addrs ipv4: %v, ipv6: %v\", got4, got6)\n\n\t\t\t\tif tt.want4 {\n\t\t\t\t\tif got4 == nil {\n\t\t\t\t\t\tt.Fatalf(\"expected ipv4 addr, got nil\")\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif tt.want6 {\n\t\t\t\t\tif got6 == nil {\n\t\t\t\t\t\tt.Fatalf(\"expected ipv6 addr, got nil\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestBackfillIPAddresses(t *testing.T) {\n\tfullNodeP := func(i int) *types.Node {\n\t\tv4 := fmt.Sprintf(\"100.64.0.%d\", i)\n\t\tv6 := fmt.Sprintf(\"fd7a:115c:a1e0::%d\", i)\n\n\t\treturn &types.Node{\n\t\t\tIPv4: nap(v4),\n\t\t\tIPv6: nap(v6),\n\t\t}\n\t}\n\ttests := []struct {\n\t\tname   string\n\t\tdbFunc func() *HSDatabase\n\n\t\tprefix4 *netip.Prefix\n\t\tprefix6 *netip.Prefix\n\t\twant    types.Nodes\n\t}{\n\t\t{\n\t\t\tname: \"simple-backfill-ipv6\",\n\t\t\tdbFunc: func() *HSDatabase {\n\t\t\t\tdb := dbForTest(t)\n\t\t\t\tuser := types.User{Name: \"\"}\n\t\t\t\tdb.DB.Save(&user)\n\n\t\t\t\tdb.DB.Save(&types.Node{\n\t\t\t\t\tUser: &user,\n\t\t\t\t\tIPv4: nap(\"100.64.0.1\"),\n\t\t\t\t})\n\n\t\t\t\treturn db\n\t\t\t},\n\n\t\t\tprefix4: mpp(\"100.64.0.0/10\"),\n\t\t\tprefix6: mpp(\"fd7a:115c:a1e0::/48\"),\n\n\t\t\twant: types.Nodes{\n\t\t\t\t&types.Node{\n\t\t\t\t\tIPv4: nap(\"100.64.0.1\"),\n\t\t\t\t\tIPv6: nap(\"fd7a:115c:a1e0::1\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"simple-backfill-ipv4\",\n\t\t\tdbFunc: func() *HSDatabase {\n\t\t\t\tdb := dbForTest(t)\n\t\t\t\tuser 
:= types.User{Name: \"\"}\n\t\t\t\tdb.DB.Save(&user)\n\n\t\t\t\tdb.DB.Save(&types.Node{\n\t\t\t\t\tUser: &user,\n\t\t\t\t\tIPv6: nap(\"fd7a:115c:a1e0::1\"),\n\t\t\t\t})\n\n\t\t\t\treturn db\n\t\t\t},\n\n\t\t\tprefix4: mpp(\"100.64.0.0/10\"),\n\t\t\tprefix6: mpp(\"fd7a:115c:a1e0::/48\"),\n\n\t\t\twant: types.Nodes{\n\t\t\t\t&types.Node{\n\t\t\t\t\tIPv4: nap(\"100.64.0.1\"),\n\t\t\t\t\tIPv6: nap(\"fd7a:115c:a1e0::1\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"simple-backfill-remove-ipv6\",\n\t\t\tdbFunc: func() *HSDatabase {\n\t\t\t\tdb := dbForTest(t)\n\t\t\t\tuser := types.User{Name: \"\"}\n\t\t\t\tdb.DB.Save(&user)\n\n\t\t\t\tdb.DB.Save(&types.Node{\n\t\t\t\t\tUser: &user,\n\t\t\t\t\tIPv4: nap(\"100.64.0.1\"),\n\t\t\t\t\tIPv6: nap(\"fd7a:115c:a1e0::1\"),\n\t\t\t\t})\n\n\t\t\t\treturn db\n\t\t\t},\n\n\t\t\tprefix4: mpp(\"100.64.0.0/10\"),\n\n\t\t\twant: types.Nodes{\n\t\t\t\t&types.Node{\n\t\t\t\t\tIPv4: nap(\"100.64.0.1\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"simple-backfill-remove-ipv4\",\n\t\t\tdbFunc: func() *HSDatabase {\n\t\t\t\tdb := dbForTest(t)\n\t\t\t\tuser := types.User{Name: \"\"}\n\t\t\t\tdb.DB.Save(&user)\n\n\t\t\t\tdb.DB.Save(&types.Node{\n\t\t\t\t\tUser: &user,\n\t\t\t\t\tIPv4: nap(\"100.64.0.1\"),\n\t\t\t\t\tIPv6: nap(\"fd7a:115c:a1e0::1\"),\n\t\t\t\t})\n\n\t\t\t\treturn db\n\t\t\t},\n\n\t\t\tprefix6: mpp(\"fd7a:115c:a1e0::/48\"),\n\n\t\t\twant: types.Nodes{\n\t\t\t\t&types.Node{\n\t\t\t\t\tIPv6: nap(\"fd7a:115c:a1e0::1\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multi-backfill-ipv6\",\n\t\t\tdbFunc: func() *HSDatabase {\n\t\t\t\tdb := dbForTest(t)\n\t\t\t\tuser := types.User{Name: \"\"}\n\t\t\t\tdb.DB.Save(&user)\n\n\t\t\t\tdb.DB.Save(&types.Node{\n\t\t\t\t\tUser: &user,\n\t\t\t\t\tIPv4: nap(\"100.64.0.1\"),\n\t\t\t\t})\n\t\t\t\tdb.DB.Save(&types.Node{\n\t\t\t\t\tUser: &user,\n\t\t\t\t\tIPv4: nap(\"100.64.0.2\"),\n\t\t\t\t})\n\t\t\t\tdb.DB.Save(&types.Node{\n\t\t\t\t\tUser: &user,\n\t\t\t\t\tIPv4: nap(\"100.64.0.3\"),\n\t\t\t\t})\n\t\t\t\tdb.DB.Save(&types.Node{\n\t\t\t\t\tUser: &user,\n\t\t\t\t\tIPv4: nap(\"100.64.0.4\"),\n\t\t\t\t})\n\n\t\t\t\treturn db\n\t\t\t},\n\n\t\t\tprefix4: mpp(\"100.64.0.0/10\"),\n\t\t\tprefix6: mpp(\"fd7a:115c:a1e0::/48\"),\n\n\t\t\twant: types.Nodes{\n\t\t\t\tfullNodeP(1),\n\t\t\t\tfullNodeP(2),\n\t\t\t\tfullNodeP(3),\n\t\t\t\tfullNodeP(4),\n\t\t\t},\n\t\t},\n\t}\n\n\tcomps := append(util.Comparers, cmpopts.IgnoreFields(types.Node{},\n\t\t\"ID\",\n\t\t\"User\",\n\t\t\"UserID\",\n\t\t\"Endpoints\",\n\t\t\"Hostinfo\",\n\t\t\"CreatedAt\",\n\t\t\"UpdatedAt\",\n\t))\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tdb := tt.dbFunc()\n\n\t\t\talloc, err := NewIPAllocator(\n\t\t\t\tdb,\n\t\t\t\ttt.prefix4,\n\t\t\t\ttt.prefix6,\n\t\t\t\ttypes.IPAllocationStrategySequential,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"failed to set up ip alloc: %s\", err)\n\t\t\t}\n\n\t\t\tlogs, err := db.BackfillNodeIPs(alloc)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"failed to backfill: %s\", err)\n\t\t\t}\n\n\t\t\tt.Logf(\"backfill log: \\n%s\", strings.Join(logs, \"\\n\"))\n\n\t\t\tgot, err := db.ListNodes()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"failed to get nodes: %s\", err)\n\t\t\t}\n\n\t\t\tif diff := cmp.Diff(tt.want, got, comps...); diff != \"\" {\n\t\t\t\tt.Errorf(\"Backfill unexpected result (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestIPAllocatorNextNoReservedIPs(t *testing.T) {\n\tdb, err := newSQLiteTestDB()\n\trequire.NoError(t, err)\n\n\tdefer 
db.Close()\n\n\tcgnat := tsaddr.CGNATRange()\n\tula := tsaddr.TailscaleULARange()\n\n\talloc, err := NewIPAllocator(\n\t\tdb,\n\t\t&cgnat,\n\t\t&ula,\n\t\ttypes.IPAllocationStrategySequential,\n\t)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to set up ip alloc: %s\", err)\n\t}\n\n\t// Validate that we do not give out 100.100.100.100\n\tnextQuad100, err := alloc.next(na(\"100.100.100.99\"), &cgnat)\n\trequire.NoError(t, err)\n\tassert.Equal(t, na(\"100.100.100.101\"), *nextQuad100)\n\n\t// Validate that we do not give out fd7a:115c:a1e0::53\n\tnextQuad100v6, err := alloc.next(na(\"fd7a:115c:a1e0::52\"), &ula)\n\trequire.NoError(t, err)\n\tassert.Equal(t, na(\"fd7a:115c:a1e0::54\"), *nextQuad100v6)\n\n\t// Validate that we do not give out IPs from the ChromeOS VM range\n\tnextChrome, err := alloc.next(na(\"100.115.91.255\"), &cgnat)\n\trequire.NoError(t, err)\n\tt.Logf(\"chrome: %s\", nextChrome.String())\n\tassert.Equal(t, na(\"100.115.94.0\"), *nextChrome)\n}\n"
  },
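  {
    "path": "hscontrol/db/ip_reserved_example_test.go",
    "content": "package db\n\nimport (\n\t\"net/netip\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\n// TestIsTailscaleReservedIPSketch is an illustrative sketch of the three\n// reservations enforced by isTailscaleReservedIP in ip.go, which\n// TestIPAllocatorNextNoReservedIPs exercises indirectly; this file is a\n// hypothetical addition, not part of the original suite.\nfunc TestIsTailscaleReservedIPSketch(t *testing.T) {\n\t// The Tailscale service IPs are never handed out.\n\tassert.True(t, isTailscaleReservedIP(netip.MustParseAddr(\"100.100.100.100\")))\n\tassert.True(t, isTailscaleReservedIP(netip.MustParseAddr(\"fd7a:115c:a1e0::53\")))\n\n\t// Addresses inside the ChromeOS VM range are skipped as well.\n\tassert.True(t, isTailscaleReservedIP(netip.MustParseAddr(\"100.115.92.1\")))\n\n\t// An ordinary CGNAT address is not reserved.\n\tassert.False(t, isTailscaleReservedIP(netip.MustParseAddr(\"100.64.0.1\")))\n}\n"
  },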
  {
    "path": "hscontrol/db/main_test.go",
    "content": "package db\n\nimport (\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"testing\"\n)\n\n// TestMain ensures the working directory is set to the package source directory\n// so that relative testdata/ paths resolve correctly when the test binary is\n// executed from an arbitrary location (e.g., via \"go tool stress\").\nfunc TestMain(m *testing.M) {\n\t_, filename, _, ok := runtime.Caller(0)\n\tif !ok {\n\t\tpanic(\"could not determine test source directory\")\n\t}\n\n\terr := os.Chdir(filepath.Dir(filename))\n\tif err != nil {\n\t\tpanic(\"could not chdir to test source directory: \" + err.Error())\n\t}\n\n\tos.Exit(m.Run())\n}\n"
  },
  {
    "path": "hscontrol/db/node.go",
    "content": "package db\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/netip\"\n\t\"regexp\"\n\t\"slices\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/juanfont/headscale/hscontrol/util/zlog/zf\"\n\t\"github.com/rs/zerolog/log\"\n\t\"gorm.io/gorm\"\n\t\"tailscale.com/net/tsaddr\"\n\t\"tailscale.com/types/key\"\n)\n\nconst (\n\tNodeGivenNameHashLength = 8\n\tNodeGivenNameTrimSize   = 2\n\n\t// defaultTestNodePrefix is the default hostname prefix for nodes created in tests.\n\tdefaultTestNodePrefix = \"testnode\"\n)\n\n// ErrNodeNameNotUnique is returned when a node name is not unique.\nvar ErrNodeNameNotUnique = errors.New(\"node name is not unique\")\n\nvar invalidDNSRegex = regexp.MustCompile(\"[^a-z0-9-.]+\")\n\nvar (\n\tErrNodeNotFound                  = errors.New(\"node not found\")\n\tErrNodeRouteIsNotAvailable       = errors.New(\"route is not available on node\")\n\tErrNodeNotFoundRegistrationCache = errors.New(\n\t\t\"node not found in registration cache\",\n\t)\n\tErrCouldNotConvertNodeInterface = errors.New(\"failed to convert node interface\")\n)\n\n// ListPeers returns peers of node, regardless of any Policy or if the node is expired.\n// If no peer IDs are given, all peers are returned.\n// If at least one peer ID is given, only these peer nodes will be returned.\nfunc (hsdb *HSDatabase) ListPeers(nodeID types.NodeID, peerIDs ...types.NodeID) (types.Nodes, error) {\n\treturn ListPeers(hsdb.DB, nodeID, peerIDs...)\n}\n\n// ListPeers returns peers of node, regardless of any Policy or if the node is expired.\n// If no peer IDs are given, all peers are returned.\n// If at least one peer ID is given, only these peer nodes will be returned.\nfunc ListPeers(tx *gorm.DB, nodeID types.NodeID, peerIDs ...types.NodeID) (types.Nodes, error) {\n\tnodes := types.Nodes{}\n\n\terr := tx.\n\t\tPreload(\"AuthKey\").\n\t\tPreload(\"AuthKey.User\").\n\t\tPreload(\"User\").\n\t\tWhere(\"id <> ?\", nodeID).\n\t\tWhere(peerIDs).Find(&nodes).Error\n\tif err != nil {\n\t\treturn types.Nodes{}, err\n\t}\n\n\tsort.Slice(nodes, func(i, j int) bool { return nodes[i].ID < nodes[j].ID })\n\n\treturn nodes, nil\n}\n\n// ListNodes queries the database for either all nodes if no parameters are given\n// or for the given nodes if at least one node ID is given as parameter.\nfunc (hsdb *HSDatabase) ListNodes(nodeIDs ...types.NodeID) (types.Nodes, error) {\n\treturn ListNodes(hsdb.DB, nodeIDs...)\n}\n\n// ListNodes queries the database for either all nodes if no parameters are given\n// or for the given nodes if at least one node ID is given as parameter.\nfunc ListNodes(tx *gorm.DB, nodeIDs ...types.NodeID) (types.Nodes, error) {\n\tnodes := types.Nodes{}\n\n\terr := tx.\n\t\tPreload(\"AuthKey\").\n\t\tPreload(\"AuthKey.User\").\n\t\tPreload(\"User\").\n\t\tWhere(nodeIDs).Find(&nodes).Error\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn nodes, nil\n}\n\nfunc (hsdb *HSDatabase) ListEphemeralNodes() (types.Nodes, error) {\n\treturn Read(hsdb.DB, func(rx *gorm.DB) (types.Nodes, error) {\n\t\tnodes := types.Nodes{}\n\n\t\terr := rx.Joins(\"AuthKey\").Where(`\"AuthKey\".\"ephemeral\" = true`).Find(&nodes).Error\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn nodes, nil\n\t})\n}\n\nfunc (hsdb *HSDatabase) getNode(uid types.UserID, name string) (*types.Node, error) {\n\treturn Read(hsdb.DB, func(rx *gorm.DB) 
(*types.Node, error) {\n\t\treturn getNode(rx, uid, name)\n\t})\n}\n\n// getNode finds a Node by name and user and returns the Node struct.\nfunc getNode(tx *gorm.DB, uid types.UserID, name string) (*types.Node, error) {\n\tnodes, err := ListNodesByUser(tx, uid)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, m := range nodes {\n\t\tif m.Hostname == name {\n\t\t\treturn m, nil\n\t\t}\n\t}\n\n\treturn nil, ErrNodeNotFound\n}\n\nfunc (hsdb *HSDatabase) GetNodeByID(id types.NodeID) (*types.Node, error) {\n\treturn GetNodeByID(hsdb.DB, id)\n}\n\n// GetNodeByID finds a Node by ID and returns the Node struct.\nfunc GetNodeByID(tx *gorm.DB, id types.NodeID) (*types.Node, error) {\n\tmach := types.Node{}\n\tif result := tx.\n\t\tPreload(\"AuthKey\").\n\t\tPreload(\"AuthKey.User\").\n\t\tPreload(\"User\").\n\t\tFind(&types.Node{ID: id}).First(&mach); result.Error != nil {\n\t\treturn nil, result.Error\n\t}\n\n\treturn &mach, nil\n}\n\nfunc (hsdb *HSDatabase) GetNodeByMachineKey(machineKey key.MachinePublic) (*types.Node, error) {\n\treturn GetNodeByMachineKey(hsdb.DB, machineKey)\n}\n\n// GetNodeByMachineKey finds a Node by its MachineKey and returns the Node struct.\nfunc GetNodeByMachineKey(\n\ttx *gorm.DB,\n\tmachineKey key.MachinePublic,\n) (*types.Node, error) {\n\tmach := types.Node{}\n\tif result := tx.\n\t\tPreload(\"AuthKey\").\n\t\tPreload(\"AuthKey.User\").\n\t\tPreload(\"User\").\n\t\tFirst(&mach, \"machine_key = ?\", machineKey.String()); result.Error != nil {\n\t\treturn nil, result.Error\n\t}\n\n\treturn &mach, nil\n}\n\nfunc (hsdb *HSDatabase) GetNodeByNodeKey(nodeKey key.NodePublic) (*types.Node, error) {\n\treturn GetNodeByNodeKey(hsdb.DB, nodeKey)\n}\n\n// GetNodeByNodeKey finds a Node by its NodeKey and returns the Node struct.\nfunc GetNodeByNodeKey(\n\ttx *gorm.DB,\n\tnodeKey key.NodePublic,\n) (*types.Node, error) {\n\tmach := types.Node{}\n\tif result := tx.\n\t\tPreload(\"AuthKey\").\n\t\tPreload(\"AuthKey.User\").\n\t\tPreload(\"User\").\n\t\tFirst(&mach, \"node_key = ?\", nodeKey.String()); result.Error != nil {\n\t\treturn nil, result.Error\n\t}\n\n\treturn &mach, nil\n}\n\nfunc (hsdb *HSDatabase) SetTags(\n\tnodeID types.NodeID,\n\ttags []string,\n) error {\n\treturn hsdb.Write(func(tx *gorm.DB) error {\n\t\treturn SetTags(tx, nodeID, tags)\n\t})\n}\n\n// SetTags takes a NodeID and updates the forced tags.\n// It will overwrite any tags with the new list.\nfunc SetTags(\n\ttx *gorm.DB,\n\tnodeID types.NodeID,\n\ttags []string,\n) error {\n\tif len(tags) == 0 {\n\t\t// if no tags are provided, we remove all tags\n\t\terr := tx.Model(&types.Node{}).Where(\"id = ?\", nodeID).Update(\"tags\", \"[]\").Error\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"removing tags: %w\", err)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tslices.Sort(tags)\n\ttags = slices.Compact(tags)\n\n\tb, err := json.Marshal(tags)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = tx.Model(&types.Node{}).Where(\"id = ?\", nodeID).Update(\"tags\", string(b)).Error\n\tif err != nil {\n\t\treturn fmt.Errorf(\"updating tags: %w\", err)\n\t}\n\n\treturn nil\n}\n\n// SetApprovedRoutes takes a NodeID and updates the approved routes.\nfunc SetApprovedRoutes(\n\ttx *gorm.DB,\n\tnodeID types.NodeID,\n\troutes []netip.Prefix,\n) error {\n\tif len(routes) == 0 {\n\t\t// if no routes are provided, we remove all\n\t\terr := tx.Model(&types.Node{}).Where(\"id = ?\", nodeID).Update(\"approved_routes\", \"[]\").Error\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"removing approved routes: %w\", 
err)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\t// When approving exit routes, ensure both IPv4 and IPv6 are included\n\t// If either 0.0.0.0/0 or ::/0 is being approved, both should be approved\n\thasIPv4Exit := slices.Contains(routes, tsaddr.AllIPv4())\n\thasIPv6Exit := slices.Contains(routes, tsaddr.AllIPv6())\n\n\tif hasIPv4Exit && !hasIPv6Exit {\n\t\troutes = append(routes, tsaddr.AllIPv6())\n\t} else if hasIPv6Exit && !hasIPv4Exit {\n\t\troutes = append(routes, tsaddr.AllIPv4())\n\t}\n\n\tb, err := json.Marshal(routes)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := tx.Model(&types.Node{}).Where(\"id = ?\", nodeID).Update(\"approved_routes\", string(b)).Error; err != nil { //nolint:noinlineerr\n\t\treturn fmt.Errorf(\"updating approved routes: %w\", err)\n\t}\n\n\treturn nil\n}\n\n// SetLastSeen sets a node's last seen field indicating that we\n// have recently communicated with this node.\nfunc (hsdb *HSDatabase) SetLastSeen(nodeID types.NodeID, lastSeen time.Time) error {\n\treturn hsdb.Write(func(tx *gorm.DB) error {\n\t\treturn SetLastSeen(tx, nodeID, lastSeen)\n\t})\n}\n\n// SetLastSeen sets a node's last seen field indicating that we\n// have recently communicated with this node.\nfunc SetLastSeen(tx *gorm.DB, nodeID types.NodeID, lastSeen time.Time) error {\n\treturn tx.Model(&types.Node{}).Where(\"id = ?\", nodeID).Update(\"last_seen\", lastSeen).Error\n}\n\n// RenameNode takes a NodeID and a new GivenName for the node\n// and renames it. Validation should be done in the state layer before calling this function.\nfunc RenameNode(tx *gorm.DB,\n\tnodeID types.NodeID, newName string,\n) error {\n\terr := util.ValidateHostname(newName)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"renaming node: %w\", err)\n\t}\n\n\t// Check if the new name is unique\n\tvar count int64\n\n\terr = tx.Model(&types.Node{}).Where(\"given_name = ? 
AND id != ?\", newName, nodeID).Count(&count).Error\n\tif err != nil {\n\t\treturn fmt.Errorf(\"checking name uniqueness: %w\", err)\n\t}\n\n\tif count > 0 {\n\t\treturn ErrNodeNameNotUnique\n\t}\n\n\tif err := tx.Model(&types.Node{}).Where(\"id = ?\", nodeID).Update(\"given_name\", newName).Error; err != nil { //nolint:noinlineerr\n\t\treturn fmt.Errorf(\"renaming node in database: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (hsdb *HSDatabase) NodeSetExpiry(nodeID types.NodeID, expiry *time.Time) error {\n\treturn hsdb.Write(func(tx *gorm.DB) error {\n\t\treturn NodeSetExpiry(tx, nodeID, expiry)\n\t})\n}\n\n// NodeSetExpiry sets a new expiry time for a node.\n// If expiry is nil, the node's expiry is disabled (node will never expire).\nfunc NodeSetExpiry(tx *gorm.DB, nodeID types.NodeID, expiry *time.Time) error {\n\treturn tx.Model(&types.Node{}).Where(\"id = ?\", nodeID).Update(\"expiry\", expiry).Error\n}\n\nfunc (hsdb *HSDatabase) DeleteNode(node *types.Node) error {\n\treturn hsdb.Write(func(tx *gorm.DB) error {\n\t\treturn DeleteNode(tx, node)\n\t})\n}\n\n// DeleteNode deletes a Node from the database.\n// Caller is responsible for notifying all of change.\nfunc DeleteNode(tx *gorm.DB,\n\tnode *types.Node,\n) error {\n\t// Unscoped causes the node to be fully removed from the database.\n\terr := tx.Unscoped().Delete(&types.Node{}, node.ID).Error\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// DeleteEphemeralNode deletes a Node from the database, note that this method\n// will remove it immediately, and not notify any changes or consider any routes.\n// It is intended for Ephemeral nodes.\nfunc (hsdb *HSDatabase) DeleteEphemeralNode(\n\tnodeID types.NodeID,\n) error {\n\treturn hsdb.Write(func(tx *gorm.DB) error {\n\t\terr := tx.Unscoped().Delete(&types.Node{}, nodeID).Error\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n}\n\n// RegisterNodeForTest is used only for testing purposes to register a node directly in the database.\n// Production code should use state.HandleNodeFromAuthPath or state.HandleNodeFromPreAuthKey.\nfunc RegisterNodeForTest(tx *gorm.DB, node types.Node, ipv4 *netip.Addr, ipv6 *netip.Addr) (*types.Node, error) {\n\tif !testing.Testing() {\n\t\tpanic(\"RegisterNodeForTest can only be called during tests\")\n\t}\n\n\tlogEvent := log.Debug().\n\t\tStr(zf.NodeHostname, node.Hostname).\n\t\tStr(zf.MachineKey, node.MachineKey.ShortString()).\n\t\tStr(zf.NodeKey, node.NodeKey.ShortString())\n\n\tif node.User != nil {\n\t\tlogEvent = logEvent.Str(zf.UserName, node.User.Username())\n\t} else if node.UserID != nil {\n\t\tlogEvent = logEvent.Uint(zf.UserID, *node.UserID)\n\t} else {\n\t\tlogEvent = logEvent.Str(zf.UserName, \"none\")\n\t}\n\n\tlogEvent.Msg(\"registering test node\")\n\n\t// If a new node is registered with the same machine key, to the same user,\n\t// update the existing node.\n\t// If the same node is registered again, but to a new user, then that is considered\n\t// a new node.\n\toldNode, _ := GetNodeByMachineKey(tx, node.MachineKey)\n\tif oldNode != nil && oldNode.UserID == node.UserID {\n\t\tnode.ID = oldNode.ID\n\t\tnode.GivenName = oldNode.GivenName\n\t\tnode.ApprovedRoutes = oldNode.ApprovedRoutes\n\t\t// Don't overwrite the provided IPs with old ones when they exist\n\t\tif ipv4 == nil {\n\t\t\tipv4 = oldNode.IPv4\n\t\t}\n\n\t\tif ipv6 == nil {\n\t\t\tipv6 = oldNode.IPv6\n\t\t}\n\t}\n\n\t// If the node exists and it already has IP(s), we just save it\n\t// so we store the node.Expiry and node.NodeKey that have 
been set when\n\t// adding it to the registrationCache\n\tif node.IPv4 != nil || node.IPv6 != nil {\n\t\terr := tx.Save(&node).Error\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"registering existing node in database: %w\", err)\n\t\t}\n\n\t\tlog.Trace().\n\t\t\tCaller().\n\t\t\tStr(zf.NodeHostname, node.Hostname).\n\t\t\tStr(zf.MachineKey, node.MachineKey.ShortString()).\n\t\t\tStr(zf.NodeKey, node.NodeKey.ShortString()).\n\t\t\tStr(zf.UserName, node.User.Username()).\n\t\t\tMsg(\"Test node authorized again\")\n\n\t\treturn &node, nil\n\t}\n\n\tnode.IPv4 = ipv4\n\tnode.IPv6 = ipv6\n\n\tvar err error\n\n\tnode.Hostname, err = util.NormaliseHostname(node.Hostname)\n\tif err != nil {\n\t\tnewHostname := util.InvalidString()\n\t\tlog.Info().Err(err).Str(zf.InvalidHostname, node.Hostname).Str(zf.NewHostname, newHostname).Msgf(\"invalid hostname, replacing\")\n\t\tnode.Hostname = newHostname\n\t}\n\n\tif node.GivenName == \"\" {\n\t\tgivenName, err := EnsureUniqueGivenName(tx, node.Hostname)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"ensuring unique given name: %w\", err)\n\t\t}\n\n\t\tnode.GivenName = givenName\n\t}\n\n\tif err := tx.Save(&node).Error; err != nil { //nolint:noinlineerr\n\t\treturn nil, fmt.Errorf(\"saving node to database: %w\", err)\n\t}\n\n\tlog.Trace().\n\t\tCaller().\n\t\tStr(zf.NodeHostname, node.Hostname).\n\t\tMsg(\"Test node registered with the database\")\n\n\treturn &node, nil\n}\n\n// NodeSetNodeKey sets the node key of a node and saves it to the database.\nfunc NodeSetNodeKey(tx *gorm.DB, node *types.Node, nodeKey key.NodePublic) error {\n\treturn tx.Model(node).Updates(types.Node{\n\t\tNodeKey: nodeKey,\n\t}).Error\n}\n\nfunc (hsdb *HSDatabase) NodeSetMachineKey(\n\tnode *types.Node,\n\tmachineKey key.MachinePublic,\n) error {\n\treturn hsdb.Write(func(tx *gorm.DB) error {\n\t\treturn NodeSetMachineKey(tx, node, machineKey)\n\t})\n}\n\n// NodeSetMachineKey sets the machine key of a node and saves it to the database.\nfunc NodeSetMachineKey(\n\ttx *gorm.DB,\n\tnode *types.Node,\n\tmachineKey key.MachinePublic,\n) error {\n\treturn tx.Model(node).Updates(types.Node{\n\t\tMachineKey: machineKey,\n\t}).Error\n}\n\nfunc generateGivenName(suppliedName string, randomSuffix bool) (string, error) {\n\t// Strip invalid DNS characters for givenName\n\tsuppliedName = strings.ToLower(suppliedName)\n\tsuppliedName = invalidDNSRegex.ReplaceAllString(suppliedName, \"\")\n\n\tif len(suppliedName) > util.LabelHostnameLength {\n\t\treturn \"\", types.ErrHostnameTooLong\n\t}\n\n\tif randomSuffix {\n\t\t// Trim if a hostname will be longer than 63 chars after adding the hash.\n\t\ttrimmedHostnameLength := util.LabelHostnameLength - NodeGivenNameHashLength - NodeGivenNameTrimSize\n\t\tif len(suppliedName) > trimmedHostnameLength {\n\t\t\tsuppliedName = suppliedName[:trimmedHostnameLength]\n\t\t}\n\n\t\tsuffix, err := util.GenerateRandomStringDNSSafe(NodeGivenNameHashLength)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tsuppliedName += \"-\" + suffix\n\t}\n\n\treturn suppliedName, nil\n}\n\nfunc isUniqueName(tx *gorm.DB, name string) (bool, error) {\n\tnodes := types.Nodes{}\n\n\terr := tx.\n\t\tWhere(\"given_name = ?\", name).Find(&nodes).Error\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn len(nodes) == 0, nil\n}\n\n// EnsureUniqueGivenName generates a unique given name for a node based on its hostname.\nfunc EnsureUniqueGivenName(\n\ttx *gorm.DB,\n\tname string,\n) (string, error) {\n\tgivenName, err := generateGivenName(name, false)\n\tif err 
!= nil {\n\t\treturn \"\", err\n\t}\n\n\tunique, err := isUniqueName(tx, givenName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif !unique {\n\t\tpostfixedName, err := generateGivenName(name, true)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tgivenName = postfixedName\n\t}\n\n\treturn givenName, nil\n}\n\n// EphemeralGarbageCollector is a garbage collector that will delete nodes after\n// a certain amount of time.\n// It is used to delete ephemeral nodes that have disconnected and should be\n// cleaned up.\ntype EphemeralGarbageCollector struct {\n\tmu sync.Mutex\n\n\tdeleteFunc  func(types.NodeID)\n\ttoBeDeleted map[types.NodeID]*time.Timer\n\n\tdeleteCh chan types.NodeID\n\tcancelCh chan struct{}\n}\n\n// NewEphemeralGarbageCollector creates a new EphemeralGarbageCollector, it takes\n// a deleteFunc that will be called when a node is scheduled for deletion.\nfunc NewEphemeralGarbageCollector(deleteFunc func(types.NodeID)) *EphemeralGarbageCollector {\n\treturn &EphemeralGarbageCollector{\n\t\ttoBeDeleted: make(map[types.NodeID]*time.Timer),\n\t\tdeleteCh:    make(chan types.NodeID, 10),\n\t\tcancelCh:    make(chan struct{}),\n\t\tdeleteFunc:  deleteFunc,\n\t}\n}\n\n// Close stops the garbage collector.\nfunc (e *EphemeralGarbageCollector) Close() {\n\te.mu.Lock()\n\tdefer e.mu.Unlock()\n\n\t// Stop all timers\n\tfor _, timer := range e.toBeDeleted {\n\t\ttimer.Stop()\n\t}\n\n\t// Close the cancel channel to signal all goroutines to exit\n\tclose(e.cancelCh)\n}\n\n// Schedule schedules a node for deletion after the expiry duration.\n// If the garbage collector is already closed, this is a no-op.\nfunc (e *EphemeralGarbageCollector) Schedule(nodeID types.NodeID, expiry time.Duration) {\n\te.mu.Lock()\n\tdefer e.mu.Unlock()\n\n\t// Don't schedule new timers if the garbage collector is already closed\n\tselect {\n\tcase <-e.cancelCh:\n\t\t// The cancel channel is closed, meaning the GC is shutting down\n\t\t// or already shut down, so we shouldn't schedule anything new\n\t\treturn\n\tdefault:\n\t\t// Continue with scheduling\n\t}\n\n\t// If a timer already exists for this node, stop it first\n\tif oldTimer, exists := e.toBeDeleted[nodeID]; exists {\n\t\toldTimer.Stop()\n\t}\n\n\ttimer := time.NewTimer(expiry)\n\te.toBeDeleted[nodeID] = timer\n\t// Start a goroutine to handle the timer completion\n\tgo func() {\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\t// This is to handle the situation where the GC is shutting down and\n\t\t\t// we are trying to schedule a new node for deletion at the same time\n\t\t\t// i.e. 
We don't want to send to deleteCh if the GC is shutting down\n\t\t\t// So, we try to send to deleteCh, but also watch for cancelCh\n\t\t\tselect {\n\t\t\tcase e.deleteCh <- nodeID:\n\t\t\t\t// Successfully sent to deleteCh\n\t\t\tcase <-e.cancelCh:\n\t\t\t\t// GC is shutting down, don't send to deleteCh\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-e.cancelCh:\n\t\t\t// If the GC is closed, exit the goroutine\n\t\t\treturn\n\t\t}\n\t}()\n}\n\n// Cancel cancels the deletion of a node.\nfunc (e *EphemeralGarbageCollector) Cancel(nodeID types.NodeID) {\n\te.mu.Lock()\n\tdefer e.mu.Unlock()\n\n\tif timer, ok := e.toBeDeleted[nodeID]; ok {\n\t\ttimer.Stop()\n\t\tdelete(e.toBeDeleted, nodeID)\n\t}\n}\n\n// Start starts the garbage collector.\nfunc (e *EphemeralGarbageCollector) Start() {\n\tfor {\n\t\tselect {\n\t\tcase <-e.cancelCh:\n\t\t\treturn\n\t\tcase nodeID := <-e.deleteCh:\n\t\t\te.mu.Lock()\n\t\t\tdelete(e.toBeDeleted, nodeID)\n\t\t\te.mu.Unlock()\n\n\t\t\tgo e.deleteFunc(nodeID)\n\t\t}\n\t}\n}\n\nfunc (hsdb *HSDatabase) CreateNodeForTest(user *types.User, hostname ...string) *types.Node {\n\tif !testing.Testing() {\n\t\tpanic(\"CreateNodeForTest can only be called during tests\")\n\t}\n\n\tif user == nil {\n\t\tpanic(\"CreateNodeForTest requires a valid user\")\n\t}\n\n\tnodeName := defaultTestNodePrefix\n\tif len(hostname) > 0 && hostname[0] != \"\" {\n\t\tnodeName = hostname[0]\n\t}\n\n\t// Create a preauth key for the node\n\tpak, err := hsdb.CreatePreAuthKey(user.TypedID(), false, false, nil, nil)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to create preauth key for test node: %v\", err))\n\t}\n\n\tpakID := pak.ID\n\tnodeKey := key.NewNode()\n\tmachineKey := key.NewMachine()\n\tdiscoKey := key.NewDisco()\n\n\tnode := &types.Node{\n\t\tMachineKey:     machineKey.Public(),\n\t\tNodeKey:        nodeKey.Public(),\n\t\tDiscoKey:       discoKey.Public(),\n\t\tHostname:       nodeName,\n\t\tUserID:         &user.ID,\n\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\tAuthKeyID:      &pakID,\n\t}\n\n\terr = hsdb.DB.Save(node).Error\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to create test node: %v\", err))\n\t}\n\n\treturn node\n}\n\nfunc (hsdb *HSDatabase) CreateRegisteredNodeForTest(user *types.User, hostname ...string) *types.Node {\n\tif !testing.Testing() {\n\t\tpanic(\"CreateRegisteredNodeForTest can only be called during tests\")\n\t}\n\n\tnode := hsdb.CreateNodeForTest(user, hostname...)\n\n\t// Allocate IPs for the test node using the database's IP allocator\n\t// This is a simplified allocation for testing - in production this would use State.ipAlloc\n\tipv4, ipv6, err := hsdb.allocateTestIPs(node.ID)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to allocate IPs for test node: %v\", err))\n\t}\n\n\tvar registeredNode *types.Node\n\n\terr = hsdb.DB.Transaction(func(tx *gorm.DB) error {\n\t\tvar err error\n\n\t\tregisteredNode, err = RegisterNodeForTest(tx, *node, ipv4, ipv6)\n\n\t\treturn err\n\t})\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to register test node: %v\", err))\n\t}\n\n\treturn registeredNode\n}\n\nfunc (hsdb *HSDatabase) CreateNodesForTest(user *types.User, count int, hostnamePrefix ...string) []*types.Node {\n\tif !testing.Testing() {\n\t\tpanic(\"CreateNodesForTest can only be called during tests\")\n\t}\n\n\tif user == nil {\n\t\tpanic(\"CreateNodesForTest requires a valid user\")\n\t}\n\n\tprefix := defaultTestNodePrefix\n\tif len(hostnamePrefix) > 0 && hostnamePrefix[0] != \"\" {\n\t\tprefix = hostnamePrefix[0]\n\t}\n\n\tnodes := 
make([]*types.Node, count)\n\tfor i := range count {\n\t\thostname := prefix + \"-\" + strconv.Itoa(i)\n\t\tnodes[i] = hsdb.CreateNodeForTest(user, hostname)\n\t}\n\n\treturn nodes\n}\n\nfunc (hsdb *HSDatabase) CreateRegisteredNodesForTest(user *types.User, count int, hostnamePrefix ...string) []*types.Node {\n\tif !testing.Testing() {\n\t\tpanic(\"CreateRegisteredNodesForTest can only be called during tests\")\n\t}\n\n\tif user == nil {\n\t\tpanic(\"CreateRegisteredNodesForTest requires a valid user\")\n\t}\n\n\tprefix := defaultTestNodePrefix\n\tif len(hostnamePrefix) > 0 && hostnamePrefix[0] != \"\" {\n\t\tprefix = hostnamePrefix[0]\n\t}\n\n\tnodes := make([]*types.Node, count)\n\tfor i := range count {\n\t\thostname := prefix + \"-\" + strconv.Itoa(i)\n\t\tnodes[i] = hsdb.CreateRegisteredNodeForTest(user, hostname)\n\t}\n\n\treturn nodes\n}\n\n// allocateTestIPs allocates sequential test IPs for nodes during testing.\nfunc (hsdb *HSDatabase) allocateTestIPs(nodeID types.NodeID) (*netip.Addr, *netip.Addr, error) {\n\tif !testing.Testing() {\n\t\tpanic(\"allocateTestIPs can only be called during tests\")\n\t}\n\n\t// Use simple sequential allocation for tests\n\t// IPv4: 100.64.x.y (where x = nodeID/256, y = nodeID%256)\n\t// IPv6: fd7a:115c:a1e0::x:y (where x = high byte, y = low byte)\n\t// This supports up to 65535 nodes\n\tconst (\n\t\tmaxTestNodes    = 65535\n\t\tipv4ByteDivisor = 256\n\t)\n\n\tif nodeID > maxTestNodes {\n\t\treturn nil, nil, ErrCouldNotAllocateIP\n\t}\n\n\t// Split nodeID into high and low bytes for IPv4 (100.64.high.low)\n\thighByte := byte(nodeID / ipv4ByteDivisor)\n\tlowByte := byte(nodeID % ipv4ByteDivisor)\n\tipv4 := netip.AddrFrom4([4]byte{100, 64, highByte, lowByte})\n\n\t// For IPv6, use the last two bytes of the address (fd7a:115c:a1e0::high:low)\n\tipv6 := netip.AddrFrom16([16]byte{0xfd, 0x7a, 0x11, 0x5c, 0xa1, 0xe0, 0, 0, 0, 0, 0, 0, 0, 0, highByte, lowByte})\n\n\treturn &ipv4, &ipv6, nil\n}\n"
  },
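  {
    "path": "hscontrol/db/ephemeral_gc_example_test.go",
    "content": "package db\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n)\n\n// ExampleNewEphemeralGarbageCollector is an illustrative sketch, not part of\n// the upstream test suite: it shows the intended lifecycle of the\n// EphemeralGarbageCollector defined in node.go. Construct it with a delete\n// callback, run Start in a goroutine, Schedule nodes for deletion after an\n// expiry, optionally Cancel one, and Close on shutdown. The node IDs and\n// timings below are made up for the example.\nfunc ExampleNewEphemeralGarbageCollector() {\n\tdeleted := make(chan types.NodeID, 1)\n\n\tgc := NewEphemeralGarbageCollector(func(id types.NodeID) {\n\t\tdeleted <- id\n\t})\n\tgo gc.Start()\n\tdefer gc.Close()\n\n\t// Schedule two nodes; cancel the second before its timer fires.\n\tgc.Schedule(1, 10*time.Millisecond)\n\tgc.Schedule(2, time.Hour)\n\tgc.Cancel(2)\n\n\t// The delete callback runs in its own goroutine, so wait for it.\n\tfmt.Println(<-deleted)\n\t// Output: 1\n}\n"
  },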
  {
    "path": "hscontrol/db/node_test.go",
    "content": "package db\n\nimport (\n\t\"crypto/rand\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"net/netip\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/juanfont/headscale/hscontrol/policy\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gorm.io/gorm\"\n\t\"tailscale.com/net/tsaddr\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/key\"\n)\n\nfunc TestGetNode(t *testing.T) {\n\tdb, err := newSQLiteTestDB()\n\trequire.NoError(t, err)\n\n\tuser := db.CreateUserForTest(\"test\")\n\n\t_, err = db.getNode(types.UserID(user.ID), \"testnode\")\n\trequire.Error(t, err)\n\n\tnode := db.CreateNodeForTest(user, \"testnode\")\n\n\t_, err = db.getNode(types.UserID(user.ID), \"testnode\")\n\trequire.NoError(t, err)\n\tassert.Equal(t, \"testnode\", node.Hostname)\n}\n\nfunc TestGetNodeByID(t *testing.T) {\n\tdb, err := newSQLiteTestDB()\n\trequire.NoError(t, err)\n\n\tuser := db.CreateUserForTest(\"test\")\n\n\t_, err = db.GetNodeByID(0)\n\trequire.Error(t, err)\n\n\tnode := db.CreateNodeForTest(user, \"testnode\")\n\n\tretrievedNode, err := db.GetNodeByID(node.ID)\n\trequire.NoError(t, err)\n\tassert.Equal(t, \"testnode\", retrievedNode.Hostname)\n}\n\nfunc TestHardDeleteNode(t *testing.T) {\n\tdb, err := newSQLiteTestDB()\n\trequire.NoError(t, err)\n\n\tuser := db.CreateUserForTest(\"test\")\n\tnode := db.CreateNodeForTest(user, \"testnode3\")\n\n\terr = db.DeleteNode(node)\n\trequire.NoError(t, err)\n\n\t_, err = db.getNode(types.UserID(user.ID), \"testnode3\")\n\trequire.Error(t, err)\n}\n\nfunc TestListPeersManyNodes(t *testing.T) {\n\tdb, err := newSQLiteTestDB()\n\trequire.NoError(t, err)\n\n\tuser := db.CreateUserForTest(\"test\")\n\n\t_, err = db.GetNodeByID(0)\n\trequire.Error(t, err)\n\n\tnodes := db.CreateNodesForTest(user, 11, \"testnode\")\n\n\tfirstNode := nodes[0]\n\tpeersOfFirstNode, err := db.ListPeers(firstNode.ID)\n\trequire.NoError(t, err)\n\n\tassert.Len(t, peersOfFirstNode, 10)\n\tassert.Equal(t, \"testnode-1\", peersOfFirstNode[0].Hostname)\n\tassert.Equal(t, \"testnode-6\", peersOfFirstNode[5].Hostname)\n\tassert.Equal(t, \"testnode-10\", peersOfFirstNode[9].Hostname)\n}\n\nfunc TestExpireNode(t *testing.T) {\n\tdb, err := newSQLiteTestDB()\n\trequire.NoError(t, err)\n\n\tuser, err := db.CreateUser(types.User{Name: \"test\"})\n\trequire.NoError(t, err)\n\n\tpak, err := db.CreatePreAuthKey(user.TypedID(), false, false, nil, nil)\n\trequire.NoError(t, err)\n\n\tpakID := pak.ID\n\n\t_, err = db.getNode(types.UserID(user.ID), \"testnode\")\n\trequire.Error(t, err)\n\n\tnodeKey := key.NewNode()\n\tmachineKey := key.NewMachine()\n\n\tnode := &types.Node{\n\t\tID:             0,\n\t\tMachineKey:     machineKey.Public(),\n\t\tNodeKey:        nodeKey.Public(),\n\t\tHostname:       \"testnode\",\n\t\tUserID:         &user.ID,\n\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\tAuthKeyID:      &pakID,\n\t\tExpiry:         &time.Time{},\n\t}\n\tdb.DB.Save(node)\n\n\tnodeFromDB, err := db.getNode(types.UserID(user.ID), \"testnode\")\n\trequire.NoError(t, err)\n\trequire.NotNil(t, nodeFromDB)\n\n\tassert.False(t, nodeFromDB.IsExpired())\n\n\tnow := time.Now()\n\terr = db.NodeSetExpiry(nodeFromDB.ID, &now)\n\trequire.NoError(t, err)\n\n\tnodeFromDB, err = db.getNode(types.UserID(user.ID), \"testnode\")\n\trequire.NoError(t, err)\n\n\tassert.True(t, 
nodeFromDB.IsExpired())\n}\n\nfunc TestDisableNodeExpiry(t *testing.T) {\n\tdb, err := newSQLiteTestDB()\n\trequire.NoError(t, err)\n\n\tuser, err := db.CreateUser(types.User{Name: \"test\"})\n\trequire.NoError(t, err)\n\n\tpak, err := db.CreatePreAuthKey(user.TypedID(), false, false, nil, nil)\n\trequire.NoError(t, err)\n\n\tpakID := pak.ID\n\tnode := &types.Node{\n\t\tID:             0,\n\t\tMachineKey:     key.NewMachine().Public(),\n\t\tNodeKey:        key.NewNode().Public(),\n\t\tHostname:       \"testnode\",\n\t\tUserID:         &user.ID,\n\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\tAuthKeyID:      &pakID,\n\t\tExpiry:         &time.Time{},\n\t}\n\tdb.DB.Save(node)\n\n\t// Set an expiry first.\n\tpast := time.Now().Add(-time.Hour)\n\terr = db.NodeSetExpiry(node.ID, &past)\n\trequire.NoError(t, err)\n\n\tnodeFromDB, err := db.getNode(types.UserID(user.ID), \"testnode\")\n\trequire.NoError(t, err)\n\tassert.True(t, nodeFromDB.IsExpired(), \"node should be expired\")\n\n\t// Disable expiry by setting nil.\n\terr = db.NodeSetExpiry(node.ID, nil)\n\trequire.NoError(t, err)\n\n\tnodeFromDB, err = db.getNode(types.UserID(user.ID), \"testnode\")\n\trequire.NoError(t, err)\n\tassert.False(t, nodeFromDB.IsExpired(), \"node should not be expired after disabling expiry\")\n\tassert.Nil(t, nodeFromDB.Expiry, \"expiry should be nil after disabling\")\n}\n\nfunc TestSetTags(t *testing.T) {\n\tdb, err := newSQLiteTestDB()\n\trequire.NoError(t, err)\n\n\tuser, err := db.CreateUser(types.User{Name: \"test\"})\n\trequire.NoError(t, err)\n\n\tpak, err := db.CreatePreAuthKey(user.TypedID(), false, false, nil, nil)\n\trequire.NoError(t, err)\n\n\tpakID := pak.ID\n\n\t_, err = db.getNode(types.UserID(user.ID), \"testnode\")\n\trequire.Error(t, err)\n\n\tnodeKey := key.NewNode()\n\tmachineKey := key.NewMachine()\n\n\tnode := &types.Node{\n\t\tID:             0,\n\t\tMachineKey:     machineKey.Public(),\n\t\tNodeKey:        nodeKey.Public(),\n\t\tHostname:       \"testnode\",\n\t\tUserID:         &user.ID,\n\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\tAuthKeyID:      &pakID,\n\t}\n\n\ttrx := db.DB.Save(node)\n\trequire.NoError(t, trx.Error)\n\n\t// assign simple tags\n\tsTags := []string{\"tag:test\", \"tag:foo\"}\n\terr = db.SetTags(node.ID, sTags)\n\trequire.NoError(t, err)\n\tnode, err = db.getNode(types.UserID(user.ID), \"testnode\")\n\trequire.NoError(t, err)\n\tassert.Equal(t, sTags, node.Tags)\n\n\t// assign duplicate tags, expect no errors but no doubles in DB\n\teTags := []string{\"tag:bar\", \"tag:test\", \"tag:unknown\", \"tag:test\"}\n\terr = db.SetTags(node.ID, eTags)\n\trequire.NoError(t, err)\n\tnode, err = db.getNode(types.UserID(user.ID), \"testnode\")\n\trequire.NoError(t, err)\n\tassert.Equal(t, []string{\"tag:bar\", \"tag:test\", \"tag:unknown\"}, node.Tags)\n}\n\nfunc TestHeadscale_generateGivenName(t *testing.T) {\n\ttype args struct {\n\t\tsuppliedName string\n\t\trandomSuffix bool\n\t}\n\n\ttests := []struct {\n\t\tname    string\n\t\targs    args\n\t\twant    *regexp.Regexp\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"simple node name generation\",\n\t\t\targs: args{\n\t\t\t\tsuppliedName: \"testnode\",\n\t\t\t\trandomSuffix: false,\n\t\t\t},\n\t\t\twant:    regexp.MustCompile(\"^testnode$\"),\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"UPPERCASE node name generation\",\n\t\t\targs: args{\n\t\t\t\tsuppliedName: \"TestNode\",\n\t\t\t\trandomSuffix: false,\n\t\t\t},\n\t\t\twant:    regexp.MustCompile(\"^testnode$\"),\n\t\t\twantErr: 
false,\n\t\t},\n\t\t{\n\t\t\tname: \"node name with 53 chars\",\n\t\t\targs: args{\n\t\t\t\tsuppliedName: \"testmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaachine\",\n\t\t\t\trandomSuffix: false,\n\t\t\t},\n\t\t\twant:    regexp.MustCompile(\"^testmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaachine$\"),\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"node name with 63 chars\",\n\t\t\targs: args{\n\t\t\t\tsuppliedName: \"nodeeeeeee12345678901234567890123456789012345678901234567890123\",\n\t\t\t\trandomSuffix: false,\n\t\t\t},\n\t\t\twant:    regexp.MustCompile(\"^nodeeeeeee12345678901234567890123456789012345678901234567890123$\"),\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"node name with 64 chars\",\n\t\t\targs: args{\n\t\t\t\tsuppliedName: \"nodeeeeeee123456789012345678901234567890123456789012345678901234\",\n\t\t\t\trandomSuffix: false,\n\t\t\t},\n\t\t\twant:    nil,\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"node name with 73 chars\",\n\t\t\targs: args{\n\t\t\t\tsuppliedName: \"nodeeeeeee123456789012345678901234567890123456789012345678901234567890123\",\n\t\t\t\trandomSuffix: false,\n\t\t\t},\n\t\t\twant:    nil,\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"node name with random suffix\",\n\t\t\targs: args{\n\t\t\t\tsuppliedName: \"test\",\n\t\t\t\trandomSuffix: true,\n\t\t\t},\n\t\t\twant:    regexp.MustCompile(fmt.Sprintf(\"^test-[a-z0-9]{%d}$\", NodeGivenNameHashLength)),\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"node name with 63 chars with random suffix\",\n\t\t\targs: args{\n\t\t\t\tsuppliedName: \"nodeeee12345678901234567890123456789012345678901234567890123\",\n\t\t\t\trandomSuffix: true,\n\t\t\t},\n\t\t\twant:    regexp.MustCompile(fmt.Sprintf(\"^nodeeee1234567890123456789012345678901234567890123456-[a-z0-9]{%d}$\", NodeGivenNameHashLength)),\n\t\t\twantErr: false,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot, err := generateGivenName(tt.args.suppliedName, tt.args.randomSuffix)\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\n\t\t\t\t\t\"Headscale.GenerateGivenName() error = %v, wantErr %v\",\n\t\t\t\t\terr,\n\t\t\t\t\ttt.wantErr,\n\t\t\t\t)\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif tt.want != nil && !tt.want.MatchString(got) {\n\t\t\t\tt.Errorf(\n\t\t\t\t\t\"Headscale.GenerateGivenName() = %v, does not match %v\",\n\t\t\t\t\ttt.want,\n\t\t\t\t\tgot,\n\t\t\t\t)\n\t\t\t}\n\n\t\t\tif len(got) > util.LabelHostnameLength {\n\t\t\t\tt.Errorf(\n\t\t\t\t\t\"Headscale.GenerateGivenName() = %v is larger than allowed DNS segment %d\",\n\t\t\t\t\tgot,\n\t\t\t\t\tutil.LabelHostnameLength,\n\t\t\t\t)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestAutoApproveRoutes(t *testing.T) {\n\ttests := []struct {\n\t\tname         string\n\t\tacl          string\n\t\troutes       []netip.Prefix\n\t\twant         []netip.Prefix\n\t\twant2        []netip.Prefix\n\t\texpectChange bool // whether to expect route changes\n\t}{\n\t\t{\n\t\t\tname: \"no-auto-approvers-empty-policy\",\n\t\t\tacl: `\n{\n\t\"groups\": {\n\t\t\"group:admins\": [\"test@\"]\n\t},\n\t\"acls\": [\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"src\": [\"group:admins\"],\n\t\t\t\"dst\": [\"group:admins:*\"]\n\t\t}\n\t]\n}`,\n\t\t\troutes:       []netip.Prefix{netip.MustParsePrefix(\"10.33.0.0/16\")},\n\t\t\twant:         []netip.Prefix{}, // Should be empty - no auto-approvers\n\t\t\twant2:        []netip.Prefix{}, // Should be empty - no auto-approvers\n\t\t\texpectChange: false,            // No changes expected\n\t\t},\n\t\t{\n\t\t\tname: 
\"no-auto-approvers-explicit-empty\",\n\t\t\tacl: `\n{\n\t\"groups\": {\n\t\t\"group:admins\": [\"test@\"]\n\t},\n\t\"acls\": [\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"src\": [\"group:admins\"],\n\t\t\t\"dst\": [\"group:admins:*\"]\n\t\t}\n\t],\n\t\"autoApprovers\": {\n\t\t\"routes\": {},\n\t\t\"exitNode\": []\n\t}\n}`,\n\t\t\troutes:       []netip.Prefix{netip.MustParsePrefix(\"10.33.0.0/16\")},\n\t\t\twant:         []netip.Prefix{}, // Should be empty - explicitly empty auto-approvers\n\t\t\twant2:        []netip.Prefix{}, // Should be empty - explicitly empty auto-approvers\n\t\t\texpectChange: false,            // No changes expected\n\t\t},\n\t\t{\n\t\t\tname: \"2068-approve-issue-sub-kube\",\n\t\t\tacl: `\n{\n\t\"groups\": {\n\t\t\"group:k8s\": [\"test@\"]\n\t},\n\n// \t\"acls\": [\n// \t\t{\"action\": \"accept\", \"users\": [\"*\"], \"ports\": [\"*:*\"]},\n// \t],\n\n\t\"autoApprovers\": {\n\t\t\"routes\": {\n\t\t\t\"10.42.0.0/16\": [\"test@\"],\n\t\t}\n\t}\n}`,\n\t\t\troutes:       []netip.Prefix{netip.MustParsePrefix(\"10.42.7.0/24\")},\n\t\t\twant:         []netip.Prefix{netip.MustParsePrefix(\"10.42.7.0/24\")},\n\t\t\texpectChange: true, // Routes should be approved\n\t\t},\n\t\t{\n\t\t\tname: \"2068-approve-issue-sub-exit-tag\",\n\t\t\tacl: `\n{\n\t\"tagOwners\": {\n\t\t\"tag:exit\": [\"test@\"],\n\t},\n\n\t\"groups\": {\n\t\t\"group:test\": [\"test@\"]\n\t},\n\n// \t\"acls\": [\n// \t\t{\"action\": \"accept\", \"users\": [\"*\"], \"ports\": [\"*:*\"]},\n// \t],\n\n\t\"autoApprovers\": {\n\t\t\"exitNode\": [\"tag:exit\"],\n\t\t\"routes\": {\n\t\t\t\"10.10.0.0/16\": [\"group:test\"],\n\t\t\t\"10.11.0.0/16\": [\"test@\"],\n\t\t\t\"8.11.0.0/24\": [\"test2@\"], // No nodes\n\t\t}\n\t}\n}`,\n\t\t\troutes: []netip.Prefix{\n\t\t\t\ttsaddr.AllIPv4(),\n\t\t\t\ttsaddr.AllIPv6(),\n\t\t\t\tnetip.MustParsePrefix(\"10.10.0.0/16\"),\n\t\t\t\tnetip.MustParsePrefix(\"10.11.0.0/24\"),\n\n\t\t\t\t// Not approved\n\t\t\t\tnetip.MustParsePrefix(\"8.11.0.0/24\"),\n\t\t\t},\n\t\t\twant: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.10.0.0/16\"),\n\t\t\t\tnetip.MustParsePrefix(\"10.11.0.0/24\"),\n\t\t\t},\n\t\t\twant2: []netip.Prefix{\n\t\t\t\ttsaddr.AllIPv4(),\n\t\t\t\ttsaddr.AllIPv6(),\n\t\t\t},\n\t\t\texpectChange: true, // Routes should be approved\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tpmfs := policy.PolicyManagerFuncsForTest([]byte(tt.acl))\n\t\tfor i, pmf := range pmfs {\n\t\t\tt.Run(fmt.Sprintf(\"%s-policy-index%d\", tt.name, i), func(t *testing.T) {\n\t\t\t\tadb, err := newSQLiteTestDB()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tuser, err := adb.CreateUser(types.User{Name: \"test\"})\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\t_, err = adb.CreateUser(types.User{Name: \"test2\"})\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\ttaggedUser, err := adb.CreateUser(types.User{Name: \"tagged\"})\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tnode := types.Node{\n\t\t\t\t\tID:             1,\n\t\t\t\t\tMachineKey:     key.NewMachine().Public(),\n\t\t\t\t\tNodeKey:        key.NewNode().Public(),\n\t\t\t\t\tHostname:       \"testnode\",\n\t\t\t\t\tUserID:         &user.ID,\n\t\t\t\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tRoutableIPs: tt.routes,\n\t\t\t\t\t},\n\t\t\t\t\tIPv4: new(netip.MustParseAddr(\"100.64.0.1\")),\n\t\t\t\t}\n\n\t\t\t\terr = adb.DB.Save(&node).Error\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tnodeTagged := types.Node{\n\t\t\t\t\tID:             2,\n\t\t\t\t\tMachineKey:     
key.NewMachine().Public(),\n\t\t\t\t\tNodeKey:        key.NewNode().Public(),\n\t\t\t\t\tHostname:       \"taggednode\",\n\t\t\t\t\tUserID:         &taggedUser.ID,\n\t\t\t\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tRoutableIPs: tt.routes,\n\t\t\t\t\t},\n\t\t\t\t\tTags: []string{\"tag:exit\"},\n\t\t\t\t\tIPv4: new(netip.MustParseAddr(\"100.64.0.2\")),\n\t\t\t\t}\n\n\t\t\t\terr = adb.DB.Save(&nodeTagged).Error\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tusers, err := adb.ListUsers()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tnodes, err := adb.ListNodes()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tpm, err := pmf(users, nodes.ViewSlice())\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.NotNil(t, pm)\n\n\t\t\t\tnewRoutes1, changed1 := policy.ApproveRoutesWithPolicy(pm, node.View(), node.ApprovedRoutes, tt.routes)\n\t\t\t\tassert.Equal(t, tt.expectChange, changed1)\n\n\t\t\t\tif changed1 {\n\t\t\t\t\terr = SetApprovedRoutes(adb.DB, node.ID, newRoutes1)\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t}\n\n\t\t\t\tnewRoutes2, changed2 := policy.ApproveRoutesWithPolicy(pm, nodeTagged.View(), nodeTagged.ApprovedRoutes, tt.routes)\n\t\t\t\tif changed2 {\n\t\t\t\t\terr = SetApprovedRoutes(adb.DB, nodeTagged.ID, newRoutes2)\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t}\n\n\t\t\t\tnode1ByID, err := adb.GetNodeByID(1)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// For empty auto-approvers tests, handle nil vs empty slice comparison\n\t\t\t\texpectedRoutes1 := tt.want\n\t\t\t\tif len(expectedRoutes1) == 0 {\n\t\t\t\t\texpectedRoutes1 = nil\n\t\t\t\t}\n\n\t\t\t\tif diff := cmp.Diff(expectedRoutes1, node1ByID.AllApprovedRoutes(), util.Comparers...); diff != \"\" {\n\t\t\t\t\tt.Errorf(\"unexpected enabled routes (-want +got):\\n%s\", diff)\n\t\t\t\t}\n\n\t\t\t\tnode2ByID, err := adb.GetNodeByID(2)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\texpectedRoutes2 := tt.want2\n\t\t\t\tif len(expectedRoutes2) == 0 {\n\t\t\t\t\texpectedRoutes2 = nil\n\t\t\t\t}\n\n\t\t\t\tif diff := cmp.Diff(expectedRoutes2, node2ByID.AllApprovedRoutes(), util.Comparers...); diff != \"\" {\n\t\t\t\t\tt.Errorf(\"unexpected enabled routes (-want +got):\\n%s\", diff)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}\n\nfunc TestEphemeralGarbageCollectorOrder(t *testing.T) {\n\twant := []types.NodeID{1, 3}\n\tgot := []types.NodeID{}\n\n\tvar mu sync.Mutex\n\n\tdeletionCount := make(chan struct{}, 10)\n\n\te := NewEphemeralGarbageCollector(func(ni types.NodeID) {\n\t\tmu.Lock()\n\t\tdefer mu.Unlock()\n\n\t\tgot = append(got, ni)\n\n\t\tdeletionCount <- struct{}{}\n\t})\n\tgo e.Start()\n\n\t// Use shorter timeouts for faster tests\n\tgo e.Schedule(1, 50*time.Millisecond)\n\tgo e.Schedule(2, 100*time.Millisecond)\n\tgo e.Schedule(3, 150*time.Millisecond)\n\tgo e.Schedule(4, 200*time.Millisecond)\n\n\t// Wait for first deletion (node 1 at 50ms)\n\tselect {\n\tcase <-deletionCount:\n\tcase <-time.After(time.Second):\n\t\tt.Fatal(\"timeout waiting for first deletion\")\n\t}\n\n\t// Cancel nodes 2 and 4\n\tgo e.Cancel(2)\n\tgo e.Cancel(4)\n\n\t// Wait for node 3 to be deleted (at 150ms)\n\tselect {\n\tcase <-deletionCount:\n\tcase <-time.After(time.Second):\n\t\tt.Fatal(\"timeout waiting for second deletion\")\n\t}\n\n\t// Give a bit more time for any unexpected deletions\n\tselect {\n\tcase <-deletionCount:\n\t\t// Unexpected - more deletions than expected\n\tcase <-time.After(300 * time.Millisecond):\n\t\t// Expected - no more deletions\n\t}\n\n\te.Close()\n\n\tmu.Lock()\n\tdefer mu.Unlock()\n\n\tif 
diff := cmp.Diff(want, got); diff != \"\" {\n\t\tt.Errorf(\"wrong nodes deleted, unexpected result (-want +got):\\n%s\", diff)\n\t}\n}\n\nfunc TestEphemeralGarbageCollectorLoads(t *testing.T) {\n\tvar (\n\t\tgot []types.NodeID\n\t\tmu  sync.Mutex\n\t)\n\n\twant := 1000\n\n\tvar deletedCount int64\n\n\te := NewEphemeralGarbageCollector(func(ni types.NodeID) {\n\t\tmu.Lock()\n\t\tdefer mu.Unlock()\n\n\t\t// Yield to other goroutines to introduce variability\n\t\truntime.Gosched()\n\n\t\tgot = append(got, ni)\n\n\t\tatomic.AddInt64(&deletedCount, 1)\n\t})\n\tgo e.Start()\n\n\t// Use shorter expiry for faster tests\n\tfor i := range want {\n\t\tgo e.Schedule(types.NodeID(i), 100*time.Millisecond) //nolint:gosec // test code, no overflow risk\n\t}\n\n\t// Wait for all deletions to complete\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tcount := atomic.LoadInt64(&deletedCount)\n\t\tassert.Equal(c, int64(want), count, \"all nodes should be deleted\")\n\t}, 10*time.Second, 50*time.Millisecond, \"waiting for all deletions\")\n\n\te.Close()\n\n\tmu.Lock()\n\tdefer mu.Unlock()\n\n\tif len(got) != want {\n\t\tt.Errorf(\"expected %d, got %d\", want, len(got))\n\t}\n}\n\n//nolint:unused\nfunc generateRandomNumber(t *testing.T, maxVal int64) int64 {\n\tt.Helper()\n\n\tmaxB := big.NewInt(maxVal)\n\n\tn, err := rand.Int(rand.Reader, maxB)\n\tif err != nil {\n\t\tt.Fatalf(\"getting random number: %s\", err)\n\t}\n\n\treturn n.Int64() + 1\n}\n\nfunc TestListEphemeralNodes(t *testing.T) {\n\tdb, err := newSQLiteTestDB()\n\tif err != nil {\n\t\tt.Fatalf(\"creating db: %s\", err)\n\t}\n\n\tuser, err := db.CreateUser(types.User{Name: \"test\"})\n\trequire.NoError(t, err)\n\n\tpak, err := db.CreatePreAuthKey(user.TypedID(), false, false, nil, nil)\n\trequire.NoError(t, err)\n\n\tpakEph, err := db.CreatePreAuthKey(user.TypedID(), false, true, nil, nil)\n\trequire.NoError(t, err)\n\n\tpakID := pak.ID\n\tpakEphID := pakEph.ID\n\n\tnode := types.Node{\n\t\tID:             0,\n\t\tMachineKey:     key.NewMachine().Public(),\n\t\tNodeKey:        key.NewNode().Public(),\n\t\tHostname:       \"test\",\n\t\tUserID:         &user.ID,\n\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\tAuthKeyID:      &pakID,\n\t}\n\n\tnodeEph := types.Node{\n\t\tID:             0,\n\t\tMachineKey:     key.NewMachine().Public(),\n\t\tNodeKey:        key.NewNode().Public(),\n\t\tHostname:       \"ephemeral\",\n\t\tUserID:         &user.ID,\n\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\tAuthKeyID:      &pakEphID,\n\t}\n\n\terr = db.DB.Save(&node).Error\n\trequire.NoError(t, err)\n\n\terr = db.DB.Save(&nodeEph).Error\n\trequire.NoError(t, err)\n\n\tnodes, err := db.ListNodes()\n\trequire.NoError(t, err)\n\n\tephemeralNodes, err := db.ListEphemeralNodes()\n\trequire.NoError(t, err)\n\n\tassert.Len(t, nodes, 2)\n\tassert.Len(t, ephemeralNodes, 1)\n\n\tassert.Equal(t, nodeEph.ID, ephemeralNodes[0].ID)\n\tassert.Equal(t, nodeEph.AuthKeyID, ephemeralNodes[0].AuthKeyID)\n\tassert.Equal(t, nodeEph.UserID, ephemeralNodes[0].UserID)\n\tassert.Equal(t, nodeEph.Hostname, ephemeralNodes[0].Hostname)\n}\n\nfunc TestNodeNaming(t *testing.T) {\n\tdb, err := newSQLiteTestDB()\n\tif err != nil {\n\t\tt.Fatalf(\"creating db: %s\", err)\n\t}\n\n\tuser, err := db.CreateUser(types.User{Name: \"test\"})\n\trequire.NoError(t, err)\n\n\tuser2, err := db.CreateUser(types.User{Name: \"user2\"})\n\trequire.NoError(t, err)\n\n\tnode := types.Node{\n\t\tID:             0,\n\t\tMachineKey:     key.NewMachine().Public(),\n\t\tNodeKey:        
key.NewNode().Public(),\n\t\tHostname:       \"test\",\n\t\tUserID:         &user.ID,\n\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\tHostinfo:       &tailcfg.Hostinfo{},\n\t}\n\n\tnode2 := types.Node{\n\t\tID:             0,\n\t\tMachineKey:     key.NewMachine().Public(),\n\t\tNodeKey:        key.NewNode().Public(),\n\t\tHostname:       \"test\",\n\t\tUserID:         &user2.ID,\n\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\tHostinfo:       &tailcfg.Hostinfo{},\n\t}\n\n\t// Using non-ASCII characters in the hostname can\n\t// break your network, so they should be replaced when registering\n\t// a node.\n\t// https://github.com/juanfont/headscale/issues/2343\n\tnodeInvalidHostname := types.Node{\n\t\tMachineKey:     key.NewMachine().Public(),\n\t\tNodeKey:        key.NewNode().Public(),\n\t\tHostname:       \"我的电脑\", //nolint:gosmopolitan // intentional i18n test data\n\t\tUserID:         &user2.ID,\n\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t}\n\n\tnodeShortHostname := types.Node{\n\t\tMachineKey:     key.NewMachine().Public(),\n\t\tNodeKey:        key.NewNode().Public(),\n\t\tHostname:       \"a\",\n\t\tUserID:         &user2.ID,\n\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t}\n\n\terr = db.DB.Save(&node).Error\n\trequire.NoError(t, err)\n\n\terr = db.DB.Save(&node2).Error\n\trequire.NoError(t, err)\n\n\terr = db.DB.Transaction(func(tx *gorm.DB) error {\n\t\t_, err := RegisterNodeForTest(tx, node, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = RegisterNodeForTest(tx, node2, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t_, _ = RegisterNodeForTest(tx, nodeInvalidHostname, new(mpp(\"100.64.0.66/32\").Addr()), nil)\n\t\t_, err = RegisterNodeForTest(tx, nodeShortHostname, new(mpp(\"100.64.0.67/32\").Addr()), nil)\n\n\t\treturn err\n\t})\n\trequire.NoError(t, err)\n\n\tnodes, err := db.ListNodes()\n\trequire.NoError(t, err)\n\n\tassert.Len(t, nodes, 4)\n\n\tt.Logf(\"node1 %s %s\", nodes[0].Hostname, nodes[0].GivenName)\n\tt.Logf(\"node2 %s %s\", nodes[1].Hostname, nodes[1].GivenName)\n\tt.Logf(\"node3 %s %s\", nodes[2].Hostname, nodes[2].GivenName)\n\tt.Logf(\"node4 %s %s\", nodes[3].Hostname, nodes[3].GivenName)\n\n\tassert.Equal(t, nodes[0].Hostname, nodes[0].GivenName)\n\tassert.NotEqual(t, nodes[1].Hostname, nodes[1].GivenName)\n\tassert.Equal(t, nodes[0].Hostname, nodes[1].Hostname)\n\tassert.NotEqual(t, nodes[0].Hostname, nodes[1].GivenName)\n\tassert.Contains(t, nodes[1].GivenName, nodes[0].Hostname)\n\tassert.Equal(t, nodes[0].GivenName, nodes[1].Hostname)\n\tassert.Len(t, nodes[0].Hostname, 4)\n\tassert.Len(t, nodes[1].Hostname, 4)\n\tassert.Len(t, nodes[0].GivenName, 4)\n\tassert.Len(t, nodes[1].GivenName, 13)\n\tassert.Contains(t, nodes[2].Hostname, \"invalid-\") // invalid chars\n\tassert.Contains(t, nodes[2].GivenName, \"invalid-\")\n\tassert.Contains(t, nodes[3].Hostname, \"invalid-\") // too short\n\tassert.Contains(t, nodes[3].GivenName, \"invalid-\")\n\n\t// Nodes can be renamed to a unique name\n\terr = db.Write(func(tx *gorm.DB) error {\n\t\treturn RenameNode(tx, nodes[0].ID, \"newname\")\n\t})\n\trequire.NoError(t, err)\n\n\tnodes, err = db.ListNodes()\n\trequire.NoError(t, err)\n\tassert.Len(t, nodes, 4)\n\tassert.Equal(t, \"test\", nodes[0].Hostname)\n\tassert.Equal(t, \"newname\", nodes[0].GivenName)\n\n\t// Nodes can reuse name that is no longer used\n\terr = db.Write(func(tx *gorm.DB) error {\n\t\treturn RenameNode(tx, nodes[1].ID, \"test\")\n\t})\n\trequire.NoError(t, err)\n\n\tnodes, err = 
db.ListNodes()\n\trequire.NoError(t, err)\n\tassert.Len(t, nodes, 4)\n\tassert.Equal(t, \"test\", nodes[0].Hostname)\n\tassert.Equal(t, \"newname\", nodes[0].GivenName)\n\tassert.Equal(t, \"test\", nodes[1].GivenName)\n\n\t// Nodes cannot be renamed to used names\n\terr = db.Write(func(tx *gorm.DB) error {\n\t\treturn RenameNode(tx, nodes[0].ID, \"test\")\n\t})\n\trequire.ErrorContains(t, err, \"name is not unique\")\n\n\t// Rename invalid chars\n\terr = db.Write(func(tx *gorm.DB) error {\n\t\treturn RenameNode(tx, nodes[2].ID, \"我的电脑\") //nolint:gosmopolitan // intentional i18n test data\n\t})\n\trequire.ErrorContains(t, err, \"invalid characters\")\n\n\t// Rename too short\n\terr = db.Write(func(tx *gorm.DB) error {\n\t\treturn RenameNode(tx, nodes[3].ID, \"a\")\n\t})\n\trequire.ErrorContains(t, err, \"at least 2 characters\")\n\n\t// Rename with emoji\n\terr = db.Write(func(tx *gorm.DB) error {\n\t\treturn RenameNode(tx, nodes[0].ID, \"hostname-with-💩\")\n\t})\n\trequire.ErrorContains(t, err, \"invalid characters\")\n\n\t// Rename with only emoji\n\terr = db.Write(func(tx *gorm.DB) error {\n\t\treturn RenameNode(tx, nodes[0].ID, \"🚀\")\n\t})\n\tassert.ErrorContains(t, err, \"invalid characters\")\n}\n\nfunc TestRenameNodeComprehensive(t *testing.T) {\n\tdb, err := newSQLiteTestDB()\n\tif err != nil {\n\t\tt.Fatalf(\"creating db: %s\", err)\n\t}\n\n\tuser, err := db.CreateUser(types.User{Name: \"test\"})\n\trequire.NoError(t, err)\n\n\tnode := types.Node{\n\t\tID:             0,\n\t\tMachineKey:     key.NewMachine().Public(),\n\t\tNodeKey:        key.NewNode().Public(),\n\t\tHostname:       \"testnode\",\n\t\tUserID:         &user.ID,\n\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\tHostinfo:       &tailcfg.Hostinfo{},\n\t}\n\n\terr = db.DB.Save(&node).Error\n\trequire.NoError(t, err)\n\n\terr = db.DB.Transaction(func(tx *gorm.DB) error {\n\t\t_, err := RegisterNodeForTest(tx, node, nil, nil)\n\t\treturn err\n\t})\n\trequire.NoError(t, err)\n\n\tnodes, err := db.ListNodes()\n\trequire.NoError(t, err)\n\tassert.Len(t, nodes, 1)\n\n\ttests := []struct {\n\t\tname    string\n\t\tnewName string\n\t\twantErr string\n\t}{\n\t\t{\n\t\t\tname:    \"uppercase_rejected\",\n\t\t\tnewName: \"User2-Host\",\n\t\t\twantErr: \"must be lowercase\",\n\t\t},\n\t\t{\n\t\t\tname:    \"underscore_rejected\",\n\t\t\tnewName: \"test_node\",\n\t\t\twantErr: \"invalid characters\",\n\t\t},\n\t\t{\n\t\t\tname:    \"at_sign_uppercase_rejected\",\n\t\t\tnewName: \"Test@Host\",\n\t\t\twantErr: \"must be lowercase\",\n\t\t},\n\t\t{\n\t\t\tname:    \"at_sign_rejected\",\n\t\t\tnewName: \"test@host\",\n\t\t\twantErr: \"invalid characters\",\n\t\t},\n\t\t{\n\t\t\tname:    \"chinese_chars_with_dash_rejected\",\n\t\t\tnewName: \"server-北京-01\", //nolint:gosmopolitan // intentional i18n test data\n\t\t\twantErr: \"invalid characters\",\n\t\t},\n\t\t{\n\t\t\tname:    \"chinese_only_rejected\",\n\t\t\tnewName: \"我的电脑\", //nolint:gosmopolitan // intentional i18n test data\n\t\t\twantErr: \"invalid characters\",\n\t\t},\n\t\t{\n\t\t\tname:    \"emoji_with_text_rejected\",\n\t\t\tnewName: \"laptop-🚀\",\n\t\t\twantErr: \"invalid characters\",\n\t\t},\n\t\t{\n\t\t\tname:    \"mixed_chinese_emoji_rejected\",\n\t\t\tnewName: \"测试💻机器\", //nolint:gosmopolitan // intentional i18n test data\n\t\t\twantErr: \"invalid characters\",\n\t\t},\n\t\t{\n\t\t\tname:    \"only_emojis_rejected\",\n\t\t\tnewName: \"🎉🎊\",\n\t\t\twantErr: \"invalid characters\",\n\t\t},\n\t\t{\n\t\t\tname:    \"only_at_signs_rejected\",\n\t\t\tnewName: 
\"@@@\",\n\t\t\twantErr: \"invalid characters\",\n\t\t},\n\t\t{\n\t\t\tname:    \"starts_with_dash_rejected\",\n\t\t\tnewName: \"-test\",\n\t\t\twantErr: \"cannot start or end with a hyphen\",\n\t\t},\n\t\t{\n\t\t\tname:    \"ends_with_dash_rejected\",\n\t\t\tnewName: \"test-\",\n\t\t\twantErr: \"cannot start or end with a hyphen\",\n\t\t},\n\t\t{\n\t\t\tname:    \"too_long_hostname_rejected\",\n\t\t\tnewName: \"this-is-a-very-long-hostname-that-exceeds-sixty-three-characters-limit\",\n\t\t\twantErr: \"must not exceed 63 characters\",\n\t\t},\n\t\t{\n\t\t\tname:    \"too_short_hostname_rejected\",\n\t\t\tnewName: \"a\",\n\t\t\twantErr: \"at least 2 characters\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\terr := db.Write(func(tx *gorm.DB) error {\n\t\t\t\treturn RenameNode(tx, nodes[0].ID, tt.newName)\n\t\t\t})\n\t\t\tassert.ErrorContains(t, err, tt.wantErr)\n\t\t})\n\t}\n}\n\nfunc TestListPeers(t *testing.T) {\n\t// Setup test database\n\tdb, err := newSQLiteTestDB()\n\tif err != nil {\n\t\tt.Fatalf(\"creating db: %s\", err)\n\t}\n\n\tuser, err := db.CreateUser(types.User{Name: \"test\"})\n\trequire.NoError(t, err)\n\n\tuser2, err := db.CreateUser(types.User{Name: \"user2\"})\n\trequire.NoError(t, err)\n\n\tnode1 := types.Node{\n\t\tID:             0,\n\t\tMachineKey:     key.NewMachine().Public(),\n\t\tNodeKey:        key.NewNode().Public(),\n\t\tHostname:       \"test1\",\n\t\tUserID:         &user.ID,\n\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\tHostinfo:       &tailcfg.Hostinfo{},\n\t}\n\n\tnode2 := types.Node{\n\t\tID:             0,\n\t\tMachineKey:     key.NewMachine().Public(),\n\t\tNodeKey:        key.NewNode().Public(),\n\t\tHostname:       \"test2\",\n\t\tUserID:         &user2.ID,\n\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\tHostinfo:       &tailcfg.Hostinfo{},\n\t}\n\n\terr = db.DB.Save(&node1).Error\n\trequire.NoError(t, err)\n\n\terr = db.DB.Save(&node2).Error\n\trequire.NoError(t, err)\n\n\terr = db.DB.Transaction(func(tx *gorm.DB) error {\n\t\t_, err := RegisterNodeForTest(tx, node1, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = RegisterNodeForTest(tx, node2, nil, nil)\n\n\t\treturn err\n\t})\n\trequire.NoError(t, err)\n\n\tnodes, err := db.ListNodes()\n\trequire.NoError(t, err)\n\n\tassert.Len(t, nodes, 2)\n\n\t// No parameter means no filter, should return all peers\n\tnodes, err = db.ListPeers(1)\n\trequire.NoError(t, err)\n\tassert.Len(t, nodes, 1)\n\tassert.Equal(t, \"test2\", nodes[0].Hostname)\n\n\t// Empty node list should return all peers\n\tnodes, err = db.ListPeers(1, types.NodeIDs{}...)\n\trequire.NoError(t, err)\n\tassert.Len(t, nodes, 1)\n\tassert.Equal(t, \"test2\", nodes[0].Hostname)\n\n\t// No match in IDs should return empty list and no error\n\tnodes, err = db.ListPeers(1, types.NodeIDs{3, 4, 5}...)\n\trequire.NoError(t, err)\n\tassert.Empty(t, nodes)\n\n\t// Partial match in IDs\n\tnodes, err = db.ListPeers(1, types.NodeIDs{2, 3}...)\n\trequire.NoError(t, err)\n\tassert.Len(t, nodes, 1)\n\tassert.Equal(t, \"test2\", nodes[0].Hostname)\n\n\t// Several matched IDs, but node ID is still filtered out\n\tnodes, err = db.ListPeers(1, types.NodeIDs{1, 2, 3}...)\n\trequire.NoError(t, err)\n\tassert.Len(t, nodes, 1)\n\tassert.Equal(t, \"test2\", nodes[0].Hostname)\n}\n\nfunc TestListNodes(t *testing.T) {\n\t// Setup test database\n\tdb, err := newSQLiteTestDB()\n\tif err != nil {\n\t\tt.Fatalf(\"creating db: %s\", err)\n\t}\n\n\tuser, err := 
db.CreateUser(types.User{Name: \"test\"})\n\trequire.NoError(t, err)\n\n\tuser2, err := db.CreateUser(types.User{Name: \"user2\"})\n\trequire.NoError(t, err)\n\n\tnode1 := types.Node{\n\t\tID:             0,\n\t\tMachineKey:     key.NewMachine().Public(),\n\t\tNodeKey:        key.NewNode().Public(),\n\t\tHostname:       \"test1\",\n\t\tUserID:         &user.ID,\n\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\tHostinfo:       &tailcfg.Hostinfo{},\n\t}\n\n\tnode2 := types.Node{\n\t\tID:             0,\n\t\tMachineKey:     key.NewMachine().Public(),\n\t\tNodeKey:        key.NewNode().Public(),\n\t\tHostname:       \"test2\",\n\t\tUserID:         &user2.ID,\n\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\tHostinfo:       &tailcfg.Hostinfo{},\n\t}\n\n\terr = db.DB.Save(&node1).Error\n\trequire.NoError(t, err)\n\n\terr = db.DB.Save(&node2).Error\n\trequire.NoError(t, err)\n\n\terr = db.DB.Transaction(func(tx *gorm.DB) error {\n\t\t_, err := RegisterNodeForTest(tx, node1, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = RegisterNodeForTest(tx, node2, nil, nil)\n\n\t\treturn err\n\t})\n\trequire.NoError(t, err)\n\n\tnodes, err := db.ListNodes()\n\trequire.NoError(t, err)\n\n\tassert.Len(t, nodes, 2)\n\n\t// No parameter means no filter, should return all nodes\n\tnodes, err = db.ListNodes()\n\trequire.NoError(t, err)\n\tassert.Len(t, nodes, 2)\n\tassert.Equal(t, \"test1\", nodes[0].Hostname)\n\tassert.Equal(t, \"test2\", nodes[1].Hostname)\n\n\t// Empty node list should return all nodes\n\tnodes, err = db.ListNodes(types.NodeIDs{}...)\n\trequire.NoError(t, err)\n\tassert.Len(t, nodes, 2)\n\tassert.Equal(t, \"test1\", nodes[0].Hostname)\n\tassert.Equal(t, \"test2\", nodes[1].Hostname)\n\n\t// No match in IDs should return empty list and no error\n\tnodes, err = db.ListNodes(types.NodeIDs{3, 4, 5}...)\n\trequire.NoError(t, err)\n\tassert.Empty(t, nodes)\n\n\t// Partial match in IDs\n\tnodes, err = db.ListNodes(types.NodeIDs{2, 3}...)\n\trequire.NoError(t, err)\n\tassert.Len(t, nodes, 1)\n\tassert.Equal(t, \"test2\", nodes[0].Hostname)\n\n\t// Several matched IDs\n\tnodes, err = db.ListNodes(types.NodeIDs{1, 2, 3}...)\n\trequire.NoError(t, err)\n\tassert.Len(t, nodes, 2)\n\tassert.Equal(t, \"test1\", nodes[0].Hostname)\n\tassert.Equal(t, \"test2\", nodes[1].Hostname)\n}\n"
  },
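  {
    "path": "hscontrol/db/testip_example_test.go",
    "content": "package db\n\nimport (\n\t\"fmt\"\n\t\"net/netip\"\n)\n\n// Example_testIPMapping is an illustrative sketch, not part of the upstream\n// test suite: it re-derives the sequential mapping that allocateTestIPs in\n// node.go uses for test nodes. The node ID is split into a high and a low\n// byte, giving 100.64.high.low for IPv4 and fd7a:115c:a1e0::high:low for\n// IPv6. The sample ID 300 is arbitrary.\nfunc Example_testIPMapping() {\n\tconst nodeID = 300 // high byte 1, low byte 44\n\n\thigh := byte(nodeID / 256)\n\tlow := byte(nodeID % 256)\n\n\tipv4 := netip.AddrFrom4([4]byte{100, 64, high, low})\n\tipv6 := netip.AddrFrom16([16]byte{0xfd, 0x7a, 0x11, 0x5c, 0xa1, 0xe0, 0, 0, 0, 0, 0, 0, 0, 0, high, low})\n\n\tfmt.Println(ipv4)\n\tfmt.Println(ipv6)\n\t// Output:\n\t// 100.64.1.44\n\t// fd7a:115c:a1e0::12c\n}\n"
  },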
  {
    "path": "hscontrol/db/policy.go",
    "content": "package db\n\nimport (\n\t\"errors\"\n\t\"os\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"gorm.io/gorm\"\n\t\"gorm.io/gorm/clause\"\n)\n\n// SetPolicy sets the policy in the database.\nfunc (hsdb *HSDatabase) SetPolicy(policy string) (*types.Policy, error) {\n\t// Create a new policy.\n\tp := types.Policy{\n\t\tData: policy,\n\t}\n\n\terr := hsdb.DB.Clauses(clause.Returning{}).Create(&p).Error\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &p, nil\n}\n\n// GetPolicy returns the latest policy in the database.\nfunc (hsdb *HSDatabase) GetPolicy() (*types.Policy, error) {\n\treturn GetPolicy(hsdb.DB)\n}\n\n// GetPolicy returns the latest policy from the database.\n// This standalone function can be used in contexts where HSDatabase is not available,\n// such as during migrations.\nfunc GetPolicy(tx *gorm.DB) (*types.Policy, error) {\n\tvar p types.Policy\n\n\t// Query:\n\t// SELECT * FROM policies ORDER BY id DESC LIMIT 1;\n\terr := tx.\n\t\tOrder(\"id DESC\").\n\t\tLimit(1).\n\t\tFirst(&p).Error\n\tif err != nil {\n\t\tif errors.Is(err, gorm.ErrRecordNotFound) {\n\t\t\treturn nil, types.ErrPolicyNotFound\n\t\t}\n\n\t\treturn nil, err\n\t}\n\n\treturn &p, nil\n}\n\n// PolicyBytes loads policy configuration from file or database based on the configured mode.\n// Returns nil if no policy is configured, which is valid.\n// This standalone function can be used in contexts where HSDatabase is not available,\n// such as during migrations.\nfunc PolicyBytes(tx *gorm.DB, cfg *types.Config) ([]byte, error) {\n\tswitch cfg.Policy.Mode {\n\tcase types.PolicyModeFile:\n\t\tpath := cfg.Policy.Path\n\n\t\t// It is fine to start headscale without a policy file.\n\t\tif len(path) == 0 {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\tabsPath := util.AbsolutePathFromConfigPath(path)\n\n\t\treturn os.ReadFile(absPath)\n\n\tcase types.PolicyModeDB:\n\t\tp, err := GetPolicy(tx)\n\t\tif err != nil {\n\t\t\tif errors.Is(err, types.ErrPolicyNotFound) {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif p.Data == \"\" {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\treturn []byte(p.Data), nil\n\t}\n\n\treturn nil, nil\n}\n"
  },
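  {
    "path": "hscontrol/db/policy_example_test.go",
    "content": "package db\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\n// TestPolicyLatestWins is an illustrative sketch, not part of the upstream\n// test suite: it exercises the behaviour documented in policy.go, where\n// SetPolicy appends a new row and GetPolicy returns only the most recent one\n// (SELECT ... ORDER BY id DESC LIMIT 1). The policy bodies are throwaway\n// examples.\nfunc TestPolicyLatestWins(t *testing.T) {\n\tdb, err := newSQLiteTestDB()\n\trequire.NoError(t, err)\n\n\t// With no policy stored, GetPolicy reports types.ErrPolicyNotFound.\n\t_, err = db.GetPolicy()\n\trequire.Error(t, err)\n\n\t_, err = db.SetPolicy(`{\"acls\": []}`)\n\trequire.NoError(t, err)\n\n\tlatest, err := db.SetPolicy(`{\"acls\": [{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}]}`)\n\trequire.NoError(t, err)\n\n\tgot, err := db.GetPolicy()\n\trequire.NoError(t, err)\n\tassert.Equal(t, latest.Data, got.Data)\n}\n"
  },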
  {
    "path": "hscontrol/db/preauth_keys.go",
    "content": "package db\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"slices\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"golang.org/x/crypto/bcrypt\"\n\t\"gorm.io/gorm\"\n\t\"tailscale.com/util/set\"\n)\n\nvar (\n\tErrPreAuthKeyNotFound          = errors.New(\"auth-key not found\")\n\tErrPreAuthKeyExpired           = errors.New(\"auth-key expired\")\n\tErrSingleUseAuthKeyHasBeenUsed = errors.New(\"auth-key has already been used\")\n\tErrUserMismatch                = errors.New(\"user mismatch\")\n\tErrPreAuthKeyACLTagInvalid     = errors.New(\"auth-key tag is invalid\")\n)\n\nfunc (hsdb *HSDatabase) CreatePreAuthKey(\n\tuid *types.UserID,\n\treusable bool,\n\tephemeral bool,\n\texpiration *time.Time,\n\taclTags []string,\n) (*types.PreAuthKeyNew, error) {\n\treturn Write(hsdb.DB, func(tx *gorm.DB) (*types.PreAuthKeyNew, error) {\n\t\treturn CreatePreAuthKey(tx, uid, reusable, ephemeral, expiration, aclTags)\n\t})\n}\n\nconst (\n\tauthKeyPrefix       = \"hskey-auth-\"\n\tauthKeyPrefixLength = 12\n\tauthKeyLength       = 64\n)\n\n// CreatePreAuthKey creates a new PreAuthKey in a user, and returns it.\n// The uid parameter can be nil for system-created tagged keys.\n// For tagged keys, uid tracks \"created by\" (who created the key).\n// For user-owned keys, uid tracks the node owner.\nfunc CreatePreAuthKey(\n\ttx *gorm.DB,\n\tuid *types.UserID,\n\treusable bool,\n\tephemeral bool,\n\texpiration *time.Time,\n\taclTags []string,\n) (*types.PreAuthKeyNew, error) {\n\t// Validate: must be tagged OR user-owned, not neither\n\tif uid == nil && len(aclTags) == 0 {\n\t\treturn nil, ErrPreAuthKeyNotTaggedOrOwned\n\t}\n\n\tvar (\n\t\tuser   *types.User\n\t\tuserID *uint\n\t)\n\n\tif uid != nil {\n\t\tvar err error\n\n\t\tuser, err = GetUserByID(tx, *uid)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tuserID = &user.ID\n\t}\n\n\t// Remove duplicates and sort for consistency\n\taclTags = set.SetOf(aclTags).Slice()\n\tslices.Sort(aclTags)\n\n\t// TODO(kradalby): factor out and create a reusable tag validation,\n\t// check if there is one in Tailscale's lib.\n\tfor _, tag := range aclTags {\n\t\tif !strings.HasPrefix(tag, \"tag:\") {\n\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\"%w: '%s' did not begin with 'tag:'\",\n\t\t\t\tErrPreAuthKeyACLTagInvalid,\n\t\t\t\ttag,\n\t\t\t)\n\t\t}\n\t}\n\n\tnow := time.Now().UTC()\n\n\tprefix, err := util.GenerateRandomStringURLSafe(authKeyPrefixLength)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Validate generated prefix (should always be valid, but be defensive)\n\tif len(prefix) != authKeyPrefixLength {\n\t\treturn nil, fmt.Errorf(\"%w: generated prefix has invalid length: expected %d, got %d\", ErrPreAuthKeyFailedToParse, authKeyPrefixLength, len(prefix))\n\t}\n\n\tif !isValidBase64URLSafe(prefix) {\n\t\treturn nil, fmt.Errorf(\"%w: generated prefix contains invalid characters\", ErrPreAuthKeyFailedToParse)\n\t}\n\n\ttoBeHashed, err := util.GenerateRandomStringURLSafe(authKeyLength)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Validate generated hash (should always be valid, but be defensive)\n\tif len(toBeHashed) != authKeyLength {\n\t\treturn nil, fmt.Errorf(\"%w: generated hash has invalid length: expected %d, got %d\", ErrPreAuthKeyFailedToParse, authKeyLength, len(toBeHashed))\n\t}\n\n\tif !isValidBase64URLSafe(toBeHashed) {\n\t\treturn nil, fmt.Errorf(\"%w: generated hash contains invalid characters\", 
ErrPreAuthKeyFailedToParse)\n\t}\n\n\tkeyStr := authKeyPrefix + prefix + \"-\" + toBeHashed\n\n\thash, err := bcrypt.GenerateFromPassword([]byte(toBeHashed), bcrypt.DefaultCost)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tkey := types.PreAuthKey{\n\t\tUserID:     userID, // nil for system-created keys, or \"created by\" for tagged keys\n\t\tUser:       user,   // nil for system-created keys\n\t\tReusable:   reusable,\n\t\tEphemeral:  ephemeral,\n\t\tCreatedAt:  &now,\n\t\tExpiration: expiration,\n\t\tTags:       aclTags, // empty for user-owned keys\n\t\tPrefix:     prefix,  // Store prefix\n\t\tHash:       hash,    // Store hash\n\t}\n\n\tif err := tx.Save(&key).Error; err != nil { //nolint:noinlineerr\n\t\treturn nil, fmt.Errorf(\"creating key in database: %w\", err)\n\t}\n\n\treturn &types.PreAuthKeyNew{\n\t\tID:         key.ID,\n\t\tKey:        keyStr,\n\t\tReusable:   key.Reusable,\n\t\tEphemeral:  key.Ephemeral,\n\t\tTags:       key.Tags,\n\t\tExpiration: key.Expiration,\n\t\tCreatedAt:  key.CreatedAt,\n\t\tUser:       key.User,\n\t}, nil\n}\n\nfunc (hsdb *HSDatabase) ListPreAuthKeys() ([]types.PreAuthKey, error) {\n\treturn Read(hsdb.DB, ListPreAuthKeys)\n}\n\n// ListPreAuthKeys returns all PreAuthKeys in the database.\nfunc ListPreAuthKeys(tx *gorm.DB) ([]types.PreAuthKey, error) {\n\tvar keys []types.PreAuthKey\n\n\terr := tx.Preload(\"User\").Find(&keys).Error\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn keys, nil\n}\n\nvar (\n\tErrPreAuthKeyFailedToParse    = errors.New(\"failed to parse auth-key\")\n\tErrPreAuthKeyNotTaggedOrOwned = errors.New(\"auth-key must be either tagged or owned by user\")\n)\n\nfunc findAuthKey(tx *gorm.DB, keyStr string) (*types.PreAuthKey, error) {\n\tvar pak types.PreAuthKey\n\n\t// Validate input is not empty\n\tif keyStr == \"\" {\n\t\treturn nil, ErrPreAuthKeyFailedToParse\n\t}\n\n\t_, prefixAndHash, found := strings.Cut(keyStr, authKeyPrefix)\n\n\tif !found {\n\t\t// Legacy format (plaintext) - backwards compatibility\n\t\terr := tx.Preload(\"User\").First(&pak, \"key = ?\", keyStr).Error\n\t\tif err != nil {\n\t\t\treturn nil, ErrPreAuthKeyNotFound\n\t\t}\n\n\t\treturn &pak, nil\n\t}\n\n\t// New format: hskey-auth-{12-char-prefix}-{64-char-hash}\n\t// Expected minimum length: 12 (prefix) + 1 (separator) + 64 (hash) = 77\n\tconst expectedMinLength = authKeyPrefixLength + 1 + authKeyLength\n\tif len(prefixAndHash) < expectedMinLength {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"%w: key too short, expected at least %d chars after prefix, got %d\",\n\t\t\tErrPreAuthKeyFailedToParse,\n\t\t\texpectedMinLength,\n\t\t\tlen(prefixAndHash),\n\t\t)\n\t}\n\n\t// Use fixed-length parsing instead of separator-based to handle dashes in base64 URL-safe\n\tprefix := prefixAndHash[:authKeyPrefixLength]\n\n\t// Validate separator at expected position\n\tif prefixAndHash[authKeyPrefixLength] != '-' {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"%w: expected separator '-' at position %d, got '%c'\",\n\t\t\tErrPreAuthKeyFailedToParse,\n\t\t\tauthKeyPrefixLength,\n\t\t\tprefixAndHash[authKeyPrefixLength],\n\t\t)\n\t}\n\n\thash := prefixAndHash[authKeyPrefixLength+1:]\n\n\t// Validate hash length\n\tif len(hash) != authKeyLength {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"%w: hash length mismatch, expected %d chars, got %d\",\n\t\t\tErrPreAuthKeyFailedToParse,\n\t\t\tauthKeyLength,\n\t\t\tlen(hash),\n\t\t)\n\t}\n\n\t// Validate prefix contains only base64 URL-safe characters\n\tif !isValidBase64URLSafe(prefix) {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"%w: prefix 
contains invalid characters (expected base64 URL-safe: A-Za-z0-9_-)\",\n\t\t\tErrPreAuthKeyFailedToParse,\n\t\t)\n\t}\n\n\t// Validate hash contains only base64 URL-safe characters\n\tif !isValidBase64URLSafe(hash) {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"%w: hash contains invalid characters (expected base64 URL-safe: A-Za-z0-9_-)\",\n\t\t\tErrPreAuthKeyFailedToParse,\n\t\t)\n\t}\n\n\t// Look up key by prefix\n\terr := tx.Preload(\"User\").First(&pak, \"prefix = ?\", prefix).Error\n\tif err != nil {\n\t\treturn nil, ErrPreAuthKeyNotFound\n\t}\n\n\t// Verify hash matches\n\terr = bcrypt.CompareHashAndPassword(pak.Hash, []byte(hash))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid auth key: %w\", err)\n\t}\n\n\treturn &pak, nil\n}\n\n// isValidBase64URLSafe checks if a string contains only base64 URL-safe characters.\nfunc isValidBase64URLSafe(s string) bool {\n\tfor _, c := range s {\n\t\tif (c < 'A' || c > 'Z') && (c < 'a' || c > 'z') && (c < '0' || c > '9') && c != '-' && c != '_' {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\nfunc (hsdb *HSDatabase) GetPreAuthKey(key string) (*types.PreAuthKey, error) {\n\treturn GetPreAuthKey(hsdb.DB, key)\n}\n\n// GetPreAuthKey returns a PreAuthKey for a given key. The caller is responsible\n// for checking if the key is usable (expired or used).\nfunc GetPreAuthKey(tx *gorm.DB, key string) (*types.PreAuthKey, error) {\n\treturn findAuthKey(tx, key)\n}\n\n// DestroyPreAuthKey destroys a preauthkey. Returns error if the PreAuthKey\n// does not exist. This also clears the auth_key_id on any nodes that reference\n// this key.\nfunc DestroyPreAuthKey(tx *gorm.DB, id uint64) error {\n\treturn tx.Transaction(func(db *gorm.DB) error {\n\t\t// First, clear the foreign key reference on any nodes using this key\n\t\terr := db.Model(&types.Node{}).\n\t\t\tWhere(\"auth_key_id = ?\", id).\n\t\t\tUpdate(\"auth_key_id\", nil).Error\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"clearing auth_key_id on nodes: %w\", err)\n\t\t}\n\n\t\t// Then delete the pre-auth key\n\t\terr = tx.Unscoped().Delete(&types.PreAuthKey{}, id).Error\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n}\n\nfunc (hsdb *HSDatabase) ExpirePreAuthKey(id uint64) error {\n\treturn hsdb.Write(func(tx *gorm.DB) error {\n\t\treturn ExpirePreAuthKey(tx, id)\n\t})\n}\n\nfunc (hsdb *HSDatabase) DeletePreAuthKey(id uint64) error {\n\treturn hsdb.Write(func(tx *gorm.DB) error {\n\t\treturn DestroyPreAuthKey(tx, id)\n\t})\n}\n\n// UsePreAuthKey marks a PreAuthKey as used.\nfunc UsePreAuthKey(tx *gorm.DB, k *types.PreAuthKey) error {\n\terr := tx.Model(k).Update(\"used\", true).Error\n\tif err != nil {\n\t\treturn fmt.Errorf(\"updating key used status in database: %w\", err)\n\t}\n\n\tk.Used = true\n\n\treturn nil\n}\n\n// ExpirePreAuthKey marks a PreAuthKey as expired.\nfunc ExpirePreAuthKey(tx *gorm.DB, id uint64) error {\n\tnow := time.Now()\n\treturn tx.Model(&types.PreAuthKey{}).Where(\"id = ?\", id).Update(\"expiration\", now).Error\n}\n"
  },
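  {
    "path": "hscontrol/db/preauth_key_format_example_test.go",
    "content": "package db\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\n// Example_authKeyFormat is an illustrative sketch, not part of the upstream\n// test suite: it walks through the wire format documented in preauth_keys.go,\n// hskey-auth-{12-char-prefix}-{64-char-secret}, using the package constants\n// and the same fixed-length parsing as findAuthKey. The all-\"A\"/all-\"b\" key\n// is fabricated; real keys are random, and only the prefix plus a bcrypt hash\n// of the secret are stored in the database.\nfunc Example_authKeyFormat() {\n\tkey := authKeyPrefix + strings.Repeat(\"A\", authKeyPrefixLength) + \"-\" + strings.Repeat(\"b\", authKeyLength)\n\n\t// Fixed-length parsing: prefix and secret may themselves contain dashes\n\t// (base64 URL-safe), so the separator position is fixed, not searched.\n\t_, prefixAndSecret, _ := strings.Cut(key, authKeyPrefix)\n\tprefix := prefixAndSecret[:authKeyPrefixLength]\n\tsecret := prefixAndSecret[authKeyPrefixLength+1:]\n\n\tfmt.Println(prefix, len(secret))\n\t// Output: AAAAAAAAAAAA 64\n}\n"
  },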
  {
    "path": "hscontrol/db/preauth_keys_test.go",
    "content": "package db\n\nimport (\n\t\"fmt\"\n\t\"slices\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestCreatePreAuthKey(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\ttest func(*testing.T, *HSDatabase)\n\t}{\n\t\t{\n\t\t\tname: \"error_invalid_user_id\",\n\t\t\ttest: func(t *testing.T, db *HSDatabase) {\n\t\t\t\tt.Helper()\n\n\t\t\t\t_, err := db.CreatePreAuthKey(new(types.UserID(12345)), true, false, nil, nil)\n\t\t\t\tassert.Error(t, err)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"success_create_and_list\",\n\t\t\ttest: func(t *testing.T, db *HSDatabase) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tuser, err := db.CreateUser(types.User{Name: \"test\"})\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tkey, err := db.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.NotEmpty(t, key.Key)\n\n\t\t\t\t// List keys for the user\n\t\t\t\tkeys, err := db.ListPreAuthKeys()\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.Len(t, keys, 1)\n\n\t\t\t\t// Verify User association is populated\n\t\t\t\tassert.Equal(t, user.ID, keys[0].User.ID)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tdb, err := newSQLiteTestDB()\n\t\t\trequire.NoError(t, err)\n\n\t\t\ttt.test(t, db)\n\t\t})\n\t}\n}\n\nfunc TestPreAuthKeyACLTags(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\ttest func(*testing.T, *HSDatabase)\n\t}{\n\t\t{\n\t\t\tname: \"reject_malformed_tags\",\n\t\t\ttest: func(t *testing.T, db *HSDatabase) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tuser, err := db.CreateUser(types.User{Name: \"test-tags-1\"})\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t_, err = db.CreatePreAuthKey(user.TypedID(), false, false, nil, []string{\"badtag\"})\n\t\t\t\tassert.Error(t, err)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"deduplicate_and_sort_tags\",\n\t\t\ttest: func(t *testing.T, db *HSDatabase) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tuser, err := db.CreateUser(types.User{Name: \"test-tags-2\"})\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\texpectedTags := []string{\"tag:test1\", \"tag:test2\"}\n\t\t\t\ttagsWithDuplicate := []string{\"tag:test1\", \"tag:test2\", \"tag:test2\"}\n\n\t\t\t\t_, err = db.CreatePreAuthKey(user.TypedID(), false, false, nil, tagsWithDuplicate)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tlistedPaks, err := db.ListPreAuthKeys()\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.Len(t, listedPaks, 1)\n\n\t\t\t\tgotTags := listedPaks[0].Proto().GetAclTags()\n\t\t\t\tslices.Sort(gotTags)\n\t\t\t\tassert.Equal(t, expectedTags, gotTags)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tdb, err := newSQLiteTestDB()\n\t\t\trequire.NoError(t, err)\n\n\t\t\ttt.test(t, db)\n\t\t})\n\t}\n}\n\nfunc TestCannotDeleteAssignedPreAuthKey(t *testing.T) {\n\tdb, err := newSQLiteTestDB()\n\trequire.NoError(t, err)\n\tuser, err := db.CreateUser(types.User{Name: \"test8\"})\n\trequire.NoError(t, err)\n\n\tkey, err := db.CreatePreAuthKey(user.TypedID(), false, false, nil, []string{\"tag:good\"})\n\trequire.NoError(t, err)\n\n\tnode := types.Node{\n\t\tID:             0,\n\t\tHostname:       \"testest\",\n\t\tUserID:         &user.ID,\n\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\tAuthKeyID:      new(key.ID),\n\t}\n\tdb.DB.Save(&node)\n\n\terr = 
db.DB.Delete(&types.PreAuthKey{ID: key.ID}).Error\n\trequire.ErrorContains(t, err, \"constraint failed: FOREIGN KEY constraint failed\")\n}\n\nfunc TestPreAuthKeyAuthentication(t *testing.T) {\n\tdb, err := newSQLiteTestDB()\n\trequire.NoError(t, err)\n\n\tuser := db.CreateUserForTest(\"test-user\")\n\n\ttests := []struct {\n\t\tname            string\n\t\tsetupKey        func() string // Returns key string to test\n\t\twantFindErr     bool          // Error when finding the key\n\t\twantValidateErr bool          // Error when validating the key\n\t\tvalidateResult  func(*testing.T, *types.PreAuthKey)\n\t}{\n\t\t{\n\t\t\tname: \"legacy_key_plaintext\",\n\t\t\tsetupKey: func() string {\n\t\t\t\t// Insert legacy key directly using GORM (simulate existing production key)\n\t\t\t\t// Note: We use raw SQL to bypass GORM's handling and set prefix to empty string\n\t\t\t\t// which simulates how legacy keys exist in production databases\n\t\t\t\tlegacyKey := \"abc123def456ghi789jkl012mno345pqr678stu901vwx234yz\"\n\t\t\t\tnow := time.Now()\n\n\t\t\t\t// Use raw SQL to insert with empty prefix to avoid UNIQUE constraint\n\t\t\t\terr := db.DB.Exec(`\n\t\t\t\t\tINSERT INTO pre_auth_keys (key, user_id, reusable, ephemeral, used, created_at)\n\t\t\t\t\tVALUES (?, ?, ?, ?, ?, ?)\n\t\t\t\t`, legacyKey, user.ID, true, false, false, now).Error\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\treturn legacyKey\n\t\t\t},\n\t\t\twantFindErr:     false,\n\t\t\twantValidateErr: false,\n\t\t\tvalidateResult: func(t *testing.T, pak *types.PreAuthKey) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tassert.Equal(t, user.ID, *pak.UserID)\n\t\t\t\tassert.NotEmpty(t, pak.Key) // Legacy keys have Key populated\n\t\t\t\tassert.Empty(t, pak.Prefix) // Legacy keys have empty Prefix\n\t\t\t\tassert.Nil(t, pak.Hash)     // Legacy keys have nil Hash\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"new_key_bcrypt\",\n\t\t\tsetupKey: func() string {\n\t\t\t\t// Create new key via API\n\t\t\t\tkeyStr, err := db.CreatePreAuthKey(\n\t\t\t\t\tuser.TypedID(),\n\t\t\t\t\ttrue, false, nil, []string{\"tag:test\"},\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\treturn keyStr.Key\n\t\t\t},\n\t\t\twantFindErr:     false,\n\t\t\twantValidateErr: false,\n\t\t\tvalidateResult: func(t *testing.T, pak *types.PreAuthKey) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tassert.Equal(t, user.ID, *pak.UserID)\n\t\t\t\tassert.Empty(t, pak.Key)       // New keys have empty Key\n\t\t\t\tassert.NotEmpty(t, pak.Prefix) // New keys have Prefix\n\t\t\t\tassert.NotNil(t, pak.Hash)     // New keys have Hash\n\t\t\t\tassert.Len(t, pak.Prefix, 12)  // Prefix is 12 chars\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"new_key_format_validation\",\n\t\t\tsetupKey: func() string {\n\t\t\t\tkeyStr, err := db.CreatePreAuthKey(\n\t\t\t\t\tuser.TypedID(),\n\t\t\t\t\ttrue, false, nil, nil,\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// Verify format: hskey-auth-{12-char-prefix}-{64-char-hash}\n\t\t\t\t// Use fixed-length parsing since prefix/hash can contain dashes (base64 URL-safe)\n\t\t\t\tassert.True(t, strings.HasPrefix(keyStr.Key, \"hskey-auth-\"))\n\n\t\t\t\t// Extract prefix and hash using fixed-length parsing like the real code does\n\t\t\t\t_, prefixAndHash, found := strings.Cut(keyStr.Key, \"hskey-auth-\")\n\t\t\t\tassert.True(t, found)\n\t\t\t\tassert.GreaterOrEqual(t, len(prefixAndHash), 12+1+64) // prefix + '-' + hash minimum\n\n\t\t\t\tprefix := prefixAndHash[:12]\n\t\t\t\tassert.Len(t, prefix, 12)                     // Prefix is 12 chars\n\t\t\t\tassert.Equal(t, byte('-'), 
prefixAndHash[12]) // Separator\n\t\t\t\thash := prefixAndHash[13:]\n\t\t\t\tassert.Len(t, hash, 64) // Hash is 64 chars\n\n\t\t\t\treturn keyStr.Key\n\t\t\t},\n\t\t\twantFindErr:     false,\n\t\t\twantValidateErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"invalid_bcrypt_hash\",\n\t\t\tsetupKey: func() string {\n\t\t\t\t// Create valid key\n\t\t\t\tkey, err := db.CreatePreAuthKey(\n\t\t\t\t\tuser.TypedID(),\n\t\t\t\t\ttrue, false, nil, nil,\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tkeyStr := key.Key\n\n\t\t\t\t// Return key with tampered hash using fixed-length parsing\n\t\t\t\t_, prefixAndHash, _ := strings.Cut(keyStr, \"hskey-auth-\")\n\t\t\t\tprefix := prefixAndHash[:12]\n\n\t\t\t\treturn \"hskey-auth-\" + prefix + \"-\" + \"wrong_hash_here_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\n\t\t\t},\n\t\t\twantFindErr:     true,\n\t\t\twantValidateErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"empty_key\",\n\t\t\tsetupKey: func() string {\n\t\t\t\treturn \"\"\n\t\t\t},\n\t\t\twantFindErr:     true,\n\t\t\twantValidateErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"key_too_short\",\n\t\t\tsetupKey: func() string {\n\t\t\t\treturn \"hskey-auth-short\"\n\t\t\t},\n\t\t\twantFindErr:     true,\n\t\t\twantValidateErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"missing_separator\",\n\t\t\tsetupKey: func() string {\n\t\t\t\treturn \"hskey-auth-ABCDEFGHIJKLabcdefghijklmnopqrstuvwxyz1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\t\t\t},\n\t\t\twantFindErr:     true,\n\t\t\twantValidateErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"hash_too_short\",\n\t\t\tsetupKey: func() string {\n\t\t\t\treturn \"hskey-auth-ABCDEFGHIJKL-short\"\n\t\t\t},\n\t\t\twantFindErr:     true,\n\t\t\twantValidateErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"prefix_with_invalid_chars\",\n\t\t\tsetupKey: func() string {\n\t\t\t\treturn \"hskey-auth-ABC$EF@HIJKL-\" + strings.Repeat(\"a\", 64)\n\t\t\t},\n\t\t\twantFindErr:     true,\n\t\t\twantValidateErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"hash_with_invalid_chars\",\n\t\t\tsetupKey: func() string {\n\t\t\t\treturn \"hskey-auth-ABCDEFGHIJKL-\" + \"invalid$chars\" + strings.Repeat(\"a\", 54)\n\t\t\t},\n\t\t\twantFindErr:     true,\n\t\t\twantValidateErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"prefix_not_found_in_db\",\n\t\t\tsetupKey: func() string {\n\t\t\t\t// Create a validly formatted key but with a prefix that doesn't exist\n\t\t\t\treturn \"hskey-auth-NotInDB12345-\" + strings.Repeat(\"a\", 64)\n\t\t\t},\n\t\t\twantFindErr:     true,\n\t\t\twantValidateErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"expired_legacy_key\",\n\t\t\tsetupKey: func() string {\n\t\t\t\tlegacyKey := \"expired_legacy_key_123456789012345678901234\"\n\t\t\t\tnow := time.Now()\n\t\t\t\texpiration := time.Now().Add(-1 * time.Hour) // Expired 1 hour ago\n\n\t\t\t\t// Use raw SQL to avoid UNIQUE constraint on empty prefix\n\t\t\t\terr := db.DB.Exec(`\n\t\t\t\t\tINSERT INTO pre_auth_keys (key, user_id, reusable, ephemeral, used, created_at, expiration)\n\t\t\t\t\tVALUES (?, ?, ?, ?, ?, ?, ?)\n\t\t\t\t`, legacyKey, user.ID, true, false, false, now, expiration).Error\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\treturn legacyKey\n\t\t\t},\n\t\t\twantFindErr:     false,\n\t\t\twantValidateErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"used_single_use_legacy_key\",\n\t\t\tsetupKey: func() string {\n\t\t\t\tlegacyKey := \"used_legacy_key_123456789012345678901234567\"\n\t\t\t\tnow := time.Now()\n\n\t\t\t\t// Use raw SQL to avoid UNIQUE constraint on empty prefix\n\t\t\t\terr := db.DB.Exec(`\n\t\t\t\t\tINSERT INTO pre_auth_keys (key, 
user_id, reusable, ephemeral, used, created_at)\n\t\t\t\t\tVALUES (?, ?, ?, ?, ?, ?)\n\t\t\t\t`, legacyKey, user.ID, false, false, true, now).Error\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\treturn legacyKey\n\t\t\t},\n\t\t\twantFindErr:     false,\n\t\t\twantValidateErr: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tkeyStr := tt.setupKey()\n\n\t\t\tpak, err := db.GetPreAuthKey(keyStr)\n\n\t\t\tif tt.wantFindErr {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NotNil(t, pak)\n\n\t\t\t// Check validation if needed\n\t\t\tif tt.wantValidateErr {\n\t\t\t\terr := pak.Validate()\n\t\t\t\tassert.Error(t, err)\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif tt.validateResult != nil {\n\t\t\t\ttt.validateResult(t, pak)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestMultipleLegacyKeysAllowed(t *testing.T) {\n\tdb, err := newSQLiteTestDB()\n\trequire.NoError(t, err)\n\n\tuser, err := db.CreateUser(types.User{Name: \"test-legacy\"})\n\trequire.NoError(t, err)\n\n\t// Create multiple legacy keys by directly inserting with empty prefix\n\t// This simulates the migration scenario where existing databases have multiple\n\t// plaintext keys without prefix/hash fields\n\tnow := time.Now()\n\n\tfor i := range 5 {\n\t\tlegacyKey := fmt.Sprintf(\"legacy_key_%d_%s\", i, strings.Repeat(\"x\", 40))\n\n\t\terr := db.DB.Exec(`\n\t\t\tINSERT INTO pre_auth_keys (key, prefix, hash, user_id, reusable, ephemeral, used, created_at)\n\t\t\tVALUES (?, '', NULL, ?, ?, ?, ?, ?)\n\t\t`, legacyKey, user.ID, true, false, false, now).Error\n\t\trequire.NoError(t, err, \"should allow multiple legacy keys with empty prefix\")\n\t}\n\n\t// Verify all legacy keys can be retrieved\n\tvar legacyKeys []types.PreAuthKey\n\n\terr = db.DB.Where(\"prefix = '' OR prefix IS NULL\").Find(&legacyKeys).Error\n\trequire.NoError(t, err)\n\tassert.Len(t, legacyKeys, 5, \"should have created 5 legacy keys\")\n\n\t// Now create new bcrypt-based keys - these should have unique prefixes\n\tkey1, err := db.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)\n\trequire.NoError(t, err)\n\tassert.NotEmpty(t, key1.Key)\n\n\tkey2, err := db.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)\n\trequire.NoError(t, err)\n\tassert.NotEmpty(t, key2.Key)\n\n\t// Verify the new keys have different prefixes\n\tpak1, err := db.GetPreAuthKey(key1.Key)\n\trequire.NoError(t, err)\n\tassert.NotEmpty(t, pak1.Prefix)\n\n\tpak2, err := db.GetPreAuthKey(key2.Key)\n\trequire.NoError(t, err)\n\tassert.NotEmpty(t, pak2.Prefix)\n\n\tassert.NotEqual(t, pak1.Prefix, pak2.Prefix, \"new keys should have unique prefixes\")\n\n\t// Verify we cannot manually insert duplicate non-empty prefixes\n\tduplicatePrefix := \"test_prefix1\"\n\thash1 := []byte(\"hash1\")\n\thash2 := []byte(\"hash2\")\n\n\t// First insert should succeed\n\terr = db.DB.Exec(`\n\t\tINSERT INTO pre_auth_keys (key, prefix, hash, user_id, reusable, ephemeral, used, created_at)\n\t\tVALUES ('', ?, ?, ?, ?, ?, ?, ?)\n\t`, duplicatePrefix, hash1, user.ID, true, false, false, now).Error\n\trequire.NoError(t, err, \"first key with prefix should succeed\")\n\n\t// Second insert with same prefix should fail\n\terr = db.DB.Exec(`\n\t\tINSERT INTO pre_auth_keys (key, prefix, hash, user_id, reusable, ephemeral, used, created_at)\n\t\tVALUES ('', ?, ?, ?, ?, ?, ?, ?)\n\t`, duplicatePrefix, hash2, user.ID, true, false, false, now).Error\n\trequire.Error(t, err, \"duplicate non-empty prefix should be 
rejected\")\n\tassert.Contains(t, err.Error(), \"UNIQUE constraint failed\", \"should fail with UNIQUE constraint error\")\n}\n"
  },
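  {
    "path": "hscontrol/db/preauth_key_format_example_test.go",
    "content": "package db\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\n// NOTE: This example file is an illustrative sketch added alongside the\n// pre-auth key tests; it is not part of the original suite, and the file\n// name is hypothetical. It demonstrates the fixed-length parsing strategy\n// the tests describe for the \"hskey-auth-{12-char-prefix}-{64-char-hash}\"\n// format: fixed offsets are required because the base64 URL-safe alphabet\n// can itself contain '-', so splitting on dashes would be ambiguous.\nfunc Example_parsePreAuthKeyFormat() {\n\tkey := \"hskey-auth-ABCDEFGHIJKL-\" + strings.Repeat(\"a\", 64)\n\n\t// Strip the static \"hskey-auth-\" marker first.\n\t_, prefixAndHash, found := strings.Cut(key, \"hskey-auth-\")\n\tif !found || len(prefixAndHash) < 12+1+64 {\n\t\tfmt.Println(\"invalid key\")\n\t\treturn\n\t}\n\n\t// The prefix is always 12 chars, then a '-' separator, then a 64-char hash.\n\tprefix := prefixAndHash[:12]\n\thash := prefixAndHash[13:]\n\n\tfmt.Println(len(prefix), prefixAndHash[12] == '-', len(hash))\n\t// Output: 12 true 64\n}\n"
  },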
  {
    "path": "hscontrol/db/schema.sql",
    "content": "-- This file is the representation of the SQLite schema of Headscale.\n-- It is the \"source of truth\" and is used to validate any migrations\n-- that are run against the database to ensure it ends in the expected state.\n\nCREATE TABLE migrations(id text,PRIMARY KEY(id));\n\nCREATE TABLE users(\n  id integer PRIMARY KEY AUTOINCREMENT,\n  name text,\n  display_name text,\n  email text,\n  provider_identifier text,\n  provider text,\n  profile_pic_url text,\n\n  created_at datetime,\n  updated_at datetime,\n  deleted_at datetime\n);\nCREATE INDEX idx_users_deleted_at ON users(deleted_at);\n\n\n-- The following three UNIQUE indexes work together to enforce the user identity model:\n--\n-- 1. Users can be either local (provider_identifier is NULL) or from external providers (provider_identifier set)\n-- 2. Each external provider identifier must be unique across the system\n-- 3. Local usernames must be unique among local users\n-- 4. The same username can exist across different providers with different identifiers\n--\n-- Examples:\n-- - Can create local user \"alice\" (provider_identifier=NULL)\n-- - Can create external user \"alice\" with GitHub (name=\"alice\", provider_identifier=\"alice_github\")\n-- - Can create external user \"alice\" with Google (name=\"alice\", provider_identifier=\"alice_google\")\n-- - Cannot create another local user \"alice\" (blocked by idx_name_no_provider_identifier)\n-- - Cannot create another user with provider_identifier=\"alice_github\" (blocked by idx_provider_identifier)\n-- - Cannot create user \"bob\" with provider_identifier=\"alice_github\" (blocked by idx_name_provider_identifier)\nCREATE UNIQUE INDEX idx_provider_identifier ON users(provider_identifier) WHERE provider_identifier IS NOT NULL;\nCREATE UNIQUE INDEX idx_name_provider_identifier ON users(name, provider_identifier);\nCREATE UNIQUE INDEX idx_name_no_provider_identifier ON users(name) WHERE provider_identifier IS NULL;\n\nCREATE TABLE pre_auth_keys(\n  id integer PRIMARY KEY AUTOINCREMENT,\n  key text,\n  prefix text,\n  hash blob,\n  user_id integer,\n  reusable numeric,\n  ephemeral numeric DEFAULT false,\n  used numeric DEFAULT false,\n  tags text,\n  expiration datetime,\n\n  created_at datetime,\n\n  CONSTRAINT fk_pre_auth_keys_user FOREIGN KEY(user_id) REFERENCES users(id) ON DELETE SET NULL\n);\nCREATE UNIQUE INDEX idx_pre_auth_keys_prefix ON pre_auth_keys(prefix) WHERE prefix IS NOT NULL AND prefix != '';\n\nCREATE TABLE api_keys(\n  id integer PRIMARY KEY AUTOINCREMENT,\n  prefix text,\n  hash blob,\n  expiration datetime,\n  last_seen datetime,\n\n  created_at datetime\n);\nCREATE UNIQUE INDEX idx_api_keys_prefix ON api_keys(prefix);\n\nCREATE TABLE nodes(\n  id integer PRIMARY KEY AUTOINCREMENT,\n  machine_key text,\n  node_key text,\n  disco_key text,\n\n  endpoints text,\n  host_info text,\n  ipv4 text,\n  ipv6 text,\n  hostname text,\n  given_name varchar(63),\n  -- user_id is NULL for tagged nodes (owned by tags, not a user).\n  -- Only set for user-owned nodes (no tags).\n  user_id integer,\n  register_method text,\n  tags text,\n  auth_key_id integer,\n  last_seen datetime,\n  expiry datetime,\n  approved_routes text,\n\n  created_at datetime,\n  updated_at datetime,\n  deleted_at datetime,\n\n  CONSTRAINT fk_nodes_user FOREIGN KEY(user_id) REFERENCES users(id) ON DELETE CASCADE,\n  CONSTRAINT fk_nodes_auth_key FOREIGN KEY(auth_key_id) REFERENCES pre_auth_keys(id)\n);\n\nCREATE TABLE policies(\n  id integer PRIMARY KEY AUTOINCREMENT,\n  data text,\n\n  
created_at datetime,\n  updated_at datetime,\n  deleted_at datetime\n);\nCREATE INDEX idx_policies_deleted_at ON policies(deleted_at);\n\nCREATE TABLE database_versions(\n  id integer PRIMARY KEY,\n  version text NOT NULL,\n  updated_at datetime\n);\n"
  },
  {
    "path": "hscontrol/db/sqliteconfig/config.go",
    "content": "// Package sqliteconfig provides type-safe configuration for SQLite databases\n// with proper enum validation and URL generation for modernc.org/sqlite driver.\npackage sqliteconfig\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n)\n\n// Errors returned by config validation.\nvar (\n\tErrPathEmpty           = errors.New(\"path cannot be empty\")\n\tErrBusyTimeoutNegative = errors.New(\"busy_timeout must be >= 0\")\n\tErrInvalidJournalMode  = errors.New(\"invalid journal_mode\")\n\tErrInvalidAutoVacuum   = errors.New(\"invalid auto_vacuum\")\n\tErrWALAutocheckpoint   = errors.New(\"wal_autocheckpoint must be >= -1\")\n\tErrInvalidSynchronous  = errors.New(\"invalid synchronous\")\n\tErrInvalidTxLock       = errors.New(\"invalid txlock\")\n)\n\nconst (\n\t// DefaultBusyTimeout is the default busy timeout in milliseconds.\n\tDefaultBusyTimeout = 10000\n)\n\n// JournalMode represents SQLite journal_mode pragma values.\n// Journal modes control how SQLite handles write transactions and crash recovery.\n//\n// Performance vs Durability Tradeoffs:\n//\n// WAL (Write-Ahead Logging) - Recommended for production:\n//   - Best performance for concurrent reads/writes\n//   - Readers don't block writers, writers don't block readers\n//   - Excellent crash recovery with minimal data loss risk\n//   - Uses additional .wal and .shm files\n//   - Default choice for Headscale production deployments\n//\n// DELETE - Traditional rollback journal:\n//   - Good performance for single-threaded access\n//   - Readers block writers and vice versa\n//   - Reliable crash recovery but with exclusive locking\n//   - Creates temporary journal files during transactions\n//   - Suitable for low-concurrency scenarios\n//\n// TRUNCATE - Similar to DELETE but faster cleanup:\n//   - Slightly better performance than DELETE\n//   - Same concurrency limitations as DELETE\n//   - Faster transaction commit by truncating instead of deleting journal\n//\n// PERSIST - Journal file remains between transactions:\n//   - Avoids file creation/deletion overhead\n//   - Same concurrency limitations as DELETE\n//   - Good for frequent small transactions\n//\n// MEMORY - Journal kept in memory:\n//   - Fastest performance but NO crash recovery\n//   - Data loss risk on power failure or crash\n//   - Only suitable for temporary or non-critical data\n//\n// OFF - No journaling:\n//   - Maximum performance but NO transaction safety\n//   - High risk of database corruption on crash\n//   - Should only be used for read-only or disposable databases\ntype JournalMode string\n\nconst (\n\t// JournalModeWAL enables Write-Ahead Logging (RECOMMENDED for production).\n\t// Best concurrent performance + crash recovery. Uses additional .wal/.shm files.\n\tJournalModeWAL JournalMode = \"WAL\"\n\n\t// JournalModeDelete uses traditional rollback journaling.\n\t// Good single-threaded performance, readers block writers. 
Creates temp journal files.\n\tJournalModeDelete JournalMode = \"DELETE\"\n\n\t// JournalModeTruncate is like DELETE but with faster cleanup.\n\t// Slightly better performance than DELETE, same safety with exclusive locking.\n\tJournalModeTruncate JournalMode = \"TRUNCATE\"\n\n\t// JournalModePersist keeps journal file between transactions.\n\t// Good for frequent transactions, avoids file creation/deletion overhead.\n\tJournalModePersist JournalMode = \"PERSIST\"\n\n\t// JournalModeMemory keeps journal in memory (DANGEROUS).\n\t// Fastest performance but NO crash recovery - data loss on power failure.\n\tJournalModeMemory JournalMode = \"MEMORY\"\n\n\t// JournalModeOff disables journaling entirely (EXTREMELY DANGEROUS).\n\t// Maximum performance but high corruption risk. Only for disposable databases.\n\tJournalModeOff JournalMode = \"OFF\"\n)\n\n// IsValid returns true if the JournalMode is valid.\nfunc (j JournalMode) IsValid() bool {\n\tswitch j {\n\tcase JournalModeWAL, JournalModeDelete, JournalModeTruncate,\n\t\tJournalModePersist, JournalModeMemory, JournalModeOff:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\n// String returns the string representation.\nfunc (j JournalMode) String() string {\n\treturn string(j)\n}\n\n// AutoVacuum represents SQLite auto_vacuum pragma values.\n// Auto-vacuum controls how SQLite reclaims space from deleted data.\n//\n// Performance vs Storage Tradeoffs:\n//\n// INCREMENTAL - Recommended for production:\n//   - Reclaims space gradually during normal operations\n//   - Minimal performance impact on writes\n//   - Database size shrinks automatically over time\n//   - Can manually trigger with PRAGMA incremental_vacuum\n//   - Good balance of space efficiency and performance\n//\n// FULL - Automatic space reclamation:\n//   - Immediately reclaims space on every DELETE/DROP\n//   - Higher write overhead due to page reorganization\n//   - Keeps database file size minimal\n//   - Can cause significant slowdowns on large deletions\n//   - Best for applications with frequent deletes and limited storage\n//\n// NONE - No automatic space reclamation:\n//   - Fastest write performance (no vacuum overhead)\n//   - Database file only grows, never shrinks\n//   - Deleted space is reused but file size remains large\n//   - Requires manual VACUUM to reclaim space\n//   - Best for write-heavy workloads where storage isn't constrained\ntype AutoVacuum string\n\nconst (\n\t// AutoVacuumNone disables automatic space reclamation.\n\t// Fastest writes, file only grows. Requires manual VACUUM to reclaim space.\n\tAutoVacuumNone AutoVacuum = \"NONE\"\n\n\t// AutoVacuumFull immediately reclaims space on every DELETE/DROP.\n\t// Minimal file size but slower writes. 
Can impact performance on large deletions.\n\tAutoVacuumFull AutoVacuum = \"FULL\"\n\n\t// AutoVacuumIncremental reclaims space gradually (RECOMMENDED for production).\n\t// Good balance: minimal write impact, automatic space management over time.\n\tAutoVacuumIncremental AutoVacuum = \"INCREMENTAL\"\n)\n\n// IsValid returns true if the AutoVacuum is valid.\nfunc (a AutoVacuum) IsValid() bool {\n\tswitch a {\n\tcase AutoVacuumNone, AutoVacuumFull, AutoVacuumIncremental:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\n// String returns the string representation.\nfunc (a AutoVacuum) String() string {\n\treturn string(a)\n}\n\n// Synchronous represents SQLite synchronous pragma values.\n// Synchronous mode controls how aggressively SQLite flushes data to disk.\n//\n// Performance vs Durability Tradeoffs:\n//\n// NORMAL - Recommended for production:\n//   - Good balance of performance and safety\n//   - Syncs at critical moments (transaction commits in WAL mode)\n//   - Very low risk of corruption, minimal performance impact\n//   - Safe with WAL mode even with power loss\n//   - Default choice for most production applications\n//\n// FULL - Maximum durability:\n//   - Syncs to disk after every write operation\n//   - Highest data safety, virtually no corruption risk\n//   - Significant performance penalty (up to 50% slower)\n//   - Recommended for critical data where corruption is unacceptable\n//\n// EXTRA - Paranoid mode:\n//   - Even more aggressive syncing than FULL\n//   - Maximum possible data safety\n//   - Severe performance impact\n//   - Only for extremely critical scenarios\n//\n// OFF - Maximum performance, minimum safety:\n//   - No syncing, relies on OS to flush data\n//   - Fastest possible performance\n//   - High risk of corruption on power failure or crash\n//   - Only suitable for non-critical or easily recreatable data\ntype Synchronous string\n\nconst (\n\t// SynchronousOff disables syncing (DANGEROUS).\n\t// Fastest performance but high corruption risk on power failure. Avoid in production.\n\tSynchronousOff Synchronous = \"OFF\"\n\n\t// SynchronousNormal provides balanced performance and safety (RECOMMENDED).\n\t// Good performance with low corruption risk. Safe with WAL mode on power loss.\n\tSynchronousNormal Synchronous = \"NORMAL\"\n\n\t// SynchronousFull provides maximum durability with performance cost.\n\t// Syncs after every write. Up to 50% slower but virtually no corruption risk.\n\tSynchronousFull Synchronous = \"FULL\"\n\n\t// SynchronousExtra provides paranoid-level data safety (EXTREME).\n\t// Maximum safety with severe performance impact. 
Rarely needed in practice.\n\tSynchronousExtra Synchronous = \"EXTRA\"\n)\n\n// IsValid returns true if the Synchronous is valid.\nfunc (s Synchronous) IsValid() bool {\n\tswitch s {\n\tcase SynchronousOff, SynchronousNormal, SynchronousFull, SynchronousExtra:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\n// String returns the string representation.\nfunc (s Synchronous) String() string {\n\treturn string(s)\n}\n\n// TxLock represents SQLite transaction lock mode.\n// Transaction lock mode determines when write locks are acquired during transactions.\n//\n// Lock Acquisition Behavior:\n//\n// DEFERRED - SQLite default, acquire lock lazily:\n//   - Transaction starts without any lock\n//   - First read acquires SHARED lock\n//   - First write attempts to upgrade to RESERVED lock\n//   - If another transaction holds RESERVED: SQLITE_BUSY (potential deadlock)\n//   - Can cause deadlocks when multiple connections attempt concurrent writes\n//\n// IMMEDIATE - Recommended for write-heavy workloads:\n//   - Transaction immediately acquires RESERVED lock at BEGIN\n//   - If lock unavailable, waits up to busy_timeout before failing\n//   - Other writers queue orderly instead of deadlocking\n//   - Prevents the upgrade-lock deadlock scenario\n//   - Slight overhead for read-only transactions that don't need locks\n//\n// EXCLUSIVE - Maximum isolation:\n//   - Transaction immediately acquires EXCLUSIVE lock at BEGIN\n//   - No other connections can read or write\n//   - Highest isolation but lowest concurrency\n//   - Rarely needed in practice\ntype TxLock string\n\nconst (\n\t// TxLockDeferred acquires locks lazily (SQLite default).\n\t// Risk of SQLITE_BUSY deadlocks with concurrent writers. Use for read-heavy workloads.\n\tTxLockDeferred TxLock = \"deferred\"\n\n\t// TxLockImmediate acquires write lock immediately (RECOMMENDED for production).\n\t// Prevents deadlocks by acquiring RESERVED lock at transaction start.\n\t// Writers queue orderly, respecting busy_timeout.\n\tTxLockImmediate TxLock = \"immediate\"\n\n\t// TxLockExclusive acquires exclusive lock immediately.\n\t// Maximum isolation, no concurrent reads or writes. 
Rarely needed.\n\tTxLockExclusive TxLock = \"exclusive\"\n)\n\n// IsValid returns true if the TxLock is valid.\nfunc (t TxLock) IsValid() bool {\n\tswitch t {\n\tcase TxLockDeferred, TxLockImmediate, TxLockExclusive, \"\":\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\n// String returns the string representation.\nfunc (t TxLock) String() string {\n\treturn string(t)\n}\n\n// Config holds SQLite database configuration with type-safe enums.\n// This configuration balances performance, durability, and operational requirements\n// for Headscale's SQLite database usage patterns.\ntype Config struct {\n\tPath              string      // file path or \":memory:\"\n\tBusyTimeout       int         // milliseconds (0 = default/disabled)\n\tJournalMode       JournalMode // journal mode (affects concurrency and crash recovery)\n\tAutoVacuum        AutoVacuum  // auto vacuum mode (affects storage efficiency)\n\tWALAutocheckpoint int         // pages (-1 = default/not set, 0 = disabled, >0 = enabled)\n\tSynchronous       Synchronous // synchronous mode (affects durability vs performance)\n\tForeignKeys       bool        // enable foreign key constraints (data integrity)\n\tTxLock            TxLock      // transaction lock mode (affects write concurrency)\n}\n\n// Default returns the production configuration optimized for Headscale's usage patterns.\n// This configuration prioritizes:\n//   - Concurrent access (WAL mode for multiple readers/writers)\n//   - Data durability with good performance (NORMAL synchronous)\n//   - Automatic space management (INCREMENTAL auto-vacuum)\n//   - Data integrity (foreign key constraints enabled)\n//   - Safe concurrent writes (IMMEDIATE transaction lock)\n//   - Reasonable timeout for busy database scenarios (10s)\nfunc Default(path string) *Config {\n\treturn &Config{\n\t\tPath:              path,\n\t\tBusyTimeout:       DefaultBusyTimeout,\n\t\tJournalMode:       JournalModeWAL,\n\t\tAutoVacuum:        AutoVacuumIncremental,\n\t\tWALAutocheckpoint: 1000,\n\t\tSynchronous:       SynchronousNormal,\n\t\tForeignKeys:       true,\n\t\tTxLock:            TxLockImmediate,\n\t}\n}\n\n// Memory returns a configuration for in-memory databases.\nfunc Memory() *Config {\n\treturn &Config{\n\t\tPath:              \":memory:\",\n\t\tWALAutocheckpoint: -1, // not set, use driver default\n\t\tForeignKeys:       true,\n\t}\n}\n\n// Validate checks if all configuration values are valid.\nfunc (c *Config) Validate() error {\n\tif c.Path == \"\" {\n\t\treturn ErrPathEmpty\n\t}\n\n\tif c.BusyTimeout < 0 {\n\t\treturn fmt.Errorf(\"%w, got %d\", ErrBusyTimeoutNegative, c.BusyTimeout)\n\t}\n\n\tif c.JournalMode != \"\" && !c.JournalMode.IsValid() {\n\t\treturn fmt.Errorf(\"%w: %s\", ErrInvalidJournalMode, c.JournalMode)\n\t}\n\n\tif c.AutoVacuum != \"\" && !c.AutoVacuum.IsValid() {\n\t\treturn fmt.Errorf(\"%w: %s\", ErrInvalidAutoVacuum, c.AutoVacuum)\n\t}\n\n\tif c.WALAutocheckpoint < -1 {\n\t\treturn fmt.Errorf(\"%w, got %d\", ErrWALAutocheckpoint, c.WALAutocheckpoint)\n\t}\n\n\tif c.Synchronous != \"\" && !c.Synchronous.IsValid() {\n\t\treturn fmt.Errorf(\"%w: %s\", ErrInvalidSynchronous, c.Synchronous)\n\t}\n\n\tif c.TxLock != \"\" && !c.TxLock.IsValid() {\n\t\treturn fmt.Errorf(\"%w: %s\", ErrInvalidTxLock, c.TxLock)\n\t}\n\n\treturn nil\n}\n\n// ToURL builds a properly encoded SQLite connection string using _pragma parameters\n// compatible with modernc.org/sqlite driver.\nfunc (c *Config) ToURL() (string, error) {\n\terr := c.Validate()\n\tif err != nil {\n\t\treturn 
\"\", fmt.Errorf(\"invalid config: %w\", err)\n\t}\n\n\tvar pragmas []string\n\n\t// Add pragma parameters only if they're set (non-zero/non-empty)\n\tif c.BusyTimeout > 0 {\n\t\tpragmas = append(pragmas, fmt.Sprintf(\"busy_timeout=%d\", c.BusyTimeout))\n\t}\n\n\tif c.JournalMode != \"\" {\n\t\tpragmas = append(pragmas, fmt.Sprintf(\"journal_mode=%s\", c.JournalMode))\n\t}\n\n\tif c.AutoVacuum != \"\" {\n\t\tpragmas = append(pragmas, fmt.Sprintf(\"auto_vacuum=%s\", c.AutoVacuum))\n\t}\n\n\tif c.WALAutocheckpoint >= 0 {\n\t\tpragmas = append(pragmas, fmt.Sprintf(\"wal_autocheckpoint=%d\", c.WALAutocheckpoint))\n\t}\n\n\tif c.Synchronous != \"\" {\n\t\tpragmas = append(pragmas, fmt.Sprintf(\"synchronous=%s\", c.Synchronous))\n\t}\n\n\tif c.ForeignKeys {\n\t\tpragmas = append(pragmas, \"foreign_keys=ON\")\n\t}\n\n\t// Handle different database types\n\tvar baseURL string\n\tif c.Path == \":memory:\" {\n\t\tbaseURL = \":memory:\"\n\t} else {\n\t\tbaseURL = \"file:\" + c.Path\n\t}\n\n\t// Build query parameters\n\tqueryParts := make([]string, 0, 1+len(pragmas))\n\n\t// Add _txlock first (it's a connection parameter, not a pragma)\n\tif c.TxLock != \"\" {\n\t\tqueryParts = append(queryParts, \"_txlock=\"+string(c.TxLock))\n\t}\n\n\t// Add pragma parameters\n\tfor _, pragma := range pragmas {\n\t\tqueryParts = append(queryParts, \"_pragma=\"+pragma)\n\t}\n\n\tif len(queryParts) > 0 {\n\t\tbaseURL += \"?\" + strings.Join(queryParts, \"&\")\n\t}\n\n\treturn baseURL, nil\n}\n"
  },
  {
    "path": "hscontrol/db/sqliteconfig/config_test.go",
    "content": "package sqliteconfig\n\nimport (\n\t\"testing\"\n)\n\nfunc TestJournalMode(t *testing.T) {\n\ttests := []struct {\n\t\tmode  JournalMode\n\t\tvalid bool\n\t}{\n\t\t{JournalModeWAL, true},\n\t\t{JournalModeDelete, true},\n\t\t{JournalModeTruncate, true},\n\t\t{JournalModePersist, true},\n\t\t{JournalModeMemory, true},\n\t\t{JournalModeOff, true},\n\t\t{JournalMode(\"INVALID\"), false},\n\t\t{JournalMode(\"\"), false},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(string(tt.mode), func(t *testing.T) {\n\t\t\tif got := tt.mode.IsValid(); got != tt.valid {\n\t\t\t\tt.Errorf(\"JournalMode(%q).IsValid() = %v, want %v\", tt.mode, got, tt.valid)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestAutoVacuum(t *testing.T) {\n\ttests := []struct {\n\t\tmode  AutoVacuum\n\t\tvalid bool\n\t}{\n\t\t{AutoVacuumNone, true},\n\t\t{AutoVacuumFull, true},\n\t\t{AutoVacuumIncremental, true},\n\t\t{AutoVacuum(\"INVALID\"), false},\n\t\t{AutoVacuum(\"\"), false},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(string(tt.mode), func(t *testing.T) {\n\t\t\tif got := tt.mode.IsValid(); got != tt.valid {\n\t\t\t\tt.Errorf(\"AutoVacuum(%q).IsValid() = %v, want %v\", tt.mode, got, tt.valid)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestSynchronous(t *testing.T) {\n\ttests := []struct {\n\t\tmode  Synchronous\n\t\tvalid bool\n\t}{\n\t\t{SynchronousOff, true},\n\t\t{SynchronousNormal, true},\n\t\t{SynchronousFull, true},\n\t\t{SynchronousExtra, true},\n\t\t{Synchronous(\"INVALID\"), false},\n\t\t{Synchronous(\"\"), false},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(string(tt.mode), func(t *testing.T) {\n\t\t\tif got := tt.mode.IsValid(); got != tt.valid {\n\t\t\t\tt.Errorf(\"Synchronous(%q).IsValid() = %v, want %v\", tt.mode, got, tt.valid)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestTxLock(t *testing.T) {\n\ttests := []struct {\n\t\tmode  TxLock\n\t\tvalid bool\n\t}{\n\t\t{TxLockDeferred, true},\n\t\t{TxLockImmediate, true},\n\t\t{TxLockExclusive, true},\n\t\t{TxLock(\"\"), true},           // empty is valid (uses driver default)\n\t\t{TxLock(\"IMMEDIATE\"), false}, // uppercase is invalid\n\t\t{TxLock(\"INVALID\"), false},\n\t}\n\n\tfor _, tt := range tests {\n\t\tname := string(tt.mode)\n\t\tif name == \"\" {\n\t\t\tname = \"empty\"\n\t\t}\n\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tif got := tt.mode.IsValid(); got != tt.valid {\n\t\t\t\tt.Errorf(\"TxLock(%q).IsValid() = %v, want %v\", tt.mode, got, tt.valid)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestTxLockString(t *testing.T) {\n\ttests := []struct {\n\t\tmode TxLock\n\t\twant string\n\t}{\n\t\t{TxLockDeferred, \"deferred\"},\n\t\t{TxLockImmediate, \"immediate\"},\n\t\t{TxLockExclusive, \"exclusive\"},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.want, func(t *testing.T) {\n\t\t\tif got := tt.mode.String(); got != tt.want {\n\t\t\t\tt.Errorf(\"TxLock.String() = %q, want %q\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestConfigValidate(t *testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\tconfig  *Config\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname:   \"valid default config\",\n\t\t\tconfig: Default(\"/path/to/db.sqlite\"),\n\t\t},\n\t\t{\n\t\t\tname: \"empty path\",\n\t\t\tconfig: &Config{\n\t\t\t\tPath: \"\",\n\t\t\t},\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"negative busy timeout\",\n\t\t\tconfig: &Config{\n\t\t\t\tPath:        \"/path/to/db.sqlite\",\n\t\t\t\tBusyTimeout: -1,\n\t\t\t},\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"invalid journal mode\",\n\t\t\tconfig: &Config{\n\t\t\t\tPath:        
\"/path/to/db.sqlite\",\n\t\t\t\tJournalMode: JournalMode(\"INVALID\"),\n\t\t\t},\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"invalid txlock\",\n\t\t\tconfig: &Config{\n\t\t\t\tPath:   \"/path/to/db.sqlite\",\n\t\t\t\tTxLock: TxLock(\"INVALID\"),\n\t\t\t},\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"valid txlock immediate\",\n\t\t\tconfig: &Config{\n\t\t\t\tPath:   \"/path/to/db.sqlite\",\n\t\t\t\tTxLock: TxLockImmediate,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\terr := tt.config.Validate()\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"Config.Validate() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestConfigToURL(t *testing.T) {\n\ttests := []struct {\n\t\tname   string\n\t\tconfig *Config\n\t\twant   string\n\t}{\n\t\t{\n\t\t\tname:   \"default config includes txlock immediate\",\n\t\t\tconfig: Default(\"/path/to/db.sqlite\"),\n\t\t\twant:   \"file:/path/to/db.sqlite?_txlock=immediate&_pragma=busy_timeout=10000&_pragma=journal_mode=WAL&_pragma=auto_vacuum=INCREMENTAL&_pragma=wal_autocheckpoint=1000&_pragma=synchronous=NORMAL&_pragma=foreign_keys=ON\",\n\t\t},\n\t\t{\n\t\t\tname:   \"memory config\",\n\t\t\tconfig: Memory(),\n\t\t\twant:   \":memory:?_pragma=foreign_keys=ON\",\n\t\t},\n\t\t{\n\t\t\tname: \"minimal config\",\n\t\t\tconfig: &Config{\n\t\t\t\tPath:              \"/simple/db.sqlite\",\n\t\t\t\tWALAutocheckpoint: -1, // not set\n\t\t\t},\n\t\t\twant: \"file:/simple/db.sqlite\",\n\t\t},\n\t\t{\n\t\t\tname: \"custom config\",\n\t\t\tconfig: &Config{\n\t\t\t\tPath:              \"/custom/db.sqlite\",\n\t\t\t\tBusyTimeout:       5000,\n\t\t\t\tJournalMode:       JournalModeDelete,\n\t\t\t\tWALAutocheckpoint: -1, // not set\n\t\t\t\tSynchronous:       SynchronousFull,\n\t\t\t\tForeignKeys:       true,\n\t\t\t},\n\t\t\twant: \"file:/custom/db.sqlite?_pragma=busy_timeout=5000&_pragma=journal_mode=DELETE&_pragma=synchronous=FULL&_pragma=foreign_keys=ON\",\n\t\t},\n\t\t{\n\t\t\tname: \"memory with custom timeout\",\n\t\t\tconfig: &Config{\n\t\t\t\tPath:              \":memory:\",\n\t\t\t\tBusyTimeout:       2000,\n\t\t\t\tWALAutocheckpoint: -1, // not set\n\t\t\t\tForeignKeys:       true,\n\t\t\t},\n\t\t\twant: \":memory:?_pragma=busy_timeout=2000&_pragma=foreign_keys=ON\",\n\t\t},\n\t\t{\n\t\t\tname: \"wal autocheckpoint zero\",\n\t\t\tconfig: &Config{\n\t\t\t\tPath:              \"/test.db\",\n\t\t\t\tWALAutocheckpoint: 0,\n\t\t\t},\n\t\t\twant: \"file:/test.db?_pragma=wal_autocheckpoint=0\",\n\t\t},\n\t\t{\n\t\t\tname: \"all options\",\n\t\t\tconfig: &Config{\n\t\t\t\tPath:              \"/full.db\",\n\t\t\t\tBusyTimeout:       15000,\n\t\t\t\tJournalMode:       JournalModeWAL,\n\t\t\t\tAutoVacuum:        AutoVacuumFull,\n\t\t\t\tWALAutocheckpoint: 1000,\n\t\t\t\tSynchronous:       SynchronousExtra,\n\t\t\t\tForeignKeys:       true,\n\t\t\t},\n\t\t\twant: \"file:/full.db?_pragma=busy_timeout=15000&_pragma=journal_mode=WAL&_pragma=auto_vacuum=FULL&_pragma=wal_autocheckpoint=1000&_pragma=synchronous=EXTRA&_pragma=foreign_keys=ON\",\n\t\t},\n\t\t{\n\t\t\tname: \"with txlock immediate\",\n\t\t\tconfig: &Config{\n\t\t\t\tPath:              \"/test.db\",\n\t\t\t\tBusyTimeout:       5000,\n\t\t\t\tTxLock:            TxLockImmediate,\n\t\t\t\tWALAutocheckpoint: -1,\n\t\t\t\tForeignKeys:       true,\n\t\t\t},\n\t\t\twant: \"file:/test.db?_txlock=immediate&_pragma=busy_timeout=5000&_pragma=foreign_keys=ON\",\n\t\t},\n\t\t{\n\t\t\tname: \"with txlock 
deferred\",\n\t\t\tconfig: &Config{\n\t\t\t\tPath:              \"/test.db\",\n\t\t\t\tTxLock:            TxLockDeferred,\n\t\t\t\tWALAutocheckpoint: -1,\n\t\t\t\tForeignKeys:       true,\n\t\t\t},\n\t\t\twant: \"file:/test.db?_txlock=deferred&_pragma=foreign_keys=ON\",\n\t\t},\n\t\t{\n\t\t\tname: \"with txlock exclusive\",\n\t\t\tconfig: &Config{\n\t\t\t\tPath:              \"/test.db\",\n\t\t\t\tTxLock:            TxLockExclusive,\n\t\t\t\tWALAutocheckpoint: -1,\n\t\t\t},\n\t\t\twant: \"file:/test.db?_txlock=exclusive\",\n\t\t},\n\t\t{\n\t\t\tname: \"empty txlock omitted from URL\",\n\t\t\tconfig: &Config{\n\t\t\t\tPath:              \"/test.db\",\n\t\t\t\tTxLock:            \"\",\n\t\t\t\tBusyTimeout:       1000,\n\t\t\t\tWALAutocheckpoint: -1,\n\t\t\t\tForeignKeys:       true,\n\t\t\t},\n\t\t\twant: \"file:/test.db?_pragma=busy_timeout=1000&_pragma=foreign_keys=ON\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot, err := tt.config.ToURL()\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"Config.ToURL() error = %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif got != tt.want {\n\t\t\t\tt.Errorf(\"Config.ToURL() = %q, want %q\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestConfigToURLInvalid(t *testing.T) {\n\tconfig := &Config{\n\t\tPath:        \"\",\n\t\tBusyTimeout: -1,\n\t}\n\n\t_, err := config.ToURL()\n\tif err == nil {\n\t\tt.Error(\"Config.ToURL() with invalid config should return error\")\n\t}\n}\n\nfunc TestDefaultConfigHasTxLockImmediate(t *testing.T) {\n\tconfig := Default(\"/test.db\")\n\tif config.TxLock != TxLockImmediate {\n\t\tt.Errorf(\"Default().TxLock = %q, want %q\", config.TxLock, TxLockImmediate)\n\t}\n}\n"
  },
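  {
    "path": "hscontrol/db/sqliteconfig/example_test.go",
    "content": "package sqliteconfig\n\nimport \"fmt\"\n\n// NOTE: The examples in this file are illustrative sketches added for\n// documentation; they are not part of the original package. They show how\n// the two constructors turn into modernc.org/sqlite connection strings.\n// The expected outputs mirror the \"default config includes txlock\n// immediate\" and \"memory config\" cases in config_test.go.\nfunc ExampleDefault() {\n\tcfg := Default(\"/path/to/db.sqlite\")\n\n\turl, err := cfg.ToURL()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(url)\n\t// Output: file:/path/to/db.sqlite?_txlock=immediate&_pragma=busy_timeout=10000&_pragma=journal_mode=WAL&_pragma=auto_vacuum=INCREMENTAL&_pragma=wal_autocheckpoint=1000&_pragma=synchronous=NORMAL&_pragma=foreign_keys=ON\n}\n\nfunc ExampleMemory() {\n\turl, err := Memory().ToURL()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(url)\n\t// Output: :memory:?_pragma=foreign_keys=ON\n}\n"
  },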
  {
    "path": "hscontrol/db/sqliteconfig/integration_test.go",
    "content": "package sqliteconfig\n\nimport (\n\t\"context\"\n\t\"database/sql\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"testing\"\n\n\t_ \"modernc.org/sqlite\"\n)\n\nconst memoryDBPath = \":memory:\"\n\n// TestSQLiteDriverPragmaIntegration verifies that the modernc.org/sqlite driver\n// correctly applies all pragma settings from URL parameters, ensuring they work\n// the same as the old SQL PRAGMA statements approach.\nfunc TestSQLiteDriverPragmaIntegration(t *testing.T) {\n\ttests := []struct {\n\t\tname     string\n\t\tconfig   *Config\n\t\texpected map[string]any\n\t}{\n\t\t{\n\t\t\tname:   \"default configuration\",\n\t\t\tconfig: Default(\"/tmp/test.db\"),\n\t\t\texpected: map[string]any{\n\t\t\t\t\"busy_timeout\":       10000,\n\t\t\t\t\"journal_mode\":       \"wal\",\n\t\t\t\t\"auto_vacuum\":        2, // INCREMENTAL = 2\n\t\t\t\t\"wal_autocheckpoint\": 1000,\n\t\t\t\t\"synchronous\":        1, // NORMAL = 1\n\t\t\t\t\"foreign_keys\":       1, // ON = 1\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:   \"memory database with foreign keys\",\n\t\t\tconfig: Memory(),\n\t\t\texpected: map[string]any{\n\t\t\t\t\"foreign_keys\": 1, // ON = 1\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"custom configuration\",\n\t\t\tconfig: &Config{\n\t\t\t\tPath:              \"/tmp/custom.db\",\n\t\t\t\tBusyTimeout:       5000,\n\t\t\t\tJournalMode:       JournalModeDelete,\n\t\t\t\tAutoVacuum:        AutoVacuumFull,\n\t\t\t\tWALAutocheckpoint: 1000,\n\t\t\t\tSynchronous:       SynchronousFull,\n\t\t\t\tForeignKeys:       true,\n\t\t\t},\n\t\t\texpected: map[string]any{\n\t\t\t\t\"busy_timeout\":       5000,\n\t\t\t\t\"journal_mode\":       \"delete\",\n\t\t\t\t\"auto_vacuum\":        1, // FULL = 1\n\t\t\t\t\"wal_autocheckpoint\": 1000,\n\t\t\t\t\"synchronous\":        2, // FULL = 2\n\t\t\t\t\"foreign_keys\":       1, // ON = 1\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"foreign keys disabled\",\n\t\t\tconfig: &Config{\n\t\t\t\tPath:        \"/tmp/no_fk.db\",\n\t\t\t\tForeignKeys: false,\n\t\t\t},\n\t\t\texpected: map[string]any{\n\t\t\t\t// foreign_keys should not be set (defaults to 0/OFF)\n\t\t\t\t\"foreign_keys\": 0,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\t// Create temporary database file if not memory\n\t\t\tif tt.config.Path == memoryDBPath {\n\t\t\t\t// For memory databases, no changes needed\n\t\t\t} else {\n\t\t\t\ttempDir := t.TempDir()\n\t\t\t\tdbPath := filepath.Join(tempDir, \"test.db\")\n\t\t\t\t// Update config with actual temp path\n\t\t\t\tconfigCopy := *tt.config\n\t\t\t\tconfigCopy.Path = dbPath\n\t\t\t\ttt.config = &configCopy\n\t\t\t}\n\n\t\t\t// Generate URL and open database\n\t\t\turl, err := tt.config.ToURL()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Failed to generate URL: %v\", err)\n\t\t\t}\n\n\t\t\tt.Logf(\"Opening database with URL: %s\", url)\n\n\t\t\tdb, err := sql.Open(\"sqlite\", url)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Failed to open database: %v\", err)\n\t\t\t}\n\t\t\tdefer db.Close()\n\n\t\t\t// Test connection\n\t\t\tctx := context.Background()\n\n\t\t\terr = db.PingContext(ctx)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Failed to ping database: %v\", err)\n\t\t\t}\n\n\t\t\t// Verify each expected pragma setting\n\t\t\tfor pragma, expectedValue := range tt.expected {\n\t\t\t\tt.Run(\"pragma_\"+pragma, func(t *testing.T) {\n\t\t\t\t\tvar actualValue any\n\n\t\t\t\t\tquery := \"PRAGMA \" + pragma\n\n\t\t\t\t\terr := db.QueryRowContext(ctx, query).Scan(&actualValue)\n\t\t\t\t\tif err != nil 
{\n\t\t\t\t\t\tt.Fatalf(\"Failed to query %s: %v\", query, err)\n\t\t\t\t\t}\n\n\t\t\t\t\tt.Logf(\"%s: expected=%v, actual=%v\", pragma, expectedValue, actualValue)\n\n\t\t\t\t\t// Handle type conversion for comparison\n\t\t\t\t\tswitch expected := expectedValue.(type) {\n\t\t\t\t\tcase int:\n\t\t\t\t\t\tif actual, ok := actualValue.(int64); ok {\n\t\t\t\t\t\t\tif int64(expected) != actual {\n\t\t\t\t\t\t\t\tt.Errorf(\"%s: expected %d, got %d\", pragma, expected, actual)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tt.Errorf(\"%s: expected int %d, got %T %v\", pragma, expected, actualValue, actualValue)\n\t\t\t\t\t\t}\n\t\t\t\t\tcase string:\n\t\t\t\t\t\tif actual, ok := actualValue.(string); ok {\n\t\t\t\t\t\t\tif expected != actual {\n\t\t\t\t\t\t\t\tt.Errorf(\"%s: expected %q, got %q\", pragma, expected, actual)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tt.Errorf(\"%s: expected string %q, got %T %v\", pragma, expected, actualValue, actualValue)\n\t\t\t\t\t\t}\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tt.Errorf(\"Unsupported expected type for %s: %T\", pragma, expectedValue)\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestForeignKeyConstraintEnforcement verifies that foreign key constraints\n// are actually enforced when enabled via URL parameters.\nfunc TestForeignKeyConstraintEnforcement(t *testing.T) {\n\ttempDir := t.TempDir()\n\n\tdbPath := filepath.Join(tempDir, \"fk_test.db\")\n\tconfig := Default(dbPath)\n\n\turl, err := config.ToURL()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to generate URL: %v\", err)\n\t}\n\n\tdb, err := sql.Open(\"sqlite\", url)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to open database: %v\", err)\n\t}\n\tdefer db.Close()\n\n\tctx := context.Background()\n\n\t// Create test tables with foreign key relationship\n\tschema := `\n\t\tCREATE TABLE parent (\n\t\t\tid INTEGER PRIMARY KEY,\n\t\t\tname TEXT NOT NULL\n\t\t);\n\n\t\tCREATE TABLE child (\n\t\t\tid INTEGER PRIMARY KEY,\n\t\t\tparent_id INTEGER NOT NULL,\n\t\t\tname TEXT NOT NULL,\n\t\t\tFOREIGN KEY (parent_id) REFERENCES parent(id)\n\t\t);\n\t`\n\n\t_, err = db.ExecContext(ctx, schema)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create schema: %v\", err)\n\t}\n\n\t// Insert parent record\n\t_, err = db.ExecContext(ctx, \"INSERT INTO parent (id, name) VALUES (1, 'Parent 1')\")\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to insert parent: %v\", err)\n\t}\n\n\t// Test 1: Valid foreign key should work\n\t_, err = db.ExecContext(ctx, \"INSERT INTO child (id, parent_id, name) VALUES (1, 1, 'Child 1')\")\n\tif err != nil {\n\t\tt.Fatalf(\"Valid foreign key insert failed: %v\", err)\n\t}\n\n\t// Test 2: Invalid foreign key should fail\n\t_, err = db.ExecContext(ctx, \"INSERT INTO child (id, parent_id, name) VALUES (2, 999, 'Child 2')\")\n\tif err == nil {\n\t\tt.Error(\"Expected foreign key constraint violation, but insert succeeded\")\n\t} else if !contains(err.Error(), \"FOREIGN KEY constraint failed\") {\n\t\tt.Errorf(\"Expected foreign key constraint error, got: %v\", err)\n\t} else {\n\t\tt.Logf(\"✓ Foreign key constraint correctly enforced: %v\", err)\n\t}\n\n\t// Test 3: Deleting referenced parent should fail\n\t_, err = db.ExecContext(ctx, \"DELETE FROM parent WHERE id = 1\")\n\tif err == nil {\n\t\tt.Error(\"Expected foreign key constraint violation when deleting referenced parent\")\n\t} else if !contains(err.Error(), \"FOREIGN KEY constraint failed\") {\n\t\tt.Errorf(\"Expected foreign key constraint error on delete, got: %v\", err)\n\t} else {\n\t\tt.Logf(\"✓ Foreign key 
constraint correctly prevented parent deletion: %v\", err)\n\t}\n}\n\n// TestJournalModeValidation verifies that the journal_mode setting is applied correctly.\nfunc TestJournalModeValidation(t *testing.T) {\n\tmodes := []struct {\n\t\tmode     JournalMode\n\t\texpected string\n\t}{\n\t\t{JournalModeWAL, \"wal\"},\n\t\t{JournalModeDelete, \"delete\"},\n\t\t{JournalModeTruncate, \"truncate\"},\n\t\t{JournalModeMemory, \"memory\"},\n\t}\n\n\tfor _, tt := range modes {\n\t\tt.Run(string(tt.mode), func(t *testing.T) {\n\t\t\ttempDir := t.TempDir()\n\n\t\t\tdbPath := filepath.Join(tempDir, \"journal_test.db\")\n\t\t\tconfig := &Config{\n\t\t\t\tPath:        dbPath,\n\t\t\t\tJournalMode: tt.mode,\n\t\t\t\tForeignKeys: true,\n\t\t\t}\n\n\t\t\turl, err := config.ToURL()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Failed to generate URL: %v\", err)\n\t\t\t}\n\n\t\t\tdb, err := sql.Open(\"sqlite\", url)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Failed to open database: %v\", err)\n\t\t\t}\n\t\t\tdefer db.Close()\n\n\t\t\tvar actualMode string\n\n\t\t\terr = db.QueryRowContext(context.Background(), \"PRAGMA journal_mode\").Scan(&actualMode)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Failed to query journal_mode: %v\", err)\n\t\t\t}\n\n\t\t\tif actualMode != tt.expected {\n\t\t\t\tt.Errorf(\"journal_mode: expected %q, got %q\", tt.expected, actualMode)\n\t\t\t} else {\n\t\t\t\tt.Logf(\"✓ journal_mode correctly set to: %s\", actualMode)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// contains checks if a string contains a substring (helper function).\nfunc contains(str, substr string) bool {\n\treturn strings.Contains(str, substr)\n}\n"
  },
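  {
    "path": "hscontrol/db/sqliteconfig/pragma_example_test.go",
    "content": "package sqliteconfig\n\nimport (\n\t\"database/sql\"\n\t\"fmt\"\n\n\t_ \"modernc.org/sqlite\"\n)\n\n// NOTE: This example is an illustrative sketch added for documentation; it\n// is not part of the original package, and the file name is hypothetical.\n// It condenses what TestSQLiteDriverPragmaIntegration verifies in full:\n// opening a database with a generated URL and confirming that a _pragma\n// parameter (here foreign_keys from Memory()) is actually applied by the\n// modernc.org/sqlite driver.\nfunc Example_openWithPragmas() {\n\turl, err := Memory().ToURL()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdb, err := sql.Open(\"sqlite\", url)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer db.Close()\n\n\tvar foreignKeys int\n\terr = db.QueryRow(\"PRAGMA foreign_keys\").Scan(&foreignKeys)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(\"foreign_keys =\", foreignKeys)\n\t// Output: foreign_keys = 1\n}\n"
  },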
  {
    "path": "hscontrol/db/suite_test.go",
    "content": "package db\n\nimport (\n\t\"log\"\n\t\"net/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/rs/zerolog\"\n\t\"zombiezen.com/go/postgrestest\"\n)\n\nfunc newSQLiteTestDB() (*HSDatabase, error) {\n\ttmpDir, err := os.MkdirTemp(\"\", \"headscale-db-test-*\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.Printf(\"database path: %s\", tmpDir+\"/headscale_test.db\")\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\n\tdb, err := NewHeadscaleDatabase(\n\t\t&types.Config{\n\t\t\tDatabase: types.DatabaseConfig{\n\t\t\t\tType: types.DatabaseSqlite,\n\t\t\t\tSqlite: types.SqliteConfig{\n\t\t\t\t\tPath: tmpDir + \"/headscale_test.db\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tPolicy: types.PolicyConfig{\n\t\t\t\tMode: types.PolicyModeDB,\n\t\t\t},\n\t\t},\n\t\temptyCache(),\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn db, nil\n}\n\nfunc newPostgresTestDB(t *testing.T) *HSDatabase {\n\tt.Helper()\n\n\treturn newHeadscaleDBFromPostgresURL(t, newPostgresDBForTest(t))\n}\n\nfunc newPostgresDBForTest(t *testing.T) *url.URL {\n\tt.Helper()\n\n\tctx := t.Context()\n\n\tsrv, err := postgrestest.Start(ctx)\n\tif err != nil {\n\t\tt.Skipf(\"start postgres: %s\", err)\n\t}\n\n\tt.Cleanup(srv.Cleanup)\n\n\tu, err := srv.CreateDatabase(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tt.Logf(\"created local postgres: %s\", u)\n\tpu, _ := url.Parse(u)\n\n\treturn pu\n}\n\nfunc newHeadscaleDBFromPostgresURL(t *testing.T, pu *url.URL) *HSDatabase {\n\tt.Helper()\n\n\tpass, _ := pu.User.Password()\n\tport, _ := strconv.Atoi(pu.Port())\n\n\tdb, err := NewHeadscaleDatabase(\n\t\t&types.Config{\n\t\t\tDatabase: types.DatabaseConfig{\n\t\t\t\tType: types.DatabasePostgres,\n\t\t\t\tPostgres: types.PostgresConfig{\n\t\t\t\t\tHost: pu.Hostname(),\n\t\t\t\t\tUser: pu.User.Username(),\n\t\t\t\t\tName: strings.TrimLeft(pu.Path, \"/\"),\n\t\t\t\t\tPass: pass,\n\t\t\t\t\tPort: port,\n\t\t\t\t\tSsl:  \"disable\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tPolicy: types.PolicyConfig{\n\t\t\t\tMode: types.PolicyModeDB,\n\t\t\t},\n\t\t},\n\t\temptyCache(),\n\t)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treturn db\n}\n"
  },
  {
    "path": "hscontrol/db/testdata/sqlite/failing-node-preauth-constraint_dump.sql",
    "content": "PRAGMA foreign_keys=OFF;\nBEGIN TRANSACTION;\nCREATE TABLE IF NOT EXISTS \"api_keys\" (`id` integer,`prefix` text UNIQUE,`hash` blob,`created_at` datetime,`expiration` datetime,`last_seen` datetime,PRIMARY KEY (`id`));\nINSERT INTO api_keys VALUES(1,'hFKcRjLyfw',X'243261243130242e68554a6739332e6658333061326457723637464f2e6146424c74726e4542474c6c746437597a4253534d6f3677326d3944664d61','2023-04-09 22:34:28.624250346+00:00','2023-07-08 22:34:28.559681279+00:00',NULL);\nINSERT INTO api_keys VALUES(2,'88Wbitubag',X'243261243130246f7932506d53375033334b733861376e7745434f3665674e776e517659374b5474326a30686958446c6c55696c3568513948307665','2024-07-28 21:59:38.786936789+00:00','2024-10-26 21:59:38.724189498+00:00',NULL);\nCREATE TABLE `migrations` (`id` text,PRIMARY KEY (`id`));\nINSERT INTO migrations VALUES('202312101416');\nINSERT INTO migrations VALUES('202312101430');\nINSERT INTO migrations VALUES('202402151347');\nINSERT INTO migrations VALUES('2024041121742');\nINSERT INTO migrations VALUES('202406021630');\nINSERT INTO migrations VALUES('202409271400');\nINSERT INTO migrations VALUES('202407191627');\nINSERT INTO migrations VALUES('202408181235');\nINSERT INTO migrations VALUES('202501221827');\nINSERT INTO migrations VALUES('202501311657');\nCREATE TABLE `policies` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`data` text);\nCREATE TABLE IF NOT EXISTS \"pre_auth_keys\"  (`id` integer,`key` text,`user_id` integer,`reusable` numeric,`ephemeral` numeric DEFAULT false,`used` numeric DEFAULT false,`created_at` datetime,`expiration` datetime,`tags` text,PRIMARY KEY (`id`),CONSTRAINT `fk_pre_auth_keys_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE SET NULL);\nCREATE TABLE IF NOT EXISTS \"nodes\"  (`id` integer,`machine_key` text,`node_key` text,`disco_key` text,`hostname` text,`given_name` varchar(63),`user_id` integer,`register_method` text,`forced_tags` text,`auth_key_id` integer,`last_seen` datetime,`expiry` datetime,`host_info` text,`endpoints` text,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`ipv4` text,`ipv6` text,PRIMARY KEY (`id`),CONSTRAINT `fk_nodes_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_nodes_auth_key` FOREIGN KEY (`auth_key_id`) REFERENCES `pre_auth_keys`(`id`));\nINSERT INTO nodes VALUES(1,'mkey:a0ab77456320823945ae0331823e3c0d516fae9585bd42698dfa1ac3d7679e63','nodekey:7c84167ab68f494942de14deb83587fd841843de2bac105b6c670048c160554f','discokey:53075b3c6cad3b62a2a29caea61beeb93f66b8c75cb89dac465236a5bbf57759','hostname_1','given_name1',1,'cli','[\"tag:sshclient\",\"tag:ssh\"]',0,'2025-02-05 16:46:13.960213431+00:00','0001-01-01 00:00:00+00:00','{}','[]','2023-03-30 23:18:17.612740902+00:00','2025-02-05 16:46:13.960284003+00:00',NULL,'100.64.0.1','fd7a:115c:a1e0::1');\nINSERT INTO nodes VALUES(2,'mkey:f63dda7495db68077080364ba4109f48dee7a59310b9ed4968beb40d038eb622','nodekey:8186817337049e092e6ea02507091d8e9686924d46ad0e74a90370ec0113c440','discokey:28a2df7e73b8196c6859c94329443a28f9605b2b83541b685c1db666bd835775','hostname_2','given_name2',1,'cli','[\"tag:sshclient\"]',0,'2024-07-30 17:37:24.266006395+00:00','0001-01-01 00:00:00+00:00','{}','[]','2023-03-30 23:20:01.05202704+00:00','2024-07-30 17:37:24.266082813+00:00',NULL,'100.64.0.2','fd7a:115c:a1e0::2');\nINSERT INTO nodes 
VALUES(3,'mkey:0af53661fedf5143af3ea79e596928302e51c9fc9f0ea9ed1f2bb7d54778b80e','nodekey:8defd8272fd2851601158b2444fc8d1ab12b6187ec5db154b7a83bb75b2ce952','discokey:ba9d1ffac1997acbd8d281b8711699daa77ed91691772683ebbfdaafa2518a52','hostname_3','given_name3',1,'cli','[\"tag:ssh\"]',0,'2025-02-05 16:48:00.460606473+00:00','0001-01-01 00:00:00+00:00','{}','[]','2023-03-30 23:36:04.930844845+00:00','2025-02-05 16:48:00.460679869+00:00',NULL,'100.64.0.3','fd7a:115c:a1e0::3');\nINSERT INTO nodes VALUES(4,'mkey:365e2055485de89e65e63c13e426b1ec5d5606327d63955b38be1d3f8cbbac6c','nodekey:996b9814e405f572fc0338f91b0c53f3a3a9a5b1ae0d2846d179195778d50909','discokey:ed72cb545b46b3e2ed0332f9cb4d7f4e774ea5834e2cbadc43c9bf7918ef2503','hostname_4','given_name4',1,'cli','[\"tag:ssh\"]',0,'2025-02-05 16:48:00.460607206+00:00','0001-01-01 00:00:00+00:00','{}','[]','2023-03-31 15:51:56.149734121+00:00','2025-02-05 16:48:00.46092239+00:00',NULL,'100.64.0.4','fd7a:115c:a1e0::4');\nINSERT INTO nodes VALUES(5,'mkey:1d04be488182a66cd7df4596ac59a40613eac6465a331af9ac6c91bb70754a25','nodekey:9b617f3e7941ac70b76f0e40c55543173e0432d4a9bb8bcb8b25d93b60a5da0e','discokey:15834557115cb889e8362e7f2cae1cfd7e78e754cb7310cff6b5c5b5d3027e35','hostname_5','given_name5',1,'cli','[\"tag:sshclient\",\"tag:ssh\"]',0,'2023-04-21 15:07:38.796218079+00:00','0001-01-01 00:00:00+00:00','{}','[]','2023-04-21 13:16:19.148836255+00:00','2024-04-17 15:39:21.339518261+00:00',NULL,'100.64.0.5','fd7a:115c:a1e0::5');\nINSERT INTO nodes VALUES(6,'mkey:ed649503734e31eafad7f884ac8ee36ba0922c57cda8b6946cb439b1ed645676','nodekey:200484e66b43012eca81ec8850e4b5d1dd8fa538dfebdaac718f202cd2f1f955','discokey:600651ed2436ce5a49e71b3980f93070d888e6d65d608a64be29fdeed9f7bd6b','hostname_6','given_name6',1,'cli','[\"tag:ssh\"]',0,'2023-07-09 16:56:18.876491583+00:00','0001-01-01 00:00:00+00:00','{}','[]','2023-05-07 10:30:54.520661376+00:00','2024-04-17 15:39:23.182648721+00:00',NULL,'100.64.0.6','fd7a:115c:a1e0::6');\nCREATE TABLE IF NOT EXISTS \"routes\"  (`id` integer,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`node_id` integer NOT NULL,`prefix` text,`advertised` numeric,`enabled` numeric,`is_primary` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_nodes_routes` FOREIGN KEY (`node_id`) REFERENCES `nodes`(`id`) ON DELETE CASCADE);\nCREATE TABLE IF NOT EXISTS \"users\"  (`id` integer,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`name` text UNIQUE,`display_name` text,`email` text,`provider_identifier` text,`provider` text,`profile_pic_url` text,PRIMARY KEY (`id`));\nINSERT INTO users VALUES(1,'2023-03-30 23:08:54.151102578+00:00','2023-03-30 23:08:54.151102578+00:00',NULL,'username_1','display_name_1','email_1@example.com',NULL,NULL,NULL);\nDELETE FROM sqlite_sequence;\nCREATE UNIQUE INDEX `idx_api_keys_prefix` ON `api_keys`(`prefix`);\nCREATE INDEX `idx_policies_deleted_at` ON `policies`(`deleted_at`);\nCREATE INDEX `idx_routes_deleted_at` ON `routes`(`deleted_at`);\nCREATE INDEX `idx_users_deleted_at` ON `users`(`deleted_at`);\nCOMMIT;\n"
  },
  {
    "path": "hscontrol/db/testdata/sqlite/headscale_0.26.0-beta.1_dump.sql",
    "content": "PRAGMA foreign_keys=OFF;\nBEGIN TRANSACTION;\nCREATE TABLE `migrations` (`id` text,PRIMARY KEY (`id`));\nINSERT INTO migrations VALUES('202312101416');\nINSERT INTO migrations VALUES('202312101430');\nINSERT INTO migrations VALUES('202402151347');\nINSERT INTO migrations VALUES('2024041121742');\nINSERT INTO migrations VALUES('202406021630');\nINSERT INTO migrations VALUES('202409271400');\nINSERT INTO migrations VALUES('202407191627');\nINSERT INTO migrations VALUES('202408181235');\nINSERT INTO migrations VALUES('202501221827');\nINSERT INTO migrations VALUES('202501311657');\nINSERT INTO migrations VALUES('202502070949');\nINSERT INTO migrations VALUES('202502131714');\nINSERT INTO migrations VALUES('202502171819');\nCREATE TABLE `users` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`name` text,`display_name` text,`email` text,`provider_identifier` text,`provider` text,`profile_pic_url` text);\nCREATE TABLE `pre_auth_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`key` text,`user_id` integer,`reusable` numeric,`ephemeral` numeric DEFAULT false,`used` numeric DEFAULT false,`tags` text,`created_at` datetime,`expiration` datetime,CONSTRAINT `fk_pre_auth_keys_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE SET NULL);\nCREATE TABLE `api_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`prefix` text,`hash` blob,`created_at` datetime,`expiration` datetime,`last_seen` datetime);\nCREATE TABLE `policies` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`data` text);\nCREATE TABLE IF NOT EXISTS \"nodes\"  (`id` integer PRIMARY KEY AUTOINCREMENT,`machine_key` text,`node_key` text,`disco_key` text,`endpoints` text,`host_info` text,`ipv4` text,`ipv6` text,`hostname` text,`given_name` varchar(63),`user_id` integer,`register_method` text,`forced_tags` text,`auth_key_id` integer,`expiry` datetime,`approved_routes` text,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,CONSTRAINT `fk_nodes_auth_key` FOREIGN KEY (`auth_key_id`) REFERENCES `pre_auth_keys`(`id`),CONSTRAINT `fk_nodes_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE CASCADE);\nDELETE FROM sqlite_sequence;\nINSERT INTO sqlite_sequence VALUES('nodes',0);\nCREATE INDEX `idx_users_deleted_at` ON `users`(`deleted_at`);\nCREATE UNIQUE INDEX `idx_api_keys_prefix` ON `api_keys`(`prefix`);\nCREATE INDEX `idx_policies_deleted_at` ON `policies`(`deleted_at`);\nCREATE UNIQUE INDEX idx_provider_identifier ON users (provider_identifier) WHERE provider_identifier IS NOT NULL;\nCREATE UNIQUE INDEX idx_name_provider_identifier ON users (name,provider_identifier);\nCREATE UNIQUE INDEX idx_name_no_provider_identifier ON users (name) WHERE provider_identifier IS NULL;\nCOMMIT;\n"
  },
  {
    "path": "hscontrol/db/testdata/sqlite/headscale_0.26.0-beta.2_dump.sql",
    "content": "PRAGMA foreign_keys=OFF;\nBEGIN TRANSACTION;\nCREATE TABLE `migrations` (`id` text,PRIMARY KEY (`id`));\nINSERT INTO migrations VALUES('202312101416');\nINSERT INTO migrations VALUES('202312101430');\nINSERT INTO migrations VALUES('202402151347');\nINSERT INTO migrations VALUES('2024041121742');\nINSERT INTO migrations VALUES('202406021630');\nINSERT INTO migrations VALUES('202409271400');\nINSERT INTO migrations VALUES('202407191627');\nINSERT INTO migrations VALUES('202408181235');\nINSERT INTO migrations VALUES('202501221827');\nINSERT INTO migrations VALUES('202501311657');\nINSERT INTO migrations VALUES('202502070949');\nINSERT INTO migrations VALUES('202502131714');\nINSERT INTO migrations VALUES('202502171819');\nINSERT INTO migrations VALUES('202505091439');\nCREATE TABLE `users` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`name` text,`display_name` text,`email` text,`provider_identifier` text,`provider` text,`profile_pic_url` text);\nCREATE TABLE `pre_auth_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`key` text,`user_id` integer,`reusable` numeric,`ephemeral` numeric DEFAULT false,`used` numeric DEFAULT false,`tags` text,`created_at` datetime,`expiration` datetime,CONSTRAINT `fk_pre_auth_keys_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE SET NULL);\nCREATE TABLE `api_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`prefix` text,`hash` blob,`created_at` datetime,`expiration` datetime,`last_seen` datetime);\nCREATE TABLE IF NOT EXISTS \"nodes\"  (`id` integer PRIMARY KEY AUTOINCREMENT,`machine_key` text,`node_key` text,`disco_key` text,`endpoints` text,`host_info` text,`ipv4` text,`ipv6` text,`hostname` text,`given_name` varchar(63),`user_id` integer,`register_method` text,`forced_tags` text,`auth_key_id` integer,`expiry` datetime,`last_seen` datetime,`approved_routes` text,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,CONSTRAINT `fk_nodes_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_nodes_auth_key` FOREIGN KEY (`auth_key_id`) REFERENCES `pre_auth_keys`(`id`));\nCREATE TABLE `policies` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`data` text);\nDELETE FROM sqlite_sequence;\nINSERT INTO sqlite_sequence VALUES('nodes',0);\nCREATE INDEX `idx_users_deleted_at` ON `users`(`deleted_at`);\nCREATE UNIQUE INDEX `idx_api_keys_prefix` ON `api_keys`(`prefix`);\nCREATE INDEX `idx_policies_deleted_at` ON `policies`(`deleted_at`);\nCREATE UNIQUE INDEX idx_provider_identifier ON users (provider_identifier) WHERE provider_identifier IS NOT NULL;\nCREATE UNIQUE INDEX idx_name_provider_identifier ON users (name,provider_identifier);\nCREATE UNIQUE INDEX idx_name_no_provider_identifier ON users (name) WHERE provider_identifier IS NULL;\nCOMMIT;\n"
  },
  {
    "path": "hscontrol/db/testdata/sqlite/headscale_0.26.0_dump.sql",
    "content": "PRAGMA foreign_keys=OFF;\nBEGIN TRANSACTION;\nCREATE TABLE `migrations` (`id` text,PRIMARY KEY (`id`));\nINSERT INTO migrations VALUES('202312101416');\nINSERT INTO migrations VALUES('202312101430');\nINSERT INTO migrations VALUES('202402151347');\nINSERT INTO migrations VALUES('2024041121742');\nINSERT INTO migrations VALUES('202406021630');\nINSERT INTO migrations VALUES('202409271400');\nINSERT INTO migrations VALUES('202407191627');\nINSERT INTO migrations VALUES('202408181235');\nINSERT INTO migrations VALUES('202501221827');\nINSERT INTO migrations VALUES('202501311657');\nINSERT INTO migrations VALUES('202502070949');\nINSERT INTO migrations VALUES('202502131714');\nINSERT INTO migrations VALUES('202502171819');\nINSERT INTO migrations VALUES('202505091439');\nINSERT INTO migrations VALUES('202505141324');\nCREATE TABLE `users` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`name` text,`display_name` text,`email` text,`provider_identifier` text,`provider` text,`profile_pic_url` text);\nCREATE TABLE `pre_auth_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`key` text,`user_id` integer,`reusable` numeric,`ephemeral` numeric DEFAULT false,`used` numeric DEFAULT false,`tags` text,`created_at` datetime,`expiration` datetime,CONSTRAINT `fk_pre_auth_keys_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE SET NULL);\nCREATE TABLE `api_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`prefix` text,`hash` blob,`created_at` datetime,`expiration` datetime,`last_seen` datetime);\nCREATE TABLE IF NOT EXISTS \"nodes\"  (`id` integer PRIMARY KEY AUTOINCREMENT,`machine_key` text,`node_key` text,`disco_key` text,`endpoints` text,`host_info` text,`ipv4` text,`ipv6` text,`hostname` text,`given_name` varchar(63),`user_id` integer,`register_method` text,`forced_tags` text,`auth_key_id` integer,`expiry` datetime,`last_seen` datetime,`approved_routes` text,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,CONSTRAINT `fk_nodes_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_nodes_auth_key` FOREIGN KEY (`auth_key_id`) REFERENCES `pre_auth_keys`(`id`));\nCREATE TABLE `policies` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`data` text);\nDELETE FROM sqlite_sequence;\nINSERT INTO sqlite_sequence VALUES('nodes',0);\nCREATE INDEX `idx_users_deleted_at` ON `users`(`deleted_at`);\nCREATE UNIQUE INDEX `idx_api_keys_prefix` ON `api_keys`(`prefix`);\nCREATE INDEX `idx_policies_deleted_at` ON `policies`(`deleted_at`);\nCREATE UNIQUE INDEX idx_provider_identifier ON users (provider_identifier) WHERE provider_identifier IS NOT NULL;\nCREATE UNIQUE INDEX idx_name_provider_identifier ON users (name,provider_identifier);\nCREATE UNIQUE INDEX idx_name_no_provider_identifier ON users (name) WHERE provider_identifier IS NULL;\nCOMMIT;\n"
  },
  {
    "path": "hscontrol/db/testdata/sqlite/headscale_0.26.1_dump-litestream.sql",
    "content": "PRAGMA foreign_keys=OFF;\nBEGIN TRANSACTION;\nCREATE TABLE `migrations` (`id` text,PRIMARY KEY (`id`));\nINSERT INTO migrations VALUES('202312101416');\nINSERT INTO migrations VALUES('202312101430');\nINSERT INTO migrations VALUES('202402151347');\nINSERT INTO migrations VALUES('2024041121742');\nINSERT INTO migrations VALUES('202406021630');\nINSERT INTO migrations VALUES('202409271400');\nINSERT INTO migrations VALUES('202407191627');\nINSERT INTO migrations VALUES('202408181235');\nINSERT INTO migrations VALUES('202501221827');\nINSERT INTO migrations VALUES('202501311657');\nINSERT INTO migrations VALUES('202502070949');\nINSERT INTO migrations VALUES('202502131714');\nINSERT INTO migrations VALUES('202502171819');\nINSERT INTO migrations VALUES('202505091439');\nINSERT INTO migrations VALUES('202505141324');\nCREATE TABLE `users` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`name` text,`display_name` text,`email` text,`provider_identifier` text,`provider` text,`profile_pic_url` text);\nCREATE TABLE `pre_auth_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`key` text,`user_id` integer,`reusable` numeric,`ephemeral` numeric DEFAULT false,`used` numeric DEFAULT false,`tags` text,`created_at` datetime,`expiration` datetime,CONSTRAINT `fk_pre_auth_keys_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE SET NULL);\nCREATE TABLE `api_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`prefix` text,`hash` blob,`created_at` datetime,`expiration` datetime,`last_seen` datetime);\nCREATE TABLE IF NOT EXISTS \"nodes\"  (`id` integer PRIMARY KEY AUTOINCREMENT,`machine_key` text,`node_key` text,`disco_key` text,`endpoints` text,`host_info` text,`ipv4` text,`ipv6` text,`hostname` text,`given_name` varchar(63),`user_id` integer,`register_method` text,`forced_tags` text,`auth_key_id` integer,`expiry` datetime,`last_seen` datetime,`approved_routes` text,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,CONSTRAINT `fk_nodes_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_nodes_auth_key` FOREIGN KEY (`auth_key_id`) REFERENCES `pre_auth_keys`(`id`));\nCREATE TABLE `policies` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`data` text);\nDELETE FROM sqlite_sequence;\nINSERT INTO sqlite_sequence VALUES('nodes',0);\nCREATE INDEX `idx_users_deleted_at` ON `users`(`deleted_at`);\nCREATE UNIQUE INDEX `idx_api_keys_prefix` ON `api_keys`(`prefix`);\nCREATE INDEX `idx_policies_deleted_at` ON `policies`(`deleted_at`);\nCREATE UNIQUE INDEX idx_provider_identifier ON users (provider_identifier) WHERE provider_identifier IS NOT NULL;\nCREATE UNIQUE INDEX idx_name_provider_identifier ON users (name,provider_identifier);\nCREATE UNIQUE INDEX idx_name_no_provider_identifier ON users (name) WHERE provider_identifier IS NULL;\nCREATE TABLE _litestream_seq (id INTEGER PRIMARY KEY, seq INTEGER);\nCREATE TABLE _litestream_lock (id INTEGER);\nCOMMIT;\n"
  },
  {
    "path": "hscontrol/db/testdata/sqlite/headscale_0.26.1_dump.sql",
    "content": "PRAGMA foreign_keys=OFF;\nBEGIN TRANSACTION;\nCREATE TABLE `migrations` (`id` text,PRIMARY KEY (`id`));\nINSERT INTO migrations VALUES('202312101416');\nINSERT INTO migrations VALUES('202312101430');\nINSERT INTO migrations VALUES('202402151347');\nINSERT INTO migrations VALUES('2024041121742');\nINSERT INTO migrations VALUES('202406021630');\nINSERT INTO migrations VALUES('202409271400');\nINSERT INTO migrations VALUES('202407191627');\nINSERT INTO migrations VALUES('202408181235');\nINSERT INTO migrations VALUES('202501221827');\nINSERT INTO migrations VALUES('202501311657');\nINSERT INTO migrations VALUES('202502070949');\nINSERT INTO migrations VALUES('202502131714');\nINSERT INTO migrations VALUES('202502171819');\nINSERT INTO migrations VALUES('202505091439');\nINSERT INTO migrations VALUES('202505141324');\nCREATE TABLE `users` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`name` text,`display_name` text,`email` text,`provider_identifier` text,`provider` text,`profile_pic_url` text);\nCREATE TABLE `pre_auth_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`key` text,`user_id` integer,`reusable` numeric,`ephemeral` numeric DEFAULT false,`used` numeric DEFAULT false,`tags` text,`created_at` datetime,`expiration` datetime,CONSTRAINT `fk_pre_auth_keys_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE SET NULL);\nCREATE TABLE `api_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`prefix` text,`hash` blob,`created_at` datetime,`expiration` datetime,`last_seen` datetime);\nCREATE TABLE IF NOT EXISTS \"nodes\"  (`id` integer PRIMARY KEY AUTOINCREMENT,`machine_key` text,`node_key` text,`disco_key` text,`endpoints` text,`host_info` text,`ipv4` text,`ipv6` text,`hostname` text,`given_name` varchar(63),`user_id` integer,`register_method` text,`forced_tags` text,`auth_key_id` integer,`expiry` datetime,`last_seen` datetime,`approved_routes` text,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,CONSTRAINT `fk_nodes_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_nodes_auth_key` FOREIGN KEY (`auth_key_id`) REFERENCES `pre_auth_keys`(`id`));\nCREATE TABLE `policies` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`data` text);\nDELETE FROM sqlite_sequence;\nINSERT INTO sqlite_sequence VALUES('nodes',0);\nCREATE INDEX `idx_users_deleted_at` ON `users`(`deleted_at`);\nCREATE UNIQUE INDEX `idx_api_keys_prefix` ON `api_keys`(`prefix`);\nCREATE INDEX `idx_policies_deleted_at` ON `policies`(`deleted_at`);\nCREATE UNIQUE INDEX idx_provider_identifier ON users (provider_identifier) WHERE provider_identifier IS NOT NULL;\nCREATE UNIQUE INDEX idx_name_provider_identifier ON users (name,provider_identifier);\nCREATE UNIQUE INDEX idx_name_no_provider_identifier ON users (name) WHERE provider_identifier IS NULL;\nCOMMIT;\n"
  },
  {
    "path": "hscontrol/db/testdata/sqlite/headscale_0.26.1_dump_schema-to-0.27.0-old-table-cleanup.sql",
    "content": "PRAGMA foreign_keys=OFF;\nBEGIN TRANSACTION;\nCREATE TABLE `migrations` (`id` text,PRIMARY KEY (`id`));\nINSERT INTO migrations VALUES('202312101416');\nINSERT INTO migrations VALUES('202312101430');\nINSERT INTO migrations VALUES('202402151347');\nINSERT INTO migrations VALUES('2024041121742');\nINSERT INTO migrations VALUES('202406021630');\nINSERT INTO migrations VALUES('202409271400');\nINSERT INTO migrations VALUES('202407191627');\nINSERT INTO migrations VALUES('202408181235');\nINSERT INTO migrations VALUES('202501221827');\nINSERT INTO migrations VALUES('202501311657');\nINSERT INTO migrations VALUES('202502070949');\nINSERT INTO migrations VALUES('202502131714');\nINSERT INTO migrations VALUES('202502171819');\nINSERT INTO migrations VALUES('202505091439');\nINSERT INTO migrations VALUES('202505141324');\nCREATE TABLE `users` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`name` text,`display_name` text,`email` text,`provider_identifier` text,`provider` text,`profile_pic_url` text);\nCREATE TABLE `pre_auth_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`key` text,`user_id` integer,`reusable` numeric,`ephemeral` numeric DEFAULT false,`used` numeric DEFAULT false,`tags` text,`created_at` datetime,`expiration` datetime,CONSTRAINT `fk_pre_auth_keys_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE SET NULL);\nCREATE TABLE `api_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`prefix` text,`hash` blob,`created_at` datetime,`expiration` datetime,`last_seen` datetime);\nCREATE TABLE IF NOT EXISTS \"nodes\"  (`id` integer PRIMARY KEY AUTOINCREMENT,`machine_key` text,`node_key` text,`disco_key` text,`endpoints` text,`host_info` text,`ipv4` text,`ipv6` text,`hostname` text,`given_name` varchar(63),`user_id` integer,`register_method` text,`forced_tags` text,`auth_key_id` integer,`expiry` datetime,`last_seen` datetime,`approved_routes` text,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,CONSTRAINT `fk_nodes_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_nodes_auth_key` FOREIGN KEY (`auth_key_id`) REFERENCES `pre_auth_keys`(`id`));\nCREATE TABLE `policies` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`data` text);\nDELETE FROM sqlite_sequence;\nINSERT INTO sqlite_sequence VALUES('nodes',0);\nCREATE INDEX `idx_users_deleted_at` ON `users`(`deleted_at`);\nCREATE UNIQUE INDEX `idx_api_keys_prefix` ON `api_keys`(`prefix`);\nCREATE INDEX `idx_policies_deleted_at` ON `policies`(`deleted_at`);\nCREATE UNIQUE INDEX idx_provider_identifier ON users (provider_identifier) WHERE provider_identifier IS NOT NULL;\nCREATE UNIQUE INDEX idx_name_provider_identifier ON users (name,provider_identifier);\nCREATE UNIQUE INDEX idx_name_no_provider_identifier ON users (name) WHERE provider_identifier IS NULL;\n\n-- Create all the old tables we have had and ensure they are clean up.\nCREATE TABLE `namespaces` (`id` text,`deleted_at` datetime,PRIMARY KEY (`id`));\nCREATE TABLE `machines` (`id` text,PRIMARY KEY (`id`));\nCREATE TABLE `kvs` (`id` text,PRIMARY KEY (`id`));\nCREATE TABLE `shared_machines` (`id` text,`deleted_at` datetime,PRIMARY KEY (`id`));\nCREATE TABLE `pre_auth_key_acl_tags` (`id` text,PRIMARY KEY (`id`));\nCREATE TABLE `routes` (`id` text,`deleted_at` datetime,PRIMARY KEY (`id`));\n\nCREATE INDEX `idx_routes_deleted_at` ON `routes`(`deleted_at`);\nCREATE INDEX `idx_namespaces_deleted_at` ON 
`namespaces`(`deleted_at`);\nCREATE INDEX `idx_shared_machines_deleted_at` ON `shared_machines`(`deleted_at`);\n\nCOMMIT;\n"
  },
  {
    "path": "hscontrol/db/testdata/sqlite/request_tags_migration_test.sql",
    "content": "-- Test SQL dump for RequestTags migration (202601121700-migrate-hostinfo-request-tags)\n-- and forced_tags->tags rename migration (202511131445-node-forced-tags-to-tags)\n--\n-- This dump simulates a 0.27.x database where:\n-- - Tags from --advertise-tags were stored only in host_info.RequestTags\n-- - The tags column is still named forced_tags\n--\n-- Test scenarios:\n-- 1. Node with RequestTags that user is authorized for (should be migrated)\n-- 2. Node with RequestTags that user is NOT authorized for (should be rejected)\n-- 3. Node with existing forced_tags that should be preserved\n-- 4. Node with RequestTags that overlap with existing tags (no duplicates)\n-- 5. Node without RequestTags (should be unchanged)\n-- 6. Node with RequestTags via group membership (should be migrated)\n\nPRAGMA foreign_keys=OFF;\nBEGIN TRANSACTION;\n\n-- Migrations table - includes all migrations BEFORE the two tag migrations\nCREATE TABLE `migrations` (`id` text,PRIMARY KEY (`id`));\nINSERT INTO migrations VALUES('202312101416');\nINSERT INTO migrations VALUES('202312101430');\nINSERT INTO migrations VALUES('202402151347');\nINSERT INTO migrations VALUES('2024041121742');\nINSERT INTO migrations VALUES('202406021630');\nINSERT INTO migrations VALUES('202409271400');\nINSERT INTO migrations VALUES('202407191627');\nINSERT INTO migrations VALUES('202408181235');\nINSERT INTO migrations VALUES('202501221827');\nINSERT INTO migrations VALUES('202501311657');\nINSERT INTO migrations VALUES('202502070949');\nINSERT INTO migrations VALUES('202502131714');\nINSERT INTO migrations VALUES('202502171819');\nINSERT INTO migrations VALUES('202505091439');\nINSERT INTO migrations VALUES('202505141324');\nINSERT INTO migrations VALUES('202507021200');\nINSERT INTO migrations VALUES('202510311551');\nINSERT INTO migrations VALUES('202511101554-drop-old-idx');\nINSERT INTO migrations VALUES('202511011637-preauthkey-bcrypt');\nINSERT INTO migrations VALUES('202511122344-remove-newline-index');\n-- Note: 202511131445-node-forced-tags-to-tags is NOT included - it will run\n-- Note: 202601121700-migrate-hostinfo-request-tags is NOT included - it will run\n\n-- Users table\n-- Note: User names must match the usernames in the policy (with @)\nCREATE TABLE `users` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`name` text,`display_name` text,`email` text,`provider_identifier` text,`provider` text,`profile_pic_url` text);\nINSERT INTO users VALUES(1,'2024-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00',NULL,'user1@example.com','User One','user1@example.com',NULL,NULL,NULL);\nINSERT INTO users VALUES(2,'2024-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00',NULL,'user2@example.com','User Two','user2@example.com',NULL,NULL,NULL);\nINSERT INTO users VALUES(3,'2024-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00',NULL,'admin1@example.com','Admin One','admin1@example.com',NULL,NULL,NULL);\n\n-- Pre-auth keys table\nCREATE TABLE `pre_auth_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`key` text,`user_id` integer,`reusable` numeric,`ephemeral` numeric DEFAULT false,`used` numeric DEFAULT false,`tags` text,`created_at` datetime,`expiration` datetime,`prefix` text,`hash` blob,CONSTRAINT `fk_pre_auth_keys_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE SET NULL);\n\n-- API keys table\nCREATE TABLE `api_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`prefix` text,`hash` blob,`created_at` datetime,`expiration` datetime,`last_seen` datetime);\n\n-- 
Nodes table - using OLD schema with forced_tags (not tags)\nCREATE TABLE IF NOT EXISTS \"nodes\" (`id` integer PRIMARY KEY AUTOINCREMENT,`machine_key` text,`node_key` text,`disco_key` text,`endpoints` text,`host_info` text,`ipv4` text,`ipv6` text,`hostname` text,`given_name` varchar(63),`user_id` integer,`register_method` text,`forced_tags` text,`auth_key_id` integer,`expiry` datetime,`last_seen` datetime,`approved_routes` text,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,CONSTRAINT `fk_nodes_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_nodes_auth_key` FOREIGN KEY (`auth_key_id`) REFERENCES `pre_auth_keys`(`id`));\n\n-- Node 1: user1 owns it, has RequestTags for tag:server (user1 is authorized for this tag)\n-- Expected: tag:server should be added to tags\nINSERT INTO nodes VALUES(1,'mkey:a0ab77456320823945ae0331823e3c0d516fae9585bd42698dfa1ac3d7679e01','nodekey:7c84167ab68f494942de14deb83587fd841843de2bac105b6c670048c1605501','discokey:53075b3c6cad3b62a2a29caea61beeb93f66b8c75cb89dac465236a5bbf57701','[]','{\"RequestTags\":[\"tag:server\"]}','100.64.0.1','fd7a:115c:a1e0::1','node1','node1',1,'oidc','[]',NULL,'0001-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00','[]','2024-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00',NULL);\n\n-- Node 2: user1 owns it, has RequestTags for tag:unauthorized (user1 is NOT authorized for this tag)\n-- Expected: tag:unauthorized should be rejected, tags stays empty\nINSERT INTO nodes VALUES(2,'mkey:a0ab77456320823945ae0331823e3c0d516fae9585bd42698dfa1ac3d7679e02','nodekey:7c84167ab68f494942de14deb83587fd841843de2bac105b6c670048c1605502','discokey:53075b3c6cad3b62a2a29caea61beeb93f66b8c75cb89dac465236a5bbf57702','[]','{\"RequestTags\":[\"tag:unauthorized\"]}','100.64.0.2','fd7a:115c:a1e0::2','node2','node2',1,'oidc','[]',NULL,'0001-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00','[]','2024-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00',NULL);\n\n-- Node 3: user2 owns it, has RequestTags for tag:client (user2 is authorized)\n-- Also has existing forced_tags that should be preserved\n-- Expected: tag:client added, tag:existing preserved\nINSERT INTO nodes VALUES(3,'mkey:a0ab77456320823945ae0331823e3c0d516fae9585bd42698dfa1ac3d7679e03','nodekey:7c84167ab68f494942de14deb83587fd841843de2bac105b6c670048c1605503','discokey:53075b3c6cad3b62a2a29caea61beeb93f66b8c75cb89dac465236a5bbf57703','[]','{\"RequestTags\":[\"tag:client\"]}','100.64.0.3','fd7a:115c:a1e0::3','node3','node3',2,'oidc','[\"tag:existing\"]',NULL,'0001-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00','[]','2024-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00',NULL);\n\n-- Node 4: user1 owns it, has RequestTags for tag:server which already exists in forced_tags\n-- Expected: no duplicates, tags should be [\"tag:server\"]\nINSERT INTO nodes VALUES(4,'mkey:a0ab77456320823945ae0331823e3c0d516fae9585bd42698dfa1ac3d7679e04','nodekey:7c84167ab68f494942de14deb83587fd841843de2bac105b6c670048c1605504','discokey:53075b3c6cad3b62a2a29caea61beeb93f66b8c75cb89dac465236a5bbf57704','[]','{\"RequestTags\":[\"tag:server\"]}','100.64.0.4','fd7a:115c:a1e0::4','node4','node4',1,'oidc','[\"tag:server\"]',NULL,'0001-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00','[]','2024-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00',NULL);\n\n-- Node 5: user2 owns it, no RequestTags in host_info\n-- Expected: tags unchanged (empty)\nINSERT INTO nodes 
VALUES(5,'mkey:a0ab77456320823945ae0331823e3c0d516fae9585bd42698dfa1ac3d7679e05','nodekey:7c84167ab68f494942de14deb83587fd841843de2bac105b6c670048c1605505','discokey:53075b3c6cad3b62a2a29caea61beeb93f66b8c75cb89dac465236a5bbf57705','[]','{}','100.64.0.5','fd7a:115c:a1e0::5','node5','node5',2,'oidc','[]',NULL,'0001-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00','[]','2024-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00',NULL);\n\n-- Node 6: admin1 owns it, has RequestTags for tag:admin (admin1 is in group:admins which owns tag:admin)\n-- Expected: tag:admin should be added via group membership\nINSERT INTO nodes VALUES(6,'mkey:a0ab77456320823945ae0331823e3c0d516fae9585bd42698dfa1ac3d7679e06','nodekey:7c84167ab68f494942de14deb83587fd841843de2bac105b6c670048c1605506','discokey:53075b3c6cad3b62a2a29caea61beeb93f66b8c75cb89dac465236a5bbf57706','[]','{\"RequestTags\":[\"tag:admin\"]}','100.64.0.6','fd7a:115c:a1e0::6','node6','node6',3,'oidc','[]',NULL,'0001-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00','[]','2024-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00',NULL);\n\n-- Node 7: user1 owns it, has multiple RequestTags (tag:server authorized, tag:forbidden not authorized)\n-- Expected: tag:server added, tag:forbidden rejected\nINSERT INTO nodes VALUES(7,'mkey:a0ab77456320823945ae0331823e3c0d516fae9585bd42698dfa1ac3d7679e07','nodekey:7c84167ab68f494942de14deb83587fd841843de2bac105b6c670048c1605507','discokey:53075b3c6cad3b62a2a29caea61beeb93f66b8c75cb89dac465236a5bbf57707','[]','{\"RequestTags\":[\"tag:server\",\"tag:forbidden\"]}','100.64.0.7','fd7a:115c:a1e0::7','node7','node7',1,'oidc','[]',NULL,'0001-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00','[]','2024-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00',NULL);\n\n-- Policies table with tagOwners defining who can use which tags\n-- Note: Usernames in policy must contain @ (e.g., user1@example.com or just user1@)\nCREATE TABLE `policies` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`data` text);\nINSERT INTO policies VALUES(1,'2024-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00',NULL,'{\n  \"groups\": {\n    \"group:admins\": [\"admin1@example.com\"]\n  },\n  \"tagOwners\": {\n    \"tag:server\": [\"user1@example.com\"],\n    \"tag:client\": [\"user1@example.com\", \"user2@example.com\"],\n    \"tag:admin\": [\"group:admins\"]\n  },\n  \"acls\": [\n    {\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}\n  ]\n}');\n\n-- Indexes (using exact format expected by schema validation)\nDELETE FROM sqlite_sequence;\nINSERT INTO sqlite_sequence VALUES('users',3);\nINSERT INTO sqlite_sequence VALUES('nodes',7);\nINSERT INTO sqlite_sequence VALUES('policies',1);\nCREATE INDEX idx_users_deleted_at ON users(deleted_at);\nCREATE UNIQUE INDEX idx_api_keys_prefix ON api_keys(prefix);\nCREATE INDEX idx_policies_deleted_at ON policies(deleted_at);\nCREATE UNIQUE INDEX idx_provider_identifier ON users(provider_identifier) WHERE provider_identifier IS NOT NULL;\nCREATE UNIQUE INDEX idx_name_provider_identifier ON users(name, provider_identifier);\nCREATE UNIQUE INDEX idx_name_no_provider_identifier ON users(name) WHERE provider_identifier IS NULL;\nCREATE UNIQUE INDEX IF NOT EXISTS idx_pre_auth_keys_prefix ON pre_auth_keys(prefix) WHERE prefix IS NOT NULL AND prefix != '';\n\nCOMMIT;\n"
  },
  {
    "path": "hscontrol/db/text_serialiser.go",
    "content": "package db\n\nimport (\n\t\"context\"\n\t\"encoding\"\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"gorm.io/gorm/schema\"\n)\n\nvar (\n\terrUnmarshalTextValue = errors.New(\"unmarshalling text value\")\n\terrUnsupportedType    = errors.New(\"unsupported type\")\n\terrTextMarshalerOnly  = errors.New(\"only encoding.TextMarshaler is supported\")\n)\n\n// Got from https://github.com/xdg-go/strum/blob/main/types.go\nvar textUnmarshalerType = reflect.TypeFor[encoding.TextUnmarshaler]()\n\nfunc isTextUnmarshaler(rv reflect.Value) bool {\n\treturn rv.Type().Implements(textUnmarshalerType)\n}\n\nfunc maybeInstantiatePtr(rv reflect.Value) {\n\tif rv.Kind() == reflect.Ptr && rv.IsNil() {\n\t\tnp := reflect.New(rv.Type().Elem())\n\t\trv.Set(np)\n\t}\n}\n\nfunc decodingError(name string, err error) error {\n\treturn fmt.Errorf(\"decoding to %s: %w\", name, err)\n}\n\n// TextSerialiser implements the Serialiser interface for fields that\n// have a type that implements encoding.TextUnmarshaler.\ntype TextSerialiser struct{}\n\nfunc (TextSerialiser) Scan(ctx context.Context, field *schema.Field, dst reflect.Value, dbValue any) error {\n\tfieldValue := reflect.New(field.FieldType)\n\n\t// If the field is a pointer, we need to dereference it to get the actual type\n\t// so we do not end with a second pointer.\n\tif fieldValue.Elem().Kind() == reflect.Ptr {\n\t\tfieldValue = fieldValue.Elem()\n\t}\n\n\tif dbValue != nil {\n\t\tvar bytes []byte\n\n\t\tswitch v := dbValue.(type) {\n\t\tcase []byte:\n\t\t\tbytes = v\n\t\tcase string:\n\t\t\tbytes = []byte(v)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"%w: %#v\", errUnmarshalTextValue, dbValue)\n\t\t}\n\n\t\tif isTextUnmarshaler(fieldValue) {\n\t\t\tmaybeInstantiatePtr(fieldValue)\n\t\t\tf := fieldValue.MethodByName(\"UnmarshalText\")\n\t\t\targs := []reflect.Value{reflect.ValueOf(bytes)}\n\n\t\t\tret := f.Call(args)\n\t\t\tif !ret[0].IsNil() {\n\t\t\t\tif err, ok := ret[0].Interface().(error); ok {\n\t\t\t\t\treturn decodingError(field.Name, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// If the underlying field is to a pointer type, we need to\n\t\t\t// assign the value as a pointer to it.\n\t\t\t// If it is not a pointer, we need to assign the value to the\n\t\t\t// field.\n\t\t\tdstField := field.ReflectValueOf(ctx, dst)\n\t\t\tif dstField.Kind() == reflect.Ptr {\n\t\t\t\tdstField.Set(fieldValue)\n\t\t\t} else {\n\t\t\t\tdstField.Set(fieldValue.Elem())\n\t\t\t}\n\n\t\t\treturn nil\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"%w: %T\", errUnsupportedType, fieldValue.Interface())\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (TextSerialiser) Value(ctx context.Context, field *schema.Field, dst reflect.Value, fieldValue any) (any, error) {\n\tswitch v := fieldValue.(type) {\n\tcase encoding.TextMarshaler:\n\t\t// If the value is nil, we return nil, however, go nil values are not\n\t\t// always comparable, particularly when reflection is involved:\n\t\t// https://dev.to/arxeiss/in-go-nil-is-not-equal-to-nil-sometimes-jn8\n\t\tif v == nil || (reflect.ValueOf(v).Kind() == reflect.Ptr && reflect.ValueOf(v).IsNil()) {\n\t\t\treturn nil, nil //nolint:nilnil // intentional: nil value for GORM serializer\n\t\t}\n\n\t\tb, err := v.MarshalText()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn string(b), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"%w, got %T\", errTextMarshalerOnly, v)\n\t}\n}\n"
  },
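  {
    "path": "hscontrol/db/text_serialiser_example_test.go",
    "content": "package db\n\n// Illustrative sketch, not a file from upstream headscale: it shows how a\n// serializer such as TextSerialiser is typically wired into GORM, registered\n// once under a name via schema.RegisterSerializer and then referenced from a\n// model field with the `serializer` struct tag. The exampleHost model and the\n// \"text\" registration name are assumptions made for this example; netip.Addr\n// is used because it implements both encoding.TextMarshaler and\n// encoding.TextUnmarshaler, which is what TextSerialiser requires.\n\nimport (\n\t\"net/netip\"\n\n\t\"gorm.io/gorm/schema\"\n)\n\n// registerTextSerialiserExample registers the serializer under the name\n// \"text\" so that models can opt in per field.\nfunc registerTextSerialiserExample() {\n\tschema.RegisterSerializer(\"text\", TextSerialiser{})\n}\n\n// exampleHost is a hypothetical model: Addr is persisted in its textual form\n// (e.g. \"100.64.0.1\") via the serializer instead of a driver-specific\n// encoding.\ntype exampleHost struct {\n\tID   uint       `gorm:\"primaryKey\"`\n\tAddr netip.Addr `gorm:\"serializer:text\"`\n}\n"
  },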
  {
    "path": "hscontrol/db/user_update_test.go",
    "content": "package db\n\nimport (\n\t\"database/sql\"\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gorm.io/gorm\"\n)\n\n// TestUserUpdatePreservesUnchangedFields verifies that updating a user\n// preserves fields that aren't modified. This test validates the fix\n// for using Updates() instead of Save() in UpdateUser-like operations.\nfunc TestUserUpdatePreservesUnchangedFields(t *testing.T) {\n\tdatabase := dbForTest(t)\n\n\t// Create a user with all fields set\n\tinitialUser := types.User{\n\t\tName:        \"testuser\",\n\t\tDisplayName: \"Test User Display\",\n\t\tEmail:       \"test@example.com\",\n\t\tProviderIdentifier: sql.NullString{\n\t\t\tString: \"provider-123\",\n\t\t\tValid:  true,\n\t\t},\n\t}\n\n\tcreatedUser, err := database.CreateUser(initialUser)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, createdUser)\n\n\t// Verify initial state\n\tassert.Equal(t, \"testuser\", createdUser.Name)\n\tassert.Equal(t, \"Test User Display\", createdUser.DisplayName)\n\tassert.Equal(t, \"test@example.com\", createdUser.Email)\n\tassert.True(t, createdUser.ProviderIdentifier.Valid)\n\tassert.Equal(t, \"provider-123\", createdUser.ProviderIdentifier.String)\n\n\t// Simulate what UpdateUser does: load user, modify one field, save\n\t_, err = Write(database.DB, func(tx *gorm.DB) (*types.User, error) {\n\t\tuser, err := GetUserByID(tx, types.UserID(createdUser.ID))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Modify ONLY DisplayName\n\t\tuser.DisplayName = \"Updated Display Name\"\n\n\t\t// This is the line being tested - currently uses Save() which writes ALL fields, potentially overwriting unchanged ones\n\t\terr = tx.Save(user).Error\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn user, nil\n\t})\n\trequire.NoError(t, err)\n\n\t// Read user back from database\n\tupdatedUser, err := Read(database.DB, func(rx *gorm.DB) (*types.User, error) {\n\t\treturn GetUserByID(rx, types.UserID(createdUser.ID))\n\t})\n\trequire.NoError(t, err)\n\n\t// Verify that DisplayName was updated\n\tassert.Equal(t, \"Updated Display Name\", updatedUser.DisplayName)\n\n\t// CRITICAL: Verify that other fields were NOT overwritten\n\t// With Save(), these assertions should pass because the user object\n\t// was loaded from DB and has all fields populated.\n\t// But if Updates() is used, these will also pass (and it's safer).\n\tassert.Equal(t, \"testuser\", updatedUser.Name, \"Name should be preserved\")\n\tassert.Equal(t, \"test@example.com\", updatedUser.Email, \"Email should be preserved\")\n\tassert.True(t, updatedUser.ProviderIdentifier.Valid, \"ProviderIdentifier should be preserved\")\n\tassert.Equal(t, \"provider-123\", updatedUser.ProviderIdentifier.String, \"ProviderIdentifier value should be preserved\")\n}\n\n// TestUserUpdateWithUpdatesMethod tests that using Updates() instead of Save()\n// works correctly and only updates modified fields.\nfunc TestUserUpdateWithUpdatesMethod(t *testing.T) {\n\tdatabase := dbForTest(t)\n\n\t// Create a user\n\tinitialUser := types.User{\n\t\tName:        \"testuser\",\n\t\tDisplayName: \"Original Display\",\n\t\tEmail:       \"original@example.com\",\n\t\tProviderIdentifier: sql.NullString{\n\t\t\tString: \"provider-abc\",\n\t\t\tValid:  true,\n\t\t},\n\t}\n\n\tcreatedUser, err := database.CreateUser(initialUser)\n\trequire.NoError(t, err)\n\n\t// Update using Updates() method\n\t_, err = Write(database.DB, func(tx 
*gorm.DB) (*types.User, error) {\n\t\tuser, err := GetUserByID(tx, types.UserID(createdUser.ID))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Modify multiple fields\n\t\tuser.DisplayName = \"New Display\"\n\t\tuser.Email = \"new@example.com\"\n\n\t\t// Use Updates() instead of Save()\n\t\terr = tx.Updates(user).Error\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn user, nil\n\t})\n\trequire.NoError(t, err)\n\n\t// Verify changes\n\tupdatedUser, err := Read(database.DB, func(rx *gorm.DB) (*types.User, error) {\n\t\treturn GetUserByID(rx, types.UserID(createdUser.ID))\n\t})\n\trequire.NoError(t, err)\n\n\t// Verify updated fields\n\tassert.Equal(t, \"New Display\", updatedUser.DisplayName)\n\tassert.Equal(t, \"new@example.com\", updatedUser.Email)\n\n\t// Verify preserved fields\n\tassert.Equal(t, \"testuser\", updatedUser.Name)\n\tassert.True(t, updatedUser.ProviderIdentifier.Valid)\n\tassert.Equal(t, \"provider-abc\", updatedUser.ProviderIdentifier.String)\n}\n"
  },
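  {
    "path": "hscontrol/db/user_updates_example_test.go",
    "content": "package db\n\n// Illustrative sketch, not a file from upstream headscale. The tests in\n// user_update_test.go contrast Save() and Updates(); this file spells out the\n// GORM semantics they rely on: Save() writes every column of the struct,\n// while Updates() with a struct writes only non-zero fields, so an empty\n// string or false is silently skipped and clearing a field requires a map\n// (or Select). The user name and values below are assumptions made for this\n// example.\n\nimport (\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gorm.io/gorm\"\n)\n\nfunc TestUpdatesSkipsZeroValues(t *testing.T) {\n\tdatabase := dbForTest(t)\n\n\tuser, err := database.CreateUser(types.User{\n\t\tName:        \"zerovalue-user\",\n\t\tDisplayName: \"Has Display\",\n\t})\n\trequire.NoError(t, err)\n\n\t// Struct-based Updates(): DisplayName is the zero value \"\" here, so GORM\n\t// does NOT write it and the stored value is preserved.\n\terr = database.DB.Model(&types.User{}).\n\t\tWhere(\"id = ?\", user.ID).\n\t\tUpdates(types.User{Email: \"zero@example.com\"}).Error\n\trequire.NoError(t, err)\n\n\tgot, err := Read(database.DB, func(rx *gorm.DB) (*types.User, error) {\n\t\treturn GetUserByID(rx, types.UserID(user.ID))\n\t})\n\trequire.NoError(t, err)\n\tassert.Equal(t, \"Has Display\", got.DisplayName)\n\tassert.Equal(t, \"zero@example.com\", got.Email)\n\n\t// Map-based Updates(): zero values are written, so this actually clears\n\t// DisplayName.\n\terr = database.DB.Model(&types.User{}).\n\t\tWhere(\"id = ?\", user.ID).\n\t\tUpdates(map[string]any{\"display_name\": \"\"}).Error\n\trequire.NoError(t, err)\n\n\tgot, err = Read(database.DB, func(rx *gorm.DB) (*types.User, error) {\n\t\treturn GetUserByID(rx, types.UserID(user.ID))\n\t})\n\trequire.NoError(t, err)\n\tassert.Empty(t, got.DisplayName)\n}\n"
  },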
  {
    "path": "hscontrol/db/users.go",
    "content": "package db\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"gorm.io/gorm\"\n)\n\nvar (\n\tErrUserExists            = errors.New(\"user already exists\")\n\tErrUserNotFound          = errors.New(\"user not found\")\n\tErrUserStillHasNodes     = errors.New(\"user not empty: node(s) found\")\n\tErrUserWhereInvalidCount = errors.New(\"expect 0 or 1 where User structs\")\n\tErrUserNotUnique         = errors.New(\"expected exactly one user\")\n)\n\nfunc (hsdb *HSDatabase) CreateUser(user types.User) (*types.User, error) {\n\treturn Write(hsdb.DB, func(tx *gorm.DB) (*types.User, error) {\n\t\treturn CreateUser(tx, user)\n\t})\n}\n\n// CreateUser creates a new User. Returns error if could not be created\n// or another user already exists.\nfunc CreateUser(tx *gorm.DB, user types.User) (*types.User, error) {\n\terr := util.ValidateHostname(user.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = tx.Create(&user).Error\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating user: %w\", err)\n\t}\n\n\treturn &user, nil\n}\n\nfunc (hsdb *HSDatabase) DestroyUser(uid types.UserID) error {\n\treturn hsdb.Write(func(tx *gorm.DB) error {\n\t\treturn DestroyUser(tx, uid)\n\t})\n}\n\n// DestroyUser destroys a User. Returns error if the User does\n// not exist or if there are user-owned nodes associated with it.\n// Tagged nodes have user_id = NULL so they do not block deletion.\nfunc DestroyUser(tx *gorm.DB, uid types.UserID) error {\n\tuser, err := GetUserByID(tx, uid)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnodes, err := ListNodesByUser(tx, uid)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(nodes) > 0 {\n\t\treturn ErrUserStillHasNodes\n\t}\n\n\tkeys, err := ListPreAuthKeys(tx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, key := range keys {\n\t\terr = DestroyPreAuthKey(tx, key.ID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif result := tx.Unscoped().Delete(&user); result.Error != nil {\n\t\treturn result.Error\n\t}\n\n\treturn nil\n}\n\nfunc (hsdb *HSDatabase) RenameUser(uid types.UserID, newName string) error {\n\treturn hsdb.Write(func(tx *gorm.DB) error {\n\t\treturn RenameUser(tx, uid, newName)\n\t})\n}\n\nvar ErrCannotChangeOIDCUser = errors.New(\"cannot edit OIDC user\")\n\n// RenameUser renames a User. 
Returns error if the User does\n// not exist or if another User exists with the new name.\nfunc RenameUser(tx *gorm.DB, uid types.UserID, newName string) error {\n\tvar err error\n\n\toldUser, err := GetUserByID(tx, uid)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = util.ValidateHostname(newName); err != nil { //nolint:noinlineerr\n\t\treturn err\n\t}\n\n\tif oldUser.Provider == util.RegisterMethodOIDC {\n\t\treturn ErrCannotChangeOIDCUser\n\t}\n\n\toldUser.Name = newName\n\n\terr = tx.Updates(&oldUser).Error\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (hsdb *HSDatabase) GetUserByID(uid types.UserID) (*types.User, error) {\n\treturn GetUserByID(hsdb.DB, uid)\n}\n\nfunc GetUserByID(tx *gorm.DB, uid types.UserID) (*types.User, error) {\n\tuser := types.User{}\n\tif result := tx.First(&user, \"id = ?\", uid); errors.Is(\n\t\tresult.Error,\n\t\tgorm.ErrRecordNotFound,\n\t) {\n\t\treturn nil, ErrUserNotFound\n\t}\n\n\treturn &user, nil\n}\n\nfunc (hsdb *HSDatabase) GetUserByOIDCIdentifier(id string) (*types.User, error) {\n\treturn Read(hsdb.DB, func(rx *gorm.DB) (*types.User, error) {\n\t\treturn GetUserByOIDCIdentifier(rx, id)\n\t})\n}\n\nfunc GetUserByOIDCIdentifier(tx *gorm.DB, id string) (*types.User, error) {\n\tuser := types.User{}\n\tif result := tx.First(&user, \"provider_identifier = ?\", id); errors.Is(\n\t\tresult.Error,\n\t\tgorm.ErrRecordNotFound,\n\t) {\n\t\treturn nil, ErrUserNotFound\n\t}\n\n\treturn &user, nil\n}\n\nfunc (hsdb *HSDatabase) ListUsers(where ...*types.User) ([]types.User, error) {\n\treturn ListUsers(hsdb.DB, where...)\n}\n\n// ListUsers gets all the existing users.\nfunc ListUsers(tx *gorm.DB, where ...*types.User) ([]types.User, error) {\n\tif len(where) > 1 {\n\t\treturn nil, fmt.Errorf(\"%w, got %d\", ErrUserWhereInvalidCount, len(where))\n\t}\n\n\tvar user *types.User\n\tif len(where) == 1 {\n\t\tuser = where[0]\n\t}\n\n\tusers := []types.User{}\n\n\terr := tx.Where(user).Find(&users).Error\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn users, nil\n}\n\n// GetUserByName returns a user if the provided username is\n// unique, and otherwise an error.\nfunc (hsdb *HSDatabase) GetUserByName(name string) (*types.User, error) {\n\tusers, err := hsdb.ListUsers(&types.User{Name: name})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(users) == 0 {\n\t\treturn nil, ErrUserNotFound\n\t}\n\n\tif len(users) != 1 {\n\t\treturn nil, fmt.Errorf(\"%w, found %d\", ErrUserNotUnique, len(users))\n\t}\n\n\treturn &users[0], nil\n}\n\n// ListNodesByUser gets all the nodes in a given user.\nfunc ListNodesByUser(tx *gorm.DB, uid types.UserID) (types.Nodes, error) {\n\tnodes := types.Nodes{}\n\n\tuidPtr := uint(uid)\n\n\terr := tx.Preload(\"AuthKey\").Preload(\"AuthKey.User\").Preload(\"User\").Where(&types.Node{UserID: &uidPtr}).Find(&nodes).Error\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn nodes, nil\n}\n\nfunc (hsdb *HSDatabase) CreateUserForTest(name ...string) *types.User {\n\tif !testing.Testing() {\n\t\tpanic(\"CreateUserForTest can only be called during tests\")\n\t}\n\n\tuserName := \"testuser\"\n\tif len(name) > 0 && name[0] != \"\" {\n\t\tuserName = name[0]\n\t}\n\n\tuser, err := hsdb.CreateUser(types.User{Name: userName})\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to create test user: %v\", err))\n\t}\n\n\treturn user\n}\n\nfunc (hsdb *HSDatabase) CreateUsersForTest(count int, namePrefix ...string) []*types.User {\n\tif !testing.Testing() {\n\t\tpanic(\"CreateUsersForTest can only be called during 
tests\")\n\t}\n\n\tprefix := \"testuser\"\n\tif len(namePrefix) > 0 && namePrefix[0] != \"\" {\n\t\tprefix = namePrefix[0]\n\t}\n\n\tusers := make([]*types.User, count)\n\tfor i := range count {\n\t\tname := prefix + \"-\" + strconv.Itoa(i)\n\t\tusers[i] = hsdb.CreateUserForTest(name)\n\t}\n\n\treturn users\n}\n"
  },
  {
    "path": "hscontrol/db/users_test.go",
    "content": "package db\n\nimport (\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gorm.io/gorm\"\n)\n\nfunc TestCreateAndDestroyUser(t *testing.T) {\n\tdb, err := newSQLiteTestDB()\n\trequire.NoError(t, err)\n\n\tuser := db.CreateUserForTest(\"test\")\n\tassert.Equal(t, \"test\", user.Name)\n\n\tusers, err := db.ListUsers()\n\trequire.NoError(t, err)\n\tassert.Len(t, users, 1)\n\n\terr = db.DestroyUser(types.UserID(user.ID))\n\trequire.NoError(t, err)\n\n\t_, err = db.GetUserByID(types.UserID(user.ID))\n\tassert.Error(t, err)\n}\n\nfunc TestDestroyUserErrors(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\ttest func(*testing.T, *HSDatabase)\n\t}{\n\t\t{\n\t\t\tname: \"error_user_not_found\",\n\t\t\ttest: func(t *testing.T, db *HSDatabase) {\n\t\t\t\tt.Helper()\n\n\t\t\t\terr := db.DestroyUser(9998)\n\t\t\t\tassert.ErrorIs(t, err, ErrUserNotFound)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"success_deletes_preauthkeys\",\n\t\t\ttest: func(t *testing.T, db *HSDatabase) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tuser := db.CreateUserForTest(\"test\")\n\n\t\t\t\tpak, err := db.CreatePreAuthKey(user.TypedID(), false, false, nil, nil)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\terr = db.DestroyUser(types.UserID(user.ID))\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// Verify preauth key was deleted (need to search by prefix for new keys)\n\t\t\t\tvar foundPak types.PreAuthKey\n\n\t\t\t\tresult := db.DB.First(&foundPak, \"id = ?\", pak.ID)\n\t\t\t\tassert.ErrorIs(t, result.Error, gorm.ErrRecordNotFound)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"error_user_has_nodes\",\n\t\t\ttest: func(t *testing.T, db *HSDatabase) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tuser, err := db.CreateUser(types.User{Name: \"test\"})\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tpak, err := db.CreatePreAuthKey(user.TypedID(), false, false, nil, nil)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tpakID := pak.ID\n\n\t\t\t\tnode := types.Node{\n\t\t\t\t\tID:             0,\n\t\t\t\t\tHostname:       \"testnode\",\n\t\t\t\t\tUserID:         &user.ID,\n\t\t\t\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\t\t\t\tAuthKeyID:      &pakID,\n\t\t\t\t}\n\t\t\t\ttrx := db.DB.Save(&node)\n\t\t\t\trequire.NoError(t, trx.Error)\n\n\t\t\t\terr = db.DestroyUser(types.UserID(user.ID))\n\t\t\t\tassert.ErrorIs(t, err, ErrUserStillHasNodes)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t// https://github.com/juanfont/headscale/issues/3077\n\t\t\t// Tagged nodes have user_id = NULL, so they do not block\n\t\t\t// user deletion and are unaffected by ON DELETE CASCADE.\n\t\t\tname: \"success_user_only_has_tagged_nodes\",\n\t\t\ttest: func(t *testing.T, db *HSDatabase) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tuser, err := db.CreateUser(types.User{Name: \"test\"})\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// Create a tagged node with no user_id (the invariant).\n\t\t\t\tnode := types.Node{\n\t\t\t\t\tID:             0,\n\t\t\t\t\tHostname:       \"tagged-node\",\n\t\t\t\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\t\t\t\tTags:           []string{\"tag:server\"},\n\t\t\t\t}\n\t\t\t\ttrx := db.DB.Save(&node)\n\t\t\t\trequire.NoError(t, trx.Error)\n\n\t\t\t\terr = db.DestroyUser(types.UserID(user.ID))\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// User is gone.\n\t\t\t\t_, err = db.GetUserByID(types.UserID(user.ID))\n\t\t\t\trequire.ErrorIs(t, err, ErrUserNotFound)\n\n\t\t\t\t// Tagged node 
survives.\n\t\t\t\tvar survivingNode types.Node\n\n\t\t\t\tresult := db.DB.First(&survivingNode, \"id = ?\", node.ID)\n\t\t\t\trequire.NoError(t, result.Error)\n\t\t\t\tassert.Nil(t, survivingNode.UserID)\n\t\t\t\tassert.Equal(t, []string{\"tag:server\"}, survivingNode.Tags)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t// A user who has both tagged and user-owned nodes cannot\n\t\t\t// be deleted; the user-owned nodes still block deletion.\n\t\t\tname: \"error_user_has_tagged_and_owned_nodes\",\n\t\t\ttest: func(t *testing.T, db *HSDatabase) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tuser, err := db.CreateUser(types.User{Name: \"test\"})\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// Tagged node: no user_id.\n\t\t\t\ttaggedNode := types.Node{\n\t\t\t\t\tID:             0,\n\t\t\t\t\tHostname:       \"tagged-node\",\n\t\t\t\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\t\t\t\tTags:           []string{\"tag:server\"},\n\t\t\t\t}\n\t\t\t\ttrx := db.DB.Save(&taggedNode)\n\t\t\t\trequire.NoError(t, trx.Error)\n\n\t\t\t\t// User-owned node: has user_id.\n\t\t\t\townedNode := types.Node{\n\t\t\t\t\tID:             0,\n\t\t\t\t\tHostname:       \"owned-node\",\n\t\t\t\t\tUserID:         &user.ID,\n\t\t\t\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\t\t\t}\n\t\t\t\ttrx = db.DB.Save(&ownedNode)\n\t\t\t\trequire.NoError(t, trx.Error)\n\n\t\t\t\terr = db.DestroyUser(types.UserID(user.ID))\n\t\t\t\trequire.ErrorIs(t, err, ErrUserStillHasNodes)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tdb, err := newSQLiteTestDB()\n\t\t\trequire.NoError(t, err)\n\n\t\t\ttt.test(t, db)\n\t\t})\n\t}\n}\n\nfunc TestRenameUser(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\ttest func(*testing.T, *HSDatabase)\n\t}{\n\t\t{\n\t\t\tname: \"success_rename\",\n\t\t\ttest: func(t *testing.T, db *HSDatabase) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tuserTest := db.CreateUserForTest(\"test\")\n\t\t\t\tassert.Equal(t, \"test\", userTest.Name)\n\n\t\t\t\tusers, err := db.ListUsers()\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.Len(t, users, 1)\n\n\t\t\t\terr = db.RenameUser(types.UserID(userTest.ID), \"test-renamed\")\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tusers, err = db.ListUsers(&types.User{Name: \"test\"})\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.Empty(t, users)\n\n\t\t\t\tusers, err = db.ListUsers(&types.User{Name: \"test-renamed\"})\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.Len(t, users, 1)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"error_user_not_found\",\n\t\t\ttest: func(t *testing.T, db *HSDatabase) {\n\t\t\t\tt.Helper()\n\n\t\t\t\terr := db.RenameUser(99988, \"test\")\n\t\t\t\tassert.ErrorIs(t, err, ErrUserNotFound)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"error_duplicate_name\",\n\t\t\ttest: func(t *testing.T, db *HSDatabase) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tuserTest := db.CreateUserForTest(\"test\")\n\t\t\t\tuserTest2 := db.CreateUserForTest(\"test2\")\n\n\t\t\t\tassert.Equal(t, \"test\", userTest.Name)\n\t\t\t\tassert.Equal(t, \"test2\", userTest2.Name)\n\n\t\t\t\terr := db.RenameUser(types.UserID(userTest2.ID), \"test\")\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tassert.Contains(t, err.Error(), \"UNIQUE constraint failed\")\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tdb, err := newSQLiteTestDB()\n\t\t\trequire.NoError(t, err)\n\n\t\t\ttt.test(t, db)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "hscontrol/db/versioncheck.go",
    "content": "package db\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/rs/zerolog/log\"\n\t\"gorm.io/gorm\"\n)\n\nvar errVersionUpgrade = errors.New(\"version upgrade not supported\")\n\nvar errVersionDowngrade = errors.New(\"version downgrade not supported\")\n\nvar errVersionMajorChange = errors.New(\"major version change not supported\")\n\nvar errVersionParse = errors.New(\"cannot parse version\")\n\nvar errVersionFormat = errors.New(\n\t\"version does not follow semver major.minor.patch format\",\n)\n\n// DatabaseVersion tracks the headscale version that last\n// successfully started against this database.\n// It is a single-row table (ID is always 1).\ntype DatabaseVersion struct {\n\tID        uint   `gorm:\"primaryKey\"`\n\tVersion   string `gorm:\"not null\"`\n\tUpdatedAt time.Time\n}\n\n// semver holds parsed major.minor.patch components.\ntype semver struct {\n\tMajor int\n\tMinor int\n\tPatch int\n}\n\nfunc (s semver) String() string {\n\treturn fmt.Sprintf(\"v%d.%d.%d\", s.Major, s.Minor, s.Patch)\n}\n\n// parseVersion parses a version string like \"v0.25.0\", \"0.25.1\",\n// \"v0.25.0-beta.1\", or \"v0.25.0-rc1+build123\" into its major, minor,\n// patch components. Pre-release and build metadata suffixes are stripped.\nfunc parseVersion(s string) (semver, error) {\n\tif s == \"\" || s == \"dev\" {\n\t\treturn semver{}, fmt.Errorf(\"%q: %w\", s, errVersionParse)\n\t}\n\n\tv := strings.TrimPrefix(s, \"v\")\n\n\t// Strip pre-release suffix (everything after first '-')\n\t// and build metadata (everything after first '+').\n\tif idx := strings.IndexAny(v, \"-+\"); idx != -1 {\n\t\tv = v[:idx]\n\t}\n\n\tparts := strings.Split(v, \".\")\n\tif len(parts) != 3 {\n\t\treturn semver{}, fmt.Errorf(\"%q: %w\", s, errVersionFormat)\n\t}\n\n\tmajor, err := strconv.Atoi(parts[0])\n\tif err != nil {\n\t\treturn semver{}, fmt.Errorf(\"invalid major version in %q: %w\", s, err)\n\t}\n\n\tminor, err := strconv.Atoi(parts[1])\n\tif err != nil {\n\t\treturn semver{}, fmt.Errorf(\"invalid minor version in %q: %w\", s, err)\n\t}\n\n\tpatch, err := strconv.Atoi(parts[2])\n\tif err != nil {\n\t\treturn semver{}, fmt.Errorf(\"invalid patch version in %q: %w\", s, err)\n\t}\n\n\treturn semver{Major: major, Minor: minor, Patch: patch}, nil\n}\n\n// ensureDatabaseVersionTable creates the database_versions table if it\n// does not already exist. 
Uses GORM AutoMigrate to handle dialect\n// differences between SQLite (datetime) and PostgreSQL (timestamp).\n// This runs before gormigrate migrations.\nfunc ensureDatabaseVersionTable(db *gorm.DB) error {\n\terr := db.AutoMigrate(&DatabaseVersion{})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating database version table: %w\", err)\n\t}\n\n\treturn nil\n}\n\n// getDatabaseVersion reads the stored version from the database.\n// Returns an empty string if no version has been stored yet.\nfunc getDatabaseVersion(db *gorm.DB) (string, error) {\n\tvar version string\n\n\tresult := db.Raw(\"SELECT version FROM database_versions WHERE id = 1\").Scan(&version)\n\tif result.Error != nil {\n\t\treturn \"\", fmt.Errorf(\"reading database version: %w\", result.Error)\n\t}\n\n\tif result.RowsAffected == 0 {\n\t\treturn \"\", nil\n\t}\n\n\treturn version, nil\n}\n\n// setDatabaseVersion upserts the version row in the database.\nfunc setDatabaseVersion(db *gorm.DB, version string) error {\n\tnow := time.Now().UTC()\n\n\t// Try update first, then insert if no rows affected.\n\tresult := db.Exec(\n\t\t\"UPDATE database_versions SET version = ?, updated_at = ? WHERE id = 1\",\n\t\tversion, now,\n\t)\n\tif result.Error != nil {\n\t\treturn fmt.Errorf(\"updating database version: %w\", result.Error)\n\t}\n\n\tif result.RowsAffected == 0 {\n\t\terr := db.Exec(\n\t\t\t\"INSERT INTO database_versions (id, version, updated_at) VALUES (1, ?, ?)\",\n\t\t\tversion, now,\n\t\t).Error\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"inserting database version: %w\", err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// isDev reports whether a version string represents a development build\n// that should skip version checking.\nfunc isDev(version string) bool {\n\treturn version == \"\" || version == \"dev\" || version == \"(devel)\"\n}\n\n// checkVersionUpgradePath verifies that the running headscale version\n// is compatible with the version that last used this database.\n//\n// Rules:\n//   - If the running binary has no version (\"dev\" or empty), warn and skip.\n//   - If no version is stored in the database, allow (first run with this feature).\n//   - If the stored version is \"dev\", allow (previous run was unversioned).\n//   - Same minor version: always allowed (patch changes in either direction).\n//   - Single minor version upgrade (stored.minor+1 == current.minor): allowed.\n//   - Multi-minor upgrade or any minor downgrade: blocked with a fatal error.\nfunc checkVersionUpgradePath(db *gorm.DB) error {\n\terr := ensureDatabaseVersionTable(db)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcurrentVersion := types.GetVersionInfo().Version\n\n\t// Running binary has no real version — skip the check but\n\t// preserve whatever version is already stored.\n\tif isDev(currentVersion) {\n\t\tstoredVersion, err := getDatabaseVersion(db)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif storedVersion != \"\" && !isDev(storedVersion) {\n\t\t\tlog.Warn().\n\t\t\t\tStr(\"database_version\", storedVersion).\n\t\t\t\tMsg(\"running a development build of headscale without a version number, \" +\n\t\t\t\t\t\"database version check is skipped, the stored database version is preserved\")\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tstoredVersion, err := getDatabaseVersion(db)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// No stored version — first run with this feature. 
Allow startup;\n\t// the version will be stored after migrations succeed.\n\tif storedVersion == \"\" {\n\t\treturn nil\n\t}\n\n\t// Previous run was an unversioned build — no meaningful comparison.\n\tif isDev(storedVersion) {\n\t\treturn nil\n\t}\n\n\tcurrent, err := parseVersion(currentVersion)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"parsing current version: %w\", err)\n\t}\n\n\tstored, err := parseVersion(storedVersion)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"parsing stored database version: %w\", err)\n\t}\n\n\tif current.Major != stored.Major {\n\t\treturn fmt.Errorf(\n\t\t\t\"headscale version %s cannot be used with a database last used by %s: %w\",\n\t\t\tcurrentVersion, storedVersion, errVersionMajorChange,\n\t\t)\n\t}\n\n\tminorDiff := current.Minor - stored.Minor\n\n\tswitch {\n\tcase minorDiff == 0:\n\t\t// Same minor version — patch changes are always fine.\n\t\treturn nil\n\n\tcase minorDiff == 1:\n\t\t// Single minor version upgrade — allowed.\n\t\treturn nil\n\n\tcase minorDiff > 1:\n\t\t// Multi-minor upgrade — blocked.\n\t\treturn fmt.Errorf(\n\t\t\t\"headscale version %s cannot be used with a database last used by %s, \"+\n\t\t\t\t\"upgrading more than one minor version at a time is not supported, \"+\n\t\t\t\t\"please upgrade to the latest v%d.%d.x release first, then to %s, \"+\n\t\t\t\t\"release page: https://github.com/juanfont/headscale/releases: %w\",\n\t\t\tcurrentVersion, storedVersion,\n\t\t\tstored.Major, stored.Minor+1,\n\t\t\tcurrent.String(),\n\t\t\terrVersionUpgrade,\n\t\t)\n\n\tdefault:\n\t\t// minorDiff < 0 — any minor downgrade is blocked.\n\t\treturn fmt.Errorf(\n\t\t\t\"headscale version %s cannot be used with a database last used by %s, \"+\n\t\t\t\t\"downgrading to a previous minor version is not supported, \"+\n\t\t\t\t\"release page: https://github.com/juanfont/headscale/releases: %w\",\n\t\t\tcurrentVersion, storedVersion,\n\t\t\terrVersionDowngrade,\n\t\t)\n\t}\n}\n"
  },
  {
    "path": "hscontrol/db/versioncheck_test.go",
    "content": "package db\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/glebarez/sqlite\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gorm.io/gorm\"\n)\n\nfunc TestParseVersion(t *testing.T) {\n\ttests := []struct {\n\t\tinput   string\n\t\twant    semver\n\t\twantErr bool\n\t}{\n\t\t{input: \"v0.25.0\", want: semver{0, 25, 0}},\n\t\t{input: \"0.25.0\", want: semver{0, 25, 0}},\n\t\t{input: \"v0.25.1\", want: semver{0, 25, 1}},\n\t\t{input: \"v1.0.0\", want: semver{1, 0, 0}},\n\t\t{input: \"v0.28.3\", want: semver{0, 28, 3}},\n\t\t// Pre-release suffixes stripped\n\t\t{input: \"v0.25.0-beta.1\", want: semver{0, 25, 0}},\n\t\t{input: \"v0.25.0-rc1\", want: semver{0, 25, 0}},\n\t\t// Build metadata stripped\n\t\t{input: \"v0.25.0+build123\", want: semver{0, 25, 0}},\n\t\t{input: \"v0.25.0-beta.1+build123\", want: semver{0, 25, 0}},\n\t\t// Invalid inputs\n\t\t{input: \"\", wantErr: true},\n\t\t{input: \"dev\", wantErr: true},\n\t\t{input: \"vfoo.bar.baz\", wantErr: true},\n\t\t{input: \"v1.2\", wantErr: true},\n\t\t{input: \"v1\", wantErr: true},\n\t\t{input: \"not-a-version\", wantErr: true},\n\t\t{input: \"v1.2.3.4\", wantErr: true},\n\t\t{input: \"(devel)\", wantErr: true},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.input, func(t *testing.T) {\n\t\t\tgot, err := parseVersion(tt.input)\n\t\t\tif tt.wantErr {\n\t\t\t\tassert.Error(t, err)\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.Equal(t, tt.want, got)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestSemverString(t *testing.T) {\n\ts := semver{0, 28, 3}\n\tassert.Equal(t, \"v0.28.3\", s.String())\n}\n\nfunc TestIsDev(t *testing.T) {\n\tassert.True(t, isDev(\"\"))\n\tassert.True(t, isDev(\"dev\"))\n\tassert.True(t, isDev(\"(devel)\"))\n\tassert.False(t, isDev(\"v0.28.0\"))\n\tassert.False(t, isDev(\"0.28.0\"))\n}\n\n// versionTestDB creates an in-memory SQLite database with the\n// database_versions table already bootstrapped.\nfunc versionTestDB(t *testing.T) *gorm.DB {\n\tt.Helper()\n\n\tdb, err := gorm.Open(sqlite.Open(\"file::memory:\"), &gorm.Config{})\n\trequire.NoError(t, err)\n\n\terr = ensureDatabaseVersionTable(db)\n\trequire.NoError(t, err)\n\n\treturn db\n}\n\nfunc TestSetAndGetDatabaseVersion(t *testing.T) {\n\tdb := versionTestDB(t)\n\n\t// Initially empty\n\tv, err := getDatabaseVersion(db)\n\trequire.NoError(t, err)\n\tassert.Empty(t, v)\n\n\t// Set a version\n\terr = setDatabaseVersion(db, \"v0.27.0\")\n\trequire.NoError(t, err)\n\n\tv, err = getDatabaseVersion(db)\n\trequire.NoError(t, err)\n\tassert.Equal(t, \"v0.27.0\", v)\n\n\t// Update the version (upsert)\n\terr = setDatabaseVersion(db, \"v0.28.0\")\n\trequire.NoError(t, err)\n\n\tv, err = getDatabaseVersion(db)\n\trequire.NoError(t, err)\n\tassert.Equal(t, \"v0.28.0\", v)\n}\n\nfunc TestEnsureDatabaseVersionTableIdempotent(t *testing.T) {\n\tdb, err := gorm.Open(sqlite.Open(\"file::memory:\"), &gorm.Config{})\n\trequire.NoError(t, err)\n\n\t// Call twice — should not error\n\terr = ensureDatabaseVersionTable(db)\n\trequire.NoError(t, err)\n\n\terr = ensureDatabaseVersionTable(db)\n\trequire.NoError(t, err)\n}\n\n// TestCheckVersionUpgradePathDirect tests the version comparison logic\n// by directly seeding the database, bypassing types.GetVersionInfo()\n// (which returns \"dev\" in test environments and cannot be overridden).\nfunc TestCheckVersionUpgradePathDirect(t *testing.T) {\n\ttests := []struct {\n\t\tname           string\n\t\tstoredVersion  string // empty means no row 
stored\n\t\tcurrentVersion string\n\t\twantErr        bool\n\t\terrContains    string\n\t}{\n\t\t// Fresh database (no stored version)\n\t\t{\n\t\t\tname:           \"fresh db allows any version\",\n\t\t\tstoredVersion:  \"\",\n\t\t\tcurrentVersion: \"v0.28.0\",\n\t\t},\n\n\t\t// Stored is dev\n\t\t{\n\t\t\tname:           \"real version over dev db\",\n\t\t\tstoredVersion:  \"dev\",\n\t\t\tcurrentVersion: \"v0.28.0\",\n\t\t},\n\t\t{\n\t\t\tname:           \"devel version in db\",\n\t\t\tstoredVersion:  \"(devel)\",\n\t\t\tcurrentVersion: \"v0.28.0\",\n\t\t},\n\n\t\t// Same version\n\t\t{\n\t\t\tname:           \"same version\",\n\t\t\tstoredVersion:  \"v0.27.0\",\n\t\t\tcurrentVersion: \"v0.27.0\",\n\t\t},\n\n\t\t// Patch changes within same minor\n\t\t{\n\t\t\tname:           \"patch upgrade\",\n\t\t\tstoredVersion:  \"v0.27.0\",\n\t\t\tcurrentVersion: \"v0.27.3\",\n\t\t},\n\t\t{\n\t\t\tname:           \"patch downgrade within same minor\",\n\t\t\tstoredVersion:  \"v0.27.3\",\n\t\t\tcurrentVersion: \"v0.27.0\",\n\t\t},\n\n\t\t// Single minor upgrade\n\t\t{\n\t\t\tname:           \"single minor upgrade\",\n\t\t\tstoredVersion:  \"v0.27.0\",\n\t\t\tcurrentVersion: \"v0.28.0\",\n\t\t},\n\t\t{\n\t\t\tname:           \"single minor upgrade with different patches\",\n\t\t\tstoredVersion:  \"v0.27.3\",\n\t\t\tcurrentVersion: \"v0.28.1\",\n\t\t},\n\n\t\t// Multi-minor upgrade (blocked)\n\t\t{\n\t\t\tname:           \"two minor versions ahead\",\n\t\t\tstoredVersion:  \"v0.25.0\",\n\t\t\tcurrentVersion: \"v0.27.0\",\n\t\t\twantErr:        true,\n\t\t\terrContains:    \"latest v0.26.x\",\n\t\t},\n\t\t{\n\t\t\tname:           \"three minor versions ahead\",\n\t\t\tstoredVersion:  \"v0.25.0\",\n\t\t\tcurrentVersion: \"v0.28.0\",\n\t\t\twantErr:        true,\n\t\t\terrContains:    \"latest v0.26.x\",\n\t\t},\n\n\t\t// Minor downgrades (blocked)\n\t\t{\n\t\t\tname:           \"single minor downgrade\",\n\t\t\tstoredVersion:  \"v0.28.0\",\n\t\t\tcurrentVersion: \"v0.27.0\",\n\t\t\twantErr:        true,\n\t\t\terrContains:    \"downgrading\",\n\t\t},\n\t\t{\n\t\t\tname:           \"multi minor downgrade\",\n\t\t\tstoredVersion:  \"v0.28.0\",\n\t\t\tcurrentVersion: \"v0.25.0\",\n\t\t\twantErr:        true,\n\t\t\terrContains:    \"downgrading\",\n\t\t},\n\n\t\t// Major version mismatch\n\t\t{\n\t\t\tname:           \"major version upgrade\",\n\t\t\tstoredVersion:  \"v0.28.0\",\n\t\t\tcurrentVersion: \"v1.0.0\",\n\t\t\twantErr:        true,\n\t\t\terrContains:    \"major version\",\n\t\t},\n\t\t{\n\t\t\tname:           \"major version downgrade\",\n\t\t\tstoredVersion:  \"v1.0.0\",\n\t\t\tcurrentVersion: \"v0.28.0\",\n\t\t\twantErr:        true,\n\t\t\terrContains:    \"major version\",\n\t\t},\n\n\t\t// Pre-release versions\n\t\t{\n\t\t\tname:           \"pre-release single minor upgrade\",\n\t\t\tstoredVersion:  \"v0.27.0\",\n\t\t\tcurrentVersion: \"v0.28.0-beta.1\",\n\t\t},\n\t\t{\n\t\t\tname:           \"pre-release multi minor upgrade blocked\",\n\t\t\tstoredVersion:  \"v0.25.0\",\n\t\t\tcurrentVersion: \"v0.27.0-rc1\",\n\t\t\twantErr:        true,\n\t\t\terrContains:    \"latest v0.26.x\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tdb := versionTestDB(t)\n\n\t\t\t// Seed the stored version if provided\n\t\t\tif tt.storedVersion != \"\" {\n\t\t\t\terr := setDatabaseVersion(db, tt.storedVersion)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t}\n\n\t\t\terr := checkVersionUpgradePathFromVersions(db, tt.currentVersion)\n\t\t\tif tt.wantErr 
{\n\t\t\t\trequire.Error(t, err)\n\n\t\t\t\tif tt.errContains != \"\" {\n\t\t\t\t\tassert.Contains(t, err.Error(), tt.errContains)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// checkVersionUpgradePathFromVersions is a test helper that runs the\n// version comparison logic with a specific currentVersion string,\n// bypassing types.GetVersionInfo(). It replicates the logic from\n// checkVersionUpgradePath but accepts the version as a parameter.\nfunc checkVersionUpgradePathFromVersions(db *gorm.DB, currentVersion string) error {\n\tif isDev(currentVersion) {\n\t\treturn nil\n\t}\n\n\tstoredVersion, err := getDatabaseVersion(db)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif storedVersion == \"\" {\n\t\treturn nil\n\t}\n\n\tif isDev(storedVersion) {\n\t\treturn nil\n\t}\n\n\tcurrent, err := parseVersion(currentVersion)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstored, err := parseVersion(storedVersion)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif current.Major != stored.Major {\n\t\treturn errVersionMajorChange\n\t}\n\n\tminorDiff := current.Minor - stored.Minor\n\n\tswitch {\n\tcase minorDiff == 0:\n\t\treturn nil\n\tcase minorDiff == 1:\n\t\treturn nil\n\tcase minorDiff > 1:\n\t\treturn fmt.Errorf(\n\t\t\t\"please upgrade to the latest v%d.%d.x release first: %w\",\n\t\t\tstored.Major, stored.Minor+1,\n\t\t\terrVersionUpgrade,\n\t\t)\n\tdefault:\n\t\treturn fmt.Errorf(\"downgrading: %w\", errVersionDowngrade)\n\t}\n}\n"
  },
  {
    "path": "hscontrol/debug.go",
    "content": "package hscontrol\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"strings\"\n\n\t\"github.com/arl/statsviz\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/prometheus/client_golang/prometheus/promhttp\"\n\t\"tailscale.com/tsweb\"\n)\n\nfunc (h *Headscale) debugHTTPServer() *http.Server {\n\tdebugMux := http.NewServeMux()\n\tdebug := tsweb.Debugger(debugMux)\n\n\t// State overview endpoint\n\tdebug.Handle(\"overview\", \"State overview\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// Check Accept header to determine response format\n\t\tacceptHeader := r.Header.Get(\"Accept\")\n\t\twantsJSON := strings.Contains(acceptHeader, \"application/json\")\n\n\t\tif wantsJSON {\n\t\t\toverview := h.state.DebugOverviewJSON()\n\n\t\t\toverviewJSON, err := json.MarshalIndent(overview, \"\", \"  \")\n\t\t\tif err != nil {\n\t\t\t\thttpError(w, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t_, _ = w.Write(overviewJSON)\n\t\t} else {\n\t\t\t// Default to text/plain for backward compatibility\n\t\t\toverview := h.state.DebugOverview()\n\n\t\t\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t_, _ = w.Write([]byte(overview))\n\t\t}\n\t}))\n\n\t// Configuration endpoint\n\tdebug.Handle(\"config\", \"Current configuration\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tconfig := h.state.DebugConfig()\n\n\t\tconfigJSON, err := json.MarshalIndent(config, \"\", \"  \")\n\t\tif err != nil {\n\t\t\thttpError(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_, _ = w.Write(configJSON)\n\t}))\n\n\t// Policy endpoint\n\tdebug.Handle(\"policy\", \"Current policy\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tpolicy, err := h.state.DebugPolicy()\n\t\tif err != nil {\n\t\t\thttpError(w, err)\n\t\t\treturn\n\t\t}\n\t\t// Policy data is HuJSON, which is a superset of JSON\n\t\t// Set content type based on Accept header preference\n\t\tacceptHeader := r.Header.Get(\"Accept\")\n\t\tif strings.Contains(acceptHeader, \"application/json\") {\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t} else {\n\t\t\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\t\t}\n\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_, _ = w.Write([]byte(policy))\n\t}))\n\n\t// Filter rules endpoint\n\tdebug.Handle(\"filter\", \"Current filter rules\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfilter, err := h.state.DebugFilter()\n\t\tif err != nil {\n\t\t\thttpError(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tfilterJSON, err := json.MarshalIndent(filter, \"\", \"  \")\n\t\tif err != nil {\n\t\t\thttpError(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_, _ = w.Write(filterJSON)\n\t}))\n\n\t// SSH policies endpoint\n\tdebug.Handle(\"ssh\", \"SSH policies per node\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tsshPolicies := h.state.DebugSSHPolicies()\n\n\t\tsshJSON, err := json.MarshalIndent(sshPolicies, \"\", \"  \")\n\t\tif err != nil {\n\t\t\thttpError(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_, _ = w.Write(sshJSON)\n\t}))\n\n\t// DERP map endpoint\n\tdebug.Handle(\"derp\", \"DERP map 
configuration\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// Check Accept header to determine response format\n\t\tacceptHeader := r.Header.Get(\"Accept\")\n\t\twantsJSON := strings.Contains(acceptHeader, \"application/json\")\n\n\t\tif wantsJSON {\n\t\t\tderpInfo := h.state.DebugDERPJSON()\n\n\t\t\tderpJSON, err := json.MarshalIndent(derpInfo, \"\", \"  \")\n\t\t\tif err != nil {\n\t\t\t\thttpError(w, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t_, _ = w.Write(derpJSON)\n\t\t} else {\n\t\t\t// Default to text/plain for backward compatibility\n\t\t\tderpInfo := h.state.DebugDERPMap()\n\n\t\t\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t_, _ = w.Write([]byte(derpInfo))\n\t\t}\n\t}))\n\n\t// NodeStore endpoint\n\tdebug.Handle(\"nodestore\", \"NodeStore information\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// Check Accept header to determine response format\n\t\tacceptHeader := r.Header.Get(\"Accept\")\n\t\twantsJSON := strings.Contains(acceptHeader, \"application/json\")\n\n\t\tif wantsJSON {\n\t\t\tnodeStoreNodes := h.state.DebugNodeStoreJSON()\n\n\t\t\tnodeStoreJSON, err := json.MarshalIndent(nodeStoreNodes, \"\", \"  \")\n\t\t\tif err != nil {\n\t\t\t\thttpError(w, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t_, _ = w.Write(nodeStoreJSON)\n\t\t} else {\n\t\t\t// Default to text/plain for backward compatibility\n\t\t\tnodeStoreInfo := h.state.DebugNodeStore()\n\n\t\t\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t_, _ = w.Write([]byte(nodeStoreInfo))\n\t\t}\n\t}))\n\n\t// Registration cache endpoint\n\tdebug.Handle(\"registration-cache\", \"Registration cache information\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tcacheInfo := h.state.DebugRegistrationCache()\n\n\t\tcacheJSON, err := json.MarshalIndent(cacheInfo, \"\", \"  \")\n\t\tif err != nil {\n\t\t\thttpError(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_, _ = w.Write(cacheJSON)\n\t}))\n\n\t// Routes endpoint\n\tdebug.Handle(\"routes\", \"Primary routes\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// Check Accept header to determine response format\n\t\tacceptHeader := r.Header.Get(\"Accept\")\n\t\twantsJSON := strings.Contains(acceptHeader, \"application/json\")\n\n\t\tif wantsJSON {\n\t\t\troutes := h.state.DebugRoutes()\n\n\t\t\troutesJSON, err := json.MarshalIndent(routes, \"\", \"  \")\n\t\t\tif err != nil {\n\t\t\t\thttpError(w, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t_, _ = w.Write(routesJSON)\n\t\t} else {\n\t\t\t// Default to text/plain for backward compatibility\n\t\t\troutes := h.state.DebugRoutesString()\n\n\t\t\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t_, _ = w.Write([]byte(routes))\n\t\t}\n\t}))\n\n\t// Policy manager endpoint\n\tdebug.Handle(\"policy-manager\", \"Policy manager state\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// Check Accept header to determine response format\n\t\tacceptHeader := r.Header.Get(\"Accept\")\n\t\twantsJSON := strings.Contains(acceptHeader, 
\"application/json\")\n\n\t\tif wantsJSON {\n\t\t\tpolicyManagerInfo := h.state.DebugPolicyManagerJSON()\n\n\t\t\tpolicyManagerJSON, err := json.MarshalIndent(policyManagerInfo, \"\", \"  \")\n\t\t\tif err != nil {\n\t\t\t\thttpError(w, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t_, _ = w.Write(policyManagerJSON)\n\t\t} else {\n\t\t\t// Default to text/plain for backward compatibility\n\t\t\tpolicyManagerInfo := h.state.DebugPolicyManager()\n\n\t\t\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t_, _ = w.Write([]byte(policyManagerInfo))\n\t\t}\n\t}))\n\n\tdebug.Handle(\"mapresponses\", \"Map responses for all nodes\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tres, err := h.mapBatcher.DebugMapResponses()\n\t\tif err != nil {\n\t\t\thttpError(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tif res == nil {\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t_, _ = w.Write([]byte(\"HEADSCALE_DEBUG_DUMP_MAPRESPONSE_PATH not set\"))\n\n\t\t\treturn\n\t\t}\n\n\t\tresJSON, err := json.MarshalIndent(res, \"\", \"  \")\n\t\tif err != nil {\n\t\t\thttpError(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_, _ = w.Write(resJSON)\n\t}))\n\n\t// Batcher endpoint\n\tdebug.Handle(\"batcher\", \"Batcher connected nodes\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// Check Accept header to determine response format\n\t\tacceptHeader := r.Header.Get(\"Accept\")\n\t\twantsJSON := strings.Contains(acceptHeader, \"application/json\")\n\n\t\tif wantsJSON {\n\t\t\tbatcherInfo := h.debugBatcherJSON()\n\n\t\t\tbatcherJSON, err := json.MarshalIndent(batcherInfo, \"\", \"  \")\n\t\t\tif err != nil {\n\t\t\t\thttpError(w, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t_, _ = w.Write(batcherJSON)\n\t\t} else {\n\t\t\t// Default to text/plain for backward compatibility\n\t\t\tbatcherInfo := h.debugBatcher()\n\n\t\t\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t_, _ = w.Write([]byte(batcherInfo))\n\t\t}\n\t}))\n\n\terr := statsviz.Register(debugMux)\n\tif err == nil {\n\t\tdebug.URL(\"/debug/statsviz\", \"Statsviz (visualise go metrics)\")\n\t}\n\n\tdebug.URL(\"/metrics\", \"Prometheus metrics\")\n\tdebugMux.Handle(\"/metrics\", promhttp.Handler())\n\n\tdebugHTTPServer := &http.Server{\n\t\tAddr:         h.cfg.MetricsAddr,\n\t\tHandler:      debugMux,\n\t\tReadTimeout:  types.HTTPTimeout,\n\t\tWriteTimeout: 0,\n\t}\n\n\treturn debugHTTPServer\n}\n\n// debugBatcher returns debug information about the batcher's connected nodes.\nfunc (h *Headscale) debugBatcher() string {\n\tvar sb strings.Builder\n\tsb.WriteString(\"=== Batcher Connected Nodes ===\\n\\n\")\n\n\ttotalNodes := 0\n\tconnectedCount := 0\n\n\t// Collect nodes and sort them by ID\n\ttype nodeStatus struct {\n\t\tid                types.NodeID\n\t\tconnected         bool\n\t\tactiveConnections int\n\t}\n\n\tvar nodes []nodeStatus\n\n\tdebugInfo := h.mapBatcher.Debug()\n\tfor nodeID, info := range debugInfo {\n\t\tnodes = append(nodes, nodeStatus{\n\t\t\tid:                nodeID,\n\t\t\tconnected:         info.Connected,\n\t\t\tactiveConnections: info.ActiveConnections,\n\t\t})\n\t\ttotalNodes++\n\n\t\tif info.Connected {\n\t\t\tconnectedCount++\n\t\t}\n\t}\n\n\t// Sort by node 
ID\n\tfor i := 0; i < len(nodes); i++ {\n\t\tfor j := i + 1; j < len(nodes); j++ {\n\t\t\tif nodes[i].id > nodes[j].id {\n\t\t\t\tnodes[i], nodes[j] = nodes[j], nodes[i]\n\t\t\t}\n\t\t}\n\t}\n\n\t// Output sorted nodes\n\tfor _, node := range nodes {\n\t\tstatus := \"disconnected\"\n\t\tif node.connected {\n\t\t\tstatus = \"connected\"\n\t\t}\n\n\t\tif node.activeConnections > 0 {\n\t\t\tsb.WriteString(fmt.Sprintf(\"Node %d:\\t%s (%d connections)\\n\", node.id, status, node.activeConnections))\n\t\t} else {\n\t\t\tsb.WriteString(fmt.Sprintf(\"Node %d:\\t%s\\n\", node.id, status))\n\t\t}\n\t}\n\n\tsb.WriteString(fmt.Sprintf(\"\\nSummary: %d connected, %d total\\n\", connectedCount, totalNodes))\n\n\treturn sb.String()\n}\n\n// DebugBatcherInfo represents batcher connection information in a structured format.\ntype DebugBatcherInfo struct {\n\tConnectedNodes map[string]DebugBatcherNodeInfo `json:\"connected_nodes\"` // NodeID -> node connection info\n\tTotalNodes     int                             `json:\"total_nodes\"`\n}\n\n// DebugBatcherNodeInfo represents connection information for a single node.\ntype DebugBatcherNodeInfo struct {\n\tConnected         bool `json:\"connected\"`\n\tActiveConnections int  `json:\"active_connections\"`\n}\n\n// debugBatcherJSON returns structured debug information about the batcher's connected nodes.\nfunc (h *Headscale) debugBatcherJSON() DebugBatcherInfo {\n\tinfo := DebugBatcherInfo{\n\t\tConnectedNodes: make(map[string]DebugBatcherNodeInfo),\n\t\tTotalNodes:     0,\n\t}\n\n\tdebugInfo := h.mapBatcher.Debug()\n\tfor nodeID, debugData := range debugInfo {\n\t\tinfo.ConnectedNodes[fmt.Sprintf(\"%d\", nodeID)] = DebugBatcherNodeInfo{\n\t\t\tConnected:         debugData.Connected,\n\t\t\tActiveConnections: debugData.ActiveConnections,\n\t\t}\n\t\tinfo.TotalNodes++\n\t}\n\n\treturn info\n}\n"
  },
  {
    "path": "hscontrol/derp/derp.go",
    "content": "package derp\n\nimport (\n\t\"cmp\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"hash/crc64\"\n\t\"io\"\n\t\"maps\"\n\t\"math/rand\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"reflect\"\n\t\"slices\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/spf13/viper\"\n\t\"gopkg.in/yaml.v3\"\n\t\"tailscale.com/tailcfg\"\n)\n\nfunc loadDERPMapFromPath(path string) (*tailcfg.DERPMap, error) {\n\tderpFile, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer derpFile.Close()\n\n\tvar derpMap tailcfg.DERPMap\n\n\tb, err := io.ReadAll(derpFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = yaml.Unmarshal(b, &derpMap)\n\n\treturn &derpMap, err\n}\n\nfunc loadDERPMapFromURL(addr url.URL) (*tailcfg.DERPMap, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), types.HTTPTimeout)\n\tdefer cancel()\n\n\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, addr.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient := http.Client{\n\t\tTimeout: types.HTTPTimeout,\n\t}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbody, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar derpMap tailcfg.DERPMap\n\n\terr = json.Unmarshal(body, &derpMap)\n\n\treturn &derpMap, err\n}\n\n// mergeDERPMaps naively merges a list of DERPMaps into a single\n// DERPMap, it will _only_ look at the Regions, an integer.\n// If a region exists in two of the given DERPMaps, the region\n// form the _last_ DERPMap will be preserved.\n// An empty DERPMap list will result in a DERPMap with no regions.\nfunc mergeDERPMaps(derpMaps []*tailcfg.DERPMap) *tailcfg.DERPMap {\n\tresult := tailcfg.DERPMap{\n\t\tOmitDefaultRegions: false,\n\t\tRegions:            map[int]*tailcfg.DERPRegion{},\n\t}\n\n\tfor _, derpMap := range derpMaps {\n\t\tmaps.Copy(result.Regions, derpMap.Regions)\n\t}\n\n\tfor id, region := range result.Regions {\n\t\tif region == nil {\n\t\t\tdelete(result.Regions, id)\n\t\t}\n\t}\n\n\treturn &result\n}\n\nfunc GetDERPMap(cfg types.DERPConfig) (*tailcfg.DERPMap, error) {\n\tvar derpMaps []*tailcfg.DERPMap\n\tif cfg.DERPMap != nil {\n\t\tderpMaps = append(derpMaps, cfg.DERPMap)\n\t}\n\n\tfor _, addr := range cfg.URLs {\n\t\tderpMap, err := loadDERPMapFromURL(addr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tderpMaps = append(derpMaps, derpMap)\n\t}\n\n\tfor _, path := range cfg.Paths {\n\t\tderpMap, err := loadDERPMapFromPath(path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tderpMaps = append(derpMaps, derpMap)\n\t}\n\n\tderpMap := mergeDERPMaps(derpMaps)\n\tshuffleDERPMap(derpMap)\n\n\treturn derpMap, nil\n}\n\nfunc shuffleDERPMap(dm *tailcfg.DERPMap) {\n\tif dm == nil || len(dm.Regions) == 0 {\n\t\treturn\n\t}\n\n\t// Collect region IDs and sort them to ensure deterministic iteration order.\n\t// Map iteration order is non-deterministic in Go, which would cause the\n\t// shuffle to be non-deterministic even with a fixed seed.\n\tids := make([]int, 0, len(dm.Regions))\n\tfor id := range dm.Regions {\n\t\tids = append(ids, id)\n\t}\n\n\tslices.Sort(ids)\n\n\tfor _, id := range ids {\n\t\tregion := dm.Regions[id]\n\t\tif len(region.Nodes) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tdm.Regions[id] = shuffleRegionNoClone(region)\n\t}\n}\n\nvar crc64Table = crc64.MakeTable(crc64.ISO)\n\nvar (\n\tderpRandomOnce sync.Once\n\tderpRandomInst *rand.Rand\n\tderpRandomMu   sync.Mutex\n)\n\nfunc 
derpRandom() *rand.Rand {\n\tderpRandomMu.Lock()\n\tdefer derpRandomMu.Unlock()\n\n\tderpRandomOnce.Do(func() {\n\t\tseed := cmp.Or(viper.GetString(\"dns.base_domain\"), time.Now().String())\n\t\trnd := rand.New(rand.NewSource(0))                        //nolint:gosec // weak random is fine for DERP scrambling\n\t\trnd.Seed(int64(crc64.Checksum([]byte(seed), crc64Table))) //nolint:gosec // safe conversion\n\t\tderpRandomInst = rnd\n\t})\n\n\treturn derpRandomInst\n}\n\nfunc resetDerpRandomForTesting() {\n\tderpRandomMu.Lock()\n\tdefer derpRandomMu.Unlock()\n\n\tderpRandomOnce = sync.Once{}\n\tderpRandomInst = nil\n}\n\nfunc shuffleRegionNoClone(r *tailcfg.DERPRegion) *tailcfg.DERPRegion {\n\tderpRandom().Shuffle(len(r.Nodes), reflect.Swapper(r.Nodes))\n\treturn r\n}\n"
  },
  {
    "path": "hscontrol/derp/derp_test.go",
    "content": "package derp\n\nimport (\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/spf13/viper\"\n\t\"tailscale.com/tailcfg\"\n)\n\nfunc TestShuffleDERPMapDeterministic(t *testing.T) {\n\ttests := []struct {\n\t\tname       string\n\t\tbaseDomain string\n\t\tderpMap    *tailcfg.DERPMap\n\t\texpected   *tailcfg.DERPMap\n\t}{\n\t\t{\n\t\t\tname:       \"single region with 4 nodes\",\n\t\t\tbaseDomain: \"test1.example.com\",\n\t\t\tderpMap: &tailcfg.DERPMap{\n\t\t\t\tRegions: map[int]*tailcfg.DERPRegion{\n\t\t\t\t\t1: {\n\t\t\t\t\t\tRegionID:   1,\n\t\t\t\t\t\tRegionCode: \"nyc\",\n\t\t\t\t\t\tRegionName: \"New York City\",\n\t\t\t\t\t\tNodes: []*tailcfg.DERPNode{\n\t\t\t\t\t\t\t{Name: \"1f\", RegionID: 1, HostName: \"derp1f.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"1g\", RegionID: 1, HostName: \"derp1g.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"1h\", RegionID: 1, HostName: \"derp1h.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"1i\", RegionID: 1, HostName: \"derp1i.tailscale.com\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: &tailcfg.DERPMap{\n\t\t\t\tRegions: map[int]*tailcfg.DERPRegion{\n\t\t\t\t\t1: {\n\t\t\t\t\t\tRegionID:   1,\n\t\t\t\t\t\tRegionCode: \"nyc\",\n\t\t\t\t\t\tRegionName: \"New York City\",\n\t\t\t\t\t\tNodes: []*tailcfg.DERPNode{\n\t\t\t\t\t\t\t{Name: \"1g\", RegionID: 1, HostName: \"derp1g.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"1f\", RegionID: 1, HostName: \"derp1f.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"1i\", RegionID: 1, HostName: \"derp1i.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"1h\", RegionID: 1, HostName: \"derp1h.tailscale.com\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:       \"multiple regions with nodes\",\n\t\t\tbaseDomain: \"test2.example.com\",\n\t\t\tderpMap: &tailcfg.DERPMap{\n\t\t\t\tRegions: map[int]*tailcfg.DERPRegion{\n\t\t\t\t\t10: {\n\t\t\t\t\t\tRegionID:   10,\n\t\t\t\t\t\tRegionCode: \"sea\",\n\t\t\t\t\t\tRegionName: \"Seattle\",\n\t\t\t\t\t\tNodes: []*tailcfg.DERPNode{\n\t\t\t\t\t\t\t{Name: \"10b\", RegionID: 10, HostName: \"derp10b.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"10c\", RegionID: 10, HostName: \"derp10c.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"10d\", RegionID: 10, HostName: \"derp10d.tailscale.com\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t2: {\n\t\t\t\t\t\tRegionID:   2,\n\t\t\t\t\t\tRegionCode: \"sfo\",\n\t\t\t\t\t\tRegionName: \"San Francisco\",\n\t\t\t\t\t\tNodes: []*tailcfg.DERPNode{\n\t\t\t\t\t\t\t{Name: \"2d\", RegionID: 2, HostName: \"derp2d.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"2e\", RegionID: 2, HostName: \"derp2e.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"2f\", RegionID: 2, HostName: \"derp2f.tailscale.com\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: &tailcfg.DERPMap{\n\t\t\t\tRegions: map[int]*tailcfg.DERPRegion{\n\t\t\t\t\t10: {\n\t\t\t\t\t\tRegionID:   10,\n\t\t\t\t\t\tRegionCode: \"sea\",\n\t\t\t\t\t\tRegionName: \"Seattle\",\n\t\t\t\t\t\tNodes: []*tailcfg.DERPNode{\n\t\t\t\t\t\t\t{Name: \"10d\", RegionID: 10, HostName: \"derp10d.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"10c\", RegionID: 10, HostName: \"derp10c.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"10b\", RegionID: 10, HostName: \"derp10b.tailscale.com\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t2: {\n\t\t\t\t\t\tRegionID:   2,\n\t\t\t\t\t\tRegionCode: \"sfo\",\n\t\t\t\t\t\tRegionName: \"San Francisco\",\n\t\t\t\t\t\tNodes: []*tailcfg.DERPNode{\n\t\t\t\t\t\t\t{Name: \"2d\", RegionID: 2, HostName: \"derp2d.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: 
\"2e\", RegionID: 2, HostName: \"derp2e.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"2f\", RegionID: 2, HostName: \"derp2f.tailscale.com\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:       \"large region with many nodes\",\n\t\t\tbaseDomain: \"test3.example.com\",\n\t\t\tderpMap: &tailcfg.DERPMap{\n\t\t\t\tRegions: map[int]*tailcfg.DERPRegion{\n\t\t\t\t\t4: {\n\t\t\t\t\t\tRegionID:   4,\n\t\t\t\t\t\tRegionCode: \"fra\",\n\t\t\t\t\t\tRegionName: \"Frankfurt\",\n\t\t\t\t\t\tNodes: []*tailcfg.DERPNode{\n\t\t\t\t\t\t\t{Name: \"4f\", RegionID: 4, HostName: \"derp4f.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"4g\", RegionID: 4, HostName: \"derp4g.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"4h\", RegionID: 4, HostName: \"derp4h.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"4i\", RegionID: 4, HostName: \"derp4i.tailscale.com\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: &tailcfg.DERPMap{\n\t\t\t\tRegions: map[int]*tailcfg.DERPRegion{\n\t\t\t\t\t4: {\n\t\t\t\t\t\tRegionID:   4,\n\t\t\t\t\t\tRegionCode: \"fra\",\n\t\t\t\t\t\tRegionName: \"Frankfurt\",\n\t\t\t\t\t\tNodes: []*tailcfg.DERPNode{\n\t\t\t\t\t\t\t{Name: \"4f\", RegionID: 4, HostName: \"derp4f.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"4h\", RegionID: 4, HostName: \"derp4h.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"4g\", RegionID: 4, HostName: \"derp4g.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"4i\", RegionID: 4, HostName: \"derp4i.tailscale.com\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:       \"same region different base domain\",\n\t\t\tbaseDomain: \"different.example.com\",\n\t\t\tderpMap: &tailcfg.DERPMap{\n\t\t\t\tRegions: map[int]*tailcfg.DERPRegion{\n\t\t\t\t\t4: {\n\t\t\t\t\t\tRegionID:   4,\n\t\t\t\t\t\tRegionCode: \"fra\",\n\t\t\t\t\t\tRegionName: \"Frankfurt\",\n\t\t\t\t\t\tNodes: []*tailcfg.DERPNode{\n\t\t\t\t\t\t\t{Name: \"4f\", RegionID: 4, HostName: \"derp4f.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"4g\", RegionID: 4, HostName: \"derp4g.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"4h\", RegionID: 4, HostName: \"derp4h.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"4i\", RegionID: 4, HostName: \"derp4i.tailscale.com\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: &tailcfg.DERPMap{\n\t\t\t\tRegions: map[int]*tailcfg.DERPRegion{\n\t\t\t\t\t4: {\n\t\t\t\t\t\tRegionID:   4,\n\t\t\t\t\t\tRegionCode: \"fra\",\n\t\t\t\t\t\tRegionName: \"Frankfurt\",\n\t\t\t\t\t\tNodes: []*tailcfg.DERPNode{\n\t\t\t\t\t\t\t{Name: \"4g\", RegionID: 4, HostName: \"derp4g.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"4i\", RegionID: 4, HostName: \"derp4i.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"4f\", RegionID: 4, HostName: \"derp4f.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"4h\", RegionID: 4, HostName: \"derp4h.tailscale.com\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:       \"same dataset with another base domain\",\n\t\t\tbaseDomain: \"another.example.com\",\n\t\t\tderpMap: &tailcfg.DERPMap{\n\t\t\t\tRegions: map[int]*tailcfg.DERPRegion{\n\t\t\t\t\t4: {\n\t\t\t\t\t\tRegionID:   4,\n\t\t\t\t\t\tRegionCode: \"fra\",\n\t\t\t\t\t\tRegionName: \"Frankfurt\",\n\t\t\t\t\t\tNodes: []*tailcfg.DERPNode{\n\t\t\t\t\t\t\t{Name: \"4f\", RegionID: 4, HostName: \"derp4f.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"4g\", RegionID: 4, HostName: \"derp4g.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"4h\", RegionID: 4, HostName: \"derp4h.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"4i\", RegionID: 4, HostName: 
\"derp4i.tailscale.com\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: &tailcfg.DERPMap{\n\t\t\t\tRegions: map[int]*tailcfg.DERPRegion{\n\t\t\t\t\t4: {\n\t\t\t\t\t\tRegionID:   4,\n\t\t\t\t\t\tRegionCode: \"fra\",\n\t\t\t\t\t\tRegionName: \"Frankfurt\",\n\t\t\t\t\t\tNodes: []*tailcfg.DERPNode{\n\t\t\t\t\t\t\t{Name: \"4h\", RegionID: 4, HostName: \"derp4h.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"4f\", RegionID: 4, HostName: \"derp4f.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"4g\", RegionID: 4, HostName: \"derp4g.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"4i\", RegionID: 4, HostName: \"derp4i.tailscale.com\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:       \"same dataset with yet another base domain\",\n\t\t\tbaseDomain: \"yetanother.example.com\",\n\t\t\tderpMap: &tailcfg.DERPMap{\n\t\t\t\tRegions: map[int]*tailcfg.DERPRegion{\n\t\t\t\t\t4: {\n\t\t\t\t\t\tRegionID:   4,\n\t\t\t\t\t\tRegionCode: \"fra\",\n\t\t\t\t\t\tRegionName: \"Frankfurt\",\n\t\t\t\t\t\tNodes: []*tailcfg.DERPNode{\n\t\t\t\t\t\t\t{Name: \"4f\", RegionID: 4, HostName: \"derp4f.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"4g\", RegionID: 4, HostName: \"derp4g.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"4h\", RegionID: 4, HostName: \"derp4h.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"4i\", RegionID: 4, HostName: \"derp4i.tailscale.com\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: &tailcfg.DERPMap{\n\t\t\t\tRegions: map[int]*tailcfg.DERPRegion{\n\t\t\t\t\t4: {\n\t\t\t\t\t\tRegionID:   4,\n\t\t\t\t\t\tRegionCode: \"fra\",\n\t\t\t\t\t\tRegionName: \"Frankfurt\",\n\t\t\t\t\t\tNodes: []*tailcfg.DERPNode{\n\t\t\t\t\t\t\t{Name: \"4i\", RegionID: 4, HostName: \"derp4i.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"4h\", RegionID: 4, HostName: \"derp4h.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"4f\", RegionID: 4, HostName: \"derp4f.tailscale.com\"},\n\t\t\t\t\t\t\t{Name: \"4g\", RegionID: 4, HostName: \"derp4g.tailscale.com\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tviper.Set(\"dns.base_domain\", tt.baseDomain)\n\n\t\t\tdefer viper.Reset()\n\n\t\t\tresetDerpRandomForTesting()\n\n\t\t\ttestMap := tt.derpMap.View().AsStruct()\n\t\t\tshuffleDERPMap(testMap)\n\n\t\t\tif diff := cmp.Diff(tt.expected, testMap); diff != \"\" {\n\t\t\t\tt.Errorf(\"Shuffled DERP map doesn't match expected (-expected +actual):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestShuffleDERPMapEdgeCases(t *testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\tderpMap *tailcfg.DERPMap\n\t}{\n\t\t{\n\t\t\tname:    \"nil derp map\",\n\t\t\tderpMap: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"empty derp map\",\n\t\t\tderpMap: &tailcfg.DERPMap{\n\t\t\t\tRegions: map[int]*tailcfg.DERPRegion{},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"region with no nodes\",\n\t\t\tderpMap: &tailcfg.DERPMap{\n\t\t\t\tRegions: map[int]*tailcfg.DERPRegion{\n\t\t\t\t\t1: {\n\t\t\t\t\t\tRegionID:   1,\n\t\t\t\t\t\tRegionCode: \"empty\",\n\t\t\t\t\t\tRegionName: \"Empty Region\",\n\t\t\t\t\t\tNodes:      []*tailcfg.DERPNode{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"region with single node\",\n\t\t\tderpMap: &tailcfg.DERPMap{\n\t\t\t\tRegions: map[int]*tailcfg.DERPRegion{\n\t\t\t\t\t1: {\n\t\t\t\t\t\tRegionID:   1,\n\t\t\t\t\t\tRegionCode: \"single\",\n\t\t\t\t\t\tRegionName: \"Single Node Region\",\n\t\t\t\t\t\tNodes: []*tailcfg.DERPNode{\n\t\t\t\t\t\t\t{Name: \"1a\", RegionID: 1, 
HostName: \"derp1a.tailscale.com\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tshuffleDERPMap(tt.derpMap)\n\t\t})\n\t}\n}\n\nfunc TestShuffleDERPMapWithoutBaseDomain(t *testing.T) {\n\tviper.Reset()\n\tresetDerpRandomForTesting()\n\n\tderpMap := &tailcfg.DERPMap{\n\t\tRegions: map[int]*tailcfg.DERPRegion{\n\t\t\t1: {\n\t\t\t\tRegionID:   1,\n\t\t\t\tRegionCode: \"test\",\n\t\t\t\tRegionName: \"Test Region\",\n\t\t\t\tNodes: []*tailcfg.DERPNode{\n\t\t\t\t\t{Name: \"1a\", RegionID: 1, HostName: \"derp1a.test.com\"},\n\t\t\t\t\t{Name: \"1b\", RegionID: 1, HostName: \"derp1b.test.com\"},\n\t\t\t\t\t{Name: \"1c\", RegionID: 1, HostName: \"derp1c.test.com\"},\n\t\t\t\t\t{Name: \"1d\", RegionID: 1, HostName: \"derp1d.test.com\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\toriginal := derpMap.View().AsStruct()\n\tshuffleDERPMap(derpMap)\n\n\tif len(derpMap.Regions) != 1 || len(derpMap.Regions[1].Nodes) != 4 {\n\t\tt.Error(\"Shuffle corrupted DERP map structure\")\n\t}\n\n\toriginalNodes := make(map[string]bool)\n\tfor _, node := range original.Regions[1].Nodes {\n\t\toriginalNodes[node.Name] = true\n\t}\n\n\tshuffledNodes := make(map[string]bool)\n\tfor _, node := range derpMap.Regions[1].Nodes {\n\t\tshuffledNodes[node.Name] = true\n\t}\n\n\tif diff := cmp.Diff(originalNodes, shuffledNodes); diff != \"\" {\n\t\tt.Errorf(\"Shuffle changed node set (-original +shuffled):\\n%s\", diff)\n\t}\n}\n"
  },
  {
    "path": "hscontrol/derp/server/derp_server.go",
    "content": "package server\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/netip\"\n\t\"net/url\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/coder/websocket\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/rs/zerolog/log\"\n\t\"tailscale.com/derp\"\n\t\"tailscale.com/derp/derpserver\"\n\t\"tailscale.com/envknob\"\n\t\"tailscale.com/net/stun\"\n\t\"tailscale.com/net/wsconn\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/key\"\n)\n\n// fastStartHeader is the header (with value \"1\") that signals to the HTTP\n// server that the DERP HTTP client does not want the HTTP 101 response\n// headers and it will begin writing & reading the DERP protocol immediately\n// following its HTTP request.\nconst (\n\tfastStartHeader  = \"Derp-Fast-Start\"\n\tDerpVerifyScheme = \"headscale-derp-verify\"\n)\n\n// debugUseDERPIP is a debug-only flag that causes the DERP server to resolve\n// hostnames to IP addresses when generating the DERP region configuration.\n// This is useful for integration testing where DNS resolution may be unreliable.\nvar debugUseDERPIP = envknob.Bool(\"HEADSCALE_DEBUG_DERP_USE_IP\")\n\ntype DERPServer struct {\n\tserverURL     string\n\tkey           key.NodePrivate\n\tcfg           *types.DERPConfig\n\ttailscaleDERP *derpserver.Server\n}\n\nfunc NewDERPServer(\n\tserverURL string,\n\tderpKey key.NodePrivate,\n\tcfg *types.DERPConfig,\n) (*DERPServer, error) {\n\tlog.Trace().Caller().Msg(\"creating new embedded DERP server\")\n\tserver := derpserver.New(derpKey, util.TSLogfWrapper()) // nolint // zerolinter complains\n\n\tif cfg.ServerVerifyClients {\n\t\tserver.SetVerifyClientURL(DerpVerifyScheme + \"://verify\")\n\t\tserver.SetVerifyClientURLFailOpen(false)\n\t}\n\n\treturn &DERPServer{\n\t\tserverURL:     serverURL,\n\t\tkey:           derpKey,\n\t\tcfg:           cfg,\n\t\ttailscaleDERP: server,\n\t}, nil\n}\n\nfunc (d *DERPServer) GenerateRegion() (tailcfg.DERPRegion, error) {\n\tserverURL, err := url.Parse(d.serverURL)\n\tif err != nil {\n\t\treturn tailcfg.DERPRegion{}, err\n\t}\n\n\tvar (\n\t\thost    string\n\t\tport    int\n\t\tportStr string\n\t)\n\n\t// Extract hostname and port from URL\n\thost, portStr, err = net.SplitHostPort(serverURL.Host)\n\tif err != nil {\n\t\tif serverURL.Scheme == \"https\" {\n\t\t\thost = serverURL.Host\n\t\t\tport = 443\n\t\t} else {\n\t\t\thost = serverURL.Host\n\t\t\tport = 80\n\t\t}\n\t} else {\n\t\tport, err = strconv.Atoi(portStr)\n\t\tif err != nil {\n\t\t\treturn tailcfg.DERPRegion{}, err\n\t\t}\n\t}\n\n\t// If debug flag is set, resolve hostname to IP address\n\tif debugUseDERPIP {\n\t\tips, err := new(net.Resolver).LookupIPAddr(context.Background(), host)\n\t\tif err != nil {\n\t\t\tlog.Error().Caller().Err(err).Msgf(\"failed to resolve DERP hostname %s to IP, using hostname\", host)\n\t\t} else if len(ips) > 0 {\n\t\t\t// Use the first IP address\n\t\t\tipStr := ips[0].IP.String()\n\t\t\tlog.Info().Caller().Msgf(\"HEADSCALE_DEBUG_DERP_USE_IP: resolved %s to %s\", host, ipStr)\n\t\t\thost = ipStr\n\t\t}\n\t}\n\n\tlocalDERPregion := tailcfg.DERPRegion{\n\t\tRegionID:   d.cfg.ServerRegionID,\n\t\tRegionCode: d.cfg.ServerRegionCode,\n\t\tRegionName: d.cfg.ServerRegionName,\n\t\tAvoid:      false,\n\t\tNodes: []*tailcfg.DERPNode{\n\t\t\t{\n\t\t\t\tName:     strconv.Itoa(d.cfg.ServerRegionID),\n\t\t\t\tRegionID: d.cfg.ServerRegionID,\n\t\t\t\tHostName: 
host,\n\t\t\t\tDERPPort: port,\n\t\t\t\tIPv4:     d.cfg.IPv4,\n\t\t\t\tIPv6:     d.cfg.IPv6,\n\t\t\t},\n\t\t},\n\t}\n\n\t_, portSTUNStr, err := net.SplitHostPort(d.cfg.STUNAddr)\n\tif err != nil {\n\t\treturn tailcfg.DERPRegion{}, err\n\t}\n\n\tportSTUN, err := strconv.Atoi(portSTUNStr)\n\tif err != nil {\n\t\treturn tailcfg.DERPRegion{}, err\n\t}\n\n\tlocalDERPregion.Nodes[0].STUNPort = portSTUN\n\n\tlog.Info().Caller().Msgf(\"derp region: %+v\", localDERPregion)\n\tlog.Info().Caller().Msgf(\"derp nodes[0]: %+v\", localDERPregion.Nodes[0])\n\n\treturn localDERPregion, nil\n}\n\n
func (d *DERPServer) DERPHandler(\n\twriter http.ResponseWriter,\n\treq *http.Request,\n) {\n\tlog.Trace().Caller().Msgf(\"/derp request from %v\", req.RemoteAddr)\n\tupgrade := strings.ToLower(req.Header.Get(\"Upgrade\"))\n\n\tif upgrade != \"websocket\" && upgrade != \"derp\" {\n\t\tif upgrade != \"\" {\n\t\t\tlog.Warn().\n\t\t\t\tCaller().\n\t\t\t\tMsg(\"No Upgrade header in DERP server request. If headscale is behind a reverse proxy, make sure it is configured to pass WebSockets through.\")\n\t\t}\n\n\t\twriter.Header().Set(\"Content-Type\", \"text/plain\")\n\t\twriter.WriteHeader(http.StatusUpgradeRequired)\n\n\t\t_, err := writer.Write([]byte(\"DERP requires connection upgrade\"))\n\t\tif err != nil {\n\t\t\tlog.Error().\n\t\t\t\tCaller().\n\t\t\t\tErr(err).\n\t\t\t\tMsg(\"Failed to write HTTP response\")\n\t\t}\n\n\t\treturn\n\t}\n\n\tif strings.Contains(req.Header.Get(\"Sec-Websocket-Protocol\"), \"derp\") {\n\t\td.serveWebsocket(writer, req)\n\t} else {\n\t\td.servePlain(writer, req)\n\t}\n}\n\n
func (d *DERPServer) serveWebsocket(writer http.ResponseWriter, req *http.Request) {\n\twebsocketConn, err := websocket.Accept(writer, req, &websocket.AcceptOptions{\n\t\tSubprotocols:   []string{\"derp\"},\n\t\tOriginPatterns: []string{\"*\"},\n\t\t// Disable compression because DERP transmits WireGuard messages that\n\t\t// are not compressible.\n\t\t// Additionally, Safari has a broken implementation of compression\n\t\t// (see https://github.com/nhooyr/websocket/issues/218) that makes\n\t\t// enabling it actively harmful.\n\t\tCompressionMode: websocket.CompressionDisabled,\n\t})\n\tif err != nil {\n\t\tlog.Error().\n\t\t\tCaller().\n\t\t\tErr(err).\n\t\t\tMsg(\"Failed to upgrade websocket request\")\n\n\t\twriter.Header().Set(\"Content-Type\", \"text/plain\")\n\t\twriter.WriteHeader(http.StatusInternalServerError)\n\n\t\t_, err = writer.Write([]byte(\"Failed to upgrade websocket request\"))\n\t\tif err != nil {\n\t\t\tlog.Error().\n\t\t\t\tCaller().\n\t\t\t\tErr(err).\n\t\t\t\tMsg(\"Failed to write HTTP response\")\n\t\t}\n\n\t\treturn\n\t}\n\tdefer websocketConn.Close(websocket.StatusInternalError, \"closing\")\n\n\tif websocketConn.Subprotocol() != \"derp\" {\n\t\twebsocketConn.Close(websocket.StatusPolicyViolation, \"client must speak the derp subprotocol\")\n\n\t\treturn\n\t}\n\n\twc := wsconn.NetConn(req.Context(), websocketConn, websocket.MessageBinary, req.RemoteAddr)\n\tbrw := bufio.NewReadWriter(bufio.NewReader(wc), bufio.NewWriter(wc))\n\td.tailscaleDERP.Accept(req.Context(), wc, brw, req.RemoteAddr)\n}\n\n
func (d *DERPServer) servePlain(writer http.ResponseWriter, req *http.Request) {\n\tfastStart := req.Header.Get(fastStartHeader) == \"1\"\n\n\thijacker, ok := writer.(http.Hijacker)\n\tif !ok {\n\t\tlog.Error().Caller().Msg(\"derp requires Hijacker interface\")\n\t\twriter.Header().Set(\"Content-Type\", \"text/plain\")\n\t\twriter.WriteHeader(http.StatusInternalServerError)\n\n\t\t_, err := writer.Write([]byte(\"HTTP connection does not support hijacking\"))\n\t\tif err != nil {\n\t\t\tlog.Error().\n\t\t\t\tCaller().\n\t\t\t\tErr(err).\n\t\t\t\tMsg(\"Failed to write HTTP response\")\n\t\t}\n\n\t\treturn\n\t}\n\n\tnetConn, conn, err := hijacker.Hijack()\n\tif err != nil {\n\t\tlog.Error().Caller().Err(err).Msgf(\"hijack failed\")\n\t\twriter.Header().Set(\"Content-Type\", \"text/plain\")\n\t\twriter.WriteHeader(http.StatusInternalServerError)\n\n\t\t_, err = writer.Write([]byte(\"HTTP connection does not support hijacking\"))\n\t\tif err != nil {\n\t\t\tlog.Error().\n\t\t\t\tCaller().\n\t\t\t\tErr(err).\n\t\t\t\tMsg(\"Failed to write HTTP response\")\n\t\t}\n\n\t\treturn\n\t}\n\n\tlog.Trace().Caller().Msgf(\"hijacked connection from %v\", req.RemoteAddr)\n\n\tif !fastStart {\n\t\tpubKey := d.key.Public()\n\t\tpubKeyStr, _ := pubKey.MarshalText() //nolint\n\t\tfmt.Fprintf(conn, \"HTTP/1.1 101 Switching Protocols\\r\\n\"+\n\t\t\t\"Upgrade: DERP\\r\\n\"+\n\t\t\t\"Connection: Upgrade\\r\\n\"+\n\t\t\t\"Derp-Version: %v\\r\\n\"+\n\t\t\t\"Derp-Public-Key: %s\\r\\n\\r\\n\",\n\t\t\tderp.ProtocolVersion,\n\t\t\tstring(pubKeyStr))\n\t}\n\n\td.tailscaleDERP.Accept(req.Context(), netConn, conn, netConn.RemoteAddr().String())\n}\n\n
// DERPProbeHandler is the endpoint that js/wasm clients hit to measure\n// DERP latency, since they can't do UDP STUN queries.\nfunc DERPProbeHandler(\n\twriter http.ResponseWriter,\n\treq *http.Request,\n) {\n\tswitch req.Method {\n\tcase http.MethodHead, http.MethodGet:\n\t\twriter.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\t\twriter.WriteHeader(http.StatusOK)\n\tdefault:\n\t\twriter.WriteHeader(http.StatusMethodNotAllowed)\n\n\t\t_, err := writer.Write([]byte(\"bogus probe method\"))\n\t\tif err != nil {\n\t\t\tlog.Error().\n\t\t\t\tCaller().\n\t\t\t\tErr(err).\n\t\t\t\tMsg(\"Failed to write HTTP response\")\n\t\t}\n\t}\n}\n\n
// DERPBootstrapDNSHandler implements the /bootstrap-dns endpoint.\n// Described in https://github.com/tailscale/tailscale/issues/1405,\n// this endpoint provides a way to help a client when it fails to start up\n// because its DNS is broken.\n// The initial implementation is here https://github.com/tailscale/tailscale/pull/1406\n// They have a cache, but it is not clear if that is really necessary at Headscale, uh, scale.\n// An example implementation is found here https://derp.tailscale.com/bootstrap-dns\n// The coordination server is included automatically, since the local DERP uses the same DNS name in d.serverURL.\nfunc DERPBootstrapDNSHandler(\n\tderpMap tailcfg.DERPMapView,\n) func(http.ResponseWriter, *http.Request) {\n\treturn func(\n\t\twriter http.ResponseWriter,\n\t\treq *http.Request,\n\t) {\n\t\tdnsEntries := make(map[string][]net.IP)\n\n\t\tresolvCtx, cancel := context.WithTimeout(req.Context(), time.Minute)\n\t\tdefer cancel()\n\n\t\tvar resolver net.Resolver\n\n\t\tfor _, region := range derpMap.Regions().All() { //nolint:unqueryvet // not SQLBoiler, tailcfg iterator\n\t\t\tfor _, node := range region.Nodes().All() { //nolint:unqueryvet // not SQLBoiler, tailcfg iterator\n\t\t\t\taddrs, err := resolver.LookupIP(resolvCtx, \"ip\", node.HostName())\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Trace().\n\t\t\t\t\t\tCaller().\n\t\t\t\t\t\tErr(err).\n\t\t\t\t\t\tMsgf(\"bootstrap DNS lookup failed %q\", node.HostName())\n\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tdnsEntries[node.HostName()] = addrs\n\t\t\t}\n\t\t}\n\n\t\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\t\twriter.WriteHeader(http.StatusOK)\n\n\t\terr := 
json.NewEncoder(writer).Encode(dnsEntries)\n\t\tif err != nil {\n\t\t\tlog.Error().\n\t\t\t\tCaller().\n\t\t\t\tErr(err).\n\t\t\t\tMsg(\"Failed to write HTTP response\")\n\t\t}\n\t}\n}\n\n// ServeSTUN starts a STUN server on the configured addr.\nfunc (d *DERPServer) ServeSTUN() {\n\tpacketConn, err := new(net.ListenConfig).ListenPacket(context.Background(), \"udp\", d.cfg.STUNAddr)\n\tif err != nil {\n\t\tlog.Fatal().Msgf(\"failed to open STUN listener: %v\", err)\n\t}\n\n\tlog.Info().Msgf(\"stun server started at %s\", packetConn.LocalAddr())\n\n\tudpConn, ok := packetConn.(*net.UDPConn)\n\tif !ok {\n\t\tlog.Fatal().Msg(\"stun listener is not a UDP listener\")\n\t}\n\n\tserverSTUNListener(context.Background(), udpConn)\n}\n\nfunc serverSTUNListener(ctx context.Context, packetConn *net.UDPConn) {\n\tvar (\n\t\tbuf       [64 << 10]byte\n\t\tbytesRead int\n\t\tudpAddr   *net.UDPAddr\n\t\terr       error\n\t)\n\n\tfor {\n\t\tbytesRead, udpAddr, err = packetConn.ReadFromUDP(buf[:])\n\t\tif err != nil {\n\t\t\tif ctx.Err() != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlog.Error().Caller().Err(err).Msgf(\"stun ReadFrom\")\n\n\t\t\t// Rate limit error logging - wait before retrying, but respect context cancellation\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase <-time.After(time.Second):\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Trace().Caller().Msgf(\"stun request from %v\", udpAddr)\n\n\t\tpkt := buf[:bytesRead]\n\t\tif !stun.Is(pkt) {\n\t\t\tlog.Trace().Caller().Msgf(\"udp packet is not stun\")\n\n\t\t\tcontinue\n\t\t}\n\n\t\ttxid, err := stun.ParseBindingRequest(pkt)\n\t\tif err != nil {\n\t\t\tlog.Trace().Caller().Err(err).Msgf(\"stun parse error\")\n\n\t\t\tcontinue\n\t\t}\n\n\t\taddr, _ := netip.AddrFromSlice(udpAddr.IP)\n\t\tres := stun.Response(txid, netip.AddrPortFrom(addr, uint16(udpAddr.Port))) //nolint:gosec // port is always <=65535\n\n\t\t_, err = packetConn.WriteTo(res, udpAddr)\n\t\tif err != nil {\n\t\t\tlog.Trace().Caller().Err(err).Msgf(\"issue writing to UDP\")\n\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc NewDERPVerifyTransport(handleVerifyRequest func(*http.Request, io.Writer) error) *DERPVerifyTransport {\n\treturn &DERPVerifyTransport{\n\t\thandleVerifyRequest: handleVerifyRequest,\n\t}\n}\n\ntype DERPVerifyTransport struct {\n\thandleVerifyRequest func(*http.Request, io.Writer) error\n}\n\nfunc (t *DERPVerifyTransport) RoundTrip(req *http.Request) (*http.Response, error) {\n\tbuf := new(bytes.Buffer)\n\n\terr := t.handleVerifyRequest(req, buf)\n\tif err != nil {\n\t\tlog.Error().Caller().Err(err).Msg(\"failed to handle client verify request\")\n\n\t\treturn nil, err\n\t}\n\n\tresp := &http.Response{\n\t\tStatusCode: http.StatusOK,\n\t\tBody:       io.NopCloser(buf),\n\t}\n\n\treturn resp, nil\n}\n"
  },
  {
    "path": "hscontrol/dns/extrarecords.go",
    "content": "package dns\n\nimport (\n\t\"context\"\n\t\"crypto/sha256\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\n\t\"github.com/cenkalti/backoff/v5\"\n\t\"github.com/fsnotify/fsnotify\"\n\t\"github.com/rs/zerolog/log\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/util/set\"\n)\n\n// ErrPathIsDirectory is returned when a directory path is provided where a file is expected.\nvar ErrPathIsDirectory = errors.New(\"path is a directory, only file is supported\")\n\ntype ExtraRecordsMan struct {\n\tmu      sync.RWMutex\n\trecords set.Set[tailcfg.DNSRecord]\n\twatcher *fsnotify.Watcher\n\tpath    string\n\n\tupdateCh chan []tailcfg.DNSRecord\n\tcloseCh  chan struct{}\n\thashes   map[string][32]byte\n}\n\n// NewExtraRecordsManager creates a new ExtraRecordsMan and starts watching the file at the given path.\nfunc NewExtraRecordsManager(path string) (*ExtraRecordsMan, error) {\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating watcher: %w\", err)\n\t}\n\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"getting file info: %w\", err)\n\t}\n\n\tif fi.IsDir() {\n\t\treturn nil, fmt.Errorf(\"%w: %s\", ErrPathIsDirectory, path)\n\t}\n\n\trecords, hash, err := readExtraRecordsFromPath(path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"reading extra records from path: %w\", err)\n\t}\n\n\ter := &ExtraRecordsMan{\n\t\twatcher: watcher,\n\t\tpath:    path,\n\t\trecords: set.SetOf(records),\n\t\thashes: map[string][32]byte{\n\t\t\tpath: hash,\n\t\t},\n\t\tcloseCh:  make(chan struct{}),\n\t\tupdateCh: make(chan []tailcfg.DNSRecord),\n\t}\n\n\terr = watcher.Add(path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"adding path to watcher: %w\", err)\n\t}\n\n\tlog.Trace().Caller().Strs(\"watching\", watcher.WatchList()).Msg(\"started filewatcher\")\n\n\treturn er, nil\n}\n\nfunc (e *ExtraRecordsMan) Records() []tailcfg.DNSRecord {\n\te.mu.RLock()\n\tdefer e.mu.RUnlock()\n\n\treturn e.records.Slice()\n}\n\nfunc (e *ExtraRecordsMan) Run() {\n\tfor {\n\t\tselect {\n\t\tcase <-e.closeCh:\n\t\t\treturn\n\t\tcase event, ok := <-e.watcher.Events:\n\t\t\tif !ok {\n\t\t\t\tlog.Error().Caller().Msgf(\"file watcher event channel closing\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tswitch event.Op {\n\t\t\tcase fsnotify.Create, fsnotify.Write, fsnotify.Chmod:\n\t\t\t\tlog.Trace().Caller().Str(\"path\", event.Name).Str(\"op\", event.Op.String()).Msg(\"extra records received filewatch event\")\n\n\t\t\t\tif event.Name != e.path {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\te.updateRecords()\n\n\t\t\t\t// If a file is removed or renamed, fsnotify will lose track of it\n\t\t\t\t// and not watch it. 
We will therefore attempt to re-add it with a backoff.\n\t\t\tcase fsnotify.Remove, fsnotify.Rename:\n\t\t\t\t_, err := backoff.Retry(context.Background(), func() (struct{}, error) {\n\t\t\t\t\tif _, err := os.Stat(e.path); err != nil { //nolint:noinlineerr\n\t\t\t\t\t\treturn struct{}{}, err\n\t\t\t\t\t}\n\n\t\t\t\t\treturn struct{}{}, nil\n\t\t\t\t}, backoff.WithBackOff(backoff.NewExponentialBackOff()))\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error().Caller().Err(err).Msgf(\"extra records filewatcher retrying to find file after delete\")\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\terr = e.watcher.Add(e.path)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error().Caller().Err(err).Msgf(\"extra records filewatcher re-adding file after delete failed, giving up.\")\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\tlog.Trace().Caller().Str(\"path\", e.path).Msg(\"extra records file re-added after delete\")\n\t\t\t\t\te.updateRecords()\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase err, ok := <-e.watcher.Errors:\n\t\t\tif !ok {\n\t\t\t\tlog.Error().Caller().Msgf(\"file watcher error channel closing\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlog.Error().Caller().Err(err).Msgf(\"extra records filewatcher returned error: %q\", err)\n\t\t}\n\t}\n}\n\nfunc (e *ExtraRecordsMan) Close() {\n\te.watcher.Close()\n\tclose(e.closeCh)\n}\n\nfunc (e *ExtraRecordsMan) UpdateCh() <-chan []tailcfg.DNSRecord {\n\treturn e.updateCh\n}\n\nfunc (e *ExtraRecordsMan) updateRecords() {\n\trecords, newHash, err := readExtraRecordsFromPath(e.path)\n\tif err != nil {\n\t\tlog.Error().Caller().Err(err).Msgf(\"reading extra records from path: %s\", e.path)\n\t\treturn\n\t}\n\n\t// If there are no records, ignore the update.\n\tif records == nil {\n\t\treturn\n\t}\n\n\te.mu.Lock()\n\tdefer e.mu.Unlock()\n\n\t// If there has not been any change, ignore the update.\n\tif oldHash, ok := e.hashes[e.path]; ok {\n\t\tif newHash == oldHash {\n\t\t\treturn\n\t\t}\n\t}\n\n\toldCount := e.records.Len()\n\n\te.records = set.SetOf(records)\n\te.hashes[e.path] = newHash\n\n\tlog.Trace().Caller().Interface(\"records\", e.records).Msgf(\"extra records updated from path, count old: %d, new: %d\", oldCount, e.records.Len())\n\n\te.updateCh <- e.records.Slice()\n}\n\n// readExtraRecordsFromPath reads a JSON file of tailcfg.DNSRecord\n// and returns the records and the hash of the file.\nfunc readExtraRecordsFromPath(path string) ([]tailcfg.DNSRecord, [32]byte, error) {\n\tb, err := os.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, [32]byte{}, fmt.Errorf(\"reading path: %s, err: %w\", path, err)\n\t}\n\n\t// If the read was triggered too fast, and the file is not complete, ignore the update\n\t// if the file is empty. A consecutive update will be triggered when the file is complete.\n\tif len(b) == 0 {\n\t\treturn nil, [32]byte{}, nil\n\t}\n\n\tvar records []tailcfg.DNSRecord\n\n\terr = json.Unmarshal(b, &records)\n\tif err != nil {\n\t\treturn nil, [32]byte{}, fmt.Errorf(\"unmarshalling records, content: %q: %w\", string(b), err)\n\t}\n\n\thash := sha256.Sum256(b)\n\n\treturn records, hash, nil\n}\n"
  },
  {
    "path": "hscontrol/grpcv1.go",
    "content": "//go:generate buf generate --template ../buf.gen.yaml -o .. ../proto\n\n// nolint\npackage hscontrol\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/netip\"\n\t\"os\"\n\t\"slices\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/rs/zerolog/log\"\n\t\"google.golang.org/grpc/codes\"\n\t\"google.golang.org/grpc/status\"\n\t\"google.golang.org/protobuf/types/known/timestamppb\"\n\t\"gorm.io/gorm\"\n\t\"tailscale.com/net/tsaddr\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/key\"\n\t\"tailscale.com/types/views\"\n\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\t\"github.com/juanfont/headscale/hscontrol/state\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/juanfont/headscale/hscontrol/util/zlog/zf\"\n)\n\ntype headscaleV1APIServer struct { // v1.HeadscaleServiceServer\n\tv1.UnimplementedHeadscaleServiceServer\n\th *Headscale\n}\n\nfunc newHeadscaleV1APIServer(h *Headscale) v1.HeadscaleServiceServer {\n\treturn headscaleV1APIServer{\n\t\th: h,\n\t}\n}\n\nfunc (api headscaleV1APIServer) CreateUser(\n\tctx context.Context,\n\trequest *v1.CreateUserRequest,\n) (*v1.CreateUserResponse, error) {\n\tnewUser := types.User{\n\t\tName:          request.GetName(),\n\t\tDisplayName:   request.GetDisplayName(),\n\t\tEmail:         request.GetEmail(),\n\t\tProfilePicURL: request.GetPictureUrl(),\n\t}\n\tuser, policyChanged, err := api.h.state.CreateUser(newUser)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"creating user: %s\", err)\n\t}\n\n\t// CreateUser returns a policy change response if the user creation affected policy.\n\t// This triggers a full policy re-evaluation for all connected nodes.\n\tapi.h.Change(policyChanged)\n\n\treturn &v1.CreateUserResponse{User: user.Proto()}, nil\n}\n\nfunc (api headscaleV1APIServer) RenameUser(\n\tctx context.Context,\n\trequest *v1.RenameUserRequest,\n) (*v1.RenameUserResponse, error) {\n\toldUser, err := api.h.state.GetUserByID(types.UserID(request.GetOldId()))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, c, err := api.h.state.RenameUser(types.UserID(oldUser.ID), request.GetNewName())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Send policy update notifications if needed\n\tapi.h.Change(c)\n\n\tnewUser, err := api.h.state.GetUserByName(request.GetNewName())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &v1.RenameUserResponse{User: newUser.Proto()}, nil\n}\n\nfunc (api headscaleV1APIServer) DeleteUser(\n\tctx context.Context,\n\trequest *v1.DeleteUserRequest,\n) (*v1.DeleteUserResponse, error) {\n\tuser, err := api.h.state.GetUserByID(types.UserID(request.GetId()))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpolicyChanged, err := api.h.state.DeleteUser(types.UserID(user.ID))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Use the change returned from DeleteUser which includes proper policy updates\n\tapi.h.Change(policyChanged)\n\n\treturn &v1.DeleteUserResponse{}, nil\n}\n\nfunc (api headscaleV1APIServer) ListUsers(\n\tctx context.Context,\n\trequest *v1.ListUsersRequest,\n) (*v1.ListUsersResponse, error) {\n\tvar err error\n\tvar users []types.User\n\n\tswitch {\n\tcase request.GetName() != \"\":\n\t\tusers, err = api.h.state.ListUsersWithFilter(&types.User{Name: request.GetName()})\n\tcase request.GetEmail() != \"\":\n\t\tusers, err = api.h.state.ListUsersWithFilter(&types.User{Email: request.GetEmail()})\n\tcase request.GetId() != 0:\n\t\tusers, err = 
api.h.state.ListUsersWithFilter(&types.User{Model: gorm.Model{ID: uint(request.GetId())}})\n\tdefault:\n\t\tusers, err = api.h.state.ListAllUsers()\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := make([]*v1.User, len(users))\n\tfor index, user := range users {\n\t\tresponse[index] = user.Proto()\n\t}\n\n\tsort.Slice(response, func(i, j int) bool {\n\t\treturn response[i].Id < response[j].Id\n\t})\n\n\treturn &v1.ListUsersResponse{Users: response}, nil\n}\n\nfunc (api headscaleV1APIServer) CreatePreAuthKey(\n\tctx context.Context,\n\trequest *v1.CreatePreAuthKeyRequest,\n) (*v1.CreatePreAuthKeyResponse, error) {\n\tvar expiration time.Time\n\tif request.GetExpiration() != nil {\n\t\texpiration = request.GetExpiration().AsTime()\n\t}\n\n\tfor _, tag := range request.AclTags {\n\t\terr := validateTag(tag)\n\t\tif err != nil {\n\t\t\treturn &v1.CreatePreAuthKeyResponse{\n\t\t\t\tPreAuthKey: nil,\n\t\t\t}, status.Error(codes.InvalidArgument, err.Error())\n\t\t}\n\t}\n\n\tvar userID *types.UserID\n\tif request.GetUser() != 0 {\n\t\tuser, err := api.h.state.GetUserByID(types.UserID(request.GetUser()))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tuserID = user.TypedID()\n\t}\n\n\tpreAuthKey, err := api.h.state.CreatePreAuthKey(\n\t\tuserID,\n\t\trequest.GetReusable(),\n\t\trequest.GetEphemeral(),\n\t\t&expiration,\n\t\trequest.AclTags,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &v1.CreatePreAuthKeyResponse{PreAuthKey: preAuthKey.Proto()}, nil\n}\n\nfunc (api headscaleV1APIServer) ExpirePreAuthKey(\n\tctx context.Context,\n\trequest *v1.ExpirePreAuthKeyRequest,\n) (*v1.ExpirePreAuthKeyResponse, error) {\n\terr := api.h.state.ExpirePreAuthKey(request.GetId())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &v1.ExpirePreAuthKeyResponse{}, nil\n}\n\nfunc (api headscaleV1APIServer) DeletePreAuthKey(\n\tctx context.Context,\n\trequest *v1.DeletePreAuthKeyRequest,\n) (*v1.DeletePreAuthKeyResponse, error) {\n\terr := api.h.state.DeletePreAuthKey(request.GetId())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &v1.DeletePreAuthKeyResponse{}, nil\n}\n\nfunc (api headscaleV1APIServer) ListPreAuthKeys(\n\tctx context.Context,\n\trequest *v1.ListPreAuthKeysRequest,\n) (*v1.ListPreAuthKeysResponse, error) {\n\tpreAuthKeys, err := api.h.state.ListPreAuthKeys()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := make([]*v1.PreAuthKey, len(preAuthKeys))\n\tfor index, key := range preAuthKeys {\n\t\tresponse[index] = key.Proto()\n\t}\n\n\tsort.Slice(response, func(i, j int) bool {\n\t\treturn response[i].Id < response[j].Id\n\t})\n\n\treturn &v1.ListPreAuthKeysResponse{PreAuthKeys: response}, nil\n}\n\nfunc (api headscaleV1APIServer) RegisterNode(\n\tctx context.Context,\n\trequest *v1.RegisterNodeRequest,\n) (*v1.RegisterNodeResponse, error) {\n\t// Generate ephemeral registration key for tracking this registration flow in logs\n\tregistrationKey, err := util.GenerateRegistrationKey()\n\tif err != nil {\n\t\tlog.Warn().Err(err).Msg(\"failed to generate registration key\")\n\t\tregistrationKey = \"\" // Continue without key if generation fails\n\t}\n\n\tlog.Trace().\n\t\tCaller().\n\t\tStr(zf.UserName, request.GetUser()).\n\t\tStr(zf.RegistrationID, request.GetKey()).\n\t\tStr(zf.RegistrationKey, registrationKey).\n\t\tMsg(\"registering node\")\n\n\tregistrationId, err := types.AuthIDFromString(request.GetKey())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tuser, err := api.h.state.GetUserByName(request.GetUser())\n\tif err != nil 
{\n\t\treturn nil, fmt.Errorf(\"looking up user: %w\", err)\n\t}\n\n\tnode, nodeChange, err := api.h.state.HandleNodeFromAuthPath(\n\t\tregistrationId,\n\t\ttypes.UserID(user.ID),\n\t\tnil,\n\t\tutil.RegisterMethodCLI,\n\t)\n\tif err != nil {\n\t\tlog.Error().\n\t\t\tStr(zf.RegistrationKey, registrationKey).\n\t\t\tErr(err).\n\t\t\tMsg(\"failed to register node\")\n\t\treturn nil, err\n\t}\n\n\tlog.Info().\n\t\tStr(zf.RegistrationKey, registrationKey).\n\t\tEmbedObject(node).\n\t\tMsg(\"node registered successfully\")\n\n\t// This is a bit of a back and forth, but we have a bit of a chicken and egg\n\t// dependency here.\n\t// Because the way the policy manager works, we need to have the node\n\t// in the database, then add it to the policy manager and then we can\n\t// approve the route. This means we get this dance where the node is\n\t// first added to the database, then we add it to the policy manager via\n\t// SaveNode (which automatically updates the policy manager) and then we can auto approve the routes.\n\t// As that only approves the struct object, we need to save it again and\n\t// ensure we send an update.\n\t// This works, but might be another good candidate for doing some sort of\n\t// eventbus.\n\trouteChange, err := api.h.state.AutoApproveRoutes(node)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"auto approving routes: %w\", err)\n\t}\n\n\t// Send both changes. Empty changes are ignored by Change().\n\tapi.h.Change(nodeChange, routeChange)\n\n\treturn &v1.RegisterNodeResponse{Node: node.Proto()}, nil\n}\n\nfunc (api headscaleV1APIServer) GetNode(\n\tctx context.Context,\n\trequest *v1.GetNodeRequest,\n) (*v1.GetNodeResponse, error) {\n\tnode, ok := api.h.state.GetNodeByID(types.NodeID(request.GetNodeId()))\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.NotFound, \"node not found\")\n\t}\n\n\tresp := node.Proto()\n\n\treturn &v1.GetNodeResponse{Node: resp}, nil\n}\n\nfunc (api headscaleV1APIServer) SetTags(\n\tctx context.Context,\n\trequest *v1.SetTagsRequest,\n) (*v1.SetTagsResponse, error) {\n\t// Validate tags not empty - tagged nodes must have at least one tag\n\tif len(request.GetTags()) == 0 {\n\t\treturn &v1.SetTagsResponse{\n\t\t\t\tNode: nil,\n\t\t\t}, status.Error(\n\t\t\t\tcodes.InvalidArgument,\n\t\t\t\t\"cannot remove all tags from a node - tagged nodes must have at least one tag\",\n\t\t\t)\n\t}\n\n\t// Validate tag format\n\tfor _, tag := range request.GetTags() {\n\t\terr := validateTag(tag)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// User XOR Tags: nodes are either tagged or user-owned, never both.\n\t// Setting tags on a user-owned node converts it to a tagged node.\n\t// Once tagged, a node cannot be converted back to user-owned.\n\t_, found := api.h.state.GetNodeByID(types.NodeID(request.GetNodeId()))\n\tif !found {\n\t\treturn &v1.SetTagsResponse{\n\t\t\tNode: nil,\n\t\t}, status.Error(codes.NotFound, \"node not found\")\n\t}\n\n\tnode, nodeChange, err := api.h.state.SetNodeTags(types.NodeID(request.GetNodeId()), request.GetTags())\n\tif err != nil {\n\t\treturn &v1.SetTagsResponse{\n\t\t\tNode: nil,\n\t\t}, status.Error(codes.InvalidArgument, err.Error())\n\t}\n\n\tapi.h.Change(nodeChange)\n\n\tlog.Trace().\n\t\tCaller().\n\t\tEmbedObject(node).\n\t\tStrs(\"tags\", request.GetTags()).\n\t\tMsg(\"changing tags of node\")\n\n\treturn &v1.SetTagsResponse{Node: node.Proto()}, nil\n}\n\nfunc (api headscaleV1APIServer) SetApprovedRoutes(\n\tctx context.Context,\n\trequest *v1.SetApprovedRoutesRequest,\n) 
(*v1.SetApprovedRoutesResponse, error) {\n\tlog.Debug().\n\t\tCaller().\n\t\tUint64(zf.NodeID, request.GetNodeId()).\n\t\tStrs(\"requestedRoutes\", request.GetRoutes()).\n\t\tMsg(\"gRPC SetApprovedRoutes called\")\n\n\tvar newApproved []netip.Prefix\n\tfor _, route := range request.GetRoutes() {\n\t\tprefix, err := netip.ParsePrefix(route)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"parsing route: %w\", err)\n\t\t}\n\n\t\t// If the prefix is an exit route, add both. The client expects both\n\t\t// to annotate the node as an exit node.\n\t\tif prefix == tsaddr.AllIPv4() || prefix == tsaddr.AllIPv6() {\n\t\t\tnewApproved = append(newApproved, tsaddr.AllIPv4(), tsaddr.AllIPv6())\n\t\t} else {\n\t\t\tnewApproved = append(newApproved, prefix)\n\t\t}\n\t}\n\tslices.SortFunc(newApproved, netip.Prefix.Compare)\n\tnewApproved = slices.Compact(newApproved)\n\n\tnode, nodeChange, err := api.h.state.SetApprovedRoutes(types.NodeID(request.GetNodeId()), newApproved)\n\tif err != nil {\n\t\treturn nil, status.Error(codes.InvalidArgument, err.Error())\n\t}\n\n\t// Always propagate node changes from SetApprovedRoutes\n\tapi.h.Change(nodeChange)\n\n\tproto := node.Proto()\n\t// Populate SubnetRoutes with PrimaryRoutes to ensure it includes only the\n\t// routes that are actively served from the node (per architectural requirement in types/node.go)\n\tprimaryRoutes := api.h.state.GetNodePrimaryRoutes(node.ID())\n\tproto.SubnetRoutes = util.PrefixesToString(primaryRoutes)\n\n\tlog.Debug().\n\t\tCaller().\n\t\tEmbedObject(node).\n\t\tStrs(\"approvedRoutes\", util.PrefixesToString(node.ApprovedRoutes().AsSlice())).\n\t\tStrs(\"primaryRoutes\", util.PrefixesToString(primaryRoutes)).\n\t\tStrs(\"finalSubnetRoutes\", proto.SubnetRoutes).\n\t\tMsg(\"gRPC SetApprovedRoutes completed\")\n\n\treturn &v1.SetApprovedRoutesResponse{Node: proto}, nil\n}\n\nfunc validateTag(tag string) error {\n\tif !strings.HasPrefix(tag, \"tag:\") {\n\t\treturn errors.New(\"tag must start with the string 'tag:'\")\n\t}\n\tif strings.ToLower(tag) != tag {\n\t\treturn errors.New(\"tag should be lowercase\")\n\t}\n\tif len(strings.Fields(tag)) > 1 {\n\t\treturn errors.New(\"tags must not contain spaces\")\n\t}\n\treturn nil\n}\n\nfunc (api headscaleV1APIServer) DeleteNode(\n\tctx context.Context,\n\trequest *v1.DeleteNodeRequest,\n) (*v1.DeleteNodeResponse, error) {\n\tnode, ok := api.h.state.GetNodeByID(types.NodeID(request.GetNodeId()))\n\tif !ok {\n\t\treturn nil, status.Error(codes.NotFound, \"node not found\")\n\t}\n\n\tnodeChange, err := api.h.state.DeleteNode(node)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tapi.h.Change(nodeChange)\n\n\treturn &v1.DeleteNodeResponse{}, nil\n}\n\nfunc (api headscaleV1APIServer) ExpireNode(\n\tctx context.Context,\n\trequest *v1.ExpireNodeRequest,\n) (*v1.ExpireNodeResponse, error) {\n\tif request.GetDisableExpiry() && request.GetExpiry() != nil {\n\t\treturn nil, status.Error(\n\t\t\tcodes.InvalidArgument,\n\t\t\t\"cannot set both disable_expiry and expiry\",\n\t\t)\n\t}\n\n\t// Handle disable expiry request - node will never expire.\n\tif request.GetDisableExpiry() {\n\t\tnode, nodeChange, err := api.h.state.SetNodeExpiry(\n\t\t\ttypes.NodeID(request.GetNodeId()), nil,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tapi.h.Change(nodeChange)\n\n\t\tlog.Trace().\n\t\t\tCaller().\n\t\t\tEmbedObject(node).\n\t\t\tMsg(\"node expiry disabled\")\n\n\t\treturn &v1.ExpireNodeResponse{Node: node.Proto()}, nil\n\t}\n
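\n\t// Default to expiring the node immediately when the request does not\n\t// specify an explicit expiry time.\n\texpiry := time.Now()\n\tif request.GetExpiry() != nil 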
{\n\t\texpiry = request.GetExpiry().AsTime()\n\t}\n\n\tnode, nodeChange, err := api.h.state.SetNodeExpiry(\n\t\ttypes.NodeID(request.GetNodeId()), &expiry,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// TODO(kradalby): Ensure that both the selfupdate and peer updates are sent\n\tapi.h.Change(nodeChange)\n\n\tlog.Trace().\n\t\tCaller().\n\t\tEmbedObject(node).\n\t\tTime(zf.ExpiresAt, expiry).\n\t\tMsg(\"node expired\")\n\n\treturn &v1.ExpireNodeResponse{Node: node.Proto()}, nil\n}\n\nfunc (api headscaleV1APIServer) RenameNode(\n\tctx context.Context,\n\trequest *v1.RenameNodeRequest,\n) (*v1.RenameNodeResponse, error) {\n\tnode, nodeChange, err := api.h.state.RenameNode(types.NodeID(request.GetNodeId()), request.GetNewName())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// TODO(kradalby): investigate if we need selfupdate\n\tapi.h.Change(nodeChange)\n\n\tlog.Trace().\n\t\tCaller().\n\t\tEmbedObject(node).\n\t\tStr(zf.NewName, request.GetNewName()).\n\t\tMsg(\"node renamed\")\n\n\treturn &v1.RenameNodeResponse{Node: node.Proto()}, nil\n}\n\nfunc (api headscaleV1APIServer) ListNodes(\n\tctx context.Context,\n\trequest *v1.ListNodesRequest,\n) (*v1.ListNodesResponse, error) {\n\t// TODO(kradalby): it looks like this can be simplified a lot,\n\t// the filtering of nodes by user, vs nodes as a whole can\n\t// probably be done once.\n\t// TODO(kradalby): This should be done in one tx.\n\tif request.GetUser() != \"\" {\n\t\tuser, err := api.h.state.GetUserByName(request.GetUser())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tnodes := api.h.state.ListNodesByUser(types.UserID(user.ID))\n\n\t\tresponse := nodesToProto(api.h.state, nodes)\n\t\treturn &v1.ListNodesResponse{Nodes: response}, nil\n\t}\n\n\tnodes := api.h.state.ListNodes()\n\n\tresponse := nodesToProto(api.h.state, nodes)\n\treturn &v1.ListNodesResponse{Nodes: response}, nil\n}\n\nfunc nodesToProto(state *state.State, nodes views.Slice[types.NodeView]) []*v1.Node {\n\tresponse := make([]*v1.Node, nodes.Len())\n\tfor index, node := range nodes.All() {\n\t\tresp := node.Proto()\n\n\t\t// Tags-as-identity: tagged nodes show as TaggedDevices user in API responses\n\t\t// (UserID may be set internally for \"created by\" tracking)\n\t\tif node.IsTagged() {\n\t\t\tresp.User = types.TaggedDevices.Proto()\n\t\t}\n\n\t\tresp.SubnetRoutes = util.PrefixesToString(append(state.GetNodePrimaryRoutes(node.ID()), node.ExitRoutes()...))\n\t\tresponse[index] = resp\n\t}\n\n\tsort.Slice(response, func(i, j int) bool {\n\t\treturn response[i].Id < response[j].Id\n\t})\n\n\treturn response\n}\n\nfunc (api headscaleV1APIServer) BackfillNodeIPs(\n\tctx context.Context,\n\trequest *v1.BackfillNodeIPsRequest,\n) (*v1.BackfillNodeIPsResponse, error) {\n\tlog.Trace().Caller().Msg(\"backfill called\")\n\n\tif !request.Confirmed {\n\t\treturn nil, errors.New(\"not confirmed, aborting\")\n\t}\n\n\tchanges, err := api.h.state.BackfillNodeIPs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &v1.BackfillNodeIPsResponse{Changes: changes}, nil\n}\n\nfunc (api headscaleV1APIServer) CreateApiKey(\n\tctx context.Context,\n\trequest *v1.CreateApiKeyRequest,\n) (*v1.CreateApiKeyResponse, error) {\n\tvar expiration time.Time\n\tif request.GetExpiration() != nil {\n\t\texpiration = request.GetExpiration().AsTime()\n\t}\n\n\tapiKey, _, err := api.h.state.CreateAPIKey(&expiration)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &v1.CreateApiKeyResponse{ApiKey: apiKey}, nil\n}\n\n// apiKeyIdentifier is implemented by requests that 
identify an API key.\ntype apiKeyIdentifier interface {\n\tGetId() uint64\n\tGetPrefix() string\n}\n\n// getAPIKey retrieves an API key by ID or prefix from the request.\n// Returns InvalidArgument if neither or both are provided.\nfunc (api headscaleV1APIServer) getAPIKey(req apiKeyIdentifier) (*types.APIKey, error) {\n\thasID := req.GetId() != 0\n\thasPrefix := req.GetPrefix() != \"\"\n\n\tswitch {\n\tcase hasID && hasPrefix:\n\t\treturn nil, status.Error(codes.InvalidArgument, \"provide either id or prefix, not both\")\n\tcase hasID:\n\t\treturn api.h.state.GetAPIKeyByID(req.GetId())\n\tcase hasPrefix:\n\t\treturn api.h.state.GetAPIKey(req.GetPrefix())\n\tdefault:\n\t\treturn nil, status.Error(codes.InvalidArgument, \"must provide id or prefix\")\n\t}\n}\n\nfunc (api headscaleV1APIServer) ExpireApiKey(\n\tctx context.Context,\n\trequest *v1.ExpireApiKeyRequest,\n) (*v1.ExpireApiKeyResponse, error) {\n\tapiKey, err := api.getAPIKey(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = api.h.state.ExpireAPIKey(apiKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &v1.ExpireApiKeyResponse{}, nil\n}\n\nfunc (api headscaleV1APIServer) ListApiKeys(\n\tctx context.Context,\n\trequest *v1.ListApiKeysRequest,\n) (*v1.ListApiKeysResponse, error) {\n\tapiKeys, err := api.h.state.ListAPIKeys()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := make([]*v1.ApiKey, len(apiKeys))\n\tfor index, key := range apiKeys {\n\t\tresponse[index] = key.Proto()\n\t}\n\n\tsort.Slice(response, func(i, j int) bool {\n\t\treturn response[i].Id < response[j].Id\n\t})\n\n\treturn &v1.ListApiKeysResponse{ApiKeys: response}, nil\n}\n\nfunc (api headscaleV1APIServer) DeleteApiKey(\n\tctx context.Context,\n\trequest *v1.DeleteApiKeyRequest,\n) (*v1.DeleteApiKeyResponse, error) {\n\tapiKey, err := api.getAPIKey(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := api.h.state.DestroyAPIKey(*apiKey); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &v1.DeleteApiKeyResponse{}, nil\n}\n\nfunc (api headscaleV1APIServer) GetPolicy(\n\t_ context.Context,\n\t_ *v1.GetPolicyRequest,\n) (*v1.GetPolicyResponse, error) {\n\tswitch api.h.cfg.Policy.Mode {\n\tcase types.PolicyModeDB:\n\t\tp, err := api.h.state.GetPolicy()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"loading ACL from database: %w\", err)\n\t\t}\n\n\t\treturn &v1.GetPolicyResponse{\n\t\t\tPolicy:    p.Data,\n\t\t\tUpdatedAt: timestamppb.New(p.UpdatedAt),\n\t\t}, nil\n\tcase types.PolicyModeFile:\n\t\t// Read the file and return the contents as-is.\n\t\tabsPath := util.AbsolutePathFromConfigPath(api.h.cfg.Policy.Path)\n\t\tf, err := os.Open(absPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"reading policy from path %q: %w\", absPath, err)\n\t\t}\n\n\t\tdefer f.Close()\n\n\t\tb, err := io.ReadAll(f)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"reading policy from file: %w\", err)\n\t\t}\n\n\t\treturn &v1.GetPolicyResponse{Policy: string(b)}, nil\n\t}\n\n\treturn nil, fmt.Errorf(\"no supported policy mode found in configuration, policy.mode: %q\", api.h.cfg.Policy.Mode)\n}\n\nfunc (api headscaleV1APIServer) SetPolicy(\n\t_ context.Context,\n\trequest *v1.SetPolicyRequest,\n) (*v1.SetPolicyResponse, error) {\n\tif api.h.cfg.Policy.Mode != types.PolicyModeDB {\n\t\treturn nil, types.ErrPolicyUpdateIsDisabled\n\t}\n\n\tp := request.GetPolicy()\n\n\t// Validate and reject configuration that would error when applied\n\t// when creating a map response. 
This requires nodes, so there is still\n\t// a scenario where an invalid policy might be accepted if the server\n\t// has no nodes yet, but it should help for the general case and for\n\t// hot reloading configurations.\n\tnodes := api.h.state.ListNodes()\n\n\t_, err := api.h.state.SetPolicy([]byte(p))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"setting policy: %w\", err)\n\t}\n\n\tif nodes.Len() > 0 {\n\t\t_, err = api.h.state.SSHPolicy(nodes.At(0))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"verifying SSH rules: %w\", err)\n\t\t}\n\t}\n\n\tupdated, err := api.h.state.SetPolicyInDB(p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Always reload policy to ensure route re-evaluation, even if policy content hasn't changed.\n\t// This ensures that routes are re-evaluated for auto-approval in cases where routes\n\t// were manually disabled but could now be auto-approved with the current policy.\n\tcs, err := api.h.state.ReloadPolicy()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"reloading policy: %w\", err)\n\t}\n\n\tif len(cs) > 0 {\n\t\tapi.h.Change(cs...)\n\t} else {\n\t\tlog.Debug().\n\t\t\tCaller().\n\t\t\tMsg(\"no policy changes to distribute; ReloadPolicy returned an empty changeset\")\n\t}\n\n\tresponse := &v1.SetPolicyResponse{\n\t\tPolicy:    updated.Data,\n\t\tUpdatedAt: timestamppb.New(updated.UpdatedAt),\n\t}\n\n\tlog.Debug().\n\t\tCaller().\n\t\tMsg(\"gRPC SetPolicy completed successfully\")\n\n\treturn response, nil\n}\n\n// The following service calls are for testing and debugging.\nfunc (api headscaleV1APIServer) DebugCreateNode(\n\tctx context.Context,\n\trequest *v1.DebugCreateNodeRequest,\n) (*v1.DebugCreateNodeResponse, error) {\n\tuser, err := api.h.state.GetUserByName(request.GetUser())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\troutes, err := util.StringToIPPrefix(request.GetRoutes())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.Trace().\n\t\tCaller().\n\t\tInterface(\"route-prefix\", routes).\n\t\tInterface(\"route-str\", request.GetRoutes()).\n\t\tMsg(\"Creating routes for node\")\n\n\thostinfo := tailcfg.Hostinfo{\n\t\tRoutableIPs: routes,\n\t\tOS:          \"TestOS\",\n\t\tHostname:    request.GetName(),\n\t}\n\n\tregistrationId, err := types.AuthIDFromString(request.GetKey())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnewNode := types.Node{\n\t\tNodeKey:    key.NewNode().Public(),\n\t\tMachineKey: key.NewMachine().Public(),\n\t\tHostname:   request.GetName(),\n\t\tUser:       user,\n\n\t\tExpiry:   &time.Time{},\n\t\tLastSeen: &time.Time{},\n\n\t\tHostinfo: &hostinfo,\n\t}\n\n\tlog.Debug().\n\t\tCaller().\n\t\tStr(\"registration_id\", registrationId.String()).\n\t\tMsg(\"adding debug machine via CLI, appending to registration cache\")\n\n\tauthRegReq := types.NewRegisterAuthRequest(newNode)\n\tapi.h.state.SetAuthCacheEntry(registrationId, authRegReq)\n\n\treturn &v1.DebugCreateNodeResponse{Node: newNode.Proto()}, nil\n}\n\nfunc (api headscaleV1APIServer) Health(\n\tctx context.Context,\n\trequest *v1.HealthRequest,\n) (*v1.HealthResponse, error) {\n\tvar healthErr error\n\tresponse := &v1.HealthResponse{}\n\n\tif err := api.h.state.PingDB(ctx); err != nil {\n\t\thealthErr = fmt.Errorf(\"pinging database: %w\", err)\n\t} else {\n\t\tresponse.DatabaseConnectivity = true\n\t}\n\n\tif healthErr != nil {\n\t\tlog.Error().Err(healthErr).Msg(\"health check failed\")\n\t}\n\n\treturn response, healthErr\n}\n\nfunc (api headscaleV1APIServer) AuthRegister(\n\tctx context.Context,\n\trequest 
*v1.AuthRegisterRequest,\n) (*v1.AuthRegisterResponse, error) {\n\tresp, err := api.RegisterNode(ctx, &v1.RegisterNodeRequest{\n\t\tKey:  request.GetAuthId(),\n\t\tUser: request.GetUser(),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &v1.AuthRegisterResponse{Node: resp.GetNode()}, nil\n}\n\nfunc (api headscaleV1APIServer) AuthApprove(\n\tctx context.Context,\n\trequest *v1.AuthApproveRequest,\n) (*v1.AuthApproveResponse, error) {\n\tauthID, err := types.AuthIDFromString(request.GetAuthId())\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.InvalidArgument, \"invalid auth_id: %v\", err)\n\t}\n\n\tauthReq, ok := api.h.state.GetAuthCacheEntry(authID)\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.NotFound, \"no pending auth session for auth_id %s\", authID)\n\t}\n\n\tauthReq.FinishAuth(types.AuthVerdict{})\n\n\treturn &v1.AuthApproveResponse{}, nil\n}\n\nfunc (api headscaleV1APIServer) AuthReject(\n\tctx context.Context,\n\trequest *v1.AuthRejectRequest,\n) (*v1.AuthRejectResponse, error) {\n\tauthID, err := types.AuthIDFromString(request.GetAuthId())\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.InvalidArgument, \"invalid auth_id: %v\", err)\n\t}\n\n\tauthReq, ok := api.h.state.GetAuthCacheEntry(authID)\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.NotFound, \"no pending auth session for auth_id %s\", authID)\n\t}\n\n\tauthReq.FinishAuth(types.AuthVerdict{\n\t\tErr: errors.New(\"auth request rejected\"),\n\t})\n\n\treturn &v1.AuthRejectResponse{}, nil\n}
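\n\n// mustEmbedUnimplementedHeadscaleServiceServer satisfies the\n// forward-compatibility embedding requirement of the generated gRPC\n// service interface.\nfunc (api headscaleV1APIServer) mustEmbedUnimplementedHeadscaleServiceServer() {}\n"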
  },
  {
    "path": "hscontrol/grpcv1_test.go",
    "content": "package hscontrol\n\nimport (\n\t\"context\"\n\t\"testing\"\n\t\"time\"\n\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"google.golang.org/grpc/codes\"\n\t\"google.golang.org/grpc/status\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/key\"\n)\n\nfunc Test_validateTag(t *testing.T) {\n\ttype args struct {\n\t\ttag string\n\t}\n\n\ttests := []struct {\n\t\tname    string\n\t\targs    args\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname:    \"valid tag\",\n\t\t\targs:    args{tag: \"tag:test\"},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname:    \"tag without tag prefix\",\n\t\t\targs:    args{tag: \"test\"},\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"uppercase tag\",\n\t\t\targs:    args{tag: \"tag:tEST\"},\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"tag that contains space\",\n\t\t\targs:    args{tag: \"tag:this is a spaced tag\"},\n\t\t\twantErr: true,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\terr := validateTag(tt.args.tag)\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"validateTag() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestSetTags_Conversion tests the conversion of user-owned nodes to tagged nodes.\n// The tags-as-identity model allows one-way conversion from user-owned to tagged.\n// Tag authorization is checked via the policy manager - unauthorized tags are rejected.\nfunc TestSetTags_Conversion(t *testing.T) {\n\tt.Parallel()\n\n\tapp := createTestApp(t)\n\n\t// Create test user and nodes\n\tuser := app.state.CreateUserForTest(\"test-user\")\n\n\t// Create a pre-auth key WITHOUT tags for user-owned node\n\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), false, false, nil, nil)\n\trequire.NoError(t, err)\n\n\tmachineKey1 := key.NewMachine()\n\tnodeKey1 := key.NewNode()\n\n\t// Register a user-owned node (via untagged PreAuthKey)\n\tuserOwnedReq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak.Key,\n\t\t},\n\t\tNodeKey: nodeKey1.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"user-owned-node\",\n\t\t},\n\t}\n\t_, err = app.handleRegisterWithAuthKey(userOwnedReq, machineKey1.Public())\n\trequire.NoError(t, err)\n\n\t// Get the created node\n\tuserOwnedNode, found := app.state.GetNodeByNodeKey(nodeKey1.Public())\n\trequire.True(t, found)\n\n\t// Create API server instance\n\tapiServer := newHeadscaleV1APIServer(app)\n\n\ttests := []struct {\n\t\tname           string\n\t\tnodeID         uint64\n\t\ttags           []string\n\t\twantErr        bool\n\t\twantCode       codes.Code\n\t\twantErrMessage string\n\t}{\n\t\t{\n\t\t\t// Conversion is allowed, but tag authorization fails without tagOwners\n\t\t\tname:           \"reject unauthorized tags on user-owned node\",\n\t\t\tnodeID:         uint64(userOwnedNode.ID()),\n\t\t\ttags:           []string{\"tag:server\"},\n\t\t\twantErr:        true,\n\t\t\twantCode:       codes.InvalidArgument,\n\t\t\twantErrMessage: \"requested tags\",\n\t\t},\n\t\t{\n\t\t\t// Conversion is allowed, but tag authorization fails without tagOwners\n\t\t\tname:           \"reject multiple unauthorized tags\",\n\t\t\tnodeID:         uint64(userOwnedNode.ID()),\n\t\t\ttags:           []string{\"tag:server\", \"tag:database\"},\n\t\t\twantErr:        true,\n\t\t\twantCode:       
codes.InvalidArgument,\n\t\t\twantErrMessage: \"requested tags\",\n\t\t},\n\t\t{\n\t\t\tname:           \"reject non-existent node\",\n\t\t\tnodeID:         99999,\n\t\t\ttags:           []string{\"tag:server\"},\n\t\t\twantErr:        true,\n\t\t\twantCode:       codes.NotFound,\n\t\t\twantErrMessage: \"node not found\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tresp, err := apiServer.SetTags(context.Background(), &v1.SetTagsRequest{\n\t\t\t\tNodeId: tt.nodeID,\n\t\t\t\tTags:   tt.tags,\n\t\t\t})\n\n\t\t\tif tt.wantErr {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tst, ok := status.FromError(err)\n\t\t\t\trequire.True(t, ok, \"error should be a gRPC status error\")\n\t\t\t\tassert.Equal(t, tt.wantCode, st.Code())\n\t\t\t\tassert.Contains(t, st.Message(), tt.wantErrMessage)\n\t\t\t\tassert.Nil(t, resp.GetNode())\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.NotNil(t, resp)\n\t\t\t\tassert.NotNil(t, resp.GetNode())\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestSetTags_TaggedNode tests that SetTags correctly identifies tagged nodes\n// and doesn't reject them with the \"user-owned nodes\" error.\n// Note: This test doesn't validate ACL tag authorization - that's tested elsewhere.\nfunc TestSetTags_TaggedNode(t *testing.T) {\n\tt.Parallel()\n\n\tapp := createTestApp(t)\n\n\t// Create test user and tagged pre-auth key\n\tuser := app.state.CreateUserForTest(\"test-user\")\n\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), false, false, nil, []string{\"tag:initial\"})\n\trequire.NoError(t, err)\n\n\tmachineKey := key.NewMachine()\n\tnodeKey := key.NewNode()\n\n\t// Register a tagged node (via tagged PreAuthKey)\n\ttaggedReq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak.Key,\n\t\t},\n\t\tNodeKey: nodeKey.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"tagged-node\",\n\t\t},\n\t}\n\t_, err = app.handleRegisterWithAuthKey(taggedReq, machineKey.Public())\n\trequire.NoError(t, err)\n\n\t// Get the created node\n\ttaggedNode, found := app.state.GetNodeByNodeKey(nodeKey.Public())\n\trequire.True(t, found)\n\tassert.True(t, taggedNode.IsTagged(), \"Node should be tagged\")\n\tassert.False(t, taggedNode.UserID().Valid(), \"Tagged node should not have UserID\")\n\n\t// Create API server instance\n\tapiServer := newHeadscaleV1APIServer(app)\n\n\t// Test: SetTags should work on tagged nodes.\n\tresp, err := apiServer.SetTags(context.Background(), &v1.SetTagsRequest{\n\t\tNodeId: uint64(taggedNode.ID()),\n\t\tTags:   []string{\"tag:initial\"}, // Keep existing tag to avoid ACL validation issues\n\t})\n\n\t// The call should NOT fail with \"cannot set tags on user-owned nodes\"\n\tif err != nil {\n\t\tst, ok := status.FromError(err)\n\t\trequire.True(t, ok)\n\t\t// If error is about unauthorized tags, that's fine - ACL validation is working\n\t\t// If error is about user-owned nodes, that's the bug we're testing for\n\t\tassert.NotContains(t, st.Message(), \"user-owned nodes\", \"Should not reject tagged nodes as user-owned\")\n\t} else {\n\t\t// Success is also fine\n\t\tassert.NotNil(t, resp)\n\t}\n}\n\n// TestSetTags_CannotRemoveAllTags tests that SetTags rejects attempts to remove\n// all tags from a tagged node, enforcing Tailscale's requirement that tagged\n// nodes must have at least one tag.\nfunc TestSetTags_CannotRemoveAllTags(t *testing.T) {\n\tt.Parallel()\n\n\tapp := createTestApp(t)\n\n\t// Create test user and tagged pre-auth key\n\tuser := 
app.state.CreateUserForTest(\"test-user\")\n\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), false, false, nil, []string{\"tag:server\"})\n\trequire.NoError(t, err)\n\n\tmachineKey := key.NewMachine()\n\tnodeKey := key.NewNode()\n\n\t// Register a tagged node\n\ttaggedReq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak.Key,\n\t\t},\n\t\tNodeKey: nodeKey.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"tagged-node\",\n\t\t},\n\t}\n\t_, err = app.handleRegisterWithAuthKey(taggedReq, machineKey.Public())\n\trequire.NoError(t, err)\n\n\t// Get the created node\n\ttaggedNode, found := app.state.GetNodeByNodeKey(nodeKey.Public())\n\trequire.True(t, found)\n\tassert.True(t, taggedNode.IsTagged())\n\n\t// Create API server instance\n\tapiServer := newHeadscaleV1APIServer(app)\n\n\t// Attempt to remove all tags (empty array)\n\tresp, err := apiServer.SetTags(context.Background(), &v1.SetTagsRequest{\n\t\tNodeId: uint64(taggedNode.ID()),\n\t\tTags:   []string{}, // Empty - attempting to remove all tags\n\t})\n\n\t// Should fail with InvalidArgument error\n\trequire.Error(t, err)\n\tst, ok := status.FromError(err)\n\trequire.True(t, ok, \"error should be a gRPC status error\")\n\tassert.Equal(t, codes.InvalidArgument, st.Code())\n\tassert.Contains(t, st.Message(), \"cannot remove all tags\")\n\tassert.Nil(t, resp.GetNode())\n}\n\n// TestDeleteUser_ReturnsProperChangeSignal tests issue #2967 fix:\n// When a user is deleted, the state should return a non-empty change signal\n// to ensure policy manager is updated and clients are notified immediately.\nfunc TestDeleteUser_ReturnsProperChangeSignal(t *testing.T) {\n\tt.Parallel()\n\n\tapp := createTestApp(t)\n\n\t// Create a user\n\tuser := app.state.CreateUserForTest(\"test-user-to-delete\")\n\trequire.NotNil(t, user)\n\n\t// Delete the user and verify a non-empty change is returned\n\t// Issue #2967: Without the fix, DeleteUser returned an empty change,\n\t// causing stale policy state until another user operation triggered an update.\n\tchangeSignal, err := app.state.DeleteUser(*user.TypedID())\n\trequire.NoError(t, err, \"DeleteUser should succeed\")\n\tassert.False(t, changeSignal.IsEmpty(), \"DeleteUser should return a non-empty change signal (issue #2967)\")\n}\n\n// TestDeleteUser_TaggedNodeSurvives tests that deleting a user succeeds when\n// the user's only nodes are tagged, and that those nodes remain in the\n// NodeStore with nil UserID.\n// https://github.com/juanfont/headscale/issues/3077\nfunc TestDeleteUser_TaggedNodeSurvives(t *testing.T) {\n\tt.Parallel()\n\n\tapp := createTestApp(t)\n\n\tuser := app.state.CreateUserForTest(\"legacy-user\")\n\n\t// Register a tagged node via the full auth flow.\n\ttags := []string{\"tag:server\"}\n\tpak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, tags)\n\trequire.NoError(t, err)\n\n\tmachineKey := key.NewMachine()\n\tnodeKey := key.NewNode()\n\n\tregReq := tailcfg.RegisterRequest{\n\t\tAuth: &tailcfg.RegisterResponseAuth{\n\t\t\tAuthKey: pak.Key,\n\t\t},\n\t\tNodeKey: nodeKey.Public(),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"tagged-server\",\n\t\t},\n\t\tExpiry: time.Now().Add(24 * time.Hour),\n\t}\n\n\tresp, err := app.handleRegisterWithAuthKey(regReq, machineKey.Public())\n\trequire.NoError(t, err)\n\trequire.True(t, resp.MachineAuthorized)\n\n\t// Verify the registered node has nil UserID (enforced invariant).\n\tnode, found := app.state.GetNodeByNodeKey(nodeKey.Public())\n\trequire.True(t, 
found)\n\trequire.True(t, node.IsTagged())\n\tassert.False(t, node.UserID().Valid(),\n\t\t\"tagged node should have nil UserID after registration\")\n\n\tnodeID := node.ID()\n\n\t// NodeStore should not list the tagged node under any user.\n\tnodesForUser := app.state.ListNodesByUser(types.UserID(user.ID))\n\tassert.Equal(t, 0, nodesForUser.Len(),\n\t\t\"tagged nodes should not appear in nodesByUser index\")\n\n\t// Delete the user.\n\tchangeSignal, err := app.state.DeleteUser(*user.TypedID())\n\trequire.NoError(t, err)\n\tassert.False(t, changeSignal.IsEmpty())\n\n\t// Tagged node survives in the NodeStore.\n\tnodeAfter, found := app.state.GetNodeByID(nodeID)\n\trequire.True(t, found, \"tagged node should survive user deletion\")\n\tassert.True(t, nodeAfter.IsTagged())\n\tassert.False(t, nodeAfter.UserID().Valid())\n\n\t// Tagged node appears in the global list.\n\tallNodes := app.state.ListNodes()\n\tfoundInAll := false\n\n\tfor _, n := range allNodes.All() {\n\t\tif n.ID() == nodeID {\n\t\t\tfoundInAll = true\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\tassert.True(t, foundInAll, \"tagged node should appear in the global node list\")\n}\n\n// TestExpireApiKey_ByID tests that API keys can be expired by ID.\nfunc TestExpireApiKey_ByID(t *testing.T) {\n\tt.Parallel()\n\n\tapp := createTestApp(t)\n\tapiServer := newHeadscaleV1APIServer(app)\n\n\t// Create an API key\n\tcreateResp, err := apiServer.CreateApiKey(context.Background(), &v1.CreateApiKeyRequest{})\n\trequire.NoError(t, err)\n\trequire.NotEmpty(t, createResp.GetApiKey())\n\n\t// List keys to get the ID\n\tlistResp, err := apiServer.ListApiKeys(context.Background(), &v1.ListApiKeysRequest{})\n\trequire.NoError(t, err)\n\trequire.Len(t, listResp.GetApiKeys(), 1)\n\n\tkeyID := listResp.GetApiKeys()[0].GetId()\n\n\t// Expire by ID\n\t_, err = apiServer.ExpireApiKey(context.Background(), &v1.ExpireApiKeyRequest{\n\t\tId: keyID,\n\t})\n\trequire.NoError(t, err)\n\n\t// Verify key is expired (expiration is set to now or in the past)\n\tlistResp, err = apiServer.ListApiKeys(context.Background(), &v1.ListApiKeysRequest{})\n\trequire.NoError(t, err)\n\trequire.Len(t, listResp.GetApiKeys(), 1)\n\tassert.NotNil(t, listResp.GetApiKeys()[0].GetExpiration(), \"expiration should be set\")\n}\n\n// TestExpireApiKey_ByPrefix tests that API keys can still be expired by prefix.\nfunc TestExpireApiKey_ByPrefix(t *testing.T) {\n\tt.Parallel()\n\n\tapp := createTestApp(t)\n\tapiServer := newHeadscaleV1APIServer(app)\n\n\t// Create an API key\n\tcreateResp, err := apiServer.CreateApiKey(context.Background(), &v1.CreateApiKeyRequest{})\n\trequire.NoError(t, err)\n\trequire.NotEmpty(t, createResp.GetApiKey())\n\n\t// List keys to get the prefix\n\tlistResp, err := apiServer.ListApiKeys(context.Background(), &v1.ListApiKeysRequest{})\n\trequire.NoError(t, err)\n\trequire.Len(t, listResp.GetApiKeys(), 1)\n\n\tkeyPrefix := listResp.GetApiKeys()[0].GetPrefix()\n\n\t// Expire by prefix\n\t_, err = apiServer.ExpireApiKey(context.Background(), &v1.ExpireApiKeyRequest{\n\t\tPrefix: keyPrefix,\n\t})\n\trequire.NoError(t, err)\n}\n\n// TestDeleteApiKey_ByID tests that API keys can be deleted by ID.\nfunc TestDeleteApiKey_ByID(t *testing.T) {\n\tt.Parallel()\n\n\tapp := createTestApp(t)\n\tapiServer := newHeadscaleV1APIServer(app)\n\n\t// Create an API key\n\tcreateResp, err := apiServer.CreateApiKey(context.Background(), &v1.CreateApiKeyRequest{})\n\trequire.NoError(t, err)\n\trequire.NotEmpty(t, createResp.GetApiKey())\n\n\t// List keys to get the ID\n\tlistResp, err := 
apiServer.ListApiKeys(context.Background(), &v1.ListApiKeysRequest{})\n\trequire.NoError(t, err)\n\trequire.Len(t, listResp.GetApiKeys(), 1)\n\n\tkeyID := listResp.GetApiKeys()[0].GetId()\n\n\t// Delete by ID\n\t_, err = apiServer.DeleteApiKey(context.Background(), &v1.DeleteApiKeyRequest{\n\t\tId: keyID,\n\t})\n\trequire.NoError(t, err)\n\n\t// Verify key is deleted\n\tlistResp, err = apiServer.ListApiKeys(context.Background(), &v1.ListApiKeysRequest{})\n\trequire.NoError(t, err)\n\tassert.Empty(t, listResp.GetApiKeys())\n}\n\n// TestDeleteApiKey_ByPrefix tests that API keys can still be deleted by prefix.\nfunc TestDeleteApiKey_ByPrefix(t *testing.T) {\n\tt.Parallel()\n\n\tapp := createTestApp(t)\n\tapiServer := newHeadscaleV1APIServer(app)\n\n\t// Create an API key\n\tcreateResp, err := apiServer.CreateApiKey(context.Background(), &v1.CreateApiKeyRequest{})\n\trequire.NoError(t, err)\n\trequire.NotEmpty(t, createResp.GetApiKey())\n\n\t// List keys to get the prefix\n\tlistResp, err := apiServer.ListApiKeys(context.Background(), &v1.ListApiKeysRequest{})\n\trequire.NoError(t, err)\n\trequire.Len(t, listResp.GetApiKeys(), 1)\n\n\tkeyPrefix := listResp.GetApiKeys()[0].GetPrefix()\n\n\t// Delete by prefix\n\t_, err = apiServer.DeleteApiKey(context.Background(), &v1.DeleteApiKeyRequest{\n\t\tPrefix: keyPrefix,\n\t})\n\trequire.NoError(t, err)\n\n\t// Verify key is deleted\n\tlistResp, err = apiServer.ListApiKeys(context.Background(), &v1.ListApiKeysRequest{})\n\trequire.NoError(t, err)\n\tassert.Empty(t, listResp.GetApiKeys())\n}\n\n// TestExpireApiKey_NoIdentifier tests that an error is returned when neither ID nor prefix is provided.\nfunc TestExpireApiKey_NoIdentifier(t *testing.T) {\n\tt.Parallel()\n\n\tapp := createTestApp(t)\n\tapiServer := newHeadscaleV1APIServer(app)\n\n\t_, err := apiServer.ExpireApiKey(context.Background(), &v1.ExpireApiKeyRequest{})\n\trequire.Error(t, err)\n\tst, ok := status.FromError(err)\n\trequire.True(t, ok, \"error should be a gRPC status error\")\n\tassert.Equal(t, codes.InvalidArgument, st.Code())\n\tassert.Contains(t, st.Message(), \"must provide id or prefix\")\n}\n\n// TestDeleteApiKey_NoIdentifier tests that an error is returned when neither ID nor prefix is provided.\nfunc TestDeleteApiKey_NoIdentifier(t *testing.T) {\n\tt.Parallel()\n\n\tapp := createTestApp(t)\n\tapiServer := newHeadscaleV1APIServer(app)\n\n\t_, err := apiServer.DeleteApiKey(context.Background(), &v1.DeleteApiKeyRequest{})\n\trequire.Error(t, err)\n\tst, ok := status.FromError(err)\n\trequire.True(t, ok, \"error should be a gRPC status error\")\n\tassert.Equal(t, codes.InvalidArgument, st.Code())\n\tassert.Contains(t, st.Message(), \"must provide id or prefix\")\n}\n\n// TestExpireApiKey_BothIdentifiers tests that an error is returned when both ID and prefix are provided.\nfunc TestExpireApiKey_BothIdentifiers(t *testing.T) {\n\tt.Parallel()\n\n\tapp := createTestApp(t)\n\tapiServer := newHeadscaleV1APIServer(app)\n\n\t_, err := apiServer.ExpireApiKey(context.Background(), &v1.ExpireApiKeyRequest{\n\t\tId:     1,\n\t\tPrefix: \"test\",\n\t})\n\trequire.Error(t, err)\n\tst, ok := status.FromError(err)\n\trequire.True(t, ok, \"error should be a gRPC status error\")\n\tassert.Equal(t, codes.InvalidArgument, st.Code())\n\tassert.Contains(t, st.Message(), \"provide either id or prefix, not both\")\n}\n\n// TestDeleteApiKey_BothIdentifiers tests that an error is returned when both ID and prefix are provided.\nfunc TestDeleteApiKey_BothIdentifiers(t *testing.T) {\n\tt.Parallel()\n\n\tapp 
:= createTestApp(t)\n\tapiServer := newHeadscaleV1APIServer(app)\n\n\t_, err := apiServer.DeleteApiKey(context.Background(), &v1.DeleteApiKeyRequest{\n\t\tId:     1,\n\t\tPrefix: \"test\",\n\t})\n\trequire.Error(t, err)\n\tst, ok := status.FromError(err)\n\trequire.True(t, ok, \"error should be a gRPC status error\")\n\tassert.Equal(t, codes.InvalidArgument, st.Code())\n\tassert.Contains(t, st.Message(), \"provide either id or prefix, not both\")\n}\n"
  },
  {
    "path": "hscontrol/handlers.go",
    "content": "package hscontrol\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/assets\"\n\t\"github.com/juanfont/headscale/hscontrol/templates\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/rs/zerolog/log\"\n\t\"tailscale.com/tailcfg\"\n)\n\nconst (\n\t// NoiseCapabilityVersion is used by Tailscale clients to indicate\n\t// their codebase version. Tailscale clients can communicate over TS2021\n\t// from CapabilityVersion 28, but we only have good support for it\n\t// since https://github.com/tailscale/tailscale/pull/4323 (Noise in any HTTPS port).\n\t//\n\t// Related to this change, there is https://github.com/tailscale/tailscale/pull/5379,\n\t// where CapabilityVersion 39 is introduced to indicate #4323 was merged.\n\t//\n\t// See also https://github.com/tailscale/tailscale/blob/main/tailcfg/tailcfg.go\n\tNoiseCapabilityVersion = 39\n\n\treservedResponseHeaderSize = 4\n)\n\n// httpError logs an error and sends an HTTP error response with the given.\nfunc httpError(w http.ResponseWriter, err error) {\n\tif herr, ok := errors.AsType[HTTPError](err); ok {\n\t\thttp.Error(w, herr.Msg, herr.Code)\n\t\tlog.Error().Err(herr.Err).Int(\"code\", herr.Code).Msgf(\"user msg: %s\", herr.Msg)\n\t} else {\n\t\thttp.Error(w, \"internal server error\", http.StatusInternalServerError)\n\t\tlog.Error().Err(err).Int(\"code\", http.StatusInternalServerError).Msg(\"http internal server error\")\n\t}\n}\n\n// HTTPError represents an error that is surfaced to the user via web.\ntype HTTPError struct {\n\tCode int    // HTTP response code to send to client; 0 means 500\n\tMsg  string // Response body to send to client\n\tErr  error  // Detailed error to log on the server\n}\n\nfunc (e HTTPError) Error() string { return fmt.Sprintf(\"http error[%d]: %s, %s\", e.Code, e.Msg, e.Err) }\nfunc (e HTTPError) Unwrap() error { return e.Err }\n\n// NewHTTPError returns an HTTPError containing the given information.\nfunc NewHTTPError(code int, msg string, err error) HTTPError {\n\treturn HTTPError{Code: code, Msg: msg, Err: err}\n}\n\nvar errMethodNotAllowed = NewHTTPError(http.StatusMethodNotAllowed, \"method not allowed\", nil)\n\nvar ErrRegisterMethodCLIDoesNotSupportExpire = errors.New(\n\t\"machines registered with CLI do not support expiry\",\n)\n\nfunc parseCapabilityVersion(req *http.Request) (tailcfg.CapabilityVersion, error) {\n\tclientCapabilityStr := req.URL.Query().Get(\"v\")\n\n\tif clientCapabilityStr == \"\" {\n\t\treturn 0, NewHTTPError(http.StatusBadRequest, \"capability version must be set\", nil)\n\t}\n\n\tclientCapabilityVersion, err := strconv.Atoi(clientCapabilityStr)\n\tif err != nil {\n\t\treturn 0, NewHTTPError(http.StatusBadRequest, \"invalid capability version\", fmt.Errorf(\"parsing capability version: %w\", err))\n\t}\n\n\treturn tailcfg.CapabilityVersion(clientCapabilityVersion), nil\n}\n\nfunc (h *Headscale) handleVerifyRequest(\n\treq *http.Request,\n\twriter io.Writer,\n) error {\n\tbody, err := io.ReadAll(req.Body)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"reading request body: %w\", err)\n\t}\n\n\tvar derpAdmitClientRequest tailcfg.DERPAdmitClientRequest\n\tif err := json.Unmarshal(body, &derpAdmitClientRequest); err != nil { //nolint:noinlineerr\n\t\treturn NewHTTPError(http.StatusBadRequest, \"Bad Request: invalid JSON\", fmt.Errorf(\"parsing DERP client request: %w\", err))\n\t}\n\n\tnodes := h.state.ListNodes()\n\n\t// 
Check if any node has the requested NodeKey\n\tvar nodeKeyFound bool\n\n\tfor _, node := range nodes.All() {\n\t\tif node.NodeKey() == derpAdmitClientRequest.NodePublic {\n\t\t\tnodeKeyFound = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tresp := &tailcfg.DERPAdmitClientResponse{\n\t\tAllow: nodeKeyFound,\n\t}\n\n\treturn json.NewEncoder(writer).Encode(resp)\n}\n\n// VerifyHandler see https://github.com/tailscale/tailscale/blob/964282d34f06ecc06ce644769c66b0b31d118340/derp/derp_server.go#L1159\n// DERP uses verifyClientsURL to verify whether a client is allowed to connect to the DERP server.\nfunc (h *Headscale) VerifyHandler(\n\twriter http.ResponseWriter,\n\treq *http.Request,\n) {\n\tif req.Method != http.MethodPost {\n\t\thttpError(writer, errMethodNotAllowed)\n\t\treturn\n\t}\n\n\terr := h.handleVerifyRequest(req, writer)\n\tif err != nil {\n\t\thttpError(writer, err)\n\t\treturn\n\t}\n\n\twriter.Header().Set(\"Content-Type\", \"application/json\")\n}\n\n// KeyHandler provides the Headscale public key.\n// Listens on /key.\nfunc (h *Headscale) KeyHandler(\n\twriter http.ResponseWriter,\n\treq *http.Request,\n) {\n\t// New Tailscale clients send a 'v' parameter to indicate the CurrentCapabilityVersion\n\tcapVer, err := parseCapabilityVersion(req)\n\tif err != nil {\n\t\thttpError(writer, err)\n\t\treturn\n\t}\n\n\t// TS2021 (Tailscale v2 protocol) requires a different key.\n\tif capVer >= NoiseCapabilityVersion {\n\t\tresp := tailcfg.OverTLSPublicKeyResponse{\n\t\t\tPublicKey: h.noisePrivateKey.Public(),\n\t\t}\n\n\t\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\n\t\terr := json.NewEncoder(writer).Encode(resp)\n\t\tif err != nil {\n\t\t\tlog.Error().Err(err).Msg(\"failed to encode public key response\")\n\t\t}\n\n\t\treturn\n\t}\n}\n\nfunc (h *Headscale) HealthHandler(\n\twriter http.ResponseWriter,\n\treq *http.Request,\n) {\n\trespond := func(err error) {\n\t\twriter.Header().Set(\"Content-Type\", \"application/health+json; charset=utf-8\")\n\n\t\tres := struct {\n\t\t\tStatus string `json:\"status\"`\n\t\t}{\n\t\t\tStatus: \"pass\",\n\t\t}\n\n\t\tif err != nil {\n\t\t\twriter.WriteHeader(http.StatusInternalServerError)\n\n\t\t\tres.Status = \"fail\"\n\t\t}\n\n\t\tencErr := json.NewEncoder(writer).Encode(res)\n\t\tif encErr != nil {\n\t\t\tlog.Error().Err(encErr).Msg(\"failed to encode health response\")\n\t\t}\n\t}\n\n\terr := h.state.PingDB(req.Context())\n\tif err != nil {\n\t\trespond(err)\n\n\t\treturn\n\t}\n\n\trespond(nil)\n}\n\nfunc (h *Headscale) RobotsHandler(\n\twriter http.ResponseWriter,\n\treq *http.Request,\n) {\n\twriter.Header().Set(\"Content-Type\", \"text/plain\")\n\twriter.WriteHeader(http.StatusOK)\n\n\t_, err := writer.Write([]byte(\"User-agent: *\\nDisallow: /\"))\n\tif err != nil {\n\t\tlog.Error().\n\t\t\tCaller().\n\t\t\tErr(err).\n\t\t\tMsg(\"Failed to write HTTP response\")\n\t}\n}\n\n// VersionHandler returns version information about the Headscale server.\n// Listens on /version.\nfunc (h *Headscale) VersionHandler(\n\twriter http.ResponseWriter,\n\treq *http.Request,\n) {\n\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\twriter.WriteHeader(http.StatusOK)\n\n\tversionInfo := types.GetVersionInfo()\n\n\terr := json.NewEncoder(writer).Encode(versionInfo)\n\tif err != nil {\n\t\tlog.Error().\n\t\t\tCaller().\n\t\t\tErr(err).\n\t\t\tMsg(\"Failed to write version response\")\n\t}\n}\n\ntype AuthProviderWeb struct {\n\tserverURL string\n}\n
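\n// NewAuthProviderWeb returns an AuthProviderWeb that builds the register\n// and auth URLs under the given server URL.\nfunc NewAuthProviderWeb(serverURL string) *AuthProviderWeb {\n\treturn 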
&AuthProviderWeb{\n\t\tserverURL: serverURL,\n\t}\n}\n\nfunc (a *AuthProviderWeb) RegisterURL(authID types.AuthID) string {\n\treturn fmt.Sprintf(\n\t\t\"%s/register/%s\",\n\t\tstrings.TrimSuffix(a.serverURL, \"/\"),\n\t\tauthID.String())\n}\n\nfunc (a *AuthProviderWeb) AuthURL(authID types.AuthID) string {\n\treturn fmt.Sprintf(\n\t\t\"%s/auth/%s\",\n\t\tstrings.TrimSuffix(a.serverURL, \"/\"),\n\t\tauthID.String())\n}\n\nfunc (a *AuthProviderWeb) AuthHandler(\n\twriter http.ResponseWriter,\n\treq *http.Request,\n) {\n\tauthID, err := authIDFromRequest(req)\n\tif err != nil {\n\t\thttpError(writer, err)\n\t\treturn\n\t}\n\n\twriter.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\twriter.WriteHeader(http.StatusOK)\n\n\t_, err = writer.Write([]byte(templates.AuthWeb(\n\t\t\"Authentication check\",\n\t\t\"Run the command below in the headscale server to approve this authentication request:\",\n\t\t\"headscale auth approve --auth-id \"+authID.String(),\n\t).Render()))\n\tif err != nil {\n\t\tlog.Error().Err(err).Msg(\"failed to write auth response\")\n\t}\n}\n\nfunc authIDFromRequest(req *http.Request) (types.AuthID, error) {\n\traw, err := urlParam[string](req, \"auth_id\")\n\tif err != nil {\n\t\treturn \"\", NewHTTPError(http.StatusBadRequest, \"invalid auth id\", fmt.Errorf(\"parsing auth_id from URL: %w\", err))\n\t}\n\n\t// We need to make sure we don't open up for XSS-style injections: if the\n\t// parameter passed as a key cannot be parsed/validated as an AuthID, fail\n\t// to render the template and log an error.\n\tauthId, err := types.AuthIDFromString(raw)\n\tif err != nil {\n\t\treturn \"\", NewHTTPError(http.StatusBadRequest, \"invalid auth id\", fmt.Errorf(\"parsing auth_id from URL: %w\", err))\n\t}\n\n\treturn authId, nil\n}\n\n// RegisterHandler shows a simple message in the browser pointing to the CLI.\n// Listens on /register/:registration_id.\n//\n// This is not part of the Tailscale control API, as we could send whatever URL\n// in the RegisterResponse.AuthURL field.\nfunc (a *AuthProviderWeb) RegisterHandler(\n\twriter http.ResponseWriter,\n\treq *http.Request,\n) {\n\tauthId, err := authIDFromRequest(req)\n\tif err != nil {\n\t\thttpError(writer, err)\n\t\treturn\n\t}\n\n\twriter.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\twriter.WriteHeader(http.StatusOK)\n\n\t_, err = writer.Write([]byte(templates.AuthWeb(\n\t\t\"Node registration\",\n\t\t\"Run the command below in the headscale server to add this node to your network:\",\n\t\tfmt.Sprintf(\"headscale auth register --auth-id %s --user USERNAME\", authId.String()),\n\t).Render()))\n\tif err != nil {\n\t\tlog.Error().Err(err).Msg(\"failed to write register response\")\n\t}\n}\n\nfunc FaviconHandler(writer http.ResponseWriter, req *http.Request) {\n\twriter.Header().Set(\"Content-Type\", \"image/png\")\n\thttp.ServeContent(writer, req, \"favicon.ico\", time.Unix(0, 0), bytes.NewReader(assets.Favicon))\n}\n\n// BlankHandler returns a blank page with favicon linked.\nfunc BlankHandler(writer http.ResponseWriter, req *http.Request) {\n\twriter.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\twriter.WriteHeader(http.StatusOK)\n\n\t_, err := writer.Write([]byte(templates.BlankPage().Render()))\n\tif err != nil {\n\t\tlog.Error().\n\t\t\tCaller().\n\t\t\tErr(err).\n\t\t\tMsg(\"Failed to write HTTP response\")\n\t}\n}\n"
  },
  {
    "path": "hscontrol/mapper/batcher.go",
    "content": "package mapper\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/state\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/types/change\"\n\t\"github.com/juanfont/headscale/hscontrol/util/zlog/zf\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/promauto\"\n\t\"github.com/puzpuzpuz/xsync/v4\"\n\t\"github.com/rs/zerolog/log\"\n\t\"tailscale.com/tailcfg\"\n)\n\n// Mapper errors.\nvar (\n\tErrInvalidNodeID      = errors.New(\"invalid nodeID\")\n\tErrMapperNil          = errors.New(\"mapper is nil\")\n\tErrNodeConnectionNil  = errors.New(\"nodeConnection is nil\")\n\tErrNodeNotFoundMapper = errors.New(\"node not found\")\n)\n\n// offlineNodeCleanupThreshold is how long a node must be disconnected\n// before cleanupOfflineNodes removes its in-memory state.\nconst offlineNodeCleanupThreshold = 15 * time.Minute\n\nvar mapResponseGenerated = promauto.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"headscale\",\n\tName:      \"mapresponse_generated_total\",\n\tHelp:      \"total count of mapresponses generated by response type\",\n}, []string{\"response_type\"})\n\nfunc NewBatcher(batchTime time.Duration, workers int, mapper *mapper) *Batcher {\n\treturn &Batcher{\n\t\tmapper:  mapper,\n\t\tworkers: workers,\n\t\ttick:    time.NewTicker(batchTime),\n\n\t\t// The size of this channel is arbitrary chosen, the sizing should be revisited.\n\t\tworkCh: make(chan work, workers*200),\n\t\tdone:   make(chan struct{}),\n\t\tnodes:  xsync.NewMap[types.NodeID, *multiChannelNodeConn](),\n\t}\n}\n\n// NewBatcherAndMapper creates a new Batcher with its mapper.\nfunc NewBatcherAndMapper(cfg *types.Config, state *state.State) *Batcher {\n\tm := newMapper(cfg, state)\n\tb := NewBatcher(cfg.Tuning.BatchChangeDelay, cfg.Tuning.BatcherWorkers, m)\n\tm.batcher = b\n\n\treturn b\n}\n\n// nodeConnection interface for different connection implementations.\ntype nodeConnection interface {\n\tnodeID() types.NodeID\n\tversion() tailcfg.CapabilityVersion\n\tsend(data *tailcfg.MapResponse) error\n\t// computePeerDiff returns peers that were previously sent but are no longer in the current list.\n\tcomputePeerDiff(currentPeers []tailcfg.NodeID) (removed []tailcfg.NodeID)\n\t// updateSentPeers updates the tracking of which peers have been sent to this node.\n\tupdateSentPeers(resp *tailcfg.MapResponse)\n}\n\n// generateMapResponse generates a [tailcfg.MapResponse] for the given NodeID based on the provided [change.Change].\nfunc generateMapResponse(nc nodeConnection, mapper *mapper, r change.Change) (*tailcfg.MapResponse, error) {\n\tnodeID := nc.nodeID()\n\tversion := nc.version()\n\n\tif r.IsEmpty() {\n\t\treturn nil, nil //nolint:nilnil // Empty response means nothing to send\n\t}\n\n\tif nodeID == 0 {\n\t\treturn nil, fmt.Errorf(\"%w: %d\", ErrInvalidNodeID, nodeID)\n\t}\n\n\tif mapper == nil {\n\t\treturn nil, fmt.Errorf(\"%w for nodeID %d\", ErrMapperNil, nodeID)\n\t}\n\n\t// Handle self-only responses\n\tif r.IsSelfOnly() && r.TargetNode != nodeID {\n\t\treturn nil, nil //nolint:nilnil // No response needed for other nodes when self-only\n\t}\n\n\t// Check if this is a self-update (the changed node is the receiving node).\n\t// When true, ensure the response includes the node's self info so it sees\n\t// its own attribute changes (e.g., tags changed via admin API).\n\tisSelfUpdate := r.OriginNode != 0 && r.OriginNode == 
nodeID\n\n\tvar (\n\t\tmapResp *tailcfg.MapResponse\n\t\terr     error\n\t)\n\n\t// Track metric using categorized type, not free-form reason\n\tmapResponseGenerated.WithLabelValues(r.Type()).Inc()\n\n\t// Check if this requires runtime peer visibility computation (e.g., policy changes)\n\tif r.RequiresRuntimePeerComputation {\n\t\tcurrentPeers := mapper.state.ListPeers(nodeID)\n\n\t\tcurrentPeerIDs := make([]tailcfg.NodeID, 0, currentPeers.Len())\n\t\tfor _, peer := range currentPeers.All() {\n\t\t\tcurrentPeerIDs = append(currentPeerIDs, peer.ID().NodeID())\n\t\t}\n\n\t\tremovedPeers := nc.computePeerDiff(currentPeerIDs)\n\t\t// Include self node when this is a self-update (e.g., node's own tags changed)\n\t\t// so the node sees its updated self info along with new packet filters.\n\t\tmapResp, err = mapper.policyChangeResponse(nodeID, version, removedPeers, currentPeers, isSelfUpdate)\n\t} else if isSelfUpdate {\n\t\t// Non-policy self-update: just send the self node info\n\t\tmapResp, err = mapper.selfMapResponse(nodeID, version)\n\t} else {\n\t\tmapResp, err = mapper.buildFromChange(nodeID, version, &r)\n\t}\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"generating map response for nodeID %d: %w\", nodeID, err)\n\t}\n\n\treturn mapResp, nil\n}\n\n// handleNodeChange generates and sends a [tailcfg.MapResponse] for a given node and [change.Change].\nfunc handleNodeChange(nc nodeConnection, mapper *mapper, r change.Change) error {\n\tif nc == nil {\n\t\treturn ErrNodeConnectionNil\n\t}\n\n\tnodeID := nc.nodeID()\n\n\tlog.Debug().Caller().Uint64(zf.NodeID, nodeID.Uint64()).Str(zf.Reason, r.Reason).Msg(\"node change processing started\")\n\n\tdata, err := generateMapResponse(nc, mapper, r)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"generating map response for node %d: %w\", nodeID, err)\n\t}\n\n\tif data == nil {\n\t\t// No data to send is valid for some response types\n\t\treturn nil\n\t}\n\n\t// Send the map response\n\terr = nc.send(data)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"sending map response to node %d: %w\", nodeID, err)\n\t}\n\n\t// Update peer tracking after successful send\n\tnc.updateSentPeers(data)\n\n\treturn nil\n}\n\n// workResult represents the result of processing a change.\ntype workResult struct {\n\tmapResponse *tailcfg.MapResponse\n\terr         error\n}\n\n// work represents a unit of work to be processed by workers.\n// All pending changes for a node are bundled into a single work item\n// so that one worker processes them sequentially. This prevents\n// out-of-order MapResponse delivery and races on lastSentPeers\n// that occur when multiple workers process changes for the same node.\ntype work struct {\n\tchanges  []change.Change\n\tnodeID   types.NodeID\n\tresultCh chan<- workResult // optional channel for synchronous operations\n}\n\n// Batcher errors.\nvar (\n\terrConnectionClosed      = errors.New(\"connection channel already closed\")\n\tErrInitialMapSendTimeout = errors.New(\"sending initial map: timeout\")\n\tErrBatcherShuttingDown   = errors.New(\"batcher shutting down\")\n\tErrConnectionSendTimeout = errors.New(\"timeout sending to channel (likely stale connection)\")\n)\n\n// Batcher batches and distributes map responses to connected nodes.\n// It uses concurrent maps, per-node mutexes, and a worker pool.\n//\n// Lifecycle: Call Start() to spawn workers, then Close() to shut down.\n// Close() blocks until all workers have exited. 
A Batcher must not\n// be reused after Close().\ntype Batcher struct {\n\ttick    *time.Ticker\n\tmapper  *mapper\n\tworkers int\n\n\tnodes *xsync.Map[types.NodeID, *multiChannelNodeConn]\n\n\t// Work queue channel\n\tworkCh   chan work\n\tdone     chan struct{}\n\tdoneOnce sync.Once // Ensures done is only closed once\n\n\t// wg tracks the doWork and all worker goroutines so that Close()\n\t// can block until they have fully exited.\n\twg sync.WaitGroup\n\n\tstarted atomic.Bool // Ensures Start() is only called once\n\n\t// Metrics\n\ttotalNodes      atomic.Int64\n\tworkQueuedCount atomic.Int64\n\tworkProcessed   atomic.Int64\n\tworkErrors      atomic.Int64\n}\n\n// AddNode registers a new node connection with the batcher and sends an initial map response.\n// It creates or updates the node's connection data, validates the initial map generation,\n// and notifies other nodes that this node has come online.\n// The stop function tears down the owning session if this connection is later declared stale.\nfunc (b *Batcher) AddNode(\n\tid types.NodeID,\n\tc chan<- *tailcfg.MapResponse,\n\tversion tailcfg.CapabilityVersion,\n\tstop func(),\n) error {\n\taddNodeStart := time.Now()\n\tnlog := log.With().Uint64(zf.NodeID, id.Uint64()).Logger()\n\n\t// Generate connection ID\n\tconnID := generateConnectionID()\n\n\t// Create new connection entry\n\tnow := time.Now()\n\tnewEntry := &connectionEntry{\n\t\tid:      connID,\n\t\tc:       c,\n\t\tversion: version,\n\t\tcreated: now,\n\t\tstop:    stop,\n\t}\n\t// Initialize last used timestamp\n\tnewEntry.lastUsed.Store(now.Unix())\n\n\t// Get or create multiChannelNodeConn - this reuses existing offline nodes for rapid reconnection\n\tnodeConn, loaded := b.nodes.LoadOrStore(id, newMultiChannelNodeConn(id, b.mapper))\n\n\tif !loaded {\n\t\tb.totalNodes.Add(1)\n\t}\n\n\t// Add connection to the list (lock-free)\n\tnodeConn.addConnection(newEntry)\n\n\t// Use the worker pool for controlled concurrency instead of direct generation\n\tinitialMap, err := b.MapResponseFromChange(id, change.FullSelf(id))\n\tif err != nil {\n\t\tnlog.Error().Err(err).Msg(\"initial map generation failed\")\n\t\tnodeConn.removeConnectionByChannel(c)\n\n\t\tif !nodeConn.hasActiveConnections() {\n\t\t\tnodeConn.markDisconnected()\n\t\t}\n\n\t\treturn fmt.Errorf(\"generating initial map for node %d: %w\", id, err)\n\t}\n\n\t// Use a blocking send with timeout for initial map since the channel should be ready\n\t// and we want to avoid the race condition where the receiver isn't ready yet\n\tselect {\n\tcase c <- initialMap:\n\t\t// Success\n\tcase <-time.After(5 * time.Second): //nolint:mnd\n\t\tnlog.Error().Err(ErrInitialMapSendTimeout).Msg(\"initial map send timeout\")\n\t\tnlog.Debug().Caller().Dur(\"timeout.duration\", 5*time.Second). 
//nolint:mnd\n\t\t\tMsg(\"initial map send timed out because channel was blocked or receiver not ready\")\n\t\tnodeConn.removeConnectionByChannel(c)\n\n\t\tif !nodeConn.hasActiveConnections() {\n\t\t\tnodeConn.markDisconnected()\n\t\t}\n\n\t\treturn fmt.Errorf(\"%w for node %d\", ErrInitialMapSendTimeout, id)\n\t}\n\n\t// Mark the node as connected now that the initial map was sent.\n\tnodeConn.markConnected()\n\n\t// Node will automatically receive updates through the normal flow.\n\t// The initial full map already contains all current state.\n\n\tnlog.Debug().Caller().Dur(zf.TotalDuration, time.Since(addNodeStart)).\n\t\tInt(\"active.connections\", nodeConn.getActiveConnectionCount()).\n\t\tMsg(\"node connection established in batcher\")\n\n\treturn nil\n}\n\n// RemoveNode disconnects a node from the batcher, marking it as offline and cleaning up its state.\n// It validates the connection channel matches one of the current connections, closes that specific connection,\n// and keeps the node entry alive for rapid reconnections instead of aggressive deletion.\n// Reports if the node still has active connections after removal.\nfunc (b *Batcher) RemoveNode(id types.NodeID, c chan<- *tailcfg.MapResponse) bool {\n\tnlog := log.With().Uint64(zf.NodeID, id.Uint64()).Logger()\n\n\tnodeConn, exists := b.nodes.Load(id)\n\tif !exists || nodeConn == nil {\n\t\tnlog.Debug().Caller().Msg(\"removeNode called for non-existent node\")\n\t\treturn false\n\t}\n\n\t// Remove specific connection\n\tremoved := nodeConn.removeConnectionByChannel(c)\n\tif !removed {\n\t\tnlog.Debug().Caller().Msg(\"removeNode: channel not found, connection already removed or invalid\")\n\t}\n\n\t// Check if node has any remaining active connections\n\tif nodeConn.hasActiveConnections() {\n\t\tnlog.Debug().Caller().\n\t\t\tInt(\"active.connections\", nodeConn.getActiveConnectionCount()).\n\t\t\tMsg(\"node connection removed but keeping online, other connections remain\")\n\n\t\treturn true // Node still has active connections\n\t}\n\n\t// No active connections - keep the node entry alive for rapid reconnections.\n\t// The node will get a fresh full map when it reconnects.\n\tnlog.Debug().Caller().Msg(\"node disconnected from batcher, keeping entry for rapid reconnection\")\n\tnodeConn.markDisconnected()\n\n\treturn false\n}\n\n// AddWork queues a change to be processed by the batcher.\nfunc (b *Batcher) AddWork(r ...change.Change) {\n\tb.addToBatch(r...)\n}\n\nfunc (b *Batcher) Start() {\n\tif !b.started.CompareAndSwap(false, true) {\n\t\treturn\n\t}\n\n\tb.wg.Add(1)\n\n\tgo b.doWork()\n}\n\nfunc (b *Batcher) Close() {\n\t// Signal shutdown to all goroutines, only once.\n\t// Workers and queueWork both select on done, so closing it\n\t// is sufficient for graceful shutdown. We intentionally do NOT\n\t// close workCh here because processBatchedChanges or\n\t// MapResponseFromChange may still be sending on it concurrently.\n\tb.doneOnce.Do(func() {\n\t\tclose(b.done)\n\t})\n\n\t// Wait for all worker goroutines (and doWork) to exit before\n\t// tearing down node connections. 
This prevents workers from\n\t// sending on connections that are being closed concurrently.\n\tb.wg.Wait()\n\n\t// Stop the ticker to prevent resource leaks.\n\tb.tick.Stop()\n\n\t// Close the underlying channels supplying the data to the clients.\n\tb.nodes.Range(func(nodeID types.NodeID, conn *multiChannelNodeConn) bool {\n\t\tif conn == nil {\n\t\t\treturn true\n\t\t}\n\n\t\tconn.close()\n\n\t\treturn true\n\t})\n}\n\nfunc (b *Batcher) doWork() {\n\tdefer b.wg.Done()\n\n\tfor i := range b.workers {\n\t\tb.wg.Add(1)\n\n\t\tgo b.worker(i + 1)\n\t}\n\n\t// Create a cleanup ticker for removing truly disconnected nodes\n\tcleanupTicker := time.NewTicker(5 * time.Minute)\n\tdefer cleanupTicker.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-b.tick.C:\n\t\t\t// Process batched changes\n\t\t\tb.processBatchedChanges()\n\t\tcase <-cleanupTicker.C:\n\t\t\t// Clean up nodes that have been offline for too long\n\t\t\tb.cleanupOfflineNodes()\n\t\tcase <-b.done:\n\t\t\tlog.Info().Msg(\"batcher done channel closed, stopping feeding workers\")\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (b *Batcher) worker(workerID int) {\n\tdefer b.wg.Done()\n\n\twlog := log.With().Int(zf.WorkerID, workerID).Logger()\n\n\tfor {\n\t\tselect {\n\t\tcase w, ok := <-b.workCh:\n\t\t\tif !ok {\n\t\t\t\twlog.Debug().Msg(\"work channel closed, worker shutting down\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tb.workProcessed.Add(1)\n\n\t\t\t// Synchronous path: a caller is blocking on resultCh\n\t\t\t// waiting for a generated MapResponse (used by AddNode\n\t\t\t// for the initial map). Always contains a single change.\n\t\t\tif w.resultCh != nil {\n\t\t\t\tvar result workResult\n\n\t\t\t\tif nc, exists := b.nodes.Load(w.nodeID); exists && nc != nil {\n\t\t\t\t\t// Hold workMu so concurrent async work for this\n\t\t\t\t\t// node waits until the initial map is sent.\n\t\t\t\t\tnc.workMu.Lock()\n\n\t\t\t\t\tvar err error\n\n\t\t\t\t\tresult.mapResponse, err = generateMapResponse(nc, b.mapper, w.changes[0])\n\n\t\t\t\t\tresult.err = err\n\t\t\t\t\tif result.err != nil {\n\t\t\t\t\t\tb.workErrors.Add(1)\n\t\t\t\t\t\twlog.Error().Err(result.err).\n\t\t\t\t\t\t\tUint64(zf.NodeID, w.nodeID.Uint64()).\n\t\t\t\t\t\t\tStr(zf.Reason, w.changes[0].Reason).\n\t\t\t\t\t\t\tMsg(\"failed to generate map response for synchronous work\")\n\t\t\t\t\t} else if result.mapResponse != nil {\n\t\t\t\t\t\tnc.updateSentPeers(result.mapResponse)\n\t\t\t\t\t}\n\n\t\t\t\t\tnc.workMu.Unlock()\n\t\t\t\t} else {\n\t\t\t\t\tresult.err = fmt.Errorf(\"%w: %d\", ErrNodeNotFoundMapper, w.nodeID)\n\n\t\t\t\t\tb.workErrors.Add(1)\n\t\t\t\t\twlog.Error().Err(result.err).\n\t\t\t\t\t\tUint64(zf.NodeID, w.nodeID.Uint64()).\n\t\t\t\t\t\tMsg(\"node not found for synchronous work\")\n\t\t\t\t}\n\n\t\t\t\tselect {\n\t\t\t\tcase w.resultCh <- result:\n\t\t\t\tcase <-b.done:\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Async path: process all bundled changes sequentially.\n\t\t\t// workMu ensures that if another worker picks up the next\n\t\t\t// tick's bundle for the same node, it waits until we\n\t\t\t// finish — preventing out-of-order delivery and races\n\t\t\t// on lastSentPeers (Clear+Store vs Range).\n\t\t\tif nc, exists := b.nodes.Load(w.nodeID); exists && nc != nil {\n\t\t\t\tnc.workMu.Lock()\n\t\t\t\tfor _, ch := range w.changes {\n\t\t\t\t\terr := nc.change(ch)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tb.workErrors.Add(1)\n\t\t\t\t\t\twlog.Error().Err(err).\n\t\t\t\t\t\t\tUint64(zf.NodeID, w.nodeID.Uint64()).\n\t\t\t\t\t\t\tStr(zf.Reason, 
ch.Reason).\n\t\t\t\t\t\t\tMsg(\"failed to apply change\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tnc.workMu.Unlock()\n\t\t\t}\n\t\tcase <-b.done:\n\t\t\twlog.Debug().Msg(\"batcher shutting down, exiting worker\")\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// queueWork safely queues work.\nfunc (b *Batcher) queueWork(w work) {\n\tb.workQueuedCount.Add(1)\n\n\tselect {\n\tcase b.workCh <- w:\n\t\t// Successfully queued\n\tcase <-b.done:\n\t\t// Batcher is shutting down\n\t\treturn\n\t}\n}\n\n// addToBatch adds changes to the pending batch.\nfunc (b *Batcher) addToBatch(changes ...change.Change) {\n\t// Clean up any nodes being permanently removed from the system.\n\t//\n\t// This handles the case where a node is deleted from state but the batcher\n\t// still has it registered. By cleaning up here, we prevent \"node not found\"\n\t// errors when workers try to generate map responses for deleted nodes.\n\t//\n\t// Safety: change.Change.PeersRemoved is ONLY populated when nodes are actually\n\t// deleted from the system (via change.NodeRemoved in state.DeleteNode). Policy\n\t// changes that affect peer visibility do NOT use this field - they set\n\t// RequiresRuntimePeerComputation=true and compute removed peers at runtime,\n\t// putting them in tailcfg.MapResponse.PeersRemoved (a different struct).\n\t// Therefore, this cleanup only removes nodes that are truly being deleted,\n\t// not nodes that are still connected but have lost visibility of certain peers.\n\t//\n\t// See: https://github.com/juanfont/headscale/issues/2924\n\tfor _, ch := range changes {\n\t\tfor _, removedID := range ch.PeersRemoved {\n\t\t\tif _, existed := b.nodes.LoadAndDelete(removedID); existed {\n\t\t\t\tb.totalNodes.Add(-1)\n\t\t\t\tlog.Debug().\n\t\t\t\t\tUint64(zf.NodeID, removedID.Uint64()).\n\t\t\t\t\tMsg(\"removed deleted node from batcher\")\n\t\t\t}\n\t\t}\n\t}\n\n\t// Short circuit if any of the changes is a full update, which\n\t// means we can skip sending individual changes.\n\tif change.HasFull(changes) {\n\t\tb.nodes.Range(func(_ types.NodeID, nc *multiChannelNodeConn) bool {\n\t\t\tif nc == nil {\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tnc.pendingMu.Lock()\n\t\t\tnc.pending = []change.Change{change.FullUpdate()}\n\t\t\tnc.pendingMu.Unlock()\n\n\t\t\treturn true\n\t\t})\n\n\t\treturn\n\t}\n\n\tbroadcast, targeted := change.SplitTargetedAndBroadcast(changes)\n\n\t// Handle targeted changes - send only to the specific node\n\tfor _, ch := range targeted {\n\t\tif nc, ok := b.nodes.Load(ch.TargetNode); ok && nc != nil {\n\t\t\tnc.appendPending(ch)\n\t\t}\n\t}\n\n\t// Handle broadcast changes - send to all nodes, filtering as needed\n\tif len(broadcast) > 0 {\n\t\tb.nodes.Range(func(nodeID types.NodeID, nc *multiChannelNodeConn) bool {\n\t\t\tif nc == nil {\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tfiltered := change.FilterForNode(nodeID, broadcast)\n\n\t\t\tif len(filtered) > 0 {\n\t\t\t\tnc.appendPending(filtered...)\n\t\t\t}\n\n\t\t\treturn true\n\t\t})\n\t}\n}\n\n// processBatchedChanges processes all pending batched changes.\nfunc (b *Batcher) processBatchedChanges() {\n\tb.nodes.Range(func(nodeID types.NodeID, nc *multiChannelNodeConn) bool {\n\t\tif nc == nil {\n\t\t\treturn true\n\t\t}\n\n\t\tpending := nc.drainPending()\n\t\tif len(pending) == 0 {\n\t\t\treturn true\n\t\t}\n\n\t\t// Queue a single work item containing all pending changes.\n\t\t// One item per node ensures a single worker processes them\n\t\t// sequentially, preventing out-of-order delivery.\n\t\tb.queueWork(work{changes: pending, nodeID: nodeID, resultCh: 
nil})\n\n\t\treturn true\n\t})\n}\n\n// cleanupOfflineNodes removes nodes that have been offline for too long to prevent memory leaks.\n// Uses Compute() for atomic check-and-delete to prevent TOCTOU races where a node\n// reconnects between the hasActiveConnections() check and the Delete() call.\nfunc (b *Batcher) cleanupOfflineNodes() {\n\tvar nodesToCleanup []types.NodeID\n\n\t// Find nodes that have been offline for too long by scanning b.nodes\n\t// and checking each node's disconnectedAt timestamp.\n\tb.nodes.Range(func(nodeID types.NodeID, nc *multiChannelNodeConn) bool {\n\t\tif nc != nil && !nc.hasActiveConnections() && nc.offlineDuration() > offlineNodeCleanupThreshold {\n\t\t\tnodesToCleanup = append(nodesToCleanup, nodeID)\n\t\t}\n\n\t\treturn true\n\t})\n\n\t// Clean up the identified nodes using Compute() for atomic check-and-delete.\n\t// This prevents a TOCTOU race where a node reconnects (adding an active\n\t// connection) between the hasActiveConnections() check and the Delete() call.\n\tcleaned := 0\n\n\tfor _, nodeID := range nodesToCleanup {\n\t\tb.nodes.Compute(\n\t\t\tnodeID,\n\t\t\tfunc(conn *multiChannelNodeConn, loaded bool) (*multiChannelNodeConn, xsync.ComputeOp) {\n\t\t\t\tif !loaded || conn == nil || conn.hasActiveConnections() {\n\t\t\t\t\treturn conn, xsync.CancelOp\n\t\t\t\t}\n\n\t\t\t\t// Perform all bookkeeping inside the Compute callback so\n\t\t\t\t// that a concurrent AddNode (which calls LoadOrStore on\n\t\t\t\t// b.nodes) cannot slip in between the delete and the\n\t\t\t\t// counter update.\n\t\t\t\tb.totalNodes.Add(-1)\n\n\t\t\t\tcleaned++\n\n\t\t\t\tlog.Info().Uint64(zf.NodeID, nodeID.Uint64()).\n\t\t\t\t\tDur(\"offline_duration\", offlineNodeCleanupThreshold).\n\t\t\t\t\tMsg(\"cleaning up node that has been offline for too long\")\n\n\t\t\t\treturn conn, xsync.DeleteOp\n\t\t\t},\n\t\t)\n\t}\n\n\tif cleaned > 0 {\n\t\tlog.Info().Int(zf.CleanedNodes, cleaned).\n\t\t\tMsg(\"completed cleanup of long-offline nodes\")\n\t}\n}\n\n// IsConnected is a lock-free read that checks if a node is connected.\n// A node is considered connected if it has active connections or has\n// not been marked as disconnected.\nfunc (b *Batcher) IsConnected(id types.NodeID) bool {\n\tnodeConn, exists := b.nodes.Load(id)\n\tif !exists || nodeConn == nil {\n\t\treturn false\n\t}\n\n\treturn nodeConn.isConnected()\n}\n\n// ConnectedMap returns a lock-free map of all known nodes and their\n// connection status (true = connected, false = disconnected).\nfunc (b *Batcher) ConnectedMap() *xsync.Map[types.NodeID, bool] {\n\tret := xsync.NewMap[types.NodeID, bool]()\n\n\tb.nodes.Range(func(id types.NodeID, nc *multiChannelNodeConn) bool {\n\t\tif nc != nil {\n\t\t\tret.Store(id, nc.isConnected())\n\t\t}\n\n\t\treturn true\n\t})\n\n\treturn ret\n}\n\n// MapResponseFromChange queues work to generate a map response and waits for the result.\n// This allows synchronous map generation using the same worker pool.\nfunc (b *Batcher) MapResponseFromChange(id types.NodeID, ch change.Change) (*tailcfg.MapResponse, error) {\n\tresultCh := make(chan workResult, 1)\n\n\t// Queue the work with a result channel using the safe queueing method\n\tb.queueWork(work{changes: []change.Change{ch}, nodeID: id, resultCh: resultCh})\n\n\t// Wait for the result\n\tselect {\n\tcase result := <-resultCh:\n\t\treturn result.mapResponse, result.err\n\tcase <-b.done:\n\t\treturn nil, fmt.Errorf(\"%w while generating map response for node %d\", ErrBatcherShuttingDown, id)\n\t}\n}\n\n// DebugNodeInfo contains debug 
information about a node's connections.\ntype DebugNodeInfo struct {\n\tConnected         bool `json:\"connected\"`\n\tActiveConnections int  `json:\"active_connections\"`\n}\n\n// Debug returns a pre-baked map of node debug information for the debug interface.\nfunc (b *Batcher) Debug() map[types.NodeID]DebugNodeInfo {\n\tresult := make(map[types.NodeID]DebugNodeInfo)\n\n\tb.nodes.Range(func(id types.NodeID, nc *multiChannelNodeConn) bool {\n\t\tif nc == nil {\n\t\t\treturn true\n\t\t}\n\n\t\tresult[id] = DebugNodeInfo{\n\t\t\tConnected:         nc.isConnected(),\n\t\t\tActiveConnections: nc.getActiveConnectionCount(),\n\t\t}\n\n\t\treturn true\n\t})\n\n\treturn result\n}\n\nfunc (b *Batcher) DebugMapResponses() (map[types.NodeID][]tailcfg.MapResponse, error) {\n\treturn b.mapper.debugMapResponses()\n}\n\n// WorkErrors returns the count of work errors encountered.\n// This is primarily useful for testing and debugging.\nfunc (b *Batcher) WorkErrors() int64 {\n\treturn b.workErrors.Load()\n}\n"
  },
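  {
    "path": "hscontrol/mapper/batcher_basics_example_test.go",
    "content": "package mapper\n\n// Illustrative, self-contained sketch of two concurrency patterns the\n// Batcher relies on, using only the standard library. Every identifier in\n// this file (demoPool, demoWork, and friends) is hypothetical and exists\n// purely as documentation; nothing here is used by the batcher itself.\n//\n// Pattern 1: every send into the work queue is paired with a select on the\n// done channel, so producers never block forever during shutdown (mirrors\n// Batcher.queueWork and the worker's result handshake).\n// Pattern 2: shutdown closes done exactly once (sync.Once) and then waits\n// on a WaitGroup before any further teardown (mirrors Batcher.Close).\n\nimport (\n\t\"sync\"\n\t\"testing\"\n)\n\ntype demoWork struct {\n\tid       int\n\tresultCh chan int // non-nil for synchronous requests, like work.resultCh\n}\n\ntype demoPool struct {\n\tworkCh   chan demoWork\n\tdone     chan struct{}\n\tdoneOnce sync.Once\n\twg       sync.WaitGroup\n}\n\nfunc newDemoPool(workers int) *demoPool {\n\tp := &demoPool{\n\t\tworkCh: make(chan demoWork, 8),\n\t\tdone:   make(chan struct{}),\n\t}\n\n\tfor range workers {\n\t\tp.wg.Add(1)\n\n\t\tgo p.worker()\n\t}\n\n\treturn p\n}\n\nfunc (p *demoPool) worker() {\n\tdefer p.wg.Done()\n\n\tfor {\n\t\tselect {\n\t\tcase w := <-p.workCh:\n\t\t\tif w.resultCh != nil {\n\t\t\t\t// Synchronous handshake: reply, but never block past shutdown.\n\t\t\t\tselect {\n\t\t\t\tcase w.resultCh <- w.id * 2:\n\t\t\t\tcase <-p.done:\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-p.done:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// queue mirrors Batcher.queueWork: selecting on done means a producer\n// cannot deadlock if the pool shuts down while the queue is full.\nfunc (p *demoPool) queue(w demoWork) {\n\tselect {\n\tcase p.workCh <- w:\n\tcase <-p.done:\n\t}\n}\n\n// close mirrors Batcher.Close: signal once, then wait for workers to exit\n// before any further teardown. workCh is intentionally never closed because\n// producers may still be sending on it.\nfunc (p *demoPool) close() {\n\tp.doneOnce.Do(func() {\n\t\tclose(p.done)\n\t})\n\tp.wg.Wait()\n}\n\nfunc TestDemoPoolShutdownHandshake(t *testing.T) {\n\tp := newDemoPool(2)\n\n\tresultCh := make(chan int, 1)\n\tp.queue(demoWork{id: 21, resultCh: resultCh})\n\n\tif got := <-resultCh; got != 42 {\n\t\tt.Fatalf(\"expected 42, got %d\", got)\n\t}\n\n\t// Closing twice (or concurrently) is safe thanks to doneOnce.\n\tp.close()\n\tp.close()\n}\n"
  },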
  {
    "path": "hscontrol/mapper/batcher_bench_test.go",
    "content": "package mapper\n\n// Benchmarks for batcher components and full pipeline.\n//\n// Organized into three tiers:\n// - Component benchmarks: individual functions (connectionEntry.send, computePeerDiff, etc.)\n// - System benchmarks: batching mechanics (addToBatch, processBatchedChanges, broadcast)\n// - Full pipeline benchmarks: end-to-end with real DB (gated behind !testing.Short())\n//\n// All benchmarks use sub-benchmarks with 10/100/1000 node counts for scaling analysis.\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/types/change\"\n\t\"github.com/puzpuzpuz/xsync/v4\"\n\t\"github.com/rs/zerolog\"\n\t\"tailscale.com/tailcfg\"\n)\n\n// ============================================================================\n// Component Benchmarks\n// ============================================================================\n\n// BenchmarkConnectionEntry_Send measures the throughput of sending a single\n// MapResponse through a connectionEntry with a buffered channel.\nfunc BenchmarkConnectionEntry_Send(b *testing.B) {\n\tch := make(chan *tailcfg.MapResponse, b.N+1)\n\tentry := makeConnectionEntry(\"bench-conn\", ch)\n\tdata := testMapResponse()\n\n\tb.ResetTimer()\n\n\tfor range b.N {\n\t\t_ = entry.send(data)\n\t}\n}\n\n// BenchmarkMultiChannelSend measures broadcast throughput to multiple connections.\nfunc BenchmarkMultiChannelSend(b *testing.B) {\n\tfor _, connCount := range []int{1, 3, 10} {\n\t\tb.Run(fmt.Sprintf(\"%dconn\", connCount), func(b *testing.B) {\n\t\t\tmc := newMultiChannelNodeConn(1, nil)\n\n\t\t\tchannels := make([]chan *tailcfg.MapResponse, connCount)\n\t\t\tfor i := range channels {\n\t\t\t\tchannels[i] = make(chan *tailcfg.MapResponse, b.N+1)\n\t\t\t\tmc.addConnection(makeConnectionEntry(fmt.Sprintf(\"conn-%d\", i), channels[i]))\n\t\t\t}\n\n\t\t\tdata := testMapResponse()\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor range b.N {\n\t\t\t\t_ = mc.send(data)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkComputePeerDiff measures the cost of computing peer diffs at scale.\nfunc BenchmarkComputePeerDiff(b *testing.B) {\n\tfor _, peerCount := range []int{10, 100, 1000} {\n\t\tb.Run(fmt.Sprintf(\"%dpeers\", peerCount), func(b *testing.B) {\n\t\t\tmc := newMultiChannelNodeConn(1, nil)\n\n\t\t\t// Populate tracked peers: 1..peerCount\n\t\t\tfor i := 1; i <= peerCount; i++ {\n\t\t\t\tmc.lastSentPeers.Store(tailcfg.NodeID(i), struct{}{})\n\t\t\t}\n\n\t\t\t// Current peers: remove ~10% (every 10th peer is missing)\n\t\t\tcurrent := make([]tailcfg.NodeID, 0, peerCount)\n\t\t\tfor i := 1; i <= peerCount; i++ {\n\t\t\t\tif i%10 != 0 {\n\t\t\t\t\tcurrent = append(current, tailcfg.NodeID(i))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor range b.N {\n\t\t\t\t_ = mc.computePeerDiff(current)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkUpdateSentPeers measures the cost of updating peer tracking state.\nfunc BenchmarkUpdateSentPeers(b *testing.B) {\n\tfor _, peerCount := range []int{10, 100, 1000} {\n\t\tb.Run(fmt.Sprintf(\"%dpeers_full\", peerCount), func(b *testing.B) {\n\t\t\tmc := newMultiChannelNodeConn(1, nil)\n\n\t\t\t// Pre-build response with full peer list\n\t\t\tpeerIDs := make([]tailcfg.NodeID, peerCount)\n\t\t\tfor i := range peerIDs {\n\t\t\t\tpeerIDs[i] = tailcfg.NodeID(i + 1)\n\t\t\t}\n\n\t\t\tresp := testMapResponseWithPeers(peerIDs...)\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor range b.N 
{\n\t\t\t\tmc.updateSentPeers(resp)\n\t\t\t}\n\t\t})\n\n\t\tb.Run(fmt.Sprintf(\"%dpeers_incremental\", peerCount), func(b *testing.B) {\n\t\t\tmc := newMultiChannelNodeConn(1, nil)\n\n\t\t\t// Pre-populate with existing peers\n\t\t\tfor i := 1; i <= peerCount; i++ {\n\t\t\t\tmc.lastSentPeers.Store(tailcfg.NodeID(i), struct{}{})\n\t\t\t}\n\n\t\t\t// Build incremental response: add 10% new peers\n\t\t\taddCount := peerCount / 10\n\t\t\tif addCount == 0 {\n\t\t\t\taddCount = 1\n\t\t\t}\n\n\t\t\tresp := testMapResponse()\n\n\t\t\tresp.PeersChanged = make([]*tailcfg.Node, addCount)\n\t\t\tfor i := range addCount {\n\t\t\t\tresp.PeersChanged[i] = &tailcfg.Node{ID: tailcfg.NodeID(peerCount + i + 1)}\n\t\t\t}\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor range b.N {\n\t\t\t\tmc.updateSentPeers(resp)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// ============================================================================\n// System Benchmarks (no DB, batcher mechanics only)\n// ============================================================================\n\n// benchBatcher creates a lightweight batcher for benchmarks. Unlike the test\n// helper, it doesn't register cleanup and suppresses logging.\nfunc benchBatcher(nodeCount, bufferSize int) (*Batcher, map[types.NodeID]chan *tailcfg.MapResponse) {\n\tb := &Batcher{\n\t\ttick:    time.NewTicker(1 * time.Hour), // never fires during bench\n\t\tworkers: 4,\n\t\tworkCh:  make(chan work, 4*200),\n\t\tnodes:   xsync.NewMap[types.NodeID, *multiChannelNodeConn](),\n\t\tdone:    make(chan struct{}),\n\t}\n\n\tchannels := make(map[types.NodeID]chan *tailcfg.MapResponse, nodeCount)\n\tfor i := 1; i <= nodeCount; i++ {\n\t\tid := types.NodeID(i) //nolint:gosec // benchmark with small controlled values\n\t\tmc := newMultiChannelNodeConn(id, nil)\n\t\tch := make(chan *tailcfg.MapResponse, bufferSize)\n\t\tentry := &connectionEntry{\n\t\t\tid:      fmt.Sprintf(\"conn-%d\", i),\n\t\t\tc:       ch,\n\t\t\tversion: tailcfg.CapabilityVersion(100),\n\t\t\tcreated: time.Now(),\n\t\t}\n\t\tentry.lastUsed.Store(time.Now().Unix())\n\t\tmc.addConnection(entry)\n\t\tb.nodes.Store(id, mc)\n\t\tchannels[id] = ch\n\t}\n\n\tb.totalNodes.Store(int64(nodeCount))\n\n\treturn b, channels\n}\n\n// BenchmarkAddToBatch_Broadcast measures the cost of broadcasting a change\n// to all nodes via addToBatch (no worker processing, just queuing).\nfunc BenchmarkAddToBatch_Broadcast(b *testing.B) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, nodeCount := range []int{10, 100, 1000} {\n\t\tb.Run(fmt.Sprintf(\"%dnodes\", nodeCount), func(b *testing.B) {\n\t\t\tbatcher, _ := benchBatcher(nodeCount, 10)\n\n\t\t\tdefer func() {\n\t\t\t\tclose(batcher.done)\n\t\t\t\tbatcher.tick.Stop()\n\t\t\t}()\n\n\t\t\tch := change.DERPMap()\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor range b.N {\n\t\t\t\tbatcher.addToBatch(ch)\n\t\t\t\t// Clear pending to avoid unbounded growth\n\t\t\t\tbatcher.nodes.Range(func(_ types.NodeID, nc *multiChannelNodeConn) bool {\n\t\t\t\t\tnc.drainPending()\n\t\t\t\t\treturn true\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkAddToBatch_Targeted measures the cost of adding a targeted change\n// to a single node.\nfunc BenchmarkAddToBatch_Targeted(b *testing.B) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, nodeCount := range []int{10, 100, 1000} {\n\t\tb.Run(fmt.Sprintf(\"%dnodes\", nodeCount), func(b *testing.B) {\n\t\t\tbatcher, _ := benchBatcher(nodeCount, 10)\n\n\t\t\tdefer 
func() {\n\t\t\t\tclose(batcher.done)\n\t\t\t\tbatcher.tick.Stop()\n\t\t\t}()\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor i := range b.N {\n\t\t\t\ttargetID := types.NodeID(1 + (i % nodeCount)) //nolint:gosec // benchmark\n\t\t\t\tch := change.Change{\n\t\t\t\t\tReason:     \"bench-targeted\",\n\t\t\t\t\tTargetNode: targetID,\n\t\t\t\t\tPeerPatches: []*tailcfg.PeerChange{\n\t\t\t\t\t\t{NodeID: tailcfg.NodeID(targetID)}, //nolint:gosec // benchmark\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\tbatcher.addToBatch(ch)\n\t\t\t\t// Clear pending periodically to avoid growth\n\t\t\t\tif i%100 == 99 {\n\t\t\t\t\tbatcher.nodes.Range(func(_ types.NodeID, nc *multiChannelNodeConn) bool {\n\t\t\t\t\t\tnc.drainPending()\n\t\t\t\t\t\treturn true\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkAddToBatch_FullUpdate measures the cost of a FullUpdate broadcast.\nfunc BenchmarkAddToBatch_FullUpdate(b *testing.B) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, nodeCount := range []int{10, 100, 1000} {\n\t\tb.Run(fmt.Sprintf(\"%dnodes\", nodeCount), func(b *testing.B) {\n\t\t\tbatcher, _ := benchBatcher(nodeCount, 10)\n\n\t\t\tdefer func() {\n\t\t\t\tclose(batcher.done)\n\t\t\t\tbatcher.tick.Stop()\n\t\t\t}()\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor range b.N {\n\t\t\t\tbatcher.addToBatch(change.FullUpdate())\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkProcessBatchedChanges measures the cost of moving pending changes\n// to the work queue.\nfunc BenchmarkProcessBatchedChanges(b *testing.B) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, nodeCount := range []int{10, 100, 1000} {\n\t\tb.Run(fmt.Sprintf(\"%dpending\", nodeCount), func(b *testing.B) {\n\t\t\tbatcher, _ := benchBatcher(nodeCount, 10)\n\t\t\t// Use a very large work channel to avoid blocking\n\t\t\tbatcher.workCh = make(chan work, nodeCount*b.N+1)\n\n\t\t\tdefer func() {\n\t\t\t\tclose(batcher.done)\n\t\t\t\tbatcher.tick.Stop()\n\t\t\t}()\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor range b.N {\n\t\t\t\tb.StopTimer()\n\t\t\t\t// Seed pending changes\n\t\t\t\tfor i := 1; i <= nodeCount; i++ {\n\t\t\t\t\tif nc, ok := batcher.nodes.Load(types.NodeID(i)); ok { //nolint:gosec // benchmark\n\t\t\t\t\t\tnc.appendPending(change.DERPMap())\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tb.StartTimer()\n\n\t\t\t\tbatcher.processBatchedChanges()\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkBroadcastToN measures end-to-end broadcast: addToBatch + processBatchedChanges\n// to N nodes. 
Does NOT include worker processing (MapResponse generation).\nfunc BenchmarkBroadcastToN(b *testing.B) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, nodeCount := range []int{10, 100, 1000} {\n\t\tb.Run(fmt.Sprintf(\"%dnodes\", nodeCount), func(b *testing.B) {\n\t\t\tbatcher, _ := benchBatcher(nodeCount, 10)\n\t\t\tbatcher.workCh = make(chan work, nodeCount*b.N+1)\n\n\t\t\tdefer func() {\n\t\t\t\tclose(batcher.done)\n\t\t\t\tbatcher.tick.Stop()\n\t\t\t}()\n\n\t\t\tch := change.DERPMap()\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor range b.N {\n\t\t\t\tbatcher.addToBatch(ch)\n\t\t\t\tbatcher.processBatchedChanges()\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkMultiChannelBroadcast measures the cost of sending a MapResponse\n// to N nodes each with varying connection counts.\nfunc BenchmarkMultiChannelBroadcast(b *testing.B) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, nodeCount := range []int{10, 100, 1000} {\n\t\tb.Run(fmt.Sprintf(\"%dnodes\", nodeCount), func(b *testing.B) {\n\t\t\tbatcher, _ := benchBatcher(nodeCount, b.N+1)\n\n\t\t\tdefer func() {\n\t\t\t\tclose(batcher.done)\n\t\t\t\tbatcher.tick.Stop()\n\t\t\t}()\n\n\t\t\t// Add extra connections to every 3rd node\n\t\t\tfor i := 1; i <= nodeCount; i++ {\n\t\t\t\tif i%3 == 0 {\n\t\t\t\t\tif mc, ok := batcher.nodes.Load(types.NodeID(i)); ok { //nolint:gosec // benchmark\n\t\t\t\t\t\tfor j := range 2 {\n\t\t\t\t\t\t\tch := make(chan *tailcfg.MapResponse, b.N+1)\n\t\t\t\t\t\t\tentry := &connectionEntry{\n\t\t\t\t\t\t\t\tid:      fmt.Sprintf(\"extra-%d-%d\", i, j),\n\t\t\t\t\t\t\t\tc:       ch,\n\t\t\t\t\t\t\t\tversion: tailcfg.CapabilityVersion(100),\n\t\t\t\t\t\t\t\tcreated: time.Now(),\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tentry.lastUsed.Store(time.Now().Unix())\n\t\t\t\t\t\t\tmc.addConnection(entry)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tdata := testMapResponse()\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor range b.N {\n\t\t\t\tbatcher.nodes.Range(func(_ types.NodeID, mc *multiChannelNodeConn) bool {\n\t\t\t\t\t_ = mc.send(data)\n\t\t\t\t\treturn true\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkConcurrentAddToBatch measures addToBatch throughput under\n// concurrent access from multiple goroutines.\nfunc BenchmarkConcurrentAddToBatch(b *testing.B) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, nodeCount := range []int{10, 100, 1000} {\n\t\tb.Run(fmt.Sprintf(\"%dnodes\", nodeCount), func(b *testing.B) {\n\t\t\tbatcher, _ := benchBatcher(nodeCount, 10)\n\n\t\t\tdefer func() {\n\t\t\t\tclose(batcher.done)\n\t\t\t\tbatcher.tick.Stop()\n\t\t\t}()\n\n\t\t\t// Background goroutine to drain pending periodically\n\t\t\tdrainDone := make(chan struct{})\n\n\t\t\tgo func() {\n\t\t\t\tdefer close(drainDone)\n\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-batcher.done:\n\t\t\t\t\t\treturn\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tbatcher.nodes.Range(func(_ types.NodeID, nc *multiChannelNodeConn) bool {\n\t\t\t\t\t\t\tnc.drainPending()\n\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t})\n\t\t\t\t\t\ttime.Sleep(time.Millisecond) //nolint:forbidigo // benchmark drain loop\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tch := change.DERPMap()\n\n\t\t\tb.ResetTimer()\n\t\t\tb.RunParallel(func(pb *testing.PB) {\n\t\t\t\tfor pb.Next() {\n\t\t\t\t\tbatcher.addToBatch(ch)\n\t\t\t\t}\n\t\t\t})\n\t\t\tb.StopTimer()\n\n\t\t\t// 
Cleanup\n\t\t\tclose(batcher.done)\n\t\t\t<-drainDone\n\t\t\t// Re-open done so the defer doesn't double-close\n\t\t\tbatcher.done = make(chan struct{})\n\t\t})\n\t}\n}\n\n// BenchmarkIsConnected measures the read throughput of IsConnected checks.\nfunc BenchmarkIsConnected(b *testing.B) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, nodeCount := range []int{10, 100, 1000} {\n\t\tb.Run(fmt.Sprintf(\"%dnodes\", nodeCount), func(b *testing.B) {\n\t\t\tbatcher, _ := benchBatcher(nodeCount, 1)\n\n\t\t\tdefer func() {\n\t\t\t\tclose(batcher.done)\n\t\t\t\tbatcher.tick.Stop()\n\t\t\t}()\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor i := range b.N {\n\t\t\t\tid := types.NodeID(1 + (i % nodeCount)) //nolint:gosec // benchmark\n\t\t\t\t_ = batcher.IsConnected(id)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkConnectedMap measures the cost of building the full connected map.\nfunc BenchmarkConnectedMap(b *testing.B) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, nodeCount := range []int{10, 100, 1000} {\n\t\tb.Run(fmt.Sprintf(\"%dnodes\", nodeCount), func(b *testing.B) {\n\t\t\tbatcher, channels := benchBatcher(nodeCount, 1)\n\n\t\t\tdefer func() {\n\t\t\t\tclose(batcher.done)\n\t\t\t\tbatcher.tick.Stop()\n\t\t\t}()\n\n\t\t\t// Disconnect 10% of nodes for a realistic mix\n\t\t\tfor i := 1; i <= nodeCount; i++ {\n\t\t\t\tif i%10 == 0 {\n\t\t\t\t\tid := types.NodeID(i) //nolint:gosec // benchmark\n\t\t\t\t\tif mc, ok := batcher.nodes.Load(id); ok {\n\t\t\t\t\t\tmc.removeConnectionByChannel(channels[id])\n\t\t\t\t\t\tmc.markDisconnected()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor range b.N {\n\t\t\t\t_ = batcher.ConnectedMap()\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkConnectionChurn measures the cost of add/remove connection cycling\n// which simulates client reconnection patterns.\nfunc BenchmarkConnectionChurn(b *testing.B) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, nodeCount := range []int{10, 100, 1000} {\n\t\tb.Run(fmt.Sprintf(\"%dnodes\", nodeCount), func(b *testing.B) {\n\t\t\tbatcher, channels := benchBatcher(nodeCount, 10)\n\n\t\t\tdefer func() {\n\t\t\t\tclose(batcher.done)\n\t\t\t\tbatcher.tick.Stop()\n\t\t\t}()\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor i := range b.N {\n\t\t\t\tid := types.NodeID(1 + (i % nodeCount)) //nolint:gosec // benchmark\n\n\t\t\t\tmc, ok := batcher.nodes.Load(id)\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// Remove old connection\n\t\t\t\toldCh := channels[id]\n\t\t\t\tmc.removeConnectionByChannel(oldCh)\n\n\t\t\t\t// Add new connection\n\t\t\t\tnewCh := make(chan *tailcfg.MapResponse, 10)\n\t\t\t\tentry := &connectionEntry{\n\t\t\t\t\tid:      fmt.Sprintf(\"churn-%d\", i),\n\t\t\t\t\tc:       newCh,\n\t\t\t\t\tversion: tailcfg.CapabilityVersion(100),\n\t\t\t\t\tcreated: time.Now(),\n\t\t\t\t}\n\t\t\t\tentry.lastUsed.Store(time.Now().Unix())\n\t\t\t\tmc.addConnection(entry)\n\n\t\t\t\tchannels[id] = newCh\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkConcurrentSendAndChurn measures the combined cost of sends happening\n// concurrently with connection churn - the hot path in production.\nfunc BenchmarkConcurrentSendAndChurn(b *testing.B) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, nodeCount := range []int{10, 100} {\n\t\tb.Run(fmt.Sprintf(\"%dnodes\", nodeCount), func(b *testing.B) 
{\n\t\t\tbatcher, channels := benchBatcher(nodeCount, 100)\n\n\t\t\tvar mu sync.Mutex // protect channels map\n\n\t\t\tstopChurn := make(chan struct{})\n\t\t\tdefer close(stopChurn)\n\n\t\t\t// Background churn on 10% of nodes\n\t\t\tgo func() {\n\t\t\t\ti := 0\n\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-stopChurn:\n\t\t\t\t\t\treturn\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tid := types.NodeID(1 + (i % nodeCount)) //nolint:gosec // benchmark\n\t\t\t\t\t\tif i%10 == 0 {                          // only churn 10%\n\t\t\t\t\t\t\tmc, ok := batcher.nodes.Load(id)\n\t\t\t\t\t\t\tif ok {\n\t\t\t\t\t\t\t\tmu.Lock()\n\t\t\t\t\t\t\t\toldCh := channels[id]\n\t\t\t\t\t\t\t\tmu.Unlock()\n\t\t\t\t\t\t\t\tmc.removeConnectionByChannel(oldCh)\n\n\t\t\t\t\t\t\t\tnewCh := make(chan *tailcfg.MapResponse, 100)\n\t\t\t\t\t\t\t\tentry := &connectionEntry{\n\t\t\t\t\t\t\t\t\tid:      fmt.Sprintf(\"churn-%d\", i),\n\t\t\t\t\t\t\t\t\tc:       newCh,\n\t\t\t\t\t\t\t\t\tversion: tailcfg.CapabilityVersion(100),\n\t\t\t\t\t\t\t\t\tcreated: time.Now(),\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tentry.lastUsed.Store(time.Now().Unix())\n\t\t\t\t\t\t\t\tmc.addConnection(entry)\n\t\t\t\t\t\t\t\tmu.Lock()\n\t\t\t\t\t\t\t\tchannels[id] = newCh\n\t\t\t\t\t\t\t\tmu.Unlock()\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\ti++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tdata := testMapResponse()\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor range b.N {\n\t\t\t\tbatcher.nodes.Range(func(_ types.NodeID, mc *multiChannelNodeConn) bool {\n\t\t\t\t\t_ = mc.send(data)\n\t\t\t\t\treturn true\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\n// ============================================================================\n// Full Pipeline Benchmarks (with DB)\n// ============================================================================\n\n// BenchmarkAddNode measures the cost of adding nodes to the batcher,\n// including initial MapResponse generation from a real database.\nfunc BenchmarkAddNode(b *testing.B) {\n\tif testing.Short() {\n\t\tb.Skip(\"skipping full pipeline benchmark in short mode\")\n\t}\n\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, nodeCount := range []int{10, 100} {\n\t\tb.Run(fmt.Sprintf(\"%dnodes\", nodeCount), func(b *testing.B) {\n\t\t\ttestData, cleanup := setupBatcherWithTestData(b, NewBatcherAndMapper, 1, nodeCount, largeBufferSize)\n\t\t\tdefer cleanup()\n\n\t\t\tbatcher := testData.Batcher\n\t\t\tallNodes := testData.Nodes\n\n\t\t\t// Start consumers\n\t\t\tfor i := range allNodes {\n\t\t\t\tallNodes[i].start()\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tfor i := range allNodes {\n\t\t\t\t\tallNodes[i].cleanup()\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor range b.N {\n\t\t\t\t// Connect all nodes (measuring AddNode cost)\n\t\t\t\tfor i := range allNodes {\n\t\t\t\t\tnode := &allNodes[i]\n\t\t\t\t\t_ = batcher.AddNode(node.n.ID, node.ch, tailcfg.CapabilityVersion(100), nil)\n\t\t\t\t}\n\n\t\t\t\tb.StopTimer()\n\t\t\t\t// Disconnect for next iteration\n\t\t\t\tfor i := range allNodes {\n\t\t\t\t\tnode := &allNodes[i]\n\t\t\t\t\tbatcher.RemoveNode(node.n.ID, node.ch)\n\t\t\t\t}\n\t\t\t\t// Drain channels\n\t\t\t\tfor i := range allNodes {\n\t\t\t\t\tfor {\n\t\t\t\t\t\tselect {\n\t\t\t\t\t\tcase <-allNodes[i].ch:\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tgoto drained\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\tdrained:\n\t\t\t\t}\n\n\t\t\t\tb.StartTimer()\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkFullPipeline measures the full pipeline cost: addToBatch → 
processBatchedChanges\n// → worker → generateMapResponse → send, with real nodes from a database.\nfunc BenchmarkFullPipeline(b *testing.B) {\n\tif testing.Short() {\n\t\tb.Skip(\"skipping full pipeline benchmark in short mode\")\n\t}\n\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, nodeCount := range []int{10, 100} {\n\t\tb.Run(fmt.Sprintf(\"%dnodes\", nodeCount), func(b *testing.B) {\n\t\t\ttestData, cleanup := setupBatcherWithTestData(b, NewBatcherAndMapper, 1, nodeCount, largeBufferSize)\n\t\t\tdefer cleanup()\n\n\t\t\tbatcher := testData.Batcher\n\t\t\tallNodes := testData.Nodes\n\n\t\t\t// Start consumers\n\t\t\tfor i := range allNodes {\n\t\t\t\tallNodes[i].start()\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tfor i := range allNodes {\n\t\t\t\t\tallNodes[i].cleanup()\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\t// Connect all nodes first\n\t\t\tfor i := range allNodes {\n\t\t\t\tnode := &allNodes[i]\n\n\t\t\t\terr := batcher.AddNode(node.n.ID, node.ch, tailcfg.CapabilityVersion(100), nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\tb.Fatalf(\"failed to add node %d: %v\", i, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Wait for initial maps to settle\n\t\t\ttime.Sleep(200 * time.Millisecond) //nolint:forbidigo // benchmark coordination\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor range b.N {\n\t\t\t\tbatcher.AddWork(change.DERPMap())\n\t\t\t\t// Allow workers to process (the batcher tick is what normally\n\t\t\t\t// triggers processBatchedChanges, but for benchmarks we need\n\t\t\t\t// to give the system time to process)\n\t\t\t\ttime.Sleep(20 * time.Millisecond) //nolint:forbidigo // benchmark coordination\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkMapResponseFromChange measures the cost of synchronous\n// MapResponse generation for individual nodes.\nfunc BenchmarkMapResponseFromChange(b *testing.B) {\n\tif testing.Short() {\n\t\tb.Skip(\"skipping full pipeline benchmark in short mode\")\n\t}\n\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, nodeCount := range []int{10, 100} {\n\t\tb.Run(fmt.Sprintf(\"%dnodes\", nodeCount), func(b *testing.B) {\n\t\t\ttestData, cleanup := setupBatcherWithTestData(b, NewBatcherAndMapper, 1, nodeCount, largeBufferSize)\n\t\t\tdefer cleanup()\n\n\t\t\tbatcher := testData.Batcher\n\t\t\tallNodes := testData.Nodes\n\n\t\t\t// Start consumers\n\t\t\tfor i := range allNodes {\n\t\t\t\tallNodes[i].start()\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tfor i := range allNodes {\n\t\t\t\t\tallNodes[i].cleanup()\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\t// Connect all nodes\n\t\t\tfor i := range allNodes {\n\t\t\t\tnode := &allNodes[i]\n\n\t\t\t\terr := batcher.AddNode(node.n.ID, node.ch, tailcfg.CapabilityVersion(100), nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\tb.Fatalf(\"failed to add node %d: %v\", i, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ttime.Sleep(200 * time.Millisecond) //nolint:forbidigo // benchmark coordination\n\n\t\t\tch := change.DERPMap()\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor i := range b.N {\n\t\t\t\tnodeIdx := i % len(allNodes)\n\t\t\t\t_, _ = batcher.MapResponseFromChange(allNodes[nodeIdx].n.ID, ch)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
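  {
    "path": "hscontrol/mapper/batcher_compute_example_test.go",
    "content": "package mapper\n\n// Illustrative, self-contained sketch of the atomic check-and-delete\n// pattern that cleanupOfflineNodes uses to avoid its TOCTOU race. The\n// demoSession type and helpers below are hypothetical documentation; the\n// xsync.Map Compute/CancelOp/DeleteOp calls mirror the real ones in the\n// batcher.\n//\n// Key idea: the liveness check and the delete happen inside a single\n// Compute callback, so a concurrent reconnect either lands before the\n// callback (and cancels the delete) or after it (and re-creates the entry\n// via LoadOrStore).\n\nimport (\n\t\"sync/atomic\"\n\t\"testing\"\n\n\t\"github.com/puzpuzpuz/xsync/v4\"\n)\n\ntype demoSession struct {\n\tactive atomic.Bool\n}\n\n// cleanupIfInactive deletes id only if its session is still inactive at the\n// moment of deletion, mirroring cleanupOfflineNodes. Reports whether the\n// entry was deleted.\nfunc cleanupIfInactive(m *xsync.Map[int, *demoSession], id int) bool {\n\tdeleted := false\n\n\tm.Compute(id, func(s *demoSession, loaded bool) (*demoSession, xsync.ComputeOp) {\n\t\tif !loaded || s == nil || s.active.Load() {\n\t\t\t// Entry is gone or has reconnected: cancel the delete.\n\t\t\treturn s, xsync.CancelOp\n\t\t}\n\n\t\tdeleted = true\n\n\t\treturn s, xsync.DeleteOp\n\t})\n\n\treturn deleted\n}\n\nfunc TestDemoComputeCheckAndDelete(t *testing.T) {\n\tm := xsync.NewMap[int, *demoSession]()\n\n\t// An inactive session is deleted.\n\tm.Store(1, &demoSession{})\n\n\tif !cleanupIfInactive(m, 1) {\n\t\tt.Fatal(\"expected inactive session to be deleted\")\n\t}\n\n\tif _, ok := m.Load(1); ok {\n\t\tt.Fatal(\"session 1 should be gone\")\n\t}\n\n\t// A session that reconnected before cleanup survives, because the\n\t// active check runs inside the same Compute callback as the delete.\n\ts := &demoSession{}\n\ts.active.Store(true)\n\tm.Store(2, s)\n\n\tif cleanupIfInactive(m, 2) {\n\t\tt.Fatal(\"active session must not be deleted\")\n\t}\n\n\tif _, ok := m.Load(2); !ok {\n\t\tt.Fatal(\"session 2 should still exist\")\n\t}\n}\n"
  },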
  {
    "path": "hscontrol/mapper/batcher_concurrency_test.go",
    "content": "package mapper\n\n// Concurrency, lifecycle, and scale tests for the batcher.\n// Tests in this file exercise:\n// - addToBatch and processBatchedChanges under concurrent access\n// - cleanupOfflineNodes correctness\n// - Batcher lifecycle (Close, shutdown, double-close)\n// - 1000-node scale testing of batching and channel mechanics\n//\n// Most tests use the lightweight batcher helper which creates a batcher with\n// pre-populated nodes but NO database, enabling fast 1000-node tests.\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/types/change\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/puzpuzpuz/xsync/v4\"\n\t\"github.com/rs/zerolog\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/tailcfg\"\n)\n\n// ============================================================================\n// Lightweight Batcher Helper (no database needed)\n// ============================================================================\n\n// lightweightBatcher provides a batcher with pre-populated nodes for testing\n// the batching, channel, and concurrency mechanics without database overhead.\ntype lightweightBatcher struct {\n\tb        *Batcher\n\tchannels map[types.NodeID]chan *tailcfg.MapResponse\n}\n\n// setupLightweightBatcher creates a batcher with nodeCount pre-populated nodes.\n// Each node gets a buffered channel of bufferSize. The batcher's worker loop\n// is NOT started (no doWork), so addToBatch/processBatchedChanges can be tested\n// in isolation. Use startWorkers() if you need the full loop.\nfunc setupLightweightBatcher(t *testing.T, nodeCount, bufferSize int) *lightweightBatcher {\n\tt.Helper()\n\n\tb := &Batcher{\n\t\ttick:    time.NewTicker(10 * time.Millisecond),\n\t\tworkers: 4,\n\t\tworkCh:  make(chan work, 4*200),\n\t\tnodes:   xsync.NewMap[types.NodeID, *multiChannelNodeConn](),\n\t\tdone:    make(chan struct{}),\n\t}\n\n\tchannels := make(map[types.NodeID]chan *tailcfg.MapResponse, nodeCount)\n\tfor i := 1; i <= nodeCount; i++ {\n\t\tid := types.NodeID(i)                  //nolint:gosec // test with small controlled values\n\t\tmc := newMultiChannelNodeConn(id, nil) // nil mapper is fine for channel tests\n\t\tch := make(chan *tailcfg.MapResponse, bufferSize)\n\t\tentry := &connectionEntry{\n\t\t\tid:      fmt.Sprintf(\"conn-%d\", i),\n\t\t\tc:       ch,\n\t\t\tversion: tailcfg.CapabilityVersion(100),\n\t\t\tcreated: time.Now(),\n\t\t}\n\t\tentry.lastUsed.Store(time.Now().Unix())\n\t\tmc.addConnection(entry)\n\t\tb.nodes.Store(id, mc)\n\t\tchannels[id] = ch\n\t}\n\n\tb.totalNodes.Store(int64(nodeCount))\n\n\treturn &lightweightBatcher{b: b, channels: channels}\n}\n\nfunc (lb *lightweightBatcher) cleanup() {\n\tlb.b.doneOnce.Do(func() {\n\t\tclose(lb.b.done)\n\t})\n\tlb.b.tick.Stop()\n}\n\n// countTotalPending counts total pending change entries across all nodes.\nfunc countTotalPending(b *Batcher) int {\n\tcount := 0\n\n\tb.nodes.Range(func(_ types.NodeID, nc *multiChannelNodeConn) bool {\n\t\tnc.pendingMu.Lock()\n\t\tcount += len(nc.pending)\n\t\tnc.pendingMu.Unlock()\n\n\t\treturn true\n\t})\n\n\treturn count\n}\n\n// countNodesPending counts how many nodes have pending changes.\nfunc countNodesPending(b *Batcher) int {\n\tcount := 0\n\n\tb.nodes.Range(func(_ types.NodeID, nc *multiChannelNodeConn) bool {\n\t\tnc.pendingMu.Lock()\n\t\thasPending 
:= len(nc.pending) > 0\n\t\tnc.pendingMu.Unlock()\n\n\t\tif hasPending {\n\t\t\tcount++\n\t\t}\n\n\t\treturn true\n\t})\n\n\treturn count\n}\n\n// getPendingForNode returns pending changes for a specific node.\nfunc getPendingForNode(b *Batcher, id types.NodeID) []change.Change {\n\tnc, ok := b.nodes.Load(id)\n\tif !ok {\n\t\treturn nil\n\t}\n\n\tnc.pendingMu.Lock()\n\tpending := make([]change.Change, len(nc.pending))\n\tcopy(pending, nc.pending)\n\tnc.pendingMu.Unlock()\n\n\treturn pending\n}\n\n// runConcurrently runs n goroutines executing fn, waits for all to finish,\n// and returns the number of panics caught.\nfunc runConcurrently(t *testing.T, n int, fn func(i int)) int {\n\tt.Helper()\n\n\tvar (\n\t\twg     sync.WaitGroup\n\t\tpanics atomic.Int64\n\t)\n\n\tfor i := range n {\n\t\twg.Add(1)\n\n\t\tgo func(idx int) {\n\t\t\tdefer wg.Done()\n\t\t\tdefer func() {\n\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\tpanics.Add(1)\n\t\t\t\t\tt.Logf(\"panic in goroutine %d: %v\", idx, r)\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tfn(idx)\n\t\t}(i)\n\t}\n\n\twg.Wait()\n\n\treturn int(panics.Load())\n}\n\n// runConcurrentlyWithTimeout is like runConcurrently but fails if not done\n// within timeout (deadlock detection).\nfunc runConcurrentlyWithTimeout(t *testing.T, n int, timeout time.Duration, fn func(i int)) int {\n\tt.Helper()\n\n\tdone := make(chan int, 1)\n\n\tgo func() {\n\t\tdone <- runConcurrently(t, n, fn)\n\t}()\n\n\tselect {\n\tcase panics := <-done:\n\t\treturn panics\n\tcase <-time.After(timeout):\n\t\tt.Fatalf(\"deadlock detected: %d goroutines did not complete within %v\", n, timeout)\n\t\treturn -1\n\t}\n}\n\n// ============================================================================\n// addToBatch Concurrency Tests\n// ============================================================================\n\n// TestAddToBatch_ConcurrentTargeted_NoDataLoss verifies that concurrent\n// targeted addToBatch calls do not lose data.\n//\n// Previously (Bug #1): addToBatch used LoadOrStore→append→Store on a\n// separate pendingChanges map, which was NOT atomic. Two goroutines could\n// Load the same slice, both append, and one Store would overwrite the other.\n// FIX: pendingChanges moved into multiChannelNodeConn with mutex protection,\n// eliminating the race entirely.\nfunc TestAddToBatch_ConcurrentTargeted_NoDataLoss(t *testing.T) {\n\tlb := setupLightweightBatcher(t, 10, 10)\n\tdefer lb.cleanup()\n\n\ttargetNode := types.NodeID(1)\n\n\tconst goroutines = 100\n\n\t// Each goroutine adds one targeted change to the same node\n\tpanics := runConcurrentlyWithTimeout(t, goroutines, 10*time.Second, func(i int) {\n\t\tch := change.Change{\n\t\t\tReason:     fmt.Sprintf(\"targeted-%d\", i),\n\t\t\tTargetNode: targetNode,\n\t\t\tPeerPatches: []*tailcfg.PeerChange{\n\t\t\t\t{NodeID: tailcfg.NodeID(i + 100)}, //nolint:gosec // test\n\t\t\t},\n\t\t}\n\t\tlb.b.addToBatch(ch)\n\t})\n\n\trequire.Zero(t, panics, \"no panics expected\")\n\n\t// All 100 changes MUST be present. 
The Load→append→Store race causes\n\t// data loss: typically 30-50% of changes are silently dropped.\n\tpending := getPendingForNode(lb.b, targetNode)\n\tt.Logf(\"targeted changes: expected=%d, got=%d (lost=%d)\",\n\t\tgoroutines, len(pending), goroutines-len(pending))\n\n\tassert.Len(t, pending, goroutines,\n\t\t\"addToBatch lost %d/%d targeted changes under concurrent access\",\n\t\tgoroutines-len(pending), goroutines)\n}\n\n// TestAddToBatch_ConcurrentBroadcast verifies that concurrent broadcasts\n// distribute changes to all nodes.\nfunc TestAddToBatch_ConcurrentBroadcast(t *testing.T) {\n\tlb := setupLightweightBatcher(t, 50, 10)\n\tdefer lb.cleanup()\n\n\tconst goroutines = 50\n\n\tpanics := runConcurrentlyWithTimeout(t, goroutines, 10*time.Second, func(_ int) {\n\t\tlb.b.addToBatch(change.DERPMap())\n\t})\n\n\tassert.Zero(t, panics, \"no panics expected\")\n\n\t// Each node should have received some DERP changes\n\tnodesWithPending := countNodesPending(lb.b)\n\tt.Logf(\"nodes with pending changes: %d/%d\", nodesWithPending, 50)\n\tassert.Positive(t, nodesWithPending,\n\t\t\"at least some nodes should have pending changes after broadcast\")\n}\n\n// TestAddToBatch_FullUpdateOverrides verifies that a FullUpdate replaces\n// all pending changes for every node.\nfunc TestAddToBatch_FullUpdateOverrides(t *testing.T) {\n\tlb := setupLightweightBatcher(t, 10, 10)\n\tdefer lb.cleanup()\n\n\t// Add some targeted changes first\n\tfor i := 1; i <= 10; i++ {\n\t\tlb.b.addToBatch(change.Change{\n\t\t\tReason:     \"pre-existing\",\n\t\t\tTargetNode: types.NodeID(i), //nolint:gosec // test with small values\n\t\t\tPeerPatches: []*tailcfg.PeerChange{\n\t\t\t\t{NodeID: tailcfg.NodeID(100 + i)}, //nolint:gosec // test with small values\n\t\t\t},\n\t\t})\n\t}\n\n\t// Full update should replace all pending changes\n\tlb.b.addToBatch(change.FullUpdate())\n\n\t// Every node should have exactly one pending change (the FullUpdate)\n\tlb.b.nodes.Range(func(id types.NodeID, _ *multiChannelNodeConn) bool {\n\t\tpending := getPendingForNode(lb.b, id)\n\t\trequire.Len(t, pending, 1, \"node %d should have exactly 1 pending (FullUpdate)\", id)\n\t\tassert.True(t, pending[0].IsFull(), \"pending change should be a full update\")\n\n\t\treturn true\n\t})\n}\n\n// TestAddToBatch_NodeRemovalCleanup verifies that PeersRemoved in a change\n// cleans up the node from the batcher's internal state.\nfunc TestAddToBatch_NodeRemovalCleanup(t *testing.T) {\n\tlb := setupLightweightBatcher(t, 5, 10)\n\tdefer lb.cleanup()\n\n\tremovedNode := types.NodeID(3)\n\n\t// Verify node exists before removal\n\t_, exists := lb.b.nodes.Load(removedNode)\n\trequire.True(t, exists, \"node 3 should exist before removal\")\n\n\t// Send a change that includes node 3 in PeersRemoved\n\tlb.b.addToBatch(change.Change{\n\t\tReason:       \"node deleted\",\n\t\tPeersRemoved: []types.NodeID{removedNode},\n\t})\n\n\t// Node should be removed from the nodes map\n\t_, exists = lb.b.nodes.Load(removedNode)\n\tassert.False(t, exists, \"node 3 should be removed from nodes map\")\n\n\tpending := getPendingForNode(lb.b, removedNode)\n\tassert.Empty(t, pending, \"node 3 should have no pending changes\")\n\n\tassert.Equal(t, int64(4), lb.b.totalNodes.Load(), \"total nodes should be decremented\")\n}\n\n// ============================================================================\n// processBatchedChanges Tests\n// ============================================================================\n\n// TestProcessBatchedChanges_QueuesWork verifies that 
processBatchedChanges\n// moves pending changes to the work queue and clears them.\nfunc TestProcessBatchedChanges_QueuesWork(t *testing.T) {\n\tlb := setupLightweightBatcher(t, 3, 10)\n\tdefer lb.cleanup()\n\n\t// Add pending changes for each node\n\tfor i := 1; i <= 3; i++ {\n\t\tif nc, ok := lb.b.nodes.Load(types.NodeID(i)); ok { //nolint:gosec // test\n\t\t\tnc.appendPending(change.DERPMap())\n\t\t}\n\t}\n\n\tlb.b.processBatchedChanges()\n\n\t// Pending should be cleared\n\tassert.Equal(t, 0, countNodesPending(lb.b),\n\t\t\"all pending changes should be cleared after processing\")\n\n\t// Work items should be on the work channel\n\tassert.Len(t, lb.b.workCh, 3,\n\t\t\"3 work items should be queued\")\n}\n\n// TestProcessBatchedChanges_ConcurrentAdd_NoDataLoss verifies that concurrent\n// addToBatch and processBatchedChanges calls do not lose data.\n//\n// Previously (Bug #2): processBatchedChanges used Range→Delete on a separate\n// pendingChanges map. A concurrent addToBatch could Store new changes between\n// Range reading the key and Delete removing it, losing freshly-stored changes.\n// FIX: pendingChanges moved into multiChannelNodeConn with atomic drainPending(),\n// eliminating the race entirely.\nfunc TestProcessBatchedChanges_ConcurrentAdd_NoDataLoss(t *testing.T) {\n\t// Use a single node to maximize contention on one key.\n\tlb := setupLightweightBatcher(t, 1, 10)\n\tdefer lb.cleanup()\n\n\t// Use a large work channel so processBatchedChanges never blocks.\n\tlb.b.workCh = make(chan work, 100000)\n\n\tconst iterations = 500\n\n\tvar addedCount atomic.Int64\n\n\tvar wg sync.WaitGroup\n\n\t// Goroutine 1: continuously add targeted changes to node 1\n\n\twg.Go(func() {\n\t\tfor i := range iterations {\n\t\t\tlb.b.addToBatch(change.Change{\n\t\t\t\tReason:     fmt.Sprintf(\"add-%d\", i),\n\t\t\t\tTargetNode: types.NodeID(1),\n\t\t\t\tPeerPatches: []*tailcfg.PeerChange{\n\t\t\t\t\t{NodeID: tailcfg.NodeID(i + 100)}, //nolint:gosec // test\n\t\t\t\t},\n\t\t\t})\n\t\t\taddedCount.Add(1)\n\t\t}\n\t})\n\n\t// Goroutine 2: continuously process batched changes\n\n\twg.Go(func() {\n\t\tfor range iterations {\n\t\t\tlb.b.processBatchedChanges()\n\t\t}\n\t})\n\n\twg.Wait()\n\n\t// One final process to flush any remaining\n\tlb.b.processBatchedChanges()\n\n\t// Count total changes across all bundled work items in the channel.\n\t// Each work item may contain multiple changes since processBatchedChanges\n\t// bundles all pending changes per node into a single work item.\n\tqueuedChanges := 0\n\n\tworkItems := len(lb.b.workCh)\n\tfor range workItems {\n\t\tw := <-lb.b.workCh\n\t\tqueuedChanges += len(w.changes)\n\t}\n\t// Also count any still-pending\n\tremaining := len(getPendingForNode(lb.b, types.NodeID(1)))\n\n\ttotal := queuedChanges + remaining\n\tadded := int(addedCount.Load())\n\n\tt.Logf(\"added=%d, queued_changes=%d (in %d work items), still_pending=%d, total_accounted=%d, lost=%d\",\n\t\tadded, queuedChanges, workItems, remaining, total, added-total)\n\n\t// Every added change must either be in the work queue or still pending.\n\tassert.Equal(t, added, total,\n\t\t\"processBatchedChanges has %d inconsistent changes (%d added vs %d accounted) \"+\n\t\t\t\"under concurrent access\",\n\t\ttotal-added, added, total)\n}\n\n// TestProcessBatchedChanges_EmptyPending verifies processBatchedChanges\n// is a no-op when there are no pending changes.\nfunc TestProcessBatchedChanges_EmptyPending(t *testing.T) {\n\tlb := setupLightweightBatcher(t, 5, 10)\n\tdefer 
lb.cleanup()\n\n\tlb.b.processBatchedChanges()\n\n\tassert.Empty(t, lb.b.workCh,\n\t\t\"no work should be queued when there are no pending changes\")\n}\n\n// TestProcessBatchedChanges_BundlesChangesPerNode verifies that multiple\n// pending changes for the same node are bundled into a single work item.\n// This prevents out-of-order delivery when different workers pick up\n// separate changes for the same node.\nfunc TestProcessBatchedChanges_BundlesChangesPerNode(t *testing.T) {\n\tlb := setupLightweightBatcher(t, 3, 10)\n\tdefer lb.cleanup()\n\n\t// Add multiple pending changes for node 1\n\tif nc, ok := lb.b.nodes.Load(types.NodeID(1)); ok {\n\t\tnc.appendPending(change.DERPMap())\n\t\tnc.appendPending(change.DNSConfig())\n\t\tnc.appendPending(change.PolicyOnly())\n\t}\n\t// Single change for node 2\n\tif nc, ok := lb.b.nodes.Load(types.NodeID(2)); ok {\n\t\tnc.appendPending(change.DERPMap())\n\t}\n\n\tlb.b.processBatchedChanges()\n\n\t// Should produce exactly 2 work items: one per node with pending changes.\n\t// Node 3 had no pending changes, so no work item for it.\n\tassert.Len(t, lb.b.workCh, 2,\n\t\t\"should produce one work item per node, not per change\")\n\n\t// Drain and verify the bundled changes are intact\n\ttotalChanges := 0\n\n\tfor range 2 {\n\t\tw := <-lb.b.workCh\n\n\t\ttotalChanges += len(w.changes)\n\t\tif w.nodeID == types.NodeID(1) {\n\t\t\tassert.Len(t, w.changes, 3,\n\t\t\t\t\"node 1's work item should contain all 3 changes\")\n\t\t} else {\n\t\t\tassert.Len(t, w.changes, 1,\n\t\t\t\t\"node 2's work item should contain 1 change\")\n\t\t}\n\t}\n\n\tassert.Equal(t, 4, totalChanges, \"total changes across all work items\")\n}\n\n// TestWorkMu_PreventsInterTickRace verifies that workMu serializes change\n// processing across consecutive batch ticks. 
Without workMu, two workers\n// could process bundles from tick N and tick N+1 concurrently for the same\n// node, causing out-of-order delivery and races on lastSentPeers.\nfunc TestWorkMu_PreventsInterTickRace(t *testing.T) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tmc := newMultiChannelNodeConn(1, nil)\n\tch := make(chan *tailcfg.MapResponse, 100)\n\tentry := &connectionEntry{\n\t\tid:      \"test\",\n\t\tc:       ch,\n\t\tversion: tailcfg.CapabilityVersion(100),\n\t\tcreated: time.Now(),\n\t}\n\tentry.lastUsed.Store(time.Now().Unix())\n\tmc.addConnection(entry)\n\n\t// Track the order in which work completes\n\tvar (\n\t\torder []int\n\t\tmu    sync.Mutex\n\t)\n\n\trecord := func(id int) {\n\t\tmu.Lock()\n\n\t\torder = append(order, id)\n\t\tmu.Unlock()\n\t}\n\n\tvar wg sync.WaitGroup\n\n\t// Simulate two workers grabbing consecutive tick bundles.\n\t// Worker 1 holds workMu and sleeps, worker 2 must wait.\n\twg.Go(func() {\n\t\tmc.workMu.Lock()\n\t\t// Simulate processing time for tick N's bundle\n\t\ttime.Sleep(50 * time.Millisecond) //nolint:forbidigo\n\t\trecord(1)\n\t\tmc.workMu.Unlock()\n\t})\n\n\t// Small delay so worker 1 grabs the lock first\n\ttime.Sleep(5 * time.Millisecond) //nolint:forbidigo\n\n\twg.Go(func() {\n\t\tmc.workMu.Lock()\n\t\trecord(2)\n\t\tmc.workMu.Unlock()\n\t})\n\n\twg.Wait()\n\n\tmu.Lock()\n\tdefer mu.Unlock()\n\n\trequire.Len(t, order, 2)\n\tassert.Equal(t, 1, order[0], \"worker 1 (tick N) should complete first\")\n\tassert.Equal(t, 2, order[1], \"worker 2 (tick N+1) should complete second\")\n}\n\n// ============================================================================\n// cleanupOfflineNodes Tests\n// ============================================================================\n\n// TestCleanupOfflineNodes_RemovesOld verifies that nodes offline longer\n// than the 15-minute threshold are removed.\nfunc TestCleanupOfflineNodes_RemovesOld(t *testing.T) {\n\tlb := setupLightweightBatcher(t, 5, 10)\n\tdefer lb.cleanup()\n\n\t// Remove node 3's active connections and mark it disconnected 20 minutes ago\n\tif mc, ok := lb.b.nodes.Load(types.NodeID(3)); ok {\n\t\tch := lb.channels[types.NodeID(3)]\n\t\tmc.removeConnectionByChannel(ch)\n\n\t\toldTime := time.Now().Add(-20 * time.Minute)\n\t\tmc.disconnectedAt.Store(&oldTime)\n\t}\n\n\tlb.b.cleanupOfflineNodes()\n\n\t_, exists := lb.b.nodes.Load(types.NodeID(3))\n\tassert.False(t, exists, \"node 3 should be cleaned up (offline >15min)\")\n\n\t// Other nodes should still be present\n\t_, exists = lb.b.nodes.Load(types.NodeID(1))\n\tassert.True(t, exists, \"node 1 should still exist\")\n}\n\n// TestCleanupOfflineNodes_KeepsRecent verifies that recently disconnected\n// nodes are not cleaned up.\nfunc TestCleanupOfflineNodes_KeepsRecent(t *testing.T) {\n\tlb := setupLightweightBatcher(t, 5, 10)\n\tdefer lb.cleanup()\n\n\t// Remove node 3's connections and mark it disconnected 5 minutes ago (under threshold)\n\tif mc, ok := lb.b.nodes.Load(types.NodeID(3)); ok {\n\t\tch := lb.channels[types.NodeID(3)]\n\t\tmc.removeConnectionByChannel(ch)\n\n\t\trecentTime := time.Now().Add(-5 * time.Minute)\n\t\tmc.disconnectedAt.Store(&recentTime)\n\t}\n\n\tlb.b.cleanupOfflineNodes()\n\n\t_, exists := lb.b.nodes.Load(types.NodeID(3))\n\tassert.True(t, exists, \"node 3 should NOT be cleaned up (offline <15min)\")\n}\n\n// TestCleanupOfflineNodes_KeepsActive verifies that nodes with active\n// connections are never cleaned up, even if disconnect time is set.\nfunc 
TestCleanupOfflineNodes_KeepsActive(t *testing.T) {\n\tlb := setupLightweightBatcher(t, 5, 10)\n\tdefer lb.cleanup()\n\n\t// Set old disconnect time but keep the connection active\n\tif mc, ok := lb.b.nodes.Load(types.NodeID(3)); ok {\n\t\toldTime := time.Now().Add(-20 * time.Minute)\n\t\tmc.disconnectedAt.Store(&oldTime)\n\t}\n\t// Don't remove connection - node still has active connections\n\n\tlb.b.cleanupOfflineNodes()\n\n\t_, exists := lb.b.nodes.Load(types.NodeID(3))\n\tassert.True(t, exists,\n\t\t\"node 3 should NOT be cleaned up (still has active connections)\")\n}\n\n// ============================================================================\n// Batcher Lifecycle Tests\n// ============================================================================\n\n// TestBatcher_CloseStopsWorkers verifies that Close() signals workers to stop\n// and doesn't deadlock.\nfunc TestBatcher_CloseStopsWorkers(t *testing.T) {\n\tlb := setupLightweightBatcher(t, 3, 10)\n\n\t// Start workers\n\tlb.b.Start()\n\n\t// Queue some work\n\tif nc, ok := lb.b.nodes.Load(types.NodeID(1)); ok {\n\t\tnc.appendPending(change.DERPMap())\n\t}\n\n\tlb.b.processBatchedChanges()\n\n\t// Close should not deadlock\n\tdone := make(chan struct{})\n\n\tgo func() {\n\t\tlb.b.Close()\n\t\tclose(done)\n\t}()\n\n\tselect {\n\tcase <-done:\n\t\t// Success\n\tcase <-time.After(5 * time.Second):\n\t\tt.Fatal(\"Close() deadlocked\")\n\t}\n}\n\n// TestBatcher_CloseMultipleTimes_DoubleClosePanic verifies that concurrent\n// Close() calls do not panic.\n//\n// Previously (Bug #4): multiChannelNodeConn.close() had no idempotency guard,\n// so concurrent Close() calls triggered close() on the same channels multiple\n// times, panicking with \"close of closed channel\".\n// FIX: guard close() (sync.Once or atomic.Bool) so repeated calls are no-ops.\nfunc TestBatcher_CloseMultipleTimes_DoubleClosePanic(t *testing.T) {\n\tlb := setupLightweightBatcher(t, 3, 10)\n\tlb.b.Start()\n\n\t// Close multiple times concurrently.\n\t// The done channel and workCh are protected by sync.Once and must not panic.\n\t// Node connection close() must be equally idempotent, or this test panics.\n\tpanics := runConcurrently(t, 10, func(_ int) {\n\t\tlb.b.Close()\n\t})\n\n\tassert.Zero(t, panics,\n\t\t\"BUG #4: %d panics from concurrent Close() due to \"+\n\t\t\t\"multiChannelNodeConn.close() lacking idempotency guard. 
\"+\n\t\t\t\"Fix: add sync.Once or atomic.Bool to close()\", panics)\n}\n\n// TestBatcher_MapResponseDuringShutdown verifies that MapResponseFromChange\n// returns ErrBatcherShuttingDown when the batcher is closed.\nfunc TestBatcher_MapResponseDuringShutdown(t *testing.T) {\n\tlb := setupLightweightBatcher(t, 3, 10)\n\n\t// Close the done channel\n\tclose(lb.b.done)\n\n\t_, err := lb.b.MapResponseFromChange(types.NodeID(1), change.DERPMap())\n\tassert.ErrorIs(t, err, ErrBatcherShuttingDown)\n}\n\n// TestBatcher_IsConnectedReflectsState verifies IsConnected accurately\n// reflects the connection state of nodes.\nfunc TestBatcher_IsConnectedReflectsState(t *testing.T) {\n\tlb := setupLightweightBatcher(t, 5, 10)\n\tdefer lb.cleanup()\n\n\t// All nodes should be connected\n\tfor i := 1; i <= 5; i++ {\n\t\tassert.True(t, lb.b.IsConnected(types.NodeID(i)), //nolint:gosec // test\n\t\t\t\"node %d should be connected\", i)\n\t}\n\n\t// Non-existent node should not be connected\n\tassert.False(t, lb.b.IsConnected(types.NodeID(999)))\n\n\t// Disconnect node 3 (remove connection + mark disconnected)\n\tif mc, ok := lb.b.nodes.Load(types.NodeID(3)); ok {\n\t\tmc.removeConnectionByChannel(lb.channels[types.NodeID(3)])\n\t\tmc.markDisconnected()\n\t}\n\n\tassert.False(t, lb.b.IsConnected(types.NodeID(3)),\n\t\t\"node 3 should not be connected after disconnection\")\n\n\t// Other nodes should still be connected\n\tassert.True(t, lb.b.IsConnected(types.NodeID(1)))\n\tassert.True(t, lb.b.IsConnected(types.NodeID(5)))\n}\n\n// TestBatcher_ConnectedMapConsistency verifies ConnectedMap returns accurate\n// state for all nodes.\nfunc TestBatcher_ConnectedMapConsistency(t *testing.T) {\n\tlb := setupLightweightBatcher(t, 5, 10)\n\tdefer lb.cleanup()\n\n\t// Disconnect node 2\n\tif mc, ok := lb.b.nodes.Load(types.NodeID(2)); ok {\n\t\tmc.removeConnectionByChannel(lb.channels[types.NodeID(2)])\n\t\tmc.markDisconnected()\n\t}\n\n\tcm := lb.b.ConnectedMap()\n\n\t// Connected nodes\n\tfor _, id := range []types.NodeID{1, 3, 4, 5} {\n\t\tval, ok := cm.Load(id)\n\t\tassert.True(t, ok, \"node %d should be in ConnectedMap\", id)\n\t\tassert.True(t, val, \"node %d should be connected\", id)\n\t}\n\n\t// Disconnected node\n\tval, ok := cm.Load(types.NodeID(2))\n\tassert.True(t, ok, \"node 2 should be in ConnectedMap\")\n\tassert.False(t, val, \"node 2 should be disconnected\")\n}\n\n// ============================================================================\n// Bug Reproduction Tests (all expected to FAIL until bugs are fixed)\n// ============================================================================\n\n// TestBug3_CleanupOfflineNodes_TOCTOU exercises Bug #3:\n// TestBug3_CleanupOfflineNodes_TOCTOU exercises the TOCTOU race in\n// cleanupOfflineNodes. Without the Compute() fix, the old code did:\n//\n//  1. Range connected map → collect candidates\n//  2. Load node → check hasActiveConnections() == false\n//  3. Delete node\n//\n// Between steps 2 and 3, AddNode could reconnect the node via\n// LoadOrStore, adding a connection to the existing entry. The\n// subsequent Delete would then remove the live reconnected node.\n//\n// FIX: Use Compute() on b.nodes for atomic check-and-delete. Inside\n// the Compute closure, hasActiveConnections() is checked and the\n// entry is only deleted if still inactive. 
A concurrent AddNode that\n// calls addConnection() on the same entry makes hasActiveConnections()\n// return true, causing Compute to cancel the delete.\nfunc TestBug3_CleanupOfflineNodes_TOCTOU(t *testing.T) {\n\tlb := setupLightweightBatcher(t, 5, 10)\n\tdefer lb.cleanup()\n\n\ttargetNode := types.NodeID(3)\n\n\t// Remove node 3's active connections and mark it disconnected >15 minutes ago\n\tif mc, ok := lb.b.nodes.Load(targetNode); ok {\n\t\tch := lb.channels[targetNode]\n\t\tmc.removeConnectionByChannel(ch)\n\n\t\toldTime := time.Now().Add(-20 * time.Minute)\n\t\tmc.disconnectedAt.Store(&oldTime)\n\t}\n\n\t// Verify node 3 has no active connections before we start.\n\tif mc, ok := lb.b.nodes.Load(targetNode); ok {\n\t\trequire.False(t, mc.hasActiveConnections(),\n\t\t\t\"precondition: node 3 should have no active connections\")\n\t}\n\n\t// Simulate a reconnection that happens BEFORE cleanup's Compute() runs.\n\t// With the Compute() fix, the atomic check inside Compute sees\n\t// hasActiveConnections()==true and cancels the delete.\n\tmc, exists := lb.b.nodes.Load(targetNode)\n\trequire.True(t, exists, \"node 3 should exist before reconnection\")\n\n\tnewCh := make(chan *tailcfg.MapResponse, 10)\n\tentry := &connectionEntry{\n\t\tid:      \"reconnected\",\n\t\tc:       newCh,\n\t\tversion: tailcfg.CapabilityVersion(100),\n\t\tcreated: time.Now(),\n\t}\n\tentry.lastUsed.Store(time.Now().Unix())\n\tmc.addConnection(entry)\n\tmc.markConnected()\n\tlb.channels[targetNode] = newCh\n\n\t// Now run cleanup. Node 3 is in the candidates list (old disconnect\n\t// time) but has been reconnected. The Compute() fix should see the\n\t// active connection and cancel the delete.\n\tlb.b.cleanupOfflineNodes()\n\n\t// Node 3 MUST still exist because it has an active connection.\n\t_, stillExists := lb.b.nodes.Load(targetNode)\n\tassert.True(t, stillExists,\n\t\t\"BUG #3: cleanupOfflineNodes deleted node %d despite it having an active \"+\n\t\t\t\"connection. The Compute() fix should atomically check \"+\n\t\t\t\"hasActiveConnections() and cancel the delete.\",\n\t\ttargetNode)\n\n\t// Also verify the concurrent case: cleanup and reconnection racing.\n\t// Set up node 3 as offline again.\n\tmc.removeConnectionByChannel(newCh)\n\n\toldTime2 := time.Now().Add(-20 * time.Minute)\n\tmc.disconnectedAt.Store(&oldTime2)\n\n\tvar wg sync.WaitGroup\n\n\t// Run 100 iterations of concurrent cleanup + reconnection.\n\t// With Compute(), either cleanup wins (node deleted, LoadOrStore\n\t// recreates) or reconnection wins (Compute sees active conn, cancels).\n\t// Either way the node must exist after both complete.\n\tfor range 100 {\n\t\twg.Go(func() {\n\t\t\t// Simulate reconnection via addConnection (like AddNode does)\n\t\t\tif mc, ok := lb.b.nodes.Load(targetNode); ok {\n\t\t\t\treconnCh := make(chan *tailcfg.MapResponse, 10)\n\t\t\t\treconnEntry := &connectionEntry{\n\t\t\t\t\tid:      \"race-reconn\",\n\t\t\t\t\tc:       reconnCh,\n\t\t\t\t\tversion: tailcfg.CapabilityVersion(100),\n\t\t\t\t\tcreated: time.Now(),\n\t\t\t\t}\n\t\t\t\treconnEntry.lastUsed.Store(time.Now().Unix())\n\t\t\t\tmc.addConnection(reconnEntry)\n\t\t\t\tmc.markConnected()\n\t\t\t}\n\t\t})\n\n\t\twg.Go(func() {\n\t\t\tlb.b.cleanupOfflineNodes()\n\t\t})\n\t}\n\n\twg.Wait()\n}\n\n// TestBug5_WorkerPanicKillsWorkerPermanently exercises Bug #5:\n// If b.nodes.Load() returns exists=true but a nil *multiChannelNodeConn,\n// the worker would panic on a nil pointer dereference. 
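The guard is a one-line\n// check in the worker loop, sketched here with illustrative names for\n// the worker's locals:\n//\n//\tnc, exists := b.nodes.Load(w.nodeID)\n//\tif !exists || nc == nil {\n//\t\tcontinue // skip vanished or nil entries instead of crashing\n//\t}\n//\n// 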
Without nil guards,\n// this kills the worker goroutine permanently (no recover), reducing\n// throughput and eventually deadlocking when all workers are dead.\n//\n// BUG: batcher_lockfree.go worker() - no nil check after b.nodes.Load()\n// FIX: Add nil guard: `exists && nc != nil` in both sync and async paths.\nfunc TestBug5_WorkerPanicKillsWorkerPermanently(t *testing.T) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tlb := setupLightweightBatcher(t, 3, 10)\n\tdefer lb.cleanup()\n\n\tlb.b.workers = 2\n\tlb.b.Start()\n\n\t// Give workers time to start\n\ttime.Sleep(50 * time.Millisecond) //nolint:forbidigo // concurrency test coordination\n\n\t// Store a nil value in b.nodes for a specific node ID.\n\t// This simulates a race where a node entry exists but the value is nil\n\t// (e.g., concurrent cleanup setting nil before deletion).\n\tnilNodeID := types.NodeID(55555)\n\tlb.b.nodes.Store(nilNodeID, nil)\n\n\t// Queue async work (resultCh=nil) targeting the nil node.\n\t// Without the nil guard, this would panic: nc.change(w.c) on nil nc.\n\tfor range 10 {\n\t\tlb.b.queueWork(work{\n\t\t\tchanges: []change.Change{change.DERPMap()},\n\t\t\tnodeID:  nilNodeID,\n\t\t})\n\t}\n\n\t// Queue sync work (with resultCh) targeting the nil node.\n\t// Without the nil guard, this would panic: generateMapResponse(nc, ...)\n\t// on nil nc.\n\tfor range 5 {\n\t\tresultCh := make(chan workResult, 1)\n\t\tlb.b.queueWork(work{\n\t\t\tchanges:  []change.Change{change.DERPMap()},\n\t\t\tnodeID:   nilNodeID,\n\t\t\tresultCh: resultCh,\n\t\t})\n\t\t// Read the result so workers don't block.\n\t\tselect {\n\t\tcase res := <-resultCh:\n\t\t\t// With nil guard, result should have nil mapResponse (no work done).\n\t\t\tassert.Nil(t, res.mapResponse,\n\t\t\t\t\"sync work for nil node should return nil mapResponse\")\n\t\tcase <-time.After(2 * time.Second):\n\t\t\tt.Fatal(\"timed out waiting for sync work result — worker may have panicked\")\n\t\t}\n\t}\n\n\t// Wait for async work to drain\n\ttime.Sleep(100 * time.Millisecond) //nolint:forbidigo // concurrency test coordination\n\n\t// Now queue valid work for a real node to prove workers are still alive.\n\tbeforeValid := lb.b.workProcessed.Load()\n\tfor range 5 {\n\t\tlb.b.queueWork(work{\n\t\t\tchanges: []change.Change{change.DERPMap()},\n\t\t\tnodeID:  types.NodeID(1),\n\t\t})\n\t}\n\n\ttime.Sleep(200 * time.Millisecond) //nolint:forbidigo // concurrency test coordination\n\n\tafterValid := lb.b.workProcessed.Load()\n\tvalidProcessed := afterValid - beforeValid\n\tt.Logf(\"valid work processed after nil-node work: %d/5\", validProcessed)\n\n\tassert.Equal(t, int64(5), validProcessed,\n\t\t\"workers must remain functional after encountering nil node entries\")\n}\n\n// TestBug6_StartCalledMultipleTimes_GoroutineLeak exercises Bug #6:\n// Start() creates a new done channel and launches doWork() every time,\n// with no guard against multiple calls. 
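A minimal gate, sketched\n// with an assumed atomic.Bool field on Batcher (the field name is\n// illustrative, not the real struct layout):\n//\n//\tfunc (b *Batcher) Start() {\n//\t\tif !b.started.CompareAndSwap(false, true) {\n//\t\t\treturn // already running\n//\t\t}\n//\t\t// ... create the done channel, launch doWork() and the workers ...\n//\t}\n//\n// 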
Each call spawns (workers+1)\n// goroutines that never get cleaned up.\n//\n// BUG: batcher_lockfree.go:163-166 - Start() has no \"already started\" check\n// FIX: Add sync.Once or atomic.Bool to prevent multiple Start() calls.\nfunc TestBug6_StartCalledMultipleTimes_GoroutineLeak(t *testing.T) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tlb := setupLightweightBatcher(t, 3, 10)\n\tlb.b.workers = 2\n\n\tgoroutinesBefore := runtime.NumGoroutine()\n\n\t// Call Start() once - this should launch (workers + 1) goroutines\n\t// (1 for doWork + workers for worker())\n\tlb.b.Start()\n\ttime.Sleep(50 * time.Millisecond) //nolint:forbidigo // concurrency test coordination\n\n\tgoroutinesAfterFirst := runtime.NumGoroutine()\n\tfirstStartDelta := goroutinesAfterFirst - goroutinesBefore\n\tt.Logf(\"goroutines: before=%d, after_first_Start=%d, delta=%d\",\n\t\tgoroutinesBefore, goroutinesAfterFirst, firstStartDelta)\n\n\t// Call Start() again - this SHOULD be a no-op\n\t// BUG: it creates a NEW done channel (orphaning goroutines listening on the old one)\n\t// and launches another doWork()+workers set\n\tlb.b.Start()\n\ttime.Sleep(50 * time.Millisecond) //nolint:forbidigo // concurrency test coordination\n\n\tgoroutinesAfterSecond := runtime.NumGoroutine()\n\tsecondStartDelta := goroutinesAfterSecond - goroutinesAfterFirst\n\tt.Logf(\"goroutines: after_second_Start=%d, delta=%d (should be 0)\",\n\t\tgoroutinesAfterSecond, secondStartDelta)\n\n\t// Call Start() a third time\n\tlb.b.Start()\n\ttime.Sleep(50 * time.Millisecond) //nolint:forbidigo // concurrency test coordination\n\n\tgoroutinesAfterThird := runtime.NumGoroutine()\n\tthirdStartDelta := goroutinesAfterThird - goroutinesAfterSecond\n\tt.Logf(\"goroutines: after_third_Start=%d, delta=%d (should be 0)\",\n\t\tgoroutinesAfterThird, thirdStartDelta)\n\n\t// Close() only closes the LAST done channel, leaving earlier goroutines leaked\n\tlb.b.Close()\n\ttime.Sleep(100 * time.Millisecond) //nolint:forbidigo // concurrency test coordination\n\n\tgoroutinesAfterClose := runtime.NumGoroutine()\n\tt.Logf(\"goroutines after Close: %d (leaked: %d)\",\n\t\tgoroutinesAfterClose, goroutinesAfterClose-goroutinesBefore)\n\n\t// Second Start() should NOT have created new goroutines\n\tassert.Zero(t, secondStartDelta,\n\t\t\"BUG #6: second Start() call leaked %d goroutines. \"+\n\t\t\t\"Start() has no idempotency guard, each call spawns new goroutines. 
\"+\n\t\t\t\"Fix: add sync.Once or atomic.Bool to prevent multiple Start() calls\",\n\t\tsecondStartDelta)\n}\n\n// TestBug7_CleanupOfflineNodes_PendingChangesCleanedStructurally verifies that\n// pending changes are automatically cleaned up when a node is removed from the\n// nodes map, because pending state lives inside multiChannelNodeConn.\n//\n// Previously (Bug #7): pendingChanges was a separate map that was NOT cleaned\n// when cleanupOfflineNodes removed a node, causing orphaned entries.\n// FIX: pendingChanges moved into multiChannelNodeConn — deleting the node\n// from b.nodes automatically drops its pending changes.\nfunc TestBug7_CleanupOfflineNodes_PendingChangesCleanedStructurally(t *testing.T) {\n\tlb := setupLightweightBatcher(t, 5, 10)\n\tdefer lb.cleanup()\n\n\ttargetNode := types.NodeID(3)\n\n\t// Remove node 3's connections and mark it disconnected >15 minutes ago\n\tif mc, ok := lb.b.nodes.Load(targetNode); ok {\n\t\tch := lb.channels[targetNode]\n\t\tmc.removeConnectionByChannel(ch)\n\n\t\toldTime := time.Now().Add(-20 * time.Minute)\n\t\tmc.disconnectedAt.Store(&oldTime)\n\t}\n\n\t// Add pending changes for node 3 before cleanup\n\tif nc, ok := lb.b.nodes.Load(targetNode); ok {\n\t\tnc.appendPending(change.DERPMap())\n\t}\n\n\t// Verify pending exists before cleanup\n\tpending := getPendingForNode(lb.b, targetNode)\n\trequire.Len(t, pending, 1, \"node 3 should have pending changes before cleanup\")\n\n\t// Run cleanup\n\tlb.b.cleanupOfflineNodes()\n\n\t// Node 3 should be removed from the nodes map\n\t_, existsInNodes := lb.b.nodes.Load(targetNode)\n\tassert.False(t, existsInNodes, \"node 3 should be removed from nodes map\")\n\n\t// Pending changes are structurally gone because the node was deleted.\n\t// getPendingForNode returns nil for non-existent nodes.\n\tpendingAfter := getPendingForNode(lb.b, targetNode)\n\tassert.Empty(t, pendingAfter,\n\t\t\"pending changes should be gone after node deletion (structural fix)\")\n}\n\n// TestBug8_SerialTimeoutUnderWriteLock exercises Bug #8 (performance):\n// multiChannelNodeConn.send() originally held the write lock for the ENTIRE\n// duration of sending to all connections. Each send has a 50ms timeout for\n// stale connections. With N stale connections, the write lock was held for\n// N*50ms, blocking all addConnection/removeConnection calls.\n//\n// BUG: mutex.Lock() held during all conn.send() calls, each with 50ms timeout.\n//\n//\t5 stale connections = 250ms lock hold, blocking addConnection/removeConnection.\n//\n// FIX: Snapshot connections under read lock, release, send without any lock\n//\n//\t(timeouts happen here), then write-lock only to remove failed connections.\n//\tThe lock is now held only for O(N) pointer copies, not for N*50ms I/O.\nfunc TestBug8_SerialTimeoutUnderWriteLock(t *testing.T) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tmc := newMultiChannelNodeConn(1, nil)\n\n\t// Add 5 stale connections (unbuffered, no reader = will timeout at 50ms each)\n\tconst staleCount = 5\n\tfor i := range staleCount {\n\t\tch := make(chan *tailcfg.MapResponse) // unbuffered\n\t\tmc.addConnection(makeConnectionEntry(fmt.Sprintf(\"stale-%d\", i), ch))\n\t}\n\n\t// The key test: verify that the mutex is NOT held during the slow sends.\n\t// We do this by trying to acquire the lock from another goroutine during\n\t// the send. 
With the old code (lock held for 250ms), this would block.\n\t// With the fix, the lock is free during sends.\n\tlockAcquired := make(chan time.Duration, 1)\n\n\tgo func() {\n\t\t// Give send() a moment to start (it will be in the unlocked send window)\n\t\ttime.Sleep(20 * time.Millisecond) //nolint:forbidigo // concurrency test coordination\n\n\t\t// Try to acquire the write lock. It should succeed quickly because\n\t\t// the lock is only held briefly for the snapshot and cleanup.\n\t\tstart := time.Now()\n\n\t\tmc.mutex.Lock()\n\t\tlockWait := time.Since(start)\n\t\tmc.mutex.Unlock()\n\n\t\tlockAcquired <- lockWait\n\t}()\n\n\t// Run send() with 5 stale connections. Total wall time will be ~250ms\n\t// (5 * 50ms serial timeouts), but the lock should be free during sends.\n\t_ = mc.send(testMapResponse())\n\n\tlockWait := <-lockAcquired\n\tt.Logf(\"lock acquisition during send() with %d stale connections waited %v\",\n\t\tstaleCount, lockWait)\n\n\t// The lock wait should be very short (<50ms) since the lock is released\n\t// before sending. With the old code it would be ~230ms (250ms - 20ms sleep).\n\tassert.Less(t, lockWait, 50*time.Millisecond,\n\t\t\"mutex was held for %v during send() with %d stale connections; \"+\n\t\t\t\"lock should be released before sending to allow \"+\n\t\t\t\"concurrent addConnection/removeConnection calls\",\n\t\tlockWait, staleCount)\n}\n\n// TestBug1_BroadcastNoDataLoss verifies that concurrent broadcast addToBatch\n// calls do not lose data.\n//\n// Previously (Bug #1, broadcast path): Same Load→append→Store race as targeted\n// changes, but on the broadcast code path within the Range callback.\n// FIX: pendingChanges moved into multiChannelNodeConn with mutex protection.\nfunc TestBug1_BroadcastNoDataLoss(t *testing.T) {\n\t// Use many nodes so the Range iteration takes longer, widening the race window\n\tlb := setupLightweightBatcher(t, 100, 10)\n\tdefer lb.cleanup()\n\n\tconst goroutines = 50\n\n\t// Each goroutine broadcasts a DERPMap change to all 100 nodes\n\tpanics := runConcurrentlyWithTimeout(t, goroutines, 10*time.Second, func(_ int) {\n\t\tlb.b.addToBatch(change.DERPMap())\n\t})\n\n\trequire.Zero(t, panics, \"no panics expected\")\n\n\t// Each of the 100 nodes should have exactly `goroutines` pending changes.\n\t// The race causes some nodes to have fewer.\n\tvar (\n\t\ttotalLost     int\n\t\tnodesWithLoss int\n\t)\n\n\tlb.b.nodes.Range(func(id types.NodeID, _ *multiChannelNodeConn) bool {\n\t\tpending := getPendingForNode(lb.b, id)\n\t\tif len(pending) < goroutines {\n\t\t\ttotalLost += goroutines - len(pending)\n\t\t\tnodesWithLoss++\n\t\t}\n\n\t\treturn true\n\t})\n\n\tt.Logf(\"broadcast data loss: %d total changes lost across %d/%d nodes\",\n\t\ttotalLost, nodesWithLoss, 100)\n\n\tassert.Zero(t, totalLost,\n\t\t\"broadcast lost %d changes across %d nodes under concurrent access\",\n\t\ttotalLost, nodesWithLoss)\n}\n\n// ============================================================================\n// 1000-Node Scale Tests (lightweight, no DB)\n// ============================================================================\n\n// TestScale1000_AddToBatch_Broadcast verifies that broadcasting to 1000 nodes\n// works correctly under concurrent access.\nfunc TestScale1000_AddToBatch_Broadcast(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping 1000-node test in short mode\")\n\t}\n\n\tzerolog.SetGlobalLevel(zerolog.ErrorLevel)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tlb := setupLightweightBatcher(t, 1000, 10)\n\tdefer 
lb.cleanup()\n\n\tconst concurrentBroadcasts = 100\n\n\tpanics := runConcurrentlyWithTimeout(t, concurrentBroadcasts, 30*time.Second, func(_ int) {\n\t\tlb.b.addToBatch(change.DERPMap())\n\t})\n\n\tassert.Zero(t, panics, \"no panics expected\")\n\n\tnodesWithPending := countNodesPending(lb.b)\n\ttotalPending := countTotalPending(lb.b)\n\n\tt.Logf(\"1000-node broadcast: %d/%d nodes have pending, %d total pending items\",\n\t\tnodesWithPending, 1000, totalPending)\n\n\t// All 1000 nodes should have at least some pending changes\n\t// (the 900 threshold leaves headroom in case the Bug #1 race regresses)\n\tassert.GreaterOrEqual(t, nodesWithPending, 900,\n\t\t\"at least 90% of nodes should have pending changes\")\n}\n\n// TestScale1000_ProcessBatchedWithConcurrentAdd tests processBatchedChanges\n// running concurrently with addToBatch at 1000 nodes.\nfunc TestScale1000_ProcessBatchedWithConcurrentAdd(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping 1000-node test in short mode\")\n\t}\n\n\tzerolog.SetGlobalLevel(zerolog.ErrorLevel)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tlb := setupLightweightBatcher(t, 1000, 10)\n\tdefer lb.cleanup()\n\n\t// Use a large work channel to avoid blocking.\n\t// 50 broadcasts × 1000 nodes = up to 50,000 work items.\n\tlb.b.workCh = make(chan work, 100000)\n\n\tvar wg sync.WaitGroup\n\n\t// Producer: add broadcasts\n\n\twg.Go(func() {\n\t\tfor range 50 {\n\t\t\tlb.b.addToBatch(change.DERPMap())\n\t\t}\n\t})\n\n\t// Consumer: process batched changes repeatedly\n\n\twg.Go(func() {\n\t\tfor range 50 {\n\t\t\tlb.b.processBatchedChanges()\n\t\t\ttime.Sleep(1 * time.Millisecond) //nolint:forbidigo // concurrency test coordination\n\t\t}\n\t})\n\n\tdone := make(chan struct{})\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(done)\n\t}()\n\n\tselect {\n\tcase <-done:\n\t\tt.Logf(\"1000-node concurrent add+process completed without deadlock\")\n\tcase <-time.After(30 * time.Second):\n\t\tt.Fatal(\"deadlock detected in 1000-node concurrent add+process\")\n\t}\n\n\tqueuedWork := len(lb.b.workCh)\n\tt.Logf(\"work items queued: %d\", queuedWork)\n\tassert.Positive(t, queuedWork, \"should have queued some work items\")\n}\n\n// TestScale1000_MultiChannelBroadcast tests broadcasting a MapResponse\n// to 1000 nodes, each with 1-3 connections.\nfunc TestScale1000_MultiChannelBroadcast(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping 1000-node test in short mode\")\n\t}\n\n\tzerolog.SetGlobalLevel(zerolog.ErrorLevel)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tconst (\n\t\tnodeCount  = 1000\n\t\tbufferSize = 5\n\t)\n\n\t// Create nodes with varying connection counts\n\tb := &Batcher{\n\t\ttick:    time.NewTicker(10 * time.Millisecond),\n\t\tworkers: 4,\n\t\tworkCh:  make(chan work, 4*200),\n\t\tnodes:   xsync.NewMap[types.NodeID, *multiChannelNodeConn](),\n\t\tdone:    make(chan struct{}),\n\t}\n\n\tdefer func() {\n\t\tclose(b.done)\n\t\tb.tick.Stop()\n\t}()\n\n\ttype nodeChannels struct {\n\t\tchannels []chan *tailcfg.MapResponse\n\t}\n\n\tallNodeChannels := make(map[types.NodeID]*nodeChannels, nodeCount)\n\n\tfor i := 1; i <= nodeCount; i++ {\n\t\tid := types.NodeID(i) //nolint:gosec // test with small controlled values\n\t\tmc := newMultiChannelNodeConn(id, nil)\n\n\t\tconnCount := 1 + (i % 3) // 1, 2, or 3 connections\n\t\tnc := &nodeChannels{channels: make([]chan *tailcfg.MapResponse, connCount)}\n\n\t\tfor j := range connCount {\n\t\t\tch := make(chan *tailcfg.MapResponse, bufferSize)\n\t\t\tnc.channels[j] = ch\n\t\t\tentry := 
&connectionEntry{\n\t\t\t\tid:      fmt.Sprintf(\"conn-%d-%d\", i, j),\n\t\t\t\tc:       ch,\n\t\t\t\tversion: tailcfg.CapabilityVersion(100),\n\t\t\t\tcreated: time.Now(),\n\t\t\t}\n\t\t\tentry.lastUsed.Store(time.Now().Unix())\n\t\t\tmc.addConnection(entry)\n\t\t}\n\n\t\tb.nodes.Store(id, mc)\n\t\tallNodeChannels[id] = nc\n\t}\n\n\t// Broadcast to all nodes\n\tdata := testMapResponse()\n\n\tvar successCount, failCount atomic.Int64\n\n\tstart := time.Now()\n\n\tb.nodes.Range(func(id types.NodeID, mc *multiChannelNodeConn) bool {\n\t\terr := mc.send(data)\n\t\tif err != nil {\n\t\t\tfailCount.Add(1)\n\t\t} else {\n\t\t\tsuccessCount.Add(1)\n\t\t}\n\n\t\treturn true\n\t})\n\n\telapsed := time.Since(start)\n\n\tt.Logf(\"broadcast to %d nodes: %d success, %d failures, took %v\",\n\t\tnodeCount, successCount.Load(), failCount.Load(), elapsed)\n\n\tassert.Equal(t, int64(nodeCount), successCount.Load(),\n\t\t\"all nodes should receive broadcast successfully\")\n\tassert.Zero(t, failCount.Load(), \"no broadcast failures expected\")\n\n\t// Verify at least some channels received data\n\treceivedCount := 0\n\n\tfor _, nc := range allNodeChannels {\n\t\tfor _, ch := range nc.channels {\n\t\t\tselect {\n\t\t\tcase <-ch:\n\t\t\t\treceivedCount++\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t}\n\n\tt.Logf(\"channels that received data: %d\", receivedCount)\n\tassert.Positive(t, receivedCount, \"channels should have received broadcast data\")\n}\n\n// TestScale1000_ConnectionChurn tests 1000 nodes with 10% churning connections\n// while broadcasts are happening. Stable nodes should not lose data.\nfunc TestScale1000_ConnectionChurn(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping 1000-node test in short mode\")\n\t}\n\n\tzerolog.SetGlobalLevel(zerolog.ErrorLevel)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tlb := setupLightweightBatcher(t, 1000, 20)\n\tdefer lb.cleanup()\n\n\tconst churnNodes = 100 // 10% of nodes churn\n\n\tconst churnCycles = 50\n\n\tvar (\n\t\tpanics atomic.Int64\n\t\twg     sync.WaitGroup\n\t)\n\n\t// Churn goroutine: rapidly add/remove connections for nodes 901-1000\n\n\twg.Go(func() {\n\t\tfor cycle := range churnCycles {\n\t\t\tfor i := 901; i <= 901+churnNodes-1; i++ {\n\t\t\t\tid := types.NodeID(i) //nolint:gosec // test with small controlled values\n\n\t\t\t\tmc, exists := lb.b.nodes.Load(id)\n\t\t\t\tif !exists {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// Remove old connection\n\t\t\t\toldCh := lb.channels[id]\n\t\t\t\tmc.removeConnectionByChannel(oldCh)\n\n\t\t\t\t// Add new connection\n\t\t\t\tnewCh := make(chan *tailcfg.MapResponse, 20)\n\t\t\t\tentry := &connectionEntry{\n\t\t\t\t\tid:      fmt.Sprintf(\"churn-%d-%d\", i, cycle),\n\t\t\t\t\tc:       newCh,\n\t\t\t\t\tversion: tailcfg.CapabilityVersion(100),\n\t\t\t\t\tcreated: time.Now(),\n\t\t\t\t}\n\t\t\t\tentry.lastUsed.Store(time.Now().Unix())\n\t\t\t\tmc.addConnection(entry)\n\n\t\t\t\tlb.channels[id] = newCh\n\t\t\t}\n\t\t}\n\t})\n\n\t// Broadcast goroutine: send addToBatch calls during churn\n\n\twg.Go(func() {\n\t\tfor range churnCycles {\n\t\t\tfunc() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\t\tpanics.Add(1)\n\t\t\t\t\t}\n\t\t\t\t}()\n\n\t\t\t\tlb.b.addToBatch(change.DERPMap())\n\t\t\t}()\n\t\t}\n\t})\n\n\tdone := make(chan struct{})\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(done)\n\t}()\n\n\tselect {\n\tcase <-done:\n\t\t// Success\n\tcase <-time.After(30 * time.Second):\n\t\tt.Fatal(\"deadlock in 1000-node connection churn test\")\n\t}\n\n\tassert.Zero(t, 
panics.Load(), \"no panics during connection churn\")\n\n\t// Verify stable nodes (1-900) still have active connections\n\tstableConnected := 0\n\n\tfor i := 1; i <= 900; i++ {\n\t\tif mc, exists := lb.b.nodes.Load(types.NodeID(i)); exists { //nolint:gosec // test\n\t\t\tif mc.hasActiveConnections() {\n\t\t\t\tstableConnected++\n\t\t\t}\n\t\t}\n\t}\n\n\tt.Logf(\"stable nodes still connected: %d/900\", stableConnected)\n\tassert.Equal(t, 900, stableConnected,\n\t\t\"all stable nodes should retain their connections during churn\")\n}\n\n// TestScale1000_ConcurrentAddRemove tests concurrent AddNode-like and\n// RemoveNode-like operations at 1000-node scale.\nfunc TestScale1000_ConcurrentAddRemove(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping 1000-node test in short mode\")\n\t}\n\n\tzerolog.SetGlobalLevel(zerolog.ErrorLevel)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tlb := setupLightweightBatcher(t, 1000, 10)\n\tdefer lb.cleanup()\n\n\tconst goroutines = 200\n\n\tpanics := runConcurrentlyWithTimeout(t, goroutines, 30*time.Second, func(i int) {\n\t\tid := types.NodeID(1 + (i % 1000)) //nolint:gosec // test\n\n\t\tmc, exists := lb.b.nodes.Load(id)\n\t\tif !exists {\n\t\t\treturn\n\t\t}\n\n\t\tif i%2 == 0 {\n\t\t\t// Add a new connection\n\t\t\tch := make(chan *tailcfg.MapResponse, 10)\n\t\t\tentry := &connectionEntry{\n\t\t\t\tid:      fmt.Sprintf(\"concurrent-%d\", i),\n\t\t\t\tc:       ch,\n\t\t\t\tversion: tailcfg.CapabilityVersion(100),\n\t\t\t\tcreated: time.Now(),\n\t\t\t}\n\t\t\tentry.lastUsed.Store(time.Now().Unix())\n\t\t\tmc.addConnection(entry)\n\t\t} else {\n\t\t\t// Try to remove a connection (may fail if already removed)\n\t\t\tch := lb.channels[id]\n\t\t\tmc.removeConnectionByChannel(ch)\n\t\t}\n\t})\n\n\tassert.Zero(t, panics, \"no panics during concurrent add/remove at 1000 nodes\")\n}\n\n// TestScale1000_IsConnectedConsistency verifies IsConnected returns consistent\n// results during rapid connection state changes at 1000-node scale.\nfunc TestScale1000_IsConnectedConsistency(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping 1000-node test in short mode\")\n\t}\n\n\tzerolog.SetGlobalLevel(zerolog.ErrorLevel)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tlb := setupLightweightBatcher(t, 1000, 10)\n\tdefer lb.cleanup()\n\n\tvar (\n\t\tpanics atomic.Int64\n\t\twg     sync.WaitGroup\n\t)\n\n\t// Goroutines reading IsConnected\n\n\twg.Go(func() {\n\t\tfor range 1000 {\n\t\t\tfunc() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\t\tpanics.Add(1)\n\t\t\t\t\t}\n\t\t\t\t}()\n\n\t\t\t\tfor i := 1; i <= 1000; i++ {\n\t\t\t\t\t_ = lb.b.IsConnected(types.NodeID(i)) //nolint:gosec // test\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t})\n\n\t// Goroutine modifying connection state via disconnectedAt on the node conn\n\n\twg.Go(func() {\n\t\tfor i := range 100 {\n\t\t\tid := types.NodeID(1 + (i % 1000)) //nolint:gosec // test\n\t\t\tif mc, ok := lb.b.nodes.Load(id); ok {\n\t\t\t\tif i%2 == 0 {\n\t\t\t\t\tmc.markDisconnected() // disconnect\n\t\t\t\t} else {\n\t\t\t\t\tmc.markConnected() // reconnect\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\n\tdone := make(chan struct{})\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(done)\n\t}()\n\n\tselect {\n\tcase <-done:\n\t\t// Success\n\tcase <-time.After(30 * time.Second):\n\t\tt.Fatal(\"deadlock in IsConnected consistency test\")\n\t}\n\n\tassert.Zero(t, panics.Load(),\n\t\t\"IsConnected should not panic under concurrent modification\")\n}\n\n// TestScale1000_BroadcastDuringNodeChurn tests 
that broadcast addToBatch\n// calls work correctly while 20% of nodes are joining and leaving.\nfunc TestScale1000_BroadcastDuringNodeChurn(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping 1000-node test in short mode\")\n\t}\n\n\tzerolog.SetGlobalLevel(zerolog.ErrorLevel)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tlb := setupLightweightBatcher(t, 1000, 10)\n\tdefer lb.cleanup()\n\n\tvar (\n\t\tpanics atomic.Int64\n\t\twg     sync.WaitGroup\n\t)\n\n\t// Node churn: 20% of nodes (nodes 801-1000) joining/leaving\n\n\twg.Go(func() {\n\t\tfor cycle := range 20 {\n\t\t\tfor i := 801; i <= 1000; i++ {\n\t\t\t\tfunc() {\n\t\t\t\t\tdefer func() {\n\t\t\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\t\t\tpanics.Add(1)\n\t\t\t\t\t\t}\n\t\t\t\t\t}()\n\n\t\t\t\t\tid := types.NodeID(i) //nolint:gosec // test\n\t\t\t\t\tif cycle%2 == 0 {\n\t\t\t\t\t\t// \"Remove\" node\n\t\t\t\t\t\tlb.b.nodes.Delete(id)\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// \"Add\" node back\n\t\t\t\t\t\tmc := newMultiChannelNodeConn(id, nil)\n\t\t\t\t\t\tch := make(chan *tailcfg.MapResponse, 10)\n\t\t\t\t\t\tentry := &connectionEntry{\n\t\t\t\t\t\t\tid:      fmt.Sprintf(\"rechurn-%d-%d\", i, cycle),\n\t\t\t\t\t\t\tc:       ch,\n\t\t\t\t\t\t\tversion: tailcfg.CapabilityVersion(100),\n\t\t\t\t\t\t\tcreated: time.Now(),\n\t\t\t\t\t\t}\n\t\t\t\t\t\tentry.lastUsed.Store(time.Now().Unix())\n\t\t\t\t\t\tmc.addConnection(entry)\n\t\t\t\t\t\tlb.b.nodes.Store(id, mc)\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t}\n\t\t}\n\t})\n\n\t// Concurrent broadcasts\n\n\twg.Go(func() {\n\t\tfor range 50 {\n\t\t\tfunc() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\t\tpanics.Add(1)\n\t\t\t\t\t}\n\t\t\t\t}()\n\n\t\t\t\tlb.b.addToBatch(change.DERPMap())\n\t\t\t}()\n\t\t}\n\t})\n\n\tdone := make(chan struct{})\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(done)\n\t}()\n\n\tselect {\n\tcase <-done:\n\t\tt.Logf(\"broadcast during churn completed, panics: %d\", panics.Load())\n\tcase <-time.After(30 * time.Second):\n\t\tt.Fatal(\"deadlock in broadcast during node churn\")\n\t}\n\n\tassert.Zero(t, panics.Load(),\n\t\t\"broadcast during node churn should not panic\")\n}\n\n// TestScale1000_WorkChannelSaturation tests that the work channel doesn't\n// deadlock when it fills up (queueWork selects on done channel as escape).\nfunc TestScale1000_WorkChannelSaturation(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping 1000-node test in short mode\")\n\t}\n\n\tzerolog.SetGlobalLevel(zerolog.ErrorLevel)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\t// Create batcher with SMALL work channel to force saturation\n\tb := &Batcher{\n\t\ttick:    time.NewTicker(10 * time.Millisecond),\n\t\tworkers: 2,\n\t\tworkCh:  make(chan work, 10), // Very small - will saturate\n\t\tnodes:   xsync.NewMap[types.NodeID, *multiChannelNodeConn](),\n\t\tdone:    make(chan struct{}),\n\t}\n\n\tdefer func() {\n\t\tclose(b.done)\n\t\tb.tick.Stop()\n\t}()\n\n\t// Add 1000 nodes\n\tfor i := 1; i <= 1000; i++ {\n\t\tid := types.NodeID(i) //nolint:gosec // test\n\t\tmc := newMultiChannelNodeConn(id, nil)\n\t\tch := make(chan *tailcfg.MapResponse, 1)\n\t\tentry := &connectionEntry{\n\t\t\tid:      fmt.Sprintf(\"conn-%d\", i),\n\t\t\tc:       ch,\n\t\t\tversion: tailcfg.CapabilityVersion(100),\n\t\t\tcreated: time.Now(),\n\t\t}\n\t\tentry.lastUsed.Store(time.Now().Unix())\n\t\tmc.addConnection(entry)\n\t\tb.nodes.Store(id, mc)\n\t}\n\n\t// Add pending changes for all 1000 nodes\n\tfor i := 1; i <= 1000; i++ {\n\t\tif nc, ok := 
b.nodes.Load(types.NodeID(i)); ok { //nolint:gosec // test\n\t\t\tnc.appendPending(change.DERPMap())\n\t\t}\n\t}\n\n\t// processBatchedChanges should not deadlock even with small work channel.\n\t// queueWork uses select with b.done as escape hatch.\n\t// Start a consumer to slowly drain the work channel.\n\tvar consumed atomic.Int64\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-b.workCh:\n\t\t\t\tconsumed.Add(1)\n\t\t\tcase <-b.done:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tdone := make(chan struct{})\n\n\tgo func() {\n\t\tb.processBatchedChanges()\n\t\tclose(done)\n\t}()\n\n\tselect {\n\tcase <-done:\n\t\tt.Logf(\"processBatchedChanges completed, consumed %d work items\", consumed.Load())\n\tcase <-time.After(30 * time.Second):\n\t\tt.Fatal(\"processBatchedChanges deadlocked with saturated work channel\")\n\t}\n}\n\n// TestScale1000_FullUpdate_AllNodesGetPending verifies that a FullUpdate\n// creates pending entries for all 1000 nodes.\nfunc TestScale1000_FullUpdate_AllNodesGetPending(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping 1000-node test in short mode\")\n\t}\n\n\tzerolog.SetGlobalLevel(zerolog.ErrorLevel)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tlb := setupLightweightBatcher(t, 1000, 10)\n\tdefer lb.cleanup()\n\n\tlb.b.addToBatch(change.FullUpdate())\n\n\tnodesWithPending := countNodesPending(lb.b)\n\tassert.Equal(t, 1000, nodesWithPending,\n\t\t\"FullUpdate should create pending entries for all 1000 nodes\")\n\n\t// Verify each node has exactly one full update pending\n\tlb.b.nodes.Range(func(id types.NodeID, _ *multiChannelNodeConn) bool {\n\t\tpending := getPendingForNode(lb.b, id)\n\t\trequire.Len(t, pending, 1, \"node %d should have 1 pending change\", id)\n\t\tassert.True(t, pending[0].IsFull(), \"pending change for node %d should be full\", id)\n\n\t\treturn true\n\t})\n}\n\n// ============================================================================\n// 1000-Node Full Pipeline Tests (with DB)\n// ============================================================================\n\n// TestScale1000_AllToAll_FullPipeline tests the complete pipeline:\n// create 1000 nodes in DB, add them to batcher, send FullUpdate,\n// verify all nodes see 999 peers.\nfunc TestScale1000_AllToAll_FullPipeline(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping 1000-node full pipeline test in short mode\")\n\t}\n\n\tif util.RaceEnabled {\n\t\tt.Skip(\"skipping 1000-node test with race detector (bcrypt setup too slow)\")\n\t}\n\n\tzerolog.SetGlobalLevel(zerolog.ErrorLevel)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tt.Logf(\"setting up 1000-node test environment (this may take a minute)...\")\n\n\ttestData, cleanup := setupBatcherWithTestData(t, NewBatcherAndMapper, 1, 1000, 200)\n\tdefer cleanup()\n\n\tbatcher := testData.Batcher\n\tallNodes := testData.Nodes\n\n\tt.Logf(\"created %d nodes, connecting to batcher...\", len(allNodes))\n\n\t// Start update consumers\n\tfor i := range allNodes {\n\t\tallNodes[i].start()\n\t}\n\n\t// Connect all nodes\n\tfor i := range allNodes {\n\t\tnode := &allNodes[i]\n\n\t\terr := batcher.AddNode(node.n.ID, node.ch, tailcfg.CapabilityVersion(100), nil)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to add node %d: %v\", i, err)\n\t\t}\n\t\t// Yield periodically to avoid overwhelming the work queue\n\t\tif i%50 == 49 {\n\t\t\ttime.Sleep(10 * time.Millisecond) //nolint:forbidigo // concurrency test coordination\n\t\t}\n\t}\n\n\tt.Logf(\"all nodes connected, sending FullUpdate and waiting for 
convergence...\")\n\n\t// Send FullUpdate\n\tbatcher.AddWork(change.FullUpdate())\n\n\texpectedPeers := len(allNodes) - 1 // Each sees all others\n\n\t// Wait for all nodes to see all peers\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tconvergedCount := 0\n\n\t\tfor i := range allNodes {\n\t\t\tif int(allNodes[i].maxPeersCount.Load()) >= expectedPeers {\n\t\t\t\tconvergedCount++\n\t\t\t}\n\t\t}\n\n\t\tassert.Equal(c, len(allNodes), convergedCount,\n\t\t\t\"all nodes should see %d peers (converged: %d/%d)\",\n\t\t\texpectedPeers, convergedCount, len(allNodes))\n\t}, 5*time.Minute, 5*time.Second, \"waiting for 1000-node convergence\")\n\n\t// Final statistics\n\ttotalUpdates := int64(0)\n\tminPeers := len(allNodes)\n\tmaxPeers := 0\n\n\tfor i := range allNodes {\n\t\tstats := allNodes[i].cleanup()\n\n\t\ttotalUpdates += stats.TotalUpdates\n\t\tif stats.MaxPeersSeen < minPeers {\n\t\t\tminPeers = stats.MaxPeersSeen\n\t\t}\n\n\t\tif stats.MaxPeersSeen > maxPeers {\n\t\t\tmaxPeers = stats.MaxPeersSeen\n\t\t}\n\t}\n\n\tt.Logf(\"1000-node pipeline: total_updates=%d, min_peers=%d, max_peers=%d, expected=%d\",\n\t\ttotalUpdates, minPeers, maxPeers, expectedPeers)\n\n\tassert.GreaterOrEqual(t, minPeers, expectedPeers,\n\t\t\"all nodes should have seen at least %d peers\", expectedPeers)\n}\n"
  },
  {
    "path": "hscontrol/mapper/batcher_scale_bench_test.go",
    "content": "package mapper\n\n// Scale benchmarks for the batcher system.\n//\n// These benchmarks systematically increase node counts to find scaling limits\n// and identify bottlenecks. Organized into tiers:\n//\n// Tier 1 - O(1) operations: should stay flat regardless of node count\n// Tier 2 - O(N) lightweight: batch queuing and processing (no MapResponse generation)\n// Tier 3 - O(N) heavier: map building, peer diff, peer tracking\n// Tier 4 - Concurrent contention: multi-goroutine access under load\n//\n// Node count progression: 100, 500, 1000, 2000, 5000, 10000, 20000, 50000\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/types/change\"\n\t\"github.com/rs/zerolog\"\n\t\"tailscale.com/tailcfg\"\n)\n\n// scaleCounts defines the node counts used across all scaling benchmarks.\n// Tier 1 (O(1)) tests up to 50k; Tier 2-4 test up to 10k-20k.\nvar (\n\tscaleCountsO1     = []int{100, 500, 1000, 2000, 5000, 10000, 20000, 50000}\n\tscaleCountsLinear = []int{100, 500, 1000, 2000, 5000, 10000}\n\tscaleCountsHeavy  = []int{100, 500, 1000, 2000, 5000, 10000}\n\tscaleCountsConc   = []int{100, 500, 1000, 2000, 5000}\n)\n\n// ============================================================================\n// Tier 1: O(1) Operations — should scale flat\n// ============================================================================\n\n// BenchmarkScale_IsConnected tests single-node lookup at increasing map sizes.\nfunc BenchmarkScale_IsConnected(b *testing.B) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, n := range scaleCountsO1 {\n\t\tb.Run(strconv.Itoa(n), func(b *testing.B) {\n\t\t\tbatcher, _ := benchBatcher(n, 1)\n\n\t\t\tdefer func() {\n\t\t\t\tclose(batcher.done)\n\t\t\t\tbatcher.tick.Stop()\n\t\t\t}()\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor i := range b.N {\n\t\t\t\tid := types.NodeID(1 + (i % n)) //nolint:gosec\n\t\t\t\t_ = batcher.IsConnected(id)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkScale_AddToBatch_Targeted tests single-node targeted change at\n// increasing map sizes. 
The map size should not affect per-operation cost.\nfunc BenchmarkScale_AddToBatch_Targeted(b *testing.B) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, n := range scaleCountsO1 {\n\t\tb.Run(strconv.Itoa(n), func(b *testing.B) {\n\t\t\tbatcher, _ := benchBatcher(n, 10)\n\n\t\t\tdefer func() {\n\t\t\t\tclose(batcher.done)\n\t\t\t\tbatcher.tick.Stop()\n\t\t\t}()\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor i := range b.N {\n\t\t\t\ttargetID := types.NodeID(1 + (i % n)) //nolint:gosec\n\t\t\t\tch := change.Change{\n\t\t\t\t\tReason:     \"scale-targeted\",\n\t\t\t\t\tTargetNode: targetID,\n\t\t\t\t\tPeerPatches: []*tailcfg.PeerChange{\n\t\t\t\t\t\t{NodeID: tailcfg.NodeID(targetID)}, //nolint:gosec\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\tbatcher.addToBatch(ch)\n\t\t\t\t// Drain every 100 ops to avoid unbounded growth\n\t\t\t\tif i%100 == 99 {\n\t\t\t\t\tbatcher.nodes.Range(func(_ types.NodeID, nc *multiChannelNodeConn) bool {\n\t\t\t\t\t\tnc.drainPending()\n\t\t\t\t\t\treturn true\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkScale_ConnectionChurn tests add/remove connection cycle.\n// The map size should not affect per-operation cost for a single node.\nfunc BenchmarkScale_ConnectionChurn(b *testing.B) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, n := range scaleCountsO1 {\n\t\tb.Run(strconv.Itoa(n), func(b *testing.B) {\n\t\t\tbatcher, channels := benchBatcher(n, 10)\n\n\t\t\tdefer func() {\n\t\t\t\tclose(batcher.done)\n\t\t\t\tbatcher.tick.Stop()\n\t\t\t}()\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor i := range b.N {\n\t\t\t\tid := types.NodeID(1 + (i % n)) //nolint:gosec\n\n\t\t\t\tmc, ok := batcher.nodes.Load(id)\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\toldCh := channels[id]\n\t\t\t\tmc.removeConnectionByChannel(oldCh)\n\n\t\t\t\tnewCh := make(chan *tailcfg.MapResponse, 10)\n\t\t\t\tentry := &connectionEntry{\n\t\t\t\t\tid:      fmt.Sprintf(\"sc-%d\", i),\n\t\t\t\t\tc:       newCh,\n\t\t\t\t\tversion: tailcfg.CapabilityVersion(100),\n\t\t\t\t\tcreated: time.Now(),\n\t\t\t\t}\n\t\t\t\tentry.lastUsed.Store(time.Now().Unix())\n\t\t\t\tmc.addConnection(entry)\n\n\t\t\t\tchannels[id] = newCh\n\t\t\t}\n\t\t})\n\t}\n}\n\n// ============================================================================\n// Tier 2: O(N) Lightweight — batch mechanics without MapResponse generation\n// ============================================================================\n\n// BenchmarkScale_AddToBatch_Broadcast tests broadcasting a change to ALL nodes.\n// Cost should scale linearly with node count.\nfunc BenchmarkScale_AddToBatch_Broadcast(b *testing.B) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, n := range scaleCountsLinear {\n\t\tb.Run(strconv.Itoa(n), func(b *testing.B) {\n\t\t\tbatcher, _ := benchBatcher(n, 10)\n\n\t\t\tdefer func() {\n\t\t\t\tclose(batcher.done)\n\t\t\t\tbatcher.tick.Stop()\n\t\t\t}()\n\n\t\t\tch := change.DERPMap()\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor range b.N {\n\t\t\t\tbatcher.addToBatch(ch)\n\t\t\t\t// Drain to avoid unbounded growth\n\t\t\t\tbatcher.nodes.Range(func(_ types.NodeID, nc *multiChannelNodeConn) bool {\n\t\t\t\t\tnc.drainPending()\n\t\t\t\t\treturn true\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkScale_AddToBatch_FullUpdate tests FullUpdate broadcast cost.\nfunc BenchmarkScale_AddToBatch_FullUpdate(b *testing.B) 
{\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, n := range scaleCountsLinear {\n\t\tb.Run(strconv.Itoa(n), func(b *testing.B) {\n\t\t\tbatcher, _ := benchBatcher(n, 10)\n\n\t\t\tdefer func() {\n\t\t\t\tclose(batcher.done)\n\t\t\t\tbatcher.tick.Stop()\n\t\t\t}()\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor range b.N {\n\t\t\t\tbatcher.addToBatch(change.FullUpdate())\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkScale_ProcessBatchedChanges tests draining pending changes into work queue.\nfunc BenchmarkScale_ProcessBatchedChanges(b *testing.B) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, n := range scaleCountsLinear {\n\t\tb.Run(strconv.Itoa(n), func(b *testing.B) {\n\t\t\tbatcher, _ := benchBatcher(n, 10)\n\t\t\tbatcher.workCh = make(chan work, n*b.N+1)\n\n\t\t\tdefer func() {\n\t\t\t\tclose(batcher.done)\n\t\t\t\tbatcher.tick.Stop()\n\t\t\t}()\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor range b.N {\n\t\t\t\tb.StopTimer()\n\n\t\t\t\tfor i := 1; i <= n; i++ {\n\t\t\t\t\tif nc, ok := batcher.nodes.Load(types.NodeID(i)); ok { //nolint:gosec\n\t\t\t\t\t\tnc.appendPending(change.DERPMap())\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tb.StartTimer()\n\t\t\t\tbatcher.processBatchedChanges()\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkScale_BroadcastToN tests end-to-end: addToBatch + processBatchedChanges.\nfunc BenchmarkScale_BroadcastToN(b *testing.B) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, n := range scaleCountsLinear {\n\t\tb.Run(strconv.Itoa(n), func(b *testing.B) {\n\t\t\tbatcher, _ := benchBatcher(n, 10)\n\t\t\tbatcher.workCh = make(chan work, n*b.N+1)\n\n\t\t\tdefer func() {\n\t\t\t\tclose(batcher.done)\n\t\t\t\tbatcher.tick.Stop()\n\t\t\t}()\n\n\t\t\tch := change.DERPMap()\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor range b.N {\n\t\t\t\tbatcher.addToBatch(ch)\n\t\t\t\tbatcher.processBatchedChanges()\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkScale_SendToAll tests raw channel send cost to N nodes (no batching).\n// This isolates the multiChannelNodeConn.send() cost.\n// Uses large buffered channels to avoid goroutine drain overhead.\nfunc BenchmarkScale_SendToAll(b *testing.B) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, n := range scaleCountsLinear {\n\t\tb.Run(strconv.Itoa(n), func(b *testing.B) {\n\t\t\t// b.N+1 buffer so sends never block\n\t\t\tbatcher, _ := benchBatcher(n, b.N+1)\n\n\t\t\tdefer func() {\n\t\t\t\tclose(batcher.done)\n\t\t\t\tbatcher.tick.Stop()\n\t\t\t}()\n\n\t\t\tdata := testMapResponse()\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor range b.N {\n\t\t\t\tbatcher.nodes.Range(func(_ types.NodeID, mc *multiChannelNodeConn) bool {\n\t\t\t\t\t_ = mc.send(data)\n\t\t\t\t\treturn true\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\n// ============================================================================\n// Tier 3: O(N) Heavier — map building, peer diff, peer tracking\n// ============================================================================\n\n// BenchmarkScale_ConnectedMap tests building the full connected/disconnected map.\nfunc BenchmarkScale_ConnectedMap(b *testing.B) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, n := range scaleCountsHeavy {\n\t\tb.Run(strconv.Itoa(n), func(b *testing.B) {\n\t\t\tbatcher, channels := benchBatcher(n, 1)\n\n\t\t\tdefer func() 
{\n\t\t\t\tclose(batcher.done)\n\t\t\t\tbatcher.tick.Stop()\n\t\t\t}()\n\n\t\t\t// 10% disconnected for realism\n\t\t\tfor i := 1; i <= n; i++ {\n\t\t\t\tif i%10 == 0 {\n\t\t\t\t\tid := types.NodeID(i) //nolint:gosec\n\t\t\t\t\tif mc, ok := batcher.nodes.Load(id); ok {\n\t\t\t\t\t\tmc.removeConnectionByChannel(channels[id])\n\t\t\t\t\t\tmc.markDisconnected()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor range b.N {\n\t\t\t\t_ = batcher.ConnectedMap()\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkScale_ComputePeerDiff tests peer diff computation at scale.\n// The conn tracks N peers; the current list has every 10th removed.\nfunc BenchmarkScale_ComputePeerDiff(b *testing.B) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, n := range scaleCountsHeavy {\n\t\tb.Run(strconv.Itoa(n), func(b *testing.B) {\n\t\t\tmc := newMultiChannelNodeConn(1, nil)\n\n\t\t\t// Track N peers\n\t\t\tfor i := 1; i <= n; i++ {\n\t\t\t\tmc.lastSentPeers.Store(tailcfg.NodeID(i), struct{}{})\n\t\t\t}\n\n\t\t\t// Current: 90% present (every 10th missing)\n\t\t\tcurrent := make([]tailcfg.NodeID, 0, n)\n\t\t\tfor i := 1; i <= n; i++ {\n\t\t\t\tif i%10 != 0 {\n\t\t\t\t\tcurrent = append(current, tailcfg.NodeID(i))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor range b.N {\n\t\t\t\t_ = mc.computePeerDiff(current)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkScale_UpdateSentPeers_Full tests full peer list update.\nfunc BenchmarkScale_UpdateSentPeers_Full(b *testing.B) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, n := range scaleCountsHeavy {\n\t\tb.Run(strconv.Itoa(n), func(b *testing.B) {\n\t\t\tmc := newMultiChannelNodeConn(1, nil)\n\n\t\t\tpeerIDs := make([]tailcfg.NodeID, n)\n\t\t\tfor i := range peerIDs {\n\t\t\t\tpeerIDs[i] = tailcfg.NodeID(i + 1)\n\t\t\t}\n\n\t\t\tresp := testMapResponseWithPeers(peerIDs...)\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor range b.N {\n\t\t\t\tmc.updateSentPeers(resp)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkScale_UpdateSentPeers_Incremental tests incremental peer updates (10% new).\nfunc BenchmarkScale_UpdateSentPeers_Incremental(b *testing.B) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, n := range scaleCountsHeavy {\n\t\tb.Run(strconv.Itoa(n), func(b *testing.B) {\n\t\t\tmc := newMultiChannelNodeConn(1, nil)\n\n\t\t\t// Pre-populate\n\t\t\tfor i := 1; i <= n; i++ {\n\t\t\t\tmc.lastSentPeers.Store(tailcfg.NodeID(i), struct{}{})\n\t\t\t}\n\n\t\t\taddCount := n / 10\n\t\t\tif addCount == 0 {\n\t\t\t\taddCount = 1\n\t\t\t}\n\n\t\t\tresp := testMapResponse()\n\n\t\t\tresp.PeersChanged = make([]*tailcfg.Node, addCount)\n\t\t\tfor i := range addCount {\n\t\t\t\tresp.PeersChanged[i] = &tailcfg.Node{ID: tailcfg.NodeID(n + i + 1)}\n\t\t\t}\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor range b.N {\n\t\t\t\tmc.updateSentPeers(resp)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkScale_MultiChannelBroadcast tests sending to N nodes, each with\n// ~1.7 connections on average (every 3rd node has 3 connections, the rest 1).\n// Uses large buffered channels to avoid goroutine drain overhead.\nfunc BenchmarkScale_MultiChannelBroadcast(b *testing.B) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, n := range scaleCountsHeavy {\n\t\tb.Run(strconv.Itoa(n), func(b *testing.B) {\n\t\t\t// Use b.N+1 buffer so sends never block\n\t\t\tbatcher, _ := benchBatcher(n, b.N+1)\n\n\t\t\tdefer func() 
{\n\t\t\t\tclose(batcher.done)\n\t\t\t\tbatcher.tick.Stop()\n\t\t\t}()\n\n\t\t\t// Add extra connections to every 3rd node (also buffered)\n\t\t\tfor i := 1; i <= n; i++ {\n\t\t\t\tif i%3 == 0 {\n\t\t\t\t\tif mc, ok := batcher.nodes.Load(types.NodeID(i)); ok { //nolint:gosec\n\t\t\t\t\t\tfor j := range 2 {\n\t\t\t\t\t\t\tch := make(chan *tailcfg.MapResponse, b.N+1)\n\t\t\t\t\t\t\tentry := &connectionEntry{\n\t\t\t\t\t\t\t\tid:      fmt.Sprintf(\"extra-%d-%d\", i, j),\n\t\t\t\t\t\t\t\tc:       ch,\n\t\t\t\t\t\t\t\tversion: tailcfg.CapabilityVersion(100),\n\t\t\t\t\t\t\t\tcreated: time.Now(),\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tentry.lastUsed.Store(time.Now().Unix())\n\t\t\t\t\t\t\tmc.addConnection(entry)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tdata := testMapResponse()\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor range b.N {\n\t\t\t\tbatcher.nodes.Range(func(_ types.NodeID, mc *multiChannelNodeConn) bool {\n\t\t\t\t\t_ = mc.send(data)\n\t\t\t\t\treturn true\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\n// ============================================================================\n// Tier 4: Concurrent Contention — multi-goroutine access\n// ============================================================================\n\n// BenchmarkScale_ConcurrentAddToBatch tests parallel addToBatch throughput.\nfunc BenchmarkScale_ConcurrentAddToBatch(b *testing.B) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, n := range scaleCountsConc {\n\t\tb.Run(strconv.Itoa(n), func(b *testing.B) {\n\t\t\tbatcher, _ := benchBatcher(n, 10)\n\n\t\t\tdrainDone := make(chan struct{})\n\n\t\t\tgo func() {\n\t\t\t\tdefer close(drainDone)\n\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-batcher.done:\n\t\t\t\t\t\treturn\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tbatcher.nodes.Range(func(_ types.NodeID, nc *multiChannelNodeConn) bool {\n\t\t\t\t\t\t\tnc.drainPending()\n\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t})\n\t\t\t\t\t\ttime.Sleep(time.Millisecond) //nolint:forbidigo\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tch := change.DERPMap()\n\n\t\t\tb.ResetTimer()\n\t\t\tb.RunParallel(func(pb *testing.PB) {\n\t\t\t\tfor pb.Next() {\n\t\t\t\t\tbatcher.addToBatch(ch)\n\t\t\t\t}\n\t\t\t})\n\t\t\tb.StopTimer()\n\n\t\t\tclose(batcher.done)\n\t\t\t<-drainDone\n\n\t\t\tbatcher.done = make(chan struct{})\n\t\t\tbatcher.tick.Stop()\n\t\t})\n\t}\n}\n\n// BenchmarkScale_ConcurrentSendAndChurn tests the production hot path:\n// sending to all nodes while 10% of connections are churning concurrently.\n// Uses large buffered channels to avoid goroutine drain overhead.\nfunc BenchmarkScale_ConcurrentSendAndChurn(b *testing.B) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, n := range scaleCountsConc {\n\t\tb.Run(strconv.Itoa(n), func(b *testing.B) {\n\t\t\tbatcher, channels := benchBatcher(n, b.N+1)\n\n\t\t\tvar mu sync.Mutex\n\n\t\t\tstopChurn := make(chan struct{})\n\n\t\t\tgo func() {\n\t\t\t\ti := 0\n\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-stopChurn:\n\t\t\t\t\t\treturn\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tid := types.NodeID(1 + (i % n)) //nolint:gosec\n\t\t\t\t\t\tif i%10 == 0 {\n\t\t\t\t\t\t\tmc, ok := batcher.nodes.Load(id)\n\t\t\t\t\t\t\tif ok {\n\t\t\t\t\t\t\t\tmu.Lock()\n\t\t\t\t\t\t\t\toldCh := channels[id]\n\t\t\t\t\t\t\t\tmu.Unlock()\n\t\t\t\t\t\t\t\tmc.removeConnectionByChannel(oldCh)\n\n\t\t\t\t\t\t\t\tnewCh := make(chan *tailcfg.MapResponse, b.N+1)\n\t\t\t\t\t\t\t\tentry := 
&connectionEntry{\n\t\t\t\t\t\t\t\t\tid:      fmt.Sprintf(\"sc-churn-%d\", i),\n\t\t\t\t\t\t\t\t\tc:       newCh,\n\t\t\t\t\t\t\t\t\tversion: tailcfg.CapabilityVersion(100),\n\t\t\t\t\t\t\t\t\tcreated: time.Now(),\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tentry.lastUsed.Store(time.Now().Unix())\n\t\t\t\t\t\t\t\tmc.addConnection(entry)\n\t\t\t\t\t\t\t\tmu.Lock()\n\t\t\t\t\t\t\t\tchannels[id] = newCh\n\t\t\t\t\t\t\t\tmu.Unlock()\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\ti++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tdata := testMapResponse()\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor range b.N {\n\t\t\t\tbatcher.nodes.Range(func(_ types.NodeID, mc *multiChannelNodeConn) bool {\n\t\t\t\t\t_ = mc.send(data)\n\t\t\t\t\treturn true\n\t\t\t\t})\n\t\t\t}\n\n\t\t\tb.StopTimer()\n\t\t\tclose(stopChurn)\n\t\t\tclose(batcher.done)\n\t\t\tbatcher.tick.Stop()\n\t\t})\n\t}\n}\n\n// BenchmarkScale_MixedWorkload simulates a realistic production workload:\n// - 70% targeted changes (single node updates)\n// - 20% DERP map changes (broadcast)\n// - 10% full updates (broadcast with full map)\n// All while 10% of connections are churning.\nfunc BenchmarkScale_MixedWorkload(b *testing.B) {\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, n := range scaleCountsConc {\n\t\tb.Run(strconv.Itoa(n), func(b *testing.B) {\n\t\t\tbatcher, channels := benchBatcher(n, 10)\n\t\t\tbatcher.workCh = make(chan work, n*100+1)\n\n\t\t\tvar mu sync.Mutex\n\n\t\t\tstopChurn := make(chan struct{})\n\n\t\t\t// Background churn on 10% of nodes\n\t\t\tgo func() {\n\t\t\t\ti := 0\n\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-stopChurn:\n\t\t\t\t\t\treturn\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tid := types.NodeID(1 + (i % n)) //nolint:gosec\n\t\t\t\t\t\tif i%10 == 0 {\n\t\t\t\t\t\t\tmc, ok := batcher.nodes.Load(id)\n\t\t\t\t\t\t\tif ok {\n\t\t\t\t\t\t\t\tmu.Lock()\n\t\t\t\t\t\t\t\toldCh := channels[id]\n\t\t\t\t\t\t\t\tmu.Unlock()\n\t\t\t\t\t\t\t\tmc.removeConnectionByChannel(oldCh)\n\n\t\t\t\t\t\t\t\tnewCh := make(chan *tailcfg.MapResponse, 10)\n\t\t\t\t\t\t\t\tentry := &connectionEntry{\n\t\t\t\t\t\t\t\t\tid:      fmt.Sprintf(\"mix-churn-%d\", i),\n\t\t\t\t\t\t\t\t\tc:       newCh,\n\t\t\t\t\t\t\t\t\tversion: tailcfg.CapabilityVersion(100),\n\t\t\t\t\t\t\t\t\tcreated: time.Now(),\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tentry.lastUsed.Store(time.Now().Unix())\n\t\t\t\t\t\t\t\tmc.addConnection(entry)\n\t\t\t\t\t\t\t\tmu.Lock()\n\t\t\t\t\t\t\t\tchannels[id] = newCh\n\t\t\t\t\t\t\t\tmu.Unlock()\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\ti++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\t// Background batch processor\n\t\t\tstopProc := make(chan struct{})\n\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-stopProc:\n\t\t\t\t\t\treturn\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tbatcher.processBatchedChanges()\n\t\t\t\t\t\ttime.Sleep(time.Millisecond) //nolint:forbidigo\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\t// Background work channel consumer (simulates workers)\n\t\t\tstopWorkers := make(chan struct{})\n\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-batcher.workCh:\n\t\t\t\t\tcase <-stopWorkers:\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor i := range b.N {\n\t\t\t\tswitch {\n\t\t\t\tcase i%10 < 7: // 70% targeted\n\t\t\t\t\ttargetID := types.NodeID(1 + (i % n)) //nolint:gosec\n\t\t\t\t\tbatcher.addToBatch(change.Change{\n\t\t\t\t\t\tReason:     \"mixed-targeted\",\n\t\t\t\t\t\tTargetNode: 
targetID,\n\t\t\t\t\t\tPeerPatches: []*tailcfg.PeerChange{\n\t\t\t\t\t\t\t{NodeID: tailcfg.NodeID(targetID)}, //nolint:gosec\n\t\t\t\t\t\t},\n\t\t\t\t\t})\n\t\t\t\tcase i%10 < 9: // 20% DERP map broadcast\n\t\t\t\t\tbatcher.addToBatch(change.DERPMap())\n\t\t\t\tdefault: // 10% full update\n\t\t\t\t\tbatcher.addToBatch(change.FullUpdate())\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tb.StopTimer()\n\t\t\tclose(stopChurn)\n\t\t\tclose(stopProc)\n\t\t\tclose(stopWorkers)\n\t\t\tclose(batcher.done)\n\t\t\tbatcher.tick.Stop()\n\t\t})\n\t}\n}\n\n// ============================================================================\n// Tier 5: DB-dependent — AddNode with real MapResponse generation\n// ============================================================================\n\n// BenchmarkScale_AddAllNodes measures the cost of connecting ALL N nodes\n// to a batcher backed by a real database. Each AddNode generates an initial\n// MapResponse containing all peer data, so cost is O(N) per node, O(N²) total.\nfunc BenchmarkScale_AddAllNodes(b *testing.B) {\n\tif testing.Short() {\n\t\tb.Skip(\"skipping full pipeline benchmark in short mode\")\n\t}\n\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, nodeCount := range []int{10, 50, 100, 200, 500} {\n\t\tb.Run(strconv.Itoa(nodeCount), func(b *testing.B) {\n\t\t\ttestData, cleanup := setupBatcherWithTestData(b, NewBatcherAndMapper, 1, nodeCount, largeBufferSize)\n\t\t\tdefer cleanup()\n\n\t\t\tbatcher := testData.Batcher\n\t\t\tallNodes := testData.Nodes\n\n\t\t\tfor i := range allNodes {\n\t\t\t\tallNodes[i].start()\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tfor i := range allNodes {\n\t\t\t\t\tallNodes[i].cleanup()\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor range b.N {\n\t\t\t\tfor i := range allNodes {\n\t\t\t\t\tnode := &allNodes[i]\n\t\t\t\t\t_ = batcher.AddNode(node.n.ID, node.ch, tailcfg.CapabilityVersion(100), nil)\n\t\t\t\t}\n\n\t\t\t\tb.StopTimer()\n\n\t\t\t\tfor i := range allNodes {\n\t\t\t\t\tnode := &allNodes[i]\n\t\t\t\t\tbatcher.RemoveNode(node.n.ID, node.ch)\n\t\t\t\t}\n\n\t\t\t\tfor i := range allNodes {\n\t\t\t\t\tfor {\n\t\t\t\t\t\tselect {\n\t\t\t\t\t\tcase <-allNodes[i].ch:\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tgoto drained\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\tdrained:\n\t\t\t\t}\n\n\t\t\t\tb.StartTimer()\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkScale_SingleAddNode measures the cost of adding ONE node to an\n// already-populated batcher. This is the real production scenario: a new node\n// joins an existing network. 
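This is the per-join slice of the\n// O(N²) total measured by BenchmarkScale_AddAllNodes above. 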
The cost should scale with the number of existing\n// peers since the initial MapResponse includes all peer data.\nfunc BenchmarkScale_SingleAddNode(b *testing.B) {\n\tif testing.Short() {\n\t\tb.Skip(\"skipping full pipeline benchmark in short mode\")\n\t}\n\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, nodeCount := range []int{10, 50, 100, 200, 500, 1000} {\n\t\tb.Run(strconv.Itoa(nodeCount), func(b *testing.B) {\n\t\t\ttestData, cleanup := setupBatcherWithTestData(b, NewBatcherAndMapper, 1, nodeCount, largeBufferSize)\n\t\t\tdefer cleanup()\n\n\t\t\tbatcher := testData.Batcher\n\t\t\tallNodes := testData.Nodes\n\n\t\t\tfor i := range allNodes {\n\t\t\t\tallNodes[i].start()\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tfor i := range allNodes {\n\t\t\t\t\tallNodes[i].cleanup()\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\t// Connect all nodes except the last one\n\t\t\tfor i := range len(allNodes) - 1 {\n\t\t\t\tnode := &allNodes[i]\n\n\t\t\t\terr := batcher.AddNode(node.n.ID, node.ch, tailcfg.CapabilityVersion(100), nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\tb.Fatalf(\"failed to add node %d: %v\", i, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ttime.Sleep(200 * time.Millisecond) //nolint:forbidigo\n\n\t\t\t// Benchmark: repeatedly add and remove the last node\n\t\t\tlastNode := &allNodes[len(allNodes)-1]\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor range b.N {\n\t\t\t\t_ = batcher.AddNode(lastNode.n.ID, lastNode.ch, tailcfg.CapabilityVersion(100), nil)\n\n\t\t\t\tb.StopTimer()\n\t\t\t\tbatcher.RemoveNode(lastNode.n.ID, lastNode.ch)\n\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-lastNode.ch:\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tgoto drainDone\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\tdrainDone:\n\t\t\t\tb.StartTimer()\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkScale_MapResponse_DERPMap measures MapResponse generation for a\n// DERPMap change. This is a lightweight change that doesn't touch peers.\nfunc BenchmarkScale_MapResponse_DERPMap(b *testing.B) {\n\tif testing.Short() {\n\t\tb.Skip(\"skipping full pipeline benchmark in short mode\")\n\t}\n\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, nodeCount := range []int{10, 50, 100, 200, 500} {\n\t\tb.Run(strconv.Itoa(nodeCount), func(b *testing.B) {\n\t\t\ttestData, cleanup := setupBatcherWithTestData(b, NewBatcherAndMapper, 1, nodeCount, largeBufferSize)\n\t\t\tdefer cleanup()\n\n\t\t\tbatcher := testData.Batcher\n\t\t\tallNodes := testData.Nodes\n\n\t\t\tfor i := range allNodes {\n\t\t\t\tallNodes[i].start()\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tfor i := range allNodes {\n\t\t\t\t\tallNodes[i].cleanup()\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tfor i := range allNodes {\n\t\t\t\tnode := &allNodes[i]\n\n\t\t\t\terr := batcher.AddNode(node.n.ID, node.ch, tailcfg.CapabilityVersion(100), nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\tb.Fatalf(\"failed to add node %d: %v\", i, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ttime.Sleep(200 * time.Millisecond) //nolint:forbidigo\n\n\t\t\tch := change.DERPMap()\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor i := range b.N {\n\t\t\t\tnodeIdx := i % len(allNodes)\n\t\t\t\t_, _ = batcher.MapResponseFromChange(allNodes[nodeIdx].n.ID, ch)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkScale_MapResponse_FullUpdate measures MapResponse generation for a\n// FullUpdate change. 
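Each iteration requests a\n// complete map for one node, cycling through all connected nodes. 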
This forces full peer serialization — the primary bottleneck\n// for large networks.\nfunc BenchmarkScale_MapResponse_FullUpdate(b *testing.B) {\n\tif testing.Short() {\n\t\tb.Skip(\"skipping full pipeline benchmark in short mode\")\n\t}\n\n\tzerolog.SetGlobalLevel(zerolog.Disabled)\n\tdefer zerolog.SetGlobalLevel(zerolog.DebugLevel)\n\n\tfor _, nodeCount := range []int{10, 50, 100, 200, 500} {\n\t\tb.Run(strconv.Itoa(nodeCount), func(b *testing.B) {\n\t\t\ttestData, cleanup := setupBatcherWithTestData(b, NewBatcherAndMapper, 1, nodeCount, largeBufferSize)\n\t\t\tdefer cleanup()\n\n\t\t\tbatcher := testData.Batcher\n\t\t\tallNodes := testData.Nodes\n\n\t\t\tfor i := range allNodes {\n\t\t\t\tallNodes[i].start()\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tfor i := range allNodes {\n\t\t\t\t\tallNodes[i].cleanup()\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tfor i := range allNodes {\n\t\t\t\tnode := &allNodes[i]\n\n\t\t\t\terr := batcher.AddNode(node.n.ID, node.ch, tailcfg.CapabilityVersion(100), nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\tb.Fatalf(\"failed to add node %d: %v\", i, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ttime.Sleep(200 * time.Millisecond) //nolint:forbidigo\n\n\t\t\tch := change.FullUpdate()\n\n\t\t\tb.ResetTimer()\n\n\t\t\tfor i := range b.N {\n\t\t\t\tnodeIdx := i % len(allNodes)\n\t\t\t\t_, _ = batcher.MapResponseFromChange(allNodes[nodeIdx].n.ID, ch)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "hscontrol/mapper/batcher_test.go",
    "content": "package mapper\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net/netip\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/db\"\n\t\"github.com/juanfont/headscale/hscontrol/derp\"\n\t\"github.com/juanfont/headscale/hscontrol/state\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/types/change\"\n\t\"github.com/rs/zerolog\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/tailcfg\"\n\t\"zgo.at/zcache/v2\"\n)\n\nvar errNodeNotFoundAfterAdd = errors.New(\"node not found after adding to batcher\")\n\ntype batcherFunc func(cfg *types.Config, state *state.State) *Batcher\n\n// batcherTestCase defines a batcher function with a descriptive name for testing.\ntype batcherTestCase struct {\n\tname string\n\tfn   batcherFunc\n}\n\n// testBatcherWrapper wraps a real batcher to add online/offline notifications\n// that would normally be sent by poll.go in production.\ntype testBatcherWrapper struct {\n\t*Batcher\n\n\tstate *state.State\n\n\t// connectGens tracks per-node connect generations so RemoveNode can pass\n\t// the correct generation to State.Disconnect(), matching production behavior.\n\tconnectGens sync.Map // types.NodeID → uint64\n}\n\nfunc (t *testBatcherWrapper) AddNode(id types.NodeID, c chan<- *tailcfg.MapResponse, version tailcfg.CapabilityVersion, stop func()) error {\n\t// Mark node as online in state before AddNode to match production behavior\n\t// This ensures the NodeStore has correct online status for change processing\n\tif t.state != nil {\n\t\t// Use Connect to properly mark node online in NodeStore and track the\n\t\t// generation so RemoveNode can pass it to Disconnect().\n\t\t_, gen := t.state.Connect(id)\n\t\tt.connectGens.Store(id, gen)\n\t}\n\n\t// First add the node to the real batcher\n\terr := t.Batcher.AddNode(id, c, version, stop)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Send the online notification that poll.go would normally send\n\t// This ensures other nodes get notified about this node coming online\n\tnode, ok := t.state.GetNodeByID(id)\n\tif !ok {\n\t\treturn fmt.Errorf(\"%w: %d\", errNodeNotFoundAfterAdd, id)\n\t}\n\n\tt.AddWork(change.NodeOnlineFor(node))\n\n\treturn nil\n}\n\nfunc (t *testBatcherWrapper) RemoveNode(id types.NodeID, c chan<- *tailcfg.MapResponse) bool {\n\t// Mark node as offline in state BEFORE removing from batcher\n\t// This ensures the NodeStore has correct offline status when the change is processed\n\tif t.state != nil {\n\t\tvar gen uint64\n\n\t\tif v, ok := t.connectGens.LoadAndDelete(id); ok {\n\t\t\tif g, ok := v.(uint64); ok {\n\t\t\t\tgen = g\n\t\t\t}\n\t\t}\n\n\t\t_, _ = t.state.Disconnect(id, gen)\n\t}\n\n\t// Send the offline notification that poll.go would normally send\n\t// Do this BEFORE removing from batcher so the change can be processed\n\tnode, ok := t.state.GetNodeByID(id)\n\tif ok {\n\t\tt.AddWork(change.NodeOfflineFor(node))\n\t}\n\n\t// Finally remove from the real batcher\n\treturn t.Batcher.RemoveNode(id, c)\n}\n\n// wrapBatcherForTest wraps a batcher with test-specific behavior.\nfunc wrapBatcherForTest(b *Batcher, state *state.State) *testBatcherWrapper {\n\treturn &testBatcherWrapper{Batcher: b, state: state}\n}\n\n// allBatcherFunctions contains all batcher implementations to test.\nvar allBatcherFunctions = []batcherTestCase{\n\t{\"Default\", NewBatcherAndMapper},\n}\n\n// emptyCache creates an empty 
registration cache for testing.\nfunc emptyCache() *zcache.Cache[types.AuthID, types.AuthRequest] {\n\treturn zcache.New[types.AuthID, types.AuthRequest](time.Minute, time.Hour)\n}\n\n// Test configuration constants.\nconst (\n\t// Test data configuration.\n\ttestUserCount    = 3\n\ttestNodesPerUser = 2\n\n\t// Timing configuration.\n\ttestTimeout     = 120 * time.Second // Increased for more intensive tests\n\tupdateTimeout   = 5 * time.Second\n\tdeadlockTimeout = 30 * time.Second\n\n\t// Channel configuration.\n\tnormalBufferSize = 50\n\tsmallBufferSize  = 3\n\ttinyBufferSize   = 1 // For maximum contention\n\tlargeBufferSize  = 200\n)\n\n// TestData contains all test entities created for a test scenario.\ntype TestData struct {\n\tDatabase *db.HSDatabase\n\tUsers    []*types.User\n\tNodes    []node\n\tState    *state.State\n\tConfig   *types.Config\n\tBatcher  *testBatcherWrapper\n}\n\ntype node struct {\n\tn  *types.Node\n\tch chan *tailcfg.MapResponse\n\n\t// Update tracking (all accessed atomically for thread safety)\n\tupdateCount   int64\n\tpatchCount    int64\n\tfullCount     int64\n\tmaxPeersCount atomic.Int64\n\tlastPeerCount atomic.Int64\n\tstop          chan struct{}\n\tstopped       chan struct{}\n}\n\n// setupBatcherWithTestData creates a comprehensive test environment with real\n// database test data including users and registered nodes.\n//\n// This helper creates a database, populates it with test data, then creates\n// a state and batcher using the SAME database for testing. This provides real\n// node data for testing full map responses and comprehensive update scenarios.\n//\n// Returns TestData struct containing all created entities and a cleanup function.\nfunc setupBatcherWithTestData(\n\tt testing.TB,\n\tbf batcherFunc,\n\tuserCount, nodesPerUser, bufferSize int,\n) (*TestData, func()) {\n\tt.Helper()\n\n\t// Create database and populate with test data first\n\ttmpDir := t.TempDir()\n\tdbPath := tmpDir + \"/headscale_test.db\"\n\n\tprefixV4 := netip.MustParsePrefix(\"100.64.0.0/10\")\n\tprefixV6 := netip.MustParsePrefix(\"fd7a:115c:a1e0::/48\")\n\n\tcfg := &types.Config{\n\t\tDatabase: types.DatabaseConfig{\n\t\t\tType: types.DatabaseSqlite,\n\t\t\tSqlite: types.SqliteConfig{\n\t\t\t\tPath: dbPath,\n\t\t\t},\n\t\t},\n\t\tPrefixV4:     &prefixV4,\n\t\tPrefixV6:     &prefixV6,\n\t\tIPAllocation: types.IPAllocationStrategySequential,\n\t\tBaseDomain:   \"headscale.test\",\n\t\tPolicy: types.PolicyConfig{\n\t\t\tMode: types.PolicyModeDB,\n\t\t},\n\t\tDERP: types.DERPConfig{\n\t\t\tServerEnabled: false,\n\t\t\tDERPMap: &tailcfg.DERPMap{\n\t\t\t\tRegions: map[int]*tailcfg.DERPRegion{\n\t\t\t\t\t999: {\n\t\t\t\t\t\tRegionID: 999,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tTuning: types.Tuning{\n\t\t\tBatchChangeDelay:      10 * time.Millisecond,\n\t\t\tBatcherWorkers:        types.DefaultBatcherWorkers(), // Use same logic as config.go\n\t\t\tNodeStoreBatchSize:    state.TestBatchSize,\n\t\t\tNodeStoreBatchTimeout: state.TestBatchTimeout,\n\t\t},\n\t}\n\n\t// Create database and populate it with test data\n\tdatabase, err := db.NewHeadscaleDatabase(\n\t\tcfg,\n\t\temptyCache(),\n\t)\n\tif err != nil {\n\t\tt.Fatalf(\"setting up database: %s\", err)\n\t}\n\n\t// Create test users and nodes in the database\n\tusers := database.CreateUsersForTest(userCount, \"testuser\")\n\n\tallNodes := make([]node, 0, userCount*nodesPerUser)\n\tfor _, user := range users {\n\t\tdbNodes := database.CreateRegisteredNodesForTest(user, nodesPerUser, \"node\")\n\t\tfor i := range dbNodes 
{\n\t\t\tallNodes = append(allNodes, node{\n\t\t\t\tn:  dbNodes[i],\n\t\t\t\tch: make(chan *tailcfg.MapResponse, bufferSize),\n\t\t\t})\n\t\t}\n\t}\n\n\t// Now create state using the same database\n\tstate, err := state.NewState(cfg)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create state: %v\", err)\n\t}\n\n\tderpMap, err := derp.GetDERPMap(cfg.DERP)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, derpMap)\n\n\tstate.SetDERPMap(derpMap)\n\n\t// Set up a permissive policy that allows all communication for testing\n\tallowAllPolicy := `{\n\t\t\"acls\": [\n\t\t\t{\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"src\": [\"*\"],\n\t\t\t\t\"dst\": [\"*:*\"]\n\t\t\t}\n\t\t]\n\t}`\n\n\t_, err = state.SetPolicy([]byte(allowAllPolicy))\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to set allow-all policy: %v\", err)\n\t}\n\n\t// Create batcher with the state and wrap it for testing\n\tbatcher := wrapBatcherForTest(bf(cfg, state), state)\n\tbatcher.Start()\n\n\ttestData := &TestData{\n\t\tDatabase: database,\n\t\tUsers:    users,\n\t\tNodes:    allNodes,\n\t\tState:    state,\n\t\tConfig:   cfg,\n\t\tBatcher:  batcher,\n\t}\n\n\tcleanup := func() {\n\t\tbatcher.Close()\n\t\tstate.Close()\n\t\tdatabase.Close()\n\t}\n\n\treturn testData, cleanup\n}\n\ntype UpdateStats struct {\n\tTotalUpdates int\n\tUpdateSizes  []int\n\tLastUpdate   time.Time\n}\n\n// updateTracker provides thread-safe tracking of updates per node.\ntype updateTracker struct {\n\tmu    sync.RWMutex\n\tstats map[types.NodeID]*UpdateStats\n}\n\n// newUpdateTracker creates a new update tracker.\nfunc newUpdateTracker() *updateTracker {\n\treturn &updateTracker{\n\t\tstats: make(map[types.NodeID]*UpdateStats),\n\t}\n}\n\n// recordUpdate records an update for a specific node.\nfunc (ut *updateTracker) recordUpdate(nodeID types.NodeID, updateSize int) {\n\tut.mu.Lock()\n\tdefer ut.mu.Unlock()\n\n\tif ut.stats[nodeID] == nil {\n\t\tut.stats[nodeID] = &UpdateStats{}\n\t}\n\n\tstats := ut.stats[nodeID]\n\tstats.TotalUpdates++\n\tstats.UpdateSizes = append(stats.UpdateSizes, updateSize)\n\tstats.LastUpdate = time.Now()\n}\n\n// getStats returns a copy of the statistics for a node.\n//\n//nolint:unused\nfunc (ut *updateTracker) getStats(nodeID types.NodeID) UpdateStats {\n\tut.mu.RLock()\n\tdefer ut.mu.RUnlock()\n\n\tif stats, exists := ut.stats[nodeID]; exists {\n\t\t// Return a copy to avoid race conditions\n\t\treturn UpdateStats{\n\t\t\tTotalUpdates: stats.TotalUpdates,\n\t\t\tUpdateSizes:  append([]int{}, stats.UpdateSizes...),\n\t\t\tLastUpdate:   stats.LastUpdate,\n\t\t}\n\t}\n\n\treturn UpdateStats{}\n}\n\n// getAllStats returns a copy of all statistics.\nfunc (ut *updateTracker) getAllStats() map[types.NodeID]UpdateStats {\n\tut.mu.RLock()\n\tdefer ut.mu.RUnlock()\n\n\tresult := make(map[types.NodeID]UpdateStats)\n\tfor nodeID, stats := range ut.stats {\n\t\tresult[nodeID] = UpdateStats{\n\t\t\tTotalUpdates: stats.TotalUpdates,\n\t\t\tUpdateSizes:  append([]int{}, stats.UpdateSizes...),\n\t\t\tLastUpdate:   stats.LastUpdate,\n\t\t}\n\t}\n\n\treturn result\n}\n\nfunc assertDERPMapResponse(t *testing.T, resp *tailcfg.MapResponse) {\n\tt.Helper()\n\n\tassert.NotNil(t, resp.DERPMap, \"DERPMap should not be nil in response\")\n\tassert.Len(t, resp.DERPMap.Regions, 1, \"Expected exactly one DERP region in response\")\n\tassert.Equal(t, 999, resp.DERPMap.Regions[999].RegionID, \"Expected DERP region ID to be 999\")\n}\n\nfunc assertOnlineMapResponse(t *testing.T, resp *tailcfg.MapResponse, expected bool) {\n\tt.Helper()\n\n\t// Check for peer 
changes patch (new online/offline notifications use patches)\n\tif len(resp.PeersChangedPatch) > 0 {\n\t\trequire.Len(t, resp.PeersChangedPatch, 1)\n\t\tassert.Equal(t, expected, *resp.PeersChangedPatch[0].Online)\n\n\t\treturn\n\t}\n\n\t// Fallback to old format for backwards compatibility\n\trequire.Len(t, resp.Peers, 1)\n\tassert.Equal(t, expected, resp.Peers[0].Online)\n}\n\n// UpdateInfo contains parsed information about an update.\ntype UpdateInfo struct {\n\tIsFull     bool\n\tIsPatch    bool\n\tIsDERP     bool\n\tPeerCount  int\n\tPatchCount int\n}\n\n// parseUpdateAndAnalyze parses an update and returns detailed information.\nfunc parseUpdateAndAnalyze(resp *tailcfg.MapResponse) UpdateInfo {\n\treturn UpdateInfo{\n\t\tPeerCount:  len(resp.Peers),\n\t\tPatchCount: len(resp.PeersChangedPatch),\n\t\tIsFull:     len(resp.Peers) > 0,\n\t\tIsPatch:    len(resp.PeersChangedPatch) > 0,\n\t\tIsDERP:     resp.DERPMap != nil,\n\t}\n}\n\n// start begins consuming updates from the node's channel and tracking stats.\nfunc (n *node) start() {\n\t// Prevent multiple starts on the same node\n\tif n.stop != nil {\n\t\treturn // Already started\n\t}\n\n\tn.stop = make(chan struct{})\n\tn.stopped = make(chan struct{})\n\n\tgo func() {\n\t\tdefer close(n.stopped)\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase data := <-n.ch:\n\t\t\t\tatomic.AddInt64(&n.updateCount, 1)\n\n\t\t\t\t// Parse update and track detailed stats\n\t\t\t\tinfo := parseUpdateAndAnalyze(data)\n\t\t\t\t{\n\t\t\t\t\t// Track update types\n\t\t\t\t\tif info.IsFull {\n\t\t\t\t\t\tatomic.AddInt64(&n.fullCount, 1)\n\t\t\t\t\t\tn.lastPeerCount.Store(int64(info.PeerCount))\n\t\t\t\t\t\t// Update max peers seen using compare-and-swap for thread safety\n\t\t\t\t\t\tfor {\n\t\t\t\t\t\t\tcurrent := n.maxPeersCount.Load()\n\t\t\t\t\t\t\tif int64(info.PeerCount) <= current {\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif n.maxPeersCount.CompareAndSwap(current, int64(info.PeerCount)) {\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif info.IsPatch {\n\t\t\t\t\t\tatomic.AddInt64(&n.patchCount, 1)\n\t\t\t\t\t\t// For patches, we track how many patch items using compare-and-swap\n\t\t\t\t\t\tfor {\n\t\t\t\t\t\t\tcurrent := n.maxPeersCount.Load()\n\t\t\t\t\t\t\tif int64(info.PatchCount) <= current {\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif n.maxPeersCount.CompareAndSwap(current, int64(info.PatchCount)) {\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\tcase <-n.stop:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n\n// NodeStats contains final statistics for a node.\ntype NodeStats struct {\n\tTotalUpdates  int64\n\tPatchUpdates  int64\n\tFullUpdates   int64\n\tMaxPeersSeen  int\n\tLastPeerCount int\n}\n\n// cleanup stops the update consumer and returns final stats.\nfunc (n *node) cleanup() NodeStats {\n\tif n.stop != nil {\n\t\tclose(n.stop)\n\t\t<-n.stopped // Wait for goroutine to finish\n\t}\n\n\treturn NodeStats{\n\t\tTotalUpdates:  atomic.LoadInt64(&n.updateCount),\n\t\tPatchUpdates:  atomic.LoadInt64(&n.patchCount),\n\t\tFullUpdates:   atomic.LoadInt64(&n.fullCount),\n\t\tMaxPeersSeen:  int(n.maxPeersCount.Load()),\n\t\tLastPeerCount: int(n.lastPeerCount.Load()),\n\t}\n}\n\n// validateUpdateContent validates that the update data contains a proper MapResponse.\nfunc validateUpdateContent(resp *tailcfg.MapResponse) (bool, string) {\n\tif resp == nil {\n\t\treturn false, \"nil MapResponse\"\n\t}\n\n\t// Simple validation - just check if it's a valid 
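(i.e. non-nil) 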
MapResponse\n\treturn true, \"valid\"\n}\n\n// TestEnhancedNodeTracking verifies that the enhanced node tracking works correctly.\nfunc TestEnhancedNodeTracking(t *testing.T) {\n\t// Create a simple test node\n\ttestNode := node{\n\t\tn:  &types.Node{ID: 1},\n\t\tch: make(chan *tailcfg.MapResponse, 10),\n\t}\n\n\t// Start the enhanced tracking\n\ttestNode.start()\n\n\t// Create a simple MapResponse that should be parsed correctly\n\tresp := tailcfg.MapResponse{\n\t\tKeepAlive: false,\n\t\tPeers: []*tailcfg.Node{\n\t\t\t{ID: 2},\n\t\t\t{ID: 3},\n\t\t},\n\t}\n\n\t// Send the data to the node's channel\n\ttestNode.ch <- &resp\n\n\t// Wait for tracking goroutine to process the update\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tassert.GreaterOrEqual(c, atomic.LoadInt64(&testNode.updateCount), int64(1), \"should have processed the update\")\n\t}, time.Second, 10*time.Millisecond, \"waiting for update to be processed\")\n\n\t// Check stats\n\tstats := testNode.cleanup()\n\tt.Logf(\"Enhanced tracking stats: Total=%d, Full=%d, Patch=%d, MaxPeers=%d\",\n\t\tstats.TotalUpdates, stats.FullUpdates, stats.PatchUpdates, stats.MaxPeersSeen)\n\n\trequire.Equal(t, int64(1), stats.TotalUpdates, \"Expected 1 total update\")\n\trequire.Equal(t, int64(1), stats.FullUpdates, \"Expected 1 full update\")\n\trequire.Equal(t, 2, stats.MaxPeersSeen, \"Expected 2 max peers seen\")\n}\n\n// TestEnhancedTrackingWithBatcher verifies enhanced tracking works with a real batcher.\nfunc TestEnhancedTrackingWithBatcher(t *testing.T) {\n\tfor _, batcherFunc := range allBatcherFunctions {\n\t\tt.Run(batcherFunc.name, func(t *testing.T) {\n\t\t\t// Create test environment with 1 node\n\t\t\ttestData, cleanup := setupBatcherWithTestData(t, batcherFunc.fn, 1, 1, 10)\n\t\t\tdefer cleanup()\n\n\t\t\tbatcher := testData.Batcher\n\t\t\ttestNode := &testData.Nodes[0]\n\n\t\t\tt.Logf(\"Testing enhanced tracking with node ID %d\", testNode.n.ID)\n\n\t\t\t// Start enhanced tracking for the node\n\t\t\ttestNode.start()\n\n\t\t\t// Connect the node to the batcher\n\t\t\t_ = batcher.AddNode(testNode.n.ID, testNode.ch, tailcfg.CapabilityVersion(100), nil)\n\n\t\t\t// Wait for connection to be established\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tassert.True(c, batcher.IsConnected(testNode.n.ID), \"node should be connected\")\n\t\t\t}, time.Second, 10*time.Millisecond, \"waiting for node connection\")\n\n\t\t\t// Generate work and wait for updates to be processed\n\t\t\tbatcher.AddWork(change.FullUpdate())\n\t\t\tbatcher.AddWork(change.PolicyChange())\n\t\t\tbatcher.AddWork(change.DERPMap())\n\n\t\t\t// Wait for updates to be processed (at least 1 update received)\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tassert.GreaterOrEqual(c, atomic.LoadInt64(&testNode.updateCount), int64(1), \"should have received updates\")\n\t\t\t}, time.Second, 10*time.Millisecond, \"waiting for updates to be processed\")\n\n\t\t\t// Check stats\n\t\t\tstats := testNode.cleanup()\n\t\t\tt.Logf(\"Enhanced tracking with batcher: Total=%d, Full=%d, Patch=%d, MaxPeers=%d\",\n\t\t\t\tstats.TotalUpdates, stats.FullUpdates, stats.PatchUpdates, stats.MaxPeersSeen)\n\n\t\t\tif stats.TotalUpdates == 0 {\n\t\t\t\tt.Error(\n\t\t\t\t\t\"Enhanced tracking with batcher received 0 updates - batcher may not be working\",\n\t\t\t\t)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestBatcherScalabilityAllToAll tests the batcher's ability to handle rapid node joins\n// and ensure all nodes can see all other nodes. 
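Every join also enqueues a full\n// update, so the total update fan-out grows roughly quadratically with node\n// count. 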
This is a critical test for mesh network\n// functionality where every node must be able to communicate with every other node.\nfunc TestBatcherScalabilityAllToAll(t *testing.T) {\n\t// Reduce verbose application logging for cleaner test output\n\toriginalLevel := zerolog.GlobalLevel()\n\tdefer zerolog.SetGlobalLevel(originalLevel)\n\n\tzerolog.SetGlobalLevel(zerolog.ErrorLevel)\n\n\t// Test cases: different node counts to stress test the all-to-all connectivity\n\ttestCases := []struct {\n\t\tname      string\n\t\tnodeCount int\n\t}{\n\t\t{\"10_nodes\", 10},   // Quick baseline test\n\t\t{\"100_nodes\", 100}, // Full scalability test ~2 minutes\n\t\t// Large-scale tests commented out - uncomment for scalability testing\n\t\t// {\"1000_nodes\", 1000},  // ~12 minutes\n\t\t// {\"2000_nodes\", 2000},  // ~60+ minutes\n\t\t// {\"5000_nodes\", 5000},  // Not recommended - database bottleneck\n\t}\n\n\tfor _, batcherFunc := range allBatcherFunctions {\n\t\tt.Run(batcherFunc.name, func(t *testing.T) {\n\t\t\tfor _, tc := range testCases {\n\t\t\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\t\t\tt.Logf(\n\t\t\t\t\t\t\"ALL-TO-ALL TEST: %d nodes with %s batcher\",\n\t\t\t\t\t\ttc.nodeCount,\n\t\t\t\t\t\tbatcherFunc.name,\n\t\t\t\t\t)\n\n\t\t\t\t\t// Create test environment - all nodes from same user so they can be peers\n\t\t\t\t\t// We need enough users to support the node count (max 1000 nodes per user)\n\t\t\t\t\tusersNeeded := max(1, (tc.nodeCount+999)/1000)\n\t\t\t\t\tnodesPerUser := (tc.nodeCount + usersNeeded - 1) / usersNeeded\n\n\t\t\t\t\t// Use large buffer to avoid blocking during rapid joins\n\t\t\t\t\t// Buffer needs to handle nodeCount * average_updates_per_node\n\t\t\t\t\t// Estimate: each node receives ~2*nodeCount updates during all-to-all\n\t\t\t\t\t// For very large tests (>1000 nodes), limit buffer to avoid excessive memory\n\t\t\t\t\tbufferSize := max(1000, min(tc.nodeCount*2, 10000))\n\n\t\t\t\t\ttestData, cleanup := setupBatcherWithTestData(\n\t\t\t\t\t\tt,\n\t\t\t\t\t\tbatcherFunc.fn,\n\t\t\t\t\t\tusersNeeded,\n\t\t\t\t\t\tnodesPerUser,\n\t\t\t\t\t\tbufferSize,\n\t\t\t\t\t)\n\t\t\t\t\tdefer cleanup()\n\n\t\t\t\t\tbatcher := testData.Batcher\n\t\t\t\t\tallNodes := testData.Nodes[:tc.nodeCount] // Limit to requested count\n\n\t\t\t\t\tt.Logf(\n\t\t\t\t\t\t\"Created %d nodes across %d users, buffer size: %d\",\n\t\t\t\t\t\tlen(allNodes),\n\t\t\t\t\t\tusersNeeded,\n\t\t\t\t\t\tbufferSize,\n\t\t\t\t\t)\n\n\t\t\t\t\t// Start enhanced tracking for all nodes\n\t\t\t\t\tfor i := range allNodes {\n\t\t\t\t\t\tallNodes[i].start()\n\t\t\t\t\t}\n\n\t\t\t\t\t// Yield to allow tracking goroutines to start\n\t\t\t\t\truntime.Gosched()\n\n\t\t\t\t\tstartTime := time.Now()\n\n\t\t\t\t\t// Join all nodes as fast as possible\n\t\t\t\t\tt.Logf(\"Joining %d nodes as fast as possible...\", len(allNodes))\n\n\t\t\t\t\tfor i := range allNodes {\n\t\t\t\t\t\tnode := &allNodes[i]\n\t\t\t\t\t\t_ = batcher.AddNode(node.n.ID, node.ch, tailcfg.CapabilityVersion(100), nil)\n\n\t\t\t\t\t\t// Issue full update after each join to ensure connectivity\n\t\t\t\t\t\tbatcher.AddWork(change.FullUpdate())\n\n\t\t\t\t\t\t// Yield to scheduler for large node counts to prevent overwhelming the work queue\n\t\t\t\t\t\tif tc.nodeCount > 100 && i%50 == 49 {\n\t\t\t\t\t\t\truntime.Gosched()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tjoinTime := time.Since(startTime)\n\t\t\t\t\tt.Logf(\"All nodes joined in %v, waiting for full connectivity...\", joinTime)\n\n\t\t\t\t\t// Wait for all updates to propagate until all nodes 
achieve connectivity\n\t\t\t\t\texpectedPeers := tc.nodeCount - 1 // Each node should see all others except itself\n\n\t\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t\tconnectedCount := 0\n\n\t\t\t\t\t\tfor i := range allNodes {\n\t\t\t\t\t\t\tnode := &allNodes[i]\n\n\t\t\t\t\t\t\tcurrentMaxPeers := int(node.maxPeersCount.Load())\n\t\t\t\t\t\t\tif currentMaxPeers >= expectedPeers {\n\t\t\t\t\t\t\t\tconnectedCount++\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tprogress := float64(connectedCount) / float64(len(allNodes)) * 100\n\t\t\t\t\t\tt.Logf(\"Progress: %d/%d nodes (%.1f%%) have seen %d+ peers\",\n\t\t\t\t\t\t\tconnectedCount, len(allNodes), progress, expectedPeers)\n\n\t\t\t\t\t\tassert.Equal(c, len(allNodes), connectedCount, \"all nodes should achieve full connectivity\")\n\t\t\t\t\t}, 5*time.Minute, 5*time.Second, \"waiting for full connectivity\")\n\n\t\t\t\t\tt.Logf(\"All nodes achieved full connectivity\")\n\n\t\t\t\t\ttotalTime := time.Since(startTime)\n\n\t\t\t\t\t// Disconnect all nodes\n\t\t\t\t\tfor i := range allNodes {\n\t\t\t\t\t\tnode := &allNodes[i]\n\t\t\t\t\t\tbatcher.RemoveNode(node.n.ID, node.ch)\n\t\t\t\t\t}\n\n\t\t\t\t\t// Wait for all nodes to be disconnected\n\t\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t\tfor i := range allNodes {\n\t\t\t\t\t\t\tassert.False(c, batcher.IsConnected(allNodes[i].n.ID), \"node should be disconnected\")\n\t\t\t\t\t\t}\n\t\t\t\t\t}, 5*time.Second, 50*time.Millisecond, \"waiting for nodes to disconnect\")\n\n\t\t\t\t\t// Collect final statistics\n\t\t\t\t\ttotalUpdates := int64(0)\n\t\t\t\t\ttotalFull := int64(0)\n\t\t\t\t\tmaxPeersGlobal := 0\n\t\t\t\t\tminPeersSeen := tc.nodeCount\n\t\t\t\t\tsuccessfulNodes := 0\n\n\t\t\t\t\tnodeDetails := make([]string, 0, min(10, len(allNodes)))\n\n\t\t\t\t\tfor i := range allNodes {\n\t\t\t\t\t\tnode := &allNodes[i]\n\t\t\t\t\t\tstats := node.cleanup()\n\n\t\t\t\t\t\ttotalUpdates += stats.TotalUpdates\n\t\t\t\t\t\ttotalFull += stats.FullUpdates\n\n\t\t\t\t\t\tif stats.MaxPeersSeen > maxPeersGlobal {\n\t\t\t\t\t\t\tmaxPeersGlobal = stats.MaxPeersSeen\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif stats.MaxPeersSeen < minPeersSeen {\n\t\t\t\t\t\t\tminPeersSeen = stats.MaxPeersSeen\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif stats.MaxPeersSeen >= expectedPeers {\n\t\t\t\t\t\t\tsuccessfulNodes++\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// Collect details for first few nodes or failing nodes\n\t\t\t\t\t\tif len(nodeDetails) < 10 || stats.MaxPeersSeen < expectedPeers {\n\t\t\t\t\t\t\tnodeDetails = append(nodeDetails,\n\t\t\t\t\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\t\t\t\t\"Node %d: %d updates (%d full), max %d peers\",\n\t\t\t\t\t\t\t\t\tnode.n.ID,\n\t\t\t\t\t\t\t\t\tstats.TotalUpdates,\n\t\t\t\t\t\t\t\t\tstats.FullUpdates,\n\t\t\t\t\t\t\t\t\tstats.MaxPeersSeen,\n\t\t\t\t\t\t\t\t))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t// Final results\n\t\t\t\t\tt.Logf(\"ALL-TO-ALL RESULTS: %d nodes, %d total updates (%d full)\",\n\t\t\t\t\t\tlen(allNodes), totalUpdates, totalFull)\n\t\t\t\t\tt.Logf(\n\t\t\t\t\t\t\"  Connectivity: %d/%d nodes successful (%.1f%%)\",\n\t\t\t\t\t\tsuccessfulNodes,\n\t\t\t\t\t\tlen(allNodes),\n\t\t\t\t\t\tfloat64(successfulNodes)/float64(len(allNodes))*100,\n\t\t\t\t\t)\n\t\t\t\t\tt.Logf(\"  Peers seen: min=%d, max=%d, expected=%d\",\n\t\t\t\t\t\tminPeersSeen, maxPeersGlobal, expectedPeers)\n\t\t\t\t\tt.Logf(\"  Timing: join=%v, total=%v\", joinTime, totalTime)\n\n\t\t\t\t\t// Show sample of node details\n\t\t\t\t\tif len(nodeDetails) > 0 {\n\t\t\t\t\t\tt.Logf(\"  Node 
sample:\")\n\n\t\t\t\t\t\tfor _, detail := range nodeDetails[:min(5, len(nodeDetails))] {\n\t\t\t\t\t\t\tt.Logf(\"    %s\", detail)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif len(nodeDetails) > 5 {\n\t\t\t\t\t\t\tt.Logf(\"    ... (%d more nodes)\", len(nodeDetails)-5)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t// Final verification: Since we waited until all nodes achieved connectivity,\n\t\t\t\t\t// this should always pass, but we verify the final state for completeness\n\t\t\t\t\tif successfulNodes == len(allNodes) {\n\t\t\t\t\t\tt.Logf(\n\t\t\t\t\t\t\t\"PASS: All-to-all connectivity achieved for %d nodes\",\n\t\t\t\t\t\t\tlen(allNodes),\n\t\t\t\t\t\t)\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// This should not happen since we loop until success, but handle it just in case\n\t\t\t\t\t\tfailedNodes := len(allNodes) - successfulNodes\n\t\t\t\t\t\tt.Errorf(\"UNEXPECTED: %d/%d nodes still failed after waiting for connectivity (expected %d, some saw %d-%d)\",\n\t\t\t\t\t\t\tfailedNodes, len(allNodes), expectedPeers, minPeersSeen, maxPeersGlobal)\n\n\t\t\t\t\t\t// Show details of failed nodes for debugging\n\t\t\t\t\t\tif len(nodeDetails) > 5 {\n\t\t\t\t\t\t\tt.Logf(\"Failed nodes details:\")\n\n\t\t\t\t\t\t\tfor _, detail := range nodeDetails[5:] {\n\t\t\t\t\t\t\t\tif !strings.Contains(detail, fmt.Sprintf(\"max %d peers\", expectedPeers)) {\n\t\t\t\t\t\t\t\t\tt.Logf(\"  %s\", detail)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestBatcherBasicOperations verifies core batcher functionality by testing\n// the basic lifecycle of adding nodes, processing updates, and removing nodes.\n//\n// Enhanced with real database test data, this test creates a registered node\n// and tests both DERP updates and full node updates. 
It validates the fundamental\n// add/remove operations and basic work processing pipeline with actual update\n// content validation instead of just byte count checks.\nfunc TestBatcherBasicOperations(t *testing.T) {\n\tfor _, batcherFunc := range allBatcherFunctions {\n\t\tt.Run(batcherFunc.name, func(t *testing.T) {\n\t\t\t// Create test environment with real database and nodes\n\t\t\ttestData, cleanup := setupBatcherWithTestData(t, batcherFunc.fn, 1, 2, 8)\n\t\t\tdefer cleanup()\n\n\t\t\tbatcher := testData.Batcher\n\t\t\ttn := &testData.Nodes[0]\n\t\t\ttn2 := &testData.Nodes[1]\n\n\t\t\t// Test AddNode with real node ID\n\t\t\t_ = batcher.AddNode(tn.n.ID, tn.ch, 100, nil)\n\n\t\t\tif !batcher.IsConnected(tn.n.ID) {\n\t\t\t\tt.Error(\"Node should be connected after AddNode\")\n\t\t\t}\n\n\t\t\t// Test work processing with DERP change\n\t\t\tbatcher.AddWork(change.DERPMap())\n\n\t\t\t// Wait for update and validate content\n\t\t\tselect {\n\t\t\tcase data := <-tn.ch:\n\t\t\t\tassertDERPMapResponse(t, data)\n\t\t\tcase <-time.After(200 * time.Millisecond):\n\t\t\t\tt.Error(\"Did not receive expected DERP update\")\n\t\t\t}\n\n\t\t\t// Drain any initial messages from first node\n\t\t\tdrainChannelTimeout(tn.ch, 100*time.Millisecond)\n\n\t\t\t// Add the second node and verify update message\n\t\t\t_ = batcher.AddNode(tn2.n.ID, tn2.ch, 100, nil)\n\t\t\tassert.True(t, batcher.IsConnected(tn2.n.ID))\n\n\t\t\t// First node should get an update that second node has connected.\n\t\t\tselect {\n\t\t\tcase data := <-tn.ch:\n\t\t\t\tassertOnlineMapResponse(t, data, true)\n\t\t\tcase <-time.After(500 * time.Millisecond):\n\t\t\t\tt.Error(\"Did not receive expected Online response update\")\n\t\t\t}\n\n\t\t\t// Second node should receive its initial full map\n\t\t\tselect {\n\t\t\tcase data := <-tn2.ch:\n\t\t\t\t// Verify it's a full map response\n\t\t\t\tassert.NotNil(t, data)\n\t\t\t\tassert.True(\n\t\t\t\t\tt,\n\t\t\t\t\tlen(data.Peers) >= 1 || data.Node != nil,\n\t\t\t\t\t\"Should receive initial full map\",\n\t\t\t\t)\n\t\t\tcase <-time.After(500 * time.Millisecond):\n\t\t\t\tt.Error(\"Second node should receive its initial full map\")\n\t\t\t}\n\n\t\t\t// Disconnect the second node\n\t\t\tbatcher.RemoveNode(tn2.n.ID, tn2.ch)\n\t\t\t// Note: IsConnected may return true during grace period for DNS resolution\n\n\t\t\t// First node should get update that second has disconnected.\n\t\t\tselect {\n\t\t\tcase data := <-tn.ch:\n\t\t\t\tassertOnlineMapResponse(t, data, false)\n\t\t\tcase <-time.After(500 * time.Millisecond):\n\t\t\t\tt.Error(\"Did not receive expected Online response update\")\n\t\t\t}\n\n\t\t\t// // Test node-specific update with real node data\n\t\t\t// batcher.AddWork(change.NodeKeyChanged(tn.n.ID))\n\n\t\t\t// // Wait for node update (may be empty for certain node changes)\n\t\t\t// select {\n\t\t\t// case data := <-tn.ch:\n\t\t\t// \tt.Logf(\"Received node update: %d bytes\", len(data))\n\t\t\t// \tif len(data) == 0 {\n\t\t\t// \t\tt.Logf(\"Empty node update (expected for some node changes in test environment)\")\n\t\t\t// \t} else {\n\t\t\t// \t\tif valid, updateType := validateUpdateContent(data); !valid {\n\t\t\t// \t\t\tt.Errorf(\"Invalid node update content: %s\", updateType)\n\t\t\t// \t\t} else {\n\t\t\t// \t\t\tt.Logf(\"Valid node update type: %s\", updateType)\n\t\t\t// \t\t}\n\t\t\t// \t}\n\t\t\t// case <-time.After(200 * time.Millisecond):\n\t\t\t// \t// Node changes might not always generate updates in test environment\n\t\t\t// \tt.Logf(\"No node update received (may be 
expected in test environment)\")\n\t\t\t// }\n\n\t\t\t// Test RemoveNode\n\t\t\tbatcher.RemoveNode(tn.n.ID, tn.ch)\n\t\t\t// Note: IsConnected may return true during grace period for DNS resolution\n\t\t\t// The node is actually removed from active connections but grace period allows DNS lookups\n\t\t})\n\t}\n}\n\nfunc drainChannelTimeout(ch <-chan *tailcfg.MapResponse, timeout time.Duration) {\n\ttimer := time.NewTimer(timeout)\n\tdefer timer.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ch:\n\t\t\t// Drain message\n\t\tcase <-timer.C:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// TestBatcherUpdateTypes tests different types of updates and verifies\n// that the batcher correctly processes them based on their content.\n//\n// Enhanced with real database test data, this test creates registered nodes\n// and tests various update types including DERP changes, node-specific changes,\n// and full updates. This validates the change classification logic and ensures\n// different update types are handled appropriately with actual node data.\n// func TestBatcherUpdateTypes(t *testing.T) {\n// \tfor _, batcherFunc := range allBatcherFunctions {\n// \t\tt.Run(batcherFunc.name, func(t *testing.T) {\n// \t\t\t// Create test environment with real database and nodes\n// \t\t\ttestData, cleanup := setupBatcherWithTestData(t, batcherFunc.fn, 1, 2, 8)\n// \t\t\tdefer cleanup()\n\n// \t\t\tbatcher := testData.Batcher\n// \t\t\ttestNodes := testData.Nodes\n\n// \t\t\tch := make(chan *tailcfg.MapResponse, 10)\n// \t\t\t// Use real node ID from test data\n// \t\t\tbatcher.AddNode(testNodes[0].n.ID, ch, false, \"zstd\", tailcfg.CapabilityVersion(100))\n\n// \t\t\ttests := []struct {\n// \t\t\t\tname        string\n// \t\t\t\tchangeSet   change.ChangeSet\n// \t\t\t\texpectData  bool // whether we expect to receive data\n// \t\t\t\tdescription string\n// \t\t\t}{\n// \t\t\t\t{\n// \t\t\t\t\tname:        \"DERP change\",\n// \t\t\t\t\tchangeSet:   change.DERPMapResponse(),\n// \t\t\t\t\texpectData:  true,\n// \t\t\t\t\tdescription: \"DERP changes should generate map updates\",\n// \t\t\t\t},\n// \t\t\t\t{\n// \t\t\t\t\tname:        \"Node key expiry\",\n// \t\t\t\t\tchangeSet:   change.KeyExpiryFor(testNodes[1].n.ID),\n// \t\t\t\t\texpectData:  true,\n// \t\t\t\t\tdescription: \"Node key expiry with real node data\",\n// \t\t\t\t},\n// \t\t\t\t{\n// \t\t\t\t\tname:        \"Node new registration\",\n// \t\t\t\t\tchangeSet:   change.NodeAddedResponse(testNodes[1].n.ID),\n// \t\t\t\t\texpectData:  true,\n// \t\t\t\t\tdescription: \"New node registration with real data\",\n// \t\t\t\t},\n// \t\t\t\t{\n// \t\t\t\t\tname:        \"Full update\",\n// \t\t\t\t\tchangeSet:   change.FullUpdateResponse(),\n// \t\t\t\t\texpectData:  true,\n// \t\t\t\t\tdescription: \"Full updates with real node data\",\n// \t\t\t\t},\n// \t\t\t\t{\n// \t\t\t\t\tname:        \"Policy change\",\n// \t\t\t\t\tchangeSet:   change.PolicyChangeResponse(),\n// \t\t\t\t\texpectData:  true,\n// \t\t\t\t\tdescription: \"Policy updates with real node data\",\n// \t\t\t\t},\n// \t\t\t}\n\n// \t\t\tfor _, tt := range tests {\n// \t\t\t\tt.Run(tt.name, func(t *testing.T) {\n// \t\t\t\t\tt.Logf(\"Testing: %s\", tt.description)\n\n// \t\t\t\t\t// Clear any existing updates\n// \t\t\t\t\tselect {\n// \t\t\t\t\tcase <-ch:\n// \t\t\t\t\tdefault:\n// \t\t\t\t\t}\n\n// \t\t\t\t\tbatcher.AddWork(tt.changeSet)\n\n// \t\t\t\t\tselect {\n// \t\t\t\t\tcase data := <-ch:\n// \t\t\t\t\t\tif !tt.expectData {\n// \t\t\t\t\t\t\tt.Errorf(\"Unexpected update for %s: %d bytes\", tt.name, 
len(data))\n// \t\t\t\t\t\t} else {\n// \t\t\t\t\t\t\tt.Logf(\"%s: received %d bytes\", tt.name, len(data))\n\n// \t\t\t\t\t\t\t// Validate update content when we have data\n// \t\t\t\t\t\t\tif len(data) > 0 {\n// \t\t\t\t\t\t\t\tif valid, updateType := validateUpdateContent(data); !valid {\n// \t\t\t\t\t\t\t\t\tt.Errorf(\"Invalid update content for %s: %s\", tt.name, updateType)\n// \t\t\t\t\t\t\t\t} else {\n// \t\t\t\t\t\t\t\t\tt.Logf(\"%s: valid update type: %s\", tt.name, updateType)\n// \t\t\t\t\t\t\t\t}\n// \t\t\t\t\t\t\t} else {\n// \t\t\t\t\t\t\t\tt.Logf(\"%s: empty update (may be expected for some node changes)\", tt.name)\n// \t\t\t\t\t\t\t}\n// \t\t\t\t\t\t}\n// \t\t\t\t\tcase <-time.After(100 * time.Millisecond):\n// \t\t\t\t\t\tif tt.expectData {\n// \t\t\t\t\t\t\tt.Errorf(\"Expected update for %s (%s) but none received\", tt.name, tt.description)\n// \t\t\t\t\t\t} else {\n// \t\t\t\t\t\t\tt.Logf(\"%s: no update (expected)\", tt.name)\n// \t\t\t\t\t\t}\n// \t\t\t\t\t}\n// \t\t\t\t})\n// \t\t\t}\n// \t\t})\n// \t}\n// }\n\n// TestBatcherWorkQueueBatching tests that multiple changes get batched\n// together and sent as a single update to reduce network overhead.\n//\n// Enhanced with real database test data, this test creates registered nodes\n// and rapidly submits multiple types of changes including DERP updates and\n// node changes. Due to the batching mechanism with BatchChangeDelay, these\n// should be combined into fewer updates. This validates that the batching\n// system works correctly with real node data and mixed change types.\nfunc TestBatcherWorkQueueBatching(t *testing.T) {\n\tfor _, batcherFunc := range allBatcherFunctions {\n\t\tt.Run(batcherFunc.name, func(t *testing.T) {\n\t\t\t// Create test environment with real database and nodes\n\t\t\ttestData, cleanup := setupBatcherWithTestData(t, batcherFunc.fn, 1, 2, 8)\n\t\t\tdefer cleanup()\n\n\t\t\tbatcher := testData.Batcher\n\t\t\ttestNodes := testData.Nodes\n\n\t\t\tch := make(chan *tailcfg.MapResponse, 10)\n\t\t\t_ = batcher.AddNode(testNodes[0].n.ID, ch, tailcfg.CapabilityVersion(100), nil)\n\n\t\t\t// Track update content for validation\n\t\t\tvar receivedUpdates []*tailcfg.MapResponse\n\n\t\t\t// Add multiple changes rapidly to test batching\n\t\t\tbatcher.AddWork(change.DERPMap())\n\t\t\t// Use a valid expiry time for testing since test nodes don't have expiry set\n\t\t\ttestExpiry := time.Now().Add(24 * time.Hour)\n\t\t\tbatcher.AddWork(change.KeyExpiryFor(testNodes[1].n.ID, testExpiry))\n\t\t\tbatcher.AddWork(change.DERPMap())\n\t\t\tbatcher.AddWork(change.NodeAdded(testNodes[1].n.ID))\n\t\t\tbatcher.AddWork(change.DERPMap())\n\n\t\t\t// Collect updates with timeout\n\t\t\tupdateCount := 0\n\t\t\ttimeout := time.After(200 * time.Millisecond)\n\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase data := <-ch:\n\t\t\t\t\tupdateCount++\n\n\t\t\t\t\treceivedUpdates = append(receivedUpdates, data)\n\n\t\t\t\t\t// Validate update content\n\t\t\t\t\tif data != nil {\n\t\t\t\t\t\tif valid, reason := validateUpdateContent(data); valid {\n\t\t\t\t\t\t\tt.Logf(\"Update %d: valid\", updateCount)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tt.Logf(\"Update %d: invalid: %s\", updateCount, reason)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tt.Logf(\"Update %d: nil update\", updateCount)\n\t\t\t\t\t}\n\t\t\t\tcase <-timeout:\n\t\t\t\t\t// Expected: 5 explicit changes + 1 initial from AddNode + 1 NodeOnline from wrapper = 7 updates\n\t\t\t\t\texpectedUpdates := 7\n\t\t\t\t\tt.Logf(\"Received %d updates from %d changes (expected 
%d)\",\n\t\t\t\t\t\tupdateCount, 5, expectedUpdates)\n\n\t\t\t\t\tif updateCount != expectedUpdates {\n\t\t\t\t\t\tt.Errorf(\n\t\t\t\t\t\t\t\"Expected %d updates but received %d\",\n\t\t\t\t\t\t\texpectedUpdates,\n\t\t\t\t\t\t\tupdateCount,\n\t\t\t\t\t\t)\n\t\t\t\t\t}\n\n\t\t\t\t\t// Validate that all updates have valid content\n\t\t\t\t\tvalidUpdates := 0\n\n\t\t\t\t\tfor _, data := range receivedUpdates {\n\t\t\t\t\t\tif data != nil {\n\t\t\t\t\t\t\tif valid, _ := validateUpdateContent(data); valid {\n\t\t\t\t\t\t\t\tvalidUpdates++\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif validUpdates != updateCount {\n\t\t\t\t\t\tt.Errorf(\"Expected all %d updates to be valid, but only %d were valid\",\n\t\t\t\t\t\t\tupdateCount, validUpdates)\n\t\t\t\t\t}\n\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestBatcherWorkerChannelSafety tests that worker goroutines handle closed\n// channels safely without panicking when processing work items.\n//\n// Enhanced with real database test data, this test creates rapid connect/disconnect\n// cycles using registered nodes while simultaneously queuing real work items.\n// This creates a race where workers might try to send to channels that have been\n// closed by node removal. The test validates that the safeSend() method properly\n// handles closed channels with real update workloads.\nfunc TestBatcherWorkerChannelSafety(t *testing.T) {\n\tfor _, batcherFunc := range allBatcherFunctions {\n\t\tt.Run(batcherFunc.name, func(t *testing.T) {\n\t\t\t// Create test environment with real database and nodes\n\t\t\ttestData, cleanup := setupBatcherWithTestData(t, batcherFunc.fn, 1, 1, 8)\n\t\t\tdefer cleanup()\n\n\t\t\tbatcher := testData.Batcher\n\t\t\ttestNode := &testData.Nodes[0]\n\n\t\t\tvar (\n\t\t\t\tpanics        int\n\t\t\t\tchannelErrors int\n\t\t\t\tinvalidData   int\n\t\t\t\tmutex         sync.Mutex\n\t\t\t)\n\n\t\t\t// Test rapid connect/disconnect with work generation\n\n\t\t\tfor i := range 50 {\n\t\t\t\tfunc() {\n\t\t\t\t\tdefer func() {\n\t\t\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\t\t\tmutex.Lock()\n\n\t\t\t\t\t\t\tpanics++\n\n\t\t\t\t\t\t\tmutex.Unlock()\n\t\t\t\t\t\t\tt.Logf(\"Panic caught: %v\", r)\n\t\t\t\t\t\t}\n\t\t\t\t\t}()\n\n\t\t\t\t\tch := make(chan *tailcfg.MapResponse, 5)\n\n\t\t\t\t\t// Add node and immediately queue real work\n\t\t\t\t\t_ = batcher.AddNode(testNode.n.ID, ch, tailcfg.CapabilityVersion(100), nil)\n\t\t\t\t\tbatcher.AddWork(change.DERPMap())\n\n\t\t\t\t\t// Consumer goroutine to validate data and detect channel issues\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tdefer func() {\n\t\t\t\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\t\t\t\tmutex.Lock()\n\n\t\t\t\t\t\t\t\tchannelErrors++\n\n\t\t\t\t\t\t\t\tmutex.Unlock()\n\t\t\t\t\t\t\t\tt.Logf(\"Channel consumer panic: %v\", r)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}()\n\n\t\t\t\t\t\tfor {\n\t\t\t\t\t\t\tselect {\n\t\t\t\t\t\t\tcase data, ok := <-ch:\n\t\t\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\t\t\t// Channel was closed, which is expected\n\t\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t// Validate the data we received\n\t\t\t\t\t\t\t\tif valid, reason := validateUpdateContent(data); !valid {\n\t\t\t\t\t\t\t\t\tmutex.Lock()\n\n\t\t\t\t\t\t\t\t\tinvalidData++\n\n\t\t\t\t\t\t\t\t\tmutex.Unlock()\n\t\t\t\t\t\t\t\t\tt.Logf(\"Invalid data received: %s\", reason)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcase <-time.After(10 * time.Millisecond):\n\t\t\t\t\t\t\t\t// Timeout waiting for 
data\n\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}()\n\n\t\t\t\t\t// Add node-specific work occasionally\n\t\t\t\t\tif i%10 == 0 {\n\t\t\t\t\t\t// Use a valid expiry time for testing since test nodes don't have expiry set\n\t\t\t\t\t\ttestExpiry := time.Now().Add(24 * time.Hour)\n\t\t\t\t\t\tbatcher.AddWork(change.KeyExpiryFor(testNode.n.ID, testExpiry))\n\t\t\t\t\t}\n\n\t\t\t\t\t// Rapid removal creates race between worker and removal\n\t\t\t\t\tfor range i % 3 {\n\t\t\t\t\t\truntime.Gosched() // Introduce timing variability\n\t\t\t\t\t}\n\n\t\t\t\t\tbatcher.RemoveNode(testNode.n.ID, ch)\n\n\t\t\t\t\t// Yield to allow workers to process and close channels\n\t\t\t\t\truntime.Gosched()\n\t\t\t\t}()\n\t\t\t}\n\n\t\t\tmutex.Lock()\n\t\t\tdefer mutex.Unlock()\n\n\t\t\tt.Logf(\n\t\t\t\t\"Worker safety test results: %d panics, %d channel errors, %d invalid data packets\",\n\t\t\t\tpanics,\n\t\t\t\tchannelErrors,\n\t\t\t\tinvalidData,\n\t\t\t)\n\n\t\t\t// Test failure conditions\n\t\t\tif panics > 0 {\n\t\t\t\tt.Errorf(\"Worker channel safety failed with %d panics\", panics)\n\t\t\t}\n\n\t\t\tif channelErrors > 0 {\n\t\t\t\tt.Errorf(\"Channel handling failed with %d channel errors\", channelErrors)\n\t\t\t}\n\n\t\t\tif invalidData > 0 {\n\t\t\t\tt.Errorf(\"Data validation failed with %d invalid data packets\", invalidData)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestBatcherConcurrentClients tests that concurrent connection lifecycle changes\n// don't affect other stable clients' ability to receive updates.\n//\n// The test sets up real test data with multiple users and registered nodes,\n// then creates stable clients and churning clients that rapidly connect and\n// disconnect. Work is generated continuously during these connection churn cycles using\n// real node data. 
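Each cycle spawns paired connect and\n// disconnect goroutines per churning node, synchronized only via a WaitGroup;\n// in sketch form (hypothetical id/ch/capVer, mirroring the calls below):\n//\n//\tgo func() { _ = batcher.AddNode(id, ch, capVer, nil) }()\n//\tgo func() { batcher.RemoveNode(id, ch) }()\n//\n// 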
The test validates that stable clients continue to function\n// normally and receive proper updates despite the connection churn from other clients,\n// ensuring system stability under concurrent load.\n//\n//nolint:gocyclo // complex concurrent test scenario\nfunc TestBatcherConcurrentClients(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping concurrent client test in short mode\")\n\t}\n\n\tfor _, batcherFunc := range allBatcherFunctions {\n\t\tt.Run(batcherFunc.name, func(t *testing.T) {\n\t\t\t// Create comprehensive test environment with real data\n\t\t\ttestData, cleanup := setupBatcherWithTestData(\n\t\t\t\tt,\n\t\t\t\tbatcherFunc.fn,\n\t\t\t\ttestUserCount,\n\t\t\t\ttestNodesPerUser,\n\t\t\t\t8,\n\t\t\t)\n\t\t\tdefer cleanup()\n\n\t\t\tbatcher := testData.Batcher\n\t\t\tallNodes := testData.Nodes\n\n\t\t\t// Create update tracker for monitoring all updates\n\t\t\ttracker := newUpdateTracker()\n\n\t\t\t// Set up stable clients using real node IDs\n\t\t\tstableNodes := allNodes[:len(allNodes)/2] // Use first half as stable\n\t\t\tstableChannels := make(map[types.NodeID]chan *tailcfg.MapResponse)\n\n\t\t\tfor i := range stableNodes {\n\t\t\t\tnode := &stableNodes[i]\n\t\t\t\tch := make(chan *tailcfg.MapResponse, normalBufferSize)\n\t\t\t\tstableChannels[node.n.ID] = ch\n\t\t\t\t_ = batcher.AddNode(node.n.ID, ch, tailcfg.CapabilityVersion(100), nil)\n\n\t\t\t\t// Monitor updates for each stable client\n\t\t\t\tgo func(nodeID types.NodeID, channel chan *tailcfg.MapResponse) {\n\t\t\t\t\tfor {\n\t\t\t\t\t\tselect {\n\t\t\t\t\t\tcase data, ok := <-channel:\n\t\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\t\t// Channel was closed, exit gracefully\n\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif valid, reason := validateUpdateContent(data); valid {\n\t\t\t\t\t\t\t\ttracker.recordUpdate(\n\t\t\t\t\t\t\t\t\tnodeID,\n\t\t\t\t\t\t\t\t\t1,\n\t\t\t\t\t\t\t\t) // Use 1 as update size since we have MapResponse\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tt.Errorf(\"Invalid update received for stable node %d: %s\", nodeID, reason)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\tcase <-time.After(testTimeout):\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}(node.n.ID, ch)\n\t\t\t}\n\n\t\t\t// Use remaining nodes for connection churn testing\n\t\t\tchurningNodes := allNodes[len(allNodes)/2:]\n\t\t\tchurningChannels := make(map[types.NodeID]chan *tailcfg.MapResponse)\n\n\t\t\tvar churningChannelsMutex sync.Mutex // Protect concurrent map access\n\n\t\t\tvar wg sync.WaitGroup\n\n\t\t\tnumCycles := 10 // Reduced for simpler test\n\t\t\tpanicCount := 0\n\n\t\t\tvar panicMutex sync.Mutex\n\n\t\t\t// Track deadlock with timeout\n\t\t\tdone := make(chan struct{})\n\n\t\t\tgo func() {\n\t\t\t\tdefer close(done)\n\n\t\t\t\t// Connection churn cycles - rapidly connect/disconnect to test concurrency safety\n\t\t\t\tfor i := range numCycles {\n\t\t\t\t\tfor j := range churningNodes {\n\t\t\t\t\t\tnode := &churningNodes[j]\n\n\t\t\t\t\t\twg.Add(2)\n\n\t\t\t\t\t\t// Connect churning node\n\t\t\t\t\t\tgo func(nodeID types.NodeID) {\n\t\t\t\t\t\t\tdefer func() {\n\t\t\t\t\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\t\t\t\t\tpanicMutex.Lock()\n\n\t\t\t\t\t\t\t\t\tpanicCount++\n\n\t\t\t\t\t\t\t\t\tpanicMutex.Unlock()\n\t\t\t\t\t\t\t\t\tt.Logf(\"Panic in churning connect: %v\", r)\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\twg.Done()\n\t\t\t\t\t\t\t}()\n\n\t\t\t\t\t\t\tch := make(chan *tailcfg.MapResponse, smallBufferSize)\n\n\t\t\t\t\t\t\tchurningChannelsMutex.Lock()\n\n\t\t\t\t\t\t\tchurningChannels[nodeID] = 
ch\n\n\t\t\t\t\t\t\tchurningChannelsMutex.Unlock()\n\n\t\t\t\t\t\t\t_ = batcher.AddNode(nodeID, ch, tailcfg.CapabilityVersion(100), nil)\n\n\t\t\t\t\t\t\t// Consume updates to prevent blocking\n\t\t\t\t\t\t\tgo func() {\n\t\t\t\t\t\t\t\tfor {\n\t\t\t\t\t\t\t\t\tselect {\n\t\t\t\t\t\t\t\t\tcase data, ok := <-ch:\n\t\t\t\t\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\t\t\t\t\t// Channel was closed, exit gracefully\n\t\t\t\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\t\tif valid, _ := validateUpdateContent(data); valid {\n\t\t\t\t\t\t\t\t\t\t\ttracker.recordUpdate(\n\t\t\t\t\t\t\t\t\t\t\t\tnodeID,\n\t\t\t\t\t\t\t\t\t\t\t\t1,\n\t\t\t\t\t\t\t\t\t\t\t) // Use 1 as update size since we have MapResponse\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\tcase <-time.After(500 * time.Millisecond):\n\t\t\t\t\t\t\t\t\t\t// Longer timeout to prevent premature exit during heavy load\n\t\t\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}()\n\t\t\t\t\t\t}(node.n.ID)\n\n\t\t\t\t\t\t// Disconnect churning node\n\t\t\t\t\t\tgo func(nodeID types.NodeID) {\n\t\t\t\t\t\t\tdefer func() {\n\t\t\t\t\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\t\t\t\t\tpanicMutex.Lock()\n\n\t\t\t\t\t\t\t\t\tpanicCount++\n\n\t\t\t\t\t\t\t\t\tpanicMutex.Unlock()\n\t\t\t\t\t\t\t\t\tt.Logf(\"Panic in churning disconnect: %v\", r)\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\twg.Done()\n\t\t\t\t\t\t\t}()\n\n\t\t\t\t\t\t\tfor range i % 5 {\n\t\t\t\t\t\t\t\truntime.Gosched() // Introduce timing variability\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tchurningChannelsMutex.Lock()\n\n\t\t\t\t\t\t\tch, exists := churningChannels[nodeID]\n\n\t\t\t\t\t\t\tchurningChannelsMutex.Unlock()\n\n\t\t\t\t\t\t\tif exists {\n\t\t\t\t\t\t\t\tbatcher.RemoveNode(nodeID, ch)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}(node.n.ID)\n\t\t\t\t\t}\n\n\t\t\t\t\t// Generate various types of work during racing\n\t\t\t\t\tif i%3 == 0 {\n\t\t\t\t\t\t// DERP changes\n\t\t\t\t\t\tbatcher.AddWork(change.DERPMap())\n\t\t\t\t\t}\n\n\t\t\t\t\tif i%5 == 0 {\n\t\t\t\t\t\t// Full updates using real node data\n\t\t\t\t\t\tbatcher.AddWork(change.FullUpdate())\n\t\t\t\t\t}\n\n\t\t\t\t\tif i%7 == 0 && len(allNodes) > 0 {\n\t\t\t\t\t\t// Node-specific changes using real nodes\n\t\t\t\t\t\tnode := &allNodes[i%len(allNodes)]\n\t\t\t\t\t\t// Use a valid expiry time for testing since test nodes don't have expiry set\n\t\t\t\t\t\ttestExpiry := time.Now().Add(24 * time.Hour)\n\t\t\t\t\t\tbatcher.AddWork(change.KeyExpiryFor(node.n.ID, testExpiry))\n\t\t\t\t\t}\n\n\t\t\t\t\t// Yield to allow some batching\n\t\t\t\t\truntime.Gosched()\n\t\t\t\t}\n\n\t\t\t\twg.Wait()\n\t\t\t}()\n\n\t\t\t// Deadlock detection\n\t\t\tselect {\n\t\t\tcase <-done:\n\t\t\t\tt.Logf(\"Connection churn cycles completed successfully\")\n\t\t\tcase <-time.After(deadlockTimeout):\n\t\t\t\tt.Error(\"Test timed out - possible deadlock detected\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Yield to allow any in-flight updates to complete\n\t\t\truntime.Gosched()\n\n\t\t\t// Validate results\n\t\t\tpanicMutex.Lock()\n\n\t\t\tfinalPanicCount := panicCount\n\n\t\t\tpanicMutex.Unlock()\n\n\t\t\tallStats := tracker.getAllStats()\n\n\t\t\t// Calculate expected vs actual updates\n\t\t\tstableUpdateCount := 0\n\t\t\tchurningUpdateCount := 0\n\n\t\t\t// Count actual update sources to understand the pattern\n\t\t\t// Let's track what we observe rather than trying to predict\n\t\t\texpectedDerpUpdates := (numCycles + 2) / 3\n\t\t\texpectedFullUpdates := (numCycles + 4) / 5\n\t\t\texpectedKeyUpdates := (numCycles + 6) / 
7\n\t\t\ttotalGeneratedWork := expectedDerpUpdates + expectedFullUpdates + expectedKeyUpdates\n\n\t\t\tt.Logf(\"Work generated: %d DERP + %d Full + %d KeyExpiry = %d total AddWork calls\",\n\t\t\t\texpectedDerpUpdates, expectedFullUpdates, expectedKeyUpdates, totalGeneratedWork)\n\n\t\t\tfor i := range stableNodes {\n\t\t\t\tnode := &stableNodes[i]\n\t\t\t\tif stats, exists := allStats[node.n.ID]; exists {\n\t\t\t\t\tstableUpdateCount += stats.TotalUpdates\n\t\t\t\t\tt.Logf(\"Stable node %d: %d updates\",\n\t\t\t\t\t\tnode.n.ID, stats.TotalUpdates)\n\t\t\t\t}\n\n\t\t\t\t// Verify stable clients are still connected\n\t\t\t\tif !batcher.IsConnected(node.n.ID) {\n\t\t\t\t\tt.Errorf(\"Stable node %d should still be connected\", node.n.ID)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor i := range churningNodes {\n\t\t\t\tnode := &churningNodes[i]\n\t\t\t\tif stats, exists := allStats[node.n.ID]; exists {\n\t\t\t\t\tchurningUpdateCount += stats.TotalUpdates\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tt.Logf(\"Total updates - Stable clients: %d, Churning clients: %d\",\n\t\t\t\tstableUpdateCount, churningUpdateCount)\n\t\t\tt.Logf(\n\t\t\t\t\"Average per stable client: %.1f updates\",\n\t\t\t\tfloat64(stableUpdateCount)/float64(len(stableNodes)),\n\t\t\t)\n\t\t\tt.Logf(\"Panics during test: %d\", finalPanicCount)\n\n\t\t\t// Validate test success criteria\n\t\t\tif finalPanicCount > 0 {\n\t\t\t\tt.Errorf(\"Test failed with %d panics\", finalPanicCount)\n\t\t\t}\n\n\t\t\t// Basic sanity check - stable clients should receive some updates\n\t\t\tif stableUpdateCount == 0 {\n\t\t\t\tt.Error(\"Stable clients received no updates - batcher may not be working\")\n\t\t\t}\n\n\t\t\t// Verify all stable clients are still functional\n\t\t\tfor i := range stableNodes {\n\t\t\t\tnode := &stableNodes[i]\n\t\t\t\tif !batcher.IsConnected(node.n.ID) {\n\t\t\t\t\tt.Errorf(\"Stable node %d lost connection during racing\", node.n.ID)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestBatcherFullPeerUpdates verifies that when multiple nodes are connected\n// and we send a FullSet update, nodes receive the complete peer list.\nfunc TestBatcherFullPeerUpdates(t *testing.T) {\n\tfor _, batcherFunc := range allBatcherFunctions {\n\t\tt.Run(batcherFunc.name, func(t *testing.T) {\n\t\t\t// Create test environment with 3 nodes from same user (so they can be peers)\n\t\t\ttestData, cleanup := setupBatcherWithTestData(t, batcherFunc.fn, 1, 3, 10)\n\t\t\tdefer cleanup()\n\n\t\t\tbatcher := testData.Batcher\n\t\t\tallNodes := testData.Nodes\n\n\t\t\tt.Logf(\"Created %d nodes in database\", len(allNodes))\n\n\t\t\t// Connect nodes one at a time and wait for each to be connected\n\t\t\tfor i := range allNodes {\n\t\t\t\tnode := &allNodes[i]\n\t\t\t\t_ = batcher.AddNode(node.n.ID, node.ch, tailcfg.CapabilityVersion(100), nil)\n\t\t\t\tt.Logf(\"Connected node %d (ID: %d)\", i, node.n.ID)\n\n\t\t\t\t// Wait for node to be connected\n\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\tassert.True(c, batcher.IsConnected(node.n.ID), \"node should be connected\")\n\t\t\t\t}, time.Second, 10*time.Millisecond, \"waiting for node connection\")\n\t\t\t}\n\n\t\t\t// Wait for all NodeCameOnline events to be processed\n\t\t\tt.Logf(\"Waiting for NodeCameOnline events to settle...\")\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tfor i := range allNodes {\n\t\t\t\t\tassert.True(c, batcher.IsConnected(allNodes[i].n.ID), \"all nodes should be connected\")\n\t\t\t\t}\n\t\t\t}, 5*time.Second, 50*time.Millisecond, \"waiting for all nodes to 
connect\")\n\n\t\t\t// Check how many peers each node should see\n\t\t\tfor i := range allNodes {\n\t\t\t\tnode := &allNodes[i]\n\t\t\t\tpeers := testData.State.ListPeers(node.n.ID)\n\t\t\t\tt.Logf(\"Node %d should see %d peers from state\", i, peers.Len())\n\t\t\t}\n\n\t\t\t// Send a full update - this should generate full peer lists\n\t\t\tt.Logf(\"Sending FullSet update...\")\n\t\t\tbatcher.AddWork(change.FullUpdate())\n\n\t\t\t// Wait for FullSet work items to be processed\n\t\t\tt.Logf(\"Waiting for FullSet to be processed...\")\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t// Check that some data is available in at least one channel\n\t\t\t\tfound := false\n\n\t\t\t\tfor i := range allNodes {\n\t\t\t\t\tif len(allNodes[i].ch) > 0 {\n\t\t\t\t\t\tfound = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tassert.True(c, found, \"no updates received yet\")\n\t\t\t}, 5*time.Second, 50*time.Millisecond, \"waiting for FullSet updates\")\n\n\t\t\t// Check what each node receives - read multiple updates\n\t\t\ttotalUpdates := 0\n\t\t\tfoundFullUpdate := false\n\n\t\t\t// Read all available updates for each node\n\t\t\tfor i := range allNodes {\n\t\t\t\tnodeUpdates := 0\n\n\t\t\t\tt.Logf(\"Reading updates for node %d:\", i)\n\n\t\t\t\t// Read up to 10 updates per node or until timeout/no more data\n\t\t\t\tfor updateNum := range 10 {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase data := <-allNodes[i].ch:\n\t\t\t\t\t\tnodeUpdates++\n\t\t\t\t\t\ttotalUpdates++\n\n\t\t\t\t\t\t// Parse and examine the update - data is already a MapResponse\n\t\t\t\t\t\tif data == nil {\n\t\t\t\t\t\t\tt.Errorf(\"Node %d update %d: nil MapResponse\", i, updateNum)\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tupdateType := \"unknown\"\n\t\t\t\t\t\tif len(data.Peers) > 0 {\n\t\t\t\t\t\t\tupdateType = \"FULL\"\n\t\t\t\t\t\t\tfoundFullUpdate = true\n\t\t\t\t\t\t} else if len(data.PeersChangedPatch) > 0 {\n\t\t\t\t\t\t\tupdateType = \"PATCH\"\n\t\t\t\t\t\t} else if data.DERPMap != nil {\n\t\t\t\t\t\t\tupdateType = \"DERP\"\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tt.Logf(\n\t\t\t\t\t\t\t\"  Update %d: %s - Peers=%d, PeersChangedPatch=%d, DERPMap=%v\",\n\t\t\t\t\t\t\tupdateNum,\n\t\t\t\t\t\t\tupdateType,\n\t\t\t\t\t\t\tlen(data.Peers),\n\t\t\t\t\t\t\tlen(data.PeersChangedPatch),\n\t\t\t\t\t\t\tdata.DERPMap != nil,\n\t\t\t\t\t\t)\n\n\t\t\t\t\t\tif len(data.Peers) > 0 {\n\t\t\t\t\t\t\tt.Logf(\"    Full peer list with %d peers\", len(data.Peers))\n\n\t\t\t\t\t\t\tfor j, peer := range data.Peers[:min(3, len(data.Peers))] {\n\t\t\t\t\t\t\t\tt.Logf(\n\t\t\t\t\t\t\t\t\t\"      Peer %d: NodeID=%d, Online=%v\",\n\t\t\t\t\t\t\t\t\tj,\n\t\t\t\t\t\t\t\t\tpeer.ID,\n\t\t\t\t\t\t\t\t\tpeer.Online,\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif len(data.PeersChangedPatch) > 0 {\n\t\t\t\t\t\t\tt.Logf(\"    Patch update with %d changes\", len(data.PeersChangedPatch))\n\n\t\t\t\t\t\t\tfor j, patch := range data.PeersChangedPatch[:min(3, len(data.PeersChangedPatch))] {\n\t\t\t\t\t\t\t\tt.Logf(\n\t\t\t\t\t\t\t\t\t\"      Patch %d: NodeID=%d, Online=%v\",\n\t\t\t\t\t\t\t\t\tj,\n\t\t\t\t\t\t\t\t\tpatch.NodeID,\n\t\t\t\t\t\t\t\t\tpatch.Online,\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\tcase <-time.After(500 * time.Millisecond):\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tt.Logf(\"Node %d received %d updates\", i, nodeUpdates)\n\t\t\t}\n\n\t\t\tt.Logf(\"Total updates received across all nodes: %d\", totalUpdates)\n\n\t\t\tif !foundFullUpdate {\n\t\t\t\tt.Errorf(\"CRITICAL: No FULL updates 
received despite sending change.FullUpdate()!\")\n\t\t\t\tt.Errorf(\n\t\t\t\t\t\"This confirms the bug - FullSet updates are not generating full peer responses\",\n\t\t\t\t)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestBatcherRapidReconnection reproduces the issue where nodes connecting with the same ID\n// at the same time cause /debug/batcher to show nodes as disconnected when they should be connected.\n// This specifically tests the multi-channel batcher implementation issue.\nfunc TestBatcherRapidReconnection(t *testing.T) {\n\tfor _, batcherFunc := range allBatcherFunctions {\n\t\tt.Run(batcherFunc.name, func(t *testing.T) {\n\t\t\ttestData, cleanup := setupBatcherWithTestData(t, batcherFunc.fn, 1, 3, 10)\n\t\t\tdefer cleanup()\n\n\t\t\tbatcher := testData.Batcher\n\t\t\tallNodes := testData.Nodes\n\n\t\t\tt.Logf(\"=== RAPID RECONNECTION TEST ===\")\n\t\t\tt.Logf(\"Testing rapid connect/disconnect with %d nodes\", len(allNodes))\n\n\t\t\t// Connect all nodes initially.\n\t\t\tt.Logf(\"Connecting all nodes...\")\n\n\t\t\tfor i := range allNodes {\n\t\t\t\tnode := &allNodes[i]\n\n\t\t\t\terr := batcher.AddNode(node.n.ID, node.ch, tailcfg.CapabilityVersion(100), nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"Failed to add node %d: %v\", i, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Wait for all connections to settle\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tfor i := range allNodes {\n\t\t\t\t\tassert.True(c, batcher.IsConnected(allNodes[i].n.ID), \"node should be connected\")\n\t\t\t\t}\n\t\t\t}, 5*time.Second, 50*time.Millisecond, \"waiting for connections to settle\")\n\n\t\t\t// Rapid disconnect ALL nodes (simulating nodes going down).\n\t\t\tt.Logf(\"Rapid disconnect all nodes...\")\n\n\t\t\tfor i := range allNodes {\n\t\t\t\tnode := &allNodes[i]\n\t\t\t\tremoved := batcher.RemoveNode(node.n.ID, node.ch)\n\t\t\t\tt.Logf(\"Node %d RemoveNode result: %t\", i, removed)\n\t\t\t}\n\n\t\t\t// Rapid reconnect with NEW channels (simulating nodes coming back up).\n\t\t\tt.Logf(\"Rapid reconnect with new channels...\")\n\n\t\t\tnewChannels := make([]chan *tailcfg.MapResponse, len(allNodes))\n\t\t\tfor i := range allNodes {\n\t\t\t\tnode := &allNodes[i]\n\t\t\t\tnewChannels[i] = make(chan *tailcfg.MapResponse, 10)\n\n\t\t\t\terr := batcher.AddNode(node.n.ID, newChannels[i], tailcfg.CapabilityVersion(100), nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"Failed to reconnect node %d: %v\", i, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Wait for all reconnections to settle\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tfor i := range allNodes {\n\t\t\t\t\tassert.True(c, batcher.IsConnected(allNodes[i].n.ID), \"node should be reconnected\")\n\t\t\t\t}\n\t\t\t}, 5*time.Second, 50*time.Millisecond, \"waiting for reconnections to settle\")\n\n\t\t\t// Check debug status after reconnection.\n\t\t\tt.Logf(\"Checking debug status...\")\n\n\t\t\tdebugInfo := batcher.Debug()\n\t\t\tdisconnectedCount := 0\n\n\t\t\tfor i := range allNodes {\n\t\t\t\tnode := &allNodes[i]\n\t\t\t\tif info, exists := debugInfo[node.n.ID]; exists {\n\t\t\t\t\tt.Logf(\"Node %d (ID %d): debug info = %+v\", i, node.n.ID, info)\n\n\t\t\t\t\tif !info.Connected {\n\t\t\t\t\t\tdisconnectedCount++\n\n\t\t\t\t\t\tt.Logf(\"BUG REPRODUCED: Node %d shows as disconnected in debug but should be connected\", i)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tdisconnectedCount++\n\n\t\t\t\t\tt.Logf(\"Node %d missing from debug info entirely\", i)\n\t\t\t\t}\n\n\t\t\t\t// Also check IsConnected 
method\n\t\t\t\tif !batcher.IsConnected(node.n.ID) {\n\t\t\t\t\tt.Logf(\"Node %d IsConnected() returns false\", i)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif disconnectedCount > 0 {\n\t\t\t\tt.Logf(\"ISSUE REPRODUCED: %d/%d nodes show as disconnected in debug\", disconnectedCount, len(allNodes))\n\t\t\t} else {\n\t\t\t\tt.Logf(\"All nodes show as connected - working correctly\")\n\t\t\t}\n\n\t\t\t// Test if \"disconnected\" nodes can actually receive updates.\n\t\t\tt.Logf(\"Testing if nodes can receive updates despite debug status...\")\n\n\t\t\t// Send a change that should reach all nodes\n\t\t\tbatcher.AddWork(change.DERPMap())\n\n\t\t\treceivedCount := 0\n\t\t\ttimeout := time.After(500 * time.Millisecond)\n\n\t\t\tfor i := range allNodes {\n\t\t\t\tselect {\n\t\t\t\tcase update := <-newChannels[i]:\n\t\t\t\t\tif update != nil {\n\t\t\t\t\t\treceivedCount++\n\n\t\t\t\t\t\tt.Logf(\"Node %d received update successfully\", i)\n\t\t\t\t\t}\n\t\t\t\tcase <-timeout:\n\t\t\t\t\tt.Logf(\"Node %d timed out waiting for update\", i)\n\t\t\t\t\tgoto done\n\t\t\t\t}\n\t\t\t}\n\n\t\tdone:\n\t\t\tt.Logf(\"Update delivery test: %d/%d nodes received updates\", receivedCount, len(allNodes))\n\n\t\t\tif receivedCount < len(allNodes) {\n\t\t\t\tt.Logf(\"Some nodes failed to receive updates - confirming the issue\")\n\t\t\t}\n\t\t})\n\t}\n}\n\n//nolint:gocyclo // complex multi-connection test scenario\nfunc TestBatcherMultiConnection(t *testing.T) {\n\tfor _, batcherFunc := range allBatcherFunctions {\n\t\tt.Run(batcherFunc.name, func(t *testing.T) {\n\t\t\ttestData, cleanup := setupBatcherWithTestData(t, batcherFunc.fn, 1, 2, 10)\n\t\t\tdefer cleanup()\n\n\t\t\tbatcher := testData.Batcher\n\t\t\tnode1 := &testData.Nodes[0]\n\t\t\tnode2 := &testData.Nodes[1]\n\n\t\t\tt.Logf(\"=== MULTI-CONNECTION TEST ===\")\n\n\t\t\t// Connect first node with initial connection.\n\t\t\tt.Logf(\"Connecting node 1 with first connection...\")\n\n\t\t\terr := batcher.AddNode(node1.n.ID, node1.ch, tailcfg.CapabilityVersion(100), nil)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Failed to add node1: %v\", err)\n\t\t\t}\n\n\t\t\t// Connect second node for comparison\n\t\t\terr = batcher.AddNode(node2.n.ID, node2.ch, tailcfg.CapabilityVersion(100), nil)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Failed to add node2: %v\", err)\n\t\t\t}\n\n\t\t\t// Wait for initial connections\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tassert.True(c, batcher.IsConnected(node1.n.ID), \"node1 should be connected\")\n\t\t\t\tassert.True(c, batcher.IsConnected(node2.n.ID), \"node2 should be connected\")\n\t\t\t}, time.Second, 10*time.Millisecond, \"waiting for initial connections\")\n\n\t\t\t// Add second connection for node1 (multi-connection scenario).\n\t\t\tt.Logf(\"Adding second connection for node 1...\")\n\n\t\t\tsecondChannel := make(chan *tailcfg.MapResponse, 10)\n\n\t\t\terr = batcher.AddNode(node1.n.ID, secondChannel, tailcfg.CapabilityVersion(100), nil)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Failed to add second connection for node1: %v\", err)\n\t\t\t}\n\n\t\t\t// Yield to allow connection to be processed\n\t\t\truntime.Gosched()\n\n\t\t\t// Add third connection for node1.\n\t\t\tt.Logf(\"Adding third connection for node 1...\")\n\n\t\t\tthirdChannel := make(chan *tailcfg.MapResponse, 10)\n\n\t\t\terr = batcher.AddNode(node1.n.ID, thirdChannel, tailcfg.CapabilityVersion(100), nil)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Failed to add third connection for node1: %v\", err)\n\t\t\t}\n\n\t\t\t// Yield to allow connection 
to be processed\n\t\t\truntime.Gosched()\n\n\t\t\t// Verify debug status shows correct connection count.\n\t\t\tt.Logf(\"Verifying debug status shows multiple connections...\")\n\n\t\t\tdebugInfo := batcher.Debug()\n\n\t\t\tif info, exists := debugInfo[node1.n.ID]; exists {\n\t\t\t\tt.Logf(\"Node1 debug info: %+v\", info)\n\n\t\t\t\tif info.ActiveConnections != 3 {\n\t\t\t\t\tt.Errorf(\"Node1 should have 3 active connections, got %d\", info.ActiveConnections)\n\t\t\t\t} else {\n\t\t\t\t\tt.Logf(\"SUCCESS: Node1 correctly shows 3 active connections\")\n\t\t\t\t}\n\n\t\t\t\tif !info.Connected {\n\t\t\t\t\tt.Errorf(\"Node1 should show as connected with 3 active connections\")\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif info, exists := debugInfo[node2.n.ID]; exists {\n\t\t\t\tif info.ActiveConnections != 1 {\n\t\t\t\t\tt.Errorf(\"Node2 should have 1 active connection, got %d\", info.ActiveConnections)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Send update and verify ALL connections receive it.\n\t\t\tt.Logf(\"Testing update distribution to all connections...\")\n\n\t\t\t// Clear any existing updates from all channels\n\t\t\tclearChannel := func(ch chan *tailcfg.MapResponse) {\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-ch:\n\t\t\t\t\t\t// drain\n\t\t\t\t\tdefault:\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tclearChannel(node1.ch)\n\t\t\tclearChannel(secondChannel)\n\t\t\tclearChannel(thirdChannel)\n\t\t\tclearChannel(node2.ch)\n\n\t\t\t// Send a change notification from node2 (so node1 should receive it on all connections)\n\t\t\ttestChangeSet := change.NodeAdded(node2.n.ID)\n\n\t\t\tbatcher.AddWork(testChangeSet)\n\n\t\t\t// Wait for updates to propagate to at least one channel\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tassert.Positive(c, len(node1.ch)+len(secondChannel)+len(thirdChannel), \"should have received updates\")\n\t\t\t}, 5*time.Second, 50*time.Millisecond, \"waiting for updates to propagate\")\n\n\t\t\t// Verify all three connections for node1 receive the update\n\t\t\tconnection1Received := false\n\t\t\tconnection2Received := false\n\t\t\tconnection3Received := false\n\n\t\t\tselect {\n\t\t\tcase mapResp := <-node1.ch:\n\t\t\t\tconnection1Received = (mapResp != nil)\n\t\t\t\tt.Logf(\"Node1 connection 1 received update: %t\", connection1Received)\n\t\t\tcase <-time.After(500 * time.Millisecond):\n\t\t\t\tt.Errorf(\"Node1 connection 1 did not receive update\")\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase mapResp := <-secondChannel:\n\t\t\t\tconnection2Received = (mapResp != nil)\n\t\t\t\tt.Logf(\"Node1 connection 2 received update: %t\", connection2Received)\n\t\t\tcase <-time.After(500 * time.Millisecond):\n\t\t\t\tt.Errorf(\"Node1 connection 2 did not receive update\")\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase mapResp := <-thirdChannel:\n\t\t\t\tconnection3Received = (mapResp != nil)\n\t\t\t\tt.Logf(\"Node1 connection 3 received update: %t\", connection3Received)\n\t\t\tcase <-time.After(500 * time.Millisecond):\n\t\t\t\tt.Errorf(\"Node1 connection 3 did not receive update\")\n\t\t\t}\n\n\t\t\tif connection1Received && connection2Received && connection3Received {\n\t\t\t\tt.Logf(\"SUCCESS: All three connections for node1 received the update\")\n\t\t\t} else {\n\t\t\t\tt.Errorf(\"FAILURE: Multi-connection broadcast failed - conn1: %t, conn2: %t, conn3: %t\",\n\t\t\t\t\tconnection1Received, connection2Received, connection3Received)\n\t\t\t}\n\n\t\t\t// Test connection removal and verify remaining connections still work.\n\t\t\tt.Logf(\"Testing connection 
removal...\")\n\n\t\t\t// Remove the second connection\n\t\t\tremoved := batcher.RemoveNode(node1.n.ID, secondChannel)\n\t\t\tif !removed {\n\t\t\t\tt.Errorf(\"Failed to remove second connection for node1\")\n\t\t\t}\n\n\t\t\t// Yield to allow removal to be processed\n\t\t\truntime.Gosched()\n\n\t\t\t// Verify debug status shows 2 connections now\n\t\t\tdebugInfo2 := batcher.Debug()\n\t\t\tif info, exists := debugInfo2[node1.n.ID]; exists {\n\t\t\t\tif info.ActiveConnections != 2 {\n\t\t\t\t\tt.Errorf(\"Node1 should have 2 active connections after removal, got %d\", info.ActiveConnections)\n\t\t\t\t} else {\n\t\t\t\t\tt.Logf(\"SUCCESS: Node1 correctly shows 2 active connections after removal\")\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Send another update and verify remaining connections still work\n\t\t\tclearChannel(node1.ch)\n\t\t\tclearChannel(thirdChannel)\n\n\t\t\ttestChangeSet2 := change.NodeAdded(node2.n.ID)\n\n\t\t\tbatcher.AddWork(testChangeSet2)\n\n\t\t\t// Wait for updates to propagate to remaining channels\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tassert.Positive(c, len(node1.ch)+len(thirdChannel), \"should have received updates\")\n\t\t\t}, 5*time.Second, 50*time.Millisecond, \"waiting for updates to propagate\")\n\n\t\t\t// Verify remaining connections still receive updates\n\t\t\tremaining1Received := false\n\t\t\tremaining3Received := false\n\n\t\t\tselect {\n\t\t\tcase mapResp := <-node1.ch:\n\t\t\t\tremaining1Received = (mapResp != nil)\n\t\t\tcase <-time.After(500 * time.Millisecond):\n\t\t\t\tt.Errorf(\"Node1 connection 1 did not receive update after removal\")\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase mapResp := <-thirdChannel:\n\t\t\t\tremaining3Received = (mapResp != nil)\n\t\t\tcase <-time.After(500 * time.Millisecond):\n\t\t\t\tt.Errorf(\"Node1 connection 3 did not receive update after removal\")\n\t\t\t}\n\n\t\t\tif remaining1Received && remaining3Received {\n\t\t\t\tt.Logf(\"SUCCESS: Remaining connections still receive updates after removal\")\n\t\t\t} else {\n\t\t\t\tt.Errorf(\"FAILURE: Remaining connections failed to receive updates - conn1: %t, conn3: %t\",\n\t\t\t\t\tremaining1Received, remaining3Received)\n\t\t\t}\n\n\t\t\t// Drain secondChannel of any messages received before removal\n\t\t\t// (the test wrapper sends NodeOffline before removal, which may have reached this channel)\n\t\t\tclearChannel(secondChannel)\n\n\t\t\t// Verify second channel no longer receives new updates after being removed\n\t\t\tselect {\n\t\t\tcase <-secondChannel:\n\t\t\t\tt.Errorf(\"Removed connection still received update - this should not happen\")\n\t\t\tcase <-time.After(100 * time.Millisecond):\n\t\t\t\tt.Logf(\"SUCCESS: Removed connection correctly no longer receives updates\")\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestNodeDeletedWhileChangesPending reproduces issue #2924 where deleting a node\n// from state while there are pending changes for that node in the batcher causes\n// \"node not found\" errors. The race condition occurs when:\n// 1. Node is connected and changes are queued for it\n// 2. Node is deleted from state (NodeStore) but not from batcher\n// 3. Batcher worker tries to generate map response for deleted node\n// 4. 
Mapper fails to find node in state, causing repeated \"node not found\" errors.\nfunc TestNodeDeletedWhileChangesPending(t *testing.T) {\n\tfor _, batcherFunc := range allBatcherFunctions {\n\t\tt.Run(batcherFunc.name, func(t *testing.T) {\n\t\t\t// Create test environment with 3 nodes\n\t\t\ttestData, cleanup := setupBatcherWithTestData(t, batcherFunc.fn, 1, 3, normalBufferSize)\n\t\t\tdefer cleanup()\n\n\t\t\tbatcher := testData.Batcher\n\t\t\tst := testData.State\n\t\t\tnode1 := &testData.Nodes[0]\n\t\t\tnode2 := &testData.Nodes[1]\n\t\t\tnode3 := &testData.Nodes[2]\n\n\t\t\tt.Logf(\"Testing issue #2924: Node1=%d, Node2=%d, Node3=%d\",\n\t\t\t\tnode1.n.ID, node2.n.ID, node3.n.ID)\n\n\t\t\t// Helper to drain channels\n\t\t\tdrainCh := func(ch chan *tailcfg.MapResponse) {\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-ch:\n\t\t\t\t\t\t// drain\n\t\t\t\t\tdefault:\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Start update consumers for all nodes\n\t\t\tnode1.start()\n\t\t\tnode2.start()\n\t\t\tnode3.start()\n\n\t\t\tdefer node1.cleanup()\n\t\t\tdefer node2.cleanup()\n\t\t\tdefer node3.cleanup()\n\n\t\t\t// Connect all nodes to the batcher\n\t\t\trequire.NoError(t, batcher.AddNode(node1.n.ID, node1.ch, tailcfg.CapabilityVersion(100), nil))\n\t\t\trequire.NoError(t, batcher.AddNode(node2.n.ID, node2.ch, tailcfg.CapabilityVersion(100), nil))\n\t\t\trequire.NoError(t, batcher.AddNode(node3.n.ID, node3.ch, tailcfg.CapabilityVersion(100), nil))\n\n\t\t\t// Wait for all nodes to be connected\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tassert.True(c, batcher.IsConnected(node1.n.ID), \"node1 should be connected\")\n\t\t\t\tassert.True(c, batcher.IsConnected(node2.n.ID), \"node2 should be connected\")\n\t\t\t\tassert.True(c, batcher.IsConnected(node3.n.ID), \"node3 should be connected\")\n\t\t\t}, 5*time.Second, 50*time.Millisecond, \"waiting for nodes to connect\")\n\n\t\t\t// Get initial work errors count\n\t\t\tlfb := unwrapBatcher(batcher)\n\t\t\tinitialWorkErrors := lfb.WorkErrors()\n\t\t\tt.Logf(\"Initial work errors: %d\", initialWorkErrors)\n\n\t\t\t// Clear channels to prepare for the test\n\t\t\tdrainCh(node1.ch)\n\t\t\tdrainCh(node2.ch)\n\t\t\tdrainCh(node3.ch)\n\n\t\t\t// Get node view for deletion\n\t\t\tnodeToDelete, ok := st.GetNodeByID(node3.n.ID)\n\t\t\trequire.True(t, ok, \"node3 should exist in state\")\n\n\t\t\t// Delete the node from state - this returns a NodeRemoved change\n\t\t\t// In production, this change is sent to batcher via app.Change()\n\t\t\tnodeChange, err := st.DeleteNode(nodeToDelete)\n\t\t\trequire.NoError(t, err, \"should be able to delete node from state\")\n\t\t\tt.Logf(\"Deleted node %d from state, change: %s\", node3.n.ID, nodeChange.Reason)\n\n\t\t\t// Verify node is deleted from state\n\t\t\t_, exists := st.GetNodeByID(node3.n.ID)\n\t\t\trequire.False(t, exists, \"node3 should be deleted from state\")\n\n\t\t\t// Send the NodeRemoved change to batcher (this is what app.Change() does)\n\t\t\t// With the fix, this should clean up node3 from batcher's internal state\n\t\t\tbatcher.AddWork(nodeChange)\n\n\t\t\t// Wait for the batcher to process the removal and clean up the node\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tassert.False(c, batcher.IsConnected(node3.n.ID), \"node3 should be disconnected from batcher\")\n\t\t\t}, 5*time.Second, 50*time.Millisecond, \"waiting for node removal to be processed\")\n\n\t\t\tt.Logf(\"Node %d connected in batcher after NodeRemoved: %v\", 
node3.n.ID, batcher.IsConnected(node3.n.ID))\n\n\t\t\t// Now queue changes that would have caused errors before the fix\n\t\t\t// With the fix, these should NOT cause \"node not found\" errors\n\t\t\t// because node3 was cleaned up when NodeRemoved was processed\n\t\t\tbatcher.AddWork(change.FullUpdate())\n\t\t\tbatcher.AddWork(change.PolicyChange())\n\n\t\t\t// Wait for work to be processed and verify no errors occurred\n\t\t\t// With the fix, no new errors should occur because the deleted node\n\t\t\t// was cleaned up from batcher state when NodeRemoved was processed\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tfinalWorkErrors := lfb.WorkErrors()\n\t\t\t\tnewErrors := finalWorkErrors - initialWorkErrors\n\t\t\t\tassert.Zero(c, newErrors, \"Fix for #2924: should have no work errors after node deletion\")\n\t\t\t}, 5*time.Second, 100*time.Millisecond, \"waiting for work processing to complete without errors\")\n\n\t\t\t// Verify remaining nodes still work correctly\n\t\t\tdrainCh(node1.ch)\n\t\t\tdrainCh(node2.ch)\n\t\t\tbatcher.AddWork(change.NodeAdded(node1.n.ID))\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t// Node 1 and 2 should receive updates\n\t\t\t\tstats1 := NodeStats{TotalUpdates: atomic.LoadInt64(&node1.updateCount)}\n\t\t\t\tstats2 := NodeStats{TotalUpdates: atomic.LoadInt64(&node2.updateCount)}\n\t\t\t\tassert.Positive(c, stats1.TotalUpdates, \"node1 should have received updates\")\n\t\t\t\tassert.Positive(c, stats2.TotalUpdates, \"node2 should have received updates\")\n\t\t\t}, 5*time.Second, 100*time.Millisecond, \"waiting for remaining nodes to receive updates\")\n\t\t})\n\t}\n}\n\nfunc TestRemoveNodeChannelAlreadyRemoved(t *testing.T) {\n\tfor _, batcherFunc := range allBatcherFunctions {\n\t\tt.Run(batcherFunc.name, func(t *testing.T) {\n\t\t\tt.Run(\"marks disconnected when removed channel was last active connection\", func(t *testing.T) {\n\t\t\t\ttestData, cleanup := setupBatcherWithTestData(t, batcherFunc.fn, 1, 1, normalBufferSize)\n\t\t\t\tdefer cleanup()\n\n\t\t\t\tlfb := unwrapBatcher(testData.Batcher)\n\n\t\t\t\tnodeID := testData.Nodes[0].n.ID\n\t\t\t\tch := make(chan *tailcfg.MapResponse, normalBufferSize)\n\t\t\t\trequire.NoError(t, lfb.AddNode(nodeID, ch, tailcfg.CapabilityVersion(100), nil))\n\n\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\tassert.True(c, lfb.IsConnected(nodeID), \"node should be connected after AddNode\")\n\t\t\t\t}, 5*time.Second, 50*time.Millisecond, \"waiting for node to be connected\")\n\n\t\t\t\tnodeConn, exists := lfb.nodes.Load(nodeID)\n\t\t\t\trequire.True(t, exists, \"node connection should exist\")\n\t\t\t\trequire.True(t, nodeConn.removeConnectionByChannel(ch), \"manual channel removal should succeed\")\n\n\t\t\t\tremoved := lfb.RemoveNode(nodeID, ch)\n\t\t\t\tassert.False(t, removed, \"RemoveNode should report no remaining active connections\")\n\n\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\tassert.False(c, lfb.IsConnected(nodeID), \"node should be disconnected after last connection is gone\")\n\t\t\t\t}, 5*time.Second, 50*time.Millisecond, \"waiting for node to be disconnected\")\n\n\t\t\t\tclose(ch)\n\t\t\t})\n\n\t\t\tt.Run(\"keeps connected when another connection is still active\", func(t *testing.T) {\n\t\t\t\ttestData, cleanup := setupBatcherWithTestData(t, batcherFunc.fn, 1, 1, normalBufferSize)\n\t\t\t\tdefer cleanup()\n\n\t\t\t\tlfb := unwrapBatcher(testData.Batcher)\n\n\t\t\t\tnodeID := 
testData.Nodes[0].n.ID\n\t\t\t\tch1 := make(chan *tailcfg.MapResponse, normalBufferSize)\n\t\t\t\tch2 := make(chan *tailcfg.MapResponse, normalBufferSize)\n\n\t\t\t\trequire.NoError(t, lfb.AddNode(nodeID, ch1, tailcfg.CapabilityVersion(100), nil))\n\t\t\t\trequire.NoError(t, lfb.AddNode(nodeID, ch2, tailcfg.CapabilityVersion(100), nil))\n\n\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\tassert.True(c, lfb.IsConnected(nodeID), \"node should be connected after AddNode\")\n\t\t\t\t}, 5*time.Second, 50*time.Millisecond, \"waiting for node to be connected\")\n\n\t\t\t\tnodeConn, exists := lfb.nodes.Load(nodeID)\n\t\t\t\trequire.True(t, exists, \"node connection should exist\")\n\t\t\t\trequire.True(t, nodeConn.removeConnectionByChannel(ch1), \"manual channel removal should succeed\")\n\n\t\t\t\tremoved := lfb.RemoveNode(nodeID, ch1)\n\t\t\t\tassert.True(t, removed, \"RemoveNode should report node still has active connections\")\n\t\t\t\tassert.True(t, lfb.IsConnected(nodeID), \"node should still be connected while another connection exists\")\n\t\t\t\tassert.Equal(t, 1, nodeConn.getActiveConnectionCount(), \"exactly one active connection should remain\")\n\n\t\t\t\tclose(ch1)\n\t\t\t})\n\t\t})\n\t}\n}\n\n// unwrapBatcher extracts the underlying *Batcher from the test wrapper.\nfunc unwrapBatcher(b *testBatcherWrapper) *Batcher {\n\treturn b.Batcher\n}\n"
  },
  {
    "path": "hscontrol/mapper/batcher_unit_test.go",
    "content": "package mapper\n\n// Unit tests for batcher components that do NOT require database setup.\n// These tests exercise connectionEntry, multiChannelNodeConn, computePeerDiff,\n// updateSentPeers, generateMapResponse branching, and handleNodeChange in isolation.\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"runtime\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/types/change\"\n\t\"github.com/puzpuzpuz/xsync/v4\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/tailcfg\"\n)\n\n// ============================================================================\n// Mock Infrastructure\n// ============================================================================\n\n// mockNodeConnection implements nodeConnection for isolated unit testing\n// of generateMapResponse and handleNodeChange without a real database.\ntype mockNodeConnection struct {\n\tid  types.NodeID\n\tver tailcfg.CapabilityVersion\n\n\t// sendFn allows injecting custom send behavior.\n\t// If nil, sends are recorded and succeed.\n\tsendFn func(*tailcfg.MapResponse) error\n\n\t// sent records all successful sends for assertion.\n\tsent []*tailcfg.MapResponse\n\tmu   sync.Mutex\n\n\t// Peer tracking\n\tpeers *xsync.Map[tailcfg.NodeID, struct{}]\n}\n\nfunc newMockNodeConnection(id types.NodeID) *mockNodeConnection {\n\treturn &mockNodeConnection{\n\t\tid:    id,\n\t\tver:   tailcfg.CapabilityVersion(100),\n\t\tpeers: xsync.NewMap[tailcfg.NodeID, struct{}](),\n\t}\n}\n\n// withSendError configures the mock to return the given error on send.\nfunc (m *mockNodeConnection) withSendError(err error) *mockNodeConnection {\n\tm.sendFn = func(_ *tailcfg.MapResponse) error { return err }\n\treturn m\n}\n\nfunc (m *mockNodeConnection) nodeID() types.NodeID               { return m.id }\nfunc (m *mockNodeConnection) version() tailcfg.CapabilityVersion { return m.ver }\n\nfunc (m *mockNodeConnection) send(data *tailcfg.MapResponse) error {\n\tif m.sendFn != nil {\n\t\treturn m.sendFn(data)\n\t}\n\n\tm.mu.Lock()\n\tm.sent = append(m.sent, data)\n\tm.mu.Unlock()\n\n\treturn nil\n}\n\nfunc (m *mockNodeConnection) computePeerDiff(currentPeers []tailcfg.NodeID) []tailcfg.NodeID {\n\tcurrentSet := make(map[tailcfg.NodeID]struct{}, len(currentPeers))\n\tfor _, id := range currentPeers {\n\t\tcurrentSet[id] = struct{}{}\n\t}\n\n\tvar removed []tailcfg.NodeID\n\n\tm.peers.Range(func(id tailcfg.NodeID, _ struct{}) bool {\n\t\tif _, exists := currentSet[id]; !exists {\n\t\t\tremoved = append(removed, id)\n\t\t}\n\n\t\treturn true\n\t})\n\n\treturn removed\n}\n\nfunc (m *mockNodeConnection) updateSentPeers(resp *tailcfg.MapResponse) {\n\tif resp == nil {\n\t\treturn\n\t}\n\n\tif resp.Peers != nil {\n\t\tm.peers.Clear()\n\n\t\tfor _, peer := range resp.Peers {\n\t\t\tm.peers.Store(peer.ID, struct{}{})\n\t\t}\n\t}\n\n\tfor _, peer := range resp.PeersChanged {\n\t\tm.peers.Store(peer.ID, struct{}{})\n\t}\n\n\tfor _, id := range resp.PeersRemoved {\n\t\tm.peers.Delete(id)\n\t}\n}\n\n// getSent returns a thread-safe copy of all sent responses.\nfunc (m *mockNodeConnection) getSent() []*tailcfg.MapResponse {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\treturn append([]*tailcfg.MapResponse{}, m.sent...)\n}\n\n// ============================================================================\n// Test Helpers\n// ============================================================================\n\n// 
testMapResponse creates a minimal valid MapResponse for testing.\nfunc testMapResponse() *tailcfg.MapResponse {\n\tnow := time.Now()\n\n\treturn &tailcfg.MapResponse{\n\t\tControlTime: &now,\n\t}\n}\n\n// testMapResponseWithPeers creates a MapResponse with the given peer IDs.\nfunc testMapResponseWithPeers(peerIDs ...tailcfg.NodeID) *tailcfg.MapResponse {\n\tresp := testMapResponse()\n\n\tresp.Peers = make([]*tailcfg.Node, len(peerIDs))\n\tfor i, id := range peerIDs {\n\t\tresp.Peers[i] = &tailcfg.Node{ID: id}\n\t}\n\n\treturn resp\n}\n\n// ids is a convenience for creating a slice of tailcfg.NodeID.\nfunc ids(nodeIDs ...tailcfg.NodeID) []tailcfg.NodeID {\n\treturn nodeIDs\n}\n\n// expectReceive asserts that a message arrives on the channel within 100ms.\nfunc expectReceive(t *testing.T, ch <-chan *tailcfg.MapResponse, msg string) *tailcfg.MapResponse {\n\tt.Helper()\n\n\tconst timeout = 100 * time.Millisecond\n\n\tselect {\n\tcase data := <-ch:\n\t\treturn data\n\tcase <-time.After(timeout):\n\t\tt.Fatalf(\"expected to receive on channel within %v: %s\", timeout, msg)\n\t\treturn nil\n\t}\n}\n\n// expectNoReceive asserts that no message arrives within timeout.\nfunc expectNoReceive(t *testing.T, ch <-chan *tailcfg.MapResponse, timeout time.Duration, msg string) {\n\tt.Helper()\n\n\tselect {\n\tcase data := <-ch:\n\t\tt.Fatalf(\"expected no receive but got %+v: %s\", data, msg)\n\tcase <-time.After(timeout):\n\t\t// Expected\n\t}\n}\n\n// makeConnectionEntry creates a connectionEntry with the given channel.\nfunc makeConnectionEntry(id string, ch chan<- *tailcfg.MapResponse) *connectionEntry {\n\tentry := &connectionEntry{\n\t\tid:      id,\n\t\tc:       ch,\n\t\tversion: tailcfg.CapabilityVersion(100),\n\t\tcreated: time.Now(),\n\t}\n\tentry.lastUsed.Store(time.Now().Unix())\n\n\treturn entry\n}\n\n// ============================================================================\n// connectionEntry.send() Tests\n// ============================================================================\n\nfunc TestConnectionEntry_SendSuccess(t *testing.T) {\n\tch := make(chan *tailcfg.MapResponse, 1)\n\tentry := makeConnectionEntry(\"test-conn\", ch)\n\tdata := testMapResponse()\n\n\tbeforeSend := time.Now().Unix()\n\terr := entry.send(data)\n\n\trequire.NoError(t, err)\n\tassert.GreaterOrEqual(t, entry.lastUsed.Load(), beforeSend,\n\t\t\"lastUsed should be updated after successful send\")\n\n\t// Verify data was actually sent\n\treceived := expectReceive(t, ch, \"data should be on channel\")\n\tassert.Equal(t, data, received)\n}\n\nfunc TestConnectionEntry_SendNilData(t *testing.T) {\n\tch := make(chan *tailcfg.MapResponse, 1)\n\tentry := makeConnectionEntry(\"test-conn\", ch)\n\n\terr := entry.send(nil)\n\n\trequire.NoError(t, err, \"nil data should return nil error\")\n\texpectNoReceive(t, ch, 10*time.Millisecond, \"nil data should not be sent to channel\")\n}\n\nfunc TestConnectionEntry_SendTimeout(t *testing.T) {\n\t// Unbuffered channel with no reader = always blocks\n\tch := make(chan *tailcfg.MapResponse)\n\tentry := makeConnectionEntry(\"test-conn\", ch)\n\tdata := testMapResponse()\n\n\tstart := time.Now()\n\terr := entry.send(data)\n\telapsed := time.Since(start)\n\n\trequire.ErrorIs(t, err, ErrConnectionSendTimeout)\n\tassert.GreaterOrEqual(t, elapsed, 40*time.Millisecond,\n\t\t\"should wait approximately 50ms before timeout\")\n}\n\nfunc TestConnectionEntry_SendClosed(t *testing.T) {\n\tch := make(chan *tailcfg.MapResponse, 1)\n\tentry := makeConnectionEntry(\"test-conn\", ch)\n\n\t// Mark 
as closed before sending\n\tentry.closed.Store(true)\n\n\terr := entry.send(testMapResponse())\n\n\trequire.ErrorIs(t, err, errConnectionClosed)\n\texpectNoReceive(t, ch, 10*time.Millisecond,\n\t\t\"closed entry should not send data to channel\")\n}\n\nfunc TestConnectionEntry_SendUpdatesLastUsed(t *testing.T) {\n\tch := make(chan *tailcfg.MapResponse, 1)\n\tentry := makeConnectionEntry(\"test-conn\", ch)\n\n\t// Set lastUsed to a past time\n\tpastTime := time.Now().Add(-1 * time.Hour).Unix()\n\tentry.lastUsed.Store(pastTime)\n\n\terr := entry.send(testMapResponse())\n\trequire.NoError(t, err)\n\n\tassert.Greater(t, entry.lastUsed.Load(), pastTime,\n\t\t\"lastUsed should be updated to current time after send\")\n}\n\n// ============================================================================\n// multiChannelNodeConn.send() Tests\n// ============================================================================\n\nfunc TestMultiChannelSend_AllSuccess(t *testing.T) {\n\tmc := newMultiChannelNodeConn(1, nil)\n\n\t// Create 3 buffered channels (all will succeed)\n\tchannels := make([]chan *tailcfg.MapResponse, 3)\n\tfor i := range channels {\n\t\tchannels[i] = make(chan *tailcfg.MapResponse, 1)\n\t\tmc.addConnection(makeConnectionEntry(fmt.Sprintf(\"conn-%d\", i), channels[i]))\n\t}\n\n\tdata := testMapResponse()\n\terr := mc.send(data)\n\n\trequire.NoError(t, err)\n\tassert.Equal(t, 3, mc.getActiveConnectionCount(),\n\t\t\"all connections should remain active after success\")\n\n\t// Verify all channels received the data\n\tfor i, ch := range channels {\n\t\treceived := expectReceive(t, ch,\n\t\t\tfmt.Sprintf(\"channel %d should receive data\", i))\n\t\tassert.Equal(t, data, received)\n\t}\n}\n\nfunc TestMultiChannelSend_PartialFailure(t *testing.T) {\n\tmc := newMultiChannelNodeConn(1, nil)\n\n\t// 2 buffered channels (will succeed) + 1 unbuffered (will timeout)\n\tgoodCh1 := make(chan *tailcfg.MapResponse, 1)\n\tgoodCh2 := make(chan *tailcfg.MapResponse, 1)\n\tbadCh := make(chan *tailcfg.MapResponse) // unbuffered, no reader\n\n\tmc.addConnection(makeConnectionEntry(\"good-1\", goodCh1))\n\tmc.addConnection(makeConnectionEntry(\"bad\", badCh))\n\tmc.addConnection(makeConnectionEntry(\"good-2\", goodCh2))\n\n\terr := mc.send(testMapResponse())\n\n\trequire.NoError(t, err, \"should succeed if at least one connection works\")\n\tassert.Equal(t, 2, mc.getActiveConnectionCount(),\n\t\t\"failed connection should be removed\")\n\n\t// Good channels should have received data\n\texpectReceive(t, goodCh1, \"good-1 should receive\")\n\texpectReceive(t, goodCh2, \"good-2 should receive\")\n}\n\nfunc TestMultiChannelSend_AllFail(t *testing.T) {\n\tmc := newMultiChannelNodeConn(1, nil)\n\n\t// All unbuffered channels with no readers\n\tfor i := range 3 {\n\t\tch := make(chan *tailcfg.MapResponse) // unbuffered\n\t\tmc.addConnection(makeConnectionEntry(fmt.Sprintf(\"bad-%d\", i), ch))\n\t}\n\n\terr := mc.send(testMapResponse())\n\n\trequire.Error(t, err, \"should return error when all connections fail\")\n\tassert.Equal(t, 0, mc.getActiveConnectionCount(),\n\t\t\"all failed connections should be removed\")\n}\n\nfunc TestMultiChannelSend_ZeroConnections(t *testing.T) {\n\tmc := newMultiChannelNodeConn(1, nil)\n\n\terr := mc.send(testMapResponse())\n\n\trequire.NoError(t, err,\n\t\t\"sending to node with 0 connections should succeed silently (rapid reconnection scenario)\")\n}\n\nfunc TestMultiChannelSend_NilData(t *testing.T) {\n\tmc := newMultiChannelNodeConn(1, nil)\n\tch := make(chan 
*tailcfg.MapResponse, 1)\n\tmc.addConnection(makeConnectionEntry(\"conn\", ch))\n\n\terr := mc.send(nil)\n\n\trequire.NoError(t, err, \"nil data should return nil immediately\")\n\texpectNoReceive(t, ch, 10*time.Millisecond, \"nil data should not be sent\")\n}\n\nfunc TestMultiChannelSend_FailedConnectionRemoved(t *testing.T) {\n\tmc := newMultiChannelNodeConn(1, nil)\n\n\tgoodCh := make(chan *tailcfg.MapResponse, 10) // large buffer\n\tbadCh := make(chan *tailcfg.MapResponse)      // unbuffered, will timeout\n\n\tmc.addConnection(makeConnectionEntry(\"good\", goodCh))\n\tmc.addConnection(makeConnectionEntry(\"bad\", badCh))\n\n\tassert.Equal(t, 2, mc.getActiveConnectionCount())\n\n\t// First send: bad connection removed\n\terr := mc.send(testMapResponse())\n\trequire.NoError(t, err)\n\tassert.Equal(t, 1, mc.getActiveConnectionCount())\n\n\t// Second send: only good connection remains, should succeed\n\terr = mc.send(testMapResponse())\n\trequire.NoError(t, err)\n\tassert.Equal(t, 1, mc.getActiveConnectionCount())\n}\n\nfunc TestMultiChannelSend_UpdateCount(t *testing.T) {\n\tmc := newMultiChannelNodeConn(1, nil)\n\tch := make(chan *tailcfg.MapResponse, 10)\n\tmc.addConnection(makeConnectionEntry(\"conn\", ch))\n\n\tassert.Equal(t, int64(0), mc.updateCount.Load())\n\n\t_ = mc.send(testMapResponse())\n\tassert.Equal(t, int64(1), mc.updateCount.Load())\n\n\t_ = mc.send(testMapResponse())\n\tassert.Equal(t, int64(2), mc.updateCount.Load())\n}\n\n// ============================================================================\n// multiChannelNodeConn.close() Tests\n// ============================================================================\n\nfunc TestMultiChannelClose_MarksEntriesClosed(t *testing.T) {\n\tmc := newMultiChannelNodeConn(1, nil)\n\n\tentries := make([]*connectionEntry, 3)\n\tfor i := range entries {\n\t\tch := make(chan *tailcfg.MapResponse, 1)\n\t\tentries[i] = makeConnectionEntry(fmt.Sprintf(\"conn-%d\", i), ch)\n\t\tmc.addConnection(entries[i])\n\t}\n\n\tmc.close()\n\n\tfor i, entry := range entries {\n\t\tassert.True(t, entry.closed.Load(),\n\t\t\t\"entry %d should be marked as closed\", i)\n\t}\n}\n\nfunc TestMultiChannelClose_PreventsSendPanic(t *testing.T) {\n\tmc := newMultiChannelNodeConn(1, nil)\n\tch := make(chan *tailcfg.MapResponse, 1)\n\tentry := makeConnectionEntry(\"conn\", ch)\n\tmc.addConnection(entry)\n\n\tmc.close()\n\n\t// After close, connectionEntry.send should return errConnectionClosed\n\t// (not panic on send to closed channel)\n\terr := entry.send(testMapResponse())\n\trequire.ErrorIs(t, err, errConnectionClosed,\n\t\t\"send after close should return errConnectionClosed, not panic\")\n}\n\n// ============================================================================\n// multiChannelNodeConn connection management Tests\n// ============================================================================\n\nfunc TestMultiChannelNodeConn_AddRemoveConnections(t *testing.T) {\n\tmc := newMultiChannelNodeConn(1, nil)\n\n\tch1 := make(chan *tailcfg.MapResponse, 1)\n\tch2 := make(chan *tailcfg.MapResponse, 1)\n\tch3 := make(chan *tailcfg.MapResponse, 1)\n\n\t// Add connections\n\tmc.addConnection(makeConnectionEntry(\"c1\", ch1))\n\tassert.Equal(t, 1, mc.getActiveConnectionCount())\n\tassert.True(t, mc.hasActiveConnections())\n\n\tmc.addConnection(makeConnectionEntry(\"c2\", ch2))\n\tmc.addConnection(makeConnectionEntry(\"c3\", ch3))\n\tassert.Equal(t, 3, mc.getActiveConnectionCount())\n\n\t// Remove by channel pointer\n\tassert.True(t, 
mc.removeConnectionByChannel(ch2))\n\tassert.Equal(t, 2, mc.getActiveConnectionCount())\n\n\t// Remove non-existent channel\n\tnonExistentCh := make(chan *tailcfg.MapResponse)\n\tassert.False(t, mc.removeConnectionByChannel(nonExistentCh))\n\tassert.Equal(t, 2, mc.getActiveConnectionCount())\n\n\t// Remove remaining\n\tassert.True(t, mc.removeConnectionByChannel(ch1))\n\tassert.True(t, mc.removeConnectionByChannel(ch3))\n\tassert.Equal(t, 0, mc.getActiveConnectionCount())\n\tassert.False(t, mc.hasActiveConnections())\n}\n\nfunc TestMultiChannelNodeConn_Version(t *testing.T) {\n\tmc := newMultiChannelNodeConn(1, nil)\n\n\t// No connections - version should be 0\n\tassert.Equal(t, tailcfg.CapabilityVersion(0), mc.version())\n\n\t// Add connection with version 100\n\tch := make(chan *tailcfg.MapResponse, 1)\n\tentry := makeConnectionEntry(\"conn\", ch)\n\tentry.version = tailcfg.CapabilityVersion(100)\n\tmc.addConnection(entry)\n\n\tassert.Equal(t, tailcfg.CapabilityVersion(100), mc.version())\n}\n\n// ============================================================================\n// computePeerDiff Tests\n// ============================================================================\n\nfunc TestComputePeerDiff(t *testing.T) {\n\ttests := []struct {\n\t\tname        string\n\t\ttracked     []tailcfg.NodeID // peers previously sent to client\n\t\tcurrent     []tailcfg.NodeID // peers visible now\n\t\twantRemoved []tailcfg.NodeID // expected removed peers\n\t}{\n\t\t{\n\t\t\tname:        \"no_changes\",\n\t\t\ttracked:     ids(1, 2, 3),\n\t\t\tcurrent:     ids(1, 2, 3),\n\t\t\twantRemoved: nil,\n\t\t},\n\t\t{\n\t\t\tname:        \"one_removed\",\n\t\t\ttracked:     ids(1, 2, 3),\n\t\t\tcurrent:     ids(1, 3),\n\t\t\twantRemoved: ids(2),\n\t\t},\n\t\t{\n\t\t\tname:        \"multiple_removed\",\n\t\t\ttracked:     ids(1, 2, 3, 4, 5),\n\t\t\tcurrent:     ids(2, 4),\n\t\t\twantRemoved: ids(1, 3, 5),\n\t\t},\n\t\t{\n\t\t\tname:        \"all_removed\",\n\t\t\ttracked:     ids(1, 2, 3),\n\t\t\tcurrent:     nil,\n\t\t\twantRemoved: ids(1, 2, 3),\n\t\t},\n\t\t{\n\t\t\tname:        \"peers_added_no_removal\",\n\t\t\ttracked:     ids(1),\n\t\t\tcurrent:     ids(1, 2, 3),\n\t\t\twantRemoved: nil,\n\t\t},\n\t\t{\n\t\t\tname:        \"empty_tracked\",\n\t\t\ttracked:     nil,\n\t\t\tcurrent:     ids(1, 2, 3),\n\t\t\twantRemoved: nil,\n\t\t},\n\t\t{\n\t\t\tname:        \"both_empty\",\n\t\t\ttracked:     nil,\n\t\t\tcurrent:     nil,\n\t\t\twantRemoved: nil,\n\t\t},\n\t\t{\n\t\t\tname:        \"disjoint_sets\",\n\t\t\ttracked:     ids(1, 2, 3),\n\t\t\tcurrent:     ids(4, 5, 6),\n\t\t\twantRemoved: ids(1, 2, 3),\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tmc := newMultiChannelNodeConn(1, nil)\n\n\t\t\t// Populate tracked peers\n\t\t\tfor _, id := range tt.tracked {\n\t\t\t\tmc.lastSentPeers.Store(id, struct{}{})\n\t\t\t}\n\n\t\t\tgot := mc.computePeerDiff(tt.current)\n\n\t\t\tassert.ElementsMatch(t, tt.wantRemoved, got,\n\t\t\t\t\"removed peers should match expected\")\n\t\t})\n\t}\n}\n\n// ============================================================================\n// updateSentPeers Tests\n// ============================================================================\n\nfunc TestUpdateSentPeers(t *testing.T) {\n\tt.Run(\"full_peer_list_replaces_all\", func(t *testing.T) {\n\t\tmc := newMultiChannelNodeConn(1, nil)\n\t\t// Pre-populate with old peers\n\t\tmc.lastSentPeers.Store(tailcfg.NodeID(100), struct{}{})\n\t\tmc.lastSentPeers.Store(tailcfg.NodeID(200), 
struct{}{})\n\n\t\t// Send full peer list\n\t\tmc.updateSentPeers(testMapResponseWithPeers(1, 2, 3))\n\n\t\t// Old peers should be gone\n\t\t_, exists := mc.lastSentPeers.Load(tailcfg.NodeID(100))\n\t\tassert.False(t, exists, \"old peer 100 should be cleared\")\n\n\t\t// New peers should be tracked\n\t\tfor _, id := range ids(1, 2, 3) {\n\t\t\t_, exists := mc.lastSentPeers.Load(id)\n\t\t\tassert.True(t, exists, \"peer %d should be tracked\", id)\n\t\t}\n\t})\n\n\tt.Run(\"incremental_add_via_PeersChanged\", func(t *testing.T) {\n\t\tmc := newMultiChannelNodeConn(1, nil)\n\t\tmc.lastSentPeers.Store(tailcfg.NodeID(1), struct{}{})\n\n\t\tresp := testMapResponse()\n\t\tresp.PeersChanged = []*tailcfg.Node{{ID: 2}, {ID: 3}}\n\t\tmc.updateSentPeers(resp)\n\n\t\t// All three should be tracked\n\t\tfor _, id := range ids(1, 2, 3) {\n\t\t\t_, exists := mc.lastSentPeers.Load(id)\n\t\t\tassert.True(t, exists, \"peer %d should be tracked\", id)\n\t\t}\n\t})\n\n\tt.Run(\"incremental_remove_via_PeersRemoved\", func(t *testing.T) {\n\t\tmc := newMultiChannelNodeConn(1, nil)\n\t\tmc.lastSentPeers.Store(tailcfg.NodeID(1), struct{}{})\n\t\tmc.lastSentPeers.Store(tailcfg.NodeID(2), struct{}{})\n\t\tmc.lastSentPeers.Store(tailcfg.NodeID(3), struct{}{})\n\n\t\tresp := testMapResponse()\n\t\tresp.PeersRemoved = ids(2)\n\t\tmc.updateSentPeers(resp)\n\n\t\t_, exists1 := mc.lastSentPeers.Load(tailcfg.NodeID(1))\n\t\t_, exists2 := mc.lastSentPeers.Load(tailcfg.NodeID(2))\n\t\t_, exists3 := mc.lastSentPeers.Load(tailcfg.NodeID(3))\n\n\t\tassert.True(t, exists1, \"peer 1 should remain\")\n\t\tassert.False(t, exists2, \"peer 2 should be removed\")\n\t\tassert.True(t, exists3, \"peer 3 should remain\")\n\t})\n\n\tt.Run(\"nil_response_is_noop\", func(t *testing.T) {\n\t\tmc := newMultiChannelNodeConn(1, nil)\n\t\tmc.lastSentPeers.Store(tailcfg.NodeID(1), struct{}{})\n\n\t\tmc.updateSentPeers(nil)\n\n\t\t_, exists := mc.lastSentPeers.Load(tailcfg.NodeID(1))\n\t\tassert.True(t, exists, \"nil response should not change tracked peers\")\n\t})\n\n\tt.Run(\"full_then_incremental_sequence\", func(t *testing.T) {\n\t\tmc := newMultiChannelNodeConn(1, nil)\n\n\t\t// Step 1: Full peer list\n\t\tmc.updateSentPeers(testMapResponseWithPeers(1, 2, 3))\n\n\t\t// Step 2: Add peer 4\n\t\tresp := testMapResponse()\n\t\tresp.PeersChanged = []*tailcfg.Node{{ID: 4}}\n\t\tmc.updateSentPeers(resp)\n\n\t\t// Step 3: Remove peer 2\n\t\tresp2 := testMapResponse()\n\t\tresp2.PeersRemoved = ids(2)\n\t\tmc.updateSentPeers(resp2)\n\n\t\t// Should have 1, 3, 4\n\t\tfor _, id := range ids(1, 3, 4) {\n\t\t\t_, exists := mc.lastSentPeers.Load(id)\n\t\t\tassert.True(t, exists, \"peer %d should be tracked\", id)\n\t\t}\n\n\t\t_, exists := mc.lastSentPeers.Load(tailcfg.NodeID(2))\n\t\tassert.False(t, exists, \"peer 2 should have been removed\")\n\t})\n\n\tt.Run(\"empty_full_peer_list_clears_all\", func(t *testing.T) {\n\t\tmc := newMultiChannelNodeConn(1, nil)\n\t\tmc.lastSentPeers.Store(tailcfg.NodeID(1), struct{}{})\n\t\tmc.lastSentPeers.Store(tailcfg.NodeID(2), struct{}{})\n\n\t\t// Empty Peers slice (not nil) means \"no peers\"\n\t\tresp := testMapResponse()\n\t\tresp.Peers = []*tailcfg.Node{} // empty, not nil\n\t\tmc.updateSentPeers(resp)\n\n\t\tcount := 0\n\n\t\tmc.lastSentPeers.Range(func(_ tailcfg.NodeID, _ struct{}) bool {\n\t\t\tcount++\n\t\t\treturn true\n\t\t})\n\t\tassert.Equal(t, 0, count, \"empty peer list should clear all tracking\")\n\t})\n}\n\n// ============================================================================\n// 
generateMapResponse Tests (branching logic only, no DB needed)\n// ============================================================================\n\nfunc TestGenerateMapResponse_EmptyChange(t *testing.T) {\n\tmc := newMockNodeConnection(1)\n\n\tresp, err := generateMapResponse(mc, nil, change.Change{})\n\n\trequire.NoError(t, err)\n\tassert.Nil(t, resp, \"empty change should return nil response\")\n}\n\nfunc TestGenerateMapResponse_InvalidNodeID(t *testing.T) {\n\tmc := newMockNodeConnection(0) // Invalid ID\n\n\tresp, err := generateMapResponse(mc, &mapper{}, change.DERPMap())\n\n\trequire.ErrorIs(t, err, ErrInvalidNodeID)\n\tassert.Nil(t, resp)\n}\n\nfunc TestGenerateMapResponse_NilMapper(t *testing.T) {\n\tmc := newMockNodeConnection(1)\n\n\tresp, err := generateMapResponse(mc, nil, change.DERPMap())\n\n\trequire.ErrorIs(t, err, ErrMapperNil)\n\tassert.Nil(t, resp)\n}\n\nfunc TestGenerateMapResponse_SelfOnlyOtherNode(t *testing.T) {\n\tmc := newMockNodeConnection(1)\n\n\t// SelfUpdate targeted at node 99 should be skipped for node 1\n\tch := change.SelfUpdate(99)\n\tresp, err := generateMapResponse(mc, &mapper{}, ch)\n\n\trequire.NoError(t, err)\n\tassert.Nil(t, resp,\n\t\t\"self-only change targeted at different node should return nil\")\n}\n\nfunc TestGenerateMapResponse_SelfOnlySameNode(t *testing.T) {\n\t// SelfUpdate targeted at node 1: IsSelfOnly()=true and TargetNode==nodeID\n\t// This should NOT be short-circuited - it should attempt to generate.\n\t// We verify the routing logic by checking that the change is not empty\n\t// and not filtered out (unlike SelfOnlyOtherNode above).\n\tch := change.SelfUpdate(1)\n\tassert.False(t, ch.IsEmpty(), \"SelfUpdate should not be empty\")\n\tassert.True(t, ch.IsSelfOnly(), \"SelfUpdate should be self-only\")\n\tassert.True(t, ch.ShouldSendToNode(1), \"should be sent to target node\")\n\tassert.False(t, ch.ShouldSendToNode(2), \"should NOT be sent to other nodes\")\n}\n\n// ============================================================================\n// handleNodeChange Tests\n// ============================================================================\n\nfunc TestHandleNodeChange_NilConnection(t *testing.T) {\n\terr := handleNodeChange(nil, nil, change.DERPMap())\n\n\tassert.ErrorIs(t, err, ErrNodeConnectionNil)\n}\n\nfunc TestHandleNodeChange_EmptyChange(t *testing.T) {\n\tmc := newMockNodeConnection(1)\n\n\terr := handleNodeChange(mc, nil, change.Change{})\n\n\trequire.NoError(t, err, \"empty change should not send anything\")\n\tassert.Empty(t, mc.getSent(), \"no data should be sent for empty change\")\n}\n\nvar errConnectionBroken = errors.New(\"connection broken\")\n\nfunc TestHandleNodeChange_SendError(t *testing.T) {\n\tmc := newMockNodeConnection(1).withSendError(errConnectionBroken)\n\n\t// Need a real mapper for this test - we can't easily mock it.\n\t// Instead, test that when generateMapResponse returns nil data,\n\t// no send occurs. 
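In sketch form, the guard under\n\t// test is assumed to look like (illustrative, not the actual body):\n\t//\n\t//\tdata, err := generateMapResponse(nc, m, c)\n\t//\tif err != nil || data == nil {\n\t//\t\treturn err // nothing to send\n\t//\t}\n\t//\treturn nc.send(data) // a send error can only surface here\n\t//\n\t// 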
The send error path requires a valid MapResponse\n\t// which requires a mapper with state.\n\t// So we test the nil-data path here.\n\terr := handleNodeChange(mc, nil, change.Change{})\n\tassert.NoError(t, err, \"empty change produces nil data, no send needed\")\n}\n\nfunc TestHandleNodeChange_NilDataNoSend(t *testing.T) {\n\tmc := newMockNodeConnection(1)\n\n\t// SelfUpdate targeted at different node produces nil data\n\tch := change.SelfUpdate(99)\n\terr := handleNodeChange(mc, &mapper{}, ch)\n\n\trequire.NoError(t, err, \"nil data should not cause error\")\n\tassert.Empty(t, mc.getSent(), \"nil data should not trigger send\")\n}\n\n// ============================================================================\n// connectionEntry concurrent safety Tests\n// ============================================================================\n\nfunc TestConnectionEntry_ConcurrentSends(t *testing.T) {\n\tch := make(chan *tailcfg.MapResponse, 100)\n\tentry := makeConnectionEntry(\"concurrent\", ch)\n\n\tvar (\n\t\twg           sync.WaitGroup\n\t\tsuccessCount atomic.Int64\n\t)\n\n\t// 50 goroutines sending concurrently\n\n\tfor range 50 {\n\t\twg.Go(func() {\n\t\t\terr := entry.send(testMapResponse())\n\t\t\tif err == nil {\n\t\t\t\tsuccessCount.Add(1)\n\t\t\t}\n\t\t})\n\t}\n\n\twg.Wait()\n\n\tassert.Equal(t, int64(50), successCount.Load(),\n\t\t\"all sends to buffered channel should succeed\")\n\n\t// Drain and count\n\tcount := 0\n\n\tfor range len(ch) {\n\t\t<-ch\n\n\t\tcount++\n\t}\n\n\tassert.Equal(t, 50, count, \"all 50 messages should be on channel\")\n}\n\nfunc TestConnectionEntry_ConcurrentSendAndClose(t *testing.T) {\n\tch := make(chan *tailcfg.MapResponse, 100)\n\tentry := makeConnectionEntry(\"race\", ch)\n\n\tvar (\n\t\twg       sync.WaitGroup\n\t\tpanicked atomic.Bool\n\t)\n\n\t// Goroutines sending rapidly\n\n\tfor range 20 {\n\t\twg.Go(func() {\n\t\t\tdefer func() {\n\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\tpanicked.Store(true)\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tfor range 10 {\n\t\t\t\t_ = entry.send(testMapResponse())\n\t\t\t}\n\t\t})\n\t}\n\n\t// Close midway through\n\n\twg.Go(func() {\n\t\ttime.Sleep(1 * time.Millisecond) //nolint:forbidigo // concurrency test coordination\n\t\tentry.closed.Store(true)\n\t})\n\n\twg.Wait()\n\n\tassert.False(t, panicked.Load(),\n\t\t\"concurrent send and close should not panic\")\n}\n\n// ============================================================================\n// multiChannelNodeConn concurrent Tests\n// ============================================================================\n\nfunc TestMultiChannelSend_ConcurrentAddAndSend(t *testing.T) {\n\tmc := newMultiChannelNodeConn(1, nil)\n\n\t// Start with one connection\n\tch1 := make(chan *tailcfg.MapResponse, 100)\n\tmc.addConnection(makeConnectionEntry(\"initial\", ch1))\n\n\tvar (\n\t\twg       sync.WaitGroup\n\t\tpanicked atomic.Bool\n\t)\n\n\t// Goroutine adding connections\n\n\twg.Go(func() {\n\t\tdefer func() {\n\t\t\tif r := recover(); r != nil {\n\t\t\t\tpanicked.Store(true)\n\t\t\t}\n\t\t}()\n\n\t\tfor i := range 10 {\n\t\t\tch := make(chan *tailcfg.MapResponse, 100)\n\t\t\tmc.addConnection(makeConnectionEntry(fmt.Sprintf(\"added-%d\", i), ch))\n\t\t}\n\t})\n\n\t// Goroutine sending data\n\n\twg.Go(func() {\n\t\tdefer func() {\n\t\t\tif r := recover(); r != nil {\n\t\t\t\tpanicked.Store(true)\n\t\t\t}\n\t\t}()\n\n\t\tfor range 20 {\n\t\t\t_ = mc.send(testMapResponse())\n\t\t}\n\t})\n\n\twg.Wait()\n\n\tassert.False(t, panicked.Load(),\n\t\t\"concurrent add and send should not 
panic (mutex protects both)\")\n}\n\nfunc TestMultiChannelSend_ConcurrentRemoveAndSend(t *testing.T) {\n\tmc := newMultiChannelNodeConn(1, nil)\n\n\tchannels := make([]chan *tailcfg.MapResponse, 10)\n\tfor i := range channels {\n\t\tchannels[i] = make(chan *tailcfg.MapResponse, 100)\n\t\tmc.addConnection(makeConnectionEntry(fmt.Sprintf(\"conn-%d\", i), channels[i]))\n\t}\n\n\tvar (\n\t\twg       sync.WaitGroup\n\t\tpanicked atomic.Bool\n\t)\n\n\t// Goroutine removing connections\n\n\twg.Go(func() {\n\t\tdefer func() {\n\t\t\tif r := recover(); r != nil {\n\t\t\t\tpanicked.Store(true)\n\t\t\t}\n\t\t}()\n\n\t\tfor _, ch := range channels {\n\t\t\tmc.removeConnectionByChannel(ch)\n\t\t}\n\t})\n\n\t// Goroutine sending data concurrently\n\n\twg.Go(func() {\n\t\tdefer func() {\n\t\t\tif r := recover(); r != nil {\n\t\t\t\tpanicked.Store(true)\n\t\t\t}\n\t\t}()\n\n\t\tfor range 20 {\n\t\t\t_ = mc.send(testMapResponse())\n\t\t}\n\t})\n\n\twg.Wait()\n\n\tassert.False(t, panicked.Load(),\n\t\t\"concurrent remove and send should not panic\")\n}\n\n// ============================================================================\n// Regression tests for H1 (timer leak) and H3 (lifecycle)\n// ============================================================================\n\n// TestConnectionEntry_SendFastPath_TimerStopped is a regression guard for H1.\n// Before the fix, connectionEntry.send used time.After(50ms) which leaked a\n// timer into the runtime heap on every call even when the channel send\n// succeeded immediately. The fix switched to time.NewTimer + defer Stop().\n//\n// This test sends many messages on a buffered (non-blocking) channel and\n// checks that the number of live goroutines stays bounded, which would\n// grow without bound under the old time.After approach at high call rates.\nfunc TestConnectionEntry_SendFastPath_TimerStopped(t *testing.T) {\n\tconst sends = 5000\n\n\tch := make(chan *tailcfg.MapResponse, sends)\n\n\tentry := &connectionEntry{\n\t\tid:      \"timer-leak-test\",\n\t\tc:       ch,\n\t\tversion: 100,\n\t\tcreated: time.Now(),\n\t}\n\n\tresp := testMapResponse()\n\n\tfor range sends {\n\t\terr := entry.send(resp)\n\t\trequire.NoError(t, err)\n\t}\n\n\t// Drain the channel so we aren't holding references.\n\tfor range sends {\n\t\t<-ch\n\t}\n\n\t// Force a GC + timer cleanup pass.\n\truntime.GC()\n\n\t// If timers were leaking we'd see a goroutine count much higher\n\t// than baseline. With 5000 leaked timers the count would be\n\t// noticeably elevated. 
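Rather than counting\n\t// timers directly, the test uses the goroutine count as a coarse, portable\n\t// proxy. For reference, the fixed pattern in sketch form (assuming the\n\t// 50ms send timeout exercised elsewhere in this file; not a verbatim copy\n\t// of connectionEntry.send):\n\t//\n\t//\ttimer := time.NewTimer(50 * time.Millisecond)\n\t//\tdefer timer.Stop() // releases the timer even on the fast path\n\t//\tselect {\n\t//\tcase c <- data:\n\t//\tcase <-timer.C:\n\t//\t\treturn ErrConnectionSendTimeout\n\t//\t}\n\t//\n\t// 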
We just check it's reasonable.\n\tnumGR := runtime.NumGoroutine()\n\tassert.Less(t, numGR, 200,\n\t\t\"goroutine count after %d fast-path sends should be bounded; got %d (possible timer leak)\", sends, numGR)\n}\n\n// TestBatcher_CloseWaitsForWorkers is a regression guard for H3.\n// Before the fix, Close() would tear down node connections while workers\n// were potentially still running, risking sends on closed channels.\n// The fix added sync.WaitGroup tracking so Close() blocks until all\n// worker goroutines exit.\nfunc TestBatcher_CloseWaitsForWorkers(t *testing.T) {\n\tb := NewBatcher(50*time.Millisecond, 4, nil)\n\n\tgoroutinesBefore := runtime.NumGoroutine()\n\n\tb.Start()\n\n\t// Give workers time to start.\n\ttime.Sleep(20 * time.Millisecond) //nolint:forbidigo // test timing\n\n\tgoroutinesDuring := runtime.NumGoroutine()\n\n\t// We expect at least 5 new goroutines: 1 doWork + 4 workers.\n\tassert.GreaterOrEqual(t, goroutinesDuring-goroutinesBefore, 5,\n\t\t\"expected doWork + 4 workers to be running\")\n\n\t// Close should block until all workers have exited.\n\tb.Close()\n\n\t// After Close returns, goroutines should have dropped back.\n\t// Allow a small margin for runtime goroutines.\n\tgoroutinesAfter := runtime.NumGoroutine()\n\tassert.InDelta(t, goroutinesBefore, goroutinesAfter, 3,\n\t\t\"goroutines should return to baseline after Close(); before=%d after=%d\",\n\t\tgoroutinesBefore, goroutinesAfter)\n}\n\n// TestBatcher_CloseThenStartIsNoop verifies the lifecycle contract:\n// once a Batcher has been started, calling Start() again is a no-op\n// (the started flag prevents double-start).\nfunc TestBatcher_CloseThenStartIsNoop(t *testing.T) {\n\tb := NewBatcher(50*time.Millisecond, 2, nil)\n\n\tb.Start()\n\tb.Close()\n\n\tgoroutinesBefore := runtime.NumGoroutine()\n\n\t// Second Start should be a no-op because started is already true.\n\tb.Start()\n\n\t// Allow a moment for any hypothetical goroutine to appear.\n\ttime.Sleep(10 * time.Millisecond) //nolint:forbidigo // test timing\n\n\tgoroutinesAfter := runtime.NumGoroutine()\n\n\tassert.InDelta(t, goroutinesBefore, goroutinesAfter, 1,\n\t\t\"Start() after Close() should not spawn new goroutines; before=%d after=%d\",\n\t\tgoroutinesBefore, goroutinesAfter)\n}\n\n// TestBatcher_CloseStopsTicker verifies that Close() stops the internal\n// ticker, preventing resource leaks.\nfunc TestBatcher_CloseStopsTicker(t *testing.T) {\n\tb := NewBatcher(10*time.Millisecond, 1, nil)\n\n\tb.Start()\n\tb.Close()\n\n\t// After Close, the ticker should be stopped. Reading from a stopped\n\t// ticker's channel should not deliver any values.\n\tselect {\n\tcase <-b.tick.C:\n\t\tt.Fatal(\"ticker fired after Close(); ticker.Stop() was not called\")\n\tcase <-time.After(50 * time.Millisecond): //nolint:forbidigo // test timing\n\t\t// Expected: no tick received.\n\t}\n}\n\n// ============================================================================\n// Regression tests for M1, M3, M7\n// ============================================================================\n\n// TestBatcher_CloseBeforeStart_DoesNotHang is a regression guard for M1.\n// Before the fix, done was nil until Start() was called. queueWork and\n// MapResponseFromChange select on done, so a nil channel would block\n// forever when workCh was full. 
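A receive from a nil channel\n// never proceeds, so such a select can park forever; a minimal sketch of the\n// hazard (illustrative only, not code from this package):\n//\n//\tvar done chan struct{} // nil: never initialized\n//\tselect {\n//\tcase workCh <- w: // blocks while workCh is full\n//\tcase <-done: // a nil channel case can never fire\n//\t}\n//\n// 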
With done initialized in NewBatcher,\n// Close() can be called safely before Start().\nfunc TestBatcher_CloseBeforeStart_DoesNotHang(t *testing.T) {\n\tb := NewBatcher(50*time.Millisecond, 2, nil)\n\n\t// Close without Start must not panic or hang.\n\tdone := make(chan struct{})\n\n\tgo func() {\n\t\tb.Close()\n\t\tclose(done)\n\t}()\n\n\tselect {\n\tcase <-done:\n\t\t// Success: Close returned promptly.\n\tcase <-time.After(2 * time.Second): //nolint:forbidigo // test timing\n\t\tt.Fatal(\"Close() before Start() hung; done channel was likely nil\")\n\t}\n}\n\n// TestBatcher_QueueWorkAfterClose_DoesNotHang verifies that queueWork\n// returns immediately via the done channel when the batcher is closed,\n// even without Start() having been called.\nfunc TestBatcher_QueueWorkAfterClose_DoesNotHang(t *testing.T) {\n\tb := NewBatcher(50*time.Millisecond, 1, nil)\n\tb.Close()\n\n\tdone := make(chan struct{})\n\n\tgo func() {\n\t\t// queueWork selects on done; with done closed this must return.\n\t\tb.queueWork(work{})\n\t\tclose(done)\n\t}()\n\n\tselect {\n\tcase <-done:\n\t\t// Success\n\tcase <-time.After(2 * time.Second): //nolint:forbidigo // test timing\n\t\tt.Fatal(\"queueWork hung after Close(); done channel select not working\")\n\t}\n}\n\n// TestIsConnected_FalseAfterAddNodeFailure is a regression guard for M3.\n// Before the fix, AddNode error paths removed the connection but did not\n// mark the node as disconnected. IsConnected would return true for a\n// node with zero active connections.\nfunc TestIsConnected_FalseAfterAddNodeFailure(t *testing.T) {\n\tb := NewBatcher(50*time.Millisecond, 2, nil)\n\tb.Start()\n\n\tdefer b.Close()\n\n\tid := types.NodeID(42)\n\n\t// Pre-create the node entry so AddNode reuses it, and set up a\n\t// multiChannelNodeConn with no mapper so MapResponseFromChange will fail.\n\t// markConnected() simulates a previous session leaving it connected.\n\tnc := newMultiChannelNodeConn(id, nil)\n\tnc.markConnected()\n\tb.nodes.Store(id, nc)\n\n\tch := make(chan *tailcfg.MapResponse, 1)\n\n\terr := b.AddNode(id, ch, 100, func() {})\n\trequire.Error(t, err, \"AddNode should fail with nil mapper\")\n\n\t// After failure, the node should NOT be reported as connected.\n\tassert.False(t, b.IsConnected(id),\n\t\t\"IsConnected should return false after AddNode failure with no remaining connections\")\n}\n\n// TestRemoveConnectionAtIndex_NilsTrailingSlot is a regression guard for M7.\n// Before the fix, removeConnectionAtIndexLocked used append(s[:i], s[i+1:]...)\n// which left a stale pointer in the backing array's last slot. 
The fix\n// uses copy + explicit nil of the trailing element.\nfunc TestRemoveConnectionAtIndex_NilsTrailingSlot(t *testing.T) {\n\tmc := newMultiChannelNodeConn(1, nil)\n\n\t// Manually add three entries under the lock.\n\tentries := make([]*connectionEntry, 3)\n\tfor i := range entries {\n\t\tentries[i] = &connectionEntry{id: fmt.Sprintf(\"conn-%d\", i), c: make(chan<- *tailcfg.MapResponse)}\n\t}\n\n\tmc.mutex.Lock()\n\tmc.connections = append(mc.connections, entries...)\n\n\t// Remove the middle entry (index 1).\n\tremoved := mc.removeConnectionAtIndexLocked(1, false)\n\trequire.Equal(t, entries[1], removed)\n\n\t// After removal, len should be 2 and the backing array slot at\n\t// index 2 (the old len-1) should be nil.\n\trequire.Len(t, mc.connections, 2)\n\tassert.Equal(t, entries[0], mc.connections[0])\n\tassert.Equal(t, entries[2], mc.connections[1])\n\n\t// Check the backing array directly: the slot just past the new\n\t// length must be nil to avoid retaining the pointer.\n\tbacking := mc.connections[:3]\n\tassert.Nil(t, backing[2],\n\t\t\"trailing slot in backing array should be nil after removal\")\n\n\tmc.mutex.Unlock()\n}\n"
  },
  {
    "path": "hscontrol/mapper/builder.go",
    "content": "package mapper\n\nimport (\n\t\"net/netip\"\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/policy\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/views\"\n\t\"tailscale.com/util/multierr\"\n)\n\n// MapResponseBuilder provides a fluent interface for building tailcfg.MapResponse.\ntype MapResponseBuilder struct {\n\tresp   *tailcfg.MapResponse\n\tmapper *mapper\n\tnodeID types.NodeID\n\tcapVer tailcfg.CapabilityVersion\n\terrs   []error\n\n\tdebugType debugType\n}\n\ntype debugType string\n\nconst (\n\tfullResponseDebug   debugType = \"full\"\n\tselfResponseDebug   debugType = \"self\"\n\tchangeResponseDebug debugType = \"change\"\n\tpolicyResponseDebug debugType = \"policy\"\n)\n\n// NewMapResponseBuilder creates a new builder with basic fields set.\nfunc (m *mapper) NewMapResponseBuilder(nodeID types.NodeID) *MapResponseBuilder {\n\tnow := time.Now()\n\n\treturn &MapResponseBuilder{\n\t\tresp: &tailcfg.MapResponse{\n\t\t\tKeepAlive:   false,\n\t\t\tControlTime: &now,\n\t\t},\n\t\tmapper: m,\n\t\tnodeID: nodeID,\n\t\terrs:   nil,\n\t}\n}\n\n// addError adds an error to the builder's error list.\nfunc (b *MapResponseBuilder) addError(err error) {\n\tif err != nil {\n\t\tb.errs = append(b.errs, err)\n\t}\n}\n\n// hasErrors returns true if the builder has accumulated any errors.\nfunc (b *MapResponseBuilder) hasErrors() bool {\n\treturn len(b.errs) > 0\n}\n\n// WithCapabilityVersion sets the capability version for the response.\nfunc (b *MapResponseBuilder) WithCapabilityVersion(capVer tailcfg.CapabilityVersion) *MapResponseBuilder {\n\tb.capVer = capVer\n\treturn b\n}\n\n// WithSelfNode adds the requesting node to the response.\nfunc (b *MapResponseBuilder) WithSelfNode() *MapResponseBuilder {\n\tnv, ok := b.mapper.state.GetNodeByID(b.nodeID)\n\tif !ok {\n\t\tb.addError(ErrNodeNotFoundMapper)\n\t\treturn b\n\t}\n\n\t_, matchers := b.mapper.state.Filter()\n\n\ttailnode, err := nv.TailNode(\n\t\tb.capVer,\n\t\tfunc(id types.NodeID) []netip.Prefix {\n\t\t\treturn policy.ReduceRoutes(nv, b.mapper.state.GetNodePrimaryRoutes(id), matchers)\n\t\t},\n\t\tb.mapper.cfg)\n\tif err != nil {\n\t\tb.addError(err)\n\t\treturn b\n\t}\n\n\tb.resp.Node = tailnode\n\n\treturn b\n}\n\nfunc (b *MapResponseBuilder) WithDebugType(t debugType) *MapResponseBuilder {\n\tif debugDumpMapResponsePath != \"\" {\n\t\tb.debugType = t\n\t}\n\n\treturn b\n}\n\n// WithDERPMap adds the DERP map to the response.\nfunc (b *MapResponseBuilder) WithDERPMap() *MapResponseBuilder {\n\tb.resp.DERPMap = b.mapper.state.DERPMap().AsStruct()\n\treturn b\n}\n\n// WithDomain adds the domain configuration.\nfunc (b *MapResponseBuilder) WithDomain() *MapResponseBuilder {\n\tb.resp.Domain = b.mapper.cfg.Domain()\n\treturn b\n}\n\n// WithCollectServicesDisabled sets the collect services flag to false.\nfunc (b *MapResponseBuilder) WithCollectServicesDisabled() *MapResponseBuilder {\n\tb.resp.CollectServices.Set(false)\n\treturn b\n}\n\n// WithDebugConfig adds debug configuration\n// It disables log tailing if the mapper's LogTail is not enabled.\nfunc (b *MapResponseBuilder) WithDebugConfig() *MapResponseBuilder {\n\tb.resp.Debug = &tailcfg.Debug{\n\t\tDisableLogTail: !b.mapper.cfg.LogTail.Enabled,\n\t}\n\n\treturn b\n}\n\n// WithSSHPolicy adds SSH policy configuration for the requesting node.\nfunc (b *MapResponseBuilder) WithSSHPolicy() *MapResponseBuilder {\n\tnode, ok := b.mapper.state.GetNodeByID(b.nodeID)\n\tif !ok 
{\n\t\tb.addError(ErrNodeNotFoundMapper)\n\t\treturn b\n\t}\n\n\tsshPolicy, err := b.mapper.state.SSHPolicy(node)\n\tif err != nil {\n\t\tb.addError(err)\n\t\treturn b\n\t}\n\n\tb.resp.SSHPolicy = sshPolicy\n\n\treturn b\n}\n\n// WithDNSConfig adds DNS configuration for the requesting node.\nfunc (b *MapResponseBuilder) WithDNSConfig() *MapResponseBuilder {\n\tnode, ok := b.mapper.state.GetNodeByID(b.nodeID)\n\tif !ok {\n\t\tb.addError(ErrNodeNotFoundMapper)\n\t\treturn b\n\t}\n\n\tb.resp.DNSConfig = generateDNSConfig(b.mapper.cfg, node)\n\n\treturn b\n}\n\n// WithUserProfiles adds user profiles for the requesting node and given peers.\nfunc (b *MapResponseBuilder) WithUserProfiles(peers views.Slice[types.NodeView]) *MapResponseBuilder {\n\tnode, ok := b.mapper.state.GetNodeByID(b.nodeID)\n\tif !ok {\n\t\tb.addError(ErrNodeNotFoundMapper)\n\t\treturn b\n\t}\n\n\tb.resp.UserProfiles = generateUserProfiles(node, peers)\n\n\treturn b\n}\n\n// WithPacketFilters adds packet filter rules based on policy.\nfunc (b *MapResponseBuilder) WithPacketFilters() *MapResponseBuilder {\n\tnode, ok := b.mapper.state.GetNodeByID(b.nodeID)\n\tif !ok {\n\t\tb.addError(ErrNodeNotFoundMapper)\n\t\treturn b\n\t}\n\n\t// FilterForNode returns rules already reduced to only those relevant for this node.\n\t// For autogroup:self policies, it returns per-node compiled rules.\n\t// For global policies, it returns the global filter reduced for this node.\n\tfilter, err := b.mapper.state.FilterForNode(node)\n\tif err != nil {\n\t\tb.addError(err)\n\t\treturn b\n\t}\n\n\t// CapVer 81: 2023-11-17: MapResponse.PacketFilters (incremental packet filter updates)\n\t// Currently, we do not send incremental packet filters; however, using the\n\t// new PacketFilters field and \"base\" allows us to send a full update when we\n\t// have to send an empty list, avoiding the hack in the else block.\n\tb.resp.PacketFilters = map[string][]tailcfg.FilterRule{\n\t\t\"base\": filter,\n\t}\n\n\treturn b\n}\n\n// WithPeers adds full peer list with policy filtering (for full map response).\nfunc (b *MapResponseBuilder) WithPeers(peers views.Slice[types.NodeView]) *MapResponseBuilder {\n\ttailPeers, err := b.buildTailPeers(peers)\n\tif err != nil {\n\t\tb.addError(err)\n\t\treturn b\n\t}\n\n\tb.resp.Peers = tailPeers\n\n\treturn b\n}\n\n// WithPeerChanges adds changed peers with policy filtering (for incremental updates).\nfunc (b *MapResponseBuilder) WithPeerChanges(peers views.Slice[types.NodeView]) *MapResponseBuilder {\n\ttailPeers, err := b.buildTailPeers(peers)\n\tif err != nil {\n\t\tb.addError(err)\n\t\treturn b\n\t}\n\n\tb.resp.PeersChanged = tailPeers\n\n\treturn b\n}\n\n// buildTailPeers converts views.Slice[types.NodeView] to []tailcfg.Node with policy filtering and sorting.\nfunc (b *MapResponseBuilder) buildTailPeers(peers views.Slice[types.NodeView]) ([]*tailcfg.Node, error) {\n\tnode, ok := b.mapper.state.GetNodeByID(b.nodeID)\n\tif !ok {\n\t\treturn nil, ErrNodeNotFoundMapper\n\t}\n\n\t// Get unreduced matchers for peer relationship determination.\n\t// MatchersForNode returns unreduced matchers that include all rules where the node\n\t// could be either source or destination. 
This is different from FilterForNode which\n\t// returns reduced rules for packet filtering (only rules where node is destination).\n\tmatchers, err := b.mapper.state.MatchersForNode(node)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// If there are filter rules present, see if there are any nodes that cannot\n\t// access each other at all and remove them from the peers.\n\tvar changedViews views.Slice[types.NodeView]\n\tif len(matchers) > 0 {\n\t\tchangedViews = policy.ReduceNodes(node, peers, matchers)\n\t} else {\n\t\tchangedViews = peers\n\t}\n\n\ttailPeers, err := types.TailNodes(\n\t\tchangedViews, b.capVer,\n\t\tfunc(id types.NodeID) []netip.Prefix {\n\t\t\treturn policy.ReduceRoutes(node, b.mapper.state.GetNodePrimaryRoutes(id), matchers)\n\t\t},\n\t\tb.mapper.cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Peers is always returned sorted by Node.ID.\n\tsort.SliceStable(tailPeers, func(x, y int) bool {\n\t\treturn tailPeers[x].ID < tailPeers[y].ID\n\t})\n\n\treturn tailPeers, nil\n}\n\n// WithPeerChangedPatch adds peer change patches.\nfunc (b *MapResponseBuilder) WithPeerChangedPatch(changes []*tailcfg.PeerChange) *MapResponseBuilder {\n\tb.resp.PeersChangedPatch = changes\n\treturn b\n}\n\n// WithPeersRemoved adds removed peer IDs.\nfunc (b *MapResponseBuilder) WithPeersRemoved(removedIDs ...types.NodeID) *MapResponseBuilder {\n\ttailscaleIDs := make([]tailcfg.NodeID, 0, len(removedIDs))\n\tfor _, id := range removedIDs {\n\t\ttailscaleIDs = append(tailscaleIDs, id.NodeID())\n\t}\n\n\tb.resp.PeersRemoved = tailscaleIDs\n\n\treturn b\n}\n\n// Build finalizes the response and returns it, or any accumulated errors.\nfunc (b *MapResponseBuilder) Build() (*tailcfg.MapResponse, error) {\n\tif len(b.errs) > 0 {\n\t\treturn nil, multierr.New(b.errs...)\n\t}\n\n\tif debugDumpMapResponsePath != \"\" {\n\t\twriteDebugMapResponse(b.resp, b.debugType, b.nodeID)\n\t}\n\n\treturn b.resp, nil\n}\n"
  },
  {
    "path": "hscontrol/mapper/builder_test.go",
    "content": "package mapper\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/state\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/tailcfg\"\n)\n\nfunc TestMapResponseBuilder_Basic(t *testing.T) {\n\tcfg := &types.Config{\n\t\tBaseDomain: \"example.com\",\n\t\tLogTail: types.LogTailConfig{\n\t\t\tEnabled: true,\n\t\t},\n\t}\n\n\tmockState := &state.State{}\n\tm := &mapper{\n\t\tcfg:   cfg,\n\t\tstate: mockState,\n\t}\n\n\tnodeID := types.NodeID(1)\n\n\tbuilder := m.NewMapResponseBuilder(nodeID)\n\n\t// Test basic builder creation\n\tassert.NotNil(t, builder)\n\tassert.Equal(t, nodeID, builder.nodeID)\n\tassert.NotNil(t, builder.resp)\n\tassert.False(t, builder.resp.KeepAlive)\n\tassert.NotNil(t, builder.resp.ControlTime)\n\tassert.WithinDuration(t, time.Now(), *builder.resp.ControlTime, time.Second)\n}\n\nfunc TestMapResponseBuilder_WithCapabilityVersion(t *testing.T) {\n\tcfg := &types.Config{}\n\tmockState := &state.State{}\n\tm := &mapper{\n\t\tcfg:   cfg,\n\t\tstate: mockState,\n\t}\n\n\tnodeID := types.NodeID(1)\n\tcapVer := tailcfg.CapabilityVersion(42)\n\n\tbuilder := m.NewMapResponseBuilder(nodeID).\n\t\tWithCapabilityVersion(capVer)\n\n\tassert.Equal(t, capVer, builder.capVer)\n\tassert.False(t, builder.hasErrors())\n}\n\nfunc TestMapResponseBuilder_WithDomain(t *testing.T) {\n\tdomain := \"test.example.com\"\n\tcfg := &types.Config{\n\t\tServerURL:  \"https://test.example.com\",\n\t\tBaseDomain: domain,\n\t}\n\n\tmockState := &state.State{}\n\tm := &mapper{\n\t\tcfg:   cfg,\n\t\tstate: mockState,\n\t}\n\n\tnodeID := types.NodeID(1)\n\n\tbuilder := m.NewMapResponseBuilder(nodeID).\n\t\tWithDomain()\n\n\tassert.Equal(t, domain, builder.resp.Domain)\n\tassert.False(t, builder.hasErrors())\n}\n\nfunc TestMapResponseBuilder_WithCollectServicesDisabled(t *testing.T) {\n\tcfg := &types.Config{}\n\tmockState := &state.State{}\n\tm := &mapper{\n\t\tcfg:   cfg,\n\t\tstate: mockState,\n\t}\n\n\tnodeID := types.NodeID(1)\n\n\tbuilder := m.NewMapResponseBuilder(nodeID).\n\t\tWithCollectServicesDisabled()\n\n\tvalue, isSet := builder.resp.CollectServices.Get()\n\tassert.True(t, isSet)\n\tassert.False(t, value)\n\tassert.False(t, builder.hasErrors())\n}\n\nfunc TestMapResponseBuilder_WithDebugConfig(t *testing.T) {\n\ttests := []struct {\n\t\tname           string\n\t\tlogTailEnabled bool\n\t\texpected       bool\n\t}{\n\t\t{\n\t\t\tname:           \"LogTail enabled\",\n\t\t\tlogTailEnabled: true,\n\t\t\texpected:       false, // DisableLogTail should be false when LogTail is enabled\n\t\t},\n\t\t{\n\t\t\tname:           \"LogTail disabled\",\n\t\t\tlogTailEnabled: false,\n\t\t\texpected:       true, // DisableLogTail should be true when LogTail is disabled\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tcfg := &types.Config{\n\t\t\t\tLogTail: types.LogTailConfig{\n\t\t\t\t\tEnabled: tt.logTailEnabled,\n\t\t\t\t},\n\t\t\t}\n\t\t\tmockState := &state.State{}\n\t\t\tm := &mapper{\n\t\t\t\tcfg:   cfg,\n\t\t\t\tstate: mockState,\n\t\t\t}\n\n\t\t\tnodeID := types.NodeID(1)\n\n\t\t\tbuilder := m.NewMapResponseBuilder(nodeID).\n\t\t\t\tWithDebugConfig()\n\n\t\t\trequire.NotNil(t, builder.resp.Debug)\n\t\t\tassert.Equal(t, tt.expected, builder.resp.Debug.DisableLogTail)\n\t\t\tassert.False(t, builder.hasErrors())\n\t\t})\n\t}\n}\n\nfunc TestMapResponseBuilder_WithPeerChangedPatch(t *testing.T) {\n\tcfg := 
&types.Config{}\n\tmockState := &state.State{}\n\tm := &mapper{\n\t\tcfg:   cfg,\n\t\tstate: mockState,\n\t}\n\n\tnodeID := types.NodeID(1)\n\tchanges := []*tailcfg.PeerChange{\n\t\t{\n\t\t\tNodeID:     123,\n\t\t\tDERPRegion: 1,\n\t\t},\n\t\t{\n\t\t\tNodeID:     456,\n\t\t\tDERPRegion: 2,\n\t\t},\n\t}\n\n\tbuilder := m.NewMapResponseBuilder(nodeID).\n\t\tWithPeerChangedPatch(changes)\n\n\tassert.Equal(t, changes, builder.resp.PeersChangedPatch)\n\tassert.False(t, builder.hasErrors())\n}\n\nfunc TestMapResponseBuilder_WithPeersRemoved(t *testing.T) {\n\tcfg := &types.Config{}\n\tmockState := &state.State{}\n\tm := &mapper{\n\t\tcfg:   cfg,\n\t\tstate: mockState,\n\t}\n\n\tnodeID := types.NodeID(1)\n\tremovedID1 := types.NodeID(123)\n\tremovedID2 := types.NodeID(456)\n\n\tbuilder := m.NewMapResponseBuilder(nodeID).\n\t\tWithPeersRemoved(removedID1, removedID2)\n\n\texpected := []tailcfg.NodeID{\n\t\tremovedID1.NodeID(),\n\t\tremovedID2.NodeID(),\n\t}\n\tassert.Equal(t, expected, builder.resp.PeersRemoved)\n\tassert.False(t, builder.hasErrors())\n}\n\nfunc TestMapResponseBuilder_ErrorHandling(t *testing.T) {\n\tcfg := &types.Config{}\n\tmockState := &state.State{}\n\tm := &mapper{\n\t\tcfg:   cfg,\n\t\tstate: mockState,\n\t}\n\n\tnodeID := types.NodeID(1)\n\n\t// Simulate an error in the builder\n\tbuilder := m.NewMapResponseBuilder(nodeID)\n\tbuilder.addError(assert.AnError)\n\n\t// All subsequent calls should continue to work and accumulate errors\n\tresult := builder.\n\t\tWithDomain().\n\t\tWithCollectServicesDisabled().\n\t\tWithDebugConfig()\n\n\tassert.True(t, result.hasErrors())\n\tassert.Len(t, result.errs, 1)\n\tassert.Equal(t, assert.AnError, result.errs[0])\n\n\t// Build should return the error\n\tdata, err := result.Build()\n\tassert.Nil(t, data)\n\tassert.Error(t, err)\n}\n\nfunc TestMapResponseBuilder_ChainedCalls(t *testing.T) {\n\tdomain := \"chained.example.com\"\n\tcfg := &types.Config{\n\t\tServerURL:  \"https://chained.example.com\",\n\t\tBaseDomain: domain,\n\t\tLogTail: types.LogTailConfig{\n\t\t\tEnabled: false,\n\t\t},\n\t}\n\n\tmockState := &state.State{}\n\tm := &mapper{\n\t\tcfg:   cfg,\n\t\tstate: mockState,\n\t}\n\n\tnodeID := types.NodeID(1)\n\tcapVer := tailcfg.CapabilityVersion(99)\n\n\tbuilder := m.NewMapResponseBuilder(nodeID).\n\t\tWithCapabilityVersion(capVer).\n\t\tWithDomain().\n\t\tWithCollectServicesDisabled().\n\t\tWithDebugConfig()\n\n\t// Verify all fields are set correctly\n\tassert.Equal(t, capVer, builder.capVer)\n\tassert.Equal(t, domain, builder.resp.Domain)\n\tvalue, isSet := builder.resp.CollectServices.Get()\n\tassert.True(t, isSet)\n\tassert.False(t, value)\n\tassert.NotNil(t, builder.resp.Debug)\n\tassert.True(t, builder.resp.Debug.DisableLogTail)\n\tassert.False(t, builder.hasErrors())\n}\n\nfunc TestMapResponseBuilder_MultipleWithPeersRemoved(t *testing.T) {\n\tcfg := &types.Config{}\n\tmockState := &state.State{}\n\tm := &mapper{\n\t\tcfg:   cfg,\n\t\tstate: mockState,\n\t}\n\n\tnodeID := types.NodeID(1)\n\tremovedID1 := types.NodeID(100)\n\tremovedID2 := types.NodeID(200)\n\n\t// Test calling WithPeersRemoved multiple times\n\tbuilder := m.NewMapResponseBuilder(nodeID).\n\t\tWithPeersRemoved(removedID1).\n\t\tWithPeersRemoved(removedID2)\n\n\t// Second call should overwrite the first\n\texpected := []tailcfg.NodeID{removedID2.NodeID()}\n\tassert.Equal(t, expected, builder.resp.PeersRemoved)\n\tassert.False(t, builder.hasErrors())\n}\n\nfunc TestMapResponseBuilder_EmptyPeerChangedPatch(t *testing.T) {\n\tcfg := 
&types.Config{}\n\tmockState := &state.State{}\n\tm := &mapper{\n\t\tcfg:   cfg,\n\t\tstate: mockState,\n\t}\n\n\tnodeID := types.NodeID(1)\n\n\tbuilder := m.NewMapResponseBuilder(nodeID).\n\t\tWithPeerChangedPatch([]*tailcfg.PeerChange{})\n\n\tassert.Empty(t, builder.resp.PeersChangedPatch)\n\tassert.False(t, builder.hasErrors())\n}\n\nfunc TestMapResponseBuilder_NilPeerChangedPatch(t *testing.T) {\n\tcfg := &types.Config{}\n\tmockState := &state.State{}\n\tm := &mapper{\n\t\tcfg:   cfg,\n\t\tstate: mockState,\n\t}\n\n\tnodeID := types.NodeID(1)\n\n\tbuilder := m.NewMapResponseBuilder(nodeID).\n\t\tWithPeerChangedPatch(nil)\n\n\tassert.Nil(t, builder.resp.PeersChangedPatch)\n\tassert.False(t, builder.hasErrors())\n}\n\nfunc TestMapResponseBuilder_MultipleErrors(t *testing.T) {\n\tcfg := &types.Config{}\n\tmockState := &state.State{}\n\tm := &mapper{\n\t\tcfg:   cfg,\n\t\tstate: mockState,\n\t}\n\n\tnodeID := types.NodeID(1)\n\n\t// Create a builder and add multiple errors\n\tbuilder := m.NewMapResponseBuilder(nodeID)\n\tbuilder.addError(assert.AnError)\n\tbuilder.addError(assert.AnError)\n\tbuilder.addError(nil) // This should be ignored\n\n\t// All subsequent calls should continue to work\n\tresult := builder.\n\t\tWithDomain().\n\t\tWithCollectServicesDisabled()\n\n\tassert.True(t, result.hasErrors())\n\tassert.Len(t, result.errs, 2) // nil error should be ignored\n\n\t// Build should return a multierr\n\tdata, err := result.Build()\n\trequire.Nil(t, data)\n\trequire.Error(t, err)\n\n\t// The error should contain information about multiple errors\n\tassert.Contains(t, err.Error(), \"multiple errors\")\n}\n"
  },
  {
    "path": "hscontrol/mapper/mapper.go",
    "content": "package mapper\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io/fs\"\n\t\"net/url\"\n\t\"os\"\n\t\"path\"\n\t\"slices\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/state\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/types/change\"\n\t\"github.com/rs/zerolog/log\"\n\t\"tailscale.com/envknob\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/dnstype\"\n\t\"tailscale.com/types/views\"\n)\n\nconst (\n\tnextDNSDoHPrefix     = \"https://dns.nextdns.io\"\n\tdebugMapResponsePerm = 0o755\n)\n\nvar debugDumpMapResponsePath = envknob.String(\"HEADSCALE_DEBUG_DUMP_MAPRESPONSE_PATH\")\n\n// TODO: Optimise\n// As this work continues, the idea is that there will be one Mapper instance\n// per node, attached to the open stream between the control and client.\n// This means that this can hold a state per node and we can use that to\n// improve the mapresponses sent.\n// We could:\n// - Keep information about the previous mapresponse so we can send a diff\n// - Store hashes\n// - Create a \"minifier\" that removes info not needed for the node\n// - some sort of batching, wait for 5 or 60 seconds before sending\n\ntype mapper struct {\n\t// Configuration\n\tstate   *state.State\n\tcfg     *types.Config\n\tbatcher *Batcher\n\n\tcreated time.Time\n}\n\n//nolint:unused\ntype patch struct {\n\ttimestamp time.Time\n\tchange    *tailcfg.PeerChange\n}\n\nfunc newMapper(\n\tcfg *types.Config,\n\tstate *state.State,\n) *mapper {\n\t// uid, _ := util.GenerateRandomStringDNSSafe(mapperIDLength)\n\treturn &mapper{\n\t\tstate: state,\n\t\tcfg:   cfg,\n\n\t\tcreated: time.Now(),\n\t}\n}\n\n// generateUserProfiles creates user profiles for MapResponse.\nfunc generateUserProfiles(\n\tnode types.NodeView,\n\tpeers views.Slice[types.NodeView],\n) []tailcfg.UserProfile {\n\tuserMap := make(map[uint]*types.UserView)\n\tids := make([]uint, 0, len(userMap))\n\n\tuser := node.Owner()\n\tif !user.Valid() {\n\t\tlog.Error().\n\t\t\tEmbedObject(node).\n\t\t\tMsg(\"node has no valid owner, skipping user profile generation\")\n\n\t\treturn nil\n\t}\n\n\tuserID := user.Model().ID\n\tuserMap[userID] = &user\n\tids = append(ids, userID)\n\n\tfor _, peer := range peers.All() {\n\t\tpeerUser := peer.Owner()\n\t\tif !peerUser.Valid() {\n\t\t\tcontinue\n\t\t}\n\n\t\tpeerUserID := peerUser.Model().ID\n\t\tuserMap[peerUserID] = &peerUser\n\t\tids = append(ids, peerUserID)\n\t}\n\n\tslices.Sort(ids)\n\tids = slices.Compact(ids)\n\n\tvar profiles []tailcfg.UserProfile\n\n\tfor _, id := range ids {\n\t\tif userMap[id] != nil {\n\t\t\tprofiles = append(profiles, userMap[id].TailscaleUserProfile())\n\t\t}\n\t}\n\n\treturn profiles\n}\n\nfunc generateDNSConfig(\n\tcfg *types.Config,\n\tnode types.NodeView,\n) *tailcfg.DNSConfig {\n\tif cfg.TailcfgDNSConfig == nil {\n\t\treturn nil\n\t}\n\n\tdnsConfig := cfg.TailcfgDNSConfig.Clone()\n\n\taddNextDNSMetadata(dnsConfig.Resolvers, node)\n\n\treturn dnsConfig\n}\n\n// If any nextdns DoH resolvers are present in the list of resolvers it will\n// take metadata from the node metadata and instruct tailscale to add it\n// to the requests. 
This makes it possible to identify from which device the\n// requests come in the NextDNS dashboard.\n//\n// This will produce a resolver like:\n// `https://dns.nextdns.io/<nextdns-id>?device_name=node-name&device_model=linux&device_ip=100.64.0.1`\nfunc addNextDNSMetadata(resolvers []*dnstype.Resolver, node types.NodeView) {\n\tfor _, resolver := range resolvers {\n\t\tif strings.HasPrefix(resolver.Addr, nextDNSDoHPrefix) {\n\t\t\tattrs := url.Values{\n\t\t\t\t\"device_name\":  []string{node.Hostname()},\n\t\t\t\t\"device_model\": []string{node.Hostinfo().OS()},\n\t\t\t}\n\n\t\t\tif len(node.IPs()) > 0 {\n\t\t\t\tattrs.Add(\"device_ip\", node.IPs()[0].String())\n\t\t\t}\n\n\t\t\tresolver.Addr = fmt.Sprintf(\"%s?%s\", resolver.Addr, attrs.Encode())\n\t\t}\n\t}\n}\n\n// fullMapResponse returns a MapResponse for the given node.\n//\n//nolint:unused\nfunc (m *mapper) fullMapResponse(\n\tnodeID types.NodeID,\n\tcapVer tailcfg.CapabilityVersion,\n) (*tailcfg.MapResponse, error) {\n\tpeers := m.state.ListPeers(nodeID)\n\n\treturn m.NewMapResponseBuilder(nodeID).\n\t\tWithDebugType(fullResponseDebug).\n\t\tWithCapabilityVersion(capVer).\n\t\tWithSelfNode().\n\t\tWithDERPMap().\n\t\tWithDomain().\n\t\tWithCollectServicesDisabled().\n\t\tWithDebugConfig().\n\t\tWithSSHPolicy().\n\t\tWithDNSConfig().\n\t\tWithUserProfiles(peers).\n\t\tWithPacketFilters().\n\t\tWithPeers(peers).\n\t\tBuild()\n}\n\nfunc (m *mapper) selfMapResponse(\n\tnodeID types.NodeID,\n\tcapVer tailcfg.CapabilityVersion,\n) (*tailcfg.MapResponse, error) {\n\tma, err := m.NewMapResponseBuilder(nodeID).\n\t\tWithDebugType(selfResponseDebug).\n\t\tWithCapabilityVersion(capVer).\n\t\tWithSelfNode().\n\t\tBuild()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Set the peers to nil to ensure the node does not think\n\t// it's getting a new list.\n\tma.Peers = nil\n\n\treturn ma, nil\n}\n\n// policyChangeResponse creates a MapResponse for policy changes.\n// It sends:\n// - PeersRemoved for peers that are no longer visible after the policy change\n// - PeersChanged for remaining peers (their AllowedIPs may have changed due to policy)\n// - Updated PacketFilters\n// - Updated SSHPolicy (SSH rules may reference users/groups that changed)\n// - Optionally, the node's own self info (when includeSelf is true)\n// This avoids the issue where an empty Peers slice is interpreted by Tailscale\n// clients as \"no change\" rather than \"no peers\".\n// When includeSelf is true, the node's self info is included so that a node\n// whose own attributes changed (e.g., tags via admin API) sees its updated\n// self info along with the new packet filters.\nfunc (m *mapper) policyChangeResponse(\n\tnodeID types.NodeID,\n\tcapVer tailcfg.CapabilityVersion,\n\tremovedPeers []tailcfg.NodeID,\n\tcurrentPeers views.Slice[types.NodeView],\n\tincludeSelf bool,\n) (*tailcfg.MapResponse, error) {\n\tbuilder := m.NewMapResponseBuilder(nodeID).\n\t\tWithDebugType(policyResponseDebug).\n\t\tWithCapabilityVersion(capVer).\n\t\tWithPacketFilters().\n\t\tWithSSHPolicy()\n\n\tif includeSelf {\n\t\tbuilder = builder.WithSelfNode()\n\t}\n\n\tif len(removedPeers) > 0 {\n\t\t// Convert tailcfg.NodeID to types.NodeID for WithPeersRemoved\n\t\tremovedIDs := make([]types.NodeID, len(removedPeers))\n\t\tfor i, id := range removedPeers {\n\t\t\tremovedIDs[i] = types.NodeID(id) //nolint:gosec // NodeID types are equivalent\n\t\t}\n\n\t\tbuilder.WithPeersRemoved(removedIDs...)\n\t}\n\n\t// Send remaining peers in PeersChanged - their AllowedIPs may have\n\t// changed due to the 
policy update (e.g., different routes allowed).\n\tif currentPeers.Len() > 0 {\n\t\tbuilder.WithPeerChanges(currentPeers)\n\t}\n\n\treturn builder.Build()\n}\n\n// buildFromChange builds a MapResponse from a change.Change specification.\n// This provides fine-grained control over what gets included in the response.\nfunc (m *mapper) buildFromChange(\n\tnodeID types.NodeID,\n\tcapVer tailcfg.CapabilityVersion,\n\tresp *change.Change,\n) (*tailcfg.MapResponse, error) {\n\tif resp.IsEmpty() {\n\t\treturn nil, nil //nolint:nilnil // Empty response means nothing to send, not an error\n\t}\n\n\t// If this is a self-update (the changed node is the receiving node),\n\t// send a self-update response to ensure the node sees its own changes.\n\tif resp.OriginNode != 0 && resp.OriginNode == nodeID {\n\t\treturn m.selfMapResponse(nodeID, capVer)\n\t}\n\n\tbuilder := m.NewMapResponseBuilder(nodeID).\n\t\tWithCapabilityVersion(capVer).\n\t\tWithDebugType(changeResponseDebug)\n\n\tif resp.IncludeSelf {\n\t\tbuilder.WithSelfNode()\n\t}\n\n\tif resp.IncludeDERPMap {\n\t\tbuilder.WithDERPMap()\n\t}\n\n\tif resp.IncludeDNS {\n\t\tbuilder.WithDNSConfig()\n\t}\n\n\tif resp.IncludeDomain {\n\t\tbuilder.WithDomain()\n\t}\n\n\tif resp.IncludePolicy {\n\t\tbuilder.WithPacketFilters()\n\t\tbuilder.WithSSHPolicy()\n\t}\n\n\tif resp.SendAllPeers {\n\t\tpeers := m.state.ListPeers(nodeID)\n\t\tbuilder.WithUserProfiles(peers)\n\t\tbuilder.WithPeers(peers)\n\t} else {\n\t\tif len(resp.PeersChanged) > 0 {\n\t\t\tpeers := m.state.ListPeers(nodeID, resp.PeersChanged...)\n\t\t\tbuilder.WithUserProfiles(peers)\n\t\t\tbuilder.WithPeerChanges(peers)\n\t\t}\n\n\t\tif len(resp.PeersRemoved) > 0 {\n\t\t\tbuilder.WithPeersRemoved(resp.PeersRemoved...)\n\t\t}\n\t}\n\n\tif len(resp.PeerPatches) > 0 {\n\t\tbuilder.WithPeerChangedPatch(resp.PeerPatches)\n\t}\n\n\treturn builder.Build()\n}\n\nfunc writeDebugMapResponse(\n\tresp *tailcfg.MapResponse,\n\tt debugType,\n\tnodeID types.NodeID,\n) {\n\tbody, err := json.MarshalIndent(resp, \"\", \"  \")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tperms := fs.FileMode(debugMapResponsePerm)\n\tmPath := path.Join(debugDumpMapResponsePath, fmt.Sprintf(\"%d\", nodeID))\n\n\terr = os.MkdirAll(mPath, perms)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tnow := time.Now().Format(\"2006-01-02T15-04-05.999999999\")\n\n\tmapResponsePath := path.Join(\n\t\tmPath,\n\t\tfmt.Sprintf(\"%s-%s.json\", now, t),\n\t)\n\n\tlog.Trace().Msgf(\"writing MapResponse to %s\", mapResponsePath)\n\n\terr = os.WriteFile(mapResponsePath, body, perms)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc (m *mapper) debugMapResponses() (map[types.NodeID][]tailcfg.MapResponse, error) {\n\tif debugDumpMapResponsePath == \"\" {\n\t\treturn nil, nil //nolint:nilnil // intentional: no data when debug path not set\n\t}\n\n\treturn ReadMapResponsesFromDirectory(debugDumpMapResponsePath)\n}\n\nfunc ReadMapResponsesFromDirectory(dir string) (map[types.NodeID][]tailcfg.MapResponse, error) {\n\tnodes, err := os.ReadDir(dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresult := make(map[types.NodeID][]tailcfg.MapResponse)\n\n\tfor _, node := range nodes {\n\t\tif !node.IsDir() {\n\t\t\tcontinue\n\t\t}\n\n\t\tnodeIDu, err := strconv.ParseUint(node.Name(), 10, 64)\n\t\tif err != nil {\n\t\t\tlog.Error().Err(err).Msgf(\"parsing node ID from dir %s\", node.Name())\n\t\t\tcontinue\n\t\t}\n\n\t\tnodeID := types.NodeID(nodeIDu)\n\n\t\tfiles, err := os.ReadDir(path.Join(dir, node.Name()))\n\t\tif err != nil 
{\n\t\t\tlog.Error().Err(err).Msgf(\"reading dir %s\", node.Name())\n\t\t\tcontinue\n\t\t}\n\n\t\tslices.SortStableFunc(files, func(a, b fs.DirEntry) int {\n\t\t\treturn strings.Compare(a.Name(), b.Name())\n\t\t})\n\n\t\tfor _, file := range files {\n\t\t\tif file.IsDir() || !strings.HasSuffix(file.Name(), \".json\") {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tbody, err := os.ReadFile(path.Join(dir, node.Name(), file.Name()))\n\t\t\tif err != nil {\n\t\t\t\tlog.Error().Err(err).Msgf(\"reading file %s\", file.Name())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar resp tailcfg.MapResponse\n\n\t\t\terr = json.Unmarshal(body, &resp)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error().Err(err).Msgf(\"unmarshalling file %s\", file.Name())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tresult[nodeID] = append(result[nodeID], resp)\n\t\t}\n\t}\n\n\treturn result, nil\n}\n"
  },
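  {
    "path": "hscontrol/mapper/mapper_nextdns_example_test.go",
    "content": "package mapper\n\n// NOTE: Illustrative sketch, not part of the upstream test suite. It pins\n// down the resolver rewriting documented on addNextDNSMetadata in mapper.go:\n// a NextDNS DoH resolver gains device_* query parameters derived from the\n// node, while other resolvers are left untouched. The node values\n// (\"node-name\", \"linux\", 100.64.0.1) are arbitrary example data, and the\n// expected URL relies on url.Values.Encode sorting parameters alphabetically.\n\nimport (\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/dnstype\"\n)\n\nfunc TestAddNextDNSMetadata_Example(t *testing.T) {\n\tnode := &types.Node{\n\t\tHostname: \"node-name\",\n\t\tIPv4:     iap(\"100.64.0.1\"),\n\t\tHostinfo: &tailcfg.Hostinfo{OS: \"linux\"},\n\t}\n\n\tresolvers := []*dnstype.Resolver{\n\t\t{Addr: \"https://dns.nextdns.io/abc123\"},\n\t\t{Addr: \"https://cloudflare-dns.com/dns-query\"},\n\t}\n\n\taddNextDNSMetadata(resolvers, node.View())\n\n\tassert.Equal(t,\n\t\t\"https://dns.nextdns.io/abc123?device_ip=100.64.0.1&device_model=linux&device_name=node-name\",\n\t\tresolvers[0].Addr)\n\n\t// Non-NextDNS resolvers must not be modified.\n\tassert.Equal(t, \"https://cloudflare-dns.com/dns-query\", resolvers[1].Addr)\n}\n"
  },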
  {
    "path": "hscontrol/mapper/mapper_test.go",
    "content": "package mapper\n\nimport (\n\t\"fmt\"\n\t\"net/netip\"\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/google/go-cmp/cmp/cmpopts\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/dnstype\"\n)\n\nvar iap = func(ipStr string) *netip.Addr {\n\tip := netip.MustParseAddr(ipStr)\n\treturn &ip\n}\n\nfunc TestDNSConfigMapResponse(t *testing.T) {\n\ttests := []struct {\n\t\tmagicDNS bool\n\t\twant     *tailcfg.DNSConfig\n\t}{\n\t\t{\n\t\t\tmagicDNS: true,\n\t\t\twant: &tailcfg.DNSConfig{\n\t\t\t\tRoutes: map[string][]*dnstype.Resolver{},\n\t\t\t\tDomains: []string{\n\t\t\t\t\t\"foobar.headscale.net\",\n\t\t\t\t},\n\t\t\t\tProxied: true,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tmagicDNS: false,\n\t\t\twant: &tailcfg.DNSConfig{\n\t\t\t\tDomains: []string{\"foobar.headscale.net\"},\n\t\t\t\tProxied: false,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(fmt.Sprintf(\"with-magicdns-%v\", tt.magicDNS), func(t *testing.T) {\n\t\t\tmach := func(hostname, username string, userid uint) *types.Node {\n\t\t\t\treturn &types.Node{\n\t\t\t\t\tHostname: hostname,\n\t\t\t\t\tUserID:   new(userid),\n\t\t\t\t\tUser: &types.User{\n\t\t\t\t\t\tName: username,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tbaseDomain := \"foobar.headscale.net\"\n\n\t\t\tdnsConfigOrig := tailcfg.DNSConfig{\n\t\t\t\tRoutes:  make(map[string][]*dnstype.Resolver),\n\t\t\t\tDomains: []string{baseDomain},\n\t\t\t\tProxied: tt.magicDNS,\n\t\t\t}\n\n\t\t\tnodeInShared1 := mach(\"test_get_shared_nodes_1\", \"shared1\", 1)\n\n\t\t\tgot := generateDNSConfig(\n\t\t\t\t&types.Config{\n\t\t\t\t\tTailcfgDNSConfig: &dnsConfigOrig,\n\t\t\t\t},\n\t\t\t\tnodeInShared1.View(),\n\t\t\t)\n\n\t\t\tif diff := cmp.Diff(tt.want, got, cmpopts.EquateEmpty()); diff != \"\" {\n\t\t\t\tt.Errorf(\"generateDNSConfig() unexpected result (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "hscontrol/mapper/node_conn.go",
    "content": "package mapper\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/types/change\"\n\t\"github.com/juanfont/headscale/hscontrol/util/zlog/zf\"\n\t\"github.com/puzpuzpuz/xsync/v4\"\n\t\"github.com/rs/zerolog\"\n\t\"github.com/rs/zerolog/log\"\n\t\"tailscale.com/tailcfg\"\n)\n\n// connectionEntry represents a single connection to a node.\ntype connectionEntry struct {\n\tid       string // unique connection ID\n\tc        chan<- *tailcfg.MapResponse\n\tversion  tailcfg.CapabilityVersion\n\tcreated  time.Time\n\tstop     func()\n\tlastUsed atomic.Int64 // Unix timestamp of last successful send\n\tclosed   atomic.Bool  // Indicates if this connection has been closed\n}\n\n// multiChannelNodeConn manages multiple concurrent connections for a single node.\ntype multiChannelNodeConn struct {\n\tid     types.NodeID\n\tmapper *mapper\n\tlog    zerolog.Logger\n\n\tmutex       sync.RWMutex\n\tconnections []*connectionEntry\n\n\t// pendingMu protects pending changes independently of the connection mutex.\n\t// This avoids contention between addToBatch (which appends changes) and\n\t// send() (which sends data to connections).\n\tpendingMu sync.Mutex\n\tpending   []change.Change\n\n\t// workMu serializes change processing for this node across batch ticks.\n\t// Without this, two workers could process consecutive ticks' bundles\n\t// concurrently, causing out-of-order MapResponse delivery and races\n\t// on lastSentPeers (Clear+Store in updateSentPeers vs Range in\n\t// computePeerDiff).\n\tworkMu sync.Mutex\n\n\tcloseOnce   sync.Once\n\tupdateCount atomic.Int64\n\n\t// disconnectedAt records when the last connection was removed.\n\t// nil means the node is considered connected (or newly created);\n\t// non-nil means the node disconnected at the stored timestamp.\n\t// Used by cleanupOfflineNodes to evict stale entries.\n\tdisconnectedAt atomic.Pointer[time.Time]\n\n\t// lastSentPeers tracks which peers were last sent to this node.\n\t// This enables computing diffs for policy changes instead of sending\n\t// full peer lists (which clients interpret as \"no change\" when empty).\n\t// Using xsync.Map for lock-free concurrent access.\n\tlastSentPeers *xsync.Map[tailcfg.NodeID, struct{}]\n}\n\n// connIDCounter is a monotonically increasing counter used to generate\n// unique connection identifiers without the overhead of crypto/rand.\n// Connection IDs are process-local and need not be cryptographically random.\nvar connIDCounter atomic.Uint64\n\n// generateConnectionID generates a unique connection identifier.\nfunc generateConnectionID() string {\n\treturn strconv.FormatUint(connIDCounter.Add(1), 10)\n}\n\n// newMultiChannelNodeConn creates a new multi-channel node connection.\nfunc newMultiChannelNodeConn(id types.NodeID, mapper *mapper) *multiChannelNodeConn {\n\treturn &multiChannelNodeConn{\n\t\tid:            id,\n\t\tmapper:        mapper,\n\t\tlastSentPeers: xsync.NewMap[tailcfg.NodeID, struct{}](),\n\t\tlog:           log.With().Uint64(zf.NodeID, id.Uint64()).Logger(),\n\t}\n}\n\nfunc (mc *multiChannelNodeConn) close() {\n\tmc.closeOnce.Do(func() {\n\t\tmc.mutex.Lock()\n\t\tdefer mc.mutex.Unlock()\n\n\t\tfor _, conn := range mc.connections {\n\t\t\tmc.stopConnection(conn)\n\t\t}\n\t})\n}\n\n// stopConnection marks a connection as closed and tears down the owning session\n// at most once, even if multiple cleanup paths race to remove it.\nfunc (mc 
*multiChannelNodeConn) stopConnection(conn *connectionEntry) {\n\tif conn.closed.CompareAndSwap(false, true) {\n\t\tif conn.stop != nil {\n\t\t\tconn.stop()\n\t\t}\n\t}\n}\n\n// removeConnectionAtIndexLocked removes the active connection at index.\n// If stopConnection is true, it also stops that session.\n// Caller must hold mc.mutex.\nfunc (mc *multiChannelNodeConn) removeConnectionAtIndexLocked(i int, stopConnection bool) *connectionEntry {\n\tconn := mc.connections[i]\n\tcopy(mc.connections[i:], mc.connections[i+1:])\n\tmc.connections[len(mc.connections)-1] = nil // release pointer for GC\n\tmc.connections = mc.connections[:len(mc.connections)-1]\n\n\tif stopConnection {\n\t\tmc.stopConnection(conn)\n\t}\n\n\treturn conn\n}\n\n// addConnection adds a new connection.\nfunc (mc *multiChannelNodeConn) addConnection(entry *connectionEntry) {\n\tmc.mutex.Lock()\n\tdefer mc.mutex.Unlock()\n\n\tmc.connections = append(mc.connections, entry)\n\tmc.log.Debug().Str(zf.ConnID, entry.id).\n\t\tInt(\"total_connections\", len(mc.connections)).\n\t\tMsg(\"connection added\")\n}\n\n// removeConnectionByChannel removes a connection by matching channel pointer.\nfunc (mc *multiChannelNodeConn) removeConnectionByChannel(c chan<- *tailcfg.MapResponse) bool {\n\tmc.mutex.Lock()\n\tdefer mc.mutex.Unlock()\n\n\tfor i, entry := range mc.connections {\n\t\tif entry.c == c {\n\t\t\tmc.removeConnectionAtIndexLocked(i, false)\n\t\t\tmc.log.Debug().Str(zf.ConnID, entry.id).\n\t\t\t\tInt(\"remaining_connections\", len(mc.connections)).\n\t\t\t\tMsg(\"connection removed\")\n\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n// hasActiveConnections checks if the node has any active connections.\nfunc (mc *multiChannelNodeConn) hasActiveConnections() bool {\n\tmc.mutex.RLock()\n\tdefer mc.mutex.RUnlock()\n\n\treturn len(mc.connections) > 0\n}\n\n// getActiveConnectionCount returns the number of active connections.\nfunc (mc *multiChannelNodeConn) getActiveConnectionCount() int {\n\tmc.mutex.RLock()\n\tdefer mc.mutex.RUnlock()\n\n\treturn len(mc.connections)\n}\n\n// markConnected clears the disconnect timestamp, indicating the node\n// has an active connection.\nfunc (mc *multiChannelNodeConn) markConnected() {\n\tmc.disconnectedAt.Store(nil)\n}\n\n// markDisconnected records the current time as the moment the node\n// lost its last connection. 
Used by cleanupOfflineNodes to determine\n// how long the node has been offline.\nfunc (mc *multiChannelNodeConn) markDisconnected() {\n\tnow := time.Now()\n\tmc.disconnectedAt.Store(&now)\n}\n\n// isConnected returns true if the node has active connections or has\n// not been marked as disconnected.\nfunc (mc *multiChannelNodeConn) isConnected() bool {\n\tif mc.hasActiveConnections() {\n\t\treturn true\n\t}\n\n\treturn mc.disconnectedAt.Load() == nil\n}\n\n// offlineDuration returns how long the node has been disconnected.\n// Returns 0 if the node is connected or has never been marked as disconnected.\nfunc (mc *multiChannelNodeConn) offlineDuration() time.Duration {\n\tt := mc.disconnectedAt.Load()\n\tif t == nil {\n\t\treturn 0\n\t}\n\n\treturn time.Since(*t)\n}\n\n// appendPending appends changes to this node's pending change list.\n// Thread-safe via pendingMu; does not contend with the connection mutex.\nfunc (mc *multiChannelNodeConn) appendPending(changes ...change.Change) {\n\tmc.pendingMu.Lock()\n\tmc.pending = append(mc.pending, changes...)\n\tmc.pendingMu.Unlock()\n}\n\n// drainPending atomically removes and returns all pending changes.\n// Returns nil if there are no pending changes.\nfunc (mc *multiChannelNodeConn) drainPending() []change.Change {\n\tmc.pendingMu.Lock()\n\tp := mc.pending\n\tmc.pending = nil\n\tmc.pendingMu.Unlock()\n\n\treturn p\n}\n\n// send broadcasts data to all active connections for the node.\n//\n// To avoid holding the write lock during potentially slow sends (each stale\n// connection can block for up to 50ms), the method snapshots connections under\n// a read lock, sends without any lock held, then write-locks only to remove\n// failures. New connections added between the snapshot and cleanup are safe:\n// they receive a full initial map via AddNode, so missing this update causes\n// no data loss.\nfunc (mc *multiChannelNodeConn) send(data *tailcfg.MapResponse) error {\n\tif data == nil {\n\t\treturn nil\n\t}\n\n\t// Snapshot connections under read lock.\n\tmc.mutex.RLock()\n\n\tif len(mc.connections) == 0 {\n\t\tmc.mutex.RUnlock()\n\t\tmc.log.Trace().\n\t\t\tMsg(\"send: no active connections, skipping\")\n\n\t\treturn nil\n\t}\n\n\t// Copy the slice so we can release the read lock before sending.\n\tsnapshot := make([]*connectionEntry, len(mc.connections))\n\tcopy(snapshot, mc.connections)\n\tmc.mutex.RUnlock()\n\n\tmc.log.Trace().\n\t\tInt(\"total_connections\", len(snapshot)).\n\t\tMsg(\"send: broadcasting\")\n\n\t// Send to all connections without holding any lock.\n\t// Stale connection timeouts (50ms each) happen here without blocking\n\t// other goroutines that need the mutex.\n\tvar (\n\t\tlastErr      error\n\t\tsuccessCount int\n\t\tfailed       []*connectionEntry\n\t)\n\n\tfor _, conn := range snapshot {\n\t\terr := conn.send(data)\n\t\tif err != nil {\n\t\t\tlastErr = err\n\n\t\t\tfailed = append(failed, conn)\n\n\t\t\tmc.log.Warn().Err(err).\n\t\t\t\tStr(zf.ConnID, conn.id).\n\t\t\t\tMsg(\"send: connection failed\")\n\t\t} else {\n\t\t\tsuccessCount++\n\t\t}\n\t}\n\n\t// Write-lock only to remove failed connections.\n\tif len(failed) > 0 {\n\t\tmc.mutex.Lock()\n\t\t// Remove by pointer identity: only remove entries that still exist\n\t\t// in the current connections slice and match a failed pointer.\n\t\t// New connections added since the snapshot are not affected.\n\t\tfailedSet := make(map[*connectionEntry]struct{}, len(failed))\n\t\tfor _, f := range failed {\n\t\t\tfailedSet[f] = struct{}{}\n\t\t}\n\n\t\tclean := 
mc.connections[:0]\n\t\tfor _, conn := range mc.connections {\n\t\t\tif _, isFailed := failedSet[conn]; !isFailed {\n\t\t\t\tclean = append(clean, conn)\n\t\t\t} else {\n\t\t\t\tmc.log.Debug().\n\t\t\t\t\tStr(zf.ConnID, conn.id).\n\t\t\t\t\tMsg(\"send: removing failed connection\")\n\t\t\t\t// Tear down the owning session so the old serveLongPoll\n\t\t\t\t// goroutine exits instead of lingering as a stale session.\n\t\t\t\tmc.stopConnection(conn)\n\t\t\t}\n\t\t}\n\n\t\t// Nil out trailing slots so removed *connectionEntry values\n\t\t// are not retained by the backing array.\n\t\tfor i := len(clean); i < len(mc.connections); i++ {\n\t\t\tmc.connections[i] = nil\n\t\t}\n\n\t\tmc.connections = clean\n\t\tmc.mutex.Unlock()\n\t}\n\n\tmc.updateCount.Add(1)\n\n\tmc.log.Trace().\n\t\tInt(\"successful_sends\", successCount).\n\t\tInt(\"failed_connections\", len(failed)).\n\t\tMsg(\"send: broadcast complete\")\n\n\t// Success if at least one send succeeded\n\tif successCount > 0 {\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"node %d: all connections failed, last error: %w\", mc.id, lastErr)\n}\n\n// send sends data to a single connection entry with timeout-based stale connection detection.\nfunc (entry *connectionEntry) send(data *tailcfg.MapResponse) error {\n\tif data == nil {\n\t\treturn nil\n\t}\n\n\t// Check if the connection has been closed to prevent send on closed channel panic.\n\t// This can happen during shutdown when Close() is called while workers are still processing.\n\tif entry.closed.Load() {\n\t\treturn fmt.Errorf(\"connection %s: %w\", entry.id, errConnectionClosed)\n\t}\n\n\t// Use a short timeout to detect stale connections where the client isn't reading the channel.\n\t// This is critical for detecting Docker containers that are forcefully terminated\n\t// but still have channels that appear open.\n\t//\n\t// We use time.NewTimer + Stop instead of time.After to avoid leaking timers.\n\t// time.After creates a timer that lives in the runtime's timer heap until it fires,\n\t// even when the send succeeds immediately. 
On the hot path (1000+ nodes per tick),\n\t// this leaks thousands of timers per second.\n\ttimer := time.NewTimer(50 * time.Millisecond) //nolint:mnd\n\tdefer timer.Stop()\n\n\tselect {\n\tcase entry.c <- data:\n\t\t// Update last used timestamp on successful send\n\t\tentry.lastUsed.Store(time.Now().Unix())\n\t\treturn nil\n\tcase <-timer.C:\n\t\t// Connection is likely stale - client isn't reading from channel\n\t\t// This catches the case where Docker containers are killed but channels remain open\n\t\treturn fmt.Errorf(\"connection %s: %w\", entry.id, ErrConnectionSendTimeout)\n\t}\n}\n\n// nodeID returns the node ID.\nfunc (mc *multiChannelNodeConn) nodeID() types.NodeID {\n\treturn mc.id\n}\n\n// version returns the capability version from the first active connection.\n// All connections for a node should have the same version in practice.\nfunc (mc *multiChannelNodeConn) version() tailcfg.CapabilityVersion {\n\tmc.mutex.RLock()\n\tdefer mc.mutex.RUnlock()\n\n\tif len(mc.connections) == 0 {\n\t\treturn 0\n\t}\n\n\treturn mc.connections[0].version\n}\n\n// updateSentPeers updates the tracked peer state based on a sent MapResponse.\n// This must be called after successfully sending a response to keep track of\n// what the client knows about, enabling accurate diffs for future updates.\nfunc (mc *multiChannelNodeConn) updateSentPeers(resp *tailcfg.MapResponse) {\n\tif resp == nil {\n\t\treturn\n\t}\n\n\t// Full peer list replaces tracked state entirely\n\tif resp.Peers != nil {\n\t\tmc.lastSentPeers.Clear()\n\n\t\tfor _, peer := range resp.Peers {\n\t\t\tmc.lastSentPeers.Store(peer.ID, struct{}{})\n\t\t}\n\t}\n\n\t// Incremental additions\n\tfor _, peer := range resp.PeersChanged {\n\t\tmc.lastSentPeers.Store(peer.ID, struct{}{})\n\t}\n\n\t// Incremental removals\n\tfor _, id := range resp.PeersRemoved {\n\t\tmc.lastSentPeers.Delete(id)\n\t}\n}\n\n// computePeerDiff compares the current peer list against what was last sent\n// and returns the peers that were removed (in lastSentPeers but not in current).\nfunc (mc *multiChannelNodeConn) computePeerDiff(currentPeers []tailcfg.NodeID) []tailcfg.NodeID {\n\tcurrentSet := make(map[tailcfg.NodeID]struct{}, len(currentPeers))\n\tfor _, id := range currentPeers {\n\t\tcurrentSet[id] = struct{}{}\n\t}\n\n\tvar removed []tailcfg.NodeID\n\n\t// Find removed: in lastSentPeers but not in current\n\tmc.lastSentPeers.Range(func(id tailcfg.NodeID, _ struct{}) bool {\n\t\tif _, exists := currentSet[id]; !exists {\n\t\t\tremoved = append(removed, id)\n\t\t}\n\n\t\treturn true\n\t})\n\n\treturn removed\n}\n\n// change applies a change to all active connections for the node.\nfunc (mc *multiChannelNodeConn) change(r change.Change) error {\n\treturn handleNodeChange(mc, mc.mapper, r)\n}\n"
  },
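  {
    "path": "hscontrol/mapper/node_conn_example_test.go",
    "content": "package mapper\n\n// NOTE: Illustrative sketch, not part of the upstream test suite. It walks\n// through the peer-tracking round trip documented on updateSentPeers and\n// computePeerDiff in node_conn.go: a full Peers list replaces the tracked\n// state, PeersChanged/PeersRemoved adjust it incrementally, and\n// computePeerDiff then reports the peers the client was last told about\n// that are missing from the current peer list. All node IDs are example data.\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"tailscale.com/tailcfg\"\n)\n\nfunc TestUpdateSentPeers_DiffExample(t *testing.T) {\n\tmc := newMultiChannelNodeConn(1, nil)\n\n\t// Full map response: the client now knows peers 1, 2 and 3.\n\tmc.updateSentPeers(&tailcfg.MapResponse{\n\t\tPeers: []*tailcfg.Node{{ID: 1}, {ID: 2}, {ID: 3}},\n\t})\n\n\t// Incremental update: peer 4 appears, peer 3 goes away.\n\tmc.updateSentPeers(&tailcfg.MapResponse{\n\t\tPeersChanged: []*tailcfg.Node{{ID: 4}},\n\t\tPeersRemoved: []tailcfg.NodeID{3},\n\t})\n\n\t// The current peer list no longer contains peer 2, so the diff\n\t// reports it as removed relative to what was last sent.\n\tremoved := mc.computePeerDiff([]tailcfg.NodeID{1, 4})\n\tassert.ElementsMatch(t, []tailcfg.NodeID{2}, removed)\n}\n"
  },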
  {
    "path": "hscontrol/mapper/tail_test.go",
    "content": "package mapper\n\nimport (\n\t\"encoding/json\"\n\t\"net/netip\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/google/go-cmp/cmp/cmpopts\"\n\t\"github.com/juanfont/headscale/hscontrol/routes\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"tailscale.com/net/tsaddr\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/key\"\n)\n\nfunc TestTailNode(t *testing.T) {\n\tmustNK := func(str string) key.NodePublic {\n\t\tvar k key.NodePublic\n\n\t\t_ = k.UnmarshalText([]byte(str))\n\n\t\treturn k\n\t}\n\n\tmustDK := func(str string) key.DiscoPublic {\n\t\tvar k key.DiscoPublic\n\n\t\t_ = k.UnmarshalText([]byte(str))\n\n\t\treturn k\n\t}\n\n\tmustMK := func(str string) key.MachinePublic {\n\t\tvar k key.MachinePublic\n\n\t\t_ = k.UnmarshalText([]byte(str))\n\n\t\treturn k\n\t}\n\n\thiview := func(hoin tailcfg.Hostinfo) tailcfg.HostinfoView {\n\t\treturn hoin.View()\n\t}\n\n\tcreated := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)\n\tlastSeen := time.Date(2009, time.November, 10, 23, 9, 0, 0, time.UTC)\n\texpire := time.Date(2500, time.November, 11, 23, 0, 0, 0, time.UTC)\n\n\ttests := []struct {\n\t\tname       string\n\t\tnode       *types.Node\n\t\tpol        []byte\n\t\tdnsConfig  *tailcfg.DNSConfig\n\t\tbaseDomain string\n\t\twant       *tailcfg.Node\n\t\twantErr    bool\n\t}{\n\t\t{\n\t\t\tname: \"empty-node\",\n\t\t\tnode: &types.Node{\n\t\t\t\tGivenName: \"empty\",\n\t\t\t\tHostinfo:  &tailcfg.Hostinfo{},\n\t\t\t},\n\t\t\tdnsConfig:  &tailcfg.DNSConfig{},\n\t\t\tbaseDomain: \"\",\n\t\t\twant: &tailcfg.Node{\n\t\t\t\tName:              \"empty\",\n\t\t\t\tStableID:          \"0\",\n\t\t\t\tHomeDERP:          0,\n\t\t\t\tLegacyDERPString:  \"127.3.3.40:0\",\n\t\t\t\tHostinfo:          hiview(tailcfg.Hostinfo{}),\n\t\t\t\tMachineAuthorized: true,\n\n\t\t\t\tCapMap: tailcfg.NodeCapMap{\n\t\t\t\t\ttailcfg.CapabilityFileSharing: []tailcfg.RawMessage{},\n\t\t\t\t\ttailcfg.CapabilityAdmin:       []tailcfg.RawMessage{},\n\t\t\t\t\ttailcfg.CapabilitySSH:         []tailcfg.RawMessage{},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"minimal-node\",\n\t\t\tnode: &types.Node{\n\t\t\t\tID: 0,\n\t\t\t\tMachineKey: mustMK(\n\t\t\t\t\t\"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507\",\n\t\t\t\t),\n\t\t\t\tNodeKey: mustNK(\n\t\t\t\t\t\"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe\",\n\t\t\t\t),\n\t\t\t\tDiscoKey: mustDK(\n\t\t\t\t\t\"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084\",\n\t\t\t\t),\n\t\t\t\tIPv4:      iap(\"100.64.0.1\"),\n\t\t\t\tHostname:  \"mini\",\n\t\t\t\tGivenName: \"mini\",\n\t\t\t\tUserID:    new(uint(0)),\n\t\t\t\tUser: &types.User{\n\t\t\t\t\tName: \"mini\",\n\t\t\t\t},\n\t\t\t\tTags:     []string{},\n\t\t\t\tAuthKey:  &types.PreAuthKey{},\n\t\t\t\tLastSeen: &lastSeen,\n\t\t\t\tExpiry:   &expire,\n\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\tRoutableIPs: []netip.Prefix{\n\t\t\t\t\t\ttsaddr.AllIPv4(),\n\t\t\t\t\t\ttsaddr.AllIPv6(),\n\t\t\t\t\t\tnetip.MustParsePrefix(\"192.168.0.0/24\"),\n\t\t\t\t\t\tnetip.MustParsePrefix(\"172.0.0.0/10\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tApprovedRoutes: []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6(), netip.MustParsePrefix(\"192.168.0.0/24\")},\n\t\t\t\tCreatedAt:      created,\n\t\t\t},\n\t\t\tdnsConfig:  &tailcfg.DNSConfig{},\n\t\t\tbaseDomain: \"\",\n\t\t\twant: &tailcfg.Node{\n\t\t\t\tID:       0,\n\t\t\t\tStableID: \"0\",\n\t\t\t\tName:     
\"mini\",\n\n\t\t\t\tUser: 0,\n\n\t\t\t\tKey: mustNK(\n\t\t\t\t\t\"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe\",\n\t\t\t\t),\n\t\t\t\tKeyExpiry: expire,\n\n\t\t\t\tMachine: mustMK(\n\t\t\t\t\t\"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507\",\n\t\t\t\t),\n\t\t\t\tDiscoKey: mustDK(\n\t\t\t\t\t\"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084\",\n\t\t\t\t),\n\t\t\t\tAddresses: []netip.Prefix{netip.MustParsePrefix(\"100.64.0.1/32\")},\n\t\t\t\tAllowedIPs: []netip.Prefix{\n\t\t\t\t\ttsaddr.AllIPv4(),\n\t\t\t\t\tnetip.MustParsePrefix(\"100.64.0.1/32\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"192.168.0.0/24\"),\n\t\t\t\t\ttsaddr.AllIPv6(),\n\t\t\t\t},\n\t\t\t\tPrimaryRoutes: []netip.Prefix{\n\t\t\t\t\tnetip.MustParsePrefix(\"192.168.0.0/24\"),\n\t\t\t\t},\n\t\t\t\tHomeDERP:         0,\n\t\t\t\tLegacyDERPString: \"127.3.3.40:0\",\n\t\t\t\tHostinfo: hiview(tailcfg.Hostinfo{\n\t\t\t\t\tRoutableIPs: []netip.Prefix{\n\t\t\t\t\t\ttsaddr.AllIPv4(),\n\t\t\t\t\t\ttsaddr.AllIPv6(),\n\t\t\t\t\t\tnetip.MustParsePrefix(\"192.168.0.0/24\"),\n\t\t\t\t\t\tnetip.MustParsePrefix(\"172.0.0.0/10\"),\n\t\t\t\t\t},\n\t\t\t\t}),\n\t\t\t\tCreated: created,\n\n\t\t\t\tTags: []string{},\n\n\t\t\t\tMachineAuthorized: true,\n\n\t\t\t\tCapMap: tailcfg.NodeCapMap{\n\t\t\t\t\ttailcfg.CapabilityFileSharing: []tailcfg.RawMessage{},\n\t\t\t\t\ttailcfg.CapabilityAdmin:       []tailcfg.RawMessage{},\n\t\t\t\t\ttailcfg.CapabilitySSH:         []tailcfg.RawMessage{},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"check-dot-suffix-on-node-name\",\n\t\t\tnode: &types.Node{\n\t\t\t\tGivenName: \"minimal\",\n\t\t\t\tHostinfo:  &tailcfg.Hostinfo{},\n\t\t\t},\n\t\t\tdnsConfig:  &tailcfg.DNSConfig{},\n\t\t\tbaseDomain: \"example.com\",\n\t\t\twant: &tailcfg.Node{\n\t\t\t\t// a node name should have a dot appended\n\t\t\t\tName:              \"minimal.example.com.\",\n\t\t\t\tStableID:          \"0\",\n\t\t\t\tHomeDERP:          0,\n\t\t\t\tLegacyDERPString:  \"127.3.3.40:0\",\n\t\t\t\tHostinfo:          hiview(tailcfg.Hostinfo{}),\n\t\t\t\tMachineAuthorized: true,\n\n\t\t\t\tCapMap: tailcfg.NodeCapMap{\n\t\t\t\t\ttailcfg.CapabilityFileSharing: []tailcfg.RawMessage{},\n\t\t\t\t\ttailcfg.CapabilityAdmin:       []tailcfg.RawMessage{},\n\t\t\t\t\ttailcfg.CapabilitySSH:         []tailcfg.RawMessage{},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t// TODO: Add tests to check other aspects of the node conversion:\n\t\t// - With tags and policy\n\t\t// - dnsconfig and basedomain\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tprimary := routes.New()\n\t\t\tcfg := &types.Config{\n\t\t\t\tBaseDomain:          tt.baseDomain,\n\t\t\t\tTailcfgDNSConfig:    tt.dnsConfig,\n\t\t\t\tRandomizeClientPort: false,\n\t\t\t\tTaildrop:            types.TaildropConfig{Enabled: true},\n\t\t\t}\n\t\t\t_ = primary.SetRoutes(tt.node.ID, tt.node.SubnetRoutes()...)\n\n\t\t\t// This is a hack to avoid having a second node to test the primary route.\n\t\t\t// This should be baked into the test case proper if it is extended in the future.\n\t\t\t_ = primary.SetRoutes(2, netip.MustParsePrefix(\"192.168.0.0/24\"))\n\t\t\tgot, err := tt.node.View().TailNode(\n\t\t\t\t0,\n\t\t\t\tfunc(id types.NodeID) []netip.Prefix {\n\t\t\t\t\treturn primary.PrimaryRoutes(id)\n\t\t\t\t},\n\t\t\t\tcfg,\n\t\t\t)\n\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"TailNode() error = %v, wantErr %v\", err, 
tt.wantErr)\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif diff := cmp.Diff(tt.want, got, cmpopts.EquateEmpty()); diff != \"\" {\n\t\t\t\tt.Errorf(\"TailNode() unexpected result (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestNodeExpiry(t *testing.T) {\n\ttp := func(t time.Time) *time.Time {\n\t\treturn &t\n\t}\n\ttests := []struct {\n\t\tname         string\n\t\texp          *time.Time\n\t\twantTime     time.Time\n\t\twantTimeZero bool\n\t}{\n\t\t{\n\t\t\tname:         \"no-expiry\",\n\t\t\texp:          nil,\n\t\t\twantTimeZero: true,\n\t\t},\n\t\t{\n\t\t\tname:         \"zero-expiry\",\n\t\t\texp:          &time.Time{},\n\t\t\twantTimeZero: true,\n\t\t},\n\t\t{\n\t\t\tname:         \"localtime\",\n\t\t\texp:          tp(time.Time{}.Local()), //nolint:gosmopolitan\n\t\t\twantTimeZero: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tnode := &types.Node{\n\t\t\t\tID:        0,\n\t\t\t\tGivenName: \"test\",\n\t\t\t\tExpiry:    tt.exp,\n\t\t\t}\n\n\t\t\ttn, err := node.View().TailNode(\n\t\t\t\t0,\n\t\t\t\tfunc(id types.NodeID) []netip.Prefix {\n\t\t\t\t\treturn []netip.Prefix{}\n\t\t\t\t},\n\t\t\t\t&types.Config{Taildrop: types.TaildropConfig{Enabled: true}},\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"nodeExpiry() error = %v\", err)\n\t\t\t}\n\n\t\t\t// Round trip the node through JSON to ensure the time is serialized correctly\n\t\t\tseri, err := json.Marshal(tn)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"nodeExpiry() error = %v\", err)\n\t\t\t}\n\n\t\t\tvar deseri tailcfg.Node\n\n\t\t\terr = json.Unmarshal(seri, &deseri)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"nodeExpiry() error = %v\", err)\n\t\t\t}\n\n\t\t\tif tt.wantTimeZero {\n\t\t\t\tif !deseri.KeyExpiry.IsZero() {\n\t\t\t\t\tt.Errorf(\"nodeExpiry() = %v, want zero\", deseri.KeyExpiry)\n\t\t\t\t}\n\t\t\t} else if deseri.KeyExpiry != tt.wantTime {\n\t\t\t\tt.Errorf(\"nodeExpiry() = %v, want %v\", deseri.KeyExpiry, tt.wantTime)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
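  {
    "path": "docs/examples/expiry_json_roundtrip.go",
    "content": "// Illustrative sketch (hypothetical file, not part of headscale): it shows\n// the property TestNodeExpiry relies on. A zero time.Time marshals to\n// \"0001-01-01T00:00:00Z\" and unmarshals back to a zero time, so a node\n// without an expiry survives a JSON round trip with KeyExpiry.IsZero() intact.\npackage main\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"time\"\n)\n\nfunc main() {\n\t// Marshal a zero expiry, as a node without an expiry would produce.\n\tseri, err := json.Marshal(struct{ KeyExpiry time.Time }{})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tvar deseri struct{ KeyExpiry time.Time }\n\tif err := json.Unmarshal(seri, &deseri); err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(string(seri))              // {\"KeyExpiry\":\"0001-01-01T00:00:00Z\"}\n\tfmt.Println(deseri.KeyExpiry.IsZero()) // true\n}\n"
  },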
  {
    "path": "hscontrol/metrics.go",
    "content": "package hscontrol\n\nimport (\n\t\"net/http\"\n\t\"strconv\"\n\n\t\"github.com/gorilla/mux\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/promauto\"\n\t\"tailscale.com/envknob\"\n)\n\nvar debugHighCardinalityMetrics = envknob.Bool(\"HEADSCALE_DEBUG_HIGH_CARDINALITY_METRICS\")\n\nvar mapResponseLastSentSeconds *prometheus.GaugeVec\n\nfunc init() {\n\tif debugHighCardinalityMetrics {\n\t\tmapResponseLastSentSeconds = promauto.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: prometheusNamespace,\n\t\t\tName:      \"mapresponse_last_sent_seconds\",\n\t\t\tHelp:      \"last sent metric to node.id\",\n\t\t}, []string{\"type\", \"id\"})\n\t}\n}\n\nconst prometheusNamespace = \"headscale\"\n\nvar (\n\tmapResponseSent = promauto.NewCounterVec(prometheus.CounterOpts{\n\t\tNamespace: prometheusNamespace,\n\t\tName:      \"mapresponse_sent_total\",\n\t\tHelp:      \"total count of mapresponses sent to clients\",\n\t}, []string{\"status\", \"type\"})\n\tmapResponseEndpointUpdates = promauto.NewCounterVec(prometheus.CounterOpts{\n\t\tNamespace: prometheusNamespace,\n\t\tName:      \"mapresponse_endpoint_updates_total\",\n\t\tHelp:      \"total count of endpoint updates received\",\n\t}, []string{\"status\"})\n\tmapResponseEnded = promauto.NewCounterVec(prometheus.CounterOpts{\n\t\tNamespace: prometheusNamespace,\n\t\tName:      \"mapresponse_ended_total\",\n\t\tHelp:      \"total count of new mapsessions ended\",\n\t}, []string{\"reason\"})\n\thttpDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{\n\t\tNamespace: prometheusNamespace,\n\t\tName:      \"http_duration_seconds\",\n\t\tHelp:      \"Duration of HTTP requests.\",\n\t}, []string{\"path\"})\n\thttpCounter = promauto.NewCounterVec(prometheus.CounterOpts{\n\t\tNamespace: prometheusNamespace,\n\t\tName:      \"http_requests_total\",\n\t\tHelp:      \"Total number of http requests processed\",\n\t}, []string{\"code\", \"method\", \"path\"},\n\t)\n)\n\n// prometheusMiddleware implements mux.MiddlewareFunc.\nfunc prometheusMiddleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\troute := mux.CurrentRoute(r)\n\t\tpath, _ := route.GetPathTemplate()\n\n\t\t// Ignore streaming and noise sessions\n\t\t// it has its own router further down.\n\t\tif path == \"/ts2021\" || path == \"/machine/map\" || path == \"/derp\" || path == \"/derp/probe\" || path == \"/derp/latency-check\" || path == \"/bootstrap-dns\" {\n\t\t\tnext.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\trw := &respWriterProm{ResponseWriter: w}\n\n\t\ttimer := prometheus.NewTimer(httpDuration.WithLabelValues(path))\n\n\t\tnext.ServeHTTP(rw, r)\n\t\ttimer.ObserveDuration()\n\t\thttpCounter.WithLabelValues(strconv.Itoa(rw.status), r.Method, path).Inc()\n\t})\n}\n\ntype respWriterProm struct {\n\thttp.ResponseWriter\n\n\tstatus      int\n\twritten     int64\n\twroteHeader bool\n}\n\nfunc (r *respWriterProm) WriteHeader(code int) {\n\tr.status = code\n\tr.wroteHeader = true\n\tr.ResponseWriter.WriteHeader(code)\n}\n\nfunc (r *respWriterProm) Write(b []byte) (int, error) {\n\tif !r.wroteHeader {\n\t\tr.WriteHeader(http.StatusOK)\n\t}\n\n\tn, err := r.ResponseWriter.Write(b)\n\tr.written += int64(n)\n\n\treturn n, err\n}\n"
  },
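  {
    "path": "docs/examples/prometheus_middleware_sketch.go",
    "content": "// Illustrative sketch (hypothetical file, not part of headscale): it shows\n// how a middleware in the style of prometheusMiddleware is wired into a\n// gorilla/mux router, and how a wrapped ResponseWriter captures the status\n// code used as the \"code\" label. The namespace and routes here are made up.\npackage main\n\nimport (\n\t\"net/http\"\n\t\"strconv\"\n\n\t\"github.com/gorilla/mux\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/promauto\"\n\t\"github.com/prometheus/client_golang/prometheus/promhttp\"\n)\n\nvar requests = promauto.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"example\",\n\tName:      \"http_requests_total\",\n\tHelp:      \"Total number of http requests processed\",\n}, []string{\"code\", \"method\", \"path\"})\n\n// statusRecorder mirrors the respWriterProm idea: remember the status code\n// the handler wrote so the middleware can attach it as a label afterwards.\ntype statusRecorder struct {\n\thttp.ResponseWriter\n\n\tstatus int\n}\n\nfunc (r *statusRecorder) WriteHeader(code int) {\n\tr.status = code\n\tr.ResponseWriter.WriteHeader(code)\n}\n\nfunc metricsMiddleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tpath, _ := mux.CurrentRoute(r).GetPathTemplate()\n\n\t\t// Default to 200: if the handler never calls WriteHeader,\n\t\t// net/http sends 200 on the first Write.\n\t\trec := &statusRecorder{ResponseWriter: w, status: http.StatusOK}\n\t\tnext.ServeHTTP(rec, r)\n\t\trequests.WithLabelValues(strconv.Itoa(rec.status), r.Method, path).Inc()\n\t})\n}\n\nfunc main() {\n\tr := mux.NewRouter()\n\tr.Use(metricsMiddleware)\n\tr.Handle(\"/metrics\", promhttp.Handler())\n\tr.HandleFunc(\"/health\", func(w http.ResponseWriter, _ *http.Request) {\n\t\tw.WriteHeader(http.StatusOK)\n\t})\n\t_ = http.ListenAndServe(\":8080\", r)\n}\n"
  },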
  {
    "path": "hscontrol/noise.go",
    "content": "package hscontrol\n\nimport (\n\t\"encoding/binary\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"time\"\n\n\t\"github.com/go-chi/chi/v5\"\n\t\"github.com/go-chi/chi/v5/middleware\"\n\t\"github.com/go-chi/metrics\"\n\t\"github.com/juanfont/headscale/hscontrol/capver\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/rs/zerolog\"\n\t\"github.com/rs/zerolog/log\"\n\t\"golang.org/x/net/http2\"\n\t\"tailscale.com/control/controlbase\"\n\t\"tailscale.com/control/controlhttp/controlhttpserver\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/key\"\n)\n\n// ErrUnsupportedClientVersion is returned when a client connects with an unsupported protocol version.\nvar ErrUnsupportedClientVersion = errors.New(\"unsupported client version\")\n\n// ErrMissingURLParameter is returned when a required URL parameter is not provided.\nvar ErrMissingURLParameter = errors.New(\"missing URL parameter\")\n\n// ErrUnsupportedURLParameterType is returned when a URL parameter has an unsupported type.\nvar ErrUnsupportedURLParameterType = errors.New(\"unsupported URL parameter type\")\n\n// ErrNoAuthSession is returned when an auth_id does not match any active auth session.\nvar ErrNoAuthSession = errors.New(\"no auth session found\")\n\nconst (\n\t// ts2021UpgradePath is the path that the server listens on for the WebSockets upgrade.\n\tts2021UpgradePath = \"/ts2021\"\n\n\t// The first 9 bytes from the server to client over Noise are either an HTTP/2\n\t// settings frame (a normal HTTP/2 setup) or, as Tailscale added later, an \"early payload\"\n\t// header that's also 9 bytes long: 5 bytes (earlyPayloadMagic) followed by 4 bytes\n\t// of length. Then that many bytes of JSON-encoded tailcfg.EarlyNoise.\n\t// The early payload is optional. Some servers may not send it... But we do!\n\tearlyPayloadMagic = \"\\xff\\xff\\xffTS\"\n\n\t// noiseBodyLimit is the maximum allowed request body size for Noise protocol\n\t// handlers. This prevents unauthenticated OOM attacks via unbounded io.ReadAll.\n\t// No legitimate Noise request (MapRequest, RegisterRequest, etc.) comes close\n\t// to this limit; typical payloads are a few KB.\n\tnoiseBodyLimit int64 = 1048576 // 1 MiB\n)\n\ntype noiseServer struct {\n\theadscale *Headscale\n\n\thttpBaseConfig *http.Server\n\thttp2Server    *http2.Server\n\tconn           *controlbase.Conn\n\tmachineKey     key.MachinePublic\n\tnodeKey        key.NodePublic\n\n\t// EarlyNoise-related stuff\n\tchallenge       key.ChallengePrivate\n\tprotocolVersion int\n}\n\n// NoiseUpgradeHandler is to upgrade the connection and hijack the net.Conn\n// in order to use the Noise-based TS2021 protocol. Listens in /ts2021.\nfunc (h *Headscale) NoiseUpgradeHandler(\n\twriter http.ResponseWriter,\n\treq *http.Request,\n) {\n\tlog.Trace().Caller().Msgf(\"noise upgrade handler for client %s\", req.RemoteAddr)\n\n\tupgrade := req.Header.Get(\"Upgrade\")\n\tif upgrade == \"\" {\n\t\t// This probably means that the user is running Headscale behind an\n\t\t// improperly configured reverse proxy. TS2021 requires WebSockets to\n\t\t// be passed to Headscale. Let's give them a hint.\n\t\tlog.Warn().\n\t\t\tCaller().\n\t\t\tMsg(\"no upgrade header in TS2021 request. 
If headscale is behind a reverse proxy, make sure it is configured to pass WebSockets through.\")\n\t\thttp.Error(writer, \"Internal error\", http.StatusInternalServerError)\n\n\t\treturn\n\t}\n\n\tns := noiseServer{\n\t\theadscale: h,\n\t\tchallenge: key.NewChallenge(),\n\t}\n\n\tnoiseConn, err := controlhttpserver.AcceptHTTP(\n\t\treq.Context(),\n\t\twriter,\n\t\treq,\n\t\t*h.noisePrivateKey,\n\t\tns.earlyNoise,\n\t)\n\tif err != nil {\n\t\thttpError(writer, fmt.Errorf(\"upgrading noise connection: %w\", err))\n\t\treturn\n\t}\n\n\tns.conn = noiseConn\n\tns.machineKey = ns.conn.Peer()\n\tns.protocolVersion = ns.conn.ProtocolVersion()\n\n\t// This router is served only over the Noise connection, and exposes only the new API.\n\t//\n\t// The HTTP2 server that exposes this router is created for\n\t// a single hijacked connection from /ts2021, using netutil.NewOneConnListener\n\n\tr := chi.NewRouter()\n\n\t// Limit request body size to prevent unauthenticated OOM attacks.\n\t// The Noise handshake accepts any machine key without checking\n\t// registration, so all endpoints behind this router are reachable\n\t// without credentials.\n\tr.Use(func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tr.Body = http.MaxBytesReader(w, r.Body, noiseBodyLimit)\n\t\t\tnext.ServeHTTP(w, r)\n\t\t})\n\t})\n\tr.Use(metrics.Collector(metrics.CollectorOpts{\n\t\tHost:  false,\n\t\tProto: true,\n\t\tSkip: func(r *http.Request) bool {\n\t\t\treturn r.Method != http.MethodOptions\n\t\t},\n\t}))\n\tr.Use(middleware.RequestID)\n\tr.Use(middleware.RealIP)\n\tr.Use(middleware.RequestLogger(&zerologRequestLogger{}))\n\tr.Use(middleware.Recoverer)\n\n\tr.Handle(\"/metrics\", metrics.Handler())\n\n\tr.Route(\"/machine\", func(r chi.Router) {\n\t\tr.Post(\"/register\", ns.RegistrationHandler)\n\t\tr.Post(\"/map\", ns.PollNetMapHandler)\n\n\t\t// SSH Check mode endpoint, consulted to validate if a given SSH connection should be accepted or rejected.\n\t\tr.Get(\"/ssh/action/from/{src_node_id}/to/{dst_node_id}\", ns.SSHActionHandler)\n\n\t\t// Not implemented yet\n\t\t//\n\t\t// /whoami is a debug endpoint to validate that the client can communicate over the connection,\n\t\t// not clear if there is a specific response, it looks like it is just logged.\n\t\t// https://github.com/tailscale/tailscale/blob/dfba01ca9bd8c4df02c3c32f400d9aeb897c5fc7/cmd/tailscale/cli/debug.go#L1138\n\t\tr.Get(\"/whoami\", ns.NotImplementedHandler)\n\n\t\t// The client sends a [tailcfg.SetDNSRequest] to this endpoint and expects\n\t\t// the server to create or update this DNS record \"somewhere\".\n\t\t// It is typically a TXT record for an ACME challenge.\n\t\tr.Post(\"/set-dns\", ns.NotImplementedHandler)\n\n\t\t// A patch of [tailcfg.SetDeviceAttributesRequest] to update device attributes.\n\t\t// We currently do not support device attributes.\n\t\tr.Patch(\"/set-device-attr\", ns.NotImplementedHandler)\n\n\t\t// A [tailcfg.AuditLogRequest] to send audit log entries to the server.\n\t\t// The server is expected to store them \"somewhere\".\n\t\t// We currently do not support audit logs.\n\t\tr.Post(\"/audit-log\", ns.NotImplementedHandler)\n\n\t\t// Handles requests to get an OIDC ID token. 
Receives a [tailcfg.TokenRequest].\n\t\tr.Post(\"/id-token\", ns.NotImplementedHandler)\n\n\t\t// Asks the server if a feature is available and receives information about how to enable it.\n\t\t// Gets a [tailcfg.QueryFeatureRequest] and returns a [tailcfg.QueryFeatureResponse].\n\t\tr.Post(\"/feature/query\", ns.NotImplementedHandler)\n\n\t\tr.Post(\"/update-health\", ns.NotImplementedHandler)\n\n\t\tr.Route(\"/webclient\", func(r chi.Router) {})\n\n\t\tr.Post(\"/c2n\", ns.NotImplementedHandler)\n\t})\n\n\tns.httpBaseConfig = &http.Server{\n\t\tHandler:           r,\n\t\tReadHeaderTimeout: types.HTTPTimeout,\n\t}\n\tns.http2Server = &http2.Server{}\n\n\tns.http2Server.ServeConn(\n\t\tnoiseConn,\n\t\t&http2.ServeConnOpts{\n\t\t\tBaseConfig: ns.httpBaseConfig,\n\t\t},\n\t)\n}\n\nfunc unsupportedClientError(version tailcfg.CapabilityVersion) error {\n\treturn fmt.Errorf(\"%w: %s (%d)\", ErrUnsupportedClientVersion, capver.TailscaleVersion(version), version)\n}\n\nfunc (ns *noiseServer) earlyNoise(protocolVersion int, writer io.Writer) error {\n\tif !isSupportedVersion(tailcfg.CapabilityVersion(protocolVersion)) {\n\t\treturn unsupportedClientError(tailcfg.CapabilityVersion(protocolVersion))\n\t}\n\n\tearlyJSON, err := json.Marshal(&tailcfg.EarlyNoise{\n\t\tNodeKeyChallenge: ns.challenge.Public(),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// 5 bytes that won't be mistaken for an HTTP/2 frame:\n\t// https://httpwg.org/specs/rfc7540.html#rfc.section.4.1 (Especially not\n\t// an HTTP/2 settings frame, which isn't of type 'T')\n\tvar notH2Frame [5]byte\n\tcopy(notH2Frame[:], earlyPayloadMagic)\n\n\tvar lenBuf [4]byte\n\tbinary.BigEndian.PutUint32(lenBuf[:], uint32(len(earlyJSON))) //nolint:gosec // JSON length is bounded\n\t// These writes are all buffered by caller, so fine to do them\n\t// separately:\n\tif _, err := writer.Write(notH2Frame[:]); err != nil { //nolint:noinlineerr\n\t\treturn err\n\t}\n\n\tif _, err := writer.Write(lenBuf[:]); err != nil { //nolint:noinlineerr\n\t\treturn err\n\t}\n\n\tif _, err := writer.Write(earlyJSON); err != nil { //nolint:noinlineerr\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc isSupportedVersion(version tailcfg.CapabilityVersion) bool {\n\treturn version >= capver.MinSupportedCapabilityVersion\n}\n\nfunc rejectUnsupported(\n\twriter http.ResponseWriter,\n\tversion tailcfg.CapabilityVersion,\n\tmkey key.MachinePublic,\n\tnkey key.NodePublic,\n) bool {\n\t// Reject unsupported versions\n\tif !isSupportedVersion(version) {\n\t\tlog.Error().\n\t\t\tCaller().\n\t\t\tInt(\"minimum_cap_ver\", int(capver.MinSupportedCapabilityVersion)).\n\t\t\tInt(\"client_cap_ver\", int(version)).\n\t\t\tStr(\"minimum_version\", capver.TailscaleVersion(capver.MinSupportedCapabilityVersion)).\n\t\t\tStr(\"client_version\", capver.TailscaleVersion(version)).\n\t\t\tStr(\"node.key\", nkey.ShortString()).\n\t\t\tStr(\"machine.key\", mkey.ShortString()).\n\t\t\tMsg(\"unsupported client connected\")\n\t\thttp.Error(writer, unsupportedClientError(version).Error(), http.StatusBadRequest)\n\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nfunc (ns *noiseServer) NotImplementedHandler(writer http.ResponseWriter, req *http.Request) {\n\tlog.Trace().Caller().Str(\"path\", req.URL.String()).Msg(\"not implemented handler hit\")\n\thttp.Error(writer, \"Not implemented yet\", http.StatusNotImplemented)\n}\n\nfunc urlParam[T any](req *http.Request, key string) (T, error) {\n\tvar zero T\n\n\tparam := chi.URLParam(req, key)\n\tif param == \"\" {\n\t\treturn zero, fmt.Errorf(\"%w: %s\", 
ErrMissingURLParameter, key)\n\t}\n\n\tvar value T\n\tswitch any(value).(type) {\n\tcase string:\n\t\tv, ok := any(param).(T)\n\t\tif !ok {\n\t\t\treturn zero, fmt.Errorf(\"%w: %T\", ErrUnsupportedURLParameterType, value)\n\t\t}\n\n\t\tvalue = v\n\tcase types.NodeID:\n\t\tid, err := types.ParseNodeID(param)\n\t\tif err != nil {\n\t\t\treturn zero, fmt.Errorf(\"parsing %s: %w\", key, err)\n\t\t}\n\n\t\tv, ok := any(id).(T)\n\t\tif !ok {\n\t\t\treturn zero, fmt.Errorf(\"%w: %T\", ErrUnsupportedURLParameterType, value)\n\t\t}\n\n\t\tvalue = v\n\tdefault:\n\t\treturn zero, fmt.Errorf(\"%w: %T\", ErrUnsupportedURLParameterType, value)\n\t}\n\n\treturn value, nil\n}\n\n// SSHActionHandler handles the /ssh-action endpoint, returning a\n// [tailcfg.SSHAction] to the client with the verdict of an SSH access\n// request.\nfunc (ns *noiseServer) SSHActionHandler(\n\twriter http.ResponseWriter,\n\treq *http.Request,\n) {\n\tsrcNodeID, err := urlParam[types.NodeID](req, \"src_node_id\")\n\tif err != nil {\n\t\thttpError(writer, NewHTTPError(\n\t\t\thttp.StatusBadRequest,\n\t\t\t\"Invalid src_node_id\",\n\t\t\terr,\n\t\t))\n\n\t\treturn\n\t}\n\n\tdstNodeID, err := urlParam[types.NodeID](req, \"dst_node_id\")\n\tif err != nil {\n\t\thttpError(writer, NewHTTPError(\n\t\t\thttp.StatusBadRequest,\n\t\t\t\"Invalid dst_node_id\",\n\t\t\terr,\n\t\t))\n\n\t\treturn\n\t}\n\n\treqLog := log.With().\n\t\tUint64(\"src_node_id\", srcNodeID.Uint64()).\n\t\tUint64(\"dst_node_id\", dstNodeID.Uint64()).\n\t\tStr(\"ssh_user\", req.URL.Query().Get(\"ssh_user\")).\n\t\tStr(\"local_user\", req.URL.Query().Get(\"local_user\")).\n\t\tLogger()\n\n\treqLog.Trace().Caller().Msg(\"SSH action request\")\n\n\taction, err := ns.sshAction(\n\t\treqLog,\n\t\tsrcNodeID, dstNodeID,\n\t\treq.URL.Query().Get(\"auth_id\"),\n\t)\n\tif err != nil {\n\t\thttpError(writer, err)\n\n\t\treturn\n\t}\n\n\twriter.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\twriter.WriteHeader(http.StatusOK)\n\n\terr = json.NewEncoder(writer).Encode(action)\n\tif err != nil {\n\t\treqLog.Error().Caller().Err(err).\n\t\t\tMsg(\"failed to encode SSH action response\")\n\n\t\treturn\n\t}\n\n\tif flusher, ok := writer.(http.Flusher); ok {\n\t\tflusher.Flush()\n\t}\n}\n\n// sshAction resolves the SSH action for the given request parameters.\n// It returns the action to send to the client, or an HTTPError on failure.\n//\n// Three cases:\n//  1. Initial request, auto-approved — source recently authenticated\n//     within the check period, accept immediately.\n//  2. Initial request, needs auth — build a HoldAndDelegate URL and\n//     wait for the user to authenticate.\n//  3. 
Follow-up request — an auth_id is present, wait for the auth\n//     verdict and accept or reject.\nfunc (ns *noiseServer) sshAction(\n\treqLog zerolog.Logger,\n\tsrcNodeID, dstNodeID types.NodeID,\n\tauthIDStr string,\n) (*tailcfg.SSHAction, error) {\n\taction := tailcfg.SSHAction{\n\t\tAllowAgentForwarding:      true,\n\t\tAllowLocalPortForwarding:  true,\n\t\tAllowRemotePortForwarding: true,\n\t}\n\n\t// Look up check params from the server's own policy rather than\n\t// trusting URL parameters, which the client could tamper with.\n\tcheckPeriod, checkFound := ns.headscale.state.SSHCheckParams(\n\t\tsrcNodeID, dstNodeID,\n\t)\n\n\t// Follow-up request with auth_id — wait for the auth verdict.\n\tif authIDStr != \"\" {\n\t\treturn ns.sshActionFollowUp(\n\t\t\treqLog, &action, authIDStr,\n\t\t\tsrcNodeID, dstNodeID,\n\t\t\tcheckFound,\n\t\t)\n\t}\n\n\t// Initial request — check if auto-approval applies.\n\tif checkFound && checkPeriod > 0 {\n\t\tif lastAuth, ok := ns.headscale.state.GetLastSSHAuth(\n\t\t\tsrcNodeID, dstNodeID,\n\t\t); ok && time.Since(lastAuth) < checkPeriod {\n\t\t\treqLog.Trace().Caller().\n\t\t\t\tDur(\"check_period\", checkPeriod).\n\t\t\t\tTime(\"last_auth\", lastAuth).\n\t\t\t\tMsg(\"auto-approved within check period\")\n\n\t\t\taction.Accept = true\n\n\t\t\treturn &action, nil\n\t\t}\n\t}\n\n\t// No auto-approval — create an auth session and hold.\n\treturn ns.sshActionHoldAndDelegate(reqLog, &action)\n}\n\n// sshActionHoldAndDelegate creates a new auth session and returns a\n// HoldAndDelegate action that directs the client to authenticate.\nfunc (ns *noiseServer) sshActionHoldAndDelegate(\n\treqLog zerolog.Logger,\n\taction *tailcfg.SSHAction,\n) (*tailcfg.SSHAction, error) {\n\tholdURL, err := url.Parse(\n\t\tns.headscale.cfg.ServerURL +\n\t\t\t\"/machine/ssh/action/from/$SRC_NODE_ID/to/$DST_NODE_ID\" +\n\t\t\t\"?ssh_user=$SSH_USER&local_user=$LOCAL_USER\",\n\t)\n\tif err != nil {\n\t\treturn nil, NewHTTPError(\n\t\t\thttp.StatusInternalServerError,\n\t\t\t\"Internal error\",\n\t\t\tfmt.Errorf(\"parsing SSH action URL: %w\", err),\n\t\t)\n\t}\n\n\tauthID, err := types.NewAuthID()\n\tif err != nil {\n\t\treturn nil, NewHTTPError(\n\t\t\thttp.StatusInternalServerError,\n\t\t\t\"Internal error\",\n\t\t\tfmt.Errorf(\"generating auth ID: %w\", err),\n\t\t)\n\t}\n\n\tns.headscale.state.SetAuthCacheEntry(authID, types.NewAuthRequest())\n\n\tauthURL := ns.headscale.authProvider.AuthURL(authID)\n\n\tq := holdURL.Query()\n\tq.Set(\"auth_id\", authID.String())\n\tholdURL.RawQuery = q.Encode()\n\n\taction.HoldAndDelegate = holdURL.String()\n\n\t// TODO(kradalby): here we can also send a very tiny mapresponse\n\t// \"popping\" the url and opening it for the user.\n\taction.Message = fmt.Sprintf(\n\t\t\"# Headscale SSH requires an additional check.\\n\"+\n\t\t\t\"# To authenticate, visit: %s\\n\"+\n\t\t\t\"# Authentication checked with Headscale SSH.\\n\",\n\t\tauthURL,\n\t)\n\n\treqLog.Info().Caller().\n\t\tStr(\"auth_id\", authID.String()).\n\t\tMsg(\"SSH check pending, waiting for auth\")\n\n\treturn action, nil\n}\n\n// sshActionFollowUp handles follow-up requests where the client\n// provides an auth_id. 
It blocks until the auth session resolves.\nfunc (ns *noiseServer) sshActionFollowUp(\n\treqLog zerolog.Logger,\n\taction *tailcfg.SSHAction,\n\tauthIDStr string,\n\tsrcNodeID, dstNodeID types.NodeID,\n\tcheckFound bool,\n) (*tailcfg.SSHAction, error) {\n\tauthID, err := types.AuthIDFromString(authIDStr)\n\tif err != nil {\n\t\treturn nil, NewHTTPError(\n\t\t\thttp.StatusBadRequest,\n\t\t\t\"Invalid auth_id\",\n\t\t\tfmt.Errorf(\"parsing auth_id: %w\", err),\n\t\t)\n\t}\n\n\treqLog = reqLog.With().Str(\"auth_id\", authID.String()).Logger()\n\n\tauth, ok := ns.headscale.state.GetAuthCacheEntry(authID)\n\tif !ok {\n\t\treturn nil, NewHTTPError(\n\t\t\thttp.StatusBadRequest,\n\t\t\t\"Invalid auth_id\",\n\t\t\tfmt.Errorf(\"%w: %s\", ErrNoAuthSession, authID),\n\t\t)\n\t}\n\n\treqLog.Trace().Caller().Msg(\"SSH action follow-up\")\n\n\tverdict := <-auth.WaitForAuth()\n\n\tif !verdict.Accept() {\n\t\taction.Reject = true\n\n\t\treqLog.Trace().Caller().Err(verdict.Err).\n\t\t\tMsg(\"authentication rejected\")\n\n\t\treturn action, nil\n\t}\n\n\taction.Accept = true\n\n\t// Record the successful auth for future auto-approval.\n\tif checkFound {\n\t\tns.headscale.state.SetLastSSHAuth(srcNodeID, dstNodeID)\n\n\t\treqLog.Trace().Caller().\n\t\t\tMsg(\"auth recorded for auto-approval\")\n\t}\n\n\treturn action, nil\n}\n\n// PollNetMapHandler takes care of /machine/map using the Noise protocol\n//\n// This is the busiest endpoint, as it keeps the HTTP long poll that updates\n// the clients when something in the network changes.\n//\n// The clients POST stuff like HostInfo and their Endpoints here, but\n// only after their first request (marked with the ReadOnly field).\n//\n// At this moment the updates are sent in a quite horrendous way, but they kinda work.\nfunc (ns *noiseServer) PollNetMapHandler(\n\twriter http.ResponseWriter,\n\treq *http.Request,\n) {\n\tvar mapRequest tailcfg.MapRequest\n\n\terr := json.NewDecoder(req.Body).Decode(&mapRequest)\n\tif err != nil {\n\t\thttpError(writer, err)\n\t\treturn\n\t}\n\n\t// Reject unsupported versions\n\tif rejectUnsupported(writer, mapRequest.Version, ns.machineKey, mapRequest.NodeKey) {\n\t\treturn\n\t}\n\n\tnv, err := ns.getAndValidateNode(mapRequest)\n\tif err != nil {\n\t\thttpError(writer, err)\n\t\treturn\n\t}\n\n\tns.nodeKey = nv.NodeKey()\n\n\tsess := ns.headscale.newMapSession(req.Context(), mapRequest, writer, nv.AsStruct())\n\tsess.log.Trace().Caller().Msg(\"a node sending a MapRequest with Noise protocol\")\n\n\tif !sess.isStreaming() {\n\t\tsess.serve()\n\t} else {\n\t\tsess.serveLongPoll()\n\t}\n}\n\nfunc regErr(err error) *tailcfg.RegisterResponse {\n\treturn &tailcfg.RegisterResponse{Error: err.Error()}\n}\n\n// RegistrationHandler handles the actual registration process of a node.\nfunc (ns *noiseServer) RegistrationHandler(\n\twriter http.ResponseWriter,\n\treq *http.Request,\n) {\n\tif req.Method != http.MethodPost {\n\t\thttpError(writer, errMethodNotAllowed)\n\n\t\treturn\n\t}\n\n\tregisterRequest, registerResponse := func() (*tailcfg.RegisterRequest, *tailcfg.RegisterResponse) { //nolint:contextcheck\n\t\tvar resp *tailcfg.RegisterResponse\n\n\t\tvar regReq tailcfg.RegisterRequest\n\n\t\terr := json.NewDecoder(req.Body).Decode(&regReq)\n\t\tif err != nil {\n\t\t\treturn &regReq, regErr(err)\n\t\t}\n\n\t\tns.nodeKey = regReq.NodeKey\n\n\t\tresp, err = ns.headscale.handleRegister(req.Context(), regReq, ns.conn.Peer())\n\t\tif err != nil {\n\t\t\tif httpErr, ok := errors.AsType[HTTPError](err); ok {\n\t\t\t\tresp = 
&tailcfg.RegisterResponse{\n\t\t\t\t\tError: httpErr.Msg,\n\t\t\t\t}\n\n\t\t\t\treturn &regReq, resp\n\t\t\t}\n\n\t\t\treturn &regReq, regErr(err)\n\t\t}\n\n\t\treturn &regReq, resp\n\t}()\n\n\t// Reject unsupported versions\n\tif rejectUnsupported(writer, registerRequest.Version, ns.machineKey, registerRequest.NodeKey) {\n\t\treturn\n\t}\n\n\twriter.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\twriter.WriteHeader(http.StatusOK)\n\n\terr := json.NewEncoder(writer).Encode(registerResponse)\n\tif err != nil {\n\t\tlog.Error().Caller().Err(err).Msg(\"noise registration handler: failed to encode RegisterResponse\")\n\t\treturn\n\t}\n\n\t// Ensure response is flushed to client\n\tif flusher, ok := writer.(http.Flusher); ok {\n\t\tflusher.Flush()\n\t}\n}\n\n// getAndValidateNode retrieves the node from the database using the NodeKey\n// and validates that it matches the MachineKey from the Noise session.\nfunc (ns *noiseServer) getAndValidateNode(mapRequest tailcfg.MapRequest) (types.NodeView, error) {\n\tnv, ok := ns.headscale.state.GetNodeByNodeKey(mapRequest.NodeKey)\n\tif !ok {\n\t\treturn types.NodeView{}, NewHTTPError(http.StatusNotFound, \"node not found\", nil)\n\t}\n\n\t// Validate that the MachineKey in the Noise session matches the one associated with the NodeKey.\n\tif ns.machineKey != nv.MachineKey() {\n\t\treturn types.NodeView{}, NewHTTPError(http.StatusNotFound, \"node key in request does not match the one associated with this machine key\", nil)\n\t}\n\n\treturn nv, nil\n}\n"
  },
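  {
    "path": "docs/examples/early_payload_sketch.go",
    "content": "// Illustrative sketch (hypothetical file, not part of headscale): it parses\n// the optional \"early payload\" that earlyNoise writes before HTTP/2 traffic\n// starts. The framing is the part noise.go documents: 5 magic bytes that\n// cannot be an HTTP/2 frame header, a 4-byte big-endian length, then that\n// many bytes of JSON. EarlyNoise here is a stand-in for tailcfg.EarlyNoise.\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n)\n\nconst earlyPayloadMagic = \"\\xff\\xff\\xffTS\"\n\n// EarlyNoise is a minimal stand-in for tailcfg.EarlyNoise.\ntype EarlyNoise struct {\n\tNodeKeyChallenge string `json:\"nodeKeyChallenge\"`\n}\n\n// readEarlyPayload reads the magic, the length, and the JSON body from r.\nfunc readEarlyPayload(r io.Reader) (*EarlyNoise, error) {\n\tvar magic [5]byte\n\tif _, err := io.ReadFull(r, magic[:]); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif string(magic[:]) != earlyPayloadMagic {\n\t\t// A real client would treat these bytes as the start of HTTP/2.\n\t\treturn nil, errors.New(\"no early payload\")\n\t}\n\n\tvar lenBuf [4]byte\n\tif _, err := io.ReadFull(r, lenBuf[:]); err != nil {\n\t\treturn nil, err\n\t}\n\n\tbody := make([]byte, binary.BigEndian.Uint32(lenBuf[:]))\n\tif _, err := io.ReadFull(r, body); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar early EarlyNoise\n\tif err := json.Unmarshal(body, &early); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &early, nil\n}\n\nfunc main() {\n\t// Frame a payload exactly the way earlyNoise does, then parse it back.\n\tpayload := []byte(`{\"nodeKeyChallenge\":\"chalpub:00\"}`)\n\n\tvar buf bytes.Buffer\n\tbuf.WriteString(earlyPayloadMagic)\n\n\tvar lenBuf [4]byte\n\tbinary.BigEndian.PutUint32(lenBuf[:], uint32(len(payload)))\n\tbuf.Write(lenBuf[:])\n\tbuf.Write(payload)\n\n\tearly, err := readEarlyPayload(&buf)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(early.NodeKeyChallenge) // chalpub:00\n}\n"
  },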
  {
    "path": "hscontrol/noise_test.go",
    "content": "package hscontrol\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n\n\t\"github.com/go-chi/chi/v5\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/tailcfg\"\n)\n\n// newNoiseRouterWithBodyLimit builds a chi router with the same body-limit\n// middleware used in the real Noise router but wired to a test handler that\n// captures the io.ReadAll result. This lets us verify the limit without\n// needing a full Headscale instance.\nfunc newNoiseRouterWithBodyLimit(readBody *[]byte, readErr *error) http.Handler {\n\tr := chi.NewRouter()\n\tr.Use(func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tr.Body = http.MaxBytesReader(w, r.Body, noiseBodyLimit)\n\t\t\tnext.ServeHTTP(w, r)\n\t\t})\n\t})\n\n\thandler := func(w http.ResponseWriter, r *http.Request) {\n\t\t*readBody, *readErr = io.ReadAll(r.Body)\n\t\tif *readErr != nil {\n\t\t\thttp.Error(w, \"body too large\", http.StatusRequestEntityTooLarge)\n\n\t\t\treturn\n\t\t}\n\n\t\tw.WriteHeader(http.StatusOK)\n\t}\n\n\tr.Post(\"/machine/map\", handler)\n\tr.Post(\"/machine/register\", handler)\n\n\treturn r\n}\n\nfunc TestNoiseBodyLimit_MapEndpoint(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"normal_map_request\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tvar body []byte\n\n\t\tvar readErr error\n\n\t\trouter := newNoiseRouterWithBodyLimit(&body, &readErr)\n\n\t\tmapReq := tailcfg.MapRequest{Version: 100, Stream: true}\n\t\tpayload, err := json.Marshal(mapReq)\n\t\trequire.NoError(t, err)\n\n\t\treq := httptest.NewRequestWithContext(context.Background(), http.MethodPost, \"/machine/map\", bytes.NewReader(payload))\n\t\trec := httptest.NewRecorder()\n\t\trouter.ServeHTTP(rec, req)\n\n\t\trequire.NoError(t, readErr)\n\t\tassert.Equal(t, http.StatusOK, rec.Code)\n\t\tassert.Len(t, body, len(payload))\n\t})\n\n\tt.Run(\"oversized_body_rejected\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tvar body []byte\n\n\t\tvar readErr error\n\n\t\trouter := newNoiseRouterWithBodyLimit(&body, &readErr)\n\n\t\toversized := bytes.Repeat([]byte(\"x\"), int(noiseBodyLimit)+1)\n\t\treq := httptest.NewRequestWithContext(context.Background(), http.MethodPost, \"/machine/map\", bytes.NewReader(oversized))\n\t\trec := httptest.NewRecorder()\n\t\trouter.ServeHTTP(rec, req)\n\n\t\trequire.Error(t, readErr)\n\t\tassert.Equal(t, http.StatusRequestEntityTooLarge, rec.Code)\n\t\tassert.LessOrEqual(t, len(body), int(noiseBodyLimit))\n\t})\n}\n\nfunc TestNoiseBodyLimit_RegisterEndpoint(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"normal_register_request\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tvar body []byte\n\n\t\tvar readErr error\n\n\t\trouter := newNoiseRouterWithBodyLimit(&body, &readErr)\n\n\t\tregReq := tailcfg.RegisterRequest{Version: 100}\n\t\tpayload, err := json.Marshal(regReq)\n\t\trequire.NoError(t, err)\n\n\t\treq := httptest.NewRequestWithContext(context.Background(), http.MethodPost, \"/machine/register\", bytes.NewReader(payload))\n\t\trec := httptest.NewRecorder()\n\t\trouter.ServeHTTP(rec, req)\n\n\t\trequire.NoError(t, readErr)\n\t\tassert.Equal(t, http.StatusOK, rec.Code)\n\t\tassert.Len(t, body, len(payload))\n\t})\n\n\tt.Run(\"oversized_body_rejected\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tvar body []byte\n\n\t\tvar readErr error\n\n\t\trouter := newNoiseRouterWithBodyLimit(&body, &readErr)\n\n\t\toversized := 
bytes.Repeat([]byte(\"x\"), int(noiseBodyLimit)+1)\n\t\treq := httptest.NewRequestWithContext(context.Background(), http.MethodPost, \"/machine/register\", bytes.NewReader(oversized))\n\t\trec := httptest.NewRecorder()\n\t\trouter.ServeHTTP(rec, req)\n\n\t\trequire.Error(t, readErr)\n\t\tassert.Equal(t, http.StatusRequestEntityTooLarge, rec.Code)\n\t\tassert.LessOrEqual(t, len(body), int(noiseBodyLimit))\n\t})\n}\n\nfunc TestNoiseBodyLimit_AtExactLimit(t *testing.T) {\n\tt.Parallel()\n\n\tvar body []byte\n\n\tvar readErr error\n\n\trouter := newNoiseRouterWithBodyLimit(&body, &readErr)\n\n\tpayload := bytes.Repeat([]byte(\"a\"), int(noiseBodyLimit))\n\treq := httptest.NewRequestWithContext(context.Background(), http.MethodPost, \"/machine/map\", bytes.NewReader(payload))\n\trec := httptest.NewRecorder()\n\trouter.ServeHTTP(rec, req)\n\n\trequire.NoError(t, readErr)\n\tassert.Equal(t, http.StatusOK, rec.Code)\n\tassert.Len(t, body, int(noiseBodyLimit))\n}\n\n// TestPollNetMapHandler_OversizedBody calls the real handler with a\n// MaxBytesReader-wrapped body to verify it fails gracefully (json decode\n// error on truncated data) rather than consuming unbounded memory.\nfunc TestPollNetMapHandler_OversizedBody(t *testing.T) {\n\tt.Parallel()\n\n\tns := &noiseServer{}\n\n\toversized := bytes.Repeat([]byte(\"x\"), int(noiseBodyLimit)+1)\n\treq := httptest.NewRequestWithContext(context.Background(), http.MethodPost, \"/machine/map\", bytes.NewReader(oversized))\n\trec := httptest.NewRecorder()\n\treq.Body = http.MaxBytesReader(rec, req.Body, noiseBodyLimit)\n\n\tns.PollNetMapHandler(rec, req)\n\n\t// Body is truncated → json.Decode fails → httpError returns 500.\n\tassert.Equal(t, http.StatusInternalServerError, rec.Code)\n}\n\n// TestRegistrationHandler_OversizedBody calls the real handler with a\n// MaxBytesReader-wrapped body to verify it returns an error response\n// rather than consuming unbounded memory.\nfunc TestRegistrationHandler_OversizedBody(t *testing.T) {\n\tt.Parallel()\n\n\tns := &noiseServer{}\n\n\toversized := bytes.Repeat([]byte(\"x\"), int(noiseBodyLimit)+1)\n\treq := httptest.NewRequestWithContext(context.Background(), http.MethodPost, \"/machine/register\", bytes.NewReader(oversized))\n\trec := httptest.NewRecorder()\n\treq.Body = http.MaxBytesReader(rec, req.Body, noiseBodyLimit)\n\n\tns.RegistrationHandler(rec, req)\n\n\t// json.Decode returns MaxBytesError → regErr wraps it → handler writes\n\t// a RegisterResponse with the error and then rejectUnsupported kicks in\n\t// for version 0 → returns 400.\n\tassert.Equal(t, http.StatusBadRequest, rec.Code)\n}\n"
  },
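  {
    "path": "docs/examples/max_bytes_error_sketch.go",
    "content": "// Illustrative sketch (hypothetical file, not part of headscale): it shows\n// how a handler behind http.MaxBytesReader can tell \"body too large\" apart\n// from other JSON decode failures via *http.MaxBytesError (Go 1.19+). The\n// tests above observe the same error indirectly through io.ReadAll; the\n// tiny limit and the route here are made up.\npackage main\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"strings\"\n)\n\nconst bodyLimit int64 = 64 // deliberately tiny, for demonstration only\n\nfunc handler(w http.ResponseWriter, r *http.Request) {\n\t// Same pattern as the Noise router middleware: cap the body size\n\t// before anything reads from it.\n\tr.Body = http.MaxBytesReader(w, r.Body, bodyLimit)\n\n\tvar payload map[string]any\n\tif err := json.NewDecoder(r.Body).Decode(&payload); err != nil {\n\t\tvar maxErr *http.MaxBytesError\n\t\tif errors.As(err, &maxErr) {\n\t\t\thttp.Error(w, \"body too large\", http.StatusRequestEntityTooLarge)\n\t\t\treturn\n\t\t}\n\n\t\thttp.Error(w, \"bad request\", http.StatusBadRequest)\n\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc main() {\n\toversized := `{\"pad\":\"` + strings.Repeat(\"x\", int(bodyLimit)) + `\"}`\n\treq := httptest.NewRequest(http.MethodPost, \"/machine/map\", strings.NewReader(oversized))\n\trec := httptest.NewRecorder()\n\thandler(rec, req)\n\tfmt.Println(rec.Code) // 413\n}\n"
  },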
  {
    "path": "hscontrol/oidc.go",
    "content": "package hscontrol\n\nimport (\n\t\"bytes\"\n\t\"cmp\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"slices\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/coreos/go-oidc/v3/oidc\"\n\t\"github.com/juanfont/headscale/hscontrol/db\"\n\t\"github.com/juanfont/headscale/hscontrol/templates\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/types/change\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/rs/zerolog/log\"\n\t\"golang.org/x/oauth2\"\n\t\"zgo.at/zcache/v2\"\n)\n\nconst (\n\trandomByteSize           = 16\n\tdefaultOAuthOptionsCount = 3\n\tauthCacheExpiration      = time.Minute * 15\n\tauthCacheCleanup         = time.Minute * 20\n)\n\nvar (\n\terrEmptyOIDCCallbackParams = errors.New(\"empty OIDC callback params\")\n\terrNoOIDCIDToken           = errors.New(\"extracting ID token\")\n\terrNoOIDCRegistrationInfo  = errors.New(\"registration info not in cache\")\n\terrOIDCAllowedDomains      = errors.New(\n\t\t\"authenticated principal does not match any allowed domain\",\n\t)\n\terrOIDCAllowedGroups = errors.New(\"authenticated principal is not in any allowed group\")\n\terrOIDCAllowedUsers  = errors.New(\n\t\t\"authenticated principal does not match any allowed user\",\n\t)\n\terrOIDCUnverifiedEmail = errors.New(\"authenticated principal has an unverified email\")\n)\n\n// AuthInfo contains both auth ID and verifier information for OIDC validation.\ntype AuthInfo struct {\n\tAuthID       types.AuthID\n\tVerifier     *string\n\tRegistration bool\n}\n\ntype AuthProviderOIDC struct {\n\th         *Headscale\n\tserverURL string\n\tcfg       *types.OIDCConfig\n\n\t// authCache holds auth information between\n\t// the auth and the callback steps.\n\tauthCache *zcache.Cache[string, AuthInfo]\n\n\toidcProvider *oidc.Provider\n\toauth2Config *oauth2.Config\n}\n\nfunc NewAuthProviderOIDC(\n\tctx context.Context,\n\th *Headscale,\n\tserverURL string,\n\tcfg *types.OIDCConfig,\n) (*AuthProviderOIDC, error) {\n\tvar err error\n\t// grab oidc config if it hasn't been already\n\toidcProvider, err := oidc.NewProvider(context.Background(), cfg.Issuer) //nolint:contextcheck\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating OIDC provider from issuer config: %w\", err)\n\t}\n\n\toauth2Config := &oauth2.Config{\n\t\tClientID:     cfg.ClientID,\n\t\tClientSecret: cfg.ClientSecret,\n\t\tEndpoint:     oidcProvider.Endpoint(),\n\t\tRedirectURL:  strings.TrimSuffix(serverURL, \"/\") + \"/oidc/callback\",\n\t\tScopes:       cfg.Scope,\n\t}\n\n\tauthCache := zcache.New[string, AuthInfo](\n\t\tauthCacheExpiration,\n\t\tauthCacheCleanup,\n\t)\n\n\treturn &AuthProviderOIDC{\n\t\th:         h,\n\t\tserverURL: serverURL,\n\t\tcfg:       cfg,\n\t\tauthCache: authCache,\n\n\t\toidcProvider: oidcProvider,\n\t\toauth2Config: oauth2Config,\n\t}, nil\n}\n\nfunc (a *AuthProviderOIDC) AuthURL(authID types.AuthID) string {\n\treturn fmt.Sprintf(\n\t\t\"%s/auth/%s\",\n\t\tstrings.TrimSuffix(a.serverURL, \"/\"),\n\t\tauthID.String())\n}\n\nfunc (a *AuthProviderOIDC) AuthHandler(\n\twriter http.ResponseWriter,\n\treq *http.Request,\n) {\n\ta.authHandler(writer, req, false)\n}\n\nfunc (a *AuthProviderOIDC) RegisterURL(authID types.AuthID) string {\n\treturn fmt.Sprintf(\n\t\t\"%s/register/%s\",\n\t\tstrings.TrimSuffix(a.serverURL, \"/\"),\n\t\tauthID.String())\n}\n\n// RegisterHandler registers the OIDC callback handler with the given router.\n// It puts NodeKey in cache so the callback can retrieve it using the oidc state 
param.\n// Listens on /register/:auth_id.\nfunc (a *AuthProviderOIDC) RegisterHandler(\n\twriter http.ResponseWriter,\n\treq *http.Request,\n) {\n\ta.authHandler(writer, req, true)\n}\n\n// authHandler takes an incoming request that needs to be authenticated and\n// validates and prepares it for the OIDC flow.\nfunc (a *AuthProviderOIDC) authHandler(\n\twriter http.ResponseWriter,\n\treq *http.Request,\n\tregistration bool,\n) {\n\tauthID, err := authIDFromRequest(req)\n\tif err != nil {\n\t\thttpError(writer, err)\n\t\treturn\n\t}\n\n\t// Set the state cookie to protect against CSRF attacks\n\tstate, err := setCSRFCookie(writer, req, \"state\")\n\tif err != nil {\n\t\thttpError(writer, err)\n\t\treturn\n\t}\n\n\t// Set the nonce cookie to protect against CSRF attacks\n\tnonce, err := setCSRFCookie(writer, req, \"nonce\")\n\tif err != nil {\n\t\thttpError(writer, err)\n\t\treturn\n\t}\n\n\tregistrationInfo := AuthInfo{\n\t\tAuthID:       authID,\n\t\tRegistration: registration,\n\t}\n\n\textras := make([]oauth2.AuthCodeOption, 0, len(a.cfg.ExtraParams)+defaultOAuthOptionsCount)\n\t// Add PKCE verification if enabled\n\tif a.cfg.PKCE.Enabled {\n\t\tverifier := oauth2.GenerateVerifier()\n\t\tregistrationInfo.Verifier = &verifier\n\n\t\textras = append(extras, oauth2.AccessTypeOffline)\n\n\t\tswitch a.cfg.PKCE.Method {\n\t\tcase types.PKCEMethodS256:\n\t\t\textras = append(extras, oauth2.S256ChallengeOption(verifier))\n\t\tcase types.PKCEMethodPlain:\n\t\t\t// oauth2 does not have a plain challenge option, so we add it manually\n\t\t\textras = append(extras, oauth2.SetAuthURLParam(\"code_challenge_method\", \"plain\"), oauth2.SetAuthURLParam(\"code_challenge\", verifier))\n\t\t}\n\t}\n\n\t// Add any extra parameters from configuration\n\tfor k, v := range a.cfg.ExtraParams {\n\t\textras = append(extras, oauth2.SetAuthURLParam(k, v))\n\t}\n\n\textras = append(extras, oidc.Nonce(nonce))\n\n\t// Cache the registration info\n\ta.authCache.Set(state, registrationInfo)\n\n\tauthURL := a.oauth2Config.AuthCodeURL(state, extras...)\n\tlog.Debug().Caller().Msgf(\"redirecting to %s for authentication\", authURL)\n\n\thttp.Redirect(writer, req, authURL, http.StatusFound)\n}\n\n// OIDCCallbackHandler handles the callback from the OIDC endpoint.\n// It retrieves the node key from the state cache and registers the node under the user matching the authenticated email.\n// TODO: A confirmation page for new nodes should be added to avoid phishing vulnerabilities\n// TODO: Add groups information from OIDC tokens into node HostInfo\n// Listens on /oidc/callback.\nfunc (a *AuthProviderOIDC) OIDCCallbackHandler(\n\twriter http.ResponseWriter,\n\treq *http.Request,\n) {\n\tcode, state, err := extractCodeAndStateParamFromRequest(req)\n\tif err != nil {\n\t\thttpError(writer, err)\n\t\treturn\n\t}\n\n\tstateCookieName := getCookieName(\"state\", state)\n\n\tcookieState, err := req.Cookie(stateCookieName)\n\tif err != nil {\n\t\thttpError(writer, NewHTTPError(http.StatusBadRequest, \"state not found\", err))\n\t\treturn\n\t}\n\n\tif state != cookieState.Value {\n\t\thttpError(writer, NewHTTPError(http.StatusForbidden, \"state did not match\", nil))\n\t\treturn\n\t}\n\n\toauth2Token, err := a.getOauth2Token(req.Context(), code, state)\n\tif err != nil {\n\t\thttpError(writer, err)\n\t\treturn\n\t}\n\n\tidToken, err := a.extractIDToken(req.Context(), oauth2Token)\n\tif err != nil {\n\t\thttpError(writer, err)\n\t\treturn\n\t}\n\n\tif idToken.Nonce == \"\" {\n\t\thttpError(writer, NewHTTPError(http.StatusBadRequest, \"nonce not found in 
IDToken\", err))\n\t\treturn\n\t}\n\n\tnonceCookieName := getCookieName(\"nonce\", idToken.Nonce)\n\n\tnonce, err := req.Cookie(nonceCookieName)\n\tif err != nil {\n\t\thttpError(writer, NewHTTPError(http.StatusBadRequest, \"nonce not found\", err))\n\t\treturn\n\t}\n\n\tif idToken.Nonce != nonce.Value {\n\t\thttpError(writer, NewHTTPError(http.StatusForbidden, \"nonce did not match\", nil))\n\t\treturn\n\t}\n\n\tnodeExpiry := a.determineNodeExpiry(idToken.Expiry)\n\n\tvar claims types.OIDCClaims\n\tif err := idToken.Claims(&claims); err != nil { //nolint:noinlineerr\n\t\thttpError(writer, fmt.Errorf(\"decoding ID token claims: %w\", err))\n\t\treturn\n\t}\n\n\t// Fetch user information (email, groups, name, etc) from the userinfo endpoint\n\t// https://openid.net/specs/openid-connect-core-1_0.html#UserInfo\n\tvar userinfo *oidc.UserInfo\n\n\tuserinfo, err = a.oidcProvider.UserInfo(req.Context(), oauth2.StaticTokenSource(oauth2Token))\n\tif err != nil {\n\t\tutil.LogErr(err, \"could not get userinfo; only using claims from id token\")\n\t}\n\n\t// The oidc.UserInfo type only decodes some fields (Subject, Profile, Email, EmailVerified).\n\t// We are interested in other fields too (e.g. groups are required for allowedGroups) so we\n\t// decode into our own OIDCUserInfo type using the underlying claims struct.\n\tvar userinfo2 types.OIDCUserInfo\n\tif userinfo != nil && userinfo.Claims(&userinfo2) == nil && userinfo2.Sub == claims.Sub {\n\t\t// Update the user with the userinfo claims (with id token claims as fallback).\n\t\t// TODO(kradalby): there might be more interesting fields here that we have not found yet.\n\t\tclaims.Email = cmp.Or(userinfo2.Email, claims.Email)\n\t\tclaims.EmailVerified = cmp.Or(userinfo2.EmailVerified, claims.EmailVerified)\n\t\tclaims.Username = cmp.Or(userinfo2.PreferredUsername, claims.Username)\n\t\tclaims.Name = cmp.Or(userinfo2.Name, claims.Name)\n\n\t\tclaims.ProfilePictureURL = cmp.Or(userinfo2.Picture, claims.ProfilePictureURL)\n\t\tif userinfo2.Groups != nil {\n\t\t\tclaims.Groups = userinfo2.Groups\n\t\t}\n\t} else {\n\t\tutil.LogErr(err, \"could not get userinfo; only using claims from id token\")\n\t}\n\n\t// The user claims are now updated from the userinfo endpoint so we can verify the user\n\t// against allowed emails, email domains, and groups.\n\terr = doOIDCAuthorization(a.cfg, &claims)\n\tif err != nil {\n\t\thttpError(writer, err)\n\t\treturn\n\t}\n\n\tuser, _, err := a.createOrUpdateUserFromClaim(&claims)\n\tif err != nil {\n\t\tlog.Error().\n\t\t\tErr(err).\n\t\t\tCaller().\n\t\t\tMsgf(\"could not create or update user\")\n\t\twriter.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\t\twriter.WriteHeader(http.StatusInternalServerError)\n\n\t\t_, werr := writer.Write([]byte(\"Could not create or update user\"))\n\t\tif werr != nil {\n\t\t\tlog.Error().\n\t\t\t\tCaller().\n\t\t\t\tErr(werr).\n\t\t\t\tMsg(\"Failed to write HTTP response\")\n\t\t}\n\n\t\treturn\n\t}\n\n\t// TODO(kradalby): Is this comment right?\n\t// If the node exists, then the node should be reauthenticated,\n\t// if the node does not exist, and the machine key exists, then\n\t// this is a new node that should be registered.\n\tauthInfo := a.getAuthInfoFromState(state)\n\tif authInfo == nil {\n\t\tlog.Debug().Caller().Str(\"state\", state).Msg(\"state not found in cache, login session may have expired\")\n\t\thttpError(writer, NewHTTPError(http.StatusGone, \"login session expired, try again\", nil))\n\n\t\treturn\n\t}\n\n\t// If this is a registration flow, then 
we need to register the node.\n\tif authInfo.Registration {\n\t\tnewNode, err := a.handleRegistration(user, authInfo.AuthID, nodeExpiry)\n\t\tif err != nil {\n\t\t\tif errors.Is(err, db.ErrNodeNotFoundRegistrationCache) {\n\t\t\t\tlog.Debug().Caller().Str(\"auth_id\", authInfo.AuthID.String()).Msg(\"registration session expired before authorization completed\")\n\t\t\t\thttpError(writer, NewHTTPError(http.StatusGone, \"login session expired, try again\", err))\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\thttpError(writer, err)\n\n\t\t\treturn\n\t\t}\n\n\t\tcontent := renderRegistrationSuccessTemplate(user, newNode)\n\n\t\twriter.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\t\twriter.WriteHeader(http.StatusOK)\n\n\t\tif _, err := writer.Write(content.Bytes()); err != nil { //nolint:noinlineerr\n\t\t\tutil.LogErr(err, \"Failed to write HTTP response\")\n\t\t}\n\n\t\treturn\n\t}\n\n\t// If this is not a registration callback, then it's a regular authentication callback\n\t// and we need to send a response and confirm that the access was allowed.\n\n\tauthReq, ok := a.h.state.GetAuthCacheEntry(authInfo.AuthID)\n\tif !ok {\n\t\tlog.Debug().Caller().Str(\"auth_id\", authInfo.AuthID.String()).Msg(\"auth session expired before authorization completed\")\n\t\thttpError(writer, NewHTTPError(http.StatusGone, \"login session expired, try again\", nil))\n\n\t\treturn\n\t}\n\n\t// Send a finish auth verdict with no errors to let the CLI know that the authentication was successful.\n\tauthReq.FinishAuth(types.AuthVerdict{})\n\n\tcontent := renderAuthSuccessTemplate(user)\n\n\twriter.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\twriter.WriteHeader(http.StatusOK)\n\n\tif _, err := writer.Write(content.Bytes()); err != nil { //nolint:noinlineerr\n\t\tutil.LogErr(err, \"Failed to write HTTP response\")\n\t}\n}\n\nfunc (a *AuthProviderOIDC) determineNodeExpiry(idTokenExpiration time.Time) time.Time {\n\tif a.cfg.UseExpiryFromToken {\n\t\treturn idTokenExpiration\n\t}\n\n\treturn time.Now().Add(a.cfg.Expiry)\n}\n\nfunc extractCodeAndStateParamFromRequest(\n\treq *http.Request,\n) (string, string, error) {\n\tcode := req.URL.Query().Get(\"code\")\n\tstate := req.URL.Query().Get(\"state\")\n\n\tif code == \"\" || state == \"\" {\n\t\treturn \"\", \"\", NewHTTPError(http.StatusBadRequest, \"missing code or state parameter\", errEmptyOIDCCallbackParams)\n\t}\n\n\treturn code, state, nil\n}\n\n// getOauth2Token exchanges the code from the callback for an oauth2 token.\nfunc (a *AuthProviderOIDC) getOauth2Token(\n\tctx context.Context,\n\tcode string,\n\tstate string,\n) (*oauth2.Token, error) {\n\tvar exchangeOpts []oauth2.AuthCodeOption\n\n\tif a.cfg.PKCE.Enabled {\n\t\tregInfo, ok := a.authCache.Get(state)\n\t\tif !ok {\n\t\t\treturn nil, NewHTTPError(http.StatusNotFound, \"registration not found\", errNoOIDCRegistrationInfo)\n\t\t}\n\n\t\tif regInfo.Verifier != nil {\n\t\t\texchangeOpts = []oauth2.AuthCodeOption{oauth2.VerifierOption(*regInfo.Verifier)}\n\t\t}\n\t}\n\n\toauth2Token, err := a.oauth2Config.Exchange(ctx, code, exchangeOpts...)\n\tif err != nil {\n\t\treturn nil, NewHTTPError(http.StatusForbidden, \"invalid code\", fmt.Errorf(\"exchanging code for token: %w\", err))\n\t}\n\n\treturn oauth2Token, err\n}\n\n// extractIDToken extracts the ID token from the oauth2 token.\nfunc (a *AuthProviderOIDC) extractIDToken(\n\tctx context.Context,\n\toauth2Token *oauth2.Token,\n) (*oidc.IDToken, error) {\n\trawIDToken, ok := oauth2Token.Extra(\"id_token\").(string)\n\tif !ok 
{\n\t\treturn nil, NewHTTPError(http.StatusBadRequest, \"no id_token\", errNoOIDCIDToken)\n\t}\n\n\tverifier := a.oidcProvider.Verifier(&oidc.Config{ClientID: a.cfg.ClientID})\n\n\tidToken, err := verifier.Verify(ctx, rawIDToken)\n\tif err != nil {\n\t\treturn nil, NewHTTPError(http.StatusForbidden, \"failed to verify id_token\", fmt.Errorf(\"verifying ID token: %w\", err))\n\t}\n\n\treturn idToken, nil\n}\n\n// validateOIDCAllowedDomains checks that if AllowedDomains is provided,\n// that the authenticated principal ends with @<alloweddomain>.\nfunc validateOIDCAllowedDomains(\n\tallowedDomains []string,\n\tclaims *types.OIDCClaims,\n) error {\n\tif len(allowedDomains) > 0 {\n\t\tif at := strings.LastIndex(claims.Email, \"@\"); at < 0 ||\n\t\t\t!slices.Contains(allowedDomains, claims.Email[at+1:]) {\n\t\t\treturn NewHTTPError(http.StatusUnauthorized, \"unauthorised domain\", errOIDCAllowedDomains)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// validateOIDCAllowedGroups checks if AllowedGroups is provided,\n// and that the user has one group in the list.\n// claims.Groups can be populated by adding a client scope named\n// 'groups' that contains group membership.\nfunc validateOIDCAllowedGroups(\n\tallowedGroups []string,\n\tclaims *types.OIDCClaims,\n) error {\n\tfor _, group := range allowedGroups {\n\t\tif slices.Contains(claims.Groups, group) {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn NewHTTPError(http.StatusUnauthorized, \"unauthorised group\", errOIDCAllowedGroups)\n}\n\n// validateOIDCAllowedUsers checks that if AllowedUsers is provided,\n// that the authenticated principal is part of that list.\nfunc validateOIDCAllowedUsers(\n\tallowedUsers []string,\n\tclaims *types.OIDCClaims,\n) error {\n\tif !slices.Contains(allowedUsers, claims.Email) {\n\t\treturn NewHTTPError(http.StatusUnauthorized, \"unauthorised user\", errOIDCAllowedUsers)\n\t}\n\n\treturn nil\n}\n\n// doOIDCAuthorization applies authorization tests to claims.\n//\n// The following tests are always applied:\n//\n// - validateOIDCAllowedGroups\n//\n// The following tests are applied if cfg.EmailVerifiedRequired=false\n// or claims.email_verified=true:\n//\n// - validateOIDCAllowedDomains\n// - validateOIDCAllowedUsers\n//\n// NOTE that, contrary to the function name, validateOIDCAllowedUsers\n// only checks the email address -- not the username.\nfunc doOIDCAuthorization(\n\tcfg *types.OIDCConfig,\n\tclaims *types.OIDCClaims,\n) error {\n\tif len(cfg.AllowedGroups) > 0 {\n\t\terr := validateOIDCAllowedGroups(cfg.AllowedGroups, claims)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ttrustEmail := !cfg.EmailVerifiedRequired || bool(claims.EmailVerified)\n\n\thasEmailTests := len(cfg.AllowedDomains) > 0 || len(cfg.AllowedUsers) > 0\n\tif !trustEmail && hasEmailTests {\n\t\treturn NewHTTPError(http.StatusUnauthorized, \"unverified email\", errOIDCUnverifiedEmail)\n\t}\n\n\tif len(cfg.AllowedDomains) > 0 {\n\t\terr := validateOIDCAllowedDomains(cfg.AllowedDomains, claims)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(cfg.AllowedUsers) > 0 {\n\t\terr := validateOIDCAllowedUsers(cfg.AllowedUsers, claims)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// getAuthInfoFromState retrieves the registration ID from the state.\nfunc (a *AuthProviderOIDC) getAuthInfoFromState(state string) *AuthInfo {\n\tauthInfo, ok := a.authCache.Get(state)\n\tif !ok {\n\t\treturn nil\n\t}\n\n\treturn &authInfo\n}\n\nfunc (a *AuthProviderOIDC) createOrUpdateUserFromClaim(\n\tclaims *types.OIDCClaims,\n) 
(*types.User, change.Change, error) {\n\tvar (\n\t\tuser    *types.User\n\t\terr     error\n\t\tnewUser bool\n\t\tc       change.Change\n\t)\n\n\tuser, err = a.h.state.GetUserByOIDCIdentifier(claims.Identifier())\n\tif err != nil && !errors.Is(err, db.ErrUserNotFound) {\n\t\treturn nil, change.Change{}, fmt.Errorf(\"creating or updating user: %w\", err)\n\t}\n\n\t// if the user is still not found, create a new empty user.\n\t// TODO(kradalby): This context is not inherited from the request, which is probably not ideal.\n\t// However, we need a context to use the OIDC provider.\n\tif user == nil {\n\t\tnewUser = true\n\t\tuser = &types.User{}\n\t}\n\n\tuser.FromClaim(claims, a.cfg.EmailVerifiedRequired)\n\n\tif newUser {\n\t\tuser, c, err = a.h.state.CreateUser(*user)\n\t\tif err != nil {\n\t\t\treturn nil, change.Change{}, fmt.Errorf(\"creating user: %w\", err)\n\t\t}\n\t} else {\n\t\t_, c, err = a.h.state.UpdateUser(types.UserID(user.ID), func(u *types.User) error {\n\t\t\t*u = *user\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, change.Change{}, fmt.Errorf(\"updating user: %w\", err)\n\t\t}\n\t}\n\n\treturn user, c, nil\n}\n\nfunc (a *AuthProviderOIDC) handleRegistration(\n\tuser *types.User,\n\tregistrationID types.AuthID,\n\texpiry time.Time,\n) (bool, error) {\n\tnode, nodeChange, err := a.h.state.HandleNodeFromAuthPath(\n\t\tregistrationID,\n\t\ttypes.UserID(user.ID),\n\t\t&expiry,\n\t\tutil.RegisterMethodOIDC,\n\t)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"registering node: %w\", err)\n\t}\n\n\t// This is a bit of a back and forth, but we have a bit of a chicken and egg\n\t// dependency here.\n\t// Because the way the policy manager works, we need to have the node\n\t// in the database, then add it to the policy manager and then we can\n\t// approve the route. This means we get this dance where the node is\n\t// first added to the database, then we add it to the policy manager via\n\t// SaveNode (which automatically updates the policy manager) and then we can auto approve the routes.\n\t// As that only approves the struct object, we need to save it again and\n\t// ensure we send an update.\n\t// This works, but might be another good candidate for doing some sort of\n\t// eventbus.\n\troutesChange, err := a.h.state.AutoApproveRoutes(node)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"auto approving routes: %w\", err)\n\t}\n\n\t// Send both changes. 
Empty changes are ignored by Change().\n\ta.h.Change(nodeChange, routesChange)\n\n\treturn !nodeChange.IsEmpty(), nil\n}\n\nfunc renderRegistrationSuccessTemplate(\n\tuser *types.User,\n\tnewNode bool,\n) *bytes.Buffer {\n\tresult := templates.AuthSuccessResult{\n\t\tTitle:   \"Headscale - Node Reauthenticated\",\n\t\tHeading: \"Node reauthenticated\",\n\t\tVerb:    \"Reauthenticated\",\n\t\tUser:    user.Display(),\n\t\tMessage: \"You can now close this window.\",\n\t}\n\tif newNode {\n\t\tresult.Title = \"Headscale - Node Registered\"\n\t\tresult.Heading = \"Node registered\"\n\t\tresult.Verb = \"Registered\"\n\t}\n\n\treturn bytes.NewBufferString(templates.AuthSuccess(result).Render())\n}\n\nfunc renderAuthSuccessTemplate(\n\tuser *types.User,\n) *bytes.Buffer {\n\tresult := templates.AuthSuccessResult{\n\t\tTitle:   \"Headscale - SSH Session Authorized\",\n\t\tHeading: \"SSH session authorized\",\n\t\tVerb:    \"Authorized\",\n\t\tUser:    user.Display(),\n\t\tMessage: \"You may return to your terminal.\",\n\t}\n\n\treturn bytes.NewBufferString(templates.AuthSuccess(result).Render())\n}\n\n// getCookieName generates a unique cookie name based on a cookie value.\nfunc getCookieName(baseName, value string) string {\n\treturn fmt.Sprintf(\"%s_%s\", baseName, value[:6])\n}\n\nfunc setCSRFCookie(w http.ResponseWriter, r *http.Request, name string) (string, error) {\n\tval, err := util.GenerateRandomStringURLSafe(64)\n\tif err != nil {\n\t\treturn val, err\n\t}\n\n\tc := &http.Cookie{\n\t\tPath:     \"/oidc/callback\",\n\t\tName:     getCookieName(name, val),\n\t\tValue:    val,\n\t\tMaxAge:   int(time.Hour.Seconds()),\n\t\tSecure:   r.TLS != nil,\n\t\tHttpOnly: true,\n\t}\n\thttp.SetCookie(w, c)\n\n\treturn val, nil\n}\n"
  },
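  {
    "path": "docs/examples/pkce_flow_sketch.go",
    "content": "// Illustrative sketch (hypothetical file, not part of headscale): it walks\n// through the PKCE round trip used by authHandler and getOauth2Token. The\n// verifier is created at the auth step, the S256 challenge goes into the\n// authorize URL, and the same verifier must be replayed at the token\n// exchange. The endpoint URLs and the plain map cache are stand-ins for a\n// real IdP and headscale's state-keyed authCache.\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"golang.org/x/oauth2\"\n)\n\n// authCache stands in for the state-keyed cache of AuthInfo.\nvar authCache = map[string]string{}\n\nfunc authStep(conf *oauth2.Config, state string) string {\n\tverifier := oauth2.GenerateVerifier()\n\tauthCache[state] = verifier // headscale keeps it in AuthInfo.Verifier\n\n\t// S256ChallengeOption adds code_challenge and code_challenge_method=S256.\n\treturn conf.AuthCodeURL(state, oauth2.AccessTypeOffline, oauth2.S256ChallengeOption(verifier))\n}\n\nfunc callbackStep(ctx context.Context, conf *oauth2.Config, code, state string) (*oauth2.Token, error) {\n\tverifier, ok := authCache[state]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"no auth session for state %q\", state)\n\t}\n\n\t// VerifierOption sends code_verifier so the IdP can check the challenge.\n\treturn conf.Exchange(ctx, code, oauth2.VerifierOption(verifier))\n}\n\nfunc main() {\n\tconf := &oauth2.Config{\n\t\tClientID:    \"headscale\",\n\t\tRedirectURL: \"https://headscale.example.com/oidc/callback\",\n\t\tEndpoint: oauth2.Endpoint{\n\t\t\tAuthURL:  \"https://idp.example.com/authorize\",\n\t\t\tTokenURL: \"https://idp.example.com/token\",\n\t\t},\n\t}\n\n\t// Prints the authorize URL containing state, code_challenge and\n\t// code_challenge_method; callbackStep would then redeem the code.\n\tfmt.Println(authStep(conf, \"some-state\"))\n}\n"
  },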
  {
    "path": "hscontrol/oidc_template_test.go",
    "content": "package hscontrol\n\nimport (\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/templates\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestAuthSuccessTemplate(t *testing.T) {\n\ttests := []struct {\n\t\tname   string\n\t\tresult templates.AuthSuccessResult\n\t}{\n\t\t{\n\t\t\tname: \"node_registered\",\n\t\t\tresult: templates.AuthSuccessResult{\n\t\t\t\tTitle:   \"Headscale - Node Registered\",\n\t\t\t\tHeading: \"Node registered\",\n\t\t\t\tVerb:    \"Registered\",\n\t\t\t\tUser:    \"newuser@example.com\",\n\t\t\t\tMessage: \"You can now close this window.\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"node_reauthenticated\",\n\t\t\tresult: templates.AuthSuccessResult{\n\t\t\t\tTitle:   \"Headscale - Node Reauthenticated\",\n\t\t\t\tHeading: \"Node reauthenticated\",\n\t\t\t\tVerb:    \"Reauthenticated\",\n\t\t\t\tUser:    \"test@example.com\",\n\t\t\t\tMessage: \"You can now close this window.\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ssh_session_authorized\",\n\t\t\tresult: templates.AuthSuccessResult{\n\t\t\t\tTitle:   \"Headscale - SSH Session Authorized\",\n\t\t\t\tHeading: \"SSH session authorized\",\n\t\t\t\tVerb:    \"Authorized\",\n\t\t\t\tUser:    \"test@example.com\",\n\t\t\t\tMessage: \"You may return to your terminal.\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\thtml := templates.AuthSuccess(tt.result).Render()\n\n\t\t\t// Verify the HTML contains expected structural elements\n\t\t\tassert.Contains(t, html, \"<!DOCTYPE html>\")\n\t\t\tassert.Contains(t, html, \"<title>\"+tt.result.Title+\"</title>\")\n\t\t\tassert.Contains(t, html, tt.result.Heading)\n\t\t\tassert.Contains(t, html, tt.result.Verb+\" as \")\n\t\t\tassert.Contains(t, html, tt.result.User)\n\t\t\tassert.Contains(t, html, tt.result.Message)\n\n\t\t\t// Verify Material for MkDocs design system CSS is present\n\t\t\tassert.Contains(t, html, \"Material for MkDocs\")\n\t\t\tassert.Contains(t, html, \"Roboto\")\n\t\t\tassert.Contains(t, html, \".md-typeset\")\n\n\t\t\t// Verify SVG elements are present\n\t\t\tassert.Contains(t, html, \"<svg\")\n\t\t\tassert.Contains(t, html, \"class=\\\"headscale-logo\\\"\")\n\t\t\tassert.Contains(t, html, \"id=\\\"checkbox\\\"\")\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "hscontrol/oidc_test.go",
    "content": "package hscontrol\n\nimport (\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n)\n\nfunc TestDoOIDCAuthorization(t *testing.T) {\n\ttestCases := []struct {\n\t\tname    string\n\t\tcfg     *types.OIDCConfig\n\t\tclaims  *types.OIDCClaims\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname:    \"verified email domain\",\n\t\t\twantErr: false,\n\t\t\tcfg: &types.OIDCConfig{\n\t\t\t\tEmailVerifiedRequired: true,\n\t\t\t\tAllowedDomains:        []string{\"test.com\"},\n\t\t\t\tAllowedUsers:          []string{},\n\t\t\t\tAllowedGroups:         []string{},\n\t\t\t},\n\t\t\tclaims: &types.OIDCClaims{\n\t\t\t\tEmail:         \"user@test.com\",\n\t\t\t\tEmailVerified: true,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"verified email user\",\n\t\t\twantErr: false,\n\t\t\tcfg: &types.OIDCConfig{\n\t\t\t\tEmailVerifiedRequired: true,\n\t\t\t\tAllowedDomains:        []string{},\n\t\t\t\tAllowedUsers:          []string{\"user@test.com\"},\n\t\t\t\tAllowedGroups:         []string{},\n\t\t\t},\n\t\t\tclaims: &types.OIDCClaims{\n\t\t\t\tEmail:         \"user@test.com\",\n\t\t\t\tEmailVerified: true,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"unverified email domain\",\n\t\t\twantErr: true,\n\t\t\tcfg: &types.OIDCConfig{\n\t\t\t\tEmailVerifiedRequired: true,\n\t\t\t\tAllowedDomains:        []string{\"test.com\"},\n\t\t\t\tAllowedUsers:          []string{},\n\t\t\t\tAllowedGroups:         []string{},\n\t\t\t},\n\t\t\tclaims: &types.OIDCClaims{\n\t\t\t\tEmail:         \"user@test.com\",\n\t\t\t\tEmailVerified: false,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"group member\",\n\t\t\twantErr: false,\n\t\t\tcfg: &types.OIDCConfig{\n\t\t\t\tEmailVerifiedRequired: true,\n\t\t\t\tAllowedDomains:        []string{},\n\t\t\t\tAllowedUsers:          []string{},\n\t\t\t\tAllowedGroups:         []string{\"test\"},\n\t\t\t},\n\t\t\tclaims: &types.OIDCClaims{Groups: []string{\"test\"}},\n\t\t},\n\t\t{\n\t\t\tname:    \"non group member\",\n\t\t\twantErr: true,\n\t\t\tcfg: &types.OIDCConfig{\n\t\t\t\tEmailVerifiedRequired: true,\n\t\t\t\tAllowedDomains:        []string{},\n\t\t\t\tAllowedUsers:          []string{},\n\t\t\t\tAllowedGroups:         []string{\"nope\"},\n\t\t\t},\n\t\t\tclaims: &types.OIDCClaims{Groups: []string{\"testo\"}},\n\t\t},\n\t\t{\n\t\t\tname:    \"group member but bad domain\",\n\t\t\twantErr: true,\n\t\t\tcfg: &types.OIDCConfig{\n\t\t\t\tEmailVerifiedRequired: true,\n\t\t\t\tAllowedDomains:        []string{\"user@good.com\"},\n\t\t\t\tAllowedUsers:          []string{},\n\t\t\t\tAllowedGroups:         []string{\"test group\"},\n\t\t\t},\n\t\t\tclaims: &types.OIDCClaims{Groups: []string{\"test group\"}, Email: \"bad@bad.com\", EmailVerified: true},\n\t\t},\n\t\t{\n\t\t\tname:    \"all checks pass\",\n\t\t\twantErr: false,\n\t\t\tcfg: &types.OIDCConfig{\n\t\t\t\tEmailVerifiedRequired: true,\n\t\t\t\tAllowedDomains:        []string{\"test.com\"},\n\t\t\t\tAllowedUsers:          []string{\"user@test.com\"},\n\t\t\t\tAllowedGroups:         []string{\"test group\"},\n\t\t\t},\n\t\t\tclaims: &types.OIDCClaims{Groups: []string{\"test group\"}, Email: \"user@test.com\", EmailVerified: true},\n\t\t},\n\t\t{\n\t\t\tname:    \"all checks pass with unverified email\",\n\t\t\twantErr: false,\n\t\t\tcfg: &types.OIDCConfig{\n\t\t\t\tEmailVerifiedRequired: false,\n\t\t\t\tAllowedDomains:        []string{\"test.com\"},\n\t\t\t\tAllowedUsers:          []string{\"user@test.com\"},\n\t\t\t\tAllowedGroups:         []string{\"test group\"},\n\t\t\t},\n\t\t\tclaims: &types.OIDCClaims{Groups: 
[]string{\"test group\"}, Email: \"user@test.com\", EmailVerified: false},\n\t\t},\n\t\t{\n\t\t\tname:    \"fail on unverified email\",\n\t\t\twantErr: true,\n\t\t\tcfg: &types.OIDCConfig{\n\t\t\t\tEmailVerifiedRequired: true,\n\t\t\t\tAllowedDomains:        []string{\"test.com\"},\n\t\t\t\tAllowedUsers:          []string{\"user@test.com\"},\n\t\t\t\tAllowedGroups:         []string{\"test group\"},\n\t\t\t},\n\t\t\tclaims: &types.OIDCClaims{Groups: []string{\"test group\"}, Email: \"user@test.com\", EmailVerified: false},\n\t\t},\n\t\t{\n\t\t\tname:    \"unverified email user only\",\n\t\t\twantErr: true,\n\t\t\tcfg: &types.OIDCConfig{\n\t\t\t\tEmailVerifiedRequired: true,\n\t\t\t\tAllowedDomains:        []string{},\n\t\t\t\tAllowedUsers:          []string{\"user@test.com\"},\n\t\t\t\tAllowedGroups:         []string{},\n\t\t\t},\n\t\t\tclaims: &types.OIDCClaims{\n\t\t\t\tEmail:         \"user@test.com\",\n\t\t\t\tEmailVerified: false,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"no filters configured\",\n\t\t\twantErr: false,\n\t\t\tcfg: &types.OIDCConfig{\n\t\t\t\tEmailVerifiedRequired: true,\n\t\t\t\tAllowedDomains:        []string{},\n\t\t\t\tAllowedUsers:          []string{},\n\t\t\t\tAllowedGroups:         []string{},\n\t\t\t},\n\t\t\tclaims: &types.OIDCClaims{\n\t\t\t\tEmail:         \"anyone@anywhere.com\",\n\t\t\t\tEmailVerified: false,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"multiple allowed groups second matches\",\n\t\t\twantErr: false,\n\t\t\tcfg: &types.OIDCConfig{\n\t\t\t\tEmailVerifiedRequired: true,\n\t\t\t\tAllowedDomains:        []string{},\n\t\t\t\tAllowedUsers:          []string{},\n\t\t\t\tAllowedGroups:         []string{\"group1\", \"group2\", \"group3\"},\n\t\t\t},\n\t\t\tclaims: &types.OIDCClaims{Groups: []string{\"group2\"}},\n\t\t},\n\t}\n\n\tfor _, tC := range testCases {\n\t\tt.Run(tC.name, func(t *testing.T) {\n\t\t\terr := doOIDCAuthorization(tC.cfg, tC.claims)\n\t\t\tif ((err != nil) && !tC.wantErr) || ((err == nil) && tC.wantErr) {\n\t\t\t\tt.Errorf(\"bad authorization: %s > want=%v | got=%v\", tC.name, tC.wantErr, err)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
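  {
    "path": "docs/examples/oidc_authorization_sketch.go",
    "content": "// Hypothetical example file, NOT part of headscale. It restates, as a\n// standalone program, the authorization semantics that the test table in\n// hscontrol/oidc_test.go implies for the unexported doOIDCAuthorization:\n// every configured filter class (domains, users, groups) must pass, the\n// email-based checks reject unverified emails when verification is\n// required, and with no filters configured everyone is allowed. The names\n// oidcConfig, oidcClaims and authorize are illustrative stand-ins.\npackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"slices\"\n\t\"strings\"\n)\n\ntype oidcConfig struct {\n\tEmailVerifiedRequired bool\n\tAllowedDomains        []string\n\tAllowedUsers          []string\n\tAllowedGroups         []string\n}\n\ntype oidcClaims struct {\n\tEmail         string\n\tEmailVerified bool\n\tGroups        []string\n}\n\nfunc authorize(cfg oidcConfig, claims oidcClaims) error {\n\t// An email is only usable for filtering when it is verified or\n\t// when verification is not required.\n\temailUsable := claims.EmailVerified || !cfg.EmailVerifiedRequired\n\n\tif len(cfg.AllowedDomains) > 0 {\n\t\t_, domain, _ := strings.Cut(claims.Email, \"@\")\n\t\tif !emailUsable || !slices.Contains(cfg.AllowedDomains, domain) {\n\t\t\treturn errors.New(\"email domain not allowed\")\n\t\t}\n\t}\n\n\tif len(cfg.AllowedUsers) > 0 {\n\t\tif !emailUsable || !slices.Contains(cfg.AllowedUsers, claims.Email) {\n\t\t\treturn errors.New(\"user not allowed\")\n\t\t}\n\t}\n\n\tif len(cfg.AllowedGroups) > 0 {\n\t\tmember := slices.ContainsFunc(claims.Groups, func(g string) bool {\n\t\t\treturn slices.Contains(cfg.AllowedGroups, g)\n\t\t})\n\t\tif !member {\n\t\t\treturn errors.New(\"not a member of an allowed group\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc main() {\n\tcfg := oidcConfig{EmailVerifiedRequired: true, AllowedDomains: []string{\"test.com\"}}\n\tfmt.Println(authorize(cfg, oidcClaims{Email: \"user@test.com\", EmailVerified: true}))  // <nil>\n\tfmt.Println(authorize(cfg, oidcClaims{Email: \"user@test.com\", EmailVerified: false})) // domain check fails\n}\n"
  },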
  {
    "path": "hscontrol/platform_config.go",
    "content": "package hscontrol\n\nimport (\n\t\"bytes\"\n\t_ \"embed\"\n\t\"html/template\"\n\t\"net/http\"\n\ttextTemplate \"text/template\"\n\n\t\"github.com/gofrs/uuid/v5\"\n\t\"github.com/gorilla/mux\"\n\t\"github.com/juanfont/headscale/hscontrol/templates\"\n)\n\n// WindowsConfigMessage shows a simple message in the browser for how to configure the Windows Tailscale client.\nfunc (h *Headscale) WindowsConfigMessage(\n\twriter http.ResponseWriter,\n\treq *http.Request,\n) {\n\twriter.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\twriter.WriteHeader(http.StatusOK)\n\t_, _ = writer.Write([]byte(templates.Windows(h.cfg.ServerURL).Render()))\n}\n\n// AppleConfigMessage shows a simple message in the browser to point the user to the iOS/MacOS profile and instructions for how to install it.\nfunc (h *Headscale) AppleConfigMessage(\n\twriter http.ResponseWriter,\n\treq *http.Request,\n) {\n\twriter.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\twriter.WriteHeader(http.StatusOK)\n\t_, _ = writer.Write([]byte(templates.Apple(h.cfg.ServerURL).Render()))\n}\n\nfunc (h *Headscale) ApplePlatformConfig(\n\twriter http.ResponseWriter,\n\treq *http.Request,\n) {\n\tvars := mux.Vars(req)\n\n\tplatform, ok := vars[\"platform\"]\n\tif !ok {\n\t\thttpError(writer, NewHTTPError(http.StatusBadRequest, \"no platform specified\", nil))\n\t\treturn\n\t}\n\n\tid, err := uuid.NewV4()\n\tif err != nil {\n\t\thttpError(writer, err)\n\t\treturn\n\t}\n\n\tcontentID, err := uuid.NewV4()\n\tif err != nil {\n\t\thttpError(writer, err)\n\t\treturn\n\t}\n\n\tplatformConfig := AppleMobilePlatformConfig{\n\t\tUUID: contentID,\n\t\tURL:  h.cfg.ServerURL,\n\t}\n\n\tvar payload bytes.Buffer\n\n\tswitch platform {\n\tcase \"macos-standalone\":\n\t\terr := macosStandaloneTemplate.Execute(&payload, platformConfig)\n\t\tif err != nil {\n\t\t\thttpError(writer, err)\n\t\t\treturn\n\t\t}\n\tcase \"macos-app-store\":\n\t\terr := macosAppStoreTemplate.Execute(&payload, platformConfig)\n\t\tif err != nil {\n\t\t\thttpError(writer, err)\n\t\t\treturn\n\t\t}\n\tcase \"ios\":\n\t\terr := iosTemplate.Execute(&payload, platformConfig)\n\t\tif err != nil {\n\t\t\thttpError(writer, err)\n\t\t\treturn\n\t\t}\n\tdefault:\n\t\thttpError(writer, NewHTTPError(http.StatusBadRequest, \"platform must be ios, macos-app-store or macos-standalone\", nil))\n\t\treturn\n\t}\n\n\tconfig := AppleMobileConfig{\n\t\tUUID:    id,\n\t\tURL:     h.cfg.ServerURL,\n\t\tPayload: payload.String(),\n\t}\n\n\tvar content bytes.Buffer\n\tif err := commonTemplate.Execute(&content, config); err != nil { //nolint:noinlineerr\n\t\thttpError(writer, err)\n\t\treturn\n\t}\n\n\twriter.Header().\n\t\tSet(\"Content-Type\", \"application/x-apple-aspen-config; charset=utf-8\")\n\twriter.WriteHeader(http.StatusOK)\n\t_, _ = writer.Write(content.Bytes())\n}\n\ntype AppleMobileConfig struct {\n\tUUID    uuid.UUID\n\tURL     string\n\tPayload string\n}\n\ntype AppleMobilePlatformConfig struct {\n\tUUID uuid.UUID\n\tURL  string\n}\n\nvar commonTemplate = textTemplate.Must(\n\ttextTemplate.New(\"mobileconfig\").Parse(`<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n  <dict>\n    <key>PayloadUUID</key>\n    <string>{{.UUID}}</string>\n    <key>PayloadDisplayName</key>\n    <string>Headscale</string>\n    <key>PayloadDescription</key>\n    <string>Configure Tailscale login server to: {{.URL}}</string>\n    
<key>PayloadIdentifier</key>\n    <string>com.github.juanfont.headscale</string>\n    <key>PayloadRemovalDisallowed</key>\n    <false/>\n    <key>PayloadType</key>\n    <string>Configuration</string>\n    <key>PayloadVersion</key>\n    <integer>1</integer>\n    <key>PayloadContent</key>\n    <array>\n    {{.Payload}}\n    </array>\n  </dict>\n</plist>`),\n)\n\nvar iosTemplate = textTemplate.Must(textTemplate.New(\"iosTemplate\").Parse(`\n    <dict>\n        <key>PayloadType</key>\n        <string>io.tailscale.ipn.ios</string>\n        <key>PayloadUUID</key>\n        <string>{{.UUID}}</string>\n        <key>PayloadIdentifier</key>\n        <string>com.github.juanfont.headscale</string>\n        <key>PayloadVersion</key>\n        <integer>1</integer>\n        <key>PayloadEnabled</key>\n        <true/>\n\n        <key>ControlURL</key>\n        <string>{{.URL}}</string>\n    </dict>\n`))\n\nvar macosAppStoreTemplate = template.Must(template.New(\"macosTemplate\").Parse(`\n    <dict>\n        <key>PayloadType</key>\n        <string>io.tailscale.ipn.macos</string>\n        <key>PayloadUUID</key>\n        <string>{{.UUID}}</string>\n        <key>PayloadIdentifier</key>\n        <string>com.github.juanfont.headscale</string>\n        <key>PayloadVersion</key>\n        <integer>1</integer>\n        <key>PayloadEnabled</key>\n        <true/>\n        <key>ControlURL</key>\n        <string>{{.URL}}</string>\n    </dict>\n`))\n\nvar macosStandaloneTemplate = template.Must(template.New(\"macosStandaloneTemplate\").Parse(`\n    <dict>\n        <key>PayloadType</key>\n        <string>io.tailscale.ipn.macsys</string>\n        <key>PayloadUUID</key>\n        <string>{{.UUID}}</string>\n        <key>PayloadIdentifier</key>\n        <string>com.github.juanfont.headscale</string>\n        <key>PayloadVersion</key>\n        <integer>1</integer>\n        <key>PayloadEnabled</key>\n        <true/>\n        <key>ControlURL</key>\n        <string>{{.URL}}</string>\n    </dict>\n`))\n"
  },
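  {
    "path": "docs/examples/mobileconfig_render_sketch.go",
    "content": "// Hypothetical example file, NOT part of headscale. A minimal sketch of\n// the two-stage template rendering that ApplePlatformConfig performs in\n// hscontrol/platform_config.go: the platform-specific payload is executed\n// first and the result is spliced into the outer plist. The template\n// bodies and the server URL here are trimmed stand-ins, not the real\n// profiles.\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"text/template\"\n\n\t\"github.com/gofrs/uuid/v5\"\n)\n\nvar outer = template.Must(template.New(\"outer\").Parse(\n\t\"<plist><dict><key>PayloadUUID</key><string>{{.UUID}}</string>\" +\n\t\t\"<key>PayloadContent</key><array>{{.Payload}}</array></dict></plist>\"))\n\nvar inner = template.Must(template.New(\"inner\").Parse(\n\t\"<dict><key>ControlURL</key><string>{{.URL}}</string></dict>\"))\n\nfunc main() {\n\tid := uuid.Must(uuid.NewV4())\n\n\t// Render the inner, platform-specific payload first.\n\tvar payload bytes.Buffer\n\tif err := inner.Execute(&payload, struct{ URL string }{\"https://headscale.example.com\"}); err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Then embed it verbatim in the outer configuration profile.\n\tvar out bytes.Buffer\n\terr := outer.Execute(&out, struct {\n\t\tUUID    uuid.UUID\n\t\tPayload string\n\t}{id, payload.String()})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(out.String())\n}\n"
  },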
  {
    "path": "hscontrol/policy/matcher/matcher.go",
    "content": "package matcher\n\nimport (\n\t\"net/netip\"\n\t\"slices\"\n\t\"strings\"\n\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"go4.org/netipx\"\n\t\"tailscale.com/net/tsaddr\"\n\t\"tailscale.com/tailcfg\"\n)\n\ntype Match struct {\n\tsrcs  *netipx.IPSet\n\tdests *netipx.IPSet\n}\n\nfunc (m *Match) DebugString() string {\n\tvar sb strings.Builder\n\n\tsb.WriteString(\"Match:\\n\")\n\tsb.WriteString(\"  Sources:\\n\")\n\n\tfor _, prefix := range m.srcs.Prefixes() {\n\t\tsb.WriteString(\"    \" + prefix.String() + \"\\n\")\n\t}\n\n\tsb.WriteString(\"  Destinations:\\n\")\n\n\tfor _, prefix := range m.dests.Prefixes() {\n\t\tsb.WriteString(\"    \" + prefix.String() + \"\\n\")\n\t}\n\n\treturn sb.String()\n}\n\nfunc MatchesFromFilterRules(rules []tailcfg.FilterRule) []Match {\n\tmatches := make([]Match, 0, len(rules))\n\tfor _, rule := range rules {\n\t\tmatches = append(matches, MatchFromFilterRule(rule))\n\t}\n\n\treturn matches\n}\n\nfunc MatchFromFilterRule(rule tailcfg.FilterRule) Match {\n\tdests := make([]string, 0, len(rule.DstPorts))\n\tfor _, dest := range rule.DstPorts {\n\t\tdests = append(dests, dest.IP)\n\t}\n\n\treturn MatchFromStrings(rule.SrcIPs, dests)\n}\n\nfunc MatchFromStrings(sources, destinations []string) Match {\n\tsrcs := new(netipx.IPSetBuilder)\n\tdests := new(netipx.IPSetBuilder)\n\n\tfor _, srcIP := range sources {\n\t\tset, _ := util.ParseIPSet(srcIP, nil)\n\n\t\tsrcs.AddSet(set)\n\t}\n\n\tfor _, dest := range destinations {\n\t\tset, _ := util.ParseIPSet(dest, nil)\n\n\t\tdests.AddSet(set)\n\t}\n\n\tsrcsSet, _ := srcs.IPSet()\n\tdestsSet, _ := dests.IPSet()\n\n\tmatch := Match{\n\t\tsrcs:  srcsSet,\n\t\tdests: destsSet,\n\t}\n\n\treturn match\n}\n\nfunc (m *Match) SrcsContainsIPs(ips ...netip.Addr) bool {\n\treturn slices.ContainsFunc(ips, m.srcs.Contains)\n}\n\nfunc (m *Match) DestsContainsIP(ips ...netip.Addr) bool {\n\treturn slices.ContainsFunc(ips, m.dests.Contains)\n}\n\nfunc (m *Match) SrcsOverlapsPrefixes(prefixes ...netip.Prefix) bool {\n\treturn slices.ContainsFunc(prefixes, m.srcs.OverlapsPrefix)\n}\n\nfunc (m *Match) DestsOverlapsPrefixes(prefixes ...netip.Prefix) bool {\n\treturn slices.ContainsFunc(prefixes, m.dests.OverlapsPrefix)\n}\n\n// DestsIsTheInternet reports if the destination contains \"the internet\"\n// which is a IPSet that represents \"autogroup:internet\" and is special\n// cased for exit nodes.\n// This checks if dests is a superset of TheInternet(), which handles\n// merged filter rules where TheInternet is combined with other destinations.\nfunc (m *Match) DestsIsTheInternet() bool {\n\tif m.dests.ContainsPrefix(tsaddr.AllIPv4()) ||\n\t\tm.dests.ContainsPrefix(tsaddr.AllIPv6()) {\n\t\treturn true\n\t}\n\n\t// Check if dests contains all prefixes of TheInternet (superset check)\n\ttheInternet := util.TheInternet()\n\tfor _, prefix := range theInternet.Prefixes() {\n\t\tif !m.dests.ContainsPrefix(prefix) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n"
  },
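  {
    "path": "docs/examples/matcher_usage_sketch.go",
    "content": "// Hypothetical example file, NOT part of headscale. Illustrates how a\n// Match built from CIDR strings answers the containment queries used when\n// reducing filter rules; the signatures are taken directly from\n// hscontrol/policy/matcher/matcher.go.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"net/netip\"\n\n\t\"github.com/juanfont/headscale/hscontrol/policy/matcher\"\n)\n\nfunc main() {\n\tm := matcher.MatchFromStrings(\n\t\t[]string{\"100.64.0.1/32\"},                 // sources\n\t\t[]string{\"10.33.0.0/16\", \"100.64.0.2/32\"}, // destinations\n\t)\n\n\tfmt.Println(m.SrcsContainsIPs(netip.MustParseAddr(\"100.64.0.1\"))) // true\n\tfmt.Println(m.DestsContainsIP(netip.MustParseAddr(\"10.33.1.7\")))  // true: inside 10.33.0.0/16\n\tfmt.Println(m.DestsContainsIP(netip.MustParseAddr(\"100.64.0.3\"))) // false\n}\n"
  },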
  {
    "path": "hscontrol/policy/matcher/matcher_test.go",
    "content": "package matcher\n"
  },
  {
    "path": "hscontrol/policy/pm.go",
    "content": "package policy\n\nimport (\n\t\"net/netip\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/policy/matcher\"\n\tpolicyv2 \"github.com/juanfont/headscale/hscontrol/policy/v2\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/views\"\n)\n\ntype PolicyManager interface {\n\t// Filter returns the current filter rules for the entire tailnet and the associated matchers.\n\tFilter() ([]tailcfg.FilterRule, []matcher.Match)\n\t// FilterForNode returns filter rules for a specific node, handling autogroup:self\n\tFilterForNode(node types.NodeView) ([]tailcfg.FilterRule, error)\n\t// MatchersForNode returns matchers for peer relationship determination (unreduced)\n\tMatchersForNode(node types.NodeView) ([]matcher.Match, error)\n\t// BuildPeerMap constructs peer relationship maps for the given nodes\n\tBuildPeerMap(nodes views.Slice[types.NodeView]) map[types.NodeID][]types.NodeView\n\tSSHPolicy(baseURL string, node types.NodeView) (*tailcfg.SSHPolicy, error)\n\t// SSHCheckParams resolves the SSH check period for a (src, dst) pair\n\t// from the current policy, avoiding trust of client-provided URL params.\n\tSSHCheckParams(srcNodeID, dstNodeID types.NodeID) (time.Duration, bool)\n\tSetPolicy(pol []byte) (bool, error)\n\tSetUsers(users []types.User) (bool, error)\n\tSetNodes(nodes views.Slice[types.NodeView]) (bool, error)\n\t// NodeCanHaveTag reports whether the given node can have the given tag.\n\tNodeCanHaveTag(node types.NodeView, tag string) bool\n\n\t// TagExists reports whether the given tag is defined in the policy.\n\tTagExists(tag string) bool\n\n\t// NodeCanApproveRoute reports whether the given node can approve the given route.\n\tNodeCanApproveRoute(node types.NodeView, route netip.Prefix) bool\n\n\tVersion() int\n\tDebugString() string\n}\n\n// NewPolicyManager returns a new policy manager.\nfunc NewPolicyManager(pol []byte, users []types.User, nodes views.Slice[types.NodeView]) (PolicyManager, error) {\n\tvar (\n\t\tpolMan PolicyManager\n\t\terr    error\n\t)\n\n\tpolMan, err = policyv2.NewPolicyManager(pol, users, nodes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn polMan, err\n}\n\n// PolicyManagersForTest returns all available PostureManagers to be used\n// in tests to validate them in tests that try to determine that they\n// behave the same.\nfunc PolicyManagersForTest(pol []byte, users []types.User, nodes views.Slice[types.NodeView]) ([]PolicyManager, error) {\n\tvar polMans []PolicyManager\n\n\tfor _, pmf := range PolicyManagerFuncsForTest(pol) {\n\t\tpm, err := pmf(users, nodes)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpolMans = append(polMans, pm)\n\t}\n\n\treturn polMans, nil\n}\n\nfunc PolicyManagerFuncsForTest(pol []byte) []func([]types.User, views.Slice[types.NodeView]) (PolicyManager, error) {\n\tpolmanFuncs := make([]func([]types.User, views.Slice[types.NodeView]) (PolicyManager, error), 0, 1)\n\n\tpolmanFuncs = append(polmanFuncs, func(u []types.User, n views.Slice[types.NodeView]) (PolicyManager, error) {\n\t\treturn policyv2.NewPolicyManager(pol, u, n)\n\t})\n\n\treturn polmanFuncs\n}\n"
  },
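  {
    "path": "docs/examples/policy_manager_usage_sketch.go",
    "content": "// Hypothetical example file, NOT part of headscale. Shows the intended\n// entry point from hscontrol/policy/pm.go: NewPolicyManager parses a\n// HuJSON policy and hands back the v2 implementation behind the\n// PolicyManager interface; Filter then exposes the compiled rules and\n// matchers. The inline policy is a minimal allow-all, mirroring the\n// fixtures used in the tests below.\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/juanfont/headscale/hscontrol/policy\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"tailscale.com/types/views\"\n)\n\nfunc main() {\n\tpol := []byte(`{\"acls\": [{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}]}`)\n\n\t// No users or nodes yet; SetUsers/SetNodes can update them later.\n\tpm, err := policy.NewPolicyManager(pol, []types.User{}, views.SliceOf([]types.NodeView{}))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\trules, matchers := pm.Filter()\n\tfmt.Printf(\"rules=%d matchers=%d version=%d\\n\", len(rules), len(matchers), pm.Version())\n}\n"
  },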
  {
    "path": "hscontrol/policy/policy.go",
    "content": "package policy\n\nimport (\n\t\"net/netip\"\n\t\"slices\"\n\n\t\"github.com/juanfont/headscale/hscontrol/policy/matcher\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/rs/zerolog/log\"\n\t\"github.com/samber/lo\"\n\t\"tailscale.com/types/views\"\n)\n\n// ReduceNodes returns the list of peers authorized to be accessed from a given node.\nfunc ReduceNodes(\n\tnode types.NodeView,\n\tnodes views.Slice[types.NodeView],\n\tmatchers []matcher.Match,\n) views.Slice[types.NodeView] {\n\tvar result []types.NodeView\n\n\tfor _, peer := range nodes.All() {\n\t\tif peer.ID() == node.ID() {\n\t\t\tcontinue\n\t\t}\n\n\t\tif node.CanAccess(matchers, peer) || peer.CanAccess(matchers, node) {\n\t\t\tresult = append(result, peer)\n\t\t}\n\t}\n\n\treturn views.SliceOf(result)\n}\n\n// ReduceRoutes returns a reduced list of routes for a given node that it can access.\nfunc ReduceRoutes(\n\tnode types.NodeView,\n\troutes []netip.Prefix,\n\tmatchers []matcher.Match,\n) []netip.Prefix {\n\tvar result []netip.Prefix\n\n\tfor _, route := range routes {\n\t\tif node.CanAccessRoute(matchers, route) {\n\t\t\tresult = append(result, route)\n\t\t}\n\t}\n\n\treturn result\n}\n\n// BuildPeerMap builds a map of all peers that can be accessed by each node.\nfunc BuildPeerMap(\n\tnodes views.Slice[types.NodeView],\n\tmatchers []matcher.Match,\n) map[types.NodeID][]types.NodeView {\n\tret := make(map[types.NodeID][]types.NodeView, nodes.Len())\n\n\t// Build the map of all peers according to the matchers.\n\t// Compared to ReduceNodes, which builds the list per node, we end up with doing\n\t// the full work for every node (On^2), while this will reduce the list as we see\n\t// relationships while building the map, making it O(n^2/2) in the end, but with less work per node.\n\tfor i := range nodes.Len() {\n\t\tfor j := i + 1; j < nodes.Len(); j++ {\n\t\t\tif nodes.At(i).ID() == nodes.At(j).ID() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif nodes.At(i).CanAccess(matchers, nodes.At(j)) || nodes.At(j).CanAccess(matchers, nodes.At(i)) {\n\t\t\t\tret[nodes.At(i).ID()] = append(ret[nodes.At(i).ID()], nodes.At(j))\n\t\t\t\tret[nodes.At(j).ID()] = append(ret[nodes.At(j).ID()], nodes.At(i))\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ret\n}\n\n// ApproveRoutesWithPolicy checks if the node can approve the announced routes\n// and returns the new list of approved routes.\n// The approved routes will include:\n// 1. ALL previously approved routes (regardless of whether they're still advertised)\n// 2. 
New routes from announcedRoutes that can be auto-approved by policy\n// This ensures that:\n// - Previously approved routes are ALWAYS preserved (auto-approval never removes routes)\n// - New routes can be auto-approved according to policy\n// - Routes can only be removed by explicit admin action (not by auto-approval).\nfunc ApproveRoutesWithPolicy(pm PolicyManager, nv types.NodeView, currentApproved, announcedRoutes []netip.Prefix) ([]netip.Prefix, bool) {\n\tif pm == nil {\n\t\treturn currentApproved, false\n\t}\n\n\t// Start with ALL currently approved routes - we never remove approved routes\n\tnewApproved := make([]netip.Prefix, len(currentApproved))\n\tcopy(newApproved, currentApproved)\n\n\t// Then, check for new routes that can be auto-approved\n\tfor _, route := range announcedRoutes {\n\t\t// Skip if already approved\n\t\tif slices.Contains(newApproved, route) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Check if this new route can be auto-approved by policy\n\t\tcanApprove := pm.NodeCanApproveRoute(nv, route)\n\t\tif canApprove {\n\t\t\tnewApproved = append(newApproved, route)\n\t\t}\n\t}\n\n\t// Sort and deduplicate\n\tslices.SortFunc(newApproved, netip.Prefix.Compare)\n\tnewApproved = slices.Compact(newApproved)\n\tnewApproved = lo.Filter(newApproved, func(route netip.Prefix, index int) bool {\n\t\treturn route.IsValid()\n\t})\n\n\t// Sort the current approved for comparison\n\tsortedCurrent := make([]netip.Prefix, len(currentApproved))\n\tcopy(sortedCurrent, currentApproved)\n\tslices.SortFunc(sortedCurrent, netip.Prefix.Compare)\n\n\t// Only update if the routes actually changed\n\tif !slices.Equal(sortedCurrent, newApproved) {\n\t\t// Log what changed\n\t\tvar added, kept []netip.Prefix\n\n\t\tfor _, route := range newApproved {\n\t\t\tif !slices.Contains(sortedCurrent, route) {\n\t\t\t\tadded = append(added, route)\n\t\t\t} else {\n\t\t\t\tkept = append(kept, route)\n\t\t\t}\n\t\t}\n\n\t\tif len(added) > 0 {\n\t\t\tlog.Debug().\n\t\t\t\tEmbedObject(nv).\n\t\t\t\tStrs(\"routes.added\", util.PrefixesToString(added)).\n\t\t\t\tStrs(\"routes.kept\", util.PrefixesToString(kept)).\n\t\t\t\tInt(\"routes.total\", len(newApproved)).\n\t\t\t\tMsg(\"Routes auto-approved by policy\")\n\t\t}\n\n\t\treturn newApproved, true\n\t}\n\n\treturn newApproved, false\n}\n"
  },
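  {
    "path": "docs/examples/build_peer_map_sketch.go",
    "content": "// Hypothetical example file, NOT part of headscale. Demonstrates the\n// pairwise construction in BuildPeerMap from hscontrol/policy/policy.go:\n// with an allow-all matcher every relationship is checked once per\n// unordered pair and recorded for both endpoints, so each of the three\n// nodes ends up with two peers. Node IDs, addresses and user names are\n// illustrative stand-ins.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"net/netip\"\n\n\t\"github.com/juanfont/headscale/hscontrol/policy\"\n\t\"github.com/juanfont/headscale/hscontrol/policy/matcher\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n)\n\nfunc addr(s string) *netip.Addr {\n\tip := netip.MustParseAddr(s)\n\treturn &ip\n}\n\nfunc main() {\n\tnodes := types.Nodes{\n\t\t&types.Node{ID: 1, IPv4: addr(\"100.64.0.1\"), User: &types.User{Name: \"alice\"}},\n\t\t&types.Node{ID: 2, IPv4: addr(\"100.64.0.2\"), User: &types.User{Name: \"bob\"}},\n\t\t&types.Node{ID: 3, IPv4: addr(\"100.64.0.3\"), User: &types.User{Name: \"carol\"}},\n\t}\n\n\t// An allow-all rule: every node can reach every other node.\n\tallowAll := matcher.MatchFromStrings([]string{\"*\"}, []string{\"*\"})\n\n\tpeerMap := policy.BuildPeerMap(nodes.ViewSlice(), []matcher.Match{allowAll})\n\tfor id, peers := range peerMap {\n\t\tfmt.Printf(\"node %d has %d peers\\n\", id, len(peers))\n\t}\n}\n"
  },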
  {
    "path": "hscontrol/policy/policy_autoapprove_test.go",
    "content": "package policy\n\nimport (\n\t\"fmt\"\n\t\"net/netip\"\n\t\"slices\"\n\t\"testing\"\n\n\tpolicyv2 \"github.com/juanfont/headscale/hscontrol/policy/v2\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gorm.io/gorm\"\n\t\"tailscale.com/types/key\"\n\t\"tailscale.com/types/views\"\n)\n\nfunc TestApproveRoutesWithPolicy_NeverRemovesApprovedRoutes(t *testing.T) {\n\tuser1 := types.User{\n\t\tModel: gorm.Model{ID: 1},\n\t\tName:  \"testuser@\",\n\t}\n\tuser2 := types.User{\n\t\tModel: gorm.Model{ID: 2},\n\t\tName:  \"otheruser@\",\n\t}\n\tusers := []types.User{user1, user2}\n\n\tnode1 := &types.Node{\n\t\tID:             1,\n\t\tMachineKey:     key.NewMachine().Public(),\n\t\tNodeKey:        key.NewNode().Public(),\n\t\tHostname:       \"test-node\",\n\t\tUserID:         new(user1.ID),\n\t\tUser:           new(user1),\n\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\tIPv4:           new(netip.MustParseAddr(\"100.64.0.1\")),\n\t\tTags:           []string{\"tag:test\"},\n\t}\n\n\tnode2 := &types.Node{\n\t\tID:             2,\n\t\tMachineKey:     key.NewMachine().Public(),\n\t\tNodeKey:        key.NewNode().Public(),\n\t\tHostname:       \"other-node\",\n\t\tUserID:         new(user2.ID),\n\t\tUser:           new(user2),\n\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\tIPv4:           new(netip.MustParseAddr(\"100.64.0.2\")),\n\t}\n\n\t// Create a policy that auto-approves specific routes\n\tpolicyJSON := `{\n\t\t\"groups\": {\n\t\t\t\"group:test\": [\"testuser@\"]\n\t\t},\n\t\t\"tagOwners\": {\n\t\t\t\"tag:test\": [\"testuser@\"]\n\t\t},\n\t\t\"acls\": [\n\t\t\t{\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"src\": [\"*\"],\n\t\t\t\t\"dst\": [\"*:*\"]\n\t\t\t}\n\t\t],\n\t\t\"autoApprovers\": {\n\t\t\t\"routes\": {\n\t\t\t\t\"10.0.0.0/8\": [\"testuser@\", \"tag:test\"],\n\t\t\t\t\"10.1.0.0/24\": [\"testuser@\"],\n\t\t\t\t\"10.2.0.0/24\": [\"testuser@\"],\n\t\t\t\t\"192.168.0.0/24\": [\"tag:test\"]\n\t\t\t}\n\t\t}\n\t}`\n\n\tpm, err := policyv2.NewPolicyManager([]byte(policyJSON), users, views.SliceOf([]types.NodeView{node1.View(), node2.View()}))\n\trequire.NoError(t, err)\n\n\ttests := []struct {\n\t\tname            string\n\t\tnode            *types.Node\n\t\tcurrentApproved []netip.Prefix\n\t\tannouncedRoutes []netip.Prefix\n\t\twantApproved    []netip.Prefix\n\t\twantChanged     bool\n\t\tdescription     string\n\t}{\n\t\t{\n\t\t\tname: \"previously_approved_route_no_longer_advertised_should_remain\",\n\t\t\tnode: node1,\n\t\t\tcurrentApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t\tnetip.MustParsePrefix(\"192.168.0.0/24\"),\n\t\t\t},\n\t\t\tannouncedRoutes: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"), // Only this one is still advertised\n\t\t\t},\n\t\t\twantApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t\tnetip.MustParsePrefix(\"192.168.0.0/24\"), // Should still be here!\n\t\t\t},\n\t\t\twantChanged: false,\n\t\t\tdescription: \"Previously approved routes should never be removed even when no longer advertised\",\n\t\t},\n\t\t{\n\t\t\tname: \"add_new_auto_approved_route_keeps_old_approved\",\n\t\t\tnode: node1,\n\t\t\tcurrentApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.5.0.0/24\"), // This was manually approved\n\t\t\t},\n\t\t\tannouncedRoutes: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.1.0.0/24\"), // New 
route that should be auto-approved\n\t\t\t},\n\t\t\twantApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.1.0.0/24\"), // New auto-approved route (subset of 10.0.0.0/8)\n\t\t\t\tnetip.MustParsePrefix(\"10.5.0.0/24\"), // Old approved route kept\n\t\t\t},\n\t\t\twantChanged: true,\n\t\t\tdescription: \"New auto-approved routes should be added while keeping old approved routes\",\n\t\t},\n\t\t{\n\t\t\tname: \"no_announced_routes_keeps_all_approved\",\n\t\t\tnode: node1,\n\t\t\tcurrentApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t\tnetip.MustParsePrefix(\"192.168.0.0/24\"),\n\t\t\t\tnetip.MustParsePrefix(\"172.16.0.0/16\"),\n\t\t\t},\n\t\t\tannouncedRoutes: []netip.Prefix{}, // No routes announced\n\t\t\twantApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t\tnetip.MustParsePrefix(\"172.16.0.0/16\"),\n\t\t\t\tnetip.MustParsePrefix(\"192.168.0.0/24\"),\n\t\t\t},\n\t\t\twantChanged: false,\n\t\t\tdescription: \"All approved routes should remain when no routes are announced\",\n\t\t},\n\t\t{\n\t\t\tname: \"no_changes_when_announced_equals_approved\",\n\t\t\tnode: node1,\n\t\t\tcurrentApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t},\n\t\t\tannouncedRoutes: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t},\n\t\t\twantApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t},\n\t\t\twantChanged: false,\n\t\t\tdescription: \"No changes should occur when announced routes match approved routes\",\n\t\t},\n\t\t{\n\t\t\tname: \"auto_approve_multiple_new_routes\",\n\t\t\tnode: node1,\n\t\t\tcurrentApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"172.16.0.0/24\"), // This was manually approved\n\t\t\t},\n\t\t\tannouncedRoutes: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.2.0.0/24\"),    // Should be auto-approved (subset of 10.0.0.0/8)\n\t\t\t\tnetip.MustParsePrefix(\"192.168.0.0/24\"), // Should be auto-approved for tag:test\n\t\t\t},\n\t\t\twantApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.2.0.0/24\"),    // New auto-approved\n\t\t\t\tnetip.MustParsePrefix(\"172.16.0.0/24\"),  // Original kept\n\t\t\t\tnetip.MustParsePrefix(\"192.168.0.0/24\"), // New auto-approved\n\t\t\t},\n\t\t\twantChanged: true,\n\t\t\tdescription: \"Multiple new routes should be auto-approved while keeping existing approved routes\",\n\t\t},\n\t\t{\n\t\t\tname: \"node_without_permission_no_auto_approval\",\n\t\t\tnode: node2, // Different node without the tag\n\t\t\tcurrentApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t},\n\t\t\tannouncedRoutes: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"192.168.0.0/24\"), // This requires tag:test\n\t\t\t},\n\t\t\twantApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"), // Only the original approved route\n\t\t\t},\n\t\t\twantChanged: false,\n\t\t\tdescription: \"Routes should not be auto-approved for nodes without proper permissions\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgotApproved, gotChanged := ApproveRoutesWithPolicy(pm, tt.node.View(), tt.currentApproved, tt.announcedRoutes)\n\n\t\t\tassert.Equal(t, tt.wantChanged, gotChanged, \"changed flag mismatch: %s\", tt.description)\n\n\t\t\t// Sort for comparison since ApproveRoutesWithPolicy sorts the results\n\t\t\tslices.SortFunc(tt.wantApproved, netip.Prefix.Compare)\n\t\t\tassert.Equal(t, tt.wantApproved, gotApproved, \"approved 
routes mismatch: %s\", tt.description)\n\n\t\t\t// Verify that all previously approved routes are still present\n\t\t\tfor _, prevRoute := range tt.currentApproved {\n\t\t\t\tassert.Contains(t, gotApproved, prevRoute,\n\t\t\t\t\t\"previously approved route %s was removed - this should never happen\", prevRoute)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestApproveRoutesWithPolicy_NilAndEmptyCases(t *testing.T) {\n\t// Create a basic policy for edge case testing\n\taclPolicy := `\n{\n\t\"acls\": [\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]},\n\t],\n\t\"autoApprovers\": {\n\t\t\"routes\": {\n\t\t\t\"10.1.0.0/24\": [\"test@\"],\n\t\t},\n\t},\n}`\n\n\tpmfs := PolicyManagerFuncsForTest([]byte(aclPolicy))\n\n\ttests := []struct {\n\t\tname            string\n\t\tcurrentApproved []netip.Prefix\n\t\tannouncedRoutes []netip.Prefix\n\t\twantApproved    []netip.Prefix\n\t\twantChanged     bool\n\t}{\n\t\t{\n\t\t\tname: \"nil_policy_manager\",\n\t\t\tcurrentApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t},\n\t\t\tannouncedRoutes: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"192.168.0.0/24\"),\n\t\t\t},\n\t\t\twantApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t},\n\t\t\twantChanged: false,\n\t\t},\n\t\t{\n\t\t\tname:            \"nil_current_approved\",\n\t\t\tcurrentApproved: nil,\n\t\t\tannouncedRoutes: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.1.0.0/24\"),\n\t\t\t},\n\t\t\twantApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.1.0.0/24\"),\n\t\t\t},\n\t\t\twantChanged: true,\n\t\t},\n\t\t{\n\t\t\tname: \"nil_announced_routes\",\n\t\t\tcurrentApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t},\n\t\t\tannouncedRoutes: nil,\n\t\t\twantApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t},\n\t\t\twantChanged: false,\n\t\t},\n\t\t{\n\t\t\tname: \"duplicate_approved_routes\",\n\t\t\tcurrentApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"), // Duplicate\n\t\t\t},\n\t\t\tannouncedRoutes: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.1.0.0/24\"),\n\t\t\t},\n\t\t\twantApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t\tnetip.MustParsePrefix(\"10.1.0.0/24\"),\n\t\t\t},\n\t\t\twantChanged: true,\n\t\t},\n\t\t{\n\t\t\tname:            \"empty_slices\",\n\t\t\tcurrentApproved: []netip.Prefix{},\n\t\t\tannouncedRoutes: []netip.Prefix{},\n\t\t\twantApproved:    []netip.Prefix{},\n\t\t\twantChanged:     false,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tfor i, pmf := range pmfs {\n\t\t\tt.Run(fmt.Sprintf(\"%s-policy-index%d\", tt.name, i), func(t *testing.T) {\n\t\t\t\t// Create test user\n\t\t\t\tuser := types.User{\n\t\t\t\t\tModel: gorm.Model{ID: 1},\n\t\t\t\t\tName:  \"test\",\n\t\t\t\t}\n\t\t\t\tusers := []types.User{user}\n\n\t\t\t\t// Create test node\n\t\t\t\tnode := types.Node{\n\t\t\t\t\tID:             1,\n\t\t\t\t\tMachineKey:     key.NewMachine().Public(),\n\t\t\t\t\tNodeKey:        key.NewNode().Public(),\n\t\t\t\t\tHostname:       \"testnode\",\n\t\t\t\t\tUserID:         new(user.ID),\n\t\t\t\t\tUser:           new(user),\n\t\t\t\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\t\t\t\tIPv4:           new(netip.MustParseAddr(\"100.64.0.1\")),\n\t\t\t\t\tApprovedRoutes: tt.currentApproved,\n\t\t\t\t}\n\t\t\t\tnodes := types.Nodes{&node}\n\n\t\t\t\t// Create policy manager or use nil if specified\n\t\t\t\tvar 
(\n\t\t\t\t\tpm  PolicyManager\n\t\t\t\t\terr error\n\t\t\t\t)\n\n\t\t\t\tif tt.name != \"nil_policy_manager\" {\n\t\t\t\t\tpm, err = pmf(users, nodes.ViewSlice())\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t} else {\n\t\t\t\t\tpm = nil\n\t\t\t\t}\n\n\t\t\t\tgotApproved, gotChanged := ApproveRoutesWithPolicy(pm, node.View(), tt.currentApproved, tt.announcedRoutes)\n\n\t\t\t\tassert.Equal(t, tt.wantChanged, gotChanged, \"changed flag mismatch\")\n\n\t\t\t\t// Handle nil vs empty slice comparison\n\t\t\t\tif tt.wantApproved == nil {\n\t\t\t\t\tassert.Nil(t, gotApproved, \"expected nil approved routes\")\n\t\t\t\t} else {\n\t\t\t\t\tslices.SortFunc(tt.wantApproved, netip.Prefix.Compare)\n\t\t\t\t\tassert.Equal(t, tt.wantApproved, gotApproved, \"approved routes mismatch\")\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "hscontrol/policy/policy_route_approval_test.go",
    "content": "package policy\n\nimport (\n\t\"fmt\"\n\t\"net/netip\"\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gorm.io/gorm\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/key\"\n)\n\nfunc TestApproveRoutesWithPolicy_NeverRemovesRoutes(t *testing.T) {\n\t// Test policy that allows specific routes to be auto-approved\n\taclPolicy := `\n{\n\t\"groups\": {\n\t\t\"group:admins\": [\"test@\"],\n\t},\n\t\"acls\": [\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]},\n\t],\n\t\"autoApprovers\": {\n\t\t\"routes\": {\n\t\t\t\"10.0.0.0/24\": [\"test@\"],\n\t\t\t\"192.168.0.0/24\": [\"group:admins\"],\n\t\t\t\"172.16.0.0/16\": [\"tag:approved\"],\n\t\t},\n\t},\n\t\"tagOwners\": {\n\t\t\"tag:approved\": [\"test@\"],\n\t},\n}`\n\n\ttests := []struct {\n\t\tname              string\n\t\tcurrentApproved   []netip.Prefix\n\t\tannouncedRoutes   []netip.Prefix\n\t\tnodeHostname      string\n\t\tnodeUser          string\n\t\tnodeTags          []string\n\t\twantApproved      []netip.Prefix\n\t\twantChanged       bool\n\t\twantRemovedRoutes []netip.Prefix // Routes that should NOT be in the result\n\t}{\n\t\t{\n\t\t\tname: \"previously_approved_route_no_longer_advertised_remains\",\n\t\t\tcurrentApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t\tnetip.MustParsePrefix(\"192.168.0.0/24\"),\n\t\t\t},\n\t\t\tannouncedRoutes: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"192.168.0.0/24\"), // Only this one still advertised\n\t\t\t},\n\t\t\tnodeUser: \"test\",\n\t\t\twantApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"), // Should remain!\n\t\t\t\tnetip.MustParsePrefix(\"192.168.0.0/24\"),\n\t\t\t},\n\t\t\twantChanged:       false,\n\t\t\twantRemovedRoutes: []netip.Prefix{}, // Nothing should be removed\n\t\t},\n\t\t{\n\t\t\tname: \"add_new_auto_approved_route_keeps_existing\",\n\t\t\tcurrentApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t},\n\t\t\tannouncedRoutes: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),    // Still advertised\n\t\t\t\tnetip.MustParsePrefix(\"192.168.0.0/24\"), // New route\n\t\t\t},\n\t\t\tnodeUser: \"test\",\n\t\t\twantApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t\tnetip.MustParsePrefix(\"192.168.0.0/24\"), // Auto-approved via group\n\t\t\t},\n\t\t\twantChanged: true,\n\t\t},\n\t\t{\n\t\t\tname: \"no_announced_routes_keeps_all_approved\",\n\t\t\tcurrentApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t\tnetip.MustParsePrefix(\"192.168.0.0/24\"),\n\t\t\t\tnetip.MustParsePrefix(\"172.16.0.0/16\"),\n\t\t\t},\n\t\t\tannouncedRoutes: []netip.Prefix{}, // No routes announced anymore\n\t\t\tnodeUser:        \"test\",\n\t\t\twantApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t\tnetip.MustParsePrefix(\"172.16.0.0/16\"),\n\t\t\t\tnetip.MustParsePrefix(\"192.168.0.0/24\"),\n\t\t\t},\n\t\t\twantChanged: false,\n\t\t},\n\t\t{\n\t\t\tname: \"manually_approved_route_not_in_policy_remains\",\n\t\t\tcurrentApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"203.0.113.0/24\"), // Not in auto-approvers\n\t\t\t},\n\t\t\tannouncedRoutes: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"), // Can be auto-approved\n\t\t\t},\n\t\t\tnodeUser: 
\"test\",\n\t\t\twantApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),    // New auto-approved\n\t\t\t\tnetip.MustParsePrefix(\"203.0.113.0/24\"), // Manual approval preserved\n\t\t\t},\n\t\t\twantChanged: true,\n\t\t},\n\t\t{\n\t\t\tname: \"tagged_node_gets_tag_approved_routes\",\n\t\t\tcurrentApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t},\n\t\t\tannouncedRoutes: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"172.16.0.0/16\"), // Tag-approved route\n\t\t\t},\n\t\t\tnodeUser: \"test\",\n\t\t\tnodeTags: []string{\"tag:approved\"},\n\t\t\twantApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),   // Previous approval preserved\n\t\t\t\tnetip.MustParsePrefix(\"172.16.0.0/16\"), // New tag-approved\n\t\t\t},\n\t\t\twantChanged: true,\n\t\t},\n\t\t{\n\t\t\tname: \"complex_scenario_multiple_changes\",\n\t\t\tcurrentApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),    // Will not be advertised\n\t\t\t\tnetip.MustParsePrefix(\"203.0.113.0/24\"), // Manual, not advertised\n\t\t\t},\n\t\t\tannouncedRoutes: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"192.168.0.0/24\"),  // New, auto-approvable\n\t\t\t\tnetip.MustParsePrefix(\"172.16.0.0/16\"),   // New, not approvable (no tag)\n\t\t\t\tnetip.MustParsePrefix(\"198.51.100.0/24\"), // New, not in policy\n\t\t\t},\n\t\t\tnodeUser: \"test\",\n\t\t\twantApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),    // Kept despite not advertised\n\t\t\t\tnetip.MustParsePrefix(\"192.168.0.0/24\"), // New auto-approved\n\t\t\t\tnetip.MustParsePrefix(\"203.0.113.0/24\"), // Kept despite not advertised\n\t\t\t},\n\t\t\twantChanged: true,\n\t\t},\n\t}\n\n\tpmfs := PolicyManagerFuncsForTest([]byte(aclPolicy))\n\n\tfor _, tt := range tests {\n\t\tfor i, pmf := range pmfs {\n\t\t\tt.Run(fmt.Sprintf(\"%s-policy-index%d\", tt.name, i), func(t *testing.T) {\n\t\t\t\t// Create test user\n\t\t\t\tuser := types.User{\n\t\t\t\t\tModel: gorm.Model{ID: 1},\n\t\t\t\t\tName:  tt.nodeUser,\n\t\t\t\t}\n\t\t\t\tusers := []types.User{user}\n\n\t\t\t\t// Create test node\n\t\t\t\tnode := types.Node{\n\t\t\t\t\tID:             1,\n\t\t\t\t\tMachineKey:     key.NewMachine().Public(),\n\t\t\t\t\tNodeKey:        key.NewNode().Public(),\n\t\t\t\t\tHostname:       tt.nodeHostname,\n\t\t\t\t\tUserID:         new(user.ID),\n\t\t\t\t\tUser:           new(user),\n\t\t\t\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tRoutableIPs: tt.announcedRoutes,\n\t\t\t\t\t},\n\t\t\t\t\tIPv4:           new(netip.MustParseAddr(\"100.64.0.1\")),\n\t\t\t\t\tApprovedRoutes: tt.currentApproved,\n\t\t\t\t\tTags:           tt.nodeTags,\n\t\t\t\t}\n\t\t\t\tnodes := types.Nodes{&node}\n\n\t\t\t\t// Create policy manager\n\t\t\t\tpm, err := pmf(users, nodes.ViewSlice())\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.NotNil(t, pm)\n\n\t\t\t\t// Test ApproveRoutesWithPolicy\n\t\t\t\tgotApproved, gotChanged := ApproveRoutesWithPolicy(\n\t\t\t\t\tpm,\n\t\t\t\t\tnode.View(),\n\t\t\t\t\ttt.currentApproved,\n\t\t\t\t\ttt.announcedRoutes,\n\t\t\t\t)\n\n\t\t\t\t// Check change flag\n\t\t\t\tassert.Equal(t, tt.wantChanged, gotChanged, \"change flag mismatch\")\n\n\t\t\t\t// Check approved routes match expected\n\t\t\t\tif diff := cmp.Diff(tt.wantApproved, gotApproved, util.Comparers...); diff != \"\" {\n\t\t\t\t\tt.Logf(\"Want: %v\", tt.wantApproved)\n\t\t\t\t\tt.Logf(\"Got:  %v\", gotApproved)\n\t\t\t\t\tt.Errorf(\"unexpected approved 
routes (-want +got):\\n%s\", diff)\n\t\t\t\t}\n\n\t\t\t\t// Verify all previously approved routes are still present\n\t\t\t\tfor _, prevRoute := range tt.currentApproved {\n\t\t\t\t\tassert.Contains(t, gotApproved, prevRoute,\n\t\t\t\t\t\t\"previously approved route %s was removed - this should NEVER happen\", prevRoute)\n\t\t\t\t}\n\n\t\t\t\t// Verify no routes were incorrectly removed\n\t\t\t\tfor _, removedRoute := range tt.wantRemovedRoutes {\n\t\t\t\t\tassert.NotContains(t, gotApproved, removedRoute,\n\t\t\t\t\t\t\"route %s should have been removed but wasn't\", removedRoute)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}\n\nfunc TestApproveRoutesWithPolicy_EdgeCases(t *testing.T) {\n\taclPolicy := `\n{\n\t\"acls\": [\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]},\n\t],\n\t\"autoApprovers\": {\n\t\t\"routes\": {\n\t\t\t\"10.0.0.0/8\": [\"test@\"],\n\t\t},\n\t},\n}`\n\n\ttests := []struct {\n\t\tname            string\n\t\tcurrentApproved []netip.Prefix\n\t\tannouncedRoutes []netip.Prefix\n\t\twantApproved    []netip.Prefix\n\t\twantChanged     bool\n\t}{\n\t\t{\n\t\t\tname:            \"nil_current_approved\",\n\t\t\tcurrentApproved: nil,\n\t\t\tannouncedRoutes: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t},\n\t\t\twantApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t},\n\t\t\twantChanged: true,\n\t\t},\n\t\t{\n\t\t\tname:            \"empty_current_approved\",\n\t\t\tcurrentApproved: []netip.Prefix{},\n\t\t\tannouncedRoutes: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t},\n\t\t\twantApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t},\n\t\t\twantChanged: true,\n\t\t},\n\t\t{\n\t\t\tname: \"duplicate_routes_handled\",\n\t\t\tcurrentApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"), // Duplicate\n\t\t\t},\n\t\t\tannouncedRoutes: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t},\n\t\t\twantApproved: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t},\n\t\t\twantChanged: true, // Duplicates are removed, so it's a change\n\t\t},\n\t}\n\n\tpmfs := PolicyManagerFuncsForTest([]byte(aclPolicy))\n\n\tfor _, tt := range tests {\n\t\tfor i, pmf := range pmfs {\n\t\t\tt.Run(fmt.Sprintf(\"%s-policy-index%d\", tt.name, i), func(t *testing.T) {\n\t\t\t\t// Create test user\n\t\t\t\tuser := types.User{\n\t\t\t\t\tModel: gorm.Model{ID: 1},\n\t\t\t\t\tName:  \"test\",\n\t\t\t\t}\n\t\t\t\tusers := []types.User{user}\n\n\t\t\t\tnode := types.Node{\n\t\t\t\t\tID:             1,\n\t\t\t\t\tMachineKey:     key.NewMachine().Public(),\n\t\t\t\t\tNodeKey:        key.NewNode().Public(),\n\t\t\t\t\tHostname:       \"testnode\",\n\t\t\t\t\tUserID:         new(user.ID),\n\t\t\t\t\tUser:           new(user),\n\t\t\t\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tRoutableIPs: tt.announcedRoutes,\n\t\t\t\t\t},\n\t\t\t\t\tIPv4:           new(netip.MustParseAddr(\"100.64.0.1\")),\n\t\t\t\t\tApprovedRoutes: tt.currentApproved,\n\t\t\t\t}\n\t\t\t\tnodes := types.Nodes{&node}\n\n\t\t\t\tpm, err := pmf(users, nodes.ViewSlice())\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tgotApproved, gotChanged := ApproveRoutesWithPolicy(\n\t\t\t\t\tpm,\n\t\t\t\t\tnode.View(),\n\t\t\t\t\ttt.currentApproved,\n\t\t\t\t\ttt.announcedRoutes,\n\t\t\t\t)\n\n\t\t\t\tassert.Equal(t, tt.wantChanged, gotChanged)\n\n\t\t\t\tif diff := 
cmp.Diff(tt.wantApproved, gotApproved, util.Comparers...); diff != \"\" {\n\t\t\t\t\tt.Errorf(\"unexpected approved routes (-want +got):\\n%s\", diff)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}\n\nfunc TestApproveRoutesWithPolicy_NilPolicyManagerCase(t *testing.T) {\n\tuser := types.User{\n\t\tModel: gorm.Model{ID: 1},\n\t\tName:  \"test\",\n\t}\n\n\tuserID := user.ID\n\n\tcurrentApproved := []netip.Prefix{\n\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t}\n\tannouncedRoutes := []netip.Prefix{\n\t\tnetip.MustParsePrefix(\"192.168.0.0/24\"),\n\t}\n\n\tnode := types.Node{\n\t\tID:             1,\n\t\tMachineKey:     key.NewMachine().Public(),\n\t\tNodeKey:        key.NewNode().Public(),\n\t\tHostname:       \"testnode\",\n\t\tUserID:         &userID,\n\t\tUser:           &user,\n\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tRoutableIPs: announcedRoutes,\n\t\t},\n\t\tIPv4:           new(netip.MustParseAddr(\"100.64.0.1\")),\n\t\tApprovedRoutes: currentApproved,\n\t}\n\n\t// With nil policy manager, should return current approved unchanged\n\tgotApproved, gotChanged := ApproveRoutesWithPolicy(nil, node.View(), currentApproved, announcedRoutes)\n\n\tassert.False(t, gotChanged)\n\tassert.Equal(t, currentApproved, gotApproved)\n}\n"
  },
  {
    "path": "hscontrol/policy/policy_test.go",
    "content": "package policy\n\nimport (\n\t\"fmt\"\n\t\"net/netip\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/juanfont/headscale/hscontrol/policy/matcher\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gorm.io/gorm\"\n\t\"tailscale.com/tailcfg\"\n)\n\nvar ap = func(ipStr string) *netip.Addr {\n\tip := netip.MustParseAddr(ipStr)\n\treturn &ip\n}\n\nvar p = func(prefStr string) netip.Prefix {\n\tip := netip.MustParsePrefix(prefStr)\n\treturn ip\n}\n\nfunc TestReduceNodes(t *testing.T) {\n\ttype args struct {\n\t\tnodes types.Nodes\n\t\trules []tailcfg.FilterRule\n\t\tnode  *types.Node\n\t}\n\n\ttests := []struct {\n\t\tname string\n\t\targs args\n\t\twant types.Nodes\n\t}{\n\t\t{\n\t\t\tname: \"all hosts can talk to each other\",\n\t\t\targs: args{\n\t\t\t\tnodes: types.Nodes{ // list of all nodes in the database\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tID:   1,\n\t\t\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\t\t\tUser: &types.User{Name: \"joe\"},\n\t\t\t\t\t},\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tID:   2,\n\t\t\t\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\t\t\t\tUser: &types.User{Name: \"marc\"},\n\t\t\t\t\t},\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tID:   3,\n\t\t\t\t\t\tIPv4: ap(\"100.64.0.3\"),\n\t\t\t\t\t\tUser: &types.User{Name: \"mickael\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"100.64.0.1\", \"100.64.0.2\", \"100.64.0.3\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tnode: &types.Node{ // current nodes\n\t\t\t\t\tID:   1,\n\t\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\t\tUser: &types.User{Name: \"joe\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: types.Nodes{\n\t\t\t\t&types.Node{\n\t\t\t\t\tID:   2,\n\t\t\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\t\t\tUser: &types.User{Name: \"marc\"},\n\t\t\t\t},\n\t\t\t\t&types.Node{\n\t\t\t\t\tID:   3,\n\t\t\t\t\tIPv4: ap(\"100.64.0.3\"),\n\t\t\t\t\tUser: &types.User{Name: \"mickael\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"One host can talk to another, but not all hosts\",\n\t\t\targs: args{\n\t\t\t\tnodes: types.Nodes{ // list of all nodes in the database\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tID:   1,\n\t\t\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\t\t\tUser: &types.User{Name: \"joe\"},\n\t\t\t\t\t},\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tID:   2,\n\t\t\t\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\t\t\t\tUser: &types.User{Name: \"marc\"},\n\t\t\t\t\t},\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tID:   3,\n\t\t\t\t\t\tIPv4: ap(\"100.64.0.3\"),\n\t\t\t\t\t\tUser: &types.User{Name: \"mickael\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{ // list of all ACLRules registered\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"100.64.0.1\", \"100.64.0.2\", \"100.64.0.3\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.64.0.2\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tnode: &types.Node{ // current nodes\n\t\t\t\t\tID:   1,\n\t\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\t\tUser: &types.User{Name: \"joe\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: types.Nodes{\n\t\t\t\t&types.Node{\n\t\t\t\t\tID:   2,\n\t\t\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\t\t\tUser: &types.User{Name: \"marc\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"host cannot directly talk to destination, but return path is 
authorized\",\n\t\t\targs: args{\n\t\t\t\tnodes: types.Nodes{ // list of all nodes in the database\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tID:   1,\n\t\t\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\t\t\tUser: &types.User{Name: \"joe\"},\n\t\t\t\t\t},\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tID:   2,\n\t\t\t\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\t\t\t\tUser: &types.User{Name: \"marc\"},\n\t\t\t\t\t},\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tID:   3,\n\t\t\t\t\t\tIPv4: ap(\"100.64.0.3\"),\n\t\t\t\t\t\tUser: &types.User{Name: \"mickael\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{ // list of all ACLRules registered\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"100.64.0.3\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.64.0.2\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tnode: &types.Node{ // current nodes\n\t\t\t\t\tID:   2,\n\t\t\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\t\t\tUser: &types.User{Name: \"marc\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: types.Nodes{\n\t\t\t\t&types.Node{\n\t\t\t\t\tID:   3,\n\t\t\t\t\tIPv4: ap(\"100.64.0.3\"),\n\t\t\t\t\tUser: &types.User{Name: \"mickael\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"rules allows all hosts to reach one destination\",\n\t\t\targs: args{\n\t\t\t\tnodes: types.Nodes{ // list of all nodes in the database\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tID:   1,\n\t\t\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\t\t\tUser: &types.User{Name: \"joe\"},\n\t\t\t\t\t},\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tID:   2,\n\t\t\t\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\t\t\t\tUser: &types.User{Name: \"marc\"},\n\t\t\t\t\t},\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tID:   3,\n\t\t\t\t\t\tIPv4: ap(\"100.64.0.3\"),\n\t\t\t\t\t\tUser: &types.User{Name: \"mickael\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{ // list of all ACLRules registered\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"*\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.64.0.2\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tnode: &types.Node{ // current nodes\n\t\t\t\t\tID:   1,\n\t\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\t\tUser: &types.User{Name: \"joe\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: types.Nodes{\n\t\t\t\t&types.Node{\n\t\t\t\t\tID:   2,\n\t\t\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\t\t\tUser: &types.User{Name: \"marc\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"rules allows all hosts to reach one destination, destination can reach all hosts\",\n\t\t\targs: args{\n\t\t\t\tnodes: types.Nodes{ // list of all nodes in the database\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tID:   1,\n\t\t\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\t\t\tUser: &types.User{Name: \"joe\"},\n\t\t\t\t\t},\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tID:   2,\n\t\t\t\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\t\t\t\tUser: &types.User{Name: \"marc\"},\n\t\t\t\t\t},\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tID:   3,\n\t\t\t\t\t\tIPv4: ap(\"100.64.0.3\"),\n\t\t\t\t\t\tUser: &types.User{Name: \"mickael\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{ // list of all ACLRules registered\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"*\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.64.0.2\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tnode: &types.Node{ // current nodes\n\t\t\t\t\tID:   2,\n\t\t\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\t\t\tUser: &types.User{Name: \"marc\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: types.Nodes{\n\t\t\t\t&types.Node{\n\t\t\t\t\tID:   1,\n\t\t\t\t\tIPv4: 
ap(\"100.64.0.1\"),\n\t\t\t\t\tUser: &types.User{Name: \"joe\"},\n\t\t\t\t},\n\t\t\t\t&types.Node{\n\t\t\t\t\tID:   3,\n\t\t\t\t\tIPv4: ap(\"100.64.0.3\"),\n\t\t\t\t\tUser: &types.User{Name: \"mickael\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"rule allows all hosts to reach all destinations\",\n\t\t\targs: args{\n\t\t\t\tnodes: types.Nodes{ // list of all nodes in the database\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tID:   1,\n\t\t\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\t\t\tUser: &types.User{Name: \"joe\"},\n\t\t\t\t\t},\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tID:   2,\n\t\t\t\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\t\t\t\tUser: &types.User{Name: \"marc\"},\n\t\t\t\t\t},\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tID:   3,\n\t\t\t\t\t\tIPv4: ap(\"100.64.0.3\"),\n\t\t\t\t\t\tUser: &types.User{Name: \"mickael\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{ // list of all ACLRules registered\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"*\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tnode: &types.Node{ // current nodes\n\t\t\t\t\tID:   2,\n\t\t\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\t\t\tUser: &types.User{Name: \"marc\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: types.Nodes{\n\t\t\t\t&types.Node{\n\t\t\t\t\tID:   1,\n\t\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\t\tUser: &types.User{Name: \"joe\"},\n\t\t\t\t},\n\t\t\t\t&types.Node{\n\t\t\t\t\tID:   3,\n\t\t\t\t\tIPv4: ap(\"100.64.0.3\"),\n\t\t\t\t\tUser: &types.User{Name: \"mickael\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"without rule all communications are forbidden\",\n\t\t\targs: args{\n\t\t\t\tnodes: types.Nodes{ // list of all nodes in the database\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tID:   1,\n\t\t\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\t\t\tUser: &types.User{Name: \"joe\"},\n\t\t\t\t\t},\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tID:   2,\n\t\t\t\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\t\t\t\tUser: &types.User{Name: \"marc\"},\n\t\t\t\t\t},\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tID:   3,\n\t\t\t\t\t\tIPv4: ap(\"100.64.0.3\"),\n\t\t\t\t\t\tUser: &types.User{Name: \"mickael\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{ // list of all ACLRules registered\n\t\t\t\t},\n\t\t\t\tnode: &types.Node{ // current nodes\n\t\t\t\t\tID:   2,\n\t\t\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\t\t\tUser: &types.User{Name: \"marc\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: nil,\n\t\t},\n\t\t{\n\t\t\t// Investigating 699\n\t\t\t// Found some nodes: [ts-head-8w6paa ts-unstable-lys2ib ts-head-upcrmb ts-unstable-rlwpvr] nodes=ts-head-8w6paa\n\t\t\t// ACL rules generated ACL=[{\"DstPorts\":[{\"Bits\":null,\"IP\":\"*\",\"Ports\":{\"First\":0,\"Last\":65535}}],\"SrcIPs\":[\"fd7a:115c:a1e0::3\",\"100.64.0.3\",\"fd7a:115c:a1e0::4\",\"100.64.0.4\"]}]\n\t\t\t// ACL Cache Map={\"100.64.0.3\":{\"*\":{}},\"100.64.0.4\":{\"*\":{}},\"fd7a:115c:a1e0::3\":{\"*\":{}},\"fd7a:115c:a1e0::4\":{\"*\":{}}}\n\t\t\tname: \"issue-699-broken-star\",\n\t\t\targs: args{\n\t\t\t\tnodes: types.Nodes{ //\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tID:       1,\n\t\t\t\t\t\tHostname: \"ts-head-upcrmb\",\n\t\t\t\t\t\tIPv4:     ap(\"100.64.0.3\"),\n\t\t\t\t\t\tIPv6:     ap(\"fd7a:115c:a1e0::3\"),\n\t\t\t\t\t\tUser:     &types.User{Name: \"user1\"},\n\t\t\t\t\t},\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tID:       2,\n\t\t\t\t\t\tHostname: \"ts-unstable-rlwpvr\",\n\t\t\t\t\t\tIPv4:     ap(\"100.64.0.4\"),\n\t\t\t\t\t\tIPv6:     ap(\"fd7a:115c:a1e0::4\"),\n\t\t\t\t\t\tUser:     &types.User{Name: 
\"user1\"},\n\t\t\t\t\t},\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tID:       3,\n\t\t\t\t\t\tHostname: \"ts-head-8w6paa\",\n\t\t\t\t\t\tIPv4:     ap(\"100.64.0.1\"),\n\t\t\t\t\t\tIPv6:     ap(\"fd7a:115c:a1e0::1\"),\n\t\t\t\t\t\tUser:     &types.User{Name: \"user2\"},\n\t\t\t\t\t},\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tID:       4,\n\t\t\t\t\t\tHostname: \"ts-unstable-lys2ib\",\n\t\t\t\t\t\tIPv4:     ap(\"100.64.0.2\"),\n\t\t\t\t\t\tIPv6:     ap(\"fd7a:115c:a1e0::2\"),\n\t\t\t\t\t\tUser:     &types.User{Name: \"user2\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{ // list of all ACLRules registered\n\t\t\t\t\t{\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tIP:    \"*\",\n\t\t\t\t\t\t\t\tPorts: tailcfg.PortRange{First: 0, Last: 65535},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::3\", \"100.64.0.3\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4\", \"100.64.0.4\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tnode: &types.Node{ // current nodes\n\t\t\t\t\tID:       3,\n\t\t\t\t\tHostname: \"ts-head-8w6paa\",\n\t\t\t\t\tIPv4:     ap(\"100.64.0.1\"),\n\t\t\t\t\tIPv6:     ap(\"fd7a:115c:a1e0::1\"),\n\t\t\t\t\tUser:     &types.User{Name: \"user2\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: types.Nodes{\n\t\t\t\t&types.Node{\n\t\t\t\t\tID:       1,\n\t\t\t\t\tHostname: \"ts-head-upcrmb\",\n\t\t\t\t\tIPv4:     ap(\"100.64.0.3\"),\n\t\t\t\t\tIPv6:     ap(\"fd7a:115c:a1e0::3\"),\n\t\t\t\t\tUser:     &types.User{Name: \"user1\"},\n\t\t\t\t},\n\t\t\t\t&types.Node{\n\t\t\t\t\tID:       2,\n\t\t\t\t\tHostname: \"ts-unstable-rlwpvr\",\n\t\t\t\t\tIPv4:     ap(\"100.64.0.4\"),\n\t\t\t\t\tIPv6:     ap(\"fd7a:115c:a1e0::4\"),\n\t\t\t\t\tUser:     &types.User{Name: \"user1\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"failing-edge-case-during-p3-refactor\",\n\t\t\targs: args{\n\t\t\t\tnodes: []*types.Node{\n\t\t\t\t\t{\n\t\t\t\t\t\tID:       1,\n\t\t\t\t\t\tIPv4:     ap(\"100.64.0.2\"),\n\t\t\t\t\t\tHostname: \"peer1\",\n\t\t\t\t\t\tUser:     &types.User{Name: \"mini\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tID:       2,\n\t\t\t\t\t\tIPv4:     ap(\"100.64.0.3\"),\n\t\t\t\t\t\tHostname: \"peer2\",\n\t\t\t\t\t\tUser:     &types.User{Name: \"peer2\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"100.64.0.1/32\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.64.0.3/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"::/0\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tnode: &types.Node{\n\t\t\t\t\tID:       0,\n\t\t\t\t\tIPv4:     ap(\"100.64.0.1\"),\n\t\t\t\t\tHostname: \"mini\",\n\t\t\t\t\tUser:     &types.User{Name: \"mini\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*types.Node{\n\t\t\t\t{\n\t\t\t\t\tID:       2,\n\t\t\t\t\tIPv4:     ap(\"100.64.0.3\"),\n\t\t\t\t\tHostname: \"peer2\",\n\t\t\t\t\tUser:     &types.User{Name: \"peer2\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"p4-host-in-netmap-user2-dest-bug\",\n\t\t\targs: args{\n\t\t\t\tnodes: []*types.Node{\n\t\t\t\t\t{\n\t\t\t\t\t\tID:       1,\n\t\t\t\t\t\tIPv4:     ap(\"100.64.0.2\"),\n\t\t\t\t\t\tHostname: \"user1-2\",\n\t\t\t\t\t\tUser:     &types.User{Name: \"user1\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tID:       0,\n\t\t\t\t\t\tIPv4:     ap(\"100.64.0.1\"),\n\t\t\t\t\t\tHostname: \"user1-1\",\n\t\t\t\t\t\tUser:     &types.User{Name: \"user1\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tID:       
3,\n\t\t\t\t\t\tIPv4:     ap(\"100.64.0.4\"),\n\t\t\t\t\t\tHostname: \"user2-2\",\n\t\t\t\t\t\tUser:     &types.User{Name: \"user2\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.3/32\",\n\t\t\t\t\t\t\t\"100.64.0.4/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::3/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.64.0.3/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.64.0.4/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::3/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::4/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.1/32\",\n\t\t\t\t\t\t\t\"100.64.0.2/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::1/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.64.0.3/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.64.0.4/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::3/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::4/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tnode: &types.Node{\n\t\t\t\t\tID:       2,\n\t\t\t\t\tIPv4:     ap(\"100.64.0.3\"),\n\t\t\t\t\tHostname: \"user-2-1\",\n\t\t\t\t\tUser:     &types.User{Name: \"user2\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*types.Node{\n\t\t\t\t{\n\t\t\t\t\tID:       1,\n\t\t\t\t\tIPv4:     ap(\"100.64.0.2\"),\n\t\t\t\t\tHostname: \"user1-2\",\n\t\t\t\t\tUser:     &types.User{Name: \"user1\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tID:       0,\n\t\t\t\t\tIPv4:     ap(\"100.64.0.1\"),\n\t\t\t\t\tHostname: \"user1-1\",\n\t\t\t\t\tUser:     &types.User{Name: \"user1\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tID:       3,\n\t\t\t\t\tIPv4:     ap(\"100.64.0.4\"),\n\t\t\t\t\tHostname: \"user2-2\",\n\t\t\t\t\tUser:     &types.User{Name: \"user2\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"p4-host-in-netmap-user1-dest-bug\",\n\t\t\targs: args{\n\t\t\t\tnodes: []*types.Node{\n\t\t\t\t\t{\n\t\t\t\t\t\tID:       1,\n\t\t\t\t\t\tIPv4:     ap(\"100.64.0.2\"),\n\t\t\t\t\t\tHostname: \"user1-2\",\n\t\t\t\t\t\tUser:     &types.User{Name: \"user1\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tID:       2,\n\t\t\t\t\t\tIPv4:     ap(\"100.64.0.3\"),\n\t\t\t\t\t\tHostname: \"user-2-1\",\n\t\t\t\t\t\tUser:     &types.User{Name: \"user2\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tID:       3,\n\t\t\t\t\t\tIPv4:     ap(\"100.64.0.4\"),\n\t\t\t\t\t\tHostname: \"user2-2\",\n\t\t\t\t\t\tUser:     &types.User{Name: \"user2\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.1/32\",\n\t\t\t\t\t\t\t\"100.64.0.2/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::1/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.64.0.1/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.64.0.2/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::1/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.64.0.1/32\",\n\t\t\t\t\t\t\t\"100.64.0.2/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::1/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.64.0.3/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.64.0.4/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::3/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::4/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tnode: &types.Node{\n\t\t\t\t\tID:       0,\n\t\t\t\t\tIPv4:     ap(\"100.64.0.1\"),\n\t\t\t\t\tHostname: \"user1-1\",\n\t\t\t\t\tUser:     &types.User{Name: \"user1\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*types.Node{\n\t\t\t\t{\n\t\t\t\t\tID:       1,\n\t\t\t\t\tIPv4:     ap(\"100.64.0.2\"),\n\t\t\t\t\tHostname: \"user1-2\",\n\t\t\t\t\tUser:     &types.User{Name: \"user1\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tID:       2,\n\t\t\t\t\tIPv4:     ap(\"100.64.0.3\"),\n\t\t\t\t\tHostname: \"user-2-1\",\n\t\t\t\t\tUser:     &types.User{Name: \"user2\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tID:       3,\n\t\t\t\t\tIPv4:     ap(\"100.64.0.4\"),\n\t\t\t\t\tHostname: \"user2-2\",\n\t\t\t\t\tUser:     &types.User{Name: \"user2\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"subnet-router-with-only-route\",\n\t\t\targs: args{\n\t\t\t\tnodes: []*types.Node{\n\t\t\t\t\t{\n\t\t\t\t\t\tID:       1,\n\t\t\t\t\t\tIPv4:     ap(\"100.64.0.1\"),\n\t\t\t\t\t\tHostname: \"user1\",\n\t\t\t\t\t\tUser:     &types.User{Name: \"user1\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tID:       2,\n\t\t\t\t\t\tIPv4:     ap(\"100.64.0.2\"),\n\t\t\t\t\t\tHostname: \"router\",\n\t\t\t\t\t\tUser:     &types.User{Name: \"router\"},\n\t\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\t\tRoutableIPs: []netip.Prefix{netip.MustParsePrefix(\"10.33.0.0/16\")},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tApprovedRoutes: []netip.Prefix{netip.MustParsePrefix(\"10.33.0.0/16\")},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.1/32\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tnode: &types.Node{\n\t\t\t\t\tID:       1,\n\t\t\t\t\tIPv4:     ap(\"100.64.0.1\"),\n\t\t\t\t\tHostname: \"user1\",\n\t\t\t\t\tUser:     &types.User{Name: \"user1\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*types.Node{\n\t\t\t\t{\n\t\t\t\t\tID:       2,\n\t\t\t\t\tIPv4:     ap(\"100.64.0.2\"),\n\t\t\t\t\tHostname: \"router\",\n\t\t\t\t\tUser:     &types.User{Name: \"router\"},\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tRoutableIPs: []netip.Prefix{netip.MustParsePrefix(\"10.33.0.0/16\")},\n\t\t\t\t\t},\n\t\t\t\t\tApprovedRoutes: []netip.Prefix{netip.MustParsePrefix(\"10.33.0.0/16\")},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"subnet-router-with-only-route-smaller-mask-2181\",\n\t\t\targs: args{\n\t\t\t\tnodes: []*types.Node{\n\t\t\t\t\t{\n\t\t\t\t\t\tID:       1,\n\t\t\t\t\t\tIPv4:     ap(\"100.64.0.1\"),\n\t\t\t\t\t\tHostname: \"router\",\n\t\t\t\t\t\tUser:     &types.User{Name: \"router\"},\n\t\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\t\tRoutableIPs: []netip.Prefix{netip.MustParsePrefix(\"10.99.0.0/16\")},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tApprovedRoutes: []netip.Prefix{netip.MustParsePrefix(\"10.99.0.0/16\")},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tID:       
2,\n\t\t\t\t\t\tIPv4:     ap(\"100.64.0.2\"),\n\t\t\t\t\t\tHostname: \"node\",\n\t\t\t\t\t\tUser:     &types.User{Name: \"node\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.2/32\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.99.0.2/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tnode: &types.Node{\n\t\t\t\t\tID:       1,\n\t\t\t\t\tIPv4:     ap(\"100.64.0.1\"),\n\t\t\t\t\tHostname: \"router\",\n\t\t\t\t\tUser:     &types.User{Name: \"router\"},\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tRoutableIPs: []netip.Prefix{netip.MustParsePrefix(\"10.99.0.0/16\")},\n\t\t\t\t\t},\n\t\t\t\t\tApprovedRoutes: []netip.Prefix{netip.MustParsePrefix(\"10.99.0.0/16\")},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*types.Node{\n\t\t\t\t{\n\t\t\t\t\tID:       2,\n\t\t\t\t\tIPv4:     ap(\"100.64.0.2\"),\n\t\t\t\t\tHostname: \"node\",\n\t\t\t\t\tUser:     &types.User{Name: \"node\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"node-to-subnet-router-with-only-route-smaller-mask-2181\",\n\t\t\targs: args{\n\t\t\t\tnodes: []*types.Node{\n\t\t\t\t\t{\n\t\t\t\t\t\tID:       1,\n\t\t\t\t\t\tIPv4:     ap(\"100.64.0.1\"),\n\t\t\t\t\t\tHostname: \"router\",\n\t\t\t\t\t\tUser:     &types.User{Name: \"router\"},\n\t\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\t\tRoutableIPs: []netip.Prefix{netip.MustParsePrefix(\"10.99.0.0/16\")},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tApprovedRoutes: []netip.Prefix{netip.MustParsePrefix(\"10.99.0.0/16\")},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tID:       2,\n\t\t\t\t\t\tIPv4:     ap(\"100.64.0.2\"),\n\t\t\t\t\t\tHostname: \"node\",\n\t\t\t\t\t\tUser:     &types.User{Name: \"node\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.2/32\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.99.0.2/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tnode: &types.Node{\n\t\t\t\t\tID:       2,\n\t\t\t\t\tIPv4:     ap(\"100.64.0.2\"),\n\t\t\t\t\tHostname: \"node\",\n\t\t\t\t\tUser:     &types.User{Name: \"node\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*types.Node{\n\t\t\t\t{\n\t\t\t\t\tID:       1,\n\t\t\t\t\tIPv4:     ap(\"100.64.0.1\"),\n\t\t\t\t\tHostname: \"router\",\n\t\t\t\t\tUser:     &types.User{Name: \"router\"},\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tRoutableIPs: []netip.Prefix{netip.MustParsePrefix(\"10.99.0.0/16\")},\n\t\t\t\t\t},\n\t\t\t\t\tApprovedRoutes: []netip.Prefix{netip.MustParsePrefix(\"10.99.0.0/16\")},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tmatchers := matcher.MatchesFromFilterRules(tt.args.rules)\n\t\t\tgotViews := ReduceNodes(\n\t\t\t\ttt.args.node.View(),\n\t\t\t\ttt.args.nodes.ViewSlice(),\n\t\t\t\tmatchers,\n\t\t\t)\n\t\t\t// Convert views back to nodes for comparison in tests\n\t\t\tvar got types.Nodes\n\t\t\tfor _, v := range gotViews.All() {\n\t\t\t\tgot = append(got, v.AsStruct())\n\t\t\t}\n\n\t\t\tif diff := cmp.Diff(tt.want, got, util.Comparers...); diff != \"\" {\n\t\t\t\tt.Errorf(\"ReduceNodes() unexpected result (-want +got):\\n%s\", diff)\n\t\t\t\tt.Log(\"Matchers: \")\n\n\t\t\t\tfor _, m := range matchers {\n\t\t\t\t\tt.Log(\"\\t+\", m.DebugString())\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestReduceNodesFromPolicy(t *testing.T) 
{\n\tn := func(id types.NodeID, ip, hostname, username string, routess ...string) *types.Node {\n\t\troutes := make([]netip.Prefix, 0, len(routess))\n\t\tfor _, route := range routess {\n\t\t\troutes = append(routes, netip.MustParsePrefix(route))\n\t\t}\n\n\t\treturn &types.Node{\n\t\t\tID:       id,\n\t\t\tIPv4:     ap(ip),\n\t\t\tHostname: hostname,\n\t\t\tUser:     &types.User{Name: username},\n\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\tRoutableIPs: routes,\n\t\t\t},\n\t\t\tApprovedRoutes: routes,\n\t\t}\n\t}\n\n\ttests := []struct {\n\t\tname         string\n\t\tnodes        types.Nodes\n\t\tpolicy       string\n\t\tnode         *types.Node\n\t\twant         types.Nodes\n\t\twantMatchers int\n\t}{\n\t\t{\n\t\t\tname: \"2788-exit-node-too-visible\",\n\t\t\tnodes: types.Nodes{\n\t\t\t\tn(1, \"100.64.0.1\", \"mobile\", \"mobile\"),\n\t\t\t\tn(2, \"100.64.0.2\", \"server\", \"server\"),\n\t\t\t\tn(3, \"100.64.0.3\", \"exit\", \"server\", \"0.0.0.0/0\", \"::/0\"),\n\t\t\t},\n\t\t\tpolicy: `\n{\n  \"hosts\": {\n    \"mobile\": \"100.64.0.1/32\",\n    \"server\": \"100.64.0.2/32\",\n    \"exit\": \"100.64.0.3/32\"\n  },\n\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"mobile\"\n      ],\n      \"dst\": [\n        \"server:80\"\n      ]\n    }\n  ]\n}`,\n\t\t\tnode: n(1, \"100.64.0.1\", \"mobile\", \"mobile\"),\n\t\t\twant: types.Nodes{\n\t\t\t\tn(2, \"100.64.0.2\", \"server\", \"server\"),\n\t\t\t},\n\t\t\twantMatchers: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"2788-exit-node-autogroup:internet\",\n\t\t\tnodes: types.Nodes{\n\t\t\t\tn(1, \"100.64.0.1\", \"mobile\", \"mobile\"),\n\t\t\t\tn(2, \"100.64.0.2\", \"server\", \"server\"),\n\t\t\t\tn(3, \"100.64.0.3\", \"exit\", \"server\", \"0.0.0.0/0\", \"::/0\"),\n\t\t\t},\n\t\t\tpolicy: `\n{\n  \"hosts\": {\n    \"mobile\": \"100.64.0.1/32\",\n    \"server\": \"100.64.0.2/32\",\n    \"exit\": \"100.64.0.3/32\"\n  },\n\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"mobile\"\n      ],\n      \"dst\": [\n        \"server:80\"\n      ]\n    },\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"mobile\"\n      ],\n      \"dst\": [\n        \"autogroup:internet:*\"\n      ]\n    }\n  ]\n}`,\n\t\t\tnode: n(1, \"100.64.0.1\", \"mobile\", \"mobile\"),\n\t\t\t// autogroup:internet does not generate packet filters - it's handled\n\t\t\t// by exit node routing via AllowedIPs, not by packet filtering.\n\t\t\t// Only server is visible through the mobile -> server:80 rule.\n\t\t\twant: types.Nodes{\n\t\t\t\tn(2, \"100.64.0.2\", \"server\", \"server\"),\n\t\t\t},\n\t\t\twantMatchers: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"2788-exit-node-0000-route\",\n\t\t\tnodes: types.Nodes{\n\t\t\t\tn(1, \"100.64.0.1\", \"mobile\", \"mobile\"),\n\t\t\t\tn(2, \"100.64.0.2\", \"server\", \"server\"),\n\t\t\t\tn(3, \"100.64.0.3\", \"exit\", \"server\", \"0.0.0.0/0\", \"::/0\"),\n\t\t\t},\n\t\t\tpolicy: `\n{\n  \"hosts\": {\n    \"mobile\": \"100.64.0.1/32\",\n    \"server\": \"100.64.0.2/32\",\n    \"exit\": \"100.64.0.3/32\"\n  },\n\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"mobile\"\n      ],\n      \"dst\": [\n        \"server:80\"\n      ]\n    },\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"mobile\"\n      ],\n      \"dst\": [\n        \"0.0.0.0/0:*\"\n      ]\n    }\n  ]\n}`,\n\t\t\tnode: n(1, \"100.64.0.1\", \"mobile\", \"mobile\"),\n\t\t\twant: types.Nodes{\n\t\t\t\tn(2, \"100.64.0.2\", \"server\", \"server\"),\n\t\t\t\tn(3, \"100.64.0.3\", 
\"exit\", \"server\", \"0.0.0.0/0\", \"::/0\"),\n\t\t\t},\n\t\t\twantMatchers: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"2788-exit-node-::0-route\",\n\t\t\tnodes: types.Nodes{\n\t\t\t\tn(1, \"100.64.0.1\", \"mobile\", \"mobile\"),\n\t\t\t\tn(2, \"100.64.0.2\", \"server\", \"server\"),\n\t\t\t\tn(3, \"100.64.0.3\", \"exit\", \"server\", \"0.0.0.0/0\", \"::/0\"),\n\t\t\t},\n\t\t\tpolicy: `\n{\n  \"hosts\": {\n    \"mobile\": \"100.64.0.1/32\",\n    \"server\": \"100.64.0.2/32\",\n    \"exit\": \"100.64.0.3/32\"\n  },\n\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"mobile\"\n      ],\n      \"dst\": [\n        \"server:80\"\n      ]\n    },\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"mobile\"\n      ],\n      \"dst\": [\n        \"::0/0:*\"\n      ]\n    }\n  ]\n}`,\n\t\t\tnode: n(1, \"100.64.0.1\", \"mobile\", \"mobile\"),\n\t\t\twant: types.Nodes{\n\t\t\t\tn(2, \"100.64.0.2\", \"server\", \"server\"),\n\t\t\t\tn(3, \"100.64.0.3\", \"exit\", \"server\", \"0.0.0.0/0\", \"::/0\"),\n\t\t\t},\n\t\t\twantMatchers: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"2784-split-exit-node-access\",\n\t\t\tnodes: types.Nodes{\n\t\t\t\tn(1, \"100.64.0.1\", \"user\", \"user\"),\n\t\t\t\tn(2, \"100.64.0.2\", \"exit1\", \"exit\", \"0.0.0.0/0\", \"::/0\"),\n\t\t\t\tn(3, \"100.64.0.3\", \"exit2\", \"exit\", \"0.0.0.0/0\", \"::/0\"),\n\t\t\t\tn(4, \"100.64.0.4\", \"otheruser\", \"otheruser\"),\n\t\t\t},\n\t\t\tpolicy: `\n{\n  \"hosts\": {\n    \"user\": \"100.64.0.1/32\",\n    \"exit1\": \"100.64.0.2/32\",\n    \"exit2\": \"100.64.0.3/32\",\n    \"otheruser\": \"100.64.0.4/32\",\n  },\n\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"user\"\n      ],\n      \"dst\": [\n        \"exit1:*\"\n      ]\n    },\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"otheruser\"\n      ],\n      \"dst\": [\n        \"exit2:*\"\n      ]\n    }\n  ]\n}`,\n\t\t\tnode: n(1, \"100.64.0.1\", \"user\", \"user\"),\n\t\t\twant: types.Nodes{\n\t\t\t\tn(2, \"100.64.0.2\", \"exit1\", \"exit\", \"0.0.0.0/0\", \"::/0\"),\n\t\t\t},\n\t\t\twantMatchers: 2,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tfor idx, pmf := range PolicyManagerFuncsForTest([]byte(tt.policy)) {\n\t\t\tt.Run(fmt.Sprintf(\"%s-index%d\", tt.name, idx), func(t *testing.T) {\n\t\t\t\tvar (\n\t\t\t\t\tpm  PolicyManager\n\t\t\t\t\terr error\n\t\t\t\t)\n\n\t\t\t\tpm, err = pmf(nil, tt.nodes.ViewSlice())\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tmatchers, err := pm.MatchersForNode(tt.node.View())\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.Len(t, matchers, tt.wantMatchers)\n\n\t\t\t\tgotViews := ReduceNodes(\n\t\t\t\t\ttt.node.View(),\n\t\t\t\t\ttt.nodes.ViewSlice(),\n\t\t\t\t\tmatchers,\n\t\t\t\t)\n\t\t\t\t// Convert views back to nodes for comparison in tests\n\t\t\t\tvar got types.Nodes\n\t\t\t\tfor _, v := range gotViews.All() {\n\t\t\t\t\tgot = append(got, v.AsStruct())\n\t\t\t\t}\n\n\t\t\t\tif diff := cmp.Diff(tt.want, got, util.Comparers...); diff != \"\" {\n\t\t\t\t\tt.Errorf(\"TestReduceNodesFromPolicy() unexpected result (-want +got):\\n%s\", diff)\n\t\t\t\t\tt.Log(\"Matchers: \")\n\n\t\t\t\t\tfor _, m := range matchers {\n\t\t\t\t\t\tt.Log(\"\\t+\", m.DebugString())\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}\n\nfunc TestSSHPolicyRules(t *testing.T) {\n\tusers := []types.User{\n\t\t{Name: \"user1\", Model: gorm.Model{ID: 1}},\n\t\t{Name: \"user2\", Model: gorm.Model{ID: 2}},\n\t\t{Name: \"user3\", Model: gorm.Model{ID: 3}},\n\t\t{Name: \"alice\", Email: 
\"alice@example.com\", Model: gorm.Model{ID: 4}},\n\t\t{Name: \"bob\", Email: \"bob@example.com\", Model: gorm.Model{ID: 5}},\n\t}\n\n\t// Create standard node setups used across tests\n\tnodeUser1 := types.Node{\n\t\tHostname: \"user1-device\",\n\t\tIPv4:     ap(\"100.64.0.1\"),\n\t\tUserID:   new(uint(1)),\n\t\tUser:     new(users[0]),\n\t}\n\tnodeUser2 := types.Node{\n\t\tHostname: \"user2-device\",\n\t\tIPv4:     ap(\"100.64.0.2\"),\n\t\tUserID:   new(uint(2)),\n\t\tUser:     new(users[1]),\n\t}\n\n\ttaggedClient := types.Node{\n\t\tHostname: \"tagged-client\",\n\t\tIPv4:     ap(\"100.64.0.4\"),\n\t\tUserID:   new(uint(2)),\n\t\tUser:     new(users[1]),\n\t\tTags:     []string{\"tag:client\"},\n\t}\n\n\t// Create a tagged server node for valid SSH patterns\n\tnodeTaggedServer := types.Node{\n\t\tHostname: \"tagged-server\",\n\t\tIPv4:     ap(\"100.64.0.5\"),\n\t\tUserID:   new(uint(1)),\n\t\tUser:     new(users[0]),\n\t\tTags:     []string{\"tag:server\"},\n\t}\n\n\t// Nodes for localpart tests (users with email addresses)\n\tnodeAlice := types.Node{\n\t\tHostname: \"alice-device\",\n\t\tIPv4:     ap(\"100.64.0.6\"),\n\t\tUserID:   new(uint(4)),\n\t\tUser:     new(users[3]),\n\t}\n\tnodeBob := types.Node{\n\t\tHostname: \"bob-device\",\n\t\tIPv4:     ap(\"100.64.0.7\"),\n\t\tUserID:   new(uint(5)),\n\t\tUser:     new(users[4]),\n\t}\n\n\ttests := []struct {\n\t\tname         string\n\t\ttargetNode   types.Node\n\t\tpeers        types.Nodes\n\t\tpolicy       string\n\t\twantSSH      *tailcfg.SSHPolicy\n\t\texpectErr    bool\n\t\terrorMessage string\n\t}{\n\t\t{\n\t\t\tname:       \"group-to-tag\",\n\t\t\ttargetNode: nodeTaggedServer,\n\t\t\tpeers:      types.Nodes{&nodeUser2},\n\t\t\tpolicy: `{\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"user2@\"]\n\t\t\t\t},\n\t\t\t\t\"ssh\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\t\t\"src\": [\"group:admins\"],\n\t\t\t\t\t\t\"dst\": [\"tag:server\"],\n\t\t\t\t\t\t\"users\": [\"autogroup:nonroot\"]\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantSSH: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{\n\t\t\t\t\t\t{NodeIP: \"100.64.0.2\"},\n\t\t\t\t\t},\n\t\t\t\t\tSSHUsers: map[string]string{\n\t\t\t\t\t\t\"*\":    \"=\",\n\t\t\t\t\t\t\"root\": \"\",\n\t\t\t\t\t},\n\t\t\t\t\tAction: &tailcfg.SSHAction{\n\t\t\t\t\t\tAccept:                    true,\n\t\t\t\t\t\tAllowAgentForwarding:      true,\n\t\t\t\t\t\tAllowLocalPortForwarding:  true,\n\t\t\t\t\t\tAllowRemotePortForwarding: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t\t{\n\t\t\tname:       \"check-period-specified\",\n\t\t\ttargetNode: taggedClient,\n\t\t\tpeers:      types.Nodes{&nodeUser2},\n\t\t\tpolicy: `{\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:client\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"user2@\"]\n\t\t\t\t},\n\t\t\t\t\"ssh\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"action\": \"check\",\n\t\t\t\t\t\t\"checkPeriod\": \"24h\",\n\t\t\t\t\t\t\"src\": [\"group:admins\"],\n\t\t\t\t\t\t\"dst\": [\"tag:client\"],\n\t\t\t\t\t\t\"users\": [\"autogroup:nonroot\"]\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantSSH: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{\n\t\t\t\t\t\t{NodeIP: \"100.64.0.2\"},\n\t\t\t\t\t},\n\t\t\t\t\tSSHUsers: map[string]string{\n\t\t\t\t\t\t\"*\":    \"=\",\n\t\t\t\t\t\t\"root\": \"\",\n\t\t\t\t\t},\n\t\t\t\t\tAction: 
&tailcfg.SSHAction{\n\t\t\t\t\t\tAccept:                    false,\n\t\t\t\t\t\tSessionDuration:           24 * time.Hour,\n\t\t\t\t\t\tHoldAndDelegate:           \"unused-url/machine/ssh/action/from/$SRC_NODE_ID/to/$DST_NODE_ID?ssh_user=$SSH_USER&local_user=$LOCAL_USER\",\n\t\t\t\t\t\tAllowAgentForwarding:      true,\n\t\t\t\t\t\tAllowLocalPortForwarding:  true,\n\t\t\t\t\t\tAllowRemotePortForwarding: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t\t{\n\t\t\tname:       \"no-matching-rules\",\n\t\t\ttargetNode: nodeUser2,\n\t\t\tpeers:      types.Nodes{&nodeUser1, &nodeTaggedServer},\n\t\t\tpolicy: `{\n\t\t\t    \"tagOwners\": {\n\t\t\t    \t\"tag:server\": [\"user1@\"]\n\t\t\t    },\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"ssh\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\t\t\"src\": [\"group:admins\"],\n\t\t\t\t\t\t\"dst\": [\"tag:server\"],\n\t\t\t\t\t\t\"users\": [\"autogroup:nonroot\"]\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantSSH: &tailcfg.SSHPolicy{Rules: nil},\n\t\t},\n\t\t{\n\t\t\tname:       \"invalid-action\",\n\t\t\ttargetNode: nodeTaggedServer,\n\t\t\tpeers:      types.Nodes{&nodeUser2},\n\t\t\tpolicy: `{\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"user2@\"]\n\t\t\t\t},\n\t\t\t\t\"ssh\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"action\": \"invalid\",\n\t\t\t\t\t\t\"src\": [\"group:admins\"],\n\t\t\t\t\t\t\"dst\": [\"tag:server\"],\n\t\t\t\t\t\t\"users\": [\"autogroup:nonroot\"]\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\texpectErr:    true,\n\t\t\terrorMessage: `invalid SSH action: \"invalid\", must be one of: accept, check`,\n\t\t},\n\t\t{\n\t\t\tname:       \"invalid-check-period\",\n\t\t\ttargetNode: nodeTaggedServer,\n\t\t\tpeers:      types.Nodes{&nodeUser2},\n\t\t\tpolicy: `{\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"user2@\"]\n\t\t\t\t},\n\t\t\t\t\"ssh\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"action\": \"check\",\n\t\t\t\t\t\t\"checkPeriod\": \"invalid\",\n\t\t\t\t\t\t\"src\": [\"group:admins\"],\n\t\t\t\t\t\t\"dst\": [\"tag:server\"],\n\t\t\t\t\t\t\"users\": [\"autogroup:nonroot\"]\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\texpectErr:    true,\n\t\t\terrorMessage: \"not a valid duration string\",\n\t\t},\n\t\t{\n\t\t\tname:       \"unsupported-autogroup\",\n\t\t\ttargetNode: taggedClient,\n\t\t\tpeers:      types.Nodes{&nodeUser2},\n\t\t\tpolicy: `{\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:client\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"user2@\"]\n\t\t\t\t},\n\t\t\t\t\"ssh\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\t\t\"src\": [\"group:admins\"],\n\t\t\t\t\t\t\"dst\": [\"tag:client\"],\n\t\t\t\t\t\t\"users\": [\"autogroup:invalid\"]\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\texpectErr:    true,\n\t\t\terrorMessage: \"autogroup not supported for SSH user\",\n\t\t},\n\t\t{\n\t\t\tname:       \"autogroup-nonroot-should-use-wildcard-with-root-excluded\",\n\t\t\ttargetNode: nodeTaggedServer,\n\t\t\tpeers:      types.Nodes{&nodeUser2},\n\t\t\tpolicy: `{\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"user2@\"]\n\t\t\t\t},\n\t\t\t\t\"ssh\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\t\t\"src\": [\"group:admins\"],\n\t\t\t\t\t\t\"dst\": [\"tag:server\"],\n\t\t\t\t\t\t\"users\": 
[\"autogroup:nonroot\"]\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\t// autogroup:nonroot should map to wildcard \"*\" with root excluded\n\t\t\twantSSH: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{\n\t\t\t\t\t\t{NodeIP: \"100.64.0.2\"},\n\t\t\t\t\t},\n\t\t\t\t\tSSHUsers: map[string]string{\n\t\t\t\t\t\t\"*\":    \"=\",\n\t\t\t\t\t\t\"root\": \"\",\n\t\t\t\t\t},\n\t\t\t\t\tAction: &tailcfg.SSHAction{\n\t\t\t\t\t\tAccept:                    true,\n\t\t\t\t\t\tAllowAgentForwarding:      true,\n\t\t\t\t\t\tAllowLocalPortForwarding:  true,\n\t\t\t\t\t\tAllowRemotePortForwarding: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t\t{\n\t\t\tname:       \"autogroup-nonroot-plus-root-should-use-wildcard-with-root-mapped\",\n\t\t\ttargetNode: nodeTaggedServer,\n\t\t\tpeers:      types.Nodes{&nodeUser2},\n\t\t\tpolicy: `{\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"user2@\"]\n\t\t\t\t},\n\t\t\t\t\"ssh\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\t\t\"src\": [\"group:admins\"],\n\t\t\t\t\t\t\"dst\": [\"tag:server\"],\n\t\t\t\t\t\t\"users\": [\"autogroup:nonroot\", \"root\"]\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\t// autogroup:nonroot + root should map to wildcard \"*\" with root mapped to itself\n\t\t\twantSSH: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{\n\t\t\t\t\t\t{NodeIP: \"100.64.0.2\"},\n\t\t\t\t\t},\n\t\t\t\t\tSSHUsers: map[string]string{\n\t\t\t\t\t\t\"*\":    \"=\",\n\t\t\t\t\t\t\"root\": \"root\",\n\t\t\t\t\t},\n\t\t\t\t\tAction: &tailcfg.SSHAction{\n\t\t\t\t\t\tAccept:                    true,\n\t\t\t\t\t\tAllowAgentForwarding:      true,\n\t\t\t\t\t\tAllowLocalPortForwarding:  true,\n\t\t\t\t\t\tAllowRemotePortForwarding: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t\t{\n\t\t\tname:       \"specific-users-should-map-to-themselves-not-equals\",\n\t\t\ttargetNode: nodeTaggedServer,\n\t\t\tpeers:      types.Nodes{&nodeUser2},\n\t\t\tpolicy: `{\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"user2@\"]\n\t\t\t\t},\n\t\t\t\t\"ssh\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\t\t\"src\": [\"group:admins\"],\n\t\t\t\t\t\t\"dst\": [\"tag:server\"],\n\t\t\t\t\t\t\"users\": [\"ubuntu\", \"root\"]\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\t// specific usernames should map to themselves, not \"=\"\n\t\t\twantSSH: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{\n\t\t\t\t\t\t{NodeIP: \"100.64.0.2\"},\n\t\t\t\t\t},\n\t\t\t\t\tSSHUsers: map[string]string{\n\t\t\t\t\t\t\"root\":   \"root\",\n\t\t\t\t\t\t\"ubuntu\": \"ubuntu\",\n\t\t\t\t\t},\n\t\t\t\t\tAction: &tailcfg.SSHAction{\n\t\t\t\t\t\tAccept:                    true,\n\t\t\t\t\t\tAllowAgentForwarding:      true,\n\t\t\t\t\t\tAllowLocalPortForwarding:  true,\n\t\t\t\t\t\tAllowRemotePortForwarding: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t\t{\n\t\t\tname:       \"2863-allow-predefined-missing-users\",\n\t\t\ttargetNode: taggedClient,\n\t\t\tpeers:      types.Nodes{&nodeUser2},\n\t\t\tpolicy: `{\n  \"groups\": {\n   \"group:example-infra\": [\n      \"user2@\",\n      \"not-created-yet@\",\n    ],\n  },\n  \"tagOwners\": {\n    \"tag:client\": [\n      \"user2@\"\n    ],\n  },\n  \"ssh\": [\n    // Allow infra to ssh to tag:example-infra server 
as debian\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"group:example-infra\"\n      ],\n      \"dst\": [\n        \"tag:client\",\n      ],\n      \"users\": [\n        \"debian\",\n      ],\n    },\n  ],\n}`,\n\t\t\twantSSH: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{\n\t\t\t\t\t\t{NodeIP: \"100.64.0.2\"},\n\t\t\t\t\t},\n\t\t\t\t\tSSHUsers: map[string]string{\n\t\t\t\t\t\t\"debian\": \"debian\",\n\t\t\t\t\t\t\"root\":   \"\",\n\t\t\t\t\t},\n\t\t\t\t\tAction: &tailcfg.SSHAction{\n\t\t\t\t\t\tAccept:                    true,\n\t\t\t\t\t\tAllowAgentForwarding:      true,\n\t\t\t\t\t\tAllowLocalPortForwarding:  true,\n\t\t\t\t\t\tAllowRemotePortForwarding: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t\t{\n\t\t\tname:       \"localpart-maps-email-to-os-user\",\n\t\t\ttargetNode: nodeTaggedServer,\n\t\t\tpeers:      types.Nodes{&nodeAlice, &nodeBob},\n\t\t\tpolicy: `{\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"alice@example.com\"]\n\t\t\t\t},\n\t\t\t\t\"ssh\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\t\t\"src\": [\"autogroup:member\"],\n\t\t\t\t\t\t\"dst\": [\"tag:server\"],\n\t\t\t\t\t\t\"users\": [\"localpart:*@example.com\"]\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\t// Per-user common+localpart interleaved: each user gets root deny then localpart.\n\t\t\twantSSH: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.6\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"root\": \"\"},\n\t\t\t\t\tAction: &tailcfg.SSHAction{\n\t\t\t\t\t\tAccept:                    true,\n\t\t\t\t\t\tAllowAgentForwarding:      true,\n\t\t\t\t\t\tAllowLocalPortForwarding:  true,\n\t\t\t\t\t\tAllowRemotePortForwarding: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.6\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"alice\": \"alice\"},\n\t\t\t\t\tAction: &tailcfg.SSHAction{\n\t\t\t\t\t\tAccept:                    true,\n\t\t\t\t\t\tAllowAgentForwarding:      true,\n\t\t\t\t\t\tAllowLocalPortForwarding:  true,\n\t\t\t\t\t\tAllowRemotePortForwarding: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.7\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"root\": \"\"},\n\t\t\t\t\tAction: &tailcfg.SSHAction{\n\t\t\t\t\t\tAccept:                    true,\n\t\t\t\t\t\tAllowAgentForwarding:      true,\n\t\t\t\t\t\tAllowLocalPortForwarding:  true,\n\t\t\t\t\t\tAllowRemotePortForwarding: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.7\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"bob\": \"bob\"},\n\t\t\t\t\tAction: &tailcfg.SSHAction{\n\t\t\t\t\t\tAccept:                    true,\n\t\t\t\t\t\tAllowAgentForwarding:      true,\n\t\t\t\t\t\tAllowLocalPortForwarding:  true,\n\t\t\t\t\t\tAllowRemotePortForwarding: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t\t{\n\t\t\tname:       \"localpart-combined-with-root\",\n\t\t\ttargetNode: nodeTaggedServer,\n\t\t\tpeers:      types.Nodes{&nodeAlice},\n\t\t\tpolicy: `{\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"alice@example.com\"]\n\t\t\t\t},\n\t\t\t\t\"ssh\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\t\t\"src\": [\"autogroup:member\"],\n\t\t\t\t\t\t\"dst\": [\"tag:server\"],\n\t\t\t\t\t\t\"users\": [\"localpart:*@example.com\", 
\"root\"]\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\t// Common root rule followed by alice's per-user localpart rule (interleaved).\n\t\t\twantSSH: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.6\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"root\": \"root\"},\n\t\t\t\t\tAction: &tailcfg.SSHAction{\n\t\t\t\t\t\tAccept:                    true,\n\t\t\t\t\t\tAllowAgentForwarding:      true,\n\t\t\t\t\t\tAllowLocalPortForwarding:  true,\n\t\t\t\t\t\tAllowRemotePortForwarding: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.6\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"alice\": \"alice\"},\n\t\t\t\t\tAction: &tailcfg.SSHAction{\n\t\t\t\t\t\tAccept:                    true,\n\t\t\t\t\t\tAllowAgentForwarding:      true,\n\t\t\t\t\t\tAllowLocalPortForwarding:  true,\n\t\t\t\t\t\tAllowRemotePortForwarding: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tfor idx, pmf := range PolicyManagerFuncsForTest([]byte(tt.policy)) {\n\t\t\tt.Run(fmt.Sprintf(\"%s-index%d\", tt.name, idx), func(t *testing.T) {\n\t\t\t\tvar (\n\t\t\t\t\tpm  PolicyManager\n\t\t\t\t\terr error\n\t\t\t\t)\n\n\t\t\t\tpm, err = pmf(users, append(tt.peers, &tt.targetNode).ViewSlice())\n\n\t\t\t\tif tt.expectErr {\n\t\t\t\t\trequire.Error(t, err)\n\t\t\t\t\trequire.Contains(t, err.Error(), tt.errorMessage)\n\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tgot, err := pm.SSHPolicy(\"unused-url\", tt.targetNode.View())\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tif diff := cmp.Diff(tt.wantSSH, got); diff != \"\" {\n\t\t\t\t\tt.Errorf(\"SSHPolicy() unexpected result (-want +got):\\n%s\", diff)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}\n\nfunc TestReduceRoutes(t *testing.T) {\n\ttype args struct {\n\t\tnode   *types.Node\n\t\troutes []netip.Prefix\n\t\trules  []tailcfg.FilterRule\n\t}\n\n\ttests := []struct {\n\t\tname string\n\t\targs args\n\t\twant []netip.Prefix\n\t}{\n\t\t{\n\t\t\tname: \"node-can-access-all-routes\",\n\t\t\targs: args{\n\t\t\t\tnode: &types.Node{\n\t\t\t\t\tID:   1,\n\t\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\t\tUser: &types.User{Name: \"user1\"},\n\t\t\t\t},\n\t\t\t\troutes: []netip.Prefix{\n\t\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"192.168.1.0/24\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"172.16.0.0/16\"),\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"100.64.0.1\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t\tnetip.MustParsePrefix(\"192.168.1.0/24\"),\n\t\t\t\tnetip.MustParsePrefix(\"172.16.0.0/16\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"node-can-access-specific-route\",\n\t\t\targs: args{\n\t\t\t\tnode: &types.Node{\n\t\t\t\t\tID:   1,\n\t\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\t\tUser: &types.User{Name: \"user1\"},\n\t\t\t\t},\n\t\t\t\troutes: []netip.Prefix{\n\t\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"192.168.1.0/24\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"172.16.0.0/16\"),\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"100.64.0.1\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: 
\"10.0.0.0/24\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"node-can-access-multiple-specific-routes\",\n\t\t\targs: args{\n\t\t\t\tnode: &types.Node{\n\t\t\t\t\tID:   1,\n\t\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\t\tUser: &types.User{Name: \"user1\"},\n\t\t\t\t},\n\t\t\t\troutes: []netip.Prefix{\n\t\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"192.168.1.0/24\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"172.16.0.0/16\"),\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"100.64.0.1\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.0.0.0/24\"},\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t\tnetip.MustParsePrefix(\"192.168.1.0/24\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"node-can-access-overlapping-routes\",\n\t\t\targs: args{\n\t\t\t\tnode: &types.Node{\n\t\t\t\t\tID:   1,\n\t\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\t\tUser: &types.User{Name: \"user1\"},\n\t\t\t\t},\n\t\t\t\troutes: []netip.Prefix{\n\t\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/16\"), // Overlaps with the first one\n\t\t\t\t\tnetip.MustParsePrefix(\"192.168.1.0/24\"),\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"100.64.0.1\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.0.0.0/16\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/16\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"node-with-no-matching-rules\",\n\t\t\targs: args{\n\t\t\t\tnode: &types.Node{\n\t\t\t\t\tID:   1,\n\t\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\t\tUser: &types.User{Name: \"user1\"},\n\t\t\t\t},\n\t\t\t\troutes: []netip.Prefix{\n\t\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"192.168.1.0/24\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"172.16.0.0/16\"),\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"100.64.0.2\"}, // Different source IP\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"node-with-both-ipv4-and-ipv6\",\n\t\t\targs: args{\n\t\t\t\tnode: &types.Node{\n\t\t\t\t\tID:   1,\n\t\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\t\tIPv6: ap(\"fd7a:115c:a1e0::1\"),\n\t\t\t\t\tUser: &types.User{Name: \"user1\"},\n\t\t\t\t},\n\t\t\t\troutes: []netip.Prefix{\n\t\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"2001:db8::/64\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"192.168.1.0/24\"),\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"fd7a:115c:a1e0::1\"}, // IPv6 source\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"2001:db8::/64\"}, // IPv6 destination\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"100.64.0.1\"}, // IPv4 source\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.0.0.0/24\"}, // IPv4 destination\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: 
[]netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\t\t\tnetip.MustParsePrefix(\"2001:db8::/64\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"router-with-multiple-routes-and-node-with-specific-access\",\n\t\t\targs: args{\n\t\t\t\tnode: &types.Node{\n\t\t\t\t\tID:   2,\n\t\t\t\t\tIPv4: ap(\"100.64.0.2\"), // Node IP\n\t\t\t\t\tUser: &types.User{Name: \"node\"},\n\t\t\t\t},\n\t\t\t\troutes: []netip.Prefix{\n\t\t\t\t\tnetip.MustParsePrefix(\"10.10.10.0/24\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"10.10.11.0/24\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"10.10.12.0/24\"),\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"*\"}, // Any source\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.64.0.1\"}, // Router node\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"100.64.0.2\"}, // Node IP\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.10.10.0/24\"}, // Only one subnet allowed\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.10.10.0/24\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"node-with-access-to-one-subnet-and-partial-overlap\",\n\t\t\targs: args{\n\t\t\t\tnode: &types.Node{\n\t\t\t\t\tID:   2,\n\t\t\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\t\t\tUser: &types.User{Name: \"node\"},\n\t\t\t\t},\n\t\t\t\troutes: []netip.Prefix{\n\t\t\t\t\tnetip.MustParsePrefix(\"10.10.10.0/24\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"10.10.11.0/24\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"10.10.10.0/16\"), // Overlaps with the first one\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"100.64.0.2\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.10.10.0/24\"}, // Only specific subnet\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.10.10.0/24\"),\n\t\t\t\tnetip.MustParsePrefix(\"10.10.10.0/16\"), // With current implementation, this is included because it overlaps with the allowed subnet\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"node-with-access-to-wildcard-subnet\",\n\t\t\targs: args{\n\t\t\t\tnode: &types.Node{\n\t\t\t\t\tID:   2,\n\t\t\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\t\t\tUser: &types.User{Name: \"node\"},\n\t\t\t\t},\n\t\t\t\troutes: []netip.Prefix{\n\t\t\t\t\tnetip.MustParsePrefix(\"10.10.10.0/24\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"10.10.11.0/24\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"10.10.12.0/24\"),\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"100.64.0.2\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.10.0.0/16\"}, // Broader subnet that includes all three\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.10.10.0/24\"),\n\t\t\t\tnetip.MustParsePrefix(\"10.10.11.0/24\"),\n\t\t\t\tnetip.MustParsePrefix(\"10.10.12.0/24\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiple-nodes-with-different-subnet-permissions\",\n\t\t\targs: args{\n\t\t\t\tnode: &types.Node{\n\t\t\t\t\tID:   2,\n\t\t\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\t\t\tUser: &types.User{Name: \"node\"},\n\t\t\t\t},\n\t\t\t\troutes: []netip.Prefix{\n\t\t\t\t\tnetip.MustParsePrefix(\"10.10.10.0/24\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"10.10.11.0/24\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"10.10.12.0/24\"),\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\"100.64.0.1\"}, // Different node\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.10.11.0/24\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"100.64.0.2\"}, // Our node\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.10.10.0/24\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"100.64.0.3\"}, // Different node\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.10.12.0/24\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.10.10.0/24\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"exactly-matching-users-acl-example\",\n\t\t\targs: args{\n\t\t\t\tnode: &types.Node{\n\t\t\t\t\tID:   2,\n\t\t\t\t\tIPv4: ap(\"100.64.0.2\"), // node with IP 100.64.0.2\n\t\t\t\t\tUser: &types.User{Name: \"node\"},\n\t\t\t\t},\n\t\t\t\troutes: []netip.Prefix{\n\t\t\t\t\tnetip.MustParsePrefix(\"10.10.10.0/24\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"10.10.11.0/24\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"10.10.12.0/24\"),\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t\t{\n\t\t\t\t\t\t// This represents the rule: action: accept, src: [\"*\"], dst: [\"router:0\"]\n\t\t\t\t\t\tSrcIPs: []string{\"*\"}, // Any source\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.64.0.1\"}, // Router IP\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t// This represents the rule: action: accept, src: [\"node\"], dst: [\"10.10.10.0/24:*\"]\n\t\t\t\t\t\tSrcIPs: []string{\"100.64.0.2\"}, // Node IP\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.10.10.0/24\", Ports: tailcfg.PortRangeAny}, // All ports on this subnet\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.10.10.0/24\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"acl-all-source-nodes-can-access-router-only-node-can-access-10.10.10.0-24\",\n\t\t\targs: args{\n\t\t\t\t// When testing from router node's perspective\n\t\t\t\tnode: &types.Node{\n\t\t\t\t\tID:   1,\n\t\t\t\t\tIPv4: ap(\"100.64.0.1\"), // router with IP 100.64.0.1\n\t\t\t\t\tUser: &types.User{Name: \"router\"},\n\t\t\t\t},\n\t\t\t\troutes: []netip.Prefix{\n\t\t\t\t\tnetip.MustParsePrefix(\"10.10.10.0/24\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"10.10.11.0/24\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"10.10.12.0/24\"),\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"*\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.64.0.1\"}, // Router can be accessed by all\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"100.64.0.2\"}, // Only node\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.10.10.0/24\"}, // Can access this subnet\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t// Add a rule for router to access its own routes\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"100.64.0.1\"}, // Router node\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\"}, // Can access everything\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Router needs explicit rules to access routes\n\t\t\twant: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.10.10.0/24\"),\n\t\t\t\tnetip.MustParsePrefix(\"10.10.11.0/24\"),\n\t\t\t\tnetip.MustParsePrefix(\"10.10.12.0/24\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"acl-specific-port-ranges-for-subnets\",\n\t\t\targs: args{\n\t\t\t\tnode: 
&types.Node{\n\t\t\t\t\tID:   2,\n\t\t\t\t\tIPv4: ap(\"100.64.0.2\"), // node\n\t\t\t\t\tUser: &types.User{Name: \"node\"},\n\t\t\t\t},\n\t\t\t\troutes: []netip.Prefix{\n\t\t\t\t\tnetip.MustParsePrefix(\"10.10.10.0/24\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"10.10.11.0/24\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"10.10.12.0/24\"),\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"100.64.0.2\"}, // node\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.10.10.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}}, // Only SSH\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"100.64.0.2\"}, // node\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.10.11.0/24\", Ports: tailcfg.PortRange{First: 80, Last: 80}}, // Only HTTP\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Should get both subnets with specific port ranges\n\t\t\twant: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.10.10.0/24\"),\n\t\t\t\tnetip.MustParsePrefix(\"10.10.11.0/24\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"acl-order-of-rules-and-rule-specificity\",\n\t\t\targs: args{\n\t\t\t\tnode: &types.Node{\n\t\t\t\t\tID:   2,\n\t\t\t\t\tIPv4: ap(\"100.64.0.2\"), // node\n\t\t\t\t\tUser: &types.User{Name: \"node\"},\n\t\t\t\t},\n\t\t\t\troutes: []netip.Prefix{\n\t\t\t\t\tnetip.MustParsePrefix(\"10.10.10.0/24\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"10.10.11.0/24\"),\n\t\t\t\t\tnetip.MustParsePrefix(\"10.10.12.0/24\"),\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t\t// First rule allows all traffic\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"*\"}, // Any source\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny}, // Any destination and any port\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t// Second rule is more specific but should be overridden by the first rule\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"100.64.0.2\"}, // node\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.10.10.0/24\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Due to the first rule allowing all traffic, node should have access to all routes\n\t\t\twant: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.10.10.0/24\"),\n\t\t\t\tnetip.MustParsePrefix(\"10.10.11.0/24\"),\n\t\t\t\tnetip.MustParsePrefix(\"10.10.12.0/24\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"return-path-subnet-router-to-regular-node-issue-2608\",\n\t\t\targs: args{\n\t\t\t\tnode: &types.Node{\n\t\t\t\t\tID:   2,\n\t\t\t\t\tIPv4: ap(\"100.123.45.89\"), // Node B - regular node\n\t\t\t\t\tUser: &types.User{Name: \"node-b\"},\n\t\t\t\t},\n\t\t\t\troutes: []netip.Prefix{\n\t\t\t\t\tnetip.MustParsePrefix(\"192.168.1.0/24\"), // Subnet connected to Node A\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t\t{\n\t\t\t\t\t\t// Policy allows 192.168.1.0/24 and group:routers to access *:*\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"192.168.1.0/24\", // Subnet behind router\n\t\t\t\t\t\t\t\"100.123.45.67\",  // Node A (router, part of group:routers)\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny}, // Access to everything\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Node B should receive the 192.168.1.0/24 route for return traffic\n\t\t\t// even though Node B cannot initiate connections to that network\n\t\t\twant: 
[]netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"192.168.1.0/24\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"return-path-router-perspective-2608\",\n\t\t\targs: args{\n\t\t\t\tnode: &types.Node{\n\t\t\t\t\tID:   1,\n\t\t\t\t\tIPv4: ap(\"100.123.45.67\"), // Node A - router node\n\t\t\t\t\tUser: &types.User{Name: \"router\"},\n\t\t\t\t},\n\t\t\t\troutes: []netip.Prefix{\n\t\t\t\t\tnetip.MustParsePrefix(\"192.168.1.0/24\"), // Subnet connected to this router\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t\t{\n\t\t\t\t\t\t// Policy allows 192.168.1.0/24 and group:routers to access *:*\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"192.168.1.0/24\", // Subnet behind router\n\t\t\t\t\t\t\t\"100.123.45.67\",  // Node A (router, part of group:routers)\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny}, // Access to everything\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Router should have access to its own routes\n\t\t\twant: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"192.168.1.0/24\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"subnet-behind-router-bidirectional-connectivity-issue-2608\",\n\t\t\targs: args{\n\t\t\t\tnode: &types.Node{\n\t\t\t\t\tID:   2,\n\t\t\t\t\tIPv4: ap(\"100.123.45.89\"), // Node B - regular node that should be reachable\n\t\t\t\t\tUser: &types.User{Name: \"node-b\"},\n\t\t\t\t},\n\t\t\t\troutes: []netip.Prefix{\n\t\t\t\t\tnetip.MustParsePrefix(\"192.168.1.0/24\"), // Subnet behind router\n\t\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),    // Another subnet\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t\t{\n\t\t\t\t\t\t// Only 192.168.1.0/24 and routers can access everything\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"192.168.1.0/24\", // Subnet that can connect to Node B\n\t\t\t\t\t\t\t\"100.123.45.67\",  // Router node\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t// Node B cannot access anything (no rules with Node B as source)\n\t\t\t\t\t\tSrcIPs:   []string{\"100.123.45.89\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// No destinations - Node B cannot initiate connections\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Node B should still get the 192.168.1.0/24 route for return traffic\n\t\t\t// but should NOT get 10.0.0.0/24 since nothing allows that subnet to connect to Node B\n\t\t\twant: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"192.168.1.0/24\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"no-route-leakage-when-no-connection-allowed-2608\",\n\t\t\targs: args{\n\t\t\t\tnode: &types.Node{\n\t\t\t\t\tID:   3,\n\t\t\t\t\tIPv4: ap(\"100.123.45.99\"), // Node C - isolated node\n\t\t\t\t\tUser: &types.User{Name: \"isolated-node\"},\n\t\t\t\t},\n\t\t\t\troutes: []netip.Prefix{\n\t\t\t\t\tnetip.MustParsePrefix(\"192.168.1.0/24\"), // Subnet behind router\n\t\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),    // Another private subnet\n\t\t\t\t\tnetip.MustParsePrefix(\"172.16.0.0/24\"),  // Yet another subnet\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t\t{\n\t\t\t\t\t\t// Only specific subnets and routers can access specific destinations\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"192.168.1.0/24\", // This subnet can access everything\n\t\t\t\t\t\t\t\"100.123.45.67\",  // Router node can access everything\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: 
[]tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.123.45.89\", Ports: tailcfg.PortRangeAny}, // Only to Node B\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t// 10.0.0.0/24 can only access router\n\t\t\t\t\t\tSrcIPs: []string{\"10.0.0.0/24\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.123.45.67\", Ports: tailcfg.PortRangeAny}, // Only to router\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t// 172.16.0.0/24 has no access rules at all\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Node C should get NO routes because:\n\t\t\t// - 192.168.1.0/24 can only connect to Node B (not Node C)\n\t\t\t// - 10.0.0.0/24 can only connect to router (not Node C)\n\t\t\t// - 172.16.0.0/24 has no rules allowing it to connect anywhere\n\t\t\t// - Node C is not in any rules as a destination\n\t\t\twant: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"original-issue-2608-with-slash14-network\",\n\t\t\targs: args{\n\t\t\t\tnode: &types.Node{\n\t\t\t\t\tID:   2,\n\t\t\t\t\tIPv4: ap(\"100.123.45.89\"), // Node B - regular node\n\t\t\t\t\tUser: &types.User{Name: \"node-b\"},\n\t\t\t\t},\n\t\t\t\troutes: []netip.Prefix{\n\t\t\t\t\tnetip.MustParsePrefix(\"192.168.1.0/14\"), // Network 192.168.1.0/14 as mentioned in original issue\n\t\t\t\t},\n\t\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t\t{\n\t\t\t\t\t\t// Policy allows 192.168.1.0/24 (part of /14) and group:routers to access *:*\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"192.168.1.0/24\", // Subnet behind router (part of the larger /14 network)\n\t\t\t\t\t\t\t\"100.123.45.67\",  // Node A (router, part of group:routers)\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny}, // Access to everything\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Node B should receive the 192.168.1.0/14 route for return traffic\n\t\t\t// even though only 192.168.1.0/24 (part of /14) can connect to Node B\n\t\t\t// This is the exact scenario from the original issue\n\t\t\twant: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"192.168.1.0/14\"),\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tmatchers := matcher.MatchesFromFilterRules(tt.args.rules)\n\n\t\t\tgot := ReduceRoutes(\n\t\t\t\ttt.args.node.View(),\n\t\t\t\ttt.args.routes,\n\t\t\t\tmatchers,\n\t\t\t)\n\t\t\tif diff := cmp.Diff(tt.want, got, util.Comparers...); diff != \"\" {\n\t\t\t\tt.Errorf(\"ReduceRoutes() unexpected result (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
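  {
    "path": "hscontrol/policy/reduce_routes_sketch_test.go",
    "content": "package policy\n\n// This file is an illustrative sketch and not part of the upstream test\n// suite; the file name is hypothetical, and the matcher import path is\n// assumed from the package name used in the tests above. It walks through\n// the ReduceRoutes flow those tests exercise: build matchers from filter\n// rules, then ask which of a node's announced routes remain visible to it.\n\nimport (\n\t\"net/netip\"\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/policy/matcher\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"tailscale.com/tailcfg\"\n)\n\nfunc TestReduceRoutesSketch(t *testing.T) {\n\tip := netip.MustParseAddr(\"100.64.0.1\")\n\tnode := &types.Node{\n\t\tID:   1,\n\t\tIPv4: &ip,\n\t\tUser: &types.User{Name: \"user1\"},\n\t}\n\n\troutes := []netip.Prefix{\n\t\tnetip.MustParsePrefix(\"10.0.0.0/24\"),\n\t\tnetip.MustParsePrefix(\"192.168.1.0/24\"),\n\t}\n\n\t// The filter only lets this node reach 10.0.0.0/24, so ReduceRoutes\n\t// should drop 192.168.1.0/24 from the returned set.\n\trules := []tailcfg.FilterRule{\n\t\t{\n\t\t\tSrcIPs: []string{\"100.64.0.1\"},\n\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t{IP: \"10.0.0.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t},\n\t\t},\n\t}\n\n\tgot := ReduceRoutes(node.View(), routes, matcher.MatchesFromFilterRules(rules))\n\tif len(got) != 1 || got[0] != routes[0] {\n\t\tt.Fatalf(\"expected only 10.0.0.0/24 to survive, got %v\", got)\n\t}\n}\n"
  },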
  {
    "path": "hscontrol/policy/policyutil/reduce.go",
    "content": "package policyutil\n\nimport (\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"tailscale.com/tailcfg\"\n)\n\n// ReduceFilterRules takes a node and a set of global filter rules and removes all rules\n// and destinations that are not relevant to that particular node.\n//\n// IMPORTANT: This function is designed for global filters only. Per-node filters\n// (from autogroup:self policies) are already node-specific and should not be passed\n// to this function. Use PolicyManager.FilterForNode() instead, which handles both cases.\nfunc ReduceFilterRules(node types.NodeView, rules []tailcfg.FilterRule) []tailcfg.FilterRule {\n\tret := []tailcfg.FilterRule{}\n\n\tfor _, rule := range rules {\n\t\t// record if the rule is actually relevant for the given node.\n\t\tvar dests []tailcfg.NetPortRange\n\n\tDEST_LOOP:\n\t\tfor _, dest := range rule.DstPorts {\n\t\t\texpanded, err := util.ParseIPSet(dest.IP, nil)\n\t\t\t// Fail closed, if we can't parse it, then we should not allow\n\t\t\t// access.\n\t\t\tif err != nil {\n\t\t\t\tcontinue DEST_LOOP\n\t\t\t}\n\n\t\t\tif node.InIPSet(expanded) {\n\t\t\t\tdests = append(dests, dest)\n\t\t\t\tcontinue DEST_LOOP\n\t\t\t}\n\n\t\t\t// If the node exposes routes, ensure they are note removed\n\t\t\t// when the filters are reduced.\n\t\t\tif node.Hostinfo().Valid() {\n\t\t\t\troutableIPs := node.Hostinfo().RoutableIPs()\n\t\t\t\tif routableIPs.Len() > 0 {\n\t\t\t\t\tfor _, routableIP := range routableIPs.All() {\n\t\t\t\t\t\tif expanded.OverlapsPrefix(routableIP) {\n\t\t\t\t\t\t\tdests = append(dests, dest)\n\t\t\t\t\t\t\tcontinue DEST_LOOP\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Also check approved subnet routes - nodes should have access\n\t\t\t// to subnets they're approved to route traffic for.\n\t\t\tsubnetRoutes := node.SubnetRoutes()\n\n\t\t\tfor _, subnetRoute := range subnetRoutes {\n\t\t\t\tif expanded.OverlapsPrefix(subnetRoute) {\n\t\t\t\t\tdests = append(dests, dest)\n\t\t\t\t\tcontinue DEST_LOOP\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif len(dests) > 0 {\n\t\t\tret = append(ret, tailcfg.FilterRule{\n\t\t\t\tSrcIPs:   rule.SrcIPs,\n\t\t\t\tDstPorts: dests,\n\t\t\t\tIPProto:  rule.IPProto,\n\t\t\t})\n\t\t}\n\t}\n\n\treturn ret\n}\n"
  },
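  {
    "path": "hscontrol/policy/policyutil/reduce_sketch_test.go",
    "content": "package policyutil_test\n\n// This file is an illustrative sketch and not part of the upstream test\n// suite; the file name is hypothetical. It demonstrates how ReduceFilterRules\n// narrows a global filter down to the destinations that are relevant for a\n// single node: destinations covering the node itself are kept, unrelated\n// ones are dropped. Only APIs already exercised by the surrounding files\n// are used.\n\nimport (\n\t\"net/netip\"\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/policy/policyutil\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"tailscale.com/tailcfg\"\n)\n\nfunc TestReduceFilterRulesSketch(t *testing.T) {\n\tip := netip.MustParseAddr(\"100.64.0.1\")\n\tnode := &types.Node{\n\t\tIPv4: &ip,\n\t\tUser: &types.User{Name: \"user1\"},\n\t}\n\n\trules := []tailcfg.FilterRule{\n\t\t{\n\t\t\tSrcIPs: []string{\"*\"},\n\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t// Kept: this destination covers the node's own address.\n\t\t\t\t{IP: \"100.64.0.1/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t// Dropped: this destination only concerns an unrelated peer.\n\t\t\t\t{IP: \"100.64.0.2/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t},\n\t\t},\n\t}\n\n\tgot := policyutil.ReduceFilterRules(node.View(), rules)\n\tif len(got) != 1 || len(got[0].DstPorts) != 1 {\n\t\tt.Fatalf(\"expected one rule with one destination, got %#v\", got)\n\t}\n\tif got[0].DstPorts[0].IP != \"100.64.0.1/32\" {\n\t\tt.Fatalf(\"expected the node's own destination to be kept, got %q\", got[0].DstPorts[0].IP)\n\t}\n}\n"
  },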
  {
    "path": "hscontrol/policy/policyutil/reduce_test.go",
    "content": "package policyutil_test\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/netip\"\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/juanfont/headscale/hscontrol/policy\"\n\t\"github.com/juanfont/headscale/hscontrol/policy/policyutil\"\n\tv2 \"github.com/juanfont/headscale/hscontrol/policy/v2\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/rs/zerolog/log\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gorm.io/gorm\"\n\t\"tailscale.com/net/tsaddr\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/util/must\"\n)\n\nvar ap = func(ipStr string) *netip.Addr {\n\tip := netip.MustParseAddr(ipStr)\n\treturn &ip\n}\n\nvar p = func(prefStr string) netip.Prefix {\n\tip := netip.MustParsePrefix(prefStr)\n\treturn ip\n}\n\n// hsExitNodeDestForTest is the list of destination IP ranges that are allowed when\n// we use headscale \"autogroup:internet\".\nvar hsExitNodeDestForTest = []tailcfg.NetPortRange{\n\t{IP: \"0.0.0.0/5\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"8.0.0.0/7\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"11.0.0.0/8\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"12.0.0.0/6\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"16.0.0.0/4\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"32.0.0.0/3\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"64.0.0.0/3\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"96.0.0.0/6\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"100.0.0.0/10\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"100.128.0.0/9\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"101.0.0.0/8\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"102.0.0.0/7\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"104.0.0.0/5\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"112.0.0.0/4\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"128.0.0.0/3\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"160.0.0.0/5\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"168.0.0.0/8\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"169.0.0.0/9\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"169.128.0.0/10\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"169.192.0.0/11\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"169.224.0.0/12\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"169.240.0.0/13\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"169.248.0.0/14\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"169.252.0.0/15\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"169.255.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"170.0.0.0/7\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"172.0.0.0/12\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"172.32.0.0/11\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"172.64.0.0/10\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"172.128.0.0/9\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"173.0.0.0/8\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"174.0.0.0/7\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"176.0.0.0/4\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"192.0.0.0/9\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"192.128.0.0/11\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"192.160.0.0/13\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"192.169.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"192.170.0.0/15\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"192.172.0.0/14\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"192.176.0.0/12\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"192.192.0.0/10\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"193.0.0.0/8\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"194.0.0.0/7\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"196.0.0.0/6\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"200.0.0.0/5\", Ports: tailcfg.PortRangeAny},\n\t{IP: 
\"208.0.0.0/4\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"224.0.0.0/3\", Ports: tailcfg.PortRangeAny},\n\t{IP: \"2000::/3\", Ports: tailcfg.PortRangeAny},\n}\n\nfunc TestTheInternet(t *testing.T) {\n\tinternetSet := util.TheInternet()\n\n\tinternetPrefs := internetSet.Prefixes()\n\n\tfor i := range internetPrefs {\n\t\tif internetPrefs[i].String() != hsExitNodeDestForTest[i].IP {\n\t\t\tt.Errorf(\n\t\t\t\t\"prefix from internet set %q != hsExit list %q\",\n\t\t\t\tinternetPrefs[i].String(),\n\t\t\t\thsExitNodeDestForTest[i].IP,\n\t\t\t)\n\t\t}\n\t}\n\n\tif len(internetPrefs) != len(hsExitNodeDestForTest) {\n\t\tt.Fatalf(\n\t\t\t\"expected same length of prefixes, internet: %d, hsExit: %d\",\n\t\t\tlen(internetPrefs),\n\t\t\tlen(hsExitNodeDestForTest),\n\t\t)\n\t}\n}\n\nfunc TestReduceFilterRules(t *testing.T) {\n\tusers := types.Users{\n\t\ttypes.User{Model: gorm.Model{ID: 1}, Name: \"mickael\"},\n\t\ttypes.User{Model: gorm.Model{ID: 2}, Name: \"user1\"},\n\t\ttypes.User{Model: gorm.Model{ID: 3}, Name: \"user2\"},\n\t\ttypes.User{Model: gorm.Model{ID: 4}, Name: \"user100\"},\n\t\ttypes.User{Model: gorm.Model{ID: 5}, Name: \"user3\"},\n\t}\n\n\ttests := []struct {\n\t\tname  string\n\t\tnode  *types.Node\n\t\tpeers types.Nodes\n\t\tpol   string\n\t\twant  []tailcfg.FilterRule\n\t}{\n\t\t{\n\t\t\tname: \"host1-can-reach-host2-no-rules\",\n\t\t\tpol: `\n{\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"proto\": \"\",\n      \"src\": [\n        \"100.64.0.1\"\n      ],\n      \"dst\": [\n        \"100.64.0.2:*\"\n      ]\n    }\n  ],\n}\n`,\n\t\t\tnode: &types.Node{\n\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\tIPv6: ap(\"fd7a:115c:a1e0:ab12:4843:2222:6273:2221\"),\n\t\t\t\tUser: new(users[0]),\n\t\t\t},\n\t\t\tpeers: types.Nodes{\n\t\t\t\t&types.Node{\n\t\t\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\t\t\tIPv6: ap(\"fd7a:115c:a1e0:ab12:4843:2222:6273:2222\"),\n\t\t\t\t\tUser: new(users[0]),\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []tailcfg.FilterRule{},\n\t\t},\n\t\t{\n\t\t\tname: \"1604-subnet-routers-are-preserved\",\n\t\t\tpol: `\n{\n  \"groups\": {\n    \"group:admins\": [\n      \"user1@\"\n    ]\n  },\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"proto\": \"\",\n      \"src\": [\n        \"group:admins\"\n      ],\n      \"dst\": [\n        \"group:admins:*\"\n      ]\n    },\n    {\n      \"action\": \"accept\",\n      \"proto\": \"\",\n      \"src\": [\n        \"group:admins\"\n      ],\n      \"dst\": [\n        \"10.33.0.0/16:*\"\n      ]\n    }\n  ],\n}\n`,\n\t\t\tnode: &types.Node{\n\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\tIPv6: ap(\"fd7a:115c:a1e0::1\"),\n\t\t\t\tUser: new(users[1]),\n\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\tRoutableIPs: []netip.Prefix{\n\t\t\t\t\t\tnetip.MustParsePrefix(\"10.33.0.0/16\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tpeers: types.Nodes{\n\t\t\t\t&types.Node{\n\t\t\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\t\t\tIPv6: ap(\"fd7a:115c:a1e0::2\"),\n\t\t\t\t\tUser: new(users[1]),\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []tailcfg.FilterRule{\n\t\t\t\t// Merged: Both ACL rules combined (same SrcIPs and IPProto)\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\"100.64.0.1/32\",\n\t\t\t\t\t\t\"100.64.0.2/32\",\n\t\t\t\t\t\t\"fd7a:115c:a1e0::1/128\",\n\t\t\t\t\t\t\"fd7a:115c:a1e0::2/128\",\n\t\t\t\t\t},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP:    \"100.64.0.1/32\",\n\t\t\t\t\t\t\tPorts: tailcfg.PortRangeAny,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP:    
\"fd7a:115c:a1e0::1/128\",\n\t\t\t\t\t\t\tPorts: tailcfg.PortRangeAny,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP:    \"10.33.0.0/16\",\n\t\t\t\t\t\t\tPorts: tailcfg.PortRangeAny,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{v2.ProtocolTCP, v2.ProtocolUDP, v2.ProtocolICMP, v2.ProtocolIPv6ICMP},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"1786-reducing-breaks-exit-nodes-the-client\",\n\t\t\tpol: `\n{\n  \"groups\": {\n    \"group:team\": [\n      \"user3@\",\n      \"user2@\",\n      \"user1@\"\n    ]\n  },\n  \"hosts\": {\n    \"internal\": \"100.64.0.100/32\"\n  },\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"proto\": \"\",\n      \"src\": [\n        \"group:team\"\n      ],\n      \"dst\": [\n        \"internal:*\"\n      ]\n    },\n    {\n      \"action\": \"accept\",\n      \"proto\": \"\",\n      \"src\": [\n        \"group:team\"\n      ],\n      \"dst\": [\n        \"autogroup:internet:*\"\n      ]\n    }\n  ],\n}\n`,\n\t\t\tnode: &types.Node{\n\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\tIPv6: ap(\"fd7a:115c:a1e0::1\"),\n\t\t\t\tUser: new(users[1]),\n\t\t\t},\n\t\t\tpeers: types.Nodes{\n\t\t\t\t&types.Node{\n\t\t\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\t\t\tIPv6: ap(\"fd7a:115c:a1e0::2\"),\n\t\t\t\t\tUser: new(users[2]),\n\t\t\t\t},\n\t\t\t\t// \"internal\" exit node\n\t\t\t\t&types.Node{\n\t\t\t\t\tIPv4: ap(\"100.64.0.100\"),\n\t\t\t\t\tIPv6: ap(\"fd7a:115c:a1e0::100\"),\n\t\t\t\t\tUser: new(users[3]),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tRoutableIPs: tsaddr.ExitRoutes(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []tailcfg.FilterRule{},\n\t\t},\n\t\t{\n\t\t\tname: \"1786-reducing-breaks-exit-nodes-the-exit\",\n\t\t\tpol: `\n{\n  \"groups\": {\n    \"group:team\": [\n      \"user3@\",\n      \"user2@\",\n      \"user1@\"\n    ]\n  },\n  \"hosts\": {\n    \"internal\": \"100.64.0.100/32\"\n  },\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"proto\": \"\",\n      \"src\": [\n        \"group:team\"\n      ],\n      \"dst\": [\n        \"internal:*\"\n      ]\n    },\n    {\n      \"action\": \"accept\",\n      \"proto\": \"\",\n      \"src\": [\n        \"group:team\"\n      ],\n      \"dst\": [\n        \"autogroup:internet:*\"\n      ]\n    }\n  ],\n}\n`,\n\t\t\tnode: &types.Node{\n\t\t\t\tIPv4: ap(\"100.64.0.100\"),\n\t\t\t\tIPv6: ap(\"fd7a:115c:a1e0::100\"),\n\t\t\t\tUser: new(users[3]),\n\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\tRoutableIPs: tsaddr.ExitRoutes(),\n\t\t\t\t},\n\t\t\t},\n\t\t\tpeers: types.Nodes{\n\t\t\t\t&types.Node{\n\t\t\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\t\t\tIPv6: ap(\"fd7a:115c:a1e0::2\"),\n\t\t\t\t\tUser: new(users[2]),\n\t\t\t\t},\n\t\t\t\t&types.Node{\n\t\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\t\tIPv6: ap(\"fd7a:115c:a1e0::1\"),\n\t\t\t\t\tUser: new(users[1]),\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []tailcfg.FilterRule{\n\t\t\t\t// Only the internal:* rule generates filters.\n\t\t\t\t// autogroup:internet does NOT generate packet filters - it's handled\n\t\t\t\t// by exit node routing via AllowedIPs, not by packet filtering.\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.1/32\", \"100.64.0.2/32\", \"fd7a:115c:a1e0::1/128\", \"fd7a:115c:a1e0::2/128\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP:    \"100.64.0.100/32\",\n\t\t\t\t\t\t\tPorts: tailcfg.PortRangeAny,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP:    \"fd7a:115c:a1e0::100/128\",\n\t\t\t\t\t\t\tPorts: tailcfg.PortRangeAny,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: 
[]int{v2.ProtocolTCP, v2.ProtocolUDP, v2.ProtocolICMP, v2.ProtocolIPv6ICMP},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"1786-reducing-breaks-exit-nodes-the-example-from-issue\",\n\t\t\tpol: `\n{\n  \"groups\": {\n    \"group:team\": [\n      \"user3@\",\n      \"user2@\",\n      \"user1@\"\n    ]\n  },\n  \"hosts\": {\n    \"internal\": \"100.64.0.100/32\"\n  },\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"proto\": \"\",\n      \"src\": [\n        \"group:team\"\n      ],\n      \"dst\": [\n        \"internal:*\"\n      ]\n    },\n    {\n      \"action\": \"accept\",\n      \"proto\": \"\",\n      \"src\": [\n        \"group:team\"\n      ],\n      \"dst\": [\n        \"0.0.0.0/5:*\",\n        \"8.0.0.0/7:*\",\n        \"11.0.0.0/8:*\",\n        \"12.0.0.0/6:*\",\n        \"16.0.0.0/4:*\",\n        \"32.0.0.0/3:*\",\n        \"64.0.0.0/2:*\",\n        \"128.0.0.0/3:*\",\n        \"160.0.0.0/5:*\",\n        \"168.0.0.0/6:*\",\n        \"172.0.0.0/12:*\",\n        \"172.32.0.0/11:*\",\n        \"172.64.0.0/10:*\",\n        \"172.128.0.0/9:*\",\n        \"173.0.0.0/8:*\",\n        \"174.0.0.0/7:*\",\n        \"176.0.0.0/4:*\",\n        \"192.0.0.0/9:*\",\n        \"192.128.0.0/11:*\",\n        \"192.160.0.0/13:*\",\n        \"192.169.0.0/16:*\",\n        \"192.170.0.0/15:*\",\n        \"192.172.0.0/14:*\",\n        \"192.176.0.0/12:*\",\n        \"192.192.0.0/10:*\",\n        \"193.0.0.0/8:*\",\n        \"194.0.0.0/7:*\",\n        \"196.0.0.0/6:*\",\n        \"200.0.0.0/5:*\",\n        \"208.0.0.0/4:*\"\n      ]\n    }\n  ],\n}\n`,\n\t\t\tnode: &types.Node{\n\t\t\t\tIPv4: ap(\"100.64.0.100\"),\n\t\t\t\tIPv6: ap(\"fd7a:115c:a1e0::100\"),\n\t\t\t\tUser: new(users[3]),\n\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\tRoutableIPs: tsaddr.ExitRoutes(),\n\t\t\t\t},\n\t\t\t},\n\t\t\tpeers: types.Nodes{\n\t\t\t\t&types.Node{\n\t\t\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\t\t\tIPv6: ap(\"fd7a:115c:a1e0::2\"),\n\t\t\t\t\tUser: new(users[2]),\n\t\t\t\t},\n\t\t\t\t&types.Node{\n\t\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\t\tIPv6: ap(\"fd7a:115c:a1e0::1\"),\n\t\t\t\t\tUser: new(users[1]),\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []tailcfg.FilterRule{\n\t\t\t\t// Merged: Both ACL rules combined (same SrcIPs and IPProto)\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.1/32\", \"100.64.0.2/32\", \"fd7a:115c:a1e0::1/128\", \"fd7a:115c:a1e0::2/128\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP:    \"100.64.0.100/32\",\n\t\t\t\t\t\t\tPorts: tailcfg.PortRangeAny,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP:    \"fd7a:115c:a1e0::100/128\",\n\t\t\t\t\t\t\tPorts: tailcfg.PortRangeAny,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{IP: \"0.0.0.0/5\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"8.0.0.0/7\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"11.0.0.0/8\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"12.0.0.0/6\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"16.0.0.0/4\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"32.0.0.0/3\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"64.0.0.0/2\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"128.0.0.0/3\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"160.0.0.0/5\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"168.0.0.0/6\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"172.0.0.0/12\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"172.32.0.0/11\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"172.64.0.0/10\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: 
\"172.128.0.0/9\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"173.0.0.0/8\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"174.0.0.0/7\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"176.0.0.0/4\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"192.0.0.0/9\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"192.128.0.0/11\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"192.160.0.0/13\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"192.169.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"192.170.0.0/15\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"192.172.0.0/14\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"192.176.0.0/12\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"192.192.0.0/10\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"193.0.0.0/8\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"194.0.0.0/7\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"196.0.0.0/6\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"200.0.0.0/5\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t{IP: \"208.0.0.0/4\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{v2.ProtocolTCP, v2.ProtocolUDP, v2.ProtocolICMP, v2.ProtocolIPv6ICMP},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"1786-reducing-breaks-exit-nodes-app-connector-like\",\n\t\t\tpol: `\n{\n  \"groups\": {\n    \"group:team\": [\n      \"user3@\",\n      \"user2@\",\n      \"user1@\"\n    ]\n  },\n  \"hosts\": {\n    \"internal\": \"100.64.0.100/32\"\n  },\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"proto\": \"\",\n      \"src\": [\n        \"group:team\"\n      ],\n      \"dst\": [\n        \"internal:*\"\n      ]\n    },\n    {\n      \"action\": \"accept\",\n      \"proto\": \"\",\n      \"src\": [\n        \"group:team\"\n      ],\n      \"dst\": [\n        \"8.0.0.0/8:*\",\n        \"16.0.0.0/8:*\"\n      ]\n    }\n  ],\n}\n`,\n\t\t\tnode: &types.Node{\n\t\t\t\tIPv4: ap(\"100.64.0.100\"),\n\t\t\t\tIPv6: ap(\"fd7a:115c:a1e0::100\"),\n\t\t\t\tUser: new(users[3]),\n\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\tRoutableIPs: []netip.Prefix{netip.MustParsePrefix(\"8.0.0.0/16\"), netip.MustParsePrefix(\"16.0.0.0/16\")},\n\t\t\t\t},\n\t\t\t},\n\t\t\tpeers: types.Nodes{\n\t\t\t\t&types.Node{\n\t\t\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\t\t\tIPv6: ap(\"fd7a:115c:a1e0::2\"),\n\t\t\t\t\tUser: new(users[2]),\n\t\t\t\t},\n\t\t\t\t&types.Node{\n\t\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\t\tIPv6: ap(\"fd7a:115c:a1e0::1\"),\n\t\t\t\t\tUser: new(users[1]),\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []tailcfg.FilterRule{\n\t\t\t\t// Merged: Both ACL rules combined (same SrcIPs and IPProto)\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.1/32\", \"100.64.0.2/32\", \"fd7a:115c:a1e0::1/128\", \"fd7a:115c:a1e0::2/128\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP:    \"100.64.0.100/32\",\n\t\t\t\t\t\t\tPorts: tailcfg.PortRangeAny,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP:    \"fd7a:115c:a1e0::100/128\",\n\t\t\t\t\t\t\tPorts: tailcfg.PortRangeAny,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP:    \"8.0.0.0/8\",\n\t\t\t\t\t\t\tPorts: tailcfg.PortRangeAny,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP:    \"16.0.0.0/8\",\n\t\t\t\t\t\t\tPorts: tailcfg.PortRangeAny,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{v2.ProtocolTCP, v2.ProtocolUDP, v2.ProtocolICMP, v2.ProtocolIPv6ICMP},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"1786-reducing-breaks-exit-nodes-app-connector-like2\",\n\t\t\tpol: `\n{\n  
\"groups\": {\n    \"group:team\": [\n      \"user3@\",\n      \"user2@\",\n      \"user1@\"\n    ]\n  },\n  \"hosts\": {\n    \"internal\": \"100.64.0.100/32\"\n  },\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"proto\": \"\",\n      \"src\": [\n        \"group:team\"\n      ],\n      \"dst\": [\n        \"internal:*\"\n      ]\n    },\n    {\n      \"action\": \"accept\",\n      \"proto\": \"\",\n      \"src\": [\n        \"group:team\"\n      ],\n      \"dst\": [\n        \"8.0.0.0/16:*\",\n        \"16.0.0.0/16:*\"\n      ]\n    }\n  ],\n}\n`,\n\t\t\tnode: &types.Node{\n\t\t\t\tIPv4: ap(\"100.64.0.100\"),\n\t\t\t\tIPv6: ap(\"fd7a:115c:a1e0::100\"),\n\t\t\t\tUser: new(users[3]),\n\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\tRoutableIPs: []netip.Prefix{netip.MustParsePrefix(\"8.0.0.0/8\"), netip.MustParsePrefix(\"16.0.0.0/8\")},\n\t\t\t\t},\n\t\t\t},\n\t\t\tpeers: types.Nodes{\n\t\t\t\t&types.Node{\n\t\t\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\t\t\tIPv6: ap(\"fd7a:115c:a1e0::2\"),\n\t\t\t\t\tUser: new(users[2]),\n\t\t\t\t},\n\t\t\t\t&types.Node{\n\t\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\t\tIPv6: ap(\"fd7a:115c:a1e0::1\"),\n\t\t\t\t\tUser: new(users[1]),\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []tailcfg.FilterRule{\n\t\t\t\t// Merged: Both ACL rules combined (same SrcIPs and IPProto)\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.1/32\", \"100.64.0.2/32\", \"fd7a:115c:a1e0::1/128\", \"fd7a:115c:a1e0::2/128\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP:    \"100.64.0.100/32\",\n\t\t\t\t\t\t\tPorts: tailcfg.PortRangeAny,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP:    \"fd7a:115c:a1e0::100/128\",\n\t\t\t\t\t\t\tPorts: tailcfg.PortRangeAny,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP:    \"8.0.0.0/16\",\n\t\t\t\t\t\t\tPorts: tailcfg.PortRangeAny,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP:    \"16.0.0.0/16\",\n\t\t\t\t\t\t\tPorts: tailcfg.PortRangeAny,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{v2.ProtocolTCP, v2.ProtocolUDP, v2.ProtocolICMP, v2.ProtocolIPv6ICMP},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"1817-reduce-breaks-32-mask\",\n\t\t\tpol: `\n{\n  \"tagOwners\": {\n    \"tag:access-servers\": [\"user100@\"],\n  },\n  \"groups\": {\n    \"group:access\": [\n      \"user1@\"\n    ]\n  },\n  \"hosts\": {\n    \"dns1\": \"172.16.0.21/32\",\n    \"vlan1\": \"172.16.0.0/24\"\n  },\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"proto\": \"\",\n      \"src\": [\n        \"group:access\"\n      ],\n      \"dst\": [\n        \"tag:access-servers:*\",\n        \"dns1:*\"\n      ]\n    }\n  ],\n}\n`,\n\t\t\tnode: &types.Node{\n\t\t\t\tIPv4: ap(\"100.64.0.100\"),\n\t\t\t\tIPv6: ap(\"fd7a:115c:a1e0::100\"),\n\t\t\t\tUser: new(users[3]),\n\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\tRoutableIPs: []netip.Prefix{netip.MustParsePrefix(\"172.16.0.0/24\")},\n\t\t\t\t},\n\t\t\t\tTags: []string{\"tag:access-servers\"},\n\t\t\t},\n\t\t\tpeers: types.Nodes{\n\t\t\t\t&types.Node{\n\t\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\t\tIPv6: ap(\"fd7a:115c:a1e0::1\"),\n\t\t\t\t\tUser: new(users[1]),\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []tailcfg.FilterRule{\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.1/32\", \"fd7a:115c:a1e0::1/128\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP:    \"100.64.0.100/32\",\n\t\t\t\t\t\t\tPorts: tailcfg.PortRangeAny,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP:    \"fd7a:115c:a1e0::100/128\",\n\t\t\t\t\t\t\tPorts: 
tailcfg.PortRangeAny,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP:    \"172.16.0.21/32\",\n\t\t\t\t\t\t\tPorts: tailcfg.PortRangeAny,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{v2.ProtocolTCP, v2.ProtocolUDP, v2.ProtocolICMP, v2.ProtocolIPv6ICMP},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"2365-only-route-policy\",\n\t\t\tpol: `\n{\n  \"hosts\": {\n    \"router\": \"100.64.0.1/32\",\n    \"node\": \"100.64.0.2/32\"\n  },\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"*\"\n      ],\n      \"dst\": [\n        \"router:8000\"\n      ]\n    },\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"node\"\n      ],\n      \"dst\": [\n        \"172.26.0.0/16:*\"\n      ]\n    }\n  ],\n}\n`,\n\t\t\tnode: &types.Node{\n\t\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\t\tIPv6: ap(\"fd7a:115c:a1e0::2\"),\n\t\t\t\tUser: new(users[3]),\n\t\t\t},\n\t\t\tpeers: types.Nodes{\n\t\t\t\t&types.Node{\n\t\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\t\tIPv6: ap(\"fd7a:115c:a1e0::1\"),\n\t\t\t\t\tUser: new(users[1]),\n\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\t\tRoutableIPs: []netip.Prefix{p(\"172.16.0.0/24\"), p(\"10.10.11.0/24\"), p(\"10.10.12.0/24\")},\n\t\t\t\t\t},\n\t\t\t\t\tApprovedRoutes: []netip.Prefix{p(\"172.16.0.0/24\"), p(\"10.10.11.0/24\"), p(\"10.10.12.0/24\")},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []tailcfg.FilterRule{},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tfor idx, pmf := range policy.PolicyManagerFuncsForTest([]byte(tt.pol)) {\n\t\t\tt.Run(fmt.Sprintf(\"%s-index%d\", tt.name, idx), func(t *testing.T) {\n\t\t\t\tvar (\n\t\t\t\t\tpm  policy.PolicyManager\n\t\t\t\t\terr error\n\t\t\t\t)\n\n\t\t\t\tpm, err = pmf(users, append(tt.peers, tt.node).ViewSlice())\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tgot, _ := pm.Filter()\n\t\t\t\tt.Logf(\"full filter:\\n%s\", must.Get(json.MarshalIndent(got, \"\", \"  \")))\n\t\t\t\tgot = policyutil.ReduceFilterRules(tt.node.View(), got)\n\n\t\t\t\tif diff := cmp.Diff(tt.want, got); diff != \"\" {\n\t\t\t\t\tlog.Trace().Interface(\"got\", got).Msg(\"result\")\n\t\t\t\t\tt.Errorf(\"TestReduceFilterRules() unexpected result (-want +got):\\n%s\", diff)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "hscontrol/policy/route_approval_test.go",
    "content": "package policy\n\nimport (\n\t\"fmt\"\n\t\"net/netip\"\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gorm.io/gorm\"\n)\n\nfunc TestNodeCanApproveRoute(t *testing.T) {\n\tusers := []types.User{\n\t\t{Name: \"user1\", Model: gorm.Model{ID: 1}},\n\t\t{Name: \"user2\", Model: gorm.Model{ID: 2}},\n\t\t{Name: \"user3\", Model: gorm.Model{ID: 3}},\n\t}\n\n\t// Create standard node setups used across tests\n\tnormalNode := types.Node{\n\t\tID:       1,\n\t\tHostname: \"user1-device\",\n\t\tIPv4:     ap(\"100.64.0.1\"),\n\t\tUserID:   new(uint(1)),\n\t\tUser:     new(users[0]),\n\t}\n\n\texitNode := types.Node{\n\t\tID:       2,\n\t\tHostname: \"user2-device\",\n\t\tIPv4:     ap(\"100.64.0.2\"),\n\t\tUserID:   new(uint(2)),\n\t\tUser:     new(users[1]),\n\t}\n\n\ttaggedNode := types.Node{\n\t\tID:       3,\n\t\tHostname: \"tagged-server\",\n\t\tIPv4:     ap(\"100.64.0.3\"),\n\t\tUserID:   new(uint(3)),\n\t\tUser:     new(users[2]),\n\t\tTags:     []string{\"tag:router\"},\n\t}\n\n\tmultiTagNode := types.Node{\n\t\tID:       4,\n\t\tHostname: \"multi-tag-node\",\n\t\tIPv4:     ap(\"100.64.0.4\"),\n\t\tUserID:   new(uint(2)),\n\t\tUser:     new(users[1]),\n\t\tTags:     []string{\"tag:router\", \"tag:server\"},\n\t}\n\n\ttests := []struct {\n\t\tname       string\n\t\tnode       types.Node\n\t\troute      netip.Prefix\n\t\tpolicy     string\n\t\tcanApprove bool\n\t}{\n\t\t{\n\t\t\tname:  \"allow-all-routes-for-admin-user\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"192.168.1.0/24\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"192.168.0.0/16\": [\"group:admin\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: true,\n\t\t},\n\t\t{\n\t\t\tname:  \"deny-route-that-doesnt-match-autoApprovers\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"10.0.0.0/24\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"192.168.0.0/16\": [\"group:admin\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: false,\n\t\t},\n\t\t{\n\t\t\tname:  \"user-not-in-group\",\n\t\t\tnode:  exitNode,\n\t\t\troute: p(\"192.168.1.0/24\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"192.168.0.0/16\": [\"group:admin\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: false,\n\t\t},\n\t\t{\n\t\t\tname:  \"tagged-node-can-approve\",\n\t\t\tnode:  taggedNode,\n\t\t\troute: p(\"10.0.0.0/8\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:router\": [\"user3@\"]\n\t\t\t\t},\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"10.0.0.0/8\": 
[\"tag:router\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: true,\n\t\t},\n\t\t{\n\t\t\tname:  \"multiple-routes-in-policy\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"172.16.10.0/24\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:router\": [\"user3@\"]\n\t\t\t\t},\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"192.168.0.0/16\": [\"group:admin\"],\n\t\t\t\t\t\t\"172.16.0.0/12\": [\"group:admin\"],\n\t\t\t\t\t\t\"10.0.0.0/8\": [\"tag:router\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: true,\n\t\t},\n\t\t{\n\t\t\tname:  \"match-specific-route-within-range\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"192.168.5.0/24\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"192.168.0.0/16\": [\"group:admin\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: true,\n\t\t},\n\t\t{\n\t\t\tname:  \"ip-address-within-range\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"192.168.1.5/32\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"192.168.1.0/24\": [\"group:admin\"],\n\t\t\t\t\t\t\"192.168.1.128/25\": [\"group:admin\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: true,\n\t\t},\n\t\t{\n\t\t\tname:  \"all-IPv4-routes-(0.0.0.0/0)-approval\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"0.0.0.0/0\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"0.0.0.0/0\": [\"group:admin\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: false,\n\t\t},\n\t\t{\n\t\t\tname:  \"all-IPv4-routes-exitnode-approval\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"0.0.0.0/0\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"exitNode\": [\"group:admin\"]\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: true,\n\t\t},\n\t\t{\n\t\t\tname:  \"all-IPv6-routes-exitnode-approval\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"::/0\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"exitNode\": [\"group:admin\"]\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: true,\n\t\t},\n\t\t{\n\t\t\tname:  \"specific-IPv4-route-with-exitnode-only-approval\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"192.168.1.0/24\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], 
\"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"exitNode\": [\"group:admin\"]\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: false,\n\t\t},\n\t\t{\n\t\t\tname:  \"specific-IPv6-route-with-exitnode-only-approval\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"fd00::/8\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"exitNode\": [\"group:admin\"]\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: false,\n\t\t},\n\t\t{\n\t\t\tname:  \"specific-IPv4-route-with-all-routes-policy\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"10.0.0.0/8\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"0.0.0.0/0\": [\"group:admin\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: true,\n\t\t},\n\t\t{\n\t\t\tname:  \"all-IPv6-routes-(::0/0)-approval\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"::/0\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"::/0\": [\"group:admin\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: false,\n\t\t},\n\t\t{\n\t\t\tname:  \"specific-IPv6-route-with-all-routes-policy\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"fd00::/8\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"::/0\": [\"group:admin\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: true,\n\t\t},\n\t\t{\n\t\t\tname:  \"IPv6-route-with-IPv4-all-routes-policy\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"fd00::/8\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"0.0.0.0/0\": [\"group:admin\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: false,\n\t\t},\n\t\t{\n\t\t\tname:  \"IPv4-route-with-IPv6-all-routes-policy\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"10.0.0.0/8\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"::/0\": [\"group:admin\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: false,\n\t\t},\n\t\t{\n\t\t\tname:  \"both-IPv4-and-IPv6-all-routes-policy\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"192.168.1.0/24\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"0.0.0.0/0\": 
[\"group:admin\"],\n\t\t\t\t\t\t\"::/0\": [\"group:admin\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: true,\n\t\t},\n\t\t{\n\t\t\tname:  \"ip-address-with-all-routes-policy\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"192.168.101.5/32\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"0.0.0.0/0\": [\"group:admin\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: true,\n\t\t},\n\t\t{\n\t\t\tname:  \"specific-IPv6-host-route-with-all-routes-policy\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"2001:db8::1/128\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"::/0\": [\"group:admin\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: true,\n\t\t},\n\t\t{\n\t\t\tname:  \"multiple-groups-allowed-to-approve-same-route\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"192.168.1.0/24\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"],\n\t\t\t\t\t\"group:netadmin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"192.168.1.0/24\": [\"group:admin\", \"group:netadmin\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: true,\n\t\t},\n\t\t{\n\t\t\tname:  \"overlapping-routes-with-different-groups\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"192.168.1.0/24\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"],\n\t\t\t\t\t\"group:restricted\": [\"user2@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"192.168.0.0/16\": [\"group:restricted\"],\n\t\t\t\t\t\t\"192.168.1.0/24\": [\"group:admin\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: true,\n\t\t},\n\t\t{\n\t\t\tname:  \"unique-local-IPv6-address-with-all-routes-policy\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"fc00::/7\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"::/0\": [\"group:admin\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: true,\n\t\t},\n\t\t{\n\t\t\tname:  \"exact-prefix-match-in-policy\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"203.0.113.0/24\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"203.0.113.0/24\": [\"group:admin\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: true,\n\t\t},\n\t\t{\n\t\t\tname:  \"narrower-range-than-policy\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"203.0.113.0/26\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": 
[\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"203.0.113.0/24\": [\"group:admin\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: true,\n\t\t},\n\t\t{\n\t\t\tname:  \"wider-range-than-policy-should-fail\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"203.0.113.0/23\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"203.0.113.0/24\": [\"group:admin\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: false,\n\t\t},\n\t\t{\n\t\t\tname:  \"adjacent-route-to-policy-route-should-fail\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"203.0.114.0/24\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"203.0.113.0/24\": [\"group:admin\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: false,\n\t\t},\n\t\t{\n\t\t\tname:  \"combined-routes-and-exitnode-approvers-specific-route\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"192.168.1.0/24\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"exitNode\": [\"group:admin\"],\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"192.168.1.0/24\": [\"group:admin\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: true,\n\t\t},\n\t\t{\n\t\t\tname:  \"partly-overlapping-route-with-policy-should-fail\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"203.0.113.128/23\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"203.0.113.0/24\": [\"group:admin\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: false,\n\t\t},\n\t\t{\n\t\t\tname:  \"multiple-routes-with-aggregatable-ranges\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"10.0.0.0/8\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"10.0.0.0/9\": [\"group:admin\"],\n\t\t\t\t\t\t\"10.128.0.0/9\": [\"group:admin\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: false,\n\t\t},\n\t\t{\n\t\t\tname:  \"non-standard-IPv6-notation\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"2001:db8::1/128\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"2001:db8::/32\": [\"group:admin\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: true,\n\t\t},\n\t\t{\n\t\t\tname:  
\"node-with-multiple-tags-all-required\",\n\t\t\tnode:  multiTagNode,\n\t\t\troute: p(\"10.10.0.0/16\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:router\": [\"user2@\"],\n\t\t\t\t\t\"tag:server\": [\"user2@\"]\n\t\t\t\t},\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"10.10.0.0/16\": [\"tag:router\", \"tag:server\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: true,\n\t\t},\n\t\t{\n\t\t\tname:  \"node-with-multiple-tags-one-matching-is-sufficient\",\n\t\t\tnode:  multiTagNode,\n\t\t\troute: p(\"10.10.0.0/16\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:router\": [\"user2@\"],\n\t\t\t\t\t\"tag:server\": [\"user2@\"]\n\t\t\t\t},\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"10.10.0.0/16\": [\"tag:router\", \"group:admin\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: true,\n\t\t},\n\t\t{\n\t\t\tname:  \"node-with-multiple-tags-missing-required-tag\",\n\t\t\tnode:  multiTagNode,\n\t\t\troute: p(\"10.10.0.0/16\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t    \"tag:othertag\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"10.10.0.0/16\": [\"tag:othertag\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: false,\n\t\t},\n\t\t{\n\t\t\tname:  \"node-with-tag-and-group-membership\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"10.20.0.0/16\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:router\": [\"user3@\"]\n\t\t\t\t},\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"10.20.0.0/16\": [\"group:admin\", \"tag:router\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: true,\n\t\t},\n\t\t{\n\t\t\t// Tags-as-identity: Tagged nodes are identified by their tags, not by the\n\t\t\t// user who created them. 
Group membership of the creator is irrelevant.\n\t\t\t// A tagged node can only be auto-approved via tag-based autoApprovers,\n\t\t\t// not group-based ones (even if the creator is in the group).\n\t\t\tname:  \"tagged-node-with-group-autoapprover-not-approved\",\n\t\t\tnode:  taggedNode, // Has tag:router, owned by user3\n\t\t\troute: p(\"10.30.0.0/16\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:router\": [\"user3@\"]\n\t\t\t\t},\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:ops\": [\"user3@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"routes\": {\n\t\t\t\t\t\t\"10.30.0.0/16\": [\"group:ops\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: false, // Tagged nodes don't inherit group membership for auto-approval\n\t\t},\n\t\t{\n\t\t\tname:  \"small-subnet-with-exitnode-only-approval\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"192.168.1.1/32\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admin\"], \"dst\": [\"*:*\"]}\n\t\t\t\t],\n\t\t\t\t\"autoApprovers\": {\n\t\t\t\t\t\"exitNode\": [\"group:admin\"]\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcanApprove: false,\n\t\t},\n\t\t{\n\t\t\tname:       \"empty-policy\",\n\t\t\tnode:       normalNode,\n\t\t\troute:      p(\"192.168.1.0/24\"),\n\t\t\tpolicy:     `{\"acls\":[{\"action\":\"accept\",\"src\":[\"*\"],\"dst\":[\"*:*\"]}]}`,\n\t\t\tcanApprove: false,\n\t\t},\n\t\t{\n\t\t\tname:  \"policy-without-autoApprovers-section\",\n\t\t\tnode:  normalNode,\n\t\t\troute: p(\"10.33.0.0/16\"),\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admin\": [\"user1@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\t\t\"src\": [\"group:admin\"],\n\t\t\t\t\t\t\"dst\": [\"group:admin:*\"]\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\t\t\"src\": [\"group:admin\"],\n\t\t\t\t\t\t\"dst\": [\"10.33.0.0/16:*\"]\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\tcanApprove: false,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\t// Initialize all policy manager implementations. Every case must\n\t\t\t// parse cleanly; \"empty-policy\" simply yields a valid policy with\n\t\t\t// no autoApprovers section.\n\t\t\tpolicyManagers, err := PolicyManagersForTest([]byte(tt.policy), users, types.Nodes{&tt.node}.ViewSlice())\n\t\t\trequire.NoError(t, err)\n\n\t\t\tfor i, pm := range policyManagers {\n\t\t\t\tt.Run(fmt.Sprintf(\"policy-index%d\", i), func(t *testing.T) {\n\t\t\t\t\tresult := pm.NodeCanApproveRoute(tt.node.View(), tt.route)\n\n\t\t\t\t\tif diff := cmp.Diff(tt.canApprove, result); diff != \"\" {\n\t\t\t\t\t\tt.Errorf(\"NodeCanApproveRoute() mismatch (-want +got):\\n%s\", diff)\n\t\t\t\t\t}\n\n\t\t\t\t\tassert.Equal(t, tt.canApprove, result, \"Unexpected route approval result\")\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/filter.go",
    "content": "package v2\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net/netip\"\n\t\"slices\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/rs/zerolog/log\"\n\t\"go4.org/netipx\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/views\"\n)\n\nvar (\n\tErrInvalidAction = errors.New(\"invalid action\")\n\terrSelfInSources = errors.New(\"autogroup:self cannot be used in sources\")\n)\n\n// compileFilterRules takes a set of nodes and an ACLPolicy and generates a\n// set of Tailscale compatible FilterRules used to allow traffic on clients.\nfunc (pol *Policy) compileFilterRules(\n\tusers types.Users,\n\tnodes views.Slice[types.NodeView],\n) ([]tailcfg.FilterRule, error) {\n\tif pol == nil || pol.ACLs == nil {\n\t\treturn tailcfg.FilterAllowAll, nil\n\t}\n\n\tvar rules []tailcfg.FilterRule\n\n\tfor _, acl := range pol.ACLs {\n\t\tif acl.Action != ActionAccept {\n\t\t\treturn nil, ErrInvalidAction\n\t\t}\n\n\t\tsrcIPs, err := acl.Sources.Resolve(pol, users, nodes)\n\t\tif err != nil {\n\t\t\tlog.Trace().Caller().Err(err).Msgf(\"resolving source ips\")\n\t\t}\n\n\t\tif srcIPs == nil || len(srcIPs.Prefixes()) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tprotocols := acl.Protocol.parseProtocol()\n\n\t\tvar destPorts []tailcfg.NetPortRange\n\n\t\tfor _, dest := range acl.Destinations {\n\t\t\t// Check if destination is a wildcard - use \"*\" directly instead of expanding\n\t\t\tif _, isWildcard := dest.Alias.(Asterix); isWildcard {\n\t\t\t\tfor _, port := range dest.Ports {\n\t\t\t\t\tdestPorts = append(destPorts, tailcfg.NetPortRange{\n\t\t\t\t\t\tIP:    \"*\",\n\t\t\t\t\t\tPorts: port,\n\t\t\t\t\t})\n\t\t\t\t}\n\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// autogroup:internet does not generate packet filters - it's handled\n\t\t\t// by exit node routing via AllowedIPs, not by packet filtering.\n\t\t\tif ag, isAutoGroup := dest.Alias.(*AutoGroup); isAutoGroup && ag.Is(AutoGroupInternet) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tips, err := dest.Resolve(pol, users, nodes)\n\t\t\tif err != nil {\n\t\t\t\tlog.Trace().Caller().Err(err).Msgf(\"resolving destination ips\")\n\t\t\t}\n\n\t\t\tif ips == nil {\n\t\t\t\tlog.Debug().Caller().Msgf(\"destination resolved to nil ips: %v\", dest)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tprefixes := ips.Prefixes()\n\n\t\t\tfor _, pref := range prefixes {\n\t\t\t\tfor _, port := range dest.Ports {\n\t\t\t\t\tpr := tailcfg.NetPortRange{\n\t\t\t\t\t\tIP:    pref.String(),\n\t\t\t\t\t\tPorts: port,\n\t\t\t\t\t}\n\t\t\t\t\tdestPorts = append(destPorts, pr)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif len(destPorts) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\trules = append(rules, tailcfg.FilterRule{\n\t\t\tSrcIPs:   ipSetToPrefixStringList(srcIPs),\n\t\t\tDstPorts: destPorts,\n\t\t\tIPProto:  protocols,\n\t\t})\n\t}\n\n\treturn mergeFilterRules(rules), nil\n}\n\n// compileFilterRulesForNode compiles filter rules for a specific node.\nfunc (pol *Policy) compileFilterRulesForNode(\n\tusers types.Users,\n\tnode types.NodeView,\n\tnodes views.Slice[types.NodeView],\n) ([]tailcfg.FilterRule, error) {\n\tif pol == nil {\n\t\treturn tailcfg.FilterAllowAll, nil\n\t}\n\n\tvar rules []tailcfg.FilterRule\n\n\tfor _, acl := range pol.ACLs {\n\t\tif acl.Action != ActionAccept {\n\t\t\treturn nil, ErrInvalidAction\n\t\t}\n\n\t\taclRules, err := pol.compileACLWithAutogroupSelf(acl, users, node, nodes)\n\t\tif err != nil {\n\t\t\tlog.Trace().Err(err).Msgf(\"compiling 
ACL\")\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, rule := range aclRules {\n\t\t\tif rule != nil {\n\t\t\t\trules = append(rules, *rule)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn mergeFilterRules(rules), nil\n}\n\n// compileACLWithAutogroupSelf compiles a single ACL rule, handling\n// autogroup:self per-node while supporting all other alias types normally.\n// It returns a slice of filter rules because when an ACL has both autogroup:self\n// and other destinations, they need to be split into separate rules with different\n// source filtering logic.\n//\n//nolint:gocyclo // complex ACL compilation logic\nfunc (pol *Policy) compileACLWithAutogroupSelf(\n\tacl ACL,\n\tusers types.Users,\n\tnode types.NodeView,\n\tnodes views.Slice[types.NodeView],\n) ([]*tailcfg.FilterRule, error) {\n\tvar (\n\t\tautogroupSelfDests []AliasWithPorts\n\t\totherDests         []AliasWithPorts\n\t)\n\n\tfor _, dest := range acl.Destinations {\n\t\tif ag, ok := dest.Alias.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {\n\t\t\tautogroupSelfDests = append(autogroupSelfDests, dest)\n\t\t} else {\n\t\t\totherDests = append(otherDests, dest)\n\t\t}\n\t}\n\n\tprotocols := acl.Protocol.parseProtocol()\n\n\tvar rules []*tailcfg.FilterRule\n\n\tvar resolvedSrcIPs []*netipx.IPSet\n\n\tfor _, src := range acl.Sources {\n\t\tif ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {\n\t\t\treturn nil, errSelfInSources\n\t\t}\n\n\t\tips, err := src.Resolve(pol, users, nodes)\n\t\tif err != nil {\n\t\t\tlog.Trace().Caller().Err(err).Msgf(\"resolving source ips\")\n\t\t}\n\n\t\tif ips != nil {\n\t\t\tresolvedSrcIPs = append(resolvedSrcIPs, ips)\n\t\t}\n\t}\n\n\tif len(resolvedSrcIPs) == 0 {\n\t\treturn rules, nil\n\t}\n\n\t// Handle autogroup:self destinations (if any)\n\t// Tagged nodes don't participate in autogroup:self (identity is tag-based, not user-based)\n\tif len(autogroupSelfDests) > 0 && !node.IsTagged() {\n\t\t// Pre-filter to same-user untagged devices once - reuse for both sources and destinations\n\t\tsameUserNodes := make([]types.NodeView, 0)\n\n\t\tfor _, n := range nodes.All() {\n\t\t\tif !n.IsTagged() && n.User().ID() == node.User().ID() {\n\t\t\t\tsameUserNodes = append(sameUserNodes, n)\n\t\t\t}\n\t\t}\n\n\t\tif len(sameUserNodes) > 0 {\n\t\t\t// Filter sources to only same-user untagged devices\n\t\t\tvar srcIPs netipx.IPSetBuilder\n\n\t\t\tfor _, ips := range resolvedSrcIPs {\n\t\t\t\tfor _, n := range sameUserNodes {\n\t\t\t\t\t// Check if any of this node's IPs are in the source set\n\t\t\t\t\tif slices.ContainsFunc(n.IPs(), ips.Contains) {\n\t\t\t\t\t\tn.AppendToIPSet(&srcIPs)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tsrcSet, err := srcIPs.IPSet()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif srcSet != nil && len(srcSet.Prefixes()) > 0 {\n\t\t\t\tvar destPorts []tailcfg.NetPortRange\n\n\t\t\t\tfor _, dest := range autogroupSelfDests {\n\t\t\t\t\tfor _, n := range sameUserNodes {\n\t\t\t\t\t\tfor _, port := range dest.Ports {\n\t\t\t\t\t\t\tfor _, ip := range n.IPs() {\n\t\t\t\t\t\t\t\tdestPorts = append(destPorts, tailcfg.NetPortRange{\n\t\t\t\t\t\t\t\t\tIP:    netip.PrefixFrom(ip, ip.BitLen()).String(),\n\t\t\t\t\t\t\t\t\tPorts: port,\n\t\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif len(destPorts) > 0 {\n\t\t\t\t\trules = append(rules, &tailcfg.FilterRule{\n\t\t\t\t\t\tSrcIPs:   ipSetToPrefixStringList(srcSet),\n\t\t\t\t\t\tDstPorts: destPorts,\n\t\t\t\t\t\tIPProto:  protocols,\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(otherDests) > 0 
{\n\t\tvar srcIPs netipx.IPSetBuilder\n\n\t\tfor _, ips := range resolvedSrcIPs {\n\t\t\tsrcIPs.AddSet(ips)\n\t\t}\n\n\t\tsrcSet, err := srcIPs.IPSet()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif srcSet != nil && len(srcSet.Prefixes()) > 0 {\n\t\t\tvar destPorts []tailcfg.NetPortRange\n\n\t\t\tfor _, dest := range otherDests {\n\t\t\t\t// Check if destination is a wildcard - use \"*\" directly instead of expanding\n\t\t\t\tif _, isWildcard := dest.Alias.(Asterix); isWildcard {\n\t\t\t\t\tfor _, port := range dest.Ports {\n\t\t\t\t\t\tdestPorts = append(destPorts, tailcfg.NetPortRange{\n\t\t\t\t\t\t\tIP:    \"*\",\n\t\t\t\t\t\t\tPorts: port,\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// autogroup:internet does not generate packet filters - it's handled\n\t\t\t\t// by exit node routing via AllowedIPs, not by packet filtering.\n\t\t\t\tif ag, isAutoGroup := dest.Alias.(*AutoGroup); isAutoGroup && ag.Is(AutoGroupInternet) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tips, err := dest.Resolve(pol, users, nodes)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Trace().Caller().Err(err).Msgf(\"resolving destination ips\")\n\t\t\t\t}\n\n\t\t\t\tif ips == nil {\n\t\t\t\t\tlog.Debug().Caller().Msgf(\"destination resolved to nil ips: %v\", dest)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tprefixes := ips.Prefixes()\n\n\t\t\t\tfor _, pref := range prefixes {\n\t\t\t\t\tfor _, port := range dest.Ports {\n\t\t\t\t\t\tpr := tailcfg.NetPortRange{\n\t\t\t\t\t\t\tIP:    pref.String(),\n\t\t\t\t\t\t\tPorts: port,\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdestPorts = append(destPorts, pr)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif len(destPorts) > 0 {\n\t\t\t\trules = append(rules, &tailcfg.FilterRule{\n\t\t\t\t\tSrcIPs:   ipSetToPrefixStringList(srcSet),\n\t\t\t\t\tDstPorts: destPorts,\n\t\t\t\t\tIPProto:  protocols,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\treturn rules, nil\n}\n\nvar sshAccept = tailcfg.SSHAction{\n\tReject:                    false,\n\tAccept:                    true,\n\tAllowAgentForwarding:      true,\n\tAllowLocalPortForwarding:  true,\n\tAllowRemotePortForwarding: true,\n}\n\n// checkPeriodFromRule extracts the check period duration from an SSH rule.\n// Returns SSHCheckPeriodDefault if no checkPeriod is configured,\n// 0 if checkPeriod is \"always\", or the configured duration otherwise.\nfunc checkPeriodFromRule(rule SSH) time.Duration {\n\tswitch {\n\tcase rule.CheckPeriod == nil:\n\t\treturn SSHCheckPeriodDefault\n\tcase rule.CheckPeriod.Always:\n\t\treturn 0\n\tdefault:\n\t\treturn rule.CheckPeriod.Duration\n\t}\n}\n\nfunc sshCheck(baseURL string, duration time.Duration) tailcfg.SSHAction {\n\tholdURL := baseURL + \"/machine/ssh/action/from/$SRC_NODE_ID/to/$DST_NODE_ID?ssh_user=$SSH_USER&local_user=$LOCAL_USER\"\n\n\treturn tailcfg.SSHAction{\n\t\tReject:          false,\n\t\tAccept:          false,\n\t\tSessionDuration: duration,\n\t\t// Replaced in the client:\n\t\t//   * $SRC_NODE_IP (URL escaped)\n\t\t//   * $SRC_NODE_ID (Node.ID as int64 string)\n\t\t//   * $DST_NODE_IP (URL escaped)\n\t\t//   * $DST_NODE_ID (Node.ID as int64 string)\n\t\t//   * $SSH_USER (URL escaped, ssh user requested)\n\t\t//   * $LOCAL_USER (URL escaped, local user mapped)\n\t\tHoldAndDelegate:           holdURL,\n\t\tAllowAgentForwarding:      true,\n\t\tAllowLocalPortForwarding:  true,\n\t\tAllowRemotePortForwarding: true,\n\t}\n}\n\nfunc (pol *Policy) compileSSHPolicy(\n\tbaseURL string,\n\tusers types.Users,\n\tnode types.NodeView,\n\tnodes views.Slice[types.NodeView],\n) 
(*tailcfg.SSHPolicy, error) {\n\tif pol == nil || pol.SSHs == nil || len(pol.SSHs) == 0 {\n\t\treturn nil, nil //nolint:nilnil // intentional: no SSH policy when none configured\n\t}\n\n\tlog.Trace().Caller().Msgf(\"compiling SSH policy for node %q\", node.Hostname())\n\n\tvar rules []*tailcfg.SSHRule\n\n\tfor index, rule := range pol.SSHs {\n\t\tvar autogroupSelfDests, otherDests []Alias\n\n\t\tfor _, dst := range rule.Destinations {\n\t\t\tif ag, ok := dst.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {\n\t\t\t\tautogroupSelfDests = append(autogroupSelfDests, dst)\n\t\t\t} else {\n\t\t\t\totherDests = append(otherDests, dst)\n\t\t\t}\n\t\t}\n\n\t\tsrcIPs, err := rule.Sources.Resolve(pol, users, nodes)\n\t\tif err != nil {\n\t\t\tlog.Trace().Caller().Err(err).Msgf(\n\t\t\t\t\"ssh policy compilation failed resolving source ips for rule %+v\", rule,\n\t\t\t)\n\t\t}\n\n\t\tif srcIPs == nil || len(srcIPs.Prefixes()) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar action tailcfg.SSHAction\n\n\t\tswitch rule.Action {\n\t\tcase SSHActionAccept:\n\t\t\taction = sshAccept\n\t\tcase SSHActionCheck:\n\t\t\taction = sshCheck(baseURL, checkPeriodFromRule(rule))\n\t\tdefault:\n\t\t\t// Don't wrap err here: it belongs to the source resolution above\n\t\t\t// and may be nil.\n\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\"parsing SSH policy, unknown action %q, index: %d\",\n\t\t\t\trule.Action, index,\n\t\t\t)\n\t\t}\n\n\t\tacceptEnv := rule.AcceptEnv\n\n\t\t// Build the common userMap (always has at least a root entry).\n\t\tconst rootUser = \"root\"\n\n\t\tbaseUserMap := make(map[string]string, len(rule.Users))\n\t\tif rule.Users.ContainsNonRoot() {\n\t\t\tbaseUserMap[\"*\"] = \"=\"\n\t\t}\n\n\t\tif rule.Users.ContainsRoot() {\n\t\t\tbaseUserMap[rootUser] = rootUser\n\t\t} else {\n\t\t\tbaseUserMap[rootUser] = \"\"\n\t\t}\n\n\t\tfor _, u := range rule.Users.NormalUsers() {\n\t\t\tbaseUserMap[u.String()] = u.String()\n\t\t}\n\n\t\thasLocalpart := rule.Users.ContainsLocalpart()\n\n\t\tvar localpartByUser map[uint]string\n\t\tif hasLocalpart {\n\t\t\tlocalpartByUser = resolveLocalparts(\n\t\t\t\trule.Users.LocalpartEntries(), users,\n\t\t\t)\n\t\t}\n\n\t\tuserIDs, principalsByUser, taggedPrincipals := groupSourcesByUser(\n\t\t\tnodes, srcIPs,\n\t\t)\n\n\t\t// appendRules emits a common rule and, if the user has a\n\t\t// localpart match, a per-user localpart rule.\n\t\tappendRules := func(principals []*tailcfg.SSHPrincipal, uid uint, hasUID bool) {\n\t\t\trules = append(rules, &tailcfg.SSHRule{\n\t\t\t\tPrincipals: principals,\n\t\t\t\tSSHUsers:   baseUserMap,\n\t\t\t\tAction:     &action,\n\t\t\t\tAcceptEnv:  acceptEnv,\n\t\t\t})\n\n\t\t\tif hasUID {\n\t\t\t\tif lp, ok := localpartByUser[uid]; ok {\n\t\t\t\t\trules = append(rules, &tailcfg.SSHRule{\n\t\t\t\t\t\tPrincipals: principals,\n\t\t\t\t\t\tSSHUsers:   map[string]string{lp: lp},\n\t\t\t\t\t\tAction:     &action,\n\t\t\t\t\t\tAcceptEnv:  acceptEnv,\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Handle autogroup:self destinations.\n\t\t// Tagged nodes can't match autogroup:self.\n\t\tif len(autogroupSelfDests) > 0 &&\n\t\t\t!node.IsTagged() && node.User().Valid() {\n\t\t\tuid := node.User().ID()\n\n\t\t\tif principals := principalsByUser[uid]; len(principals) > 0 {\n\t\t\t\tappendRules(principals, uid, true)\n\t\t\t}\n\t\t}\n\n\t\t// Handle other destinations.\n\t\tif len(otherDests) > 0 {\n\t\t\tvar dest netipx.IPSetBuilder\n\n\t\t\tfor _, dst := range otherDests {\n\t\t\t\tips, err := dst.Resolve(pol, users, nodes)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Trace().Caller().Err(err).\n\t\t\t\t\t\tMsgf(\"resolving destination 
ips\")\n\t\t\t\t}\n\n\t\t\t\tif ips != nil {\n\t\t\t\t\tdest.AddSet(ips)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tdestSet, err := dest.IPSet()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif node.InIPSet(destSet) {\n\t\t\t\t// Node is a destination — emit rules.\n\t\t\t\t// When localpart entries exist, interleave common\n\t\t\t\t// and localpart rules per source user to match\n\t\t\t\t// Tailscale SaaS first-match-wins ordering.\n\t\t\t\tif hasLocalpart {\n\t\t\t\t\tfor _, uid := range userIDs {\n\t\t\t\t\t\tappendRules(principalsByUser[uid], uid, true)\n\t\t\t\t\t}\n\n\t\t\t\t\tif len(taggedPrincipals) > 0 {\n\t\t\t\t\t\tappendRules(taggedPrincipals, 0, false)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif principals := ipSetToPrincipals(srcIPs); len(principals) > 0 {\n\t\t\t\t\t\trules = append(rules, &tailcfg.SSHRule{\n\t\t\t\t\t\t\tPrincipals: principals,\n\t\t\t\t\t\t\tSSHUsers:   baseUserMap,\n\t\t\t\t\t\t\tAction:     &action,\n\t\t\t\t\t\t\tAcceptEnv:  acceptEnv,\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else if hasLocalpart && node.InIPSet(srcIPs) {\n\t\t\t\t// Self-access: source node not in destination set\n\t\t\t\t// receives rules scoped to its own user.\n\t\t\t\tif node.IsTagged() {\n\t\t\t\t\tvar builder netipx.IPSetBuilder\n\n\t\t\t\t\tnode.AppendToIPSet(&builder)\n\n\t\t\t\t\tipSet, err := builder.IPSet()\n\t\t\t\t\tif err == nil && ipSet != nil {\n\t\t\t\t\t\tif principals := ipSetToPrincipals(ipSet); len(principals) > 0 {\n\t\t\t\t\t\t\tappendRules(principals, 0, false)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else if node.User().Valid() {\n\t\t\t\t\tuid := node.User().ID()\n\t\t\t\t\tif principals := principalsByUser[uid]; len(principals) > 0 {\n\t\t\t\t\t\tappendRules(principals, uid, true)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Sort rules: check (HoldAndDelegate) before accept, per Tailscale\n\t// evaluation order (most-restrictive first).\n\tslices.SortStableFunc(rules, func(a, b *tailcfg.SSHRule) int {\n\t\taIsCheck := a.Action != nil && a.Action.HoldAndDelegate != \"\"\n\n\t\tbIsCheck := b.Action != nil && b.Action.HoldAndDelegate != \"\"\n\t\tif aIsCheck == bIsCheck {\n\t\t\treturn 0\n\t\t}\n\n\t\tif aIsCheck {\n\t\t\treturn -1\n\t\t}\n\n\t\treturn 1\n\t})\n\n\treturn &tailcfg.SSHPolicy{\n\t\tRules: rules,\n\t}, nil\n}\n\n// ipSetToPrincipals converts an IPSet into SSH principals, one per address.\nfunc ipSetToPrincipals(ipSet *netipx.IPSet) []*tailcfg.SSHPrincipal {\n\tif ipSet == nil {\n\t\treturn nil\n\t}\n\n\tvar principals []*tailcfg.SSHPrincipal\n\n\tfor addr := range util.IPSetAddrIter(ipSet) {\n\t\tprincipals = append(principals, &tailcfg.SSHPrincipal{\n\t\t\tNodeIP: addr.String(),\n\t\t})\n\t}\n\n\treturn principals\n}\n\n// resolveLocalparts maps each user whose email matches a localpart:*@<domain>\n// entry to their email local-part. Returns userID → localPart (e.g. 
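{1: \"alice\"}).\n//\n// A minimal sketch with invented data: given one user whose email is\n// \"alice@example.com\" and the single entry \"localpart:*@example.com\",\n// the function returns the map shown above (e.g. 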
{1: \"alice\"}).\n// This is a pure data function — no node walking or IP resolution.\nfunc resolveLocalparts(\n\tentries []SSHUser,\n\tusers types.Users,\n) map[uint]string {\n\tif len(entries) == 0 {\n\t\treturn nil\n\t}\n\n\tresult := make(map[uint]string)\n\n\tfor _, entry := range entries {\n\t\tdomain, err := entry.ParseLocalpart()\n\t\tif err != nil {\n\t\t\tlog.Warn().Err(err).Msgf(\n\t\t\t\t\"skipping invalid localpart entry %q during SSH compilation\",\n\t\t\t\tentry,\n\t\t\t)\n\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, user := range users {\n\t\t\tif user.Email == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tatIdx := strings.LastIndex(user.Email, \"@\")\n\t\t\tif atIdx < 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif !strings.EqualFold(user.Email[atIdx+1:], domain) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tresult[user.ID] = user.Email[:atIdx]\n\t\t}\n\t}\n\n\treturn result\n}\n\n// groupSourcesByUser groups source node IPs by user ownership. Returns sorted\n// user IDs for deterministic iteration, per-user principals, and tagged principals.\n// Only includes nodes whose IPs are in the srcIPs set.\nfunc groupSourcesByUser(\n\tnodes views.Slice[types.NodeView],\n\tsrcIPs *netipx.IPSet,\n) ([]uint, map[uint][]*tailcfg.SSHPrincipal, []*tailcfg.SSHPrincipal) {\n\tuserIPSets := make(map[uint]*netipx.IPSetBuilder)\n\n\tvar taggedIPSet netipx.IPSetBuilder\n\n\thasTagged := false\n\n\tfor _, n := range nodes.All() {\n\t\tif !slices.ContainsFunc(n.IPs(), srcIPs.Contains) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif n.IsTagged() {\n\t\t\tn.AppendToIPSet(&taggedIPSet)\n\n\t\t\thasTagged = true\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif !n.User().Valid() {\n\t\t\tcontinue\n\t\t}\n\n\t\tuid := n.User().ID()\n\n\t\tif _, ok := userIPSets[uid]; !ok {\n\t\t\tuserIPSets[uid] = &netipx.IPSetBuilder{}\n\t\t}\n\n\t\tn.AppendToIPSet(userIPSets[uid])\n\t}\n\n\tvar userIDs []uint\n\n\tprincipalsByUser := make(map[uint][]*tailcfg.SSHPrincipal, len(userIPSets))\n\n\tfor uid, builder := range userIPSets {\n\t\tipSet, err := builder.IPSet()\n\t\tif err != nil || ipSet == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif principals := ipSetToPrincipals(ipSet); len(principals) > 0 {\n\t\t\tprincipalsByUser[uid] = principals\n\t\t\tuserIDs = append(userIDs, uid)\n\t\t}\n\t}\n\n\tslices.Sort(userIDs)\n\n\tvar tagged []*tailcfg.SSHPrincipal\n\n\tif hasTagged {\n\t\ttaggedSet, err := taggedIPSet.IPSet()\n\t\tif err == nil && taggedSet != nil {\n\t\t\ttagged = ipSetToPrincipals(taggedSet)\n\t\t}\n\t}\n\n\treturn userIDs, principalsByUser, tagged\n}\n\nfunc ipSetToPrefixStringList(ips *netipx.IPSet) []string {\n\tvar out []string\n\n\tif ips == nil {\n\t\treturn out\n\t}\n\n\tfor _, pref := range ips.Prefixes() {\n\t\tout = append(out, pref.String())\n\t}\n\n\treturn out\n}\n\n// filterRuleKey generates a unique key for merging based on SrcIPs and IPProto.\nfunc filterRuleKey(rule tailcfg.FilterRule) string {\n\tsrcKey := strings.Join(rule.SrcIPs, \",\")\n\n\tprotoStrs := make([]string, len(rule.IPProto))\n\tfor i, p := range rule.IPProto {\n\t\tprotoStrs[i] = strconv.Itoa(p)\n\t}\n\n\treturn srcKey + \"|\" + strings.Join(protoStrs, \",\")\n}\n\n// mergeFilterRules merges rules with identical SrcIPs and IPProto by combining\n// their DstPorts. 
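\n//\n// A hypothetical illustration (addresses invented): two rules, both with\n// SrcIPs [\"100.64.0.1/32\"] and the same IPProto set, one allowing\n// \"100.64.0.2/32:22\" and the other \"100.64.0.3/32:80\", merge into a single\n// rule whose DstPorts holds both NetPortRanges in input order.\n// 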
DstPorts are NOT deduplicated to match Tailscale behavior.\nfunc mergeFilterRules(rules []tailcfg.FilterRule) []tailcfg.FilterRule {\n\tif len(rules) <= 1 {\n\t\treturn rules\n\t}\n\n\tkeyToIdx := make(map[string]int)\n\tresult := make([]tailcfg.FilterRule, 0, len(rules))\n\n\tfor _, rule := range rules {\n\t\tkey := filterRuleKey(rule)\n\n\t\tif idx, exists := keyToIdx[key]; exists {\n\t\t\t// Merge: append DstPorts to existing rule\n\t\t\tresult[idx].DstPorts = append(result[idx].DstPorts, rule.DstPorts...)\n\t\t} else {\n\t\t\t// New unique combination\n\t\t\tkeyToIdx[key] = len(result)\n\t\t\tresult = append(result, tailcfg.FilterRule{\n\t\t\t\tSrcIPs:   rule.SrcIPs,\n\t\t\t\tDstPorts: slices.Clone(rule.DstPorts),\n\t\t\t\tIPProto:  rule.IPProto,\n\t\t\t})\n\t\t}\n\t}\n\n\treturn result\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/filter_test.go",
    "content": "package v2\n\nimport (\n\t\"encoding/json\"\n\t\"net/netip\"\n\t\"slices\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"go4.org/netipx\"\n\t\"gorm.io/gorm\"\n\t\"tailscale.com/tailcfg\"\n)\n\n// aliasWithPorts creates an AliasWithPorts structure from an alias and ports.\nfunc aliasWithPorts(alias Alias, ports ...tailcfg.PortRange) AliasWithPorts {\n\treturn AliasWithPorts{\n\t\tAlias: alias,\n\t\tPorts: ports,\n\t}\n}\n\nfunc TestParsing(t *testing.T) {\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"testuser\"},\n\t}\n\ttests := []struct {\n\t\tname    string\n\t\tformat  string\n\t\tacl     string\n\t\twant    []tailcfg.FilterRule\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname:   \"invalid-hujson\",\n\t\t\tformat: \"hujson\",\n\t\t\tacl: `\n{\n\t\t`,\n\t\t\twant:    []tailcfg.FilterRule{},\n\t\t\twantErr: true,\n\t\t},\n\t\t// The new parser will ignore all that is irrelevant\n\t\t// \t\t{\n\t\t// \t\t\tname:   \"valid-hujson-invalid-content\",\n\t\t// \t\t\tformat: \"hujson\",\n\t\t// \t\t\tacl: `\n\t\t// {\n\t\t//   \"valid_json\": true,\n\t\t//   \"but_a_policy_though\": false\n\t\t// }\n\t\t// \t\t\t\t`,\n\t\t// \t\t\twant:    []tailcfg.FilterRule{},\n\t\t// \t\t\twantErr: true,\n\t\t// \t\t},\n\t\t// \t\t{\n\t\t// \t\t\tname:   \"invalid-cidr\",\n\t\t// \t\t\tformat: \"hujson\",\n\t\t// \t\t\tacl: `\n\t\t// {\"example-host-1\": \"100.100.100.100/42\"}\n\t\t// \t\t\t\t`,\n\t\t// \t\t\twant:    []tailcfg.FilterRule{},\n\t\t// \t\t\twantErr: true,\n\t\t// \t\t},\n\t\t{\n\t\t\tname:   \"basic-rule\",\n\t\t\tformat: \"hujson\",\n\t\t\tacl: `\n{\n\t\"hosts\": {\n\t\t\"host-1\": \"100.100.100.100\",\n\t\t\"subnet-1\": \"100.100.101.100/24\",\n\t},\n\n\t\"acls\": [\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"src\": [\n\t\t\t\t\"subnet-1\",\n\t\t\t\t\"192.168.1.0/24\"\n\t\t\t],\n\t\t\t\"dst\": [\n\t\t\t\t\"*:22,3389\",\n\t\t\t\t\"host-1:*\",\n\t\t\t],\n\t\t},\n\t],\n}\n\t\t`,\n\t\t\twant: []tailcfg.FilterRule{\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.100.101.0/24\", \"192.168.1.0/24\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRange{First: 3389, Last: 3389}},\n\t\t\t\t\t\t{IP: \"100.100.100.100/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname:   \"parse-protocol\",\n\t\t\tformat: \"hujson\",\n\t\t\tacl: `\n{\n\t\"hosts\": {\n\t\t\"host-1\": \"100.100.100.100\",\n\t\t\"subnet-1\": \"100.100.101.100/24\",\n\t},\n\n\t\"acls\": [\n\t\t{\n\t\t\t\"Action\": \"accept\",\n\t\t\t\"src\": [\n\t\t\t\t\"*\",\n\t\t\t],\n\t\t\t\"proto\": \"tcp\",\n\t\t\t\"dst\": [\n\t\t\t\t\"host-1:*\",\n\t\t\t],\n\t\t},\n\t\t{\n\t\t\t\"Action\": \"accept\",\n\t\t\t\"src\": [\n\t\t\t\t\"*\",\n\t\t\t],\n\t\t\t\"proto\": \"udp\",\n\t\t\t\"dst\": [\n\t\t\t\t\"host-1:53\",\n\t\t\t],\n\t\t},\n\t\t{\n\t\t\t\"Action\": \"accept\",\n\t\t\t\"src\": [\n\t\t\t\t\"*\",\n\t\t\t],\n\t\t\t\"proto\": \"icmp\",\n\t\t\t\"dst\": [\n\t\t\t\t\"host-1:*\",\n\t\t\t],\n\t\t},\n\t],\n}`,\n\t\t\twant: []tailcfg.FilterRule{\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.0/10\", \"fd7a:115c:a1e0::/48\"},\n\t\t\t\t\tDstPorts: 
[]tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.100.100.100/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolTCP},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.0/10\", \"fd7a:115c:a1e0::/48\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.100.100.100/32\", Ports: tailcfg.PortRange{First: 53, Last: 53}},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolUDP},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.0/10\", \"fd7a:115c:a1e0::/48\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.100.100.100/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t},\n\t\t\t\t\t// proto:icmp only includes ICMP (1), not ICMPv6 (58)\n\t\t\t\t\tIPProto: []int{ProtocolICMP},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname:   \"port-wildcard\",\n\t\t\tformat: \"hujson\",\n\t\t\tacl: `\n{\n\t\"hosts\": {\n\t\t\"host-1\": \"100.100.100.100\",\n\t\t\"subnet-1\": \"100.100.101.100/24\",\n\t},\n\n\t\"acls\": [\n\t\t{\n\t\t\t\"Action\": \"accept\",\n\t\t\t\"src\": [\n\t\t\t\t\"*\",\n\t\t\t],\n\t\t\t\"dst\": [\n\t\t\t\t\"host-1:*\",\n\t\t\t],\n\t\t},\n\t],\n}\n`,\n\t\t\twant: []tailcfg.FilterRule{\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.0/10\", \"fd7a:115c:a1e0::/48\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.100.100.100/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname:   \"port-range\",\n\t\t\tformat: \"hujson\",\n\t\t\tacl: `\n{\n\t\"hosts\": {\n\t\t\"host-1\": \"100.100.100.100\",\n\t\t\"subnet-1\": \"100.100.101.100/24\",\n\t},\n\n\t\"acls\": [\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"src\": [\n\t\t\t\t\"subnet-1\",\n\t\t\t],\n\t\t\t\"dst\": [\n\t\t\t\t\"host-1:5400-5500\",\n\t\t\t],\n\t\t},\n\t],\n}\n`,\n\t\t\twant: []tailcfg.FilterRule{\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.100.101.0/24\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP:    \"100.100.100.100/32\",\n\t\t\t\t\t\t\tPorts: tailcfg.PortRange{First: 5400, Last: 5500},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname:   \"port-group\",\n\t\t\tformat: \"hujson\",\n\t\t\tacl: `\n{\n\t\"groups\": {\n\t\t\"group:example\": [\n\t\t\t\"testuser@\",\n\t\t],\n\t},\n\n\t\"hosts\": {\n\t\t\"host-1\": \"100.100.100.100\",\n\t\t\"subnet-1\": \"100.100.101.100/24\",\n\t},\n\n\t\"acls\": [\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"src\": [\n\t\t\t\t\"group:example\",\n\t\t\t],\n\t\t\t\"dst\": [\n\t\t\t\t\"host-1:*\",\n\t\t\t],\n\t\t},\n\t],\n}\n`,\n\t\t\twant: []tailcfg.FilterRule{\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"200.200.200.200/32\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.100.100.100/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname:   \"port-user\",\n\t\t\tformat: \"hujson\",\n\t\t\tacl: `\n{\n\t\"hosts\": {\n\t\t\"host-1\": \"100.100.100.100\",\n\t\t\"subnet-1\": \"100.100.101.100/24\",\n\t},\n\n\t\"acls\": [\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"src\": [\n\t\t\t\t\"testuser@\",\n\t\t\t],\n\t\t\t\"dst\": 
[\n\t\t\t\t\"host-1:*\",\n\t\t\t],\n\t\t},\n\t],\n}\n`,\n\t\t\twant: []tailcfg.FilterRule{\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"200.200.200.200/32\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.100.100.100/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname:   \"ipv6\",\n\t\t\tformat: \"hujson\",\n\t\t\tacl: `\n{\n\t\"hosts\": {\n\t\t\"host-1\": \"100.100.100.100/32\",\n\t\t\"subnet-1\": \"100.100.101.100/24\",\n\t},\n\n\t\"acls\": [\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"src\": [\n\t\t\t\t\"*\",\n\t\t\t],\n\t\t\t\"dst\": [\n\t\t\t\t\"host-1:*\",\n\t\t\t],\n\t\t},\n\t],\n}\n`,\n\t\t\twant: []tailcfg.FilterRule{\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.0/10\", \"fd7a:115c:a1e0::/48\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.100.100.100/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tpol, err := unmarshalPolicy([]byte(tt.acl))\n\t\t\tif tt.wantErr && err == nil {\n\t\t\t\tt.Errorf(\"parsing() error = %v, wantErr %v\", err, tt.wantErr)\n\n\t\t\t\treturn\n\t\t\t} else if !tt.wantErr && err != nil {\n\t\t\t\tt.Errorf(\"parsing() error = %v, wantErr %v\", err, tt.wantErr)\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trules, err := pol.compileFilterRules(\n\t\t\t\tusers,\n\t\t\t\ttypes.Nodes{\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tIPv4: ap(\"100.100.100.100\"),\n\t\t\t\t\t},\n\t\t\t\t\t&types.Node{\n\t\t\t\t\t\tIPv4:     ap(\"200.200.200.200\"),\n\t\t\t\t\t\tUser:     &users[0],\n\t\t\t\t\t\tHostinfo: &tailcfg.Hostinfo{},\n\t\t\t\t\t},\n\t\t\t\t}.ViewSlice())\n\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"parsing() error = %v, wantErr %v\", err, tt.wantErr)\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif diff := cmp.Diff(tt.want, rules); diff != \"\" {\n\t\t\t\tt.Errorf(\"parsing() unexpected result (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestCompileSSHPolicy_UserMapping(t *testing.T) {\n\tusers := types.Users{\n\t\t{Name: \"user1\", Model: gorm.Model{ID: 1}},\n\t\t{Name: \"user2\", Model: gorm.Model{ID: 2}},\n\t}\n\n\t// Create test nodes - use tagged nodes as SSH destinations\n\t// and untagged nodes as SSH sources (since group->username destinations\n\t// are not allowed per Tailscale security model, but groups can SSH to tags)\n\tnodeTaggedServer := types.Node{\n\t\tHostname: \"tagged-server\",\n\t\tIPv4:     createAddr(\"100.64.0.1\"),\n\t\tUserID:   new(users[0].ID),\n\t\tUser:     new(users[0]),\n\t\tTags:     []string{\"tag:server\"},\n\t}\n\tnodeTaggedDB := types.Node{\n\t\tHostname: \"tagged-db\",\n\t\tIPv4:     createAddr(\"100.64.0.2\"),\n\t\tUserID:   new(users[1].ID),\n\t\tUser:     new(users[1]),\n\t\tTags:     []string{\"tag:database\"},\n\t}\n\t// Add untagged node for user2 - this will be the SSH source\n\t// (group:admins contains user2, so user2's untagged node provides the source IPs)\n\tnodeUser2Untagged := types.Node{\n\t\tHostname: \"user2-device\",\n\t\tIPv4:     createAddr(\"100.64.0.3\"),\n\t\tUserID:   new(users[1].ID),\n\t\tUser:     new(users[1]),\n\t}\n\n\tnodes := types.Nodes{&nodeTaggedServer, &nodeTaggedDB, &nodeUser2Untagged}\n\n\tacceptAction := 
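\n\t// The expected accept action mirrors the compiler's sshAccept value:\n\t// agent, local, and remote port forwarding are all allowed.\n\t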
&tailcfg.SSHAction{\n\t\tAccept:                    true,\n\t\tAllowAgentForwarding:      true,\n\t\tAllowLocalPortForwarding:  true,\n\t\tAllowRemotePortForwarding: true,\n\t}\n\tuser2Principal := []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.3\"}}\n\n\ttests := []struct {\n\t\tname       string\n\t\ttargetNode types.Node\n\t\tpolicy     *Policy\n\t\twant       *tailcfg.SSHPolicy\n\t}{\n\t\t{\n\t\t\tname:       \"specific user mapping\",\n\t\t\ttargetNode: nodeTaggedServer,\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:server\"): Owners{up(\"user1@\")},\n\t\t\t\t},\n\t\t\t\tGroups: Groups{\n\t\t\t\t\tGroup(\"group:admins\"): []Username{Username(\"user2@\")},\n\t\t\t\t},\n\t\t\t\tSSHs: []SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\t\tSources:      SSHSrcAliases{gp(\"group:admins\")},\n\t\t\t\t\t\tDestinations: SSHDstAliases{tp(\"tag:server\")},\n\t\t\t\t\t\tUsers:        []SSHUser{\"ssh-it-user\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{\n\t\t\t\t{\n\t\t\t\t\tPrincipals: user2Principal,\n\t\t\t\t\tSSHUsers:   map[string]string{\"root\": \"\", \"ssh-it-user\": \"ssh-it-user\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t\t{\n\t\t\tname:       \"multiple specific users\",\n\t\t\ttargetNode: nodeTaggedServer,\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:server\"): Owners{up(\"user1@\")},\n\t\t\t\t},\n\t\t\t\tGroups: Groups{\n\t\t\t\t\tGroup(\"group:admins\"): []Username{Username(\"user2@\")},\n\t\t\t\t},\n\t\t\t\tSSHs: []SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\t\tSources:      SSHSrcAliases{gp(\"group:admins\")},\n\t\t\t\t\t\tDestinations: SSHDstAliases{tp(\"tag:server\")},\n\t\t\t\t\t\tUsers:        []SSHUser{\"ubuntu\", \"admin\", \"deploy\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{\n\t\t\t\t{\n\t\t\t\t\tPrincipals: user2Principal,\n\t\t\t\t\tSSHUsers:   map[string]string{\"root\": \"\", \"ubuntu\": \"ubuntu\", \"admin\": \"admin\", \"deploy\": \"deploy\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t\t{\n\t\t\tname:       \"autogroup:nonroot only\",\n\t\t\ttargetNode: nodeTaggedServer,\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:server\"): Owners{up(\"user1@\")},\n\t\t\t\t},\n\t\t\t\tGroups: Groups{\n\t\t\t\t\tGroup(\"group:admins\"): []Username{Username(\"user2@\")},\n\t\t\t\t},\n\t\t\t\tSSHs: []SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\t\tSources:      SSHSrcAliases{gp(\"group:admins\")},\n\t\t\t\t\t\tDestinations: SSHDstAliases{tp(\"tag:server\")},\n\t\t\t\t\t\tUsers:        []SSHUser{SSHUser(AutoGroupNonRoot)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{\n\t\t\t\t{\n\t\t\t\t\tPrincipals: user2Principal,\n\t\t\t\t\tSSHUsers:   map[string]string{\"*\": \"=\", \"root\": \"\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t\t{\n\t\t\tname:       \"root only\",\n\t\t\ttargetNode: nodeTaggedServer,\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:server\"): Owners{up(\"user1@\")},\n\t\t\t\t},\n\t\t\t\tGroups: Groups{\n\t\t\t\t\tGroup(\"group:admins\"): []Username{Username(\"user2@\")},\n\t\t\t\t},\n\t\t\t\tSSHs: []SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\t\tSources:      SSHSrcAliases{gp(\"group:admins\")},\n\t\t\t\t\t\tDestinations: 
SSHDstAliases{tp(\"tag:server\")},\n\t\t\t\t\t\tUsers:        []SSHUser{\"root\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{\n\t\t\t\t{\n\t\t\t\t\tPrincipals: user2Principal,\n\t\t\t\t\tSSHUsers:   map[string]string{\"root\": \"root\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t\t{\n\t\t\tname:       \"autogroup:nonroot plus root\",\n\t\t\ttargetNode: nodeTaggedServer,\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:server\"): Owners{up(\"user1@\")},\n\t\t\t\t},\n\t\t\t\tGroups: Groups{\n\t\t\t\t\tGroup(\"group:admins\"): []Username{Username(\"user2@\")},\n\t\t\t\t},\n\t\t\t\tSSHs: []SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\t\tSources:      SSHSrcAliases{gp(\"group:admins\")},\n\t\t\t\t\t\tDestinations: SSHDstAliases{tp(\"tag:server\")},\n\t\t\t\t\t\tUsers:        []SSHUser{SSHUser(AutoGroupNonRoot), \"root\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{\n\t\t\t\t{\n\t\t\t\t\tPrincipals: user2Principal,\n\t\t\t\t\tSSHUsers:   map[string]string{\"*\": \"=\", \"root\": \"root\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t\t{\n\t\t\tname:       \"mixed specific users and autogroups\",\n\t\t\ttargetNode: nodeTaggedServer,\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:server\"): Owners{up(\"user1@\")},\n\t\t\t\t},\n\t\t\t\tGroups: Groups{\n\t\t\t\t\tGroup(\"group:admins\"): []Username{Username(\"user2@\")},\n\t\t\t\t},\n\t\t\t\tSSHs: []SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\t\tSources:      SSHSrcAliases{gp(\"group:admins\")},\n\t\t\t\t\t\tDestinations: SSHDstAliases{tp(\"tag:server\")},\n\t\t\t\t\t\tUsers:        []SSHUser{SSHUser(AutoGroupNonRoot), \"root\", \"ubuntu\", \"admin\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{\n\t\t\t\t{\n\t\t\t\t\tPrincipals: user2Principal,\n\t\t\t\t\tSSHUsers:   map[string]string{\"*\": \"=\", \"root\": \"root\", \"ubuntu\": \"ubuntu\", \"admin\": \"admin\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t\t{\n\t\t\tname:       \"no matching destination\",\n\t\t\ttargetNode: nodeTaggedDB, // Target tag:database, but policy only allows tag:server\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:server\"):   Owners{up(\"user1@\")},\n\t\t\t\t\tTag(\"tag:database\"): Owners{up(\"user1@\")},\n\t\t\t\t},\n\t\t\t\tGroups: Groups{\n\t\t\t\t\tGroup(\"group:admins\"): []Username{Username(\"user2@\")},\n\t\t\t\t},\n\t\t\t\tSSHs: []SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\t\tSources:      SSHSrcAliases{gp(\"group:admins\")},\n\t\t\t\t\t\tDestinations: SSHDstAliases{tp(\"tag:server\")}, // Only tag:server, not tag:database\n\t\t\t\t\t\tUsers:        []SSHUser{\"ssh-it-user\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: &tailcfg.SSHPolicy{},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\trequire.NoError(t, tt.policy.validate())\n\n\t\t\tgot, err := tt.policy.compileSSHPolicy(\"unused-server-url\", users, tt.targetNode.View(), nodes.ViewSlice())\n\t\t\trequire.NoError(t, err)\n\n\t\t\tif diff := cmp.Diff(tt.want, got); diff != \"\" {\n\t\t\t\tt.Errorf(\"compileSSHPolicy() mismatch (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestCompileSSHPolicy_LocalpartMapping(t *testing.T) {\n\tusers := 
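\n\t// Fixture: alice and bob share example.com, charlie is in other.com, and\n\t// dave is a CLI user without an email, so localpart entries never match him.\n\t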
types.Users{\n\t\t{Name: \"alice\", Email: \"alice@example.com\", Model: gorm.Model{ID: 1}},\n\t\t{Name: \"bob\", Email: \"bob@example.com\", Model: gorm.Model{ID: 2}},\n\t\t{Name: \"charlie\", Email: \"charlie@other.com\", Model: gorm.Model{ID: 3}},\n\t\t{Name: \"dave\", Model: gorm.Model{ID: 4}}, // CLI user, no email\n\t}\n\n\tnodeTaggedServer := types.Node{\n\t\tHostname: \"tagged-server\",\n\t\tIPv4:     createAddr(\"100.64.0.1\"),\n\t\tUserID:   new(users[0].ID),\n\t\tUser:     new(users[0]),\n\t\tTags:     []string{\"tag:server\"},\n\t}\n\tnodeAlice := types.Node{\n\t\tHostname: \"alice-device\",\n\t\tIPv4:     createAddr(\"100.64.0.2\"),\n\t\tUserID:   new(users[0].ID),\n\t\tUser:     new(users[0]),\n\t}\n\tnodeBob := types.Node{\n\t\tHostname: \"bob-device\",\n\t\tIPv4:     createAddr(\"100.64.0.3\"),\n\t\tUserID:   new(users[1].ID),\n\t\tUser:     new(users[1]),\n\t}\n\tnodeCharlie := types.Node{\n\t\tHostname: \"charlie-device\",\n\t\tIPv4:     createAddr(\"100.64.0.4\"),\n\t\tUserID:   new(users[2].ID),\n\t\tUser:     new(users[2]),\n\t}\n\tnodeDave := types.Node{\n\t\tHostname: \"dave-device\",\n\t\tIPv4:     createAddr(\"100.64.0.5\"),\n\t\tUserID:   new(users[3].ID),\n\t\tUser:     new(users[3]),\n\t}\n\n\tnodes := types.Nodes{&nodeTaggedServer, &nodeAlice, &nodeBob, &nodeCharlie, &nodeDave}\n\n\tacceptAction := &tailcfg.SSHAction{\n\t\tAccept:                    true,\n\t\tAllowAgentForwarding:      true,\n\t\tAllowLocalPortForwarding:  true,\n\t\tAllowRemotePortForwarding: true,\n\t}\n\n\ttests := []struct {\n\t\tname       string\n\t\tusers      types.Users // nil → use default users\n\t\tnodes      types.Nodes // nil → use default nodes\n\t\ttargetNode types.Node\n\t\tpolicy     *Policy\n\t\twant       *tailcfg.SSHPolicy\n\t}{\n\t\t{\n\t\t\tname:       \"localpart only\",\n\t\t\ttargetNode: nodeTaggedServer,\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:server\"): Owners{up(\"alice@example.com\")},\n\t\t\t\t},\n\t\t\t\tSSHs: []SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\t\tSources:      SSHSrcAliases{agp(\"autogroup:member\")},\n\t\t\t\t\t\tDestinations: SSHDstAliases{tp(\"tag:server\")},\n\t\t\t\t\t\tUsers:        []SSHUser{SSHUser(\"localpart:*@example.com\")},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Per-user common+localpart rules interleaved, then non-matching users.\n\t\t\twant: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.2\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"root\": \"\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.2\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"alice\": \"alice\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.3\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"root\": \"\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.3\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"bob\": \"bob\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.4\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"root\": \"\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.5\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"root\": 
\"\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t\t{\n\t\t\tname:       \"localpart with root\",\n\t\t\ttargetNode: nodeTaggedServer,\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:server\"): Owners{up(\"alice@example.com\")},\n\t\t\t\t},\n\t\t\t\tSSHs: []SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\t\tSources:      SSHSrcAliases{agp(\"autogroup:member\")},\n\t\t\t\t\t\tDestinations: SSHDstAliases{tp(\"tag:server\")},\n\t\t\t\t\t\tUsers:        []SSHUser{SSHUser(\"localpart:*@example.com\"), \"root\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Per-user common+localpart rules interleaved, then non-matching users.\n\t\t\twant: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.2\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"root\": \"root\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.2\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"alice\": \"alice\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.3\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"root\": \"root\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.3\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"bob\": \"bob\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.4\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"root\": \"root\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.5\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"root\": \"root\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t\t{\n\t\t\tname:       \"localpart no matching users in domain\",\n\t\t\ttargetNode: nodeTaggedServer,\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:server\"): Owners{up(\"alice@example.com\")},\n\t\t\t\t},\n\t\t\t\tSSHs: []SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\t\tSources:      SSHSrcAliases{agp(\"autogroup:member\")},\n\t\t\t\t\t\tDestinations: SSHDstAliases{tp(\"tag:server\")},\n\t\t\t\t\t\tUsers:        []SSHUser{SSHUser(\"localpart:*@nonexistent.com\")},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// No localpart matches, but per-user common rules still emitted (root deny)\n\t\t\twant: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.2\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"root\": \"\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.3\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"root\": \"\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.4\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"root\": \"\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.5\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"root\": \"\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t\t{\n\t\t\tname: \"localpart with special chars in email\",\n\t\t\tusers: types.Users{\n\t\t\t\t{Name: 
\"dave+sshuser\", Email: \"dave+sshuser@example.com\", Model: gorm.Model{ID: 10}},\n\t\t\t},\n\t\t\tnodes: func() types.Nodes {\n\t\t\t\tspecialUser := types.User{Name: \"dave+sshuser\", Email: \"dave+sshuser@example.com\", Model: gorm.Model{ID: 10}}\n\t\t\t\tn := types.Node{\n\t\t\t\t\tHostname: \"special-device\",\n\t\t\t\t\tIPv4:     createAddr(\"100.64.0.10\"),\n\t\t\t\t\tUserID:   new(specialUser.ID),\n\t\t\t\t\tUser:     &specialUser,\n\t\t\t\t}\n\n\t\t\t\treturn types.Nodes{&nodeTaggedServer, &n}\n\t\t\t}(),\n\t\t\ttargetNode: nodeTaggedServer,\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:server\"): Owners{up(\"dave+sshuser@example.com\")},\n\t\t\t\t},\n\t\t\t\tSSHs: []SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\t\tSources:      SSHSrcAliases{agp(\"autogroup:member\")},\n\t\t\t\t\t\tDestinations: SSHDstAliases{tp(\"tag:server\")},\n\t\t\t\t\t\tUsers:        []SSHUser{SSHUser(\"localpart:*@example.com\")},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Per-user common rule (root deny), then separate localpart rule.\n\t\t\twant: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.10\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"root\": \"\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.10\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"dave+sshuser\": \"dave+sshuser\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t\t{\n\t\t\tname: \"localpart excludes CLI users without email\",\n\t\t\tusers: types.Users{\n\t\t\t\t{Name: \"dave\", Model: gorm.Model{ID: 4}},\n\t\t\t},\n\t\t\tnodes: func() types.Nodes {\n\t\t\t\tcliUser := types.User{Name: \"dave\", Model: gorm.Model{ID: 4}}\n\t\t\t\tn := types.Node{\n\t\t\t\t\tHostname: \"dave-cli-device\",\n\t\t\t\t\tIPv4:     createAddr(\"100.64.0.5\"),\n\t\t\t\t\tUserID:   new(cliUser.ID),\n\t\t\t\t\tUser:     &cliUser,\n\t\t\t\t}\n\n\t\t\t\treturn types.Nodes{&nodeTaggedServer, &n}\n\t\t\t}(),\n\t\t\ttargetNode: nodeTaggedServer,\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:server\"): Owners{up(\"dave@\")},\n\t\t\t\t},\n\t\t\t\tSSHs: []SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\t\tSources:      SSHSrcAliases{agp(\"autogroup:member\")},\n\t\t\t\t\t\tDestinations: SSHDstAliases{tp(\"tag:server\")},\n\t\t\t\t\t\tUsers:        []SSHUser{SSHUser(\"localpart:*@example.com\")},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// No localpart matches (CLI user, no email), but implicit root deny emits common rule\n\t\t\twant: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.5\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"root\": \"\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t\t{\n\t\t\tname:       \"localpart with multiple domains\",\n\t\t\ttargetNode: nodeTaggedServer,\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:server\"): Owners{up(\"alice@example.com\")},\n\t\t\t\t},\n\t\t\t\tSSHs: []SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\t\tSources:      SSHSrcAliases{agp(\"autogroup:member\")},\n\t\t\t\t\t\tDestinations: SSHDstAliases{tp(\"tag:server\")},\n\t\t\t\t\t\tUsers: 
[]SSHUser{\n\t\t\t\t\t\t\tSSHUser(\"localpart:*@example.com\"),\n\t\t\t\t\t\t\tSSHUser(\"localpart:*@other.com\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Per-user common+localpart rules interleaved:\n\t\t\t// alice/bob match *@example.com, charlie matches *@other.com.\n\t\t\twant: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.2\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"root\": \"\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.2\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"alice\": \"alice\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.3\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"root\": \"\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.3\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"bob\": \"bob\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.4\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"root\": \"\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.4\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"charlie\": \"charlie\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.5\"}},\n\t\t\t\t\tSSHUsers:   map[string]string{\"root\": \"\"},\n\t\t\t\t\tAction:     acceptAction,\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\ttestUsers := users\n\t\t\tif tt.users != nil {\n\t\t\t\ttestUsers = tt.users\n\t\t\t}\n\n\t\t\ttestNodes := nodes\n\t\t\tif tt.nodes != nil {\n\t\t\t\ttestNodes = tt.nodes\n\t\t\t}\n\n\t\t\trequire.NoError(t, tt.policy.validate())\n\n\t\t\tgot, err := tt.policy.compileSSHPolicy(\n\t\t\t\t\"unused-server-url\", testUsers, tt.targetNode.View(), testNodes.ViewSlice(),\n\t\t\t)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tif diff := cmp.Diff(tt.want, got); diff != \"\" {\n\t\t\t\tt.Errorf(\"compileSSHPolicy() unexpected result (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestCompileSSHPolicy_CheckAction(t *testing.T) {\n\tusers := types.Users{\n\t\t{Name: \"user1\", Model: gorm.Model{ID: 1}},\n\t\t{Name: \"user2\", Model: gorm.Model{ID: 2}},\n\t}\n\n\t// Use tagged nodes for SSH user mapping tests\n\tnodeTaggedServer := types.Node{\n\t\tHostname: \"tagged-server\",\n\t\tIPv4:     createAddr(\"100.64.0.1\"),\n\t\tUserID:   new(users[0].ID),\n\t\tUser:     new(users[0]),\n\t\tTags:     []string{\"tag:server\"},\n\t}\n\tnodeUser2 := types.Node{\n\t\tHostname: \"user2-device\",\n\t\tIPv4:     createAddr(\"100.64.0.2\"),\n\t\tUserID:   new(users[1].ID),\n\t\tUser:     new(users[1]),\n\t}\n\n\tnodes := types.Nodes{&nodeTaggedServer, &nodeUser2}\n\n\tpolicy := &Policy{\n\t\tTagOwners: TagOwners{\n\t\t\tTag(\"tag:server\"): Owners{up(\"user1@\")},\n\t\t},\n\t\tGroups: Groups{\n\t\t\tGroup(\"group:admins\"): []Username{Username(\"user2@\")},\n\t\t},\n\t\tSSHs: []SSH{\n\t\t\t{\n\t\t\t\tAction:       \"check\",\n\t\t\t\tCheckPeriod:  &SSHCheckPeriod{Duration: 24 * time.Hour},\n\t\t\t\tSources:      SSHSrcAliases{gp(\"group:admins\")},\n\t\t\t\tDestinations: SSHDstAliases{tp(\"tag:server\")},\n\t\t\t\tUsers:      
  []SSHUser{\"ssh-it-user\"},\n\t\t\t},\n\t\t},\n\t}\n\n\trequire.NoError(t, policy.validate())\n\n\tsshPolicy, err := policy.compileSSHPolicy(\"unused-server-url\", users, nodeTaggedServer.View(), nodes.ViewSlice())\n\trequire.NoError(t, err)\n\trequire.NotNil(t, sshPolicy)\n\trequire.Len(t, sshPolicy.Rules, 1)\n\n\trule := sshPolicy.Rules[0]\n\n\t// Verify SSH users are correctly mapped\n\texpectedUsers := map[string]string{\n\t\t\"ssh-it-user\": \"ssh-it-user\",\n\t\t\"root\":        \"\",\n\t}\n\tassert.Equal(t, expectedUsers, rule.SSHUsers)\n\n\t// Verify check action: Accept is false, HoldAndDelegate is set\n\tassert.False(t, rule.Action.Accept)\n\tassert.False(t, rule.Action.Reject)\n\tassert.NotEmpty(t, rule.Action.HoldAndDelegate)\n\tassert.Contains(t, rule.Action.HoldAndDelegate, \"/machine/ssh/action/\")\n\tassert.Equal(t, 24*time.Hour, rule.Action.SessionDuration)\n\n\t// Verify check params are NOT encoded in the URL (looked up server-side).\n\tassert.NotContains(t, rule.Action.HoldAndDelegate, \"check_explicit\")\n\tassert.NotContains(t, rule.Action.HoldAndDelegate, \"check_period\")\n}\n\n// TestCompileSSHPolicy_CheckBeforeAcceptOrdering verifies that check\n// (HoldAndDelegate) rules are sorted before accept rules, even when\n// the accept rule appears first in the policy definition.\nfunc TestCompileSSHPolicy_CheckBeforeAcceptOrdering(t *testing.T) {\n\tusers := types.Users{\n\t\t{Name: \"user1\", Model: gorm.Model{ID: 1}},\n\t\t{Name: \"user2\", Model: gorm.Model{ID: 2}},\n\t}\n\n\tnodeTaggedServer := types.Node{\n\t\tHostname: \"tagged-server\",\n\t\tIPv4:     createAddr(\"100.64.0.1\"),\n\t\tUserID:   new(users[0].ID),\n\t\tUser:     new(users[0]),\n\t\tTags:     []string{\"tag:server\"},\n\t}\n\tnodeUser2 := types.Node{\n\t\tHostname: \"user2-device\",\n\t\tIPv4:     createAddr(\"100.64.0.2\"),\n\t\tUserID:   new(users[1].ID),\n\t\tUser:     new(users[1]),\n\t}\n\n\tnodes := types.Nodes{&nodeTaggedServer, &nodeUser2}\n\n\t// Accept rule appears BEFORE check rule in policy definition.\n\tpolicy := &Policy{\n\t\tTagOwners: TagOwners{\n\t\t\tTag(\"tag:server\"): Owners{up(\"user1@\")},\n\t\t},\n\t\tGroups: Groups{\n\t\t\tGroup(\"group:admins\"): []Username{Username(\"user2@\")},\n\t\t},\n\t\tSSHs: []SSH{\n\t\t\t{\n\t\t\t\tAction:       \"accept\",\n\t\t\t\tSources:      SSHSrcAliases{gp(\"group:admins\")},\n\t\t\t\tDestinations: SSHDstAliases{tp(\"tag:server\")},\n\t\t\t\tUsers:        []SSHUser{\"root\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tAction:       \"check\",\n\t\t\t\tCheckPeriod:  &SSHCheckPeriod{Duration: 24 * time.Hour},\n\t\t\t\tSources:      SSHSrcAliases{gp(\"group:admins\")},\n\t\t\t\tDestinations: SSHDstAliases{tp(\"tag:server\")},\n\t\t\t\tUsers:        []SSHUser{\"ssh-it-user\"},\n\t\t\t},\n\t\t},\n\t}\n\n\terr := policy.validate()\n\trequire.NoError(t, err)\n\n\tsshPolicy, err := policy.compileSSHPolicy(\n\t\t\"unused-server-url\",\n\t\tusers,\n\t\tnodeTaggedServer.View(),\n\t\tnodes.ViewSlice(),\n\t)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, sshPolicy)\n\trequire.Len(t, sshPolicy.Rules, 2)\n\n\t// First rule must be the check rule (HoldAndDelegate set).\n\tassert.NotEmpty(t, sshPolicy.Rules[0].Action.HoldAndDelegate,\n\t\t\"first rule should be check (HoldAndDelegate)\")\n\tassert.False(t, sshPolicy.Rules[0].Action.Accept,\n\t\t\"first rule should not be accept\")\n\n\t// Second rule must be the accept rule.\n\tassert.True(t, sshPolicy.Rules[1].Action.Accept,\n\t\t\"second rule should be accept\")\n\tassert.Empty(t, 
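\n\t\t// Accept actions never carry HoldAndDelegate; only check actions do.\n\t\t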
sshPolicy.Rules[1].Action.HoldAndDelegate,\n\t\t\"second rule should not have HoldAndDelegate\")\n}\n\n// TestSSHIntegrationReproduction reproduces the exact scenario from the integration test\n// TestSSHOneUserToAll that was failing with empty sshUsers.\nfunc TestSSHIntegrationReproduction(t *testing.T) {\n\t// Create users matching the integration test\n\tusers := types.Users{\n\t\t{Name: \"user1\", Model: gorm.Model{ID: 1}},\n\t\t{Name: \"user2\", Model: gorm.Model{ID: 2}},\n\t}\n\n\t// Create simple nodes for testing\n\tnode1 := &types.Node{\n\t\tHostname: \"user1-node\",\n\t\tIPv4:     createAddr(\"100.64.0.1\"),\n\t\tUserID:   new(users[0].ID),\n\t\tUser:     new(users[0]),\n\t}\n\n\tnode2 := &types.Node{\n\t\tHostname: \"user2-node\",\n\t\tIPv4:     createAddr(\"100.64.0.2\"),\n\t\tUserID:   new(users[1].ID),\n\t\tUser:     new(users[1]),\n\t}\n\n\tnodes := types.Nodes{node1, node2}\n\n\t// Create a simple policy that reproduces the issue\n\t// Updated to use autogroup:self instead of username destination (per Tailscale security model)\n\tpolicy := &Policy{\n\t\tGroups: Groups{\n\t\t\tGroup(\"group:integration-test\"): []Username{Username(\"user1@\"), Username(\"user2@\")},\n\t\t},\n\t\tSSHs: []SSH{\n\t\t\t{\n\t\t\t\tAction:       \"accept\",\n\t\t\t\tSources:      SSHSrcAliases{gp(\"group:integration-test\")},\n\t\t\t\tDestinations: SSHDstAliases{agp(\"autogroup:self\")}, // Users can SSH to their own devices\n\t\t\t\tUsers:        []SSHUser{SSHUser(\"ssh-it-user\")},    // This is the key - specific user\n\t\t\t},\n\t\t},\n\t}\n\n\trequire.NoError(t, policy.validate())\n\n\t// Test SSH policy compilation for node2 (owned by user2, who is in the group)\n\tgot, err := policy.compileSSHPolicy(\"unused-server-url\", users, node2.View(), nodes.ViewSlice())\n\trequire.NoError(t, err)\n\n\twant := &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{\n\t\t{\n\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.2\"}},\n\t\t\tSSHUsers:   map[string]string{\"root\": \"\", \"ssh-it-user\": \"ssh-it-user\"},\n\t\t\tAction: &tailcfg.SSHAction{\n\t\t\t\tAccept:                    true,\n\t\t\t\tAllowAgentForwarding:      true,\n\t\t\t\tAllowLocalPortForwarding:  true,\n\t\t\t\tAllowRemotePortForwarding: true,\n\t\t\t},\n\t\t},\n\t}}\n\n\tif diff := cmp.Diff(want, got); diff != \"\" {\n\t\tt.Errorf(\"compileSSHPolicy() mismatch (-want +got):\\n%s\", diff)\n\t}\n}\n\n// TestSSHJSONSerialization verifies that the SSH policy can be properly serialized\n// to JSON and that the sshUsers field is not empty.\nfunc TestSSHJSONSerialization(t *testing.T) {\n\tusers := types.Users{\n\t\t{Name: \"user1\", Model: gorm.Model{ID: 1}},\n\t}\n\n\tuid := uint(1)\n\tnode := &types.Node{\n\t\tHostname: \"test-node\",\n\t\tIPv4:     createAddr(\"100.64.0.1\"),\n\t\tUserID:   &uid,\n\t\tUser:     &users[0],\n\t}\n\n\tnodes := types.Nodes{node}\n\n\tpolicy := &Policy{\n\t\tSSHs: []SSH{\n\t\t\t{\n\t\t\t\tAction:       \"accept\",\n\t\t\t\tSources:      SSHSrcAliases{up(\"user1@\")},\n\t\t\t\tDestinations: SSHDstAliases{up(\"user1@\")},\n\t\t\t\tUsers:        []SSHUser{\"ssh-it-user\", \"ubuntu\", \"admin\"},\n\t\t\t},\n\t\t},\n\t}\n\n\trequire.NoError(t, policy.validate())\n\n\tgot, err := policy.compileSSHPolicy(\"unused-server-url\", users, node.View(), nodes.ViewSlice())\n\trequire.NoError(t, err)\n\n\twant := &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{\n\t\t{\n\t\t\tPrincipals: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.1\"}},\n\t\t\tSSHUsers:   map[string]string{\"root\": \"\", \"ssh-it-user\": 
\"ssh-it-user\", \"ubuntu\": \"ubuntu\", \"admin\": \"admin\"},\n\t\t\tAction: &tailcfg.SSHAction{\n\t\t\t\tAccept:                    true,\n\t\t\t\tAllowAgentForwarding:      true,\n\t\t\t\tAllowLocalPortForwarding:  true,\n\t\t\t\tAllowRemotePortForwarding: true,\n\t\t\t},\n\t\t},\n\t}}\n\n\tif diff := cmp.Diff(want, got); diff != \"\" {\n\t\tt.Errorf(\"compileSSHPolicy() mismatch (-want +got):\\n%s\", diff)\n\t}\n\n\t// Verify JSON round-trip preserves the full structure\n\tjsonData, err := json.MarshalIndent(got, \"\", \"  \")\n\trequire.NoError(t, err)\n\n\tvar parsed tailcfg.SSHPolicy\n\trequire.NoError(t, json.Unmarshal(jsonData, &parsed))\n\n\tif diff := cmp.Diff(want, &parsed); diff != \"\" {\n\t\tt.Errorf(\"JSON round-trip mismatch (-want +got):\\n%s\", diff)\n\t}\n}\n\nfunc TestCompileFilterRulesForNodeWithAutogroupSelf(t *testing.T) {\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"user1\"},\n\t\t{Model: gorm.Model{ID: 2}, Name: \"user2\"},\n\t}\n\n\tnodes := types.Nodes{\n\t\t{\n\t\t\tUser: new(users[0]),\n\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t},\n\t\t{\n\t\t\tUser: new(users[0]),\n\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t},\n\t\t{\n\t\t\tUser: new(users[1]),\n\t\t\tIPv4: ap(\"100.64.0.3\"),\n\t\t},\n\t\t{\n\t\t\tUser: new(users[1]),\n\t\t\tIPv4: ap(\"100.64.0.4\"),\n\t\t},\n\t\t// Tagged device for user1\n\t\t{\n\t\t\tUser: &users[0],\n\t\t\tIPv4: ap(\"100.64.0.5\"),\n\t\t\tTags: []string{\"tag:test\"},\n\t\t},\n\t\t// Tagged device for user2\n\t\t{\n\t\t\tUser: &users[1],\n\t\t\tIPv4: ap(\"100.64.0.6\"),\n\t\t\tTags: []string{\"tag:test\"},\n\t\t},\n\t}\n\n\t// Test: Tailscale intended usage pattern (autogroup:member + autogroup:self)\n\tpolicy2 := &Policy{\n\t\tACLs: []ACL{\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []Alias{agp(\"autogroup:member\")},\n\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\taliasWithPorts(agp(\"autogroup:self\"), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\terr := policy2.validate()\n\tif err != nil {\n\t\tt.Fatalf(\"policy validation failed: %v\", err)\n\t}\n\n\t// Test compilation for user1's first node\n\tnode1 := nodes[0].View()\n\n\trules, err := policy2.compileFilterRulesForNode(users, node1, nodes.ViewSlice())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif len(rules) != 1 {\n\t\tt.Fatalf(\"expected 1 rule, got %d\", len(rules))\n\t}\n\n\t// Check that the rule includes:\n\t// - Sources: only user1's untagged devices (filtered by autogroup:self semantics)\n\t// - Destinations: only user1's untagged devices (autogroup:self)\n\trule := rules[0]\n\n\t// Sources should ONLY include user1's untagged devices (100.64.0.1, 100.64.0.2)\n\texpectedSourceIPs := []string{\"100.64.0.1\", \"100.64.0.2\"}\n\n\tfor _, expectedIP := range expectedSourceIPs {\n\t\tfound := false\n\n\t\taddr := netip.MustParseAddr(expectedIP)\n\n\t\tfor _, prefix := range rule.SrcIPs {\n\t\t\tpref := netip.MustParsePrefix(prefix)\n\t\t\tif pref.Contains(addr) {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\tt.Errorf(\"expected source IP %s to be covered by generated prefixes %v\", expectedIP, rule.SrcIPs)\n\t\t}\n\t}\n\n\t// Verify that other users' devices and tagged devices are not included in sources\n\texcludedSourceIPs := []string{\"100.64.0.3\", \"100.64.0.4\", \"100.64.0.5\", \"100.64.0.6\"}\n\tfor _, excludedIP := range excludedSourceIPs {\n\t\taddr := netip.MustParseAddr(excludedIP)\n\n\t\tfor _, prefix := range rule.SrcIPs {\n\t\t\tpref := 
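\n\t\t\t// SrcIPs entries are CIDR prefixes that can cover several addresses,\n\t\t\t// so exclusion is verified by containment rather than string equality.\n\t\t\t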
netip.MustParsePrefix(prefix)\n\t\t\tif pref.Contains(addr) {\n\t\t\t\tt.Errorf(\"SECURITY VIOLATION: source IP %s should not be included but found in prefix %s\", excludedIP, prefix)\n\t\t\t}\n\t\t}\n\t}\n\n\texpectedDestIPs := []string{\"100.64.0.1/32\", \"100.64.0.2/32\"}\n\n\tactualDestIPs := make([]string, 0, len(rule.DstPorts))\n\tfor _, dst := range rule.DstPorts {\n\t\tactualDestIPs = append(actualDestIPs, dst.IP)\n\t}\n\n\tfor _, expectedIP := range expectedDestIPs {\n\t\tfound := slices.Contains(actualDestIPs, expectedIP)\n\n\t\tif !found {\n\t\t\tt.Errorf(\"expected destination IP %s to be included, got: %v\", expectedIP, actualDestIPs)\n\t\t}\n\t}\n\n\t// Verify that other users' devices and tagged devices are not in destinations\n\texcludedDestIPs := []string{\"100.64.0.3/32\", \"100.64.0.4/32\", \"100.64.0.5/32\", \"100.64.0.6/32\"}\n\tfor _, excludedIP := range excludedDestIPs {\n\t\tfor _, actualIP := range actualDestIPs {\n\t\t\tif actualIP == excludedIP {\n\t\t\t\tt.Errorf(\"SECURITY: destination IP %s should not be included but found in destinations\", excludedIP)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// TestTagUserMutualExclusivity tests that user-owned nodes and tagged nodes\n// are treated as separate identity classes and cannot inadvertently access each other.\nfunc TestTagUserMutualExclusivity(t *testing.T) {\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"user1\"},\n\t\t{Model: gorm.Model{ID: 2}, Name: \"user2\"},\n\t}\n\n\tnodes := types.Nodes{\n\t\t// User-owned nodes\n\t\t{\n\t\t\tUser: new(users[0]),\n\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t},\n\t\t{\n\t\t\tUser: new(users[1]),\n\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t},\n\t\t// Tagged nodes\n\t\t{\n\t\t\tUser: &users[0], // \"created by\" tracking\n\t\t\tIPv4: ap(\"100.64.0.10\"),\n\t\t\tTags: []string{\"tag:server\"},\n\t\t},\n\t\t{\n\t\t\tUser: &users[1], // \"created by\" tracking\n\t\t\tIPv4: ap(\"100.64.0.11\"),\n\t\t\tTags: []string{\"tag:database\"},\n\t\t},\n\t}\n\n\tpolicy := &Policy{\n\t\tTagOwners: TagOwners{\n\t\t\tTag(\"tag:server\"):   Owners{new(Username(\"user1@\"))},\n\t\t\tTag(\"tag:database\"): Owners{new(Username(\"user2@\"))},\n\t\t},\n\t\tACLs: []ACL{\n\t\t\t// Rule 1: user1 (user-owned) should NOT be able to reach tagged nodes\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []Alias{up(\"user1@\")},\n\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\taliasWithPorts(tp(\"tag:server\"), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Rule 2: tag:server should be able to reach tag:database\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []Alias{tp(\"tag:server\")},\n\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\taliasWithPorts(tp(\"tag:database\"), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\terr := policy.validate()\n\tif err != nil {\n\t\tt.Fatalf(\"policy validation failed: %v\", err)\n\t}\n\n\t// Test user1's user-owned node (100.64.0.1)\n\tuserNode := nodes[0].View()\n\n\tuserRules, err := policy.compileFilterRulesForNode(users, userNode, nodes.ViewSlice())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error for user node: %v\", err)\n\t}\n\n\t// User1's user-owned node should NOT reach tag:server (100.64.0.10)\n\t// because user1@ as a source only matches user1's user-owned devices, NOT tagged devices\n\tfor _, rule := range userRules {\n\t\tfor _, dst := range rule.DstPorts {\n\t\t\tif dst.IP == \"100.64.0.10\" {\n\t\t\t\tt.Errorf(\"SECURITY: user-owned node should NOT reach tagged node (got dest %s in rule)\", 
dst.IP)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Test tag:server node (100.64.0.10)\n\t// compileFilterRulesForNode returns rules for what the node can ACCESS (as source)\n\ttaggedNode := nodes[2].View()\n\n\ttaggedRules, err := policy.compileFilterRulesForNode(users, taggedNode, nodes.ViewSlice())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error for tagged node: %v\", err)\n\t}\n\n\t// Tag:server (as source) should be able to reach tag:database (100.64.0.11)\n\t// Check destinations in the rules for this node\n\tfoundDatabaseDest := false\n\n\tfor _, rule := range taggedRules {\n\t\t// Check if this rule applies to tag:server as source\n\t\tif !slices.Contains(rule.SrcIPs, \"100.64.0.10/32\") {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Check if tag:database is in destinations\n\t\tfor _, dst := range rule.DstPorts {\n\t\t\tif dst.IP == \"100.64.0.11/32\" {\n\t\t\t\tfoundDatabaseDest = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif foundDatabaseDest {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !foundDatabaseDest {\n\t\tt.Errorf(\"tag:server should reach tag:database but didn't find 100.64.0.11 in destinations\")\n\t}\n}\n\n// TestAutogroupTagged tests that autogroup:tagged correctly selects all devices\n// with tag-based identity (IsTagged() == true or has requested tags in tagOwners).\nfunc TestAutogroupTagged(t *testing.T) {\n\tt.Parallel()\n\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"user1\"},\n\t\t{Model: gorm.Model{ID: 2}, Name: \"user2\"},\n\t}\n\n\tnodes := types.Nodes{\n\t\t// User-owned nodes (not tagged)\n\t\t{\n\t\t\tUser: new(users[0]),\n\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t},\n\t\t{\n\t\t\tUser: new(users[1]),\n\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t},\n\t\t// Tagged nodes\n\t\t{\n\t\t\tUser: &users[0], // \"created by\" tracking\n\t\t\tIPv4: ap(\"100.64.0.10\"),\n\t\t\tTags: []string{\"tag:server\"},\n\t\t},\n\t\t{\n\t\t\tUser: &users[1], // \"created by\" tracking\n\t\t\tIPv4: ap(\"100.64.0.11\"),\n\t\t\tTags: []string{\"tag:database\"},\n\t\t},\n\t\t{\n\t\t\tUser: &users[0],\n\t\t\tIPv4: ap(\"100.64.0.12\"),\n\t\t\tTags: []string{\"tag:web\", \"tag:prod\"},\n\t\t},\n\t}\n\n\tpolicy := &Policy{\n\t\tTagOwners: TagOwners{\n\t\t\tTag(\"tag:server\"):   Owners{new(Username(\"user1@\"))},\n\t\t\tTag(\"tag:database\"): Owners{new(Username(\"user2@\"))},\n\t\t\tTag(\"tag:web\"):      Owners{new(Username(\"user1@\"))},\n\t\t\tTag(\"tag:prod\"):     Owners{new(Username(\"user1@\"))},\n\t\t},\n\t\tACLs: []ACL{\n\t\t\t// Rule: autogroup:tagged can reach user-owned nodes\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []Alias{agp(\"autogroup:tagged\")},\n\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\taliasWithPorts(up(\"user1@\"), tailcfg.PortRangeAny),\n\t\t\t\t\taliasWithPorts(up(\"user2@\"), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\terr := policy.validate()\n\trequire.NoError(t, err)\n\n\t// Verify autogroup:tagged includes all tagged nodes\n\tag := AutoGroupTagged\n\ttaggedIPs, err := ag.Resolve(policy, users, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\trequire.NotNil(t, taggedIPs)\n\n\t// Should contain all tagged nodes\n\tassert.True(t, taggedIPs.Contains(*ap(\"100.64.0.10\")), \"should include tag:server\")\n\tassert.True(t, taggedIPs.Contains(*ap(\"100.64.0.11\")), \"should include tag:database\")\n\tassert.True(t, taggedIPs.Contains(*ap(\"100.64.0.12\")), \"should include tag:web,tag:prod\")\n\n\t// Should NOT contain user-owned nodes\n\tassert.False(t, taggedIPs.Contains(*ap(\"100.64.0.1\")), \"should not include user1 node\")\n\tassert.False(t, 
taggedIPs.Contains(*ap(\"100.64.0.2\")), \"should not include user2 node\")\n\n\t// Test ACL filtering: all tagged nodes should be able to reach user nodes\n\ttests := []struct {\n\t\tname        string\n\t\tsourceNode  types.NodeView\n\t\tshouldReach []string // IP strings for comparison\n\t}{\n\t\t{\n\t\t\tname:        \"tag:server can reach user-owned nodes\",\n\t\t\tsourceNode:  nodes[2].View(),\n\t\t\tshouldReach: []string{\"100.64.0.1\", \"100.64.0.2\"},\n\t\t},\n\t\t{\n\t\t\tname:        \"tag:database can reach user-owned nodes\",\n\t\t\tsourceNode:  nodes[3].View(),\n\t\t\tshouldReach: []string{\"100.64.0.1\", \"100.64.0.2\"},\n\t\t},\n\t\t{\n\t\t\tname:        \"tag:web,tag:prod can reach user-owned nodes\",\n\t\t\tsourceNode:  nodes[4].View(),\n\t\t\tshouldReach: []string{\"100.64.0.1\", \"100.64.0.2\"},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\trules, err := policy.compileFilterRulesForNode(users, tt.sourceNode, nodes.ViewSlice())\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Verify all expected destinations are reachable\n\t\t\tfor _, expectedDest := range tt.shouldReach {\n\t\t\t\tfound := false\n\n\t\t\t\tfor _, rule := range rules {\n\t\t\t\t\tfor _, dstPort := range rule.DstPorts {\n\t\t\t\t\t\t// DstPort.IP is CIDR notation like \"100.64.0.1/32\"\n\t\t\t\t\t\tif strings.HasPrefix(dstPort.IP, expectedDest+\"/\") || dstPort.IP == expectedDest {\n\t\t\t\t\t\t\tfound = true\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif found {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tassert.True(t, found, \"Expected to find destination %s in rules\", expectedDest)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestAutogroupSelfInSourceIsRejected(t *testing.T) {\n\t// Test that autogroup:self cannot be used in sources (per Tailscale spec)\n\tpolicy := &Policy{\n\t\tACLs: []ACL{\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []Alias{agp(\"autogroup:self\")},\n\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\taliasWithPorts(agp(\"autogroup:member\"), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\terr := policy.validate()\n\tif err == nil {\n\t\tt.Error(\"expected validation error when using autogroup:self in sources\")\n\t}\n\n\tif !strings.Contains(err.Error(), \"autogroup:self\") {\n\t\tt.Errorf(\"expected error message to mention autogroup:self, got: %v\", err)\n\t}\n}\n\n// TestAutogroupSelfWithSpecificUserSource verifies that when autogroup:self is in\n// the destination and a specific user is in the source, only that user's devices\n// are allowed (and only if they match the target user).\nfunc TestAutogroupSelfWithSpecificUserSource(t *testing.T) {\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"user1\"},\n\t\t{Model: gorm.Model{ID: 2}, Name: \"user2\"},\n\t}\n\n\tnodes := types.Nodes{\n\t\t{User: new(users[0]), IPv4: ap(\"100.64.0.1\")},\n\t\t{User: new(users[0]), IPv4: ap(\"100.64.0.2\")},\n\t\t{User: new(users[1]), IPv4: ap(\"100.64.0.3\")},\n\t\t{User: new(users[1]), IPv4: ap(\"100.64.0.4\")},\n\t}\n\n\tpolicy := &Policy{\n\t\tACLs: []ACL{\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []Alias{up(\"user1@\")},\n\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\taliasWithPorts(agp(\"autogroup:self\"), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\terr := policy.validate()\n\trequire.NoError(t, err)\n\n\t// For user1's node: sources should be user1's devices\n\tnode1 := nodes[0].View()\n\trules, err := 
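\n\t// autogroup:self compiles per target node, so these rules are specific to\n\t// node1's owner (user1).\n\t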
policy.compileFilterRulesForNode(users, node1, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\trequire.Len(t, rules, 1)\n\n\texpectedSourceIPs := []string{\"100.64.0.1\", \"100.64.0.2\"}\n\tfor _, expectedIP := range expectedSourceIPs {\n\t\tfound := false\n\t\taddr := netip.MustParseAddr(expectedIP)\n\n\t\tfor _, prefix := range rules[0].SrcIPs {\n\t\t\tpref := netip.MustParsePrefix(prefix)\n\t\t\tif pref.Contains(addr) {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tassert.True(t, found, \"expected source IP %s to be present\", expectedIP)\n\t}\n\n\tactualDestIPs := make([]string, 0, len(rules[0].DstPorts))\n\tfor _, dst := range rules[0].DstPorts {\n\t\tactualDestIPs = append(actualDestIPs, dst.IP)\n\t}\n\n\texpectedDestIPs := []string{\"100.64.0.1/32\", \"100.64.0.2/32\"}\n\tassert.ElementsMatch(t, expectedDestIPs, actualDestIPs)\n\n\tnode2 := nodes[2].View()\n\trules2, err := policy.compileFilterRulesForNode(users, node2, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\tassert.Empty(t, rules2, \"user2's node should have no rules (user1@ devices can't match user2's self)\")\n}\n\n// TestAutogroupSelfWithGroupSource verifies that when a group is used as source\n// and autogroup:self as destination, only group members who are the same user\n// as the target are allowed.\nfunc TestAutogroupSelfWithGroupSource(t *testing.T) {\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"user1\"},\n\t\t{Model: gorm.Model{ID: 2}, Name: \"user2\"},\n\t\t{Model: gorm.Model{ID: 3}, Name: \"user3\"},\n\t}\n\n\tnodes := types.Nodes{\n\t\t{User: new(users[0]), IPv4: ap(\"100.64.0.1\")},\n\t\t{User: new(users[0]), IPv4: ap(\"100.64.0.2\")},\n\t\t{User: new(users[1]), IPv4: ap(\"100.64.0.3\")},\n\t\t{User: new(users[1]), IPv4: ap(\"100.64.0.4\")},\n\t\t{User: new(users[2]), IPv4: ap(\"100.64.0.5\")},\n\t}\n\n\tpolicy := &Policy{\n\t\tGroups: Groups{\n\t\t\tGroup(\"group:admins\"): []Username{Username(\"user1@\"), Username(\"user2@\")},\n\t\t},\n\t\tACLs: []ACL{\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []Alias{gp(\"group:admins\")},\n\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\taliasWithPorts(agp(\"autogroup:self\"), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\terr := policy.validate()\n\trequire.NoError(t, err)\n\n\t// (group:admins has user1+user2, but autogroup:self filters to same user)\n\tnode1 := nodes[0].View()\n\trules, err := policy.compileFilterRulesForNode(users, node1, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\trequire.Len(t, rules, 1)\n\n\texpectedSrcIPs := []string{\"100.64.0.1\", \"100.64.0.2\"}\n\tfor _, expectedIP := range expectedSrcIPs {\n\t\tfound := false\n\t\taddr := netip.MustParseAddr(expectedIP)\n\n\t\tfor _, prefix := range rules[0].SrcIPs {\n\t\t\tpref := netip.MustParsePrefix(prefix)\n\t\t\tif pref.Contains(addr) {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tassert.True(t, found, \"expected source IP %s for user1\", expectedIP)\n\t}\n\n\tnode3 := nodes[4].View()\n\trules3, err := policy.compileFilterRulesForNode(users, node3, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\tassert.Empty(t, rules3, \"user3 should have no rules\")\n}\n\n// Helper function to create IP addresses for testing.\nfunc createAddr(ip string) *netip.Addr {\n\taddr, _ := netip.ParseAddr(ip)\n\treturn &addr\n}\n\n// TestSSHWithAutogroupSelfInDestination verifies that SSH policies work correctly\n// with autogroup:self in destinations.\nfunc TestSSHWithAutogroupSelfInDestination(t *testing.T) {\n\tusers := 
types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"user1\"},\n\t\t{Model: gorm.Model{ID: 2}, Name: \"user2\"},\n\t}\n\n\tnodes := types.Nodes{\n\t\t// User1's nodes\n\t\t{User: new(users[0]), IPv4: ap(\"100.64.0.1\"), Hostname: \"user1-node1\"},\n\t\t{User: new(users[0]), IPv4: ap(\"100.64.0.2\"), Hostname: \"user1-node2\"},\n\t\t// User2's nodes\n\t\t{User: new(users[1]), IPv4: ap(\"100.64.0.3\"), Hostname: \"user2-node1\"},\n\t\t{User: new(users[1]), IPv4: ap(\"100.64.0.4\"), Hostname: \"user2-node2\"},\n\t\t// Tagged node for user1 (should be excluded)\n\t\t{User: new(users[0]), IPv4: ap(\"100.64.0.5\"), Hostname: \"user1-tagged\", Tags: []string{\"tag:server\"}},\n\t}\n\n\tpolicy := &Policy{\n\t\tSSHs: []SSH{\n\t\t\t{\n\t\t\t\tAction:       \"accept\",\n\t\t\t\tSources:      SSHSrcAliases{agp(\"autogroup:member\")},\n\t\t\t\tDestinations: SSHDstAliases{agp(\"autogroup:self\")},\n\t\t\t\tUsers:        []SSHUser{\"autogroup:nonroot\"},\n\t\t\t},\n\t\t},\n\t}\n\n\terr := policy.validate()\n\trequire.NoError(t, err)\n\n\t// Test for user1's first node\n\tnode1 := nodes[0].View()\n\tsshPolicy, err := policy.compileSSHPolicy(\"unused-server-url\", users, node1, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\trequire.NotNil(t, sshPolicy)\n\trequire.Len(t, sshPolicy.Rules, 1)\n\n\trule := sshPolicy.Rules[0]\n\n\t// Principals should only include user1's untagged devices\n\trequire.Len(t, rule.Principals, 2, \"should have 2 principals (user1's 2 untagged nodes)\")\n\n\tprincipalIPs := make([]string, len(rule.Principals))\n\tfor i, p := range rule.Principals {\n\t\tprincipalIPs[i] = p.NodeIP\n\t}\n\n\tassert.ElementsMatch(t, []string{\"100.64.0.1\", \"100.64.0.2\"}, principalIPs)\n\n\t// Test for user2's first node\n\tnode3 := nodes[2].View()\n\tsshPolicy2, err := policy.compileSSHPolicy(\"unused-server-url\", users, node3, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\trequire.NotNil(t, sshPolicy2)\n\trequire.Len(t, sshPolicy2.Rules, 1)\n\n\trule2 := sshPolicy2.Rules[0]\n\n\t// Principals should only include user2's untagged devices\n\trequire.Len(t, rule2.Principals, 2, \"should have 2 principals (user2's 2 untagged nodes)\")\n\n\tprincipalIPs2 := make([]string, len(rule2.Principals))\n\tfor i, p := range rule2.Principals {\n\t\tprincipalIPs2[i] = p.NodeIP\n\t}\n\n\tassert.ElementsMatch(t, []string{\"100.64.0.3\", \"100.64.0.4\"}, principalIPs2)\n\n\t// Test for tagged node (should have no SSH rules)\n\tnode5 := nodes[4].View()\n\tsshPolicy3, err := policy.compileSSHPolicy(\"unused-server-url\", users, node5, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\n\tif sshPolicy3 != nil {\n\t\tassert.Empty(t, sshPolicy3.Rules, \"tagged nodes should not get SSH rules with autogroup:self\")\n\t}\n}\n\n// TestSSHWithAutogroupSelfAndSpecificUser verifies that when a specific user\n// is in the source and autogroup:self in destination, only that user's devices\n// can SSH (and only if they match the target user).\nfunc TestSSHWithAutogroupSelfAndSpecificUser(t *testing.T) {\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"user1\"},\n\t\t{Model: gorm.Model{ID: 2}, Name: \"user2\"},\n\t}\n\n\tnodes := types.Nodes{\n\t\t{User: new(users[0]), IPv4: ap(\"100.64.0.1\")},\n\t\t{User: new(users[0]), IPv4: ap(\"100.64.0.2\")},\n\t\t{User: new(users[1]), IPv4: ap(\"100.64.0.3\")},\n\t\t{User: new(users[1]), IPv4: ap(\"100.64.0.4\")},\n\t}\n\n\tpolicy := &Policy{\n\t\tSSHs: []SSH{\n\t\t\t{\n\t\t\t\tAction:       \"accept\",\n\t\t\t\tSources:      
SSHSrcAliases{up(\"user1@\")},\n\t\t\t\tDestinations: SSHDstAliases{agp(\"autogroup:self\")},\n\t\t\t\tUsers:        []SSHUser{\"ubuntu\"},\n\t\t\t},\n\t\t},\n\t}\n\n\terr := policy.validate()\n\trequire.NoError(t, err)\n\n\t// For user1's node: should allow SSH from user1's devices\n\tnode1 := nodes[0].View()\n\tsshPolicy, err := policy.compileSSHPolicy(\"unused-server-url\", users, node1, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\trequire.NotNil(t, sshPolicy)\n\trequire.Len(t, sshPolicy.Rules, 1)\n\n\trule := sshPolicy.Rules[0]\n\trequire.Len(t, rule.Principals, 2, \"user1 should have 2 principals\")\n\n\tprincipalIPs := make([]string, len(rule.Principals))\n\tfor i, p := range rule.Principals {\n\t\tprincipalIPs[i] = p.NodeIP\n\t}\n\n\tassert.ElementsMatch(t, []string{\"100.64.0.1\", \"100.64.0.2\"}, principalIPs)\n\n\t// For user2's node: should have no rules (user1's devices can't match user2's self)\n\tnode3 := nodes[2].View()\n\tsshPolicy2, err := policy.compileSSHPolicy(\"unused-server-url\", users, node3, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\n\tif sshPolicy2 != nil {\n\t\tassert.Empty(t, sshPolicy2.Rules, \"user2 should have no SSH rules since source is user1\")\n\t}\n}\n\n// TestSSHWithAutogroupSelfAndGroup verifies SSH with group sources and autogroup:self destinations.\nfunc TestSSHWithAutogroupSelfAndGroup(t *testing.T) {\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"user1\"},\n\t\t{Model: gorm.Model{ID: 2}, Name: \"user2\"},\n\t\t{Model: gorm.Model{ID: 3}, Name: \"user3\"},\n\t}\n\n\tnodes := types.Nodes{\n\t\t{User: new(users[0]), IPv4: ap(\"100.64.0.1\")},\n\t\t{User: new(users[0]), IPv4: ap(\"100.64.0.2\")},\n\t\t{User: new(users[1]), IPv4: ap(\"100.64.0.3\")},\n\t\t{User: new(users[1]), IPv4: ap(\"100.64.0.4\")},\n\t\t{User: new(users[2]), IPv4: ap(\"100.64.0.5\")},\n\t}\n\n\tpolicy := &Policy{\n\t\tGroups: Groups{\n\t\t\tGroup(\"group:admins\"): []Username{Username(\"user1@\"), Username(\"user2@\")},\n\t\t},\n\t\tSSHs: []SSH{\n\t\t\t{\n\t\t\t\tAction:       \"accept\",\n\t\t\t\tSources:      SSHSrcAliases{gp(\"group:admins\")},\n\t\t\t\tDestinations: SSHDstAliases{agp(\"autogroup:self\")},\n\t\t\t\tUsers:        []SSHUser{\"root\"},\n\t\t\t},\n\t\t},\n\t}\n\n\terr := policy.validate()\n\trequire.NoError(t, err)\n\n\t// For user1's node: should allow SSH from user1's devices only (not user2's)\n\tnode1 := nodes[0].View()\n\tsshPolicy, err := policy.compileSSHPolicy(\"unused-server-url\", users, node1, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\trequire.NotNil(t, sshPolicy)\n\trequire.Len(t, sshPolicy.Rules, 1)\n\n\trule := sshPolicy.Rules[0]\n\trequire.Len(t, rule.Principals, 2, \"user1 should have 2 principals (only user1's nodes)\")\n\n\tprincipalIPs := make([]string, len(rule.Principals))\n\tfor i, p := range rule.Principals {\n\t\tprincipalIPs[i] = p.NodeIP\n\t}\n\n\tassert.ElementsMatch(t, []string{\"100.64.0.1\", \"100.64.0.2\"}, principalIPs)\n\n\t// For user3's node: should have no rules (not in group:admins)\n\tnode5 := nodes[4].View()\n\tsshPolicy2, err := policy.compileSSHPolicy(\"unused-server-url\", users, node5, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\n\tif sshPolicy2 != nil {\n\t\tassert.Empty(t, sshPolicy2.Rules, \"user3 should have no SSH rules (not in group)\")\n\t}\n}\n\n// TestSSHWithAutogroupSelfExcludesTaggedDevices verifies that tagged devices\n// are excluded from both sources and destinations when autogroup:self is used.\nfunc TestSSHWithAutogroupSelfExcludesTaggedDevices(t *testing.T) {\n\tusers := 
types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"user1\"},\n\t}\n\n\tnodes := types.Nodes{\n\t\t{User: new(users[0]), IPv4: ap(\"100.64.0.1\"), Hostname: \"untagged1\"},\n\t\t{User: new(users[0]), IPv4: ap(\"100.64.0.2\"), Hostname: \"untagged2\"},\n\t\t{User: new(users[0]), IPv4: ap(\"100.64.0.3\"), Hostname: \"tagged1\", Tags: []string{\"tag:server\"}},\n\t\t{User: new(users[0]), IPv4: ap(\"100.64.0.4\"), Hostname: \"tagged2\", Tags: []string{\"tag:web\"}},\n\t}\n\n\tpolicy := &Policy{\n\t\tTagOwners: TagOwners{\n\t\t\tTag(\"tag:server\"): Owners{up(\"user1@\")},\n\t\t\tTag(\"tag:web\"):    Owners{up(\"user1@\")},\n\t\t},\n\t\tSSHs: []SSH{\n\t\t\t{\n\t\t\t\tAction:       \"accept\",\n\t\t\t\tSources:      SSHSrcAliases{agp(\"autogroup:member\")},\n\t\t\t\tDestinations: SSHDstAliases{agp(\"autogroup:self\")},\n\t\t\t\tUsers:        []SSHUser{\"admin\"},\n\t\t\t},\n\t\t},\n\t}\n\n\terr := policy.validate()\n\trequire.NoError(t, err)\n\n\t// For untagged node: should only get principals from other untagged nodes\n\tnode1 := nodes[0].View()\n\tsshPolicy, err := policy.compileSSHPolicy(\"unused-server-url\", users, node1, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\trequire.NotNil(t, sshPolicy)\n\trequire.Len(t, sshPolicy.Rules, 1)\n\n\trule := sshPolicy.Rules[0]\n\trequire.Len(t, rule.Principals, 2, \"should only have 2 principals (untagged nodes)\")\n\n\tprincipalIPs := make([]string, len(rule.Principals))\n\tfor i, p := range rule.Principals {\n\t\tprincipalIPs[i] = p.NodeIP\n\t}\n\n\tassert.ElementsMatch(t, []string{\"100.64.0.1\", \"100.64.0.2\"}, principalIPs,\n\t\t\"should only include untagged devices\")\n\n\t// For tagged node: should get no SSH rules\n\tnode3 := nodes[2].View()\n\tsshPolicy2, err := policy.compileSSHPolicy(\"unused-server-url\", users, node3, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\n\tif sshPolicy2 != nil {\n\t\tassert.Empty(t, sshPolicy2.Rules, \"tagged node should get no SSH rules with autogroup:self\")\n\t}\n}\n\n// TestSSHWithAutogroupSelfAndMixedDestinations tests that SSH rules can have both\n// autogroup:self and other destinations (like tag:router) in the same rule, and that\n// autogroup:self filtering only applies to autogroup:self destinations, not others.\nfunc TestSSHWithAutogroupSelfAndMixedDestinations(t *testing.T) {\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"user1\"},\n\t\t{Model: gorm.Model{ID: 2}, Name: \"user2\"},\n\t}\n\n\tnodes := types.Nodes{\n\t\t{User: new(users[0]), IPv4: ap(\"100.64.0.1\"), Hostname: \"user1-device\"},\n\t\t{User: new(users[0]), IPv4: ap(\"100.64.0.2\"), Hostname: \"user1-device2\"},\n\t\t{User: new(users[1]), IPv4: ap(\"100.64.0.3\"), Hostname: \"user2-device\"},\n\t\t{User: new(users[1]), IPv4: ap(\"100.64.0.4\"), Hostname: \"user2-router\", Tags: []string{\"tag:router\"}},\n\t}\n\n\tpolicy := &Policy{\n\t\tTagOwners: TagOwners{\n\t\t\tTag(\"tag:router\"): Owners{up(\"user2@\")},\n\t\t},\n\t\tSSHs: []SSH{\n\t\t\t{\n\t\t\t\tAction:       \"accept\",\n\t\t\t\tSources:      SSHSrcAliases{agp(\"autogroup:member\")},\n\t\t\t\tDestinations: SSHDstAliases{agp(\"autogroup:self\"), tp(\"tag:router\")},\n\t\t\t\tUsers:        []SSHUser{\"admin\"},\n\t\t\t},\n\t\t},\n\t}\n\n\terr := policy.validate()\n\trequire.NoError(t, err)\n\n\t// Test 1: Compile for user1's device (should only match autogroup:self destination)\n\tnode1 := nodes[0].View()\n\tsshPolicy1, err := policy.compileSSHPolicy(\"unused-server-url\", users, node1, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\trequire.NotNil(t, 
sshPolicy1)\n\trequire.Len(t, sshPolicy1.Rules, 1, \"user1's device should have 1 SSH rule (autogroup:self)\")\n\n\t// Verify autogroup:self rule has filtered sources (only same-user devices)\n\tselfRule := sshPolicy1.Rules[0]\n\trequire.Len(t, selfRule.Principals, 2, \"autogroup:self rule should only have user1's devices\")\n\n\tselfPrincipals := make([]string, len(selfRule.Principals))\n\tfor i, p := range selfRule.Principals {\n\t\tselfPrincipals[i] = p.NodeIP\n\t}\n\n\trequire.ElementsMatch(t, []string{\"100.64.0.1\", \"100.64.0.2\"}, selfPrincipals,\n\t\t\"autogroup:self rule should only include same-user untagged devices\")\n\n\t// Test 2: Compile for router (should only match tag:router destination)\n\trouterNode := nodes[3].View() // user2-router\n\tsshPolicyRouter, err := policy.compileSSHPolicy(\"unused-server-url\", users, routerNode, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\trequire.NotNil(t, sshPolicyRouter)\n\trequire.Len(t, sshPolicyRouter.Rules, 1, \"router should have 1 SSH rule (tag:router)\")\n\n\trouterRule := sshPolicyRouter.Rules[0]\n\n\trouterPrincipals := make([]string, len(routerRule.Principals))\n\tfor i, p := range routerRule.Principals {\n\t\trouterPrincipals[i] = p.NodeIP\n\t}\n\n\trequire.Contains(t, routerPrincipals, \"100.64.0.1\", \"router rule should include user1's device (unfiltered sources)\")\n\trequire.Contains(t, routerPrincipals, \"100.64.0.2\", \"router rule should include user1's other device (unfiltered sources)\")\n\trequire.Contains(t, routerPrincipals, \"100.64.0.3\", \"router rule should include user2's device (unfiltered sources)\")\n}\n\n// TestAutogroupSelfWithNonExistentUserInGroup verifies that when a group\n// contains a non-existent user, partial resolution still works correctly.\n// This reproduces the issue from https://github.com/juanfont/headscale/issues/2990\n// where autogroup:self breaks when groups contain users that don't have\n// registered nodes.\nfunc TestAutogroupSelfWithNonExistentUserInGroup(t *testing.T) {\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"superadmin\"},\n\t\t{Model: gorm.Model{ID: 2}, Name: \"admin\"},\n\t\t{Model: gorm.Model{ID: 3}, Name: \"direction\"},\n\t}\n\n\tnodes := types.Nodes{\n\t\t// superadmin's device\n\t\t{ID: 1, User: new(users[0]), IPv4: ap(\"100.64.0.1\"), Hostname: \"superadmin-device\"},\n\t\t// admin's device\n\t\t{ID: 2, User: new(users[1]), IPv4: ap(\"100.64.0.2\"), Hostname: \"admin-device\"},\n\t\t// direction's device\n\t\t{ID: 3, User: new(users[2]), IPv4: ap(\"100.64.0.3\"), Hostname: \"direction-device\"},\n\t\t// tagged servers\n\t\t{ID: 4, IPv4: ap(\"100.64.0.10\"), Hostname: \"common-server\", Tags: []string{\"tag:common\"}},\n\t\t{ID: 5, IPv4: ap(\"100.64.0.11\"), Hostname: \"tech-server\", Tags: []string{\"tag:tech\"}},\n\t\t{ID: 6, IPv4: ap(\"100.64.0.12\"), Hostname: \"privileged-server\", Tags: []string{\"tag:privileged\"}},\n\t}\n\n\tpolicy := &Policy{\n\t\tGroups: Groups{\n\t\t\t// group:superadmin contains \"phantom_user\" who doesn't exist\n\t\t\tGroup(\"group:superadmin\"): []Username{Username(\"superadmin@\"), Username(\"phantom_user@\")},\n\t\t\tGroup(\"group:admin\"):      []Username{Username(\"admin@\")},\n\t\t\tGroup(\"group:direction\"):  []Username{Username(\"direction@\")},\n\t\t},\n\t\tTagOwners: TagOwners{\n\t\t\tTag(\"tag:common\"):     Owners{gp(\"group:superadmin\")},\n\t\t\tTag(\"tag:tech\"):       Owners{gp(\"group:superadmin\")},\n\t\t\tTag(\"tag:privileged\"): Owners{gp(\"group:superadmin\")},\n\t\t},\n\t\tACLs: 
[]ACL{\n\t\t\t{\n\t\t\t\t// Rule 1: all groups -> tag:common\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []Alias{gp(\"group:superadmin\"), gp(\"group:admin\"), gp(\"group:direction\")},\n\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\taliasWithPorts(tp(\"tag:common\"), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\t// Rule 2: superadmin + admin -> tag:tech\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []Alias{gp(\"group:superadmin\"), gp(\"group:admin\")},\n\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\taliasWithPorts(tp(\"tag:tech\"), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\t// Rule 3: superadmin -> tag:privileged + autogroup:self\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []Alias{gp(\"group:superadmin\")},\n\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\taliasWithPorts(tp(\"tag:privileged\"), tailcfg.PortRangeAny),\n\t\t\t\t\taliasWithPorts(agp(\"autogroup:self\"), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\terr := policy.validate()\n\trequire.NoError(t, err)\n\n\tcontainsIP := func(rules []tailcfg.FilterRule, ip string) bool {\n\t\taddr := netip.MustParseAddr(ip)\n\n\t\tfor _, rule := range rules {\n\t\t\tfor _, dp := range rule.DstPorts {\n\t\t\t\t// DstPort IPs may be bare addresses or CIDR prefixes\n\t\t\t\tpref, err := netip.ParsePrefix(dp.IP)\n\t\t\t\tif err != nil {\n\t\t\t\t\t// Try as bare address\n\t\t\t\t\ta, err2 := netip.ParseAddr(dp.IP)\n\t\t\t\t\tif err2 != nil {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tif a == addr {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif pref.Contains(addr) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn false\n\t}\n\n\tcontainsSrcIP := func(rules []tailcfg.FilterRule, ip string) bool {\n\t\taddr := netip.MustParseAddr(ip)\n\n\t\tfor _, rule := range rules {\n\t\t\tfor _, srcIP := range rule.SrcIPs {\n\t\t\t\tpref, err := netip.ParsePrefix(srcIP)\n\t\t\t\tif err != nil {\n\t\t\t\t\ta, err2 := netip.ParseAddr(srcIP)\n\t\t\t\t\tif err2 != nil {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tif a == addr {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif pref.Contains(addr) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn false\n\t}\n\n\t// Test superadmin's device: should have rules with tag:common, tag:tech, tag:privileged destinations\n\t// and superadmin's IP should appear in sources (partial resolution of group:superadmin works)\n\tsuperadminNode := nodes[0].View()\n\tsuperadminRules, err := policy.compileFilterRulesForNode(users, superadminNode, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\tassert.True(t, containsIP(superadminRules, \"100.64.0.10\"), \"rules should include tag:common server\")\n\tassert.True(t, containsIP(superadminRules, \"100.64.0.11\"), \"rules should include tag:tech server\")\n\tassert.True(t, containsIP(superadminRules, \"100.64.0.12\"), \"rules should include tag:privileged server\")\n\n\t// Key assertion: superadmin's IP should appear as a source in rules\n\t// despite phantom_user in group:superadmin causing a partial resolution error\n\tassert.True(t, containsSrcIP(superadminRules, \"100.64.0.1\"),\n\t\t\"superadmin's IP should appear in sources despite phantom_user in group:superadmin\")\n\n\t// Test admin's device: admin is in group:admin which has NO phantom users.\n\t// The key bug was: when group:superadmin (with phantom_user) appeared as a source\n\t// alongside group:admin, the error from resolving group:superadmin 
caused its\n\t// partial result to be discarded via `continue`. With the fix, superadmin's IPs\n\t// from group:superadmin are retained alongside admin's IPs from group:admin.\n\tadminNode := nodes[1].View()\n\tadminRules, err := policy.compileFilterRulesForNode(users, adminNode, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\n\t// Rule 1 sources: [group:superadmin, group:admin, group:direction]\n\t// Without fix: group:superadmin discarded -> only admin + direction IPs in sources\n\t// With fix: superadmin IP preserved -> superadmin + admin + direction IPs in sources\n\tassert.True(t, containsIP(adminRules, \"100.64.0.10\"),\n\t\t\"admin rules should include tag:common server (group:admin resolves correctly)\")\n\tassert.True(t, containsSrcIP(adminRules, \"100.64.0.1\"),\n\t\t\"superadmin's IP should be in sources for rules seen by admin (partial resolution preserved)\")\n\tassert.True(t, containsSrcIP(adminRules, \"100.64.0.2\"),\n\t\t\"admin's own IP should be in sources\")\n\n\t// Test direction's device: similar to admin, verifies group:direction sources work\n\tdirectionNode := nodes[2].View()\n\tdirectionRules, err := policy.compileFilterRulesForNode(users, directionNode, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\tassert.True(t, containsIP(directionRules, \"100.64.0.10\"),\n\t\t\"direction rules should include tag:common server\")\n\tassert.True(t, containsSrcIP(directionRules, \"100.64.0.3\"),\n\t\t\"direction's own IP should be in sources\")\n\t// With fix: superadmin's IP preserved in rules that include group:superadmin\n\tassert.True(t, containsSrcIP(directionRules, \"100.64.0.1\"),\n\t\t\"superadmin's IP should be in sources for rule 1 (partial resolution preserved)\")\n}\n\nfunc TestMergeFilterRules(t *testing.T) {\n\ttests := []struct {\n\t\tname  string\n\t\tinput []tailcfg.FilterRule\n\t\twant  []tailcfg.FilterRule\n\t}{\n\t\t{\n\t\t\tname:  \"empty input\",\n\t\t\tinput: []tailcfg.FilterRule{},\n\t\t\twant:  []tailcfg.FilterRule{},\n\t\t},\n\t\t{\n\t\t\tname: \"single rule unchanged\",\n\t\t\tinput: []tailcfg.FilterRule{\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.1/32\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.64.0.2/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolTCP},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []tailcfg.FilterRule{\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.1/32\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.64.0.2/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolTCP},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"merge two rules with same key\",\n\t\t\tinput: []tailcfg.FilterRule{\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.1/32\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.64.0.2/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.1/32\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.64.0.2/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []tailcfg.FilterRule{\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.1/32\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.64.0.2/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t{IP: \"100.64.0.2/32\", 
Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"different SrcIPs not merged\",\n\t\t\tinput: []tailcfg.FilterRule{\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.1/32\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.64.0.3/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolTCP},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.2/32\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.64.0.3/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolTCP},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []tailcfg.FilterRule{\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.1/32\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.64.0.3/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolTCP},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.2/32\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.64.0.3/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolTCP},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"different IPProto not merged\",\n\t\t\tinput: []tailcfg.FilterRule{\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.1/32\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.64.0.2/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolTCP},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.1/32\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.64.0.2/32\", Ports: tailcfg.PortRange{First: 53, Last: 53}},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolUDP},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []tailcfg.FilterRule{\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.1/32\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.64.0.2/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolTCP},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.1/32\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.64.0.2/32\", Ports: tailcfg.PortRange{First: 53, Last: 53}},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolUDP},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"DstPorts combined without deduplication\",\n\t\t\tinput: []tailcfg.FilterRule{\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.1/32\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.64.0.2/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolTCP},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.1/32\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.64.0.2/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolTCP},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []tailcfg.FilterRule{\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.1/32\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.64.0.2/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t{IP: \"100.64.0.2/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolTCP},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"merge three rules with same 
key\",\n\t\t\tinput: []tailcfg.FilterRule{\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.1/32\", \"100.64.0.2/32\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.64.0.3/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.1/32\", \"100.64.0.2/32\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.64.0.3/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.1/32\", \"100.64.0.2/32\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.64.0.4/32\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []tailcfg.FilterRule{\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.1/32\", \"100.64.0.2/32\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.64.0.3/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t{IP: \"100.64.0.3/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t{IP: \"100.64.0.4/32\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t},\n\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot := mergeFilterRules(tt.input)\n\t\t\tif diff := cmp.Diff(tt.want, got); diff != \"\" {\n\t\t\t\tt.Errorf(\"mergeFilterRules() mismatch (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestCompileSSHPolicy_CheckPeriodVariants(t *testing.T) {\n\tusers := types.Users{\n\t\t{Name: \"user1\", Model: gorm.Model{ID: 1}},\n\t}\n\n\tnode := types.Node{\n\t\tHostname: \"device\",\n\t\tIPv4:     createAddr(\"100.64.0.1\"),\n\t\tUserID:   new(users[0].ID),\n\t\tUser:     new(users[0]),\n\t}\n\n\tnodes := types.Nodes{&node}\n\n\ttests := []struct {\n\t\tname         string\n\t\tcheckPeriod  *SSHCheckPeriod\n\t\twantDuration time.Duration\n\t}{\n\t\t{\n\t\t\tname:         \"nil period defaults to 12h\",\n\t\t\tcheckPeriod:  nil,\n\t\t\twantDuration: SSHCheckPeriodDefault,\n\t\t},\n\t\t{\n\t\t\tname:         \"always period uses 0\",\n\t\t\tcheckPeriod:  &SSHCheckPeriod{Always: true},\n\t\t\twantDuration: 0,\n\t\t},\n\t\t{\n\t\t\tname:         \"explicit 2h\",\n\t\t\tcheckPeriod:  &SSHCheckPeriod{Duration: 2 * time.Hour},\n\t\t\twantDuration: 2 * time.Hour,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tpolicy := &Policy{\n\t\t\t\tSSHs: []SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:       SSHActionCheck,\n\t\t\t\t\t\tSources:      SSHSrcAliases{up(\"user1@\")},\n\t\t\t\t\t\tDestinations: SSHDstAliases{agp(\"autogroup:member\")},\n\t\t\t\t\t\tUsers:        SSHUsers{\"root\"},\n\t\t\t\t\t\tCheckPeriod:  tt.checkPeriod,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := policy.validate()\n\t\t\trequire.NoError(t, err)\n\n\t\t\tsshPolicy, err := policy.compileSSHPolicy(\n\t\t\t\t\"http://test\",\n\t\t\t\tusers,\n\t\t\t\tnode.View(),\n\t\t\t\tnodes.ViewSlice(),\n\t\t\t)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NotNil(t, sshPolicy)\n\t\t\trequire.Len(t, sshPolicy.Rules, 1)\n\n\t\t\trule := sshPolicy.Rules[0]\n\t\t\tassert.Equal(t, 
tt.wantDuration, rule.Action.SessionDuration)\n\t\t\t// Check params must NOT be in the URL; they are\n\t\t\t// resolved server-side via SSHCheckParams.\n\t\t\tassert.NotContains(t, rule.Action.HoldAndDelegate, \"check_explicit\")\n\t\t\tassert.NotContains(t, rule.Action.HoldAndDelegate, \"check_period\")\n\t\t})\n\t}\n}\n\nfunc TestIPSetToPrincipals(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tips  []string // IPs to add to the set\n\t\twant []*tailcfg.SSHPrincipal\n\t}{\n\t\t{\n\t\t\tname: \"nil input\",\n\t\t\tips:  nil,\n\t\t\twant: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"single IPv4\",\n\t\t\tips:  []string{\"100.64.0.1\"},\n\t\t\twant: []*tailcfg.SSHPrincipal{{NodeIP: \"100.64.0.1\"}},\n\t\t},\n\t\t{\n\t\t\tname: \"multiple IPs\",\n\t\t\tips:  []string{\"100.64.0.1\", \"100.64.0.2\"},\n\t\t\twant: []*tailcfg.SSHPrincipal{\n\t\t\t\t{NodeIP: \"100.64.0.1\"},\n\t\t\t\t{NodeIP: \"100.64.0.2\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"IPv6\",\n\t\t\tips:  []string{\"fd7a:115c:a1e0::1\"},\n\t\t\twant: []*tailcfg.SSHPrincipal{{NodeIP: \"fd7a:115c:a1e0::1\"}},\n\t\t},\n\t\t{\n\t\t\tname: \"mixed IPv4 and IPv6\",\n\t\t\tips:  []string{\"100.64.0.1\", \"fd7a:115c:a1e0::1\"},\n\t\t\twant: []*tailcfg.SSHPrincipal{\n\t\t\t\t{NodeIP: \"100.64.0.1\"},\n\t\t\t\t{NodeIP: \"fd7a:115c:a1e0::1\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tvar ipSet *netipx.IPSet\n\n\t\t\tif tt.ips != nil {\n\t\t\t\tvar builder netipx.IPSetBuilder\n\n\t\t\t\tfor _, ip := range tt.ips {\n\t\t\t\t\taddr := netip.MustParseAddr(ip)\n\t\t\t\t\tbuilder.Add(addr)\n\t\t\t\t}\n\n\t\t\t\tvar err error\n\n\t\t\t\tipSet, err = builder.IPSet()\n\t\t\t\trequire.NoError(t, err)\n\t\t\t}\n\n\t\t\tgot := ipSetToPrincipals(ipSet)\n\n\t\t\t// Sort for deterministic comparison\n\t\t\tsortPrincipals := func(p []*tailcfg.SSHPrincipal) {\n\t\t\t\tslices.SortFunc(p, func(a, b *tailcfg.SSHPrincipal) int {\n\t\t\t\t\tif a.NodeIP < b.NodeIP {\n\t\t\t\t\t\treturn -1\n\t\t\t\t\t}\n\n\t\t\t\t\tif a.NodeIP > b.NodeIP {\n\t\t\t\t\t\treturn 1\n\t\t\t\t\t}\n\n\t\t\t\t\treturn 0\n\t\t\t\t})\n\t\t\t}\n\t\t\tsortPrincipals(got)\n\t\t\tsortPrincipals(tt.want)\n\n\t\t\tif diff := cmp.Diff(tt.want, got); diff != \"\" {\n\t\t\t\tt.Errorf(\"ipSetToPrincipals() mismatch (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestSSHCheckParams(t *testing.T) {\n\tusers := types.Users{\n\t\t{Name: \"user1\", Model: gorm.Model{ID: 1}},\n\t\t{Name: \"user2\", Model: gorm.Model{ID: 2}},\n\t}\n\n\tnodeUser1 := types.Node{\n\t\tID:       1,\n\t\tHostname: \"user1-device\",\n\t\tIPv4:     createAddr(\"100.64.0.1\"),\n\t\tUserID:   new(users[0].ID),\n\t\tUser:     new(users[0]),\n\t}\n\tnodeUser2 := types.Node{\n\t\tID:       2,\n\t\tHostname: \"user2-device\",\n\t\tIPv4:     createAddr(\"100.64.0.2\"),\n\t\tUserID:   new(users[1].ID),\n\t\tUser:     new(users[1]),\n\t}\n\tnodeTaggedServer := types.Node{\n\t\tID:       3,\n\t\tHostname: \"tagged-server\",\n\t\tIPv4:     createAddr(\"100.64.0.3\"),\n\t\tUserID:   new(users[0].ID),\n\t\tUser:     new(users[0]),\n\t\tTags:     []string{\"tag:server\"},\n\t}\n\n\tnodes := types.Nodes{&nodeUser1, &nodeUser2, &nodeTaggedServer}\n\n\ttests := []struct {\n\t\tname       string\n\t\tpolicy     []byte\n\t\tsrcID      types.NodeID\n\t\tdstID      types.NodeID\n\t\twantPeriod time.Duration\n\t\twantOK     bool\n\t}{\n\t\t{\n\t\t\tname: \"explicit check period for tagged destination\",\n\t\t\tpolicy: []byte(`{\n\t\t\t\t\"tagOwners\": {\"tag:server\": 
[\"user1@\"]},\n\t\t\t\t\"ssh\": [{\n\t\t\t\t\t\"action\": \"check\",\n\t\t\t\t\t\"checkPeriod\": \"2h\",\n\t\t\t\t\t\"src\": [\"user2@\"],\n\t\t\t\t\t\"dst\": [\"tag:server\"],\n\t\t\t\t\t\"users\": [\"autogroup:nonroot\"]\n\t\t\t\t}]\n\t\t\t}`),\n\t\t\tsrcID:      types.NodeID(2),\n\t\t\tdstID:      types.NodeID(3),\n\t\t\twantPeriod: 2 * time.Hour,\n\t\t\twantOK:     true,\n\t\t},\n\t\t{\n\t\t\tname: \"default period when checkPeriod omitted\",\n\t\t\tpolicy: []byte(`{\n\t\t\t\t\"tagOwners\": {\"tag:server\": [\"user1@\"]},\n\t\t\t\t\"ssh\": [{\n\t\t\t\t\t\"action\": \"check\",\n\t\t\t\t\t\"src\": [\"user2@\"],\n\t\t\t\t\t\"dst\": [\"tag:server\"],\n\t\t\t\t\t\"users\": [\"autogroup:nonroot\"]\n\t\t\t\t}]\n\t\t\t}`),\n\t\t\tsrcID:      types.NodeID(2),\n\t\t\tdstID:      types.NodeID(3),\n\t\t\twantPeriod: SSHCheckPeriodDefault,\n\t\t\twantOK:     true,\n\t\t},\n\t\t{\n\t\t\tname: \"always check (checkPeriod always)\",\n\t\t\tpolicy: []byte(`{\n\t\t\t\t\"tagOwners\": {\"tag:server\": [\"user1@\"]},\n\t\t\t\t\"ssh\": [{\n\t\t\t\t\t\"action\": \"check\",\n\t\t\t\t\t\"checkPeriod\": \"always\",\n\t\t\t\t\t\"src\": [\"user2@\"],\n\t\t\t\t\t\"dst\": [\"tag:server\"],\n\t\t\t\t\t\"users\": [\"autogroup:nonroot\"]\n\t\t\t\t}]\n\t\t\t}`),\n\t\t\tsrcID:      types.NodeID(2),\n\t\t\tdstID:      types.NodeID(3),\n\t\t\twantPeriod: 0,\n\t\t\twantOK:     true,\n\t\t},\n\t\t{\n\t\t\tname: \"no match when src not in rule\",\n\t\t\tpolicy: []byte(`{\n\t\t\t\t\"tagOwners\": {\"tag:server\": [\"user1@\"]},\n\t\t\t\t\"ssh\": [{\n\t\t\t\t\t\"action\": \"check\",\n\t\t\t\t\t\"src\": [\"user1@\"],\n\t\t\t\t\t\"dst\": [\"tag:server\"],\n\t\t\t\t\t\"users\": [\"autogroup:nonroot\"]\n\t\t\t\t}]\n\t\t\t}`),\n\t\t\tsrcID:  types.NodeID(2),\n\t\t\tdstID:  types.NodeID(3),\n\t\t\twantOK: false,\n\t\t},\n\t\t{\n\t\t\tname: \"no match when dst not in rule\",\n\t\t\tpolicy: []byte(`{\n\t\t\t\t\"tagOwners\": {\"tag:server\": [\"user1@\"]},\n\t\t\t\t\"ssh\": [{\n\t\t\t\t\t\"action\": \"check\",\n\t\t\t\t\t\"src\": [\"user2@\"],\n\t\t\t\t\t\"dst\": [\"tag:server\"],\n\t\t\t\t\t\"users\": [\"autogroup:nonroot\"]\n\t\t\t\t}]\n\t\t\t}`),\n\t\t\tsrcID:  types.NodeID(2),\n\t\t\tdstID:  types.NodeID(1),\n\t\t\twantOK: false,\n\t\t},\n\t\t{\n\t\t\tname: \"accept rule is not returned\",\n\t\t\tpolicy: []byte(`{\n\t\t\t\t\"tagOwners\": {\"tag:server\": [\"user1@\"]},\n\t\t\t\t\"ssh\": [{\n\t\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\t\"src\": [\"user2@\"],\n\t\t\t\t\t\"dst\": [\"tag:server\"],\n\t\t\t\t\t\"users\": [\"autogroup:nonroot\"]\n\t\t\t\t}]\n\t\t\t}`),\n\t\t\tsrcID:  types.NodeID(2),\n\t\t\tdstID:  types.NodeID(3),\n\t\t\twantOK: false,\n\t\t},\n\t\t{\n\t\t\tname: \"autogroup:self matches same-user pair\",\n\t\t\tpolicy: []byte(`{\n\t\t\t\t\"ssh\": [{\n\t\t\t\t\t\"action\": \"check\",\n\t\t\t\t\t\"checkPeriod\": \"6h\",\n\t\t\t\t\t\"src\": [\"user1@\"],\n\t\t\t\t\t\"dst\": [\"autogroup:self\"],\n\t\t\t\t\t\"users\": [\"autogroup:nonroot\"]\n\t\t\t\t}]\n\t\t\t}`),\n\t\t\tsrcID:      types.NodeID(1),\n\t\t\tdstID:      types.NodeID(1),\n\t\t\twantPeriod: 6 * time.Hour,\n\t\t\twantOK:     true,\n\t\t},\n\t\t{\n\t\t\tname: \"autogroup:self rejects cross-user pair\",\n\t\t\tpolicy: []byte(`{\n\t\t\t\t\"ssh\": [{\n\t\t\t\t\t\"action\": \"check\",\n\t\t\t\t\t\"src\": [\"user1@\"],\n\t\t\t\t\t\"dst\": [\"autogroup:self\"],\n\t\t\t\t\t\"users\": [\"autogroup:nonroot\"]\n\t\t\t\t}]\n\t\t\t}`),\n\t\t\tsrcID:  types.NodeID(1),\n\t\t\tdstID:  types.NodeID(2),\n\t\t\twantOK: false,\n\t\t},\n\t}\n\n\tfor _, tt := range tests 
{\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tpm, err := NewPolicyManager(tt.policy, users, nodes.ViewSlice())\n\t\t\trequire.NoError(t, err)\n\n\t\t\tperiod, ok := pm.SSHCheckParams(tt.srcID, tt.dstID)\n\t\t\tassert.Equal(t, tt.wantOK, ok, \"ok mismatch\")\n\n\t\t\tif tt.wantOK {\n\t\t\t\tassert.Equal(t, tt.wantPeriod, period, \"period mismatch\")\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestResolveLocalparts(t *testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\tentries []SSHUser\n\t\tusers   types.Users\n\t\twant    map[uint]string\n\t}{\n\t\t{\n\t\t\tname:    \"no entries\",\n\t\t\tentries: nil,\n\t\t\tusers:   types.Users{{Name: \"alice\", Email: \"alice@example.com\", Model: gorm.Model{ID: 1}}},\n\t\t\twant:    nil,\n\t\t},\n\t\t{\n\t\t\tname:    \"single match\",\n\t\t\tentries: []SSHUser{\"localpart:*@example.com\"},\n\t\t\tusers: types.Users{\n\t\t\t\t{Name: \"alice\", Email: \"alice@example.com\", Model: gorm.Model{ID: 1}},\n\t\t\t},\n\t\t\twant: map[uint]string{1: \"alice\"},\n\t\t},\n\t\t{\n\t\t\tname:    \"domain mismatch\",\n\t\t\tentries: []SSHUser{\"localpart:*@other.com\"},\n\t\t\tusers: types.Users{\n\t\t\t\t{Name: \"alice\", Email: \"alice@example.com\", Model: gorm.Model{ID: 1}},\n\t\t\t},\n\t\t\twant: map[uint]string{},\n\t\t},\n\t\t{\n\t\t\tname:    \"case insensitive domain\",\n\t\t\tentries: []SSHUser{\"localpart:*@EXAMPLE.COM\"},\n\t\t\tusers: types.Users{\n\t\t\t\t{Name: \"alice\", Email: \"alice@example.com\", Model: gorm.Model{ID: 1}},\n\t\t\t},\n\t\t\twant: map[uint]string{1: \"alice\"},\n\t\t},\n\t\t{\n\t\t\tname:    \"user without email skipped\",\n\t\t\tentries: []SSHUser{\"localpart:*@example.com\"},\n\t\t\tusers: types.Users{\n\t\t\t\t{Name: \"cli-user\", Model: gorm.Model{ID: 1}},\n\t\t\t},\n\t\t\twant: map[uint]string{},\n\t\t},\n\t\t{\n\t\t\tname: \"multiple domains multiple users\",\n\t\t\tentries: []SSHUser{\n\t\t\t\t\"localpart:*@example.com\",\n\t\t\t\t\"localpart:*@other.com\",\n\t\t\t},\n\t\t\tusers: types.Users{\n\t\t\t\t{Name: \"alice\", Email: \"alice@example.com\", Model: gorm.Model{ID: 1}},\n\t\t\t\t{Name: \"bob\", Email: \"bob@other.com\", Model: gorm.Model{ID: 2}},\n\t\t\t\t{Name: \"charlie\", Email: \"charlie@nope.com\", Model: gorm.Model{ID: 3}},\n\t\t\t},\n\t\t\twant: map[uint]string{1: \"alice\", 2: \"bob\"},\n\t\t},\n\t\t{\n\t\t\tname:    \"special chars in local part\",\n\t\t\tentries: []SSHUser{\"localpart:*@example.com\"},\n\t\t\tusers: types.Users{\n\t\t\t\t{Name: \"d\", Email: \"dave+ssh@example.com\", Model: gorm.Model{ID: 1}},\n\t\t\t},\n\t\t\twant: map[uint]string{1: \"dave+ssh\"},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot := resolveLocalparts(tt.entries, tt.users)\n\t\t\tif diff := cmp.Diff(tt.want, got); diff != \"\" {\n\t\t\t\tt.Errorf(\"resolveLocalparts() mismatch (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestGroupSourcesByUser(t *testing.T) {\n\talice := types.User{\n\t\tName: \"alice\", Email: \"alice@example.com\",\n\t\tModel: gorm.Model{ID: 1},\n\t}\n\tbob := types.User{\n\t\tName: \"bob\", Email: \"bob@example.com\",\n\t\tModel: gorm.Model{ID: 2},\n\t}\n\n\tnodeAlice := types.Node{\n\t\tHostname: \"alice-dev\",\n\t\tIPv4:     createAddr(\"100.64.0.1\"),\n\t\tUserID:   &alice.ID,\n\t\tUser:     &alice,\n\t}\n\tnodeBob := types.Node{\n\t\tHostname: \"bob-dev\",\n\t\tIPv4:     createAddr(\"100.64.0.2\"),\n\t\tUserID:   &bob.ID,\n\t\tUser:     &bob,\n\t}\n\tnodeTagged := types.Node{\n\t\tHostname: \"tagged\",\n\t\tIPv4:     
createAddr(\"100.64.0.3\"),\n\t\tUserID:   &alice.ID,\n\t\tUser:     &alice,\n\t\tTags:     []string{\"tag:server\"},\n\t}\n\n\t// Build an IPSet that includes all node IPs\n\tallIPs := func() *netipx.IPSet {\n\t\tvar b netipx.IPSetBuilder\n\t\tb.AddPrefix(netip.MustParsePrefix(\"100.64.0.0/24\"))\n\n\t\ts, _ := b.IPSet()\n\n\t\treturn s\n\t}()\n\n\ttests := []struct {\n\t\tname          string\n\t\tnodes         types.Nodes\n\t\tsrcIPs        *netipx.IPSet\n\t\twantUIDs      []uint\n\t\twantUserCount int\n\t\twantHasTagged bool\n\t\twantTaggedLen int\n\t\twantAliceIP   string\n\t\twantBobIP     string\n\t\twantTaggedIP  string\n\t}{\n\t\t{\n\t\t\tname:          \"user-owned only\",\n\t\t\tnodes:         types.Nodes{&nodeAlice, &nodeBob},\n\t\t\tsrcIPs:        allIPs,\n\t\t\twantUIDs:      []uint{1, 2},\n\t\t\twantUserCount: 2,\n\t\t\twantAliceIP:   \"100.64.0.1\",\n\t\t\twantBobIP:     \"100.64.0.2\",\n\t\t},\n\t\t{\n\t\t\tname:          \"mixed user and tagged\",\n\t\t\tnodes:         types.Nodes{&nodeAlice, &nodeTagged},\n\t\t\tsrcIPs:        allIPs,\n\t\t\twantUIDs:      []uint{1},\n\t\t\twantUserCount: 1,\n\t\t\twantHasTagged: true,\n\t\t\twantTaggedLen: 1,\n\t\t\twantAliceIP:   \"100.64.0.1\",\n\t\t\twantTaggedIP:  \"100.64.0.3\",\n\t\t},\n\t\t{\n\t\t\tname:          \"tagged only\",\n\t\t\tnodes:         types.Nodes{&nodeTagged},\n\t\t\tsrcIPs:        allIPs,\n\t\t\twantUIDs:      nil,\n\t\t\twantUserCount: 0,\n\t\t\twantHasTagged: true,\n\t\t\twantTaggedLen: 1,\n\t\t},\n\t\t{\n\t\t\tname:  \"node not in srcIPs excluded\",\n\t\t\tnodes: types.Nodes{&nodeAlice, &nodeBob},\n\t\t\tsrcIPs: func() *netipx.IPSet {\n\t\t\t\tvar b netipx.IPSetBuilder\n\t\t\t\tb.Add(netip.MustParseAddr(\"100.64.0.1\")) // only alice\n\n\t\t\t\ts, _ := b.IPSet()\n\n\t\t\t\treturn s\n\t\t\t}(),\n\t\t\twantUIDs:      []uint{1},\n\t\t\twantUserCount: 1,\n\t\t\twantAliceIP:   \"100.64.0.1\",\n\t\t},\n\t\t{\n\t\t\tname:          \"sorted by user ID\",\n\t\t\tnodes:         types.Nodes{&nodeBob, &nodeAlice}, // reverse order\n\t\t\tsrcIPs:        allIPs,\n\t\t\twantUIDs:      []uint{1, 2}, // still sorted\n\t\t\twantUserCount: 2,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tsortedUIDs, byUser, tagged := groupSourcesByUser(\n\t\t\t\ttt.nodes.ViewSlice(), tt.srcIPs,\n\t\t\t)\n\n\t\t\tassert.Equal(t, tt.wantUIDs, sortedUIDs, \"sortedUIDs\")\n\t\t\tassert.Len(t, byUser, tt.wantUserCount, \"byUser count\")\n\n\t\t\tif tt.wantHasTagged {\n\t\t\t\tassert.Len(t, tagged, tt.wantTaggedLen, \"tagged count\")\n\t\t\t} else {\n\t\t\t\tassert.Empty(t, tagged, \"tagged should be empty\")\n\t\t\t}\n\n\t\t\tif tt.wantAliceIP != \"\" {\n\t\t\t\trequire.Contains(t, byUser, uint(1))\n\t\t\t\tassert.Equal(t, tt.wantAliceIP, byUser[1][0].NodeIP)\n\t\t\t}\n\n\t\t\tif tt.wantBobIP != \"\" {\n\t\t\t\trequire.Contains(t, byUser, uint(2))\n\t\t\t\tassert.Equal(t, tt.wantBobIP, byUser[2][0].NodeIP)\n\t\t\t}\n\n\t\t\tif tt.wantTaggedIP != \"\" {\n\t\t\t\trequire.NotEmpty(t, tagged)\n\t\t\t\tassert.Equal(t, tt.wantTaggedIP, tagged[0].NodeIP)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
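  {
    "path": "hscontrol/policy/v2/hash_example_test.go",
    "content": "package v2\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/util/deephash\"\n)\n\n// TestFilterAndPolicyHashExample is an illustrative sketch added during\n// editing, not an upstream headscale test. It demonstrates why updateLocked\n// hashes the policy content together with the compiled filter: two policies\n// that both compile to an empty global filter (as happens with\n// autogroup:self) still yield different combined hashes, so a policy-only\n// change is detected and pushed to nodes. The ACL JSON shape is assumed\n// from the other tests in this package.\nfunc TestFilterAndPolicyHashExample(t *testing.T) {\n\tpolA, err := unmarshalPolicy([]byte(`{\n\t\t\"acls\": [{\"action\": \"accept\", \"src\": [\"user1@\"], \"dst\": [\"autogroup:self:*\"]}]\n\t}`))\n\trequire.NoError(t, err)\n\n\tpolB, err := unmarshalPolicy([]byte(`{\n\t\t\"acls\": [{\"action\": \"accept\", \"src\": [\"user1@\"], \"dst\": [\"autogroup:self:22\"]}]\n\t}`))\n\trequire.NoError(t, err)\n\n\t// Both combined values carry the same (empty) compiled filter; only the\n\t// policy content differs, yet the hashes diverge.\n\thashA := deephash.Hash(&filterAndPolicy{Policy: polA})\n\thashB := deephash.Hash(&filterAndPolicy{Policy: polB})\n\n\trequire.NotEqual(t, hashA, hashB, \"a policy-only change must change the combined hash\")\n}\n"
  },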
  {
    "path": "hscontrol/policy/v2/main_test.go",
    "content": "package v2\n\nimport (\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"testing\"\n)\n\n// TestMain ensures the working directory is set to the package source directory\n// so that relative testdata/ paths resolve correctly when the test binary is\n// executed from an arbitrary location (e.g., via \"go tool stress\").\nfunc TestMain(m *testing.M) {\n\t_, filename, _, ok := runtime.Caller(0)\n\tif !ok {\n\t\tpanic(\"could not determine test source directory\")\n\t}\n\n\terr := os.Chdir(filepath.Dir(filename))\n\tif err != nil {\n\t\tpanic(\"could not chdir to test source directory: \" + err.Error())\n\t}\n\n\tos.Exit(m.Run())\n}\n"
  },
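  {
    "path": "hscontrol/policy/v2/peermap_example_test.go",
    "content": "package v2\n\nimport (\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gorm.io/gorm\"\n)\n\n// TestBuildPeerMapSymmetricVisibilityExample is an illustrative sketch added\n// during editing, not an upstream headscale test. It exercises the symmetric\n// visibility rule documented on BuildPeerMap: with a one-way ACL\n// (admin -> tag:server, nothing back), BOTH nodes should still list each\n// other as peers so the server can answer the admin's connections. The ACL\n// JSON shape and the ap() helper are assumed from this package's other tests.\nfunc TestBuildPeerMapSymmetricVisibilityExample(t *testing.T) {\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"admin\"},\n\t}\n\n\tnodes := types.Nodes{\n\t\t{ID: 1, User: new(users[0]), IPv4: ap(\"100.64.0.1\"), Hostname: \"admin-device\"},\n\t\t{ID: 2, User: new(users[0]), IPv4: ap(\"100.64.0.2\"), Hostname: \"server\", Tags: []string{\"tag:server\"}},\n\t}\n\n\t// One-way rule: admin may reach tag:server; no rule allows the reverse.\n\tpol := []byte(`{\n\t\t\"tagOwners\": {\"tag:server\": [\"admin@\"]},\n\t\t\"acls\": [{\"action\": \"accept\", \"src\": [\"admin@\"], \"dst\": [\"tag:server:*\"]}]\n\t}`)\n\n\tpm, err := NewPolicyManager(pol, users, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\n\tpeerMap := pm.BuildPeerMap(nodes.ViewSlice())\n\n\t// One-way access, two-way visibility.\n\tassert.Len(t, peerMap[types.NodeID(1)], 1, \"admin should see the server\")\n\tassert.Len(t, peerMap[types.NodeID(2)], 1, \"server should see the admin back\")\n}\n"
  },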
  {
    "path": "hscontrol/policy/v2/policy.go",
    "content": "package v2\n\nimport (\n\t\"cmp\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/netip\"\n\t\"slices\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/policy/matcher\"\n\t\"github.com/juanfont/headscale/hscontrol/policy/policyutil\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/rs/zerolog/log\"\n\t\"go4.org/netipx\"\n\t\"tailscale.com/net/tsaddr\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/views\"\n\t\"tailscale.com/util/deephash\"\n)\n\n// ErrInvalidTagOwner is returned when a tag owner is not an Alias type.\nvar ErrInvalidTagOwner = errors.New(\"tag owner is not an Alias\")\n\ntype PolicyManager struct {\n\tmu    sync.Mutex\n\tpol   *Policy\n\tusers []types.User\n\tnodes views.Slice[types.NodeView]\n\n\tfilterHash deephash.Sum\n\tfilter     []tailcfg.FilterRule\n\tmatchers   []matcher.Match\n\n\ttagOwnerMapHash deephash.Sum\n\ttagOwnerMap     map[Tag]*netipx.IPSet\n\n\texitSetHash        deephash.Sum\n\texitSet            *netipx.IPSet\n\tautoApproveMapHash deephash.Sum\n\tautoApproveMap     map[netip.Prefix]*netipx.IPSet\n\n\t// Lazy map of SSH policies\n\tsshPolicyMap map[types.NodeID]*tailcfg.SSHPolicy\n\n\t// Lazy map of per-node compiled filter rules (unreduced, for autogroup:self)\n\tcompiledFilterRulesMap map[types.NodeID][]tailcfg.FilterRule\n\t// Lazy map of per-node filter rules (reduced, for packet filters)\n\tfilterRulesMap    map[types.NodeID][]tailcfg.FilterRule\n\tusesAutogroupSelf bool\n}\n\n// filterAndPolicy combines the compiled filter rules with policy content for hashing.\n// This ensures filterHash changes when policy changes, even for autogroup:self where\n// the compiled filter is always empty.\ntype filterAndPolicy struct {\n\tFilter []tailcfg.FilterRule\n\tPolicy *Policy\n}\n\n// NewPolicyManager creates a new PolicyManager from a policy file and a list of users and nodes.\n// It returns an error if the policy file is invalid.\n// The policy manager will update the filter rules based on the users and nodes.\nfunc NewPolicyManager(b []byte, users []types.User, nodes views.Slice[types.NodeView]) (*PolicyManager, error) {\n\tpolicy, err := unmarshalPolicy(b)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"parsing policy: %w\", err)\n\t}\n\n\tpm := PolicyManager{\n\t\tpol:                    policy,\n\t\tusers:                  users,\n\t\tnodes:                  nodes,\n\t\tsshPolicyMap:           make(map[types.NodeID]*tailcfg.SSHPolicy, nodes.Len()),\n\t\tcompiledFilterRulesMap: make(map[types.NodeID][]tailcfg.FilterRule, nodes.Len()),\n\t\tfilterRulesMap:         make(map[types.NodeID][]tailcfg.FilterRule, nodes.Len()),\n\t\tusesAutogroupSelf:      policy.usesAutogroupSelf(),\n\t}\n\n\t_, err = pm.updateLocked()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &pm, nil\n}\n\n// updateLocked updates the filter rules based on the current policy and nodes.\n// It must be called with the lock held.\nfunc (pm *PolicyManager) updateLocked() (bool, error) {\n\t// Check if policy uses autogroup:self\n\tpm.usesAutogroupSelf = pm.pol.usesAutogroupSelf()\n\n\tvar filter []tailcfg.FilterRule\n\n\tvar err error\n\n\t// Standard compilation for all policies\n\tfilter, err = pm.pol.compileFilterRules(pm.users, pm.nodes)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"compiling filter rules: %w\", err)\n\t}\n\n\t// Hash both the compiled filter AND the policy content together.\n\t// This ensures filterHash changes when policy changes, even for autogroup:self\n\t// where 
the compiled filter is always empty. This eliminates the need for\n\t// a separate policyHash field.\n\tfilterHash := deephash.Hash(&filterAndPolicy{\n\t\tFilter: filter,\n\t\tPolicy: pm.pol,\n\t})\n\n\tfilterChanged := filterHash != pm.filterHash\n\tif filterChanged {\n\t\tlog.Debug().\n\t\t\tStr(\"filter.hash.old\", pm.filterHash.String()[:8]).\n\t\t\tStr(\"filter.hash.new\", filterHash.String()[:8]).\n\t\t\tInt(\"filter.rules\", len(pm.filter)).\n\t\t\tInt(\"filter.rules.new\", len(filter)).\n\t\t\tMsg(\"Policy filter hash changed\")\n\t}\n\n\tpm.filter = filter\n\n\tpm.filterHash = filterHash\n\tif filterChanged {\n\t\tpm.matchers = matcher.MatchesFromFilterRules(pm.filter)\n\t}\n\n\t// Order matters, tags might be used in autoapprovers, so we need to ensure\n\t// that the map for tag owners is resolved before resolving autoapprovers.\n\t// TODO(kradalby): Order might not matter after #2417\n\ttagMap, err := resolveTagOwners(pm.pol, pm.users, pm.nodes)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"resolving tag owners map: %w\", err)\n\t}\n\n\ttagOwnerMapHash := deephash.Hash(&tagMap)\n\n\ttagOwnerChanged := tagOwnerMapHash != pm.tagOwnerMapHash\n\tif tagOwnerChanged {\n\t\tlog.Debug().\n\t\t\tStr(\"tagOwner.hash.old\", pm.tagOwnerMapHash.String()[:8]).\n\t\t\tStr(\"tagOwner.hash.new\", tagOwnerMapHash.String()[:8]).\n\t\t\tInt(\"tagOwners.old\", len(pm.tagOwnerMap)).\n\t\t\tInt(\"tagOwners.new\", len(tagMap)).\n\t\t\tMsg(\"Tag owner hash changed\")\n\t}\n\n\tpm.tagOwnerMap = tagMap\n\tpm.tagOwnerMapHash = tagOwnerMapHash\n\n\tautoMap, exitSet, err := resolveAutoApprovers(pm.pol, pm.users, pm.nodes)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"resolving auto approvers map: %w\", err)\n\t}\n\n\tautoApproveMapHash := deephash.Hash(&autoMap)\n\n\tautoApproveChanged := autoApproveMapHash != pm.autoApproveMapHash\n\tif autoApproveChanged {\n\t\tlog.Debug().\n\t\t\tStr(\"autoApprove.hash.old\", pm.autoApproveMapHash.String()[:8]).\n\t\t\tStr(\"autoApprove.hash.new\", autoApproveMapHash.String()[:8]).\n\t\t\tInt(\"autoApprovers.old\", len(pm.autoApproveMap)).\n\t\t\tInt(\"autoApprovers.new\", len(autoMap)).\n\t\t\tMsg(\"Auto-approvers hash changed\")\n\t}\n\n\tpm.autoApproveMap = autoMap\n\tpm.autoApproveMapHash = autoApproveMapHash\n\n\texitSetHash := deephash.Hash(&exitSet)\n\n\texitSetChanged := exitSetHash != pm.exitSetHash\n\tif exitSetChanged {\n\t\tlog.Debug().\n\t\t\tStr(\"exitSet.hash.old\", pm.exitSetHash.String()[:8]).\n\t\t\tStr(\"exitSet.hash.new\", exitSetHash.String()[:8]).\n\t\t\tMsg(\"Exit node set hash changed\")\n\t}\n\n\tpm.exitSet = exitSet\n\tpm.exitSetHash = exitSetHash\n\n\t// Determine if we need to send updates to nodes\n\t// filterChanged now includes policy content changes (via combined hash),\n\t// so it will detect changes even for autogroup:self where compiled filter is empty\n\tneedsUpdate := filterChanged || tagOwnerChanged || autoApproveChanged || exitSetChanged\n\n\t// Only clear caches if we're actually going to send updates\n\t// This prevents clearing caches when nothing changed, which would leave nodes\n\t// with stale filters until they reconnect. This is critical for autogroup:self\n\t// where even reloading the same policy would clear caches but not send updates.\n\tif needsUpdate {\n\t\t// Clear the SSH policy map to ensure it's recalculated with the new policy.\n\t\t// TODO(kradalby): This could potentially be optimized by only clearing the\n\t\t// policies for nodes that have changed. 
Particularly if the only difference is\n\t\t// that nodes has been added or removed.\n\t\tclear(pm.sshPolicyMap)\n\t\tclear(pm.compiledFilterRulesMap)\n\t\tclear(pm.filterRulesMap)\n\t}\n\n\t// If nothing changed, no need to update nodes\n\tif !needsUpdate {\n\t\tlog.Trace().\n\t\t\tMsg(\"Policy evaluation detected no changes - all hashes match\")\n\n\t\treturn false, nil\n\t}\n\n\tlog.Debug().\n\t\tBool(\"filter.changed\", filterChanged).\n\t\tBool(\"tagOwners.changed\", tagOwnerChanged).\n\t\tBool(\"autoApprovers.changed\", autoApproveChanged).\n\t\tBool(\"exitNodes.changed\", exitSetChanged).\n\t\tMsg(\"Policy changes require node updates\")\n\n\treturn true, nil\n}\n\nfunc (pm *PolicyManager) SSHPolicy(baseURL string, node types.NodeView) (*tailcfg.SSHPolicy, error) {\n\tpm.mu.Lock()\n\tdefer pm.mu.Unlock()\n\n\tif sshPol, ok := pm.sshPolicyMap[node.ID()]; ok {\n\t\treturn sshPol, nil\n\t}\n\n\tsshPol, err := pm.pol.compileSSHPolicy(baseURL, pm.users, node, pm.nodes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"compiling SSH policy: %w\", err)\n\t}\n\n\tpm.sshPolicyMap[node.ID()] = sshPol\n\n\treturn sshPol, nil\n}\n\n// SSHCheckParams resolves the SSH check period for a source-destination\n// node pair by looking up the current policy. This avoids trusting URL\n// parameters that a client could tamper with.\n// It returns the check period duration and whether a matching check\n// rule was found.\nfunc (pm *PolicyManager) SSHCheckParams(\n\tsrcNodeID, dstNodeID types.NodeID,\n) (time.Duration, bool) {\n\tpm.mu.Lock()\n\tdefer pm.mu.Unlock()\n\n\tif pm.pol == nil || len(pm.pol.SSHs) == 0 {\n\t\treturn 0, false\n\t}\n\n\t// Find the source and destination node views.\n\tvar srcNode, dstNode types.NodeView\n\n\tfor _, n := range pm.nodes.All() {\n\t\tnid := n.ID()\n\t\tif nid == srcNodeID {\n\t\t\tsrcNode = n\n\t\t}\n\n\t\tif nid == dstNodeID {\n\t\t\tdstNode = n\n\t\t}\n\n\t\tif srcNode.Valid() && dstNode.Valid() {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !srcNode.Valid() || !dstNode.Valid() {\n\t\treturn 0, false\n\t}\n\n\t// Iterate SSH rules to find the first matching check rule.\n\tfor _, rule := range pm.pol.SSHs {\n\t\tif rule.Action != SSHActionCheck {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Resolve sources and check if src node matches.\n\t\tsrcIPs, err := rule.Sources.Resolve(pm.pol, pm.users, pm.nodes)\n\t\tif err != nil || srcIPs == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif !slices.ContainsFunc(srcNode.IPs(), srcIPs.Contains) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Check if dst node matches any destination.\n\t\tfor _, dst := range rule.Destinations {\n\t\t\tif ag, isAG := dst.(*AutoGroup); isAG && ag.Is(AutoGroupSelf) {\n\t\t\t\tif !srcNode.IsTagged() && !dstNode.IsTagged() &&\n\t\t\t\t\tsrcNode.User().ID() == dstNode.User().ID() {\n\t\t\t\t\treturn checkPeriodFromRule(rule), true\n\t\t\t\t}\n\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdstIPs, err := dst.Resolve(pm.pol, pm.users, pm.nodes)\n\t\t\tif err != nil || dstIPs == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif slices.ContainsFunc(dstNode.IPs(), dstIPs.Contains) {\n\t\t\t\treturn checkPeriodFromRule(rule), true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn 0, false\n}\n\nfunc (pm *PolicyManager) SetPolicy(polB []byte) (bool, error) {\n\tif len(polB) == 0 {\n\t\treturn false, nil\n\t}\n\n\tpol, err := unmarshalPolicy(polB)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"parsing policy: %w\", err)\n\t}\n\n\tpm.mu.Lock()\n\tdefer pm.mu.Unlock()\n\n\t// Log policy metadata for debugging\n\tlog.Debug().\n\t\tInt(\"policy.bytes\", 
len(polB)).\n\t\tInt(\"acls.count\", len(pol.ACLs)).\n\t\tInt(\"groups.count\", len(pol.Groups)).\n\t\tInt(\"hosts.count\", len(pol.Hosts)).\n\t\tInt(\"tagOwners.count\", len(pol.TagOwners)).\n\t\tInt(\"autoApprovers.routes.count\", len(pol.AutoApprovers.Routes)).\n\t\tMsg(\"Policy parsed successfully\")\n\n\tpm.pol = pol\n\n\treturn pm.updateLocked()\n}\n\n// Filter returns the current filter rules for the entire tailnet and the associated matchers.\nfunc (pm *PolicyManager) Filter() ([]tailcfg.FilterRule, []matcher.Match) {\n\tif pm == nil {\n\t\treturn nil, nil\n\t}\n\n\tpm.mu.Lock()\n\tdefer pm.mu.Unlock()\n\n\treturn pm.filter, pm.matchers\n}\n\n// BuildPeerMap constructs peer relationship maps for the given nodes.\n// For global filters, it uses the global filter matchers for all nodes.\n// For autogroup:self policies (empty global filter), it builds per-node\n// peer maps using each node's specific filter rules.\nfunc (pm *PolicyManager) BuildPeerMap(nodes views.Slice[types.NodeView]) map[types.NodeID][]types.NodeView {\n\tif pm == nil {\n\t\treturn nil\n\t}\n\n\tpm.mu.Lock()\n\tdefer pm.mu.Unlock()\n\n\t// If we have a global filter, use it for all nodes (normal case)\n\tif !pm.usesAutogroupSelf {\n\t\tret := make(map[types.NodeID][]types.NodeView, nodes.Len())\n\n\t\t// Build the map of all peers according to the matchers.\n\t\t// Compared to ReduceNodes, which builds the list per node and therefore does\n\t\t// the full O(n^2) work, this records both directions of a relationship as\n\t\t// soon as it is found, so only n*(n-1)/2 pairs are checked and less work is\n\t\t// done per node.\n\t\tfor i := range nodes.Len() {\n\t\t\tfor j := i + 1; j < nodes.Len(); j++ {\n\t\t\t\tif nodes.At(i).ID() == nodes.At(j).ID() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif nodes.At(i).CanAccess(pm.matchers, nodes.At(j)) || nodes.At(j).CanAccess(pm.matchers, nodes.At(i)) {\n\t\t\t\t\tret[nodes.At(i).ID()] = append(ret[nodes.At(i).ID()], nodes.At(j))\n\t\t\t\t\tret[nodes.At(j).ID()] = append(ret[nodes.At(j).ID()], nodes.At(i))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn ret\n\t}\n\n\t// For autogroup:self (empty global filter), build per-node peer relationships\n\tret := make(map[types.NodeID][]types.NodeView, nodes.Len())\n\n\t// Pre-compute per-node matchers using unreduced compiled rules.\n\t// We need unreduced rules to determine peer relationships correctly.\n\t// Reduced rules only show destinations where the node is the target,\n\t// but peer relationships require the full bidirectional access rules.\n\tnodeMatchers := make(map[types.NodeID][]matcher.Match, nodes.Len())\n\tfor _, node := range nodes.All() {\n\t\tfilter, err := pm.compileFilterRulesForNodeLocked(node)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\t// Include all nodes in nodeMatchers, even those with empty filters.\n\t\t// Empty filters result in empty matchers where CanAccess() returns false,\n\t\t// but the node still needs to be in the map so hasFilterX is true.\n\t\t// This ensures symmetric visibility works correctly: if node A can access\n\t\t// node B, both should see each other regardless of B's filter rules.\n\t\tnodeMatchers[node.ID()] = matcher.MatchesFromFilterRules(filter)\n\t}\n\n\t// Check each node pair for peer relationships.\n\t// Start j at i+1 to avoid checking the same pair twice and creating duplicates.\n\t// We use symmetric visibility: if EITHER node can access the other, BOTH see\n\t// each other. 
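For illustration only (hypothetical nodes A and B, not identifiers\n\t// in this file):\n\t//   A can reach B and B can reach A -> both listed as peers\n\t//   only A can reach B              -> both still listed as peers\n\t//   neither can reach the other     -> neither is listed\n\t// 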
This matches the global filter path behavior and ensures that\n\t// one-way access rules (e.g., admin -> tagged server) still allow both nodes\n\t// to see each other as peers, which is required for network connectivity.\n\tfor i := range nodes.Len() {\n\t\tnodeI := nodes.At(i)\n\t\tmatchersI, hasFilterI := nodeMatchers[nodeI.ID()]\n\n\t\tfor j := i + 1; j < nodes.Len(); j++ {\n\t\t\tnodeJ := nodes.At(j)\n\t\t\tmatchersJ, hasFilterJ := nodeMatchers[nodeJ.ID()]\n\n\t\t\t// If either node can access the other, both should see each other as peers.\n\t\t\t// This symmetric visibility is required for proper network operation:\n\t\t\t// - Admin with *:* rule should see tagged servers (even if servers\n\t\t\t//   can't access admin)\n\t\t\t// - Servers should see admin so they can respond to admin's connections\n\t\t\tcanIAccessJ := hasFilterI && nodeI.CanAccess(matchersI, nodeJ)\n\t\t\tcanJAccessI := hasFilterJ && nodeJ.CanAccess(matchersJ, nodeI)\n\n\t\t\tif canIAccessJ || canJAccessI {\n\t\t\t\tret[nodeI.ID()] = append(ret[nodeI.ID()], nodeJ)\n\t\t\t\tret[nodeJ.ID()] = append(ret[nodeJ.ID()], nodeI)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ret\n}\n\n// compileFilterRulesForNodeLocked returns the unreduced compiled filter rules for a node\n// when using autogroup:self. This is used by BuildPeerMap to determine peer relationships.\n// For packet filters sent to nodes, use filterForNodeLocked which returns reduced rules.\nfunc (pm *PolicyManager) compileFilterRulesForNodeLocked(node types.NodeView) ([]tailcfg.FilterRule, error) {\n\tif pm == nil {\n\t\treturn nil, nil\n\t}\n\n\t// Check if we have cached compiled rules\n\tif rules, ok := pm.compiledFilterRulesMap[node.ID()]; ok {\n\t\treturn rules, nil\n\t}\n\n\t// Compile per-node rules with autogroup:self expanded\n\trules, err := pm.pol.compileFilterRulesForNode(pm.users, node, pm.nodes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"compiling filter rules for node: %w\", err)\n\t}\n\n\t// Cache the unreduced compiled rules\n\tpm.compiledFilterRulesMap[node.ID()] = rules\n\n\treturn rules, nil\n}\n\n// filterForNodeLocked returns the filter rules for a specific node, already reduced\n// to only include rules relevant to that node.\n// This variant of FilterForNode assumes pm.mu is already held and must only be\n// called with the lock held.\n// BuildPeerMap already holds the lock, so we need a version that doesn't re-acquire it.\nfunc (pm *PolicyManager) filterForNodeLocked(node types.NodeView) ([]tailcfg.FilterRule, error) {\n\tif pm == nil {\n\t\treturn nil, nil\n\t}\n\n\tif !pm.usesAutogroupSelf {\n\t\t// For global filters, reduce to only rules relevant to this node.\n\t\t// Cache the reduced filter per node for efficiency.\n\t\tif rules, ok := pm.filterRulesMap[node.ID()]; ok {\n\t\t\treturn rules, nil\n\t\t}\n\n\t\t// Use policyutil.ReduceFilterRules for global filter reduction.\n\t\treducedFilter := policyutil.ReduceFilterRules(node, pm.filter)\n\n\t\tpm.filterRulesMap[node.ID()] = reducedFilter\n\n\t\treturn reducedFilter, nil\n\t}\n\n\t// For autogroup:self, compile per-node rules then reduce them.\n\t// Check if we have cached reduced rules for this node.\n\tif rules, ok := pm.filterRulesMap[node.ID()]; ok {\n\t\treturn rules, nil\n\t}\n\n\t// Get unreduced compiled rules\n\tcompiledRules, err := pm.compileFilterRulesForNodeLocked(node)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Reduce the compiled rules to only destinations relevant to this node\n\treducedFilter := policyutil.ReduceFilterRules(node, compiledRules)\n\n\t// Cache the reduced 
filter\n\tpm.filterRulesMap[node.ID()] = reducedFilter\n\n\treturn reducedFilter, nil\n}\n\n// FilterForNode returns the filter rules for a specific node, already reduced\n// to only include rules relevant to that node.\n// If the policy uses autogroup:self, this returns node-specific compiled rules.\n// Otherwise, it returns the global filter reduced for this node.\nfunc (pm *PolicyManager) FilterForNode(node types.NodeView) ([]tailcfg.FilterRule, error) {\n\tif pm == nil {\n\t\treturn nil, nil\n\t}\n\n\tpm.mu.Lock()\n\tdefer pm.mu.Unlock()\n\n\treturn pm.filterForNodeLocked(node)\n}\n\n// MatchersForNode returns the matchers for peer relationship determination for a specific node.\n// These are UNREDUCED matchers - they include all rules where the node could be either source or destination.\n// This is different from FilterForNode which returns REDUCED rules for packet filtering.\n//\n// For global policies: returns the global matchers (same for all nodes)\n// For autogroup:self: returns node-specific matchers from unreduced compiled rules.\nfunc (pm *PolicyManager) MatchersForNode(node types.NodeView) ([]matcher.Match, error) {\n\tif pm == nil {\n\t\treturn nil, nil\n\t}\n\n\tpm.mu.Lock()\n\tdefer pm.mu.Unlock()\n\n\t// For global policies, return the shared global matchers\n\tif !pm.usesAutogroupSelf {\n\t\treturn pm.matchers, nil\n\t}\n\n\t// For autogroup:self, get unreduced compiled rules and create matchers\n\tcompiledRules, err := pm.compileFilterRulesForNodeLocked(node)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create matchers from unreduced rules for peer relationship determination\n\treturn matcher.MatchesFromFilterRules(compiledRules), nil\n}\n\n// SetUsers updates the users in the policy manager and updates the filter rules.\nfunc (pm *PolicyManager) SetUsers(users []types.User) (bool, error) {\n\tif pm == nil {\n\t\treturn false, nil\n\t}\n\n\tpm.mu.Lock()\n\tdefer pm.mu.Unlock()\n\n\tpm.users = users\n\n\t// Clear SSH policy map when users change to force SSH policy recomputation\n\t// This ensures that if SSH policy compilation previously failed due to missing users,\n\t// it will be retried with the new user list\n\tclear(pm.sshPolicyMap)\n\n\tchanged, err := pm.updateLocked()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// If SSH policies exist, force a policy change when users are updated\n\t// This ensures nodes get updated SSH policies even if other policy hashes didn't change\n\tif pm.pol != nil && pm.pol.SSHs != nil && len(pm.pol.SSHs) > 0 {\n\t\treturn true, nil\n\t}\n\n\treturn changed, nil\n}\n\n// SetNodes updates the nodes in the policy manager and updates the filter rules.\nfunc (pm *PolicyManager) SetNodes(nodes views.Slice[types.NodeView]) (bool, error) {\n\tif pm == nil {\n\t\treturn false, nil\n\t}\n\n\tpm.mu.Lock()\n\tdefer pm.mu.Unlock()\n\n\tpolicyChanged := pm.nodesHavePolicyAffectingChanges(nodes)\n\n\t// Invalidate cache entries for nodes that changed.\n\t// For autogroup:self: invalidate all nodes belonging to affected users (peer changes).\n\t// For global policies: invalidate only nodes whose properties changed (IPs, routes).\n\tpm.invalidateNodeCache(nodes)\n\n\tpm.nodes = nodes\n\n\t// When policy-affecting node properties change, we must recompile filters because:\n\t// 1. User/group aliases (like \"user1@\") resolve to node IPs\n\t// 2. Tag aliases (like \"tag:server\") match nodes based on their tags\n\t// 3. 
Filter compilation needs nodes to generate rules\n\t//\n\t// For autogroup:self: return true when nodes change even if the global filter\n\t// hash didn't change. The global filter is empty for autogroup:self (each node\n\t// has its own filter), so the hash never changes. But peer relationships DO\n\t// change when nodes are added/removed, so we must signal this to trigger updates.\n\t// For global policies: the filter must be recompiled to include the new nodes.\n\tif policyChanged {\n\t\t// Recompile filter with the new node list\n\t\tneedsUpdate, err := pm.updateLocked()\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tif !needsUpdate {\n\t\t\t// This ensures fresh filter rules are generated for all nodes\n\t\t\tclear(pm.sshPolicyMap)\n\t\t\tclear(pm.compiledFilterRulesMap)\n\t\t\tclear(pm.filterRulesMap)\n\t\t}\n\t\t// Always return true when nodes changed, even if filter hash didn't change\n\t\t// (can happen with autogroup:self or when nodes are added but don't affect rules)\n\t\treturn true, nil\n\t}\n\n\treturn false, nil\n}\n\nfunc (pm *PolicyManager) nodesHavePolicyAffectingChanges(newNodes views.Slice[types.NodeView]) bool {\n\tif pm.nodes.Len() != newNodes.Len() {\n\t\treturn true\n\t}\n\n\toldNodes := make(map[types.NodeID]types.NodeView, pm.nodes.Len())\n\tfor _, node := range pm.nodes.All() {\n\t\toldNodes[node.ID()] = node\n\t}\n\n\tfor _, newNode := range newNodes.All() {\n\t\toldNode, exists := oldNodes[newNode.ID()]\n\t\tif !exists {\n\t\t\treturn true\n\t\t}\n\n\t\tif newNode.HasPolicyChange(oldNode) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n// NodeCanHaveTag checks if a node can have the specified tag during client-initiated\n// registration or reauth flows (e.g., tailscale up --advertise-tags).\n//\n// This function is NOT used by the admin API's SetNodeTags - admins can set any\n// existing tag on any node by calling State.SetNodeTags directly, which bypasses\n// this authorization check.\nfunc (pm *PolicyManager) NodeCanHaveTag(node types.NodeView, tag string) bool {\n\tif pm == nil || pm.pol == nil {\n\t\treturn false\n\t}\n\n\tpm.mu.Lock()\n\tdefer pm.mu.Unlock()\n\n\t// Check if tag exists in policy\n\towners, exists := pm.pol.TagOwners[Tag(tag)]\n\tif !exists {\n\t\treturn false\n\t}\n\n\t// Check if node's owner can assign this tag via the pre-resolved tagOwnerMap.\n\t// The tagOwnerMap contains IP sets built from resolving TagOwners entries\n\t// (usernames/groups) to their nodes' IPs, so checking if the node's IP\n\t// is in the set answers \"does this node's owner own this tag?\"\n\tif ips, ok := pm.tagOwnerMap[Tag(tag)]; ok {\n\t\tif slices.ContainsFunc(node.IPs(), ips.Contains) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\t// For new nodes being registered, their IP may not yet be in the tagOwnerMap.\n\t// Fall back to checking the node's user directly against the TagOwners.\n\t// This handles the case where a user registers a new node with --advertise-tags.\n\tif node.User().Valid() {\n\t\tfor _, owner := range owners {\n\t\t\tif pm.userMatchesOwner(node.User(), owner) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}\n\n// userMatchesOwner checks if a user matches a tag owner entry.\n// This is used as a fallback when the node's IP is not in the tagOwnerMap.\nfunc (pm *PolicyManager) userMatchesOwner(user types.UserView, owner Owner) bool {\n\tswitch o := owner.(type) {\n\tcase *Username:\n\t\tif o == nil {\n\t\t\treturn false\n\t\t}\n\t\t// Resolve the username to find the user it refers to\n\t\tresolvedUser, err := 
o.resolveUser(pm.users)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\n\t\treturn user.ID() == resolvedUser.ID\n\n\tcase *Group:\n\t\tif o == nil || pm.pol == nil {\n\t\t\treturn false\n\t\t}\n\t\t// Resolve the group to get usernames\n\t\tusernames, ok := pm.pol.Groups[*o]\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t\t// Check if the user matches any username in the group\n\t\tfor _, uname := range usernames {\n\t\t\tresolvedUser, err := uname.resolveUser(pm.users)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif user.ID() == resolvedUser.ID {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\treturn false\n\n\tdefault:\n\t\treturn false\n\t}\n}\n\n// TagExists reports whether the given tag is defined in the policy.\nfunc (pm *PolicyManager) TagExists(tag string) bool {\n\tif pm == nil || pm.pol == nil {\n\t\treturn false\n\t}\n\n\tpm.mu.Lock()\n\tdefer pm.mu.Unlock()\n\n\t_, exists := pm.pol.TagOwners[Tag(tag)]\n\n\treturn exists\n}\n\nfunc (pm *PolicyManager) NodeCanApproveRoute(node types.NodeView, route netip.Prefix) bool {\n\tif pm == nil {\n\t\treturn false\n\t}\n\n\t// Hold pm.mu for the whole check: exitSet and autoApproveMap are both\n\t// replaced under this lock in updateLocked.\n\tpm.mu.Lock()\n\tdefer pm.mu.Unlock()\n\n\t// If the route to-be-approved is an exit route, then we need to check\n\t// if the node is allowed to approve it. This is treated differently\n\t// than the auto-approvers, as the auto-approvers are not allowed to\n\t// approve the whole /0 range.\n\t// However, an auto approver might be /0, meaning that they can approve\n\t// all routes available, just not exit nodes.\n\tif tsaddr.IsExitRoute(route) {\n\t\tif pm.exitSet == nil {\n\t\t\treturn false\n\t\t}\n\n\t\tif slices.ContainsFunc(node.IPs(), pm.exitSet.Contains) {\n\t\t\treturn true\n\t\t}\n\n\t\treturn false\n\t}\n\n\t// The fast path is that a node requests to approve a prefix\n\t// where there is an exact entry, e.g. 
10.0.0.0/8, then\n\t// check and return quickly\n\tif approvers, ok := pm.autoApproveMap[route]; ok {\n\t\tcanApprove := slices.ContainsFunc(node.IPs(), approvers.Contains)\n\t\tif canApprove {\n\t\t\treturn true\n\t\t}\n\t}\n\n\t// The slow path is that the node tries to approve\n\t// 10.0.10.0/24, which is a part of 10.0.0.0/8, then we\n\t// cannot just lookup in the prefix map and have to check\n\t// if there is a \"parent\" prefix available.\n\tfor prefix, approveAddrs := range pm.autoApproveMap {\n\t\t// Check if prefix is larger (so containing) and then overlaps\n\t\t// the route to see if the node can approve a subset of an autoapprover\n\t\tif prefix.Bits() <= route.Bits() && prefix.Overlaps(route) {\n\t\t\tcanApprove := slices.ContainsFunc(node.IPs(), approveAddrs.Contains)\n\t\t\tif canApprove {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc (pm *PolicyManager) Version() int {\n\treturn 2\n}\n\nfunc (pm *PolicyManager) DebugString() string {\n\tif pm == nil {\n\t\treturn \"PolicyManager is not setup\"\n\t}\n\n\tvar sb strings.Builder\n\n\tfmt.Fprintf(&sb, \"PolicyManager (v%d):\\n\\n\", pm.Version())\n\n\tsb.WriteString(\"\\n\\n\")\n\n\tif pm.pol != nil {\n\t\tpol, err := json.MarshalIndent(pm.pol, \"\", \"  \")\n\t\tif err == nil {\n\t\t\tsb.WriteString(\"Policy:\\n\")\n\t\t\tsb.Write(pol)\n\t\t\tsb.WriteString(\"\\n\\n\")\n\t\t}\n\t}\n\n\tfmt.Fprintf(&sb, \"AutoApprover (%d):\\n\", len(pm.autoApproveMap))\n\n\tfor prefix, approveAddrs := range pm.autoApproveMap {\n\t\tfmt.Fprintf(&sb, \"\\t%s:\\n\", prefix)\n\n\t\tfor _, iprange := range approveAddrs.Ranges() {\n\t\t\tfmt.Fprintf(&sb, \"\\t\\t%s\\n\", iprange)\n\t\t}\n\t}\n\n\tsb.WriteString(\"\\n\\n\")\n\n\tfmt.Fprintf(&sb, \"TagOwner (%d):\\n\", len(pm.tagOwnerMap))\n\n\tfor prefix, tagOwners := range pm.tagOwnerMap {\n\t\tfmt.Fprintf(&sb, \"\\t%s:\\n\", prefix)\n\n\t\tfor _, iprange := range tagOwners.Ranges() {\n\t\t\tfmt.Fprintf(&sb, \"\\t\\t%s\\n\", iprange)\n\t\t}\n\t}\n\n\tsb.WriteString(\"\\n\\n\")\n\n\tif pm.filter != nil {\n\t\tfilter, err := json.MarshalIndent(pm.filter, \"\", \"  \")\n\t\tif err == nil {\n\t\t\tsb.WriteString(\"Compiled filter:\\n\")\n\t\t\tsb.Write(filter)\n\t\t\tsb.WriteString(\"\\n\\n\")\n\t\t}\n\t}\n\n\tsb.WriteString(\"\\n\\n\")\n\tsb.WriteString(\"Matchers:\\n\")\n\tsb.WriteString(\"an internal structure used to filter nodes and routes\\n\")\n\n\tfor _, match := range pm.matchers {\n\t\tsb.WriteString(match.DebugString())\n\t\tsb.WriteString(\"\\n\")\n\t}\n\n\tsb.WriteString(\"\\n\\n\")\n\tsb.WriteString(\"Nodes:\\n\")\n\n\tfor _, node := range pm.nodes.All() {\n\t\tsb.WriteString(node.String())\n\t\tsb.WriteString(\"\\n\")\n\t}\n\n\treturn sb.String()\n}\n\n// invalidateAutogroupSelfCache intelligently clears only the cache entries that need to be\n// invalidated when using autogroup:self policies. 
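As a hypothetical example: if user1\n// gains or loses a device, only the cache entries for user1's nodes are\n// dropped from compiledFilterRulesMap and filterRulesMap, while user2's\n// entries stay warm. 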
This is much more efficient than clearing\n// the entire cache.\nfunc (pm *PolicyManager) invalidateAutogroupSelfCache(oldNodes, newNodes views.Slice[types.NodeView]) {\n\t// Build maps for efficient lookup\n\toldNodeMap := make(map[types.NodeID]types.NodeView)\n\tfor _, node := range oldNodes.All() {\n\t\toldNodeMap[node.ID()] = node\n\t}\n\n\tnewNodeMap := make(map[types.NodeID]types.NodeView)\n\tfor _, node := range newNodes.All() {\n\t\tnewNodeMap[node.ID()] = node\n\t}\n\n\t// Track which users are affected by changes.\n\t// Tagged nodes don't participate in autogroup:self (identity is tag-based),\n\t// so we skip them when collecting affected users, except when tag status changes\n\t// (which affects the user's device set).\n\taffectedUsers := make(map[uint]struct{})\n\n\t// Check for removed nodes (only non-tagged nodes affect autogroup:self)\n\tfor nodeID, oldNode := range oldNodeMap {\n\t\tif _, exists := newNodeMap[nodeID]; !exists {\n\t\t\tif !oldNode.IsTagged() {\n\t\t\t\taffectedUsers[oldNode.User().ID()] = struct{}{}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Check for added nodes (only non-tagged nodes affect autogroup:self)\n\tfor nodeID, newNode := range newNodeMap {\n\t\tif _, exists := oldNodeMap[nodeID]; !exists {\n\t\t\tif !newNode.IsTagged() {\n\t\t\t\taffectedUsers[newNode.User().ID()] = struct{}{}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Check for modified nodes (user changes, tag changes, IP changes)\n\tfor nodeID, newNode := range newNodeMap {\n\t\tif oldNode, exists := oldNodeMap[nodeID]; exists {\n\t\t\t// Check if tag status changed — this affects the user's autogroup:self device set.\n\t\t\t// Use the non-tagged version to get the user ID safely.\n\t\t\tif oldNode.IsTagged() != newNode.IsTagged() {\n\t\t\t\tif !oldNode.IsTagged() {\n\t\t\t\t\t// Was untagged, now tagged: user lost a device\n\t\t\t\t\taffectedUsers[oldNode.User().ID()] = struct{}{}\n\t\t\t\t} else {\n\t\t\t\t\t// Was tagged, now untagged: user gained a device\n\t\t\t\t\taffectedUsers[newNode.User().ID()] = struct{}{}\n\t\t\t\t}\n\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Skip tagged nodes for remaining checks — they don't participate in autogroup:self\n\t\t\tif newNode.IsTagged() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Check if user changed (both versions are non-tagged here)\n\t\t\tif oldNode.User().ID() != newNode.User().ID() {\n\t\t\t\taffectedUsers[oldNode.User().ID()] = struct{}{}\n\t\t\t\taffectedUsers[newNode.User().ID()] = struct{}{}\n\t\t\t}\n\n\t\t\t// Check if IPs changed (simple check - could be more sophisticated)\n\t\t\toldIPs := oldNode.IPs()\n\n\t\t\tnewIPs := newNode.IPs()\n\t\t\tif len(oldIPs) != len(newIPs) {\n\t\t\t\taffectedUsers[newNode.User().ID()] = struct{}{}\n\t\t\t} else {\n\t\t\t\t// Check if any IPs are different\n\t\t\t\tfor i, oldIP := range oldIPs {\n\t\t\t\t\tif i >= len(newIPs) || oldIP != newIPs[i] {\n\t\t\t\t\t\taffectedUsers[newNode.User().ID()] = struct{}{}\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Clear cache entries for affected users only.\n\t// For autogroup:self, we need to clear all nodes belonging to affected users\n\t// because autogroup:self rules depend on the entire user's device set.\n\tfor nodeID := range pm.filterRulesMap {\n\t\t// Find the user for this cached node\n\t\tvar nodeUserID uint\n\n\t\tfound := false\n\n\t\t// Check in new nodes first\n\t\tfor _, node := range newNodes.All() {\n\t\t\tif node.ID() == nodeID {\n\t\t\t\t// Tagged nodes don't participate in autogroup:self,\n\t\t\t\t// so their cache doesn't need user-based 
invalidation.\n\t\t\t\tif node.IsTagged() {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tnodeUserID = node.User().ID()\n\t\t\t\tfound = true\n\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// If not found in new nodes, check old nodes\n\t\tif !found {\n\t\t\tfor _, node := range oldNodes.All() {\n\t\t\t\tif node.ID() == nodeID {\n\t\t\t\t\tif node.IsTagged() {\n\t\t\t\t\t\tfound = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\n\t\t\t\t\tnodeUserID = node.User().ID()\n\t\t\t\t\tfound = true\n\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// If we found the user and they're affected, clear this cache entry\n\t\tif found {\n\t\t\tif _, affected := affectedUsers[nodeUserID]; affected {\n\t\t\t\tdelete(pm.compiledFilterRulesMap, nodeID)\n\t\t\t\tdelete(pm.filterRulesMap, nodeID)\n\t\t\t}\n\t\t} else {\n\t\t\t// Node not found in either old or new list, clear it\n\t\t\tdelete(pm.compiledFilterRulesMap, nodeID)\n\t\t\tdelete(pm.filterRulesMap, nodeID)\n\t\t}\n\t}\n\n\tif len(affectedUsers) > 0 {\n\t\tlog.Debug().\n\t\t\tInt(\"affected_users\", len(affectedUsers)).\n\t\t\tInt(\"remaining_cache_entries\", len(pm.filterRulesMap)).\n\t\t\tMsg(\"Selectively cleared autogroup:self cache for affected users\")\n\t}\n}\n\n// invalidateNodeCache invalidates cache entries based on what changed.\nfunc (pm *PolicyManager) invalidateNodeCache(newNodes views.Slice[types.NodeView]) {\n\tif pm.usesAutogroupSelf {\n\t\t// For autogroup:self, a node's filter depends on its peers (same user).\n\t\t// When any node in a user changes, all nodes for that user need invalidation.\n\t\tpm.invalidateAutogroupSelfCache(pm.nodes, newNodes)\n\t} else {\n\t\t// For global policies, a node's filter depends only on its own properties.\n\t\t// Only invalidate nodes whose properties actually changed.\n\t\tpm.invalidateGlobalPolicyCache(newNodes)\n\t}\n}\n\n// invalidateGlobalPolicyCache invalidates only nodes whose properties affecting\n// ReduceFilterRules changed. 
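For example, a node whose approved routes\n// or IPs change (per HasNetworkChanges) has its cached reduced filter deleted\n// and lazily recomputed on the next FilterForNode call; untouched nodes keep\n// their entries. 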
For global policies, each node's filter is independent.\nfunc (pm *PolicyManager) invalidateGlobalPolicyCache(newNodes views.Slice[types.NodeView]) {\n\toldNodeMap := make(map[types.NodeID]types.NodeView)\n\tfor _, node := range pm.nodes.All() {\n\t\toldNodeMap[node.ID()] = node\n\t}\n\n\tnewNodeMap := make(map[types.NodeID]types.NodeView)\n\tfor _, node := range newNodes.All() {\n\t\tnewNodeMap[node.ID()] = node\n\t}\n\n\t// Invalidate nodes whose properties changed\n\tfor nodeID, newNode := range newNodeMap {\n\t\toldNode, existed := oldNodeMap[nodeID]\n\t\tif !existed {\n\t\t\t// New node - no cache entry yet, will be lazily calculated\n\t\t\tcontinue\n\t\t}\n\n\t\tif newNode.HasNetworkChanges(oldNode) {\n\t\t\tdelete(pm.filterRulesMap, nodeID)\n\t\t}\n\t}\n\n\t// Remove deleted nodes from cache\n\tfor nodeID := range pm.filterRulesMap {\n\t\tif _, exists := newNodeMap[nodeID]; !exists {\n\t\t\tdelete(pm.filterRulesMap, nodeID)\n\t\t}\n\t}\n}\n\n// flattenTags flattens the TagOwners by resolving nested tags and detecting cycles.\n// It will return a Owners list where all the Tag types have been resolved to their underlying Owners.\nfunc flattenTags(tagOwners TagOwners, tag Tag, visiting map[Tag]bool, chain []Tag) (Owners, error) {\n\tif visiting[tag] {\n\t\tcycleStart := 0\n\n\t\tfor i, t := range chain {\n\t\t\tif t == tag {\n\t\t\t\tcycleStart = i\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tcycleTags := make([]string, len(chain[cycleStart:]))\n\t\tfor i, t := range chain[cycleStart:] {\n\t\t\tcycleTags[i] = string(t)\n\t\t}\n\n\t\tslices.Sort(cycleTags)\n\n\t\treturn nil, fmt.Errorf(\"%w: %s\", ErrCircularReference, strings.Join(cycleTags, \" -> \"))\n\t}\n\n\tvisiting[tag] = true\n\n\tchain = append(chain, tag)\n\tdefer delete(visiting, tag)\n\n\tvar result Owners\n\n\tfor _, owner := range tagOwners[tag] {\n\t\tswitch o := owner.(type) {\n\t\tcase *Tag:\n\t\t\tif _, ok := tagOwners[*o]; !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"tag %q %w %q\", tag, ErrUndefinedTagReference, *o)\n\t\t\t}\n\n\t\t\tnested, err := flattenTags(tagOwners, *o, visiting, chain)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tresult = append(result, nested...)\n\t\tdefault:\n\t\t\tresult = append(result, owner)\n\t\t}\n\t}\n\n\treturn result, nil\n}\n\n// flattenTagOwners flattens all TagOwners by resolving nested tags and detecting cycles.\n// It will return a new TagOwners map where all the Tag types have been resolved to their underlying Owners.\nfunc flattenTagOwners(tagOwners TagOwners) (TagOwners, error) {\n\tret := make(TagOwners)\n\n\tfor tag := range tagOwners {\n\t\tflattened, err := flattenTags(tagOwners, tag, make(map[Tag]bool), nil)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tslices.SortFunc(flattened, func(a, b Owner) int {\n\t\t\treturn cmp.Compare(a.String(), b.String())\n\t\t})\n\t\tret[tag] = slices.CompactFunc(flattened, func(a, b Owner) bool {\n\t\t\treturn a.String() == b.String()\n\t\t})\n\t}\n\n\treturn ret, nil\n}\n\n// resolveTagOwners resolves the TagOwners to a map of Tag to netipx.IPSet.\n// The resulting map can be used to quickly look up the IPSet for a given Tag.\n// It is intended for internal use in a PolicyManager.\nfunc resolveTagOwners(p *Policy, users types.Users, nodes views.Slice[types.NodeView]) (map[Tag]*netipx.IPSet, error) {\n\tif p == nil {\n\t\treturn make(map[Tag]*netipx.IPSet), nil\n\t}\n\n\tif len(p.TagOwners) == 0 {\n\t\treturn make(map[Tag]*netipx.IPSet), nil\n\t}\n\n\tret := make(map[Tag]*netipx.IPSet)\n\n\ttagOwners, err := 
flattenTagOwners(p.TagOwners)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor tag, owners := range tagOwners {\n\t\tvar ips netipx.IPSetBuilder\n\n\t\tfor _, owner := range owners {\n\t\t\tswitch o := owner.(type) {\n\t\t\tcase *Tag:\n\t\t\t\t// After flattening, Tag types should not appear in the owners list.\n\t\t\t\t// If they do, skip them as they represent already-resolved references.\n\n\t\t\tcase Alias:\n\t\t\t\t// If it does not resolve, that means the tag is not associated with any IP addresses.\n\t\t\t\tresolved, _ := o.Resolve(p, users, nodes)\n\t\t\t\tips.AddSet(resolved)\n\n\t\t\tdefault:\n\t\t\t\t// Should never happen - after flattening, all owners should be Alias types\n\t\t\t\treturn nil, fmt.Errorf(\"%w: %v\", ErrInvalidTagOwner, owner)\n\t\t\t}\n\t\t}\n\n\t\tipSet, err := ips.IPSet()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tret[tag] = ipSet\n\t}\n\n\treturn ret, nil\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/policy_test.go",
    "content": "package v2\n\nimport (\n\t\"net/netip\"\n\t\"slices\"\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/juanfont/headscale/hscontrol/policy/matcher\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gorm.io/gorm\"\n\t\"tailscale.com/tailcfg\"\n)\n\nfunc node(name, ipv4, ipv6 string, user types.User) *types.Node {\n\treturn &types.Node{\n\t\tID:       0,\n\t\tHostname: name,\n\t\tIPv4:     ap(ipv4),\n\t\tIPv6:     ap(ipv6),\n\t\tUser:     new(user),\n\t\tUserID:   new(user.ID),\n\t}\n}\n\nfunc TestPolicyManager(t *testing.T) {\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"testuser\", Email: \"testuser@headscale.net\"},\n\t\t{Model: gorm.Model{ID: 2}, Name: \"otheruser\", Email: \"otheruser@headscale.net\"},\n\t}\n\n\ttests := []struct {\n\t\tname         string\n\t\tpol          string\n\t\tnodes        types.Nodes\n\t\twantFilter   []tailcfg.FilterRule\n\t\twantMatchers []matcher.Match\n\t}{\n\t\t{\n\t\t\tname:         \"empty-policy\",\n\t\t\tpol:          \"{}\",\n\t\t\tnodes:        types.Nodes{},\n\t\t\twantFilter:   tailcfg.FilterAllowAll,\n\t\t\twantMatchers: matcher.MatchesFromFilterRules(tailcfg.FilterAllowAll),\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tpm, err := NewPolicyManager([]byte(tt.pol), users, tt.nodes.ViewSlice())\n\t\t\trequire.NoError(t, err)\n\n\t\t\tfilter, matchers := pm.Filter()\n\t\t\tif diff := cmp.Diff(tt.wantFilter, filter); diff != \"\" {\n\t\t\t\tt.Errorf(\"Filter() filter mismatch (-want +got):\\n%s\", diff)\n\t\t\t}\n\n\t\t\tif diff := cmp.Diff(\n\t\t\t\ttt.wantMatchers,\n\t\t\t\tmatchers,\n\t\t\t\tcmp.AllowUnexported(matcher.Match{}),\n\t\t\t); diff != \"\" {\n\t\t\t\tt.Errorf(\"Filter() matchers mismatch (-want +got):\\n%s\", diff)\n\t\t\t}\n\n\t\t\t// TODO(kradalby): Test SSH Policy\n\t\t})\n\t}\n}\n\nfunc TestInvalidateAutogroupSelfCache(t *testing.T) {\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"user1\", Email: \"user1@headscale.net\"},\n\t\t{Model: gorm.Model{ID: 2}, Name: \"user2\", Email: \"user2@headscale.net\"},\n\t\t{Model: gorm.Model{ID: 3}, Name: \"user3\", Email: \"user3@headscale.net\"},\n\t}\n\n\t//nolint:goconst // test-specific inline policy for clarity\n\tpolicy := `{\n\t\t\"acls\": [\n\t\t\t{\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"src\": [\"autogroup:member\"],\n\t\t\t\t\"dst\": [\"autogroup:self:*\"]\n\t\t\t}\n\t\t]\n\t}`\n\n\tinitialNodes := types.Nodes{\n\t\tnode(\"user1-node1\", \"100.64.0.1\", \"fd7a:115c:a1e0::1\", users[0]),\n\t\tnode(\"user1-node2\", \"100.64.0.2\", \"fd7a:115c:a1e0::2\", users[0]),\n\t\tnode(\"user2-node1\", \"100.64.0.3\", \"fd7a:115c:a1e0::3\", users[1]),\n\t\tnode(\"user3-node1\", \"100.64.0.4\", \"fd7a:115c:a1e0::4\", users[2]),\n\t}\n\n\tfor i, n := range initialNodes {\n\t\tn.ID = types.NodeID(i + 1) //nolint:gosec // safe conversion in test\n\t}\n\n\tpm, err := NewPolicyManager([]byte(policy), users, initialNodes.ViewSlice())\n\trequire.NoError(t, err)\n\n\t// Add to cache by calling FilterForNode for each node\n\tfor _, n := range initialNodes {\n\t\t_, err := pm.FilterForNode(n.View())\n\t\trequire.NoError(t, err)\n\t}\n\n\trequire.Len(t, pm.filterRulesMap, len(initialNodes))\n\n\ttests := []struct {\n\t\tname            string\n\t\tnewNodes        types.Nodes\n\t\texpectedCleared int\n\t\tdescription     string\n\t}{\n\t\t{\n\t\t\tname: \"no_changes\",\n\t\t\tnewNodes: types.Nodes{\n\t\t\t\tnode(\"user1-node1\", 
\"100.64.0.1\", \"fd7a:115c:a1e0::1\", users[0]),\n\t\t\t\tnode(\"user1-node2\", \"100.64.0.2\", \"fd7a:115c:a1e0::2\", users[0]),\n\t\t\t\tnode(\"user2-node1\", \"100.64.0.3\", \"fd7a:115c:a1e0::3\", users[1]),\n\t\t\t\tnode(\"user3-node1\", \"100.64.0.4\", \"fd7a:115c:a1e0::4\", users[2]),\n\t\t\t},\n\t\t\texpectedCleared: 0,\n\t\t\tdescription:     \"No changes should clear no cache entries\",\n\t\t},\n\t\t{\n\t\t\tname: \"node_added\",\n\t\t\tnewNodes: types.Nodes{\n\t\t\t\tnode(\"user1-node1\", \"100.64.0.1\", \"fd7a:115c:a1e0::1\", users[0]),\n\t\t\t\tnode(\"user1-node2\", \"100.64.0.2\", \"fd7a:115c:a1e0::2\", users[0]),\n\t\t\t\tnode(\"user1-node3\", \"100.64.0.5\", \"fd7a:115c:a1e0::5\", users[0]), // New node\n\t\t\t\tnode(\"user2-node1\", \"100.64.0.3\", \"fd7a:115c:a1e0::3\", users[1]),\n\t\t\t\tnode(\"user3-node1\", \"100.64.0.4\", \"fd7a:115c:a1e0::4\", users[2]),\n\t\t\t},\n\t\t\texpectedCleared: 2, // user1's existing nodes should be cleared\n\t\t\tdescription:     \"Adding a node should clear cache for that user's existing nodes\",\n\t\t},\n\t\t{\n\t\t\tname: \"node_removed\",\n\t\t\tnewNodes: types.Nodes{\n\t\t\t\tnode(\"user1-node1\", \"100.64.0.1\", \"fd7a:115c:a1e0::1\", users[0]),\n\t\t\t\t// user1-node2 removed\n\t\t\t\tnode(\"user2-node1\", \"100.64.0.3\", \"fd7a:115c:a1e0::3\", users[1]),\n\t\t\t\tnode(\"user3-node1\", \"100.64.0.4\", \"fd7a:115c:a1e0::4\", users[2]),\n\t\t\t},\n\t\t\texpectedCleared: 2, // user1's remaining node + removed node should be cleared\n\t\t\tdescription:     \"Removing a node should clear cache for that user's remaining nodes\",\n\t\t},\n\t\t{\n\t\t\tname: \"user_changed\",\n\t\t\tnewNodes: types.Nodes{\n\t\t\t\tnode(\"user1-node1\", \"100.64.0.1\", \"fd7a:115c:a1e0::1\", users[0]),\n\t\t\t\tnode(\"user1-node2\", \"100.64.0.2\", \"fd7a:115c:a1e0::2\", users[2]), // Changed to user3\n\t\t\t\tnode(\"user2-node1\", \"100.64.0.3\", \"fd7a:115c:a1e0::3\", users[1]),\n\t\t\t\tnode(\"user3-node1\", \"100.64.0.4\", \"fd7a:115c:a1e0::4\", users[2]),\n\t\t\t},\n\t\t\texpectedCleared: 3, // user1's node + user2's node + user3's nodes should be cleared\n\t\t\tdescription:     \"Changing a node's user should clear cache for both old and new users\",\n\t\t},\n\t\t{\n\t\t\tname: \"ip_changed\",\n\t\t\tnewNodes: types.Nodes{\n\t\t\t\tnode(\"user1-node1\", \"100.64.0.10\", \"fd7a:115c:a1e0::10\", users[0]), // IP changed\n\t\t\t\tnode(\"user1-node2\", \"100.64.0.2\", \"fd7a:115c:a1e0::2\", users[0]),\n\t\t\t\tnode(\"user2-node1\", \"100.64.0.3\", \"fd7a:115c:a1e0::3\", users[1]),\n\t\t\t\tnode(\"user3-node1\", \"100.64.0.4\", \"fd7a:115c:a1e0::4\", users[2]),\n\t\t\t},\n\t\t\texpectedCleared: 2, // user1's nodes should be cleared\n\t\t\tdescription:     \"Changing a node's IP should clear cache for that user's nodes\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tfor i, n := range tt.newNodes {\n\t\t\t\tfound := false\n\n\t\t\t\tfor _, origNode := range initialNodes {\n\t\t\t\t\tif n.Hostname == origNode.Hostname {\n\t\t\t\t\t\tn.ID = origNode.ID\n\t\t\t\t\t\tfound = true\n\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif !found {\n\t\t\t\t\tn.ID = types.NodeID(len(initialNodes) + i + 1) //nolint:gosec // safe conversion in test\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tpm.filterRulesMap = make(map[types.NodeID][]tailcfg.FilterRule)\n\t\t\tfor _, n := range initialNodes {\n\t\t\t\t_, err := pm.FilterForNode(n.View())\n\t\t\t\trequire.NoError(t, err)\n\t\t\t}\n\n\t\t\tinitialCacheSize := 
len(pm.filterRulesMap)\n\t\t\trequire.Equal(t, len(initialNodes), initialCacheSize)\n\n\t\t\tpm.invalidateAutogroupSelfCache(initialNodes.ViewSlice(), tt.newNodes.ViewSlice())\n\n\t\t\t// Verify the expected number of cache entries were cleared\n\t\t\tfinalCacheSize := len(pm.filterRulesMap)\n\t\t\tclearedEntries := initialCacheSize - finalCacheSize\n\t\t\trequire.Equal(t, tt.expectedCleared, clearedEntries, tt.description)\n\t\t})\n\t}\n}\n\n// TestInvalidateGlobalPolicyCache tests the cache invalidation logic for global policies.\nfunc TestInvalidateGlobalPolicyCache(t *testing.T) {\n\tmustIPPtr := func(s string) *netip.Addr {\n\t\tip := netip.MustParseAddr(s)\n\t\treturn &ip\n\t}\n\n\ttests := []struct {\n\t\tname               string\n\t\toldNodes           types.Nodes\n\t\tnewNodes           types.Nodes\n\t\tinitialCache       map[types.NodeID][]tailcfg.FilterRule\n\t\texpectedCacheAfter map[types.NodeID]bool // true = should exist, false = should not exist\n\t}{\n\t\t{\n\t\t\tname: \"node property changed - invalidates only that node\",\n\t\t\toldNodes: types.Nodes{\n\t\t\t\t&types.Node{ID: 1, IPv4: mustIPPtr(\"100.64.0.1\")},\n\t\t\t\t&types.Node{ID: 2, IPv4: mustIPPtr(\"100.64.0.2\")},\n\t\t\t},\n\t\t\tnewNodes: types.Nodes{\n\t\t\t\t&types.Node{ID: 1, IPv4: mustIPPtr(\"100.64.0.99\")}, // Changed\n\t\t\t\t&types.Node{ID: 2, IPv4: mustIPPtr(\"100.64.0.2\")},  // Unchanged\n\t\t\t},\n\t\t\tinitialCache: map[types.NodeID][]tailcfg.FilterRule{\n\t\t\t\t1: {},\n\t\t\t\t2: {},\n\t\t\t},\n\t\t\texpectedCacheAfter: map[types.NodeID]bool{\n\t\t\t\t1: false, // Invalidated\n\t\t\t\t2: true,  // Preserved\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiple nodes changed\",\n\t\t\toldNodes: types.Nodes{\n\t\t\t\t&types.Node{ID: 1, IPv4: mustIPPtr(\"100.64.0.1\")},\n\t\t\t\t&types.Node{ID: 2, IPv4: mustIPPtr(\"100.64.0.2\")},\n\t\t\t\t&types.Node{ID: 3, IPv4: mustIPPtr(\"100.64.0.3\")},\n\t\t\t},\n\t\t\tnewNodes: types.Nodes{\n\t\t\t\t&types.Node{ID: 1, IPv4: mustIPPtr(\"100.64.0.99\")}, // Changed\n\t\t\t\t&types.Node{ID: 2, IPv4: mustIPPtr(\"100.64.0.2\")},  // Unchanged\n\t\t\t\t&types.Node{ID: 3, IPv4: mustIPPtr(\"100.64.0.88\")}, // Changed\n\t\t\t},\n\t\t\tinitialCache: map[types.NodeID][]tailcfg.FilterRule{\n\t\t\t\t1: {},\n\t\t\t\t2: {},\n\t\t\t\t3: {},\n\t\t\t},\n\t\t\texpectedCacheAfter: map[types.NodeID]bool{\n\t\t\t\t1: false, // Invalidated\n\t\t\t\t2: true,  // Preserved\n\t\t\t\t3: false, // Invalidated\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"node deleted - removes from cache\",\n\t\t\toldNodes: types.Nodes{\n\t\t\t\t&types.Node{ID: 1, IPv4: mustIPPtr(\"100.64.0.1\")},\n\t\t\t\t&types.Node{ID: 2, IPv4: mustIPPtr(\"100.64.0.2\")},\n\t\t\t},\n\t\t\tnewNodes: types.Nodes{\n\t\t\t\t&types.Node{ID: 2, IPv4: mustIPPtr(\"100.64.0.2\")},\n\t\t\t},\n\t\t\tinitialCache: map[types.NodeID][]tailcfg.FilterRule{\n\t\t\t\t1: {},\n\t\t\t\t2: {},\n\t\t\t},\n\t\t\texpectedCacheAfter: map[types.NodeID]bool{\n\t\t\t\t1: false, // Deleted\n\t\t\t\t2: true,  // Preserved\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"node added - no cache invalidation needed\",\n\t\t\toldNodes: types.Nodes{\n\t\t\t\t&types.Node{ID: 1, IPv4: mustIPPtr(\"100.64.0.1\")},\n\t\t\t},\n\t\t\tnewNodes: types.Nodes{\n\t\t\t\t&types.Node{ID: 1, IPv4: mustIPPtr(\"100.64.0.1\")},\n\t\t\t\t&types.Node{ID: 2, IPv4: mustIPPtr(\"100.64.0.2\")}, // New\n\t\t\t},\n\t\t\tinitialCache: map[types.NodeID][]tailcfg.FilterRule{\n\t\t\t\t1: {},\n\t\t\t},\n\t\t\texpectedCacheAfter: map[types.NodeID]bool{\n\t\t\t\t1: true,  // Preserved\n\t\t\t\t2: 
false, // Not in cache (new node)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"no changes - preserves all cache\",\n\t\t\toldNodes: types.Nodes{\n\t\t\t\t&types.Node{ID: 1, IPv4: mustIPPtr(\"100.64.0.1\")},\n\t\t\t\t&types.Node{ID: 2, IPv4: mustIPPtr(\"100.64.0.2\")},\n\t\t\t},\n\t\t\tnewNodes: types.Nodes{\n\t\t\t\t&types.Node{ID: 1, IPv4: mustIPPtr(\"100.64.0.1\")},\n\t\t\t\t&types.Node{ID: 2, IPv4: mustIPPtr(\"100.64.0.2\")},\n\t\t\t},\n\t\t\tinitialCache: map[types.NodeID][]tailcfg.FilterRule{\n\t\t\t\t1: {},\n\t\t\t\t2: {},\n\t\t\t},\n\t\t\texpectedCacheAfter: map[types.NodeID]bool{\n\t\t\t\t1: true,\n\t\t\t\t2: true,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"routes changed - invalidates that node only\",\n\t\t\toldNodes: types.Nodes{\n\t\t\t\t&types.Node{\n\t\t\t\t\tID:             1,\n\t\t\t\t\tIPv4:           mustIPPtr(\"100.64.0.1\"),\n\t\t\t\t\tHostinfo:       &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix(\"10.0.0.0/24\"), netip.MustParsePrefix(\"192.168.0.0/24\")}},\n\t\t\t\t\tApprovedRoutes: []netip.Prefix{netip.MustParsePrefix(\"10.0.0.0/24\")},\n\t\t\t\t},\n\t\t\t\t&types.Node{ID: 2, IPv4: mustIPPtr(\"100.64.0.2\")},\n\t\t\t},\n\t\t\tnewNodes: types.Nodes{\n\t\t\t\t&types.Node{\n\t\t\t\t\tID:             1,\n\t\t\t\t\tIPv4:           mustIPPtr(\"100.64.0.1\"),\n\t\t\t\t\tHostinfo:       &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix(\"10.0.0.0/24\"), netip.MustParsePrefix(\"192.168.0.0/24\")}},\n\t\t\t\t\tApprovedRoutes: []netip.Prefix{netip.MustParsePrefix(\"192.168.0.0/24\")}, // Changed\n\t\t\t\t},\n\t\t\t\t&types.Node{ID: 2, IPv4: mustIPPtr(\"100.64.0.2\")},\n\t\t\t},\n\t\t\tinitialCache: map[types.NodeID][]tailcfg.FilterRule{\n\t\t\t\t1: {},\n\t\t\t\t2: {},\n\t\t\t},\n\t\t\texpectedCacheAfter: map[types.NodeID]bool{\n\t\t\t\t1: false, // Invalidated\n\t\t\t\t2: true,  // Preserved\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tpm := &PolicyManager{\n\t\t\t\tnodes:             tt.oldNodes.ViewSlice(),\n\t\t\t\tfilterRulesMap:    tt.initialCache,\n\t\t\t\tusesAutogroupSelf: false,\n\t\t\t}\n\n\t\t\tpm.invalidateGlobalPolicyCache(tt.newNodes.ViewSlice())\n\n\t\t\t// Verify cache state\n\t\t\tfor nodeID, shouldExist := range tt.expectedCacheAfter {\n\t\t\t\t_, exists := pm.filterRulesMap[nodeID]\n\t\t\t\trequire.Equal(t, shouldExist, exists, \"node %d cache existence mismatch\", nodeID)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestAutogroupSelfReducedVsUnreducedRules verifies that:\n// 1. BuildPeerMap uses unreduced compiled rules for determining peer relationships\n// 2. 
FilterForNode returns reduced compiled rules for packet filters.\nfunc TestAutogroupSelfReducedVsUnreducedRules(t *testing.T) {\n\tuser1 := types.User{Model: gorm.Model{ID: 1}, Name: \"user1\", Email: \"user1@headscale.net\"}\n\tuser2 := types.User{Model: gorm.Model{ID: 2}, Name: \"user2\", Email: \"user2@headscale.net\"}\n\tusers := types.Users{user1, user2}\n\n\t// Create two nodes\n\tnode1 := node(\"node1\", \"100.64.0.1\", \"fd7a:115c:a1e0::1\", user1)\n\tnode1.ID = 1\n\tnode2 := node(\"node2\", \"100.64.0.2\", \"fd7a:115c:a1e0::2\", user2)\n\tnode2.ID = 2\n\tnodes := types.Nodes{node1, node2}\n\n\t// Policy with autogroup:self - all members can reach their own devices\n\tpolicyStr := `{\n\t\t\"acls\": [\n\t\t\t{\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"src\": [\"autogroup:member\"],\n\t\t\t\t\"dst\": [\"autogroup:self:*\"]\n\t\t\t}\n\t\t]\n\t}`\n\n\tpm, err := NewPolicyManager([]byte(policyStr), users, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\trequire.True(t, pm.usesAutogroupSelf, \"policy should use autogroup:self\")\n\n\t// Test FilterForNode returns reduced rules\n\t// For node1: should have rules where node1 is in destinations (its own IP)\n\tfilterNode1, err := pm.FilterForNode(nodes[0].View())\n\trequire.NoError(t, err)\n\n\t// For node2: should have rules where node2 is in destinations (its own IP)\n\tfilterNode2, err := pm.FilterForNode(nodes[1].View())\n\trequire.NoError(t, err)\n\n\t// FilterForNode should return reduced rules - verify they only contain the node's own IPs as destinations\n\t// For node1, destinations should only be node1's IPs\n\tnode1IPs := []string{\"100.64.0.1/32\", \"100.64.0.1\", \"fd7a:115c:a1e0::1/128\", \"fd7a:115c:a1e0::1\"}\n\n\tfor _, rule := range filterNode1 {\n\t\tfor _, dst := range rule.DstPorts {\n\t\t\trequire.Contains(t, node1IPs, dst.IP,\n\t\t\t\t\"node1 filter should only contain node1's IPs as destinations\")\n\t\t}\n\t}\n\n\t// For node2, destinations should only be node2's IPs\n\tnode2IPs := []string{\"100.64.0.2/32\", \"100.64.0.2\", \"fd7a:115c:a1e0::2/128\", \"fd7a:115c:a1e0::2\"}\n\n\tfor _, rule := range filterNode2 {\n\t\tfor _, dst := range rule.DstPorts {\n\t\t\trequire.Contains(t, node2IPs, dst.IP,\n\t\t\t\t\"node2 filter should only contain node2's IPs as destinations\")\n\t\t}\n\t}\n\n\t// Test BuildPeerMap uses unreduced rules\n\tpeerMap := pm.BuildPeerMap(nodes.ViewSlice())\n\n\t// According to the policy, user1 can reach autogroup:self (which expands to node1's own IPs for node1)\n\t// So node1 should be able to reach itself, but since we're looking at peer relationships,\n\t// node1 should NOT have itself in the peer map (nodes don't peer with themselves)\n\t// node2 should also not have any peers since user2 has no rules allowing it to reach anyone\n\n\t// Verify peer relationships based on unreduced rules\n\t// With unreduced rules, BuildPeerMap can properly determine that:\n\t// - node1 can access autogroup:self (its own IPs)\n\t// - node2 cannot access node1\n\trequire.Empty(t, peerMap[node1.ID], \"node1 should have no peers (can only reach itself)\")\n\trequire.Empty(t, peerMap[node2.ID], \"node2 should have no peers\")\n}\n\n// When separate ACL rules exist (one with autogroup:self, one with tag:router),\n// the autogroup:self rule should not prevent the tag:router rule from working.\n// This ensures that autogroup:self doesn't interfere with other ACL rules.\nfunc TestAutogroupSelfWithOtherRules(t *testing.T) {\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"test-1\", Email: 
\"test-1@example.com\"},\n\t\t{Model: gorm.Model{ID: 2}, Name: \"test-2\", Email: \"test-2@example.com\"},\n\t}\n\n\t// test-1 has a regular device\n\ttest1Node := &types.Node{\n\t\tID:       1,\n\t\tHostname: \"test-1-device\",\n\t\tIPv4:     ap(\"100.64.0.1\"),\n\t\tIPv6:     ap(\"fd7a:115c:a1e0::1\"),\n\t\tUser:     new(users[0]),\n\t\tUserID:   new(users[0].ID),\n\t\tHostinfo: &tailcfg.Hostinfo{},\n\t}\n\n\t// test-2 has a router device with tag:node-router\n\ttest2RouterNode := &types.Node{\n\t\tID:       2,\n\t\tHostname: \"test-2-router\",\n\t\tIPv4:     ap(\"100.64.0.2\"),\n\t\tIPv6:     ap(\"fd7a:115c:a1e0::2\"),\n\t\tUser:     new(users[1]),\n\t\tUserID:   new(users[1].ID),\n\t\tTags:     []string{\"tag:node-router\"},\n\t\tHostinfo: &tailcfg.Hostinfo{},\n\t}\n\n\tnodes := types.Nodes{test1Node, test2RouterNode}\n\n\t// This matches the exact policy from issue #2838:\n\t// - First rule: autogroup:member -> autogroup:self (allows users to see their own devices)\n\t// - Second rule: group:home -> tag:node-router (should allow group members to see router)\n\tpolicy := `{\n\t\t\"groups\": {\n\t\t\t\"group:home\": [\"test-1@example.com\", \"test-2@example.com\"]\n\t\t},\n\t\t\"tagOwners\": {\n\t\t\t\"tag:node-router\": [\"group:home\"]\n\t\t},\n\t\t\"acls\": [\n\t\t\t{\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"src\": [\"autogroup:member\"],\n\t\t\t\t\"dst\": [\"autogroup:self:*\"]\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"src\": [\"group:home\"],\n\t\t\t\t\"dst\": [\"tag:node-router:*\"]\n\t\t\t}\n\t\t]\n\t}`\n\n\tpm, err := NewPolicyManager([]byte(policy), users, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\n\tpeerMap := pm.BuildPeerMap(nodes.ViewSlice())\n\n\t// test-1 (in group:home) should see:\n\t// 1. Their own node (from autogroup:self rule)\n\t// 2. 
The router node (from group:home -> tag:node-router rule)\n\ttest1Peers := peerMap[test1Node.ID]\n\n\t// Verify test-1 can see the router (group:home -> tag:node-router rule)\n\trequire.True(t, slices.ContainsFunc(test1Peers, func(n types.NodeView) bool {\n\t\treturn n.ID() == test2RouterNode.ID\n\t}), \"test-1 should see test-2's router via group:home -> tag:node-router rule, even when autogroup:self rule exists (issue #2838)\")\n\n\t// Verify that test-1 has filter rules (including autogroup:self and tag:node-router access)\n\trules, err := pm.FilterForNode(test1Node.View())\n\trequire.NoError(t, err)\n\trequire.NotEmpty(t, rules, \"test-1 should have filter rules from both ACL rules\")\n}\n\n// TestAutogroupSelfPolicyUpdateTriggersMapResponse verifies that when a policy with\n// autogroup:self is updated, SetPolicy returns true to trigger MapResponse updates,\n// even if the global filter hash didn't change (which is always empty for autogroup:self).\n// This fixes the issue where policy updates would clear caches but not trigger updates,\n// leaving nodes with stale filter rules until reconnect.\nfunc TestAutogroupSelfPolicyUpdateTriggersMapResponse(t *testing.T) {\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"test-1\", Email: \"test-1@example.com\"},\n\t\t{Model: gorm.Model{ID: 2}, Name: \"test-2\", Email: \"test-2@example.com\"},\n\t}\n\n\ttest1Node := &types.Node{\n\t\tID:       1,\n\t\tHostname: \"test-1-device\",\n\t\tIPv4:     ap(\"100.64.0.1\"),\n\t\tIPv6:     ap(\"fd7a:115c:a1e0::1\"),\n\t\tUser:     new(users[0]),\n\t\tUserID:   new(users[0].ID),\n\t\tHostinfo: &tailcfg.Hostinfo{},\n\t}\n\n\ttest2Node := &types.Node{\n\t\tID:       2,\n\t\tHostname: \"test-2-device\",\n\t\tIPv4:     ap(\"100.64.0.2\"),\n\t\tIPv6:     ap(\"fd7a:115c:a1e0::2\"),\n\t\tUser:     new(users[1]),\n\t\tUserID:   new(users[1].ID),\n\t\tHostinfo: &tailcfg.Hostinfo{},\n\t}\n\n\tnodes := types.Nodes{test1Node, test2Node}\n\n\t// Initial policy with autogroup:self\n\tinitialPolicy := `{\n\t\t\"acls\": [\n\t\t\t{\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"src\": [\"autogroup:member\"],\n\t\t\t\t\"dst\": [\"autogroup:self:*\"]\n\t\t\t}\n\t\t]\n\t}`\n\n\tpm, err := NewPolicyManager([]byte(initialPolicy), users, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\trequire.True(t, pm.usesAutogroupSelf, \"policy should use autogroup:self\")\n\n\t// Get initial filter rules for test-1 (should be cached)\n\trules1, err := pm.FilterForNode(test1Node.View())\n\trequire.NoError(t, err)\n\trequire.NotEmpty(t, rules1, \"test-1 should have filter rules\")\n\n\t// Update policy with a different ACL that still results in empty global filter\n\t// (only autogroup:self rules, which compile to empty global filter)\n\t// We add a comment/description change by adding groups (which don't affect filter compilation)\n\tupdatedPolicy := `{\n\t\t\"groups\": {\n\t\t\t\"group:test\": [\"test-1@example.com\"]\n\t\t},\n\t\t\"acls\": [\n\t\t\t{\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"src\": [\"autogroup:member\"],\n\t\t\t\t\"dst\": [\"autogroup:self:*\"]\n\t\t\t}\n\t\t]\n\t}`\n\n\t// SetPolicy should return true even though global filter hash didn't change\n\tpolicyChanged, err := pm.SetPolicy([]byte(updatedPolicy))\n\trequire.NoError(t, err)\n\trequire.True(t, policyChanged, \"SetPolicy should return true when policy content changes, even if global filter hash unchanged (autogroup:self)\")\n\n\t// Verify that caches were cleared and new rules are generated\n\t// The cache should be empty, so FilterForNode will 
recompile\n\trules2, err := pm.FilterForNode(test1Node.View())\n\trequire.NoError(t, err)\n\trequire.NotEmpty(t, rules2, \"test-1 should have filter rules after policy update\")\n\n\t// Verify that the policy hash tracking works - a second identical update should return false\n\tpolicyChanged2, err := pm.SetPolicy([]byte(updatedPolicy))\n\trequire.NoError(t, err)\n\trequire.False(t, policyChanged2, \"SetPolicy should return false when policy content hasn't changed\")\n}\n\n// TestTagPropagationToPeerMap tests that when a node's tags change,\n// the peer map is correctly updated. This is a regression test for\n// https://github.com/juanfont/headscale/issues/2389\nfunc TestTagPropagationToPeerMap(t *testing.T) {\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"user1\", Email: \"user1@headscale.net\"},\n\t\t{Model: gorm.Model{ID: 2}, Name: \"user2\", Email: \"user2@headscale.net\"},\n\t}\n\n\t// Policy: user2 can access tag:web nodes\n\tpolicy := `{\n\t\t\"tagOwners\": {\n\t\t\t\"tag:web\": [\"user1@headscale.net\"],\n\t\t\t\"tag:internal\": [\"user1@headscale.net\"]\n\t\t},\n\t\t\"acls\": [\n\t\t\t{\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"src\": [\"user2@headscale.net\"],\n\t\t\t\t\"dst\": [\"user2@headscale.net:*\"]\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"src\": [\"user2@headscale.net\"],\n\t\t\t\t\"dst\": [\"tag:web:*\"]\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"src\": [\"tag:web\"],\n\t\t\t\t\"dst\": [\"user2@headscale.net:*\"]\n\t\t\t}\n\t\t]\n\t}`\n\n\t// user1's node starts with tag:web and tag:internal\n\tuser1Node := &types.Node{\n\t\tID:       1,\n\t\tHostname: \"user1-node\",\n\t\tIPv4:     ap(\"100.64.0.1\"),\n\t\tIPv6:     ap(\"fd7a:115c:a1e0::1\"),\n\t\tUser:     new(users[0]),\n\t\tUserID:   new(users[0].ID),\n\t\tTags:     []string{\"tag:web\", \"tag:internal\"},\n\t}\n\n\t// user2's node (no tags)\n\tuser2Node := &types.Node{\n\t\tID:       2,\n\t\tHostname: \"user2-node\",\n\t\tIPv4:     ap(\"100.64.0.2\"),\n\t\tIPv6:     ap(\"fd7a:115c:a1e0::2\"),\n\t\tUser:     new(users[1]),\n\t\tUserID:   new(users[1].ID),\n\t}\n\n\tinitialNodes := types.Nodes{user1Node, user2Node}\n\n\tpm, err := NewPolicyManager([]byte(policy), users, initialNodes.ViewSlice())\n\trequire.NoError(t, err)\n\n\t// Initial state: user2 should see user1 as a peer (user1 has tag:web)\n\tinitialPeerMap := pm.BuildPeerMap(initialNodes.ViewSlice())\n\n\t// Check user2's peers - should include user1\n\tuser2Peers := initialPeerMap[user2Node.ID]\n\trequire.Len(t, user2Peers, 1, \"user2 should have 1 peer initially (user1 with tag:web)\")\n\trequire.Equal(t, user1Node.ID, user2Peers[0].ID(), \"user2's peer should be user1\")\n\n\t// Check user1's peers - should include user2 (bidirectional ACL)\n\tuser1Peers := initialPeerMap[user1Node.ID]\n\trequire.Len(t, user1Peers, 1, \"user1 should have 1 peer initially (user2)\")\n\trequire.Equal(t, user2Node.ID, user1Peers[0].ID(), \"user1's peer should be user2\")\n\n\t// Now change user1's tags: remove tag:web, keep only tag:internal\n\tuser1NodeUpdated := &types.Node{\n\t\tID:       1,\n\t\tHostname: \"user1-node\",\n\t\tIPv4:     ap(\"100.64.0.1\"),\n\t\tIPv6:     ap(\"fd7a:115c:a1e0::1\"),\n\t\tUser:     new(users[0]),\n\t\tUserID:   new(users[0].ID),\n\t\tTags:     []string{\"tag:internal\"}, // tag:web removed!\n\t}\n\n\tupdatedNodes := types.Nodes{user1NodeUpdated, user2Node}\n\n\t// SetNodes should detect the tag change\n\tchanged, err := pm.SetNodes(updatedNodes.ViewSlice())\n\trequire.NoError(t, 
err)\n\trequire.True(t, changed, \"SetNodes should return true when tags change\")\n\n\t// After tag change: user2 should NOT see user1 as a peer anymore\n\t// (no ACL allows user2 to access tag:internal)\n\tupdatedPeerMap := pm.BuildPeerMap(updatedNodes.ViewSlice())\n\n\t// Check user2's peers - should be empty now\n\tuser2PeersAfter := updatedPeerMap[user2Node.ID]\n\trequire.Empty(t, user2PeersAfter, \"user2 should have no peers after tag:web is removed from user1\")\n\n\t// Check user1's peers - should also be empty\n\tuser1PeersAfter := updatedPeerMap[user1Node.ID]\n\trequire.Empty(t, user1PeersAfter, \"user1 should have no peers after tag:web is removed\")\n\n\t// Also verify MatchersForNode returns non-empty matchers and ReduceNodes filters correctly\n\t// This simulates what buildTailPeers does in the mapper\n\tmatchersForUser2, err := pm.MatchersForNode(user2Node.View())\n\trequire.NoError(t, err)\n\trequire.NotEmpty(t, matchersForUser2, \"MatchersForNode should return non-empty matchers (at least self-access rule)\")\n\n\t// Test ReduceNodes logic with the updated nodes and matchers\n\t// This is what buildTailPeers does - it takes peers from ListPeers (which might include user1)\n\t// and filters them using ReduceNodes with the updated matchers\n\t// Inline the ReduceNodes logic to avoid import cycle\n\tuser2View := user2Node.View()\n\tuser1UpdatedView := user1NodeUpdated.View()\n\n\t// Check if user2 can access user1 OR user1 can access user2\n\tcanAccess := user2View.CanAccess(matchersForUser2, user1UpdatedView) ||\n\t\tuser1UpdatedView.CanAccess(matchersForUser2, user2View)\n\n\trequire.False(t, canAccess, \"user2 should NOT be able to access user1 after tag:web is removed (ReduceNodes should filter out)\")\n}\n\n// TestAutogroupSelfWithAdminOverride reproduces issue #2990:\n// When autogroup:self is combined with an admin rule (group:admin -> *:*),\n// tagged nodes become invisible to admins because BuildPeerMap uses asymmetric\n// peer visibility in the autogroup:self path.\n//\n// The fix requires symmetric visibility: if admin can access tagged node,\n// BOTH admin and tagged node should see each other as peers.\nfunc TestAutogroupSelfWithAdminOverride(t *testing.T) {\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"admin\", Email: \"admin@example.com\"},\n\t\t{Model: gorm.Model{ID: 2}, Name: \"user1\", Email: \"user1@example.com\"},\n\t}\n\n\t// Admin has a regular device\n\tadminNode := &types.Node{\n\t\tID:       1,\n\t\tHostname: \"admin-device\",\n\t\tIPv4:     ap(\"100.64.0.1\"),\n\t\tIPv6:     ap(\"fd7a:115c:a1e0::1\"),\n\t\tUser:     new(users[0]),\n\t\tUserID:   new(users[0].ID),\n\t\tHostinfo: &tailcfg.Hostinfo{},\n\t}\n\n\t// user1 has a tagged server\n\tuser1TaggedNode := &types.Node{\n\t\tID:       2,\n\t\tHostname: \"user1-server\",\n\t\tIPv4:     ap(\"100.64.0.2\"),\n\t\tIPv6:     ap(\"fd7a:115c:a1e0::2\"),\n\t\tUser:     new(users[1]),\n\t\tUserID:   new(users[1].ID),\n\t\tTags:     []string{\"tag:server\"},\n\t\tHostinfo: &tailcfg.Hostinfo{},\n\t}\n\n\tnodes := types.Nodes{adminNode, user1TaggedNode}\n\n\t// Policy from issue #2990:\n\t// - group:admin has full access to everything (*:*)\n\t// - autogroup:member -> autogroup:self (allows users to see their own devices)\n\t//\n\t// Bug: The tagged server becomes invisible to admin because:\n\t// 1. Admin can access tagged server (via *:* rule)\n\t// 2. Tagged server CANNOT access admin (no rule for that)\n\t// 3. 
With asymmetric logic, tagged server is not added to admin's peer list\n\tpolicy := `{\n\t\t\"groups\": {\n\t\t\t\"group:admin\": [\"admin@example.com\"]\n\t\t},\n\t\t\"tagOwners\": {\n\t\t\t\"tag:server\": [\"user1@example.com\"]\n\t\t},\n\t\t\"acls\": [\n\t\t\t{\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"src\": [\"group:admin\"],\n\t\t\t\t\"dst\": [\"*:*\"]\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"src\": [\"autogroup:member\"],\n\t\t\t\t\"dst\": [\"autogroup:self:*\"]\n\t\t\t}\n\t\t]\n\t}`\n\n\tpm, err := NewPolicyManager([]byte(policy), users, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\n\tpeerMap := pm.BuildPeerMap(nodes.ViewSlice())\n\n\t// Admin should see the tagged server as a peer (via group:admin -> *:* rule)\n\tadminPeers := peerMap[adminNode.ID]\n\trequire.True(t, slices.ContainsFunc(adminPeers, func(n types.NodeView) bool {\n\t\treturn n.ID() == user1TaggedNode.ID\n\t}), \"admin should see tagged server as peer via *:* rule (issue #2990)\")\n\n\t// Tagged server should also see admin as a peer (symmetric visibility)\n\t// Even though tagged server cannot ACCESS admin, it should still SEE admin\n\t// because admin CAN access it. This is required for proper network operation.\n\ttaggedPeers := peerMap[user1TaggedNode.ID]\n\trequire.True(t, slices.ContainsFunc(taggedPeers, func(n types.NodeView) bool {\n\t\treturn n.ID() == adminNode.ID\n\t}), \"tagged server should see admin as peer (symmetric visibility)\")\n}\n\n// TestAutogroupSelfSymmetricVisibility verifies that peer visibility is symmetric:\n// if node A can access node B, then both A and B should see each other as peers.\n// This is the same behavior as the global filter path.\nfunc TestAutogroupSelfSymmetricVisibility(t *testing.T) {\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"user1\", Email: \"user1@example.com\"},\n\t\t{Model: gorm.Model{ID: 2}, Name: \"user2\", Email: \"user2@example.com\"},\n\t}\n\n\t// user1 has device A\n\tdeviceA := &types.Node{\n\t\tID:       1,\n\t\tHostname: \"device-a\",\n\t\tIPv4:     ap(\"100.64.0.1\"),\n\t\tIPv6:     ap(\"fd7a:115c:a1e0::1\"),\n\t\tUser:     new(users[0]),\n\t\tUserID:   new(users[0].ID),\n\t\tHostinfo: &tailcfg.Hostinfo{},\n\t}\n\n\t// user2 has device B (tagged)\n\tdeviceB := &types.Node{\n\t\tID:       2,\n\t\tHostname: \"device-b\",\n\t\tIPv4:     ap(\"100.64.0.2\"),\n\t\tIPv6:     ap(\"fd7a:115c:a1e0::2\"),\n\t\tUser:     new(users[1]),\n\t\tUserID:   new(users[1].ID),\n\t\tTags:     []string{\"tag:web\"},\n\t\tHostinfo: &tailcfg.Hostinfo{},\n\t}\n\n\tnodes := types.Nodes{deviceA, deviceB}\n\n\t// One-way rule: user1 can access tag:web, but tag:web cannot access user1\n\tpolicy := `{\n\t\t\"tagOwners\": {\n\t\t\t\"tag:web\": [\"user2@example.com\"]\n\t\t},\n\t\t\"acls\": [\n\t\t\t{\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"src\": [\"user1@example.com\"],\n\t\t\t\t\"dst\": [\"tag:web:*\"]\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"src\": [\"autogroup:member\"],\n\t\t\t\t\"dst\": [\"autogroup:self:*\"]\n\t\t\t}\n\t\t]\n\t}`\n\n\tpm, err := NewPolicyManager([]byte(policy), users, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\n\tpeerMap := pm.BuildPeerMap(nodes.ViewSlice())\n\n\t// Device A (user1) should see device B (tag:web) as peer\n\taPeers := peerMap[deviceA.ID]\n\trequire.True(t, slices.ContainsFunc(aPeers, func(n types.NodeView) bool {\n\t\treturn n.ID() == deviceB.ID\n\t}), \"device A should see device B as peer (user1 -> tag:web rule)\")\n\n
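\t// Stated as a predicate, the visibility expectation exercised by these\n\t// tests is:\n\t//\n\t//\tpeer(a, b) == canAccess(a, b) || canAccess(b, a)\n\t//\n\t// i.e. one allowed direction is enough for both sides to list each other\n\t// (a restatement of the expected behaviour, not a call into the engine).\n\t// Device B (tag:web) should ALSO see device A as 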
peer (symmetric visibility)\n\t// Even though B cannot ACCESS A, B should still SEE A as a peer\n\tbPeers := peerMap[deviceB.ID]\n\trequire.True(t, slices.ContainsFunc(bPeers, func(n types.NodeView) bool {\n\t\treturn n.ID() == deviceA.ID\n\t}), \"device B should see device A as peer (symmetric visibility)\")\n}\n\n// TestAutogroupSelfDoesNotBreakOtherUsersAccess reproduces the Discord scenario\n// where enabling autogroup:self for superadmins should NOT break access for\n// other users who don't use autogroup:self.\n//\n// Scenario:\n// - Rule 1: [superadmin, admin, direction] -> [tag:common:*]\n// - Rule 2: [superadmin, admin] -> [tag:tech:*]\n// - Rule 3: [superadmin] -> [tag:privileged:*, autogroup:self:*]\n//\n// Expected behavior:\n// - Superadmin sees: tag:common, tag:tech, tag:privileged, and own devices\n// - Admin sees: tag:common, tag:tech\n// - Direction sees: tag:common\n// - All tagged nodes should be visible to users who can access them.\nfunc TestAutogroupSelfDoesNotBreakOtherUsersAccess(t *testing.T) {\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"superadmin\", Email: \"superadmin@example.com\"},\n\t\t{Model: gorm.Model{ID: 2}, Name: \"admin\", Email: \"admin@example.com\"},\n\t\t{Model: gorm.Model{ID: 3}, Name: \"direction\", Email: \"direction@example.com\"},\n\t\t{Model: gorm.Model{ID: 4}, Name: \"tagowner\", Email: \"tagowner@example.com\"},\n\t}\n\n\t// Create nodes:\n\t// - superadmin's device\n\t// - admin's device\n\t// - direction's device\n\t// - tagged server (tag:common)\n\t// - tagged server (tag:tech)\n\t// - tagged server (tag:privileged)\n\n\tsuperadminDevice := &types.Node{\n\t\tID:       1,\n\t\tHostname: \"superadmin-laptop\",\n\t\tUser:     new(users[0]),\n\t\tUserID:   new(users[0].ID),\n\t\tIPv4:     ap(\"100.64.0.1\"),\n\t\tHostinfo: &tailcfg.Hostinfo{},\n\t}\n\n\tadminDevice := &types.Node{\n\t\tID:       2,\n\t\tHostname: \"admin-laptop\",\n\t\tUser:     new(users[1]),\n\t\tUserID:   new(users[1].ID),\n\t\tIPv4:     ap(\"100.64.0.2\"),\n\t\tHostinfo: &tailcfg.Hostinfo{},\n\t}\n\n\tdirectionDevice := &types.Node{\n\t\tID:       3,\n\t\tHostname: \"direction-laptop\",\n\t\tUser:     new(users[2]),\n\t\tUserID:   new(users[2].ID),\n\t\tIPv4:     ap(\"100.64.0.3\"),\n\t\tHostinfo: &tailcfg.Hostinfo{},\n\t}\n\n\tcommonServer := &types.Node{\n\t\tID:       4,\n\t\tHostname: \"common-server\",\n\t\tUser:     new(users[3]),\n\t\tUserID:   new(users[3].ID),\n\t\tIPv4:     ap(\"100.64.0.4\"),\n\t\tTags:     []string{\"tag:common\"},\n\t\tHostinfo: &tailcfg.Hostinfo{},\n\t}\n\n\ttechServer := &types.Node{\n\t\tID:       5,\n\t\tHostname: \"tech-server\",\n\t\tUser:     new(users[3]),\n\t\tUserID:   new(users[3].ID),\n\t\tIPv4:     ap(\"100.64.0.5\"),\n\t\tTags:     []string{\"tag:tech\"},\n\t\tHostinfo: &tailcfg.Hostinfo{},\n\t}\n\n\tprivilegedServer := &types.Node{\n\t\tID:       6,\n\t\tHostname: \"privileged-server\",\n\t\tUser:     new(users[3]),\n\t\tUserID:   new(users[3].ID),\n\t\tIPv4:     ap(\"100.64.0.6\"),\n\t\tTags:     []string{\"tag:privileged\"},\n\t\tHostinfo: &tailcfg.Hostinfo{},\n\t}\n\n\tnodes := types.Nodes{\n\t\tsuperadminDevice,\n\t\tadminDevice,\n\t\tdirectionDevice,\n\t\tcommonServer,\n\t\ttechServer,\n\t\tprivilegedServer,\n\t}\n\n\tpolicy := `{\n\t\t\"groups\": {\n\t\t\t\"group:superadmin\": [\"superadmin@example.com\"],\n\t\t\t\"group:admin\": [\"admin@example.com\"],\n\t\t\t\"group:direction\": [\"direction@example.com\"]\n\t\t},\n\t\t\"tagOwners\": {\n\t\t\t\"tag:common\": 
[\"tagowner@example.com\"],\n\t\t\t\"tag:tech\": [\"tagowner@example.com\"],\n\t\t\t\"tag:privileged\": [\"tagowner@example.com\"]\n\t\t},\n\t\t\"acls\": [\n\t\t\t{\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"src\": [\"group:superadmin\", \"group:admin\", \"group:direction\"],\n\t\t\t\t\"dst\": [\"tag:common:*\"]\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"src\": [\"group:superadmin\", \"group:admin\"],\n\t\t\t\t\"dst\": [\"tag:tech:*\"]\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"src\": [\"group:superadmin\"],\n\t\t\t\t\"dst\": [\"tag:privileged:*\", \"autogroup:self:*\"]\n\t\t\t}\n\t\t]\n\t}`\n\n\tpm, err := NewPolicyManager([]byte(policy), users, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\n\tpeerMap := pm.BuildPeerMap(nodes.ViewSlice())\n\n\t// Helper to check if node A sees node B\n\tcanSee := func(a, b types.NodeID) bool {\n\t\tpeers := peerMap[a]\n\n\t\treturn slices.ContainsFunc(peers, func(n types.NodeView) bool {\n\t\t\treturn n.ID() == b\n\t\t})\n\t}\n\n\t// Superadmin should see all tagged servers\n\trequire.True(t, canSee(superadminDevice.ID, commonServer.ID),\n\t\t\"superadmin should see tag:common\")\n\trequire.True(t, canSee(superadminDevice.ID, techServer.ID),\n\t\t\"superadmin should see tag:tech\")\n\trequire.True(t, canSee(superadminDevice.ID, privilegedServer.ID),\n\t\t\"superadmin should see tag:privileged\")\n\n\t// Admin should see tag:common and tag:tech (but NOT tag:privileged)\n\trequire.True(t, canSee(adminDevice.ID, commonServer.ID),\n\t\t\"admin should see tag:common\")\n\trequire.True(t, canSee(adminDevice.ID, techServer.ID),\n\t\t\"admin should see tag:tech\")\n\trequire.False(t, canSee(adminDevice.ID, privilegedServer.ID),\n\t\t\"admin should NOT see tag:privileged\")\n\n\t// Direction should see tag:common only\n\trequire.True(t, canSee(directionDevice.ID, commonServer.ID),\n\t\t\"direction should see tag:common\")\n\trequire.False(t, canSee(directionDevice.ID, techServer.ID),\n\t\t\"direction should NOT see tag:tech\")\n\trequire.False(t, canSee(directionDevice.ID, privilegedServer.ID),\n\t\t\"direction should NOT see tag:privileged\")\n\n\t// Tagged servers should see their authorized users (symmetric visibility)\n\trequire.True(t, canSee(commonServer.ID, superadminDevice.ID),\n\t\t\"tag:common should see superadmin (symmetric)\")\n\trequire.True(t, canSee(commonServer.ID, adminDevice.ID),\n\t\t\"tag:common should see admin (symmetric)\")\n\trequire.True(t, canSee(commonServer.ID, directionDevice.ID),\n\t\t\"tag:common should see direction (symmetric)\")\n\n\trequire.True(t, canSee(techServer.ID, superadminDevice.ID),\n\t\t\"tag:tech should see superadmin (symmetric)\")\n\trequire.True(t, canSee(techServer.ID, adminDevice.ID),\n\t\t\"tag:tech should see admin (symmetric)\")\n\n\trequire.True(t, canSee(privilegedServer.ID, superadminDevice.ID),\n\t\t\"tag:privileged should see superadmin (symmetric)\")\n}\n\n// TestEmptyFilterNodesStillVisible verifies that nodes with empty filter rules\n// (e.g., tagged servers that are only destinations, never sources) are still\n// visible to nodes that can access them.\nfunc TestEmptyFilterNodesStillVisible(t *testing.T) {\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"admin\", Email: \"admin@example.com\"},\n\t\t{Model: gorm.Model{ID: 2}, Name: \"tagowner\", Email: \"tagowner@example.com\"},\n\t}\n\n\tadminDevice := &types.Node{\n\t\tID:       1,\n\t\tHostname: \"admin-laptop\",\n\t\tUser:     new(users[0]),\n\t\tUserID:   new(users[0].ID),\n\t\tIPv4:     
ap(\"100.64.0.1\"),\n\t\tHostinfo: &tailcfg.Hostinfo{},\n\t}\n\n\t// Tagged server - only a destination, never a source in any rule\n\t// This means its compiled filter rules will be empty\n\ttaggedServer := &types.Node{\n\t\tID:       2,\n\t\tHostname: \"server\",\n\t\tUser:     new(users[1]),\n\t\tUserID:   new(users[1].ID),\n\t\tIPv4:     ap(\"100.64.0.2\"),\n\t\tTags:     []string{\"tag:server\"},\n\t\tHostinfo: &tailcfg.Hostinfo{},\n\t}\n\n\tnodes := types.Nodes{adminDevice, taggedServer}\n\n\t// Policy where tagged server is ONLY a destination\n\tpolicy := `{\n\t\t\"groups\": {\n\t\t\t\"group:admin\": [\"admin@example.com\"]\n\t\t},\n\t\t\"tagOwners\": {\n\t\t\t\"tag:server\": [\"tagowner@example.com\"]\n\t\t},\n\t\t\"acls\": [\n\t\t\t{\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"src\": [\"group:admin\"],\n\t\t\t\t\"dst\": [\"tag:server:*\", \"autogroup:self:*\"]\n\t\t\t}\n\t\t]\n\t}`\n\n\tpm, err := NewPolicyManager([]byte(policy), users, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\n\tpeerMap := pm.BuildPeerMap(nodes.ViewSlice())\n\n\t// Admin should see the tagged server\n\tadminPeers := peerMap[adminDevice.ID]\n\trequire.True(t, slices.ContainsFunc(adminPeers, func(n types.NodeView) bool {\n\t\treturn n.ID() == taggedServer.ID\n\t}), \"admin should see tagged server\")\n\n\t// Tagged server should see admin (symmetric visibility)\n\t// Even though the server has no outbound rules (empty filter)\n\tserverPeers := peerMap[taggedServer.ID]\n\trequire.True(t, slices.ContainsFunc(serverPeers, func(n types.NodeView) bool {\n\t\treturn n.ID() == adminDevice.ID\n\t}), \"tagged server should see admin (symmetric visibility)\")\n}\n\n// TestAutogroupSelfCombinedWithTags verifies that autogroup:self combined with\n// specific tags in the same rule provides \"combined access\" - users get both\n// tagged nodes AND their own devices.\nfunc TestAutogroupSelfCombinedWithTags(t *testing.T) {\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"admin\", Email: \"admin@example.com\"},\n\t\t{Model: gorm.Model{ID: 2}, Name: \"tagowner\", Email: \"tagowner@example.com\"},\n\t}\n\n\t// Admin has two devices\n\tadminLaptop := &types.Node{\n\t\tID:       1,\n\t\tHostname: \"admin-laptop\",\n\t\tUser:     new(users[0]),\n\t\tUserID:   new(users[0].ID),\n\t\tIPv4:     ap(\"100.64.0.1\"),\n\t\tHostinfo: &tailcfg.Hostinfo{},\n\t}\n\n\tadminPhone := &types.Node{\n\t\tID:       2,\n\t\tHostname: \"admin-phone\",\n\t\tUser:     new(users[0]),\n\t\tUserID:   new(users[0].ID),\n\t\tIPv4:     ap(\"100.64.0.2\"),\n\t\tHostinfo: &tailcfg.Hostinfo{},\n\t}\n\n\t// Tagged web server\n\twebServer := &types.Node{\n\t\tID:       3,\n\t\tHostname: \"web-server\",\n\t\tUser:     new(users[1]),\n\t\tUserID:   new(users[1].ID),\n\t\tIPv4:     ap(\"100.64.0.3\"),\n\t\tTags:     []string{\"tag:web\"},\n\t\tHostinfo: &tailcfg.Hostinfo{},\n\t}\n\n\tnodes := types.Nodes{adminLaptop, adminPhone, webServer}\n\n\t// Combined rule: admin gets both tag:web AND autogroup:self\n\tpolicy := `{\n\t\t\"groups\": {\n\t\t\t\"group:admin\": [\"admin@example.com\"]\n\t\t},\n\t\t\"tagOwners\": {\n\t\t\t\"tag:web\": [\"tagowner@example.com\"]\n\t\t},\n\t\t\"acls\": [\n\t\t\t{\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"src\": [\"group:admin\"],\n\t\t\t\t\"dst\": [\"tag:web:*\", \"autogroup:self:*\"]\n\t\t\t}\n\t\t]\n\t}`\n\n\tpm, err := NewPolicyManager([]byte(policy), users, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\n\tpeerMap := pm.BuildPeerMap(nodes.ViewSlice())\n\n\t// Helper to check visibility\n\tcanSee := func(a, b 
types.NodeID) bool {\n\t\tpeers := peerMap[a]\n\n\t\treturn slices.ContainsFunc(peers, func(n types.NodeView) bool {\n\t\t\treturn n.ID() == b\n\t\t})\n\t}\n\n\t// Admin laptop should see: admin phone (autogroup:self) AND web server (tag:web)\n\trequire.True(t, canSee(adminLaptop.ID, adminPhone.ID),\n\t\t\"admin laptop should see admin phone (autogroup:self)\")\n\trequire.True(t, canSee(adminLaptop.ID, webServer.ID),\n\t\t\"admin laptop should see web server (tag:web)\")\n\n\t// Admin phone should see: admin laptop (autogroup:self) AND web server (tag:web)\n\trequire.True(t, canSee(adminPhone.ID, adminLaptop.ID),\n\t\t\"admin phone should see admin laptop (autogroup:self)\")\n\trequire.True(t, canSee(adminPhone.ID, webServer.ID),\n\t\t\"admin phone should see web server (tag:web)\")\n\n\t// Web server should see both admin devices (symmetric visibility)\n\trequire.True(t, canSee(webServer.ID, adminLaptop.ID),\n\t\t\"web server should see admin laptop (symmetric)\")\n\trequire.True(t, canSee(webServer.ID, adminPhone.ID),\n\t\t\"web server should see admin phone (symmetric)\")\n}\n\n// TestIssue2990SameUserTaggedDevice reproduces the exact scenario from issue #2990:\n// - One user (user1) who is in group:admin\n// - node1: user device (not tagged), belongs to user1\n// - node2: tagged with tag:admin, ALSO belongs to user1 (same user!)\n// - Rule: group:admin -> *:*\n// - Rule: autogroup:member -> autogroup:self:*\n//\n// Expected: node1 should be able to reach node2 via group:admin -> *:* rule.\nfunc TestIssue2990SameUserTaggedDevice(t *testing.T) {\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"user1\", Email: \"user1@\"},\n\t}\n\n\t// node1: user device (not tagged), belongs to user1\n\tnode1 := &types.Node{\n\t\tID:       1,\n\t\tHostname: \"node1\",\n\t\tUser:     new(users[0]),\n\t\tUserID:   new(users[0].ID),\n\t\tIPv4:     ap(\"100.64.0.1\"),\n\t\tIPv6:     ap(\"fd7a:115c:a1e0::1\"),\n\t\tHostinfo: &tailcfg.Hostinfo{},\n\t}\n\n\t// node2: tagged with tag:admin, ALSO belongs to user1 (same user!)\n\tnode2 := &types.Node{\n\t\tID:       2,\n\t\tHostname: \"node2\",\n\t\tUser:     new(users[0]),\n\t\tUserID:   new(users[0].ID),\n\t\tIPv4:     ap(\"100.64.0.2\"),\n\t\tIPv6:     ap(\"fd7a:115c:a1e0::2\"),\n\t\tTags:     []string{\"tag:admin\"},\n\t\tHostinfo: &tailcfg.Hostinfo{},\n\t}\n\n\tnodes := types.Nodes{node1, node2}\n\n\t// Exact policy from the issue report\n\tpolicy := `{\n\t\t\"groups\": {\n\t\t\t\"group:admin\": [\"user1@\"]\n\t\t},\n\t\t\"tagOwners\": {\n\t\t\t\"tag:admin\": [\"group:admin\"]\n\t\t},\n\t\t\"acls\": [\n\t\t\t{\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"src\": [\"group:admin\"],\n\t\t\t\t\"dst\": [\"*:*\"]\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"src\": [\"autogroup:member\"],\n\t\t\t\t\"dst\": [\"autogroup:self:*\"]\n\t\t\t}\n\t\t]\n\t}`\n\n\tpm, err := NewPolicyManager([]byte(policy), users, nodes.ViewSlice())\n\trequire.NoError(t, err)\n\n\t// Check peer visibility\n\tpeerMap := pm.BuildPeerMap(nodes.ViewSlice())\n\n\tcanSee := func(a, b types.NodeID) bool {\n\t\tpeers := peerMap[a]\n\n\t\treturn slices.ContainsFunc(peers, func(n types.NodeView) bool {\n\t\t\treturn n.ID() == b\n\t\t})\n\t}\n\n\t// node1 should see node2 (via group:admin -> *:* and symmetric visibility)\n\trequire.True(t, canSee(node1.ID, node2.ID),\n\t\t\"node1 should see node2 as peer\")\n\n\t// node2 should see node1 (symmetric visibility)\n\trequire.True(t, canSee(node2.ID, node1.ID),\n\t\t\"node2 should see node1 as peer (symmetric 
visibility)\")\n\n\t// Check packet filter for node1 - should allow access to node2\n\tfilter1, err := pm.FilterForNode(node1.View())\n\trequire.NoError(t, err)\n\tt.Logf(\"node1 filter rules: %d\", len(filter1))\n\n\tfor i, rule := range filter1 {\n\t\tt.Logf(\"  rule %d: SrcIPs=%v DstPorts=%v\", i, rule.SrcIPs, rule.DstPorts)\n\t}\n\n\t// node1's filter should include a rule allowing access to node2's IP\n\t// (via the group:admin -> *:* rule)\n\trequire.NotEmpty(t, filter1,\n\t\t\"node1's packet filter should have rules (group:admin -> *:*)\")\n\n\t// Check packet filter for node2 - tagged device, should have limited access\n\tfilter2, err := pm.FilterForNode(node2.View())\n\trequire.NoError(t, err)\n\tt.Logf(\"node2 filter rules: %d\", len(filter2))\n\n\tfor i, rule := range filter2 {\n\t\tt.Logf(\"  rule %d: SrcIPs=%v DstPorts=%v\", i, rule.SrcIPs, rule.DstPorts)\n\t}\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/tailscale_compat_test.go",
    "content": "// This file is \"generated\" by Claude.\n// It contains a large set of input ACL/Policy JSON configurations that\n// the AI agent has systematically applied to a Tailnet on Tailscale SaaS\n// and then observed the individual clients connected to the Tailnet\n// with a given policy and recorded the resulting Packet filter rules sent\n// to the clients.\n//\n// There is likely a lot of duplicate or overlapping tests, however, the main\n// exercise of this work was to create a comperehensive test set for comparing\n// the behaviour of our policy engine and the upstream one.\n//\n// We aim to keep these tests to make sure we do not regress as we evolve\n// and improve our policy implementation.\n// This file is NOT intended for developer/humans to change and should be\n// consider a \"black box\" test suite.\npackage v2\n\nimport (\n\t\"net/netip\"\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/google/go-cmp/cmp/cmpopts\"\n\t\"github.com/juanfont/headscale/hscontrol/policy/policyutil\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gorm.io/gorm\"\n\t\"tailscale.com/tailcfg\"\n)\n\n// ptrAddr is a helper to create a pointer to a netip.Addr.\nfunc ptrAddr(s string) *netip.Addr {\n\taddr := netip.MustParseAddr(s)\n\treturn &addr\n}\n\n// setupTailscaleCompatUsers returns the test users for compatibility tests.\nfunc setupTailscaleCompatUsers() types.Users {\n\treturn types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"kratail2tid\"},\n\t}\n}\n\n// setupTailscaleCompatNodes returns the test nodes for compatibility tests.\n// The node configuration matches the Tailscale test environment:\n// - 1 user-owned node (user1)\n// - 4 tagged nodes (tagged-server, tagged-client, tagged-db, tagged-web).\nfunc setupTailscaleCompatNodes(users types.Users) types.Nodes {\n\t// Node: user1 - User-owned by kratail2tid\n\tnodeUser1 := &types.Node{\n\t\tID:        1,\n\t\tGivenName: \"user1\",\n\t\tUser:      &users[0],\n\t\tUserID:    &users[0].ID,\n\t\tIPv4:      ptrAddr(\"100.90.199.68\"),\n\t\tIPv6:      ptrAddr(\"fd7a:115c:a1e0::2d01:c747\"),\n\t\tHostinfo:  &tailcfg.Hostinfo{},\n\t}\n\n\t// Node: tagged-server - Has tag:server\n\tnodeTaggedServer := &types.Node{\n\t\tID:        2,\n\t\tGivenName: \"tagged-server\",\n\t\tIPv4:      ptrAddr(\"100.108.74.26\"),\n\t\tIPv6:      ptrAddr(\"fd7a:115c:a1e0::b901:4a87\"),\n\t\tTags:      []string{\"tag:server\"},\n\t\tHostinfo:  &tailcfg.Hostinfo{},\n\t}\n\n\t// Node: tagged-client - Has tag:client\n\tnodeTaggedClient := &types.Node{\n\t\tID:        3,\n\t\tGivenName: \"tagged-client\",\n\t\tIPv4:      ptrAddr(\"100.80.238.75\"),\n\t\tIPv6:      ptrAddr(\"fd7a:115c:a1e0::7901:ee86\"),\n\t\tTags:      []string{\"tag:client\"},\n\t\tHostinfo:  &tailcfg.Hostinfo{},\n\t}\n\n\t// Node: tagged-db - Has tag:database\n\tnodeTaggedDB := &types.Node{\n\t\tID:        4,\n\t\tGivenName: \"tagged-db\",\n\t\tIPv4:      ptrAddr(\"100.74.60.128\"),\n\t\tIPv6:      ptrAddr(\"fd7a:115c:a1e0::2f01:3c9c\"),\n\t\tTags:      []string{\"tag:database\"},\n\t\tHostinfo:  &tailcfg.Hostinfo{},\n\t}\n\n\t// Node: tagged-web - Has tag:web\n\tnodeTaggedWeb := &types.Node{\n\t\tID:        5,\n\t\tGivenName: \"tagged-web\",\n\t\tIPv4:      ptrAddr(\"100.94.92.91\"),\n\t\tIPv6:      ptrAddr(\"fd7a:115c:a1e0::ef01:5c81\"),\n\t\tTags:      []string{\"tag:web\"},\n\t\tHostinfo:  &tailcfg.Hostinfo{},\n\t}\n\n\treturn 
types.Nodes{\n\t\tnodeUser1,\n\t\tnodeTaggedServer,\n\t\tnodeTaggedClient,\n\t\tnodeTaggedDB,\n\t\tnodeTaggedWeb,\n\t}\n}\n\n// findNodeByGivenName finds a node by its GivenName field.\nfunc findNodeByGivenName(nodes types.Nodes, name string) *types.Node {\n\tfor _, n := range nodes {\n\t\tif n.GivenName == name {\n\t\t\treturn n\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// tailscaleCompatTest defines a test case for Tailscale compatibility testing.\ntype tailscaleCompatTest struct {\n\tname        string                          // Test name\n\tpolicy      string                          // HuJSON policy as multiline raw string\n\twantFilters map[string][]tailcfg.FilterRule // node GivenName -> expected filters\n}\n\n// basePolicyPrefix and basePolicySuffix provide the standard groups,\n// tagOwners, and hosts that wrap the per-test ACL rules in all Tailscale\n// compatibility tests.\nconst basePolicyPrefix = `{\n\t\"groups\": {\n\t\t\"group:admins\": [\"kratail2tid@\"],\n\t\t\"group:developers\": [\"kratail2tid@\"],\n\t\t\"group:empty\": []\n\t},\n\t\"tagOwners\": {\n\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\"tag:web\": [\"kratail2tid@\"]\n\t},\n\t\"hosts\": {\n\t\t\"webserver\": \"100.108.74.26\",\n\t\t\"database\": \"100.74.60.128\",\n\t\t\"internal\": \"10.0.0.0/8\",\n\t\t\"subnet24\": \"192.168.1.0/24\"\n\t},\n\t\"acls\": [`\n\nconst basePolicySuffix = `\n\t]\n}`\n\n// makePolicy creates a full policy from just the ACL rules portion.\nfunc makePolicy(aclRules string) string {\n\treturn basePolicyPrefix + aclRules + basePolicySuffix\n}\n\n// cmpOptions returns comparison options for FilterRule slices.\n// It sorts SrcIPs and DstPorts to handle ordering differences.\nfunc cmpOptions() []cmp.Option {\n\treturn []cmp.Option{\n\t\tcmpopts.SortSlices(func(a, b string) bool { return a < b }),\n\t\tcmpopts.SortSlices(func(a, b tailcfg.NetPortRange) bool {\n\t\t\tif a.IP != b.IP {\n\t\t\t\treturn a.IP < b.IP\n\t\t\t}\n\n\t\t\tif a.Ports.First != b.Ports.First {\n\t\t\t\treturn a.Ports.First < b.Ports.First\n\t\t\t}\n\n\t\t\treturn a.Ports.Last < b.Ports.Last\n\t\t}),\n\t\tcmpopts.SortSlices(func(a, b int) bool { return a < b }),\n\t}\n}\n\n
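// The partitioned CGNAT list referenced in the comment below can be\n// reproduced mechanically. A minimal sketch, assuming go4.org/netipx were\n// imported (an assumption; the import is not part of this file):\n//\n//\tvar b netipx.IPSetBuilder\n//\tb.AddPrefix(netip.MustParsePrefix(\"100.64.0.0/10\"))\n//\tb.RemovePrefix(netip.MustParsePrefix(\"100.115.92.0/23\"))\n//\tset, _ := b.IPSet()\n//\t_ = set.Prefixes() // yields the partitioned IPv4 ranges listed below\n\n// Tailscale uses partitioned CGNAT CIDR ranges for wildcard source expansion\n// (excluding the ChromeOS VM range 100.115.92.0/23). 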
Headscale uses the simpler\n// full CGNAT range (100.64.0.0/10) and Tailscale ULA range (fd7a:115c:a1e0::/48).\n// This is functionally equivalent for access control purposes.\n//\n// For reference, Tailscale's partitioned ranges are:\n// var tailscaleCGNATCIDRs = []string{\n// \t\"100.64.0.0/11\",\n// \t\"100.96.0.0/12\",\n// \t\"100.112.0.0/15\",\n// \t\"100.114.0.0/16\",\n// \t\"100.115.0.0/18\",\n// \t\"100.115.64.0/20\",\n// \t\"100.115.80.0/21\",\n// \t\"100.115.88.0/22\",\n// \t\"100.115.94.0/23\",\n// \t\"100.115.96.0/19\",\n// \t\"100.115.128.0/17\",\n// \t\"100.116.0.0/14\",\n// \t\"100.120.0.0/13\",\n// \t\"fd7a:115c:a1e0::/48\",\n// }\n\n// TestTailscaleCompatWildcardACLs tests wildcard ACL rules (* source and destination).\n// These are the most fundamental tests for basic allow-all and IP-based rules.\nfunc TestTailscaleCompatWildcardACLs(t *testing.T) {\n\tt.Parallel()\n\n\tusers := setupTailscaleCompatUsers()\n\tnodes := setupTailscaleCompatNodes(users)\n\n\ttests := []tailscaleCompatTest{\n\t\t{\n\t\t\tname: \"allow_all_wildcard\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}\n\t`),\n\t\t\t// All nodes receive the same filter for allow-all rule.\n\t\t\t// NOTE: Tailscale expands `*` source to partitioned CGNAT CIDR ranges:\n\t\t\t// 100.64.0.0/11, 100.96.0.0/12, 100.112.0.0/15, etc. plus fd7a:115c:a1e0::/48\n\t\t\t// Headscale uses the full 100.64.0.0/10 and fd7a:115c:a1e0::/48 ranges.\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// NOTE: Tailscale uses partitioned CGNAT CIDRs, Headscale uses full range.\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, 
ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"single_ip_as_source\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"100.90.199.68\"], \"dst\": [\"*:*\"]}\n\t`),\n\t\t\t// Single IP source: Headscale resolves the IP to a node and includes ALL of the\n\t\t\t// node's IPs (both IPv4 and IPv6). Tailscale uses only the literal IP specified.\n\t\t\t// TODO: Tailscale only includes the literal IP \"100.90.199.68/32\" without IPv6.\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// TODO: Tailscale only includes the literal IP:\n\t\t\t\t\t\t// SrcIPs: []string{\"100.90.199.68/32\"},\n\t\t\t\t\t\t// Headscale: Resolves IP to node and includes ALL node IPs (IPv4+IPv6)\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"cidr_as_source\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"100.64.0.0/16\"], \"dst\": [\"*:*\"]}\n\t`),\n\t\t\t// CIDR source is passed through unchanged to the filter.\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"100.64.0.0/16\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"100.64.0.0/16\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: 
\"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"100.64.0.0/16\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"100.64.0.0/16\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"100.64.0.0/16\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"single_ip_as_destination\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"100.108.74.26:*\"]}\n\t`),\n\t\t\t// Single IP destination: ONLY that node receives the filter.\n\t\t\t// KEY INSIGHT: Destination filters are only sent to nodes that ARE the destination.\n\t\t\t// NOTE: This IP (100.108.74.26) is tagged-server.\n\t\t\t// NOTE: Headscale resolves the IP to a node and includes ALL of the node's IPs.\n\t\t\t// TODO: Tailscale only includes the literal destination IP without IPv6.\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// NOTE: Tailscale uses partitioned CGNAT CIDRs, Headscale uses full 100.64.0.0/10\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// TODO: Tailscale only includes the literal destination IP:\n\t\t\t\t\t\t// DstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t// \t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t// },\n\t\t\t\t\t\t// Headscale: Resolves IP to node and includes ALL node IPs (IPv4+IPv6)\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"cidr_as_destination\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"100.64.0.0/12:*\"]}\n\t`),\n\t\t\t// CIDR destination: only nodes with IPs in the CIDR range receive the filter.\n\t\t\t// 100.64.0.0/12 covers 100.64.0.0 - 100.79.255.255\n\t\t\t// Of our test nodes, only tagged-db (100.74.60.128) falls in this range.\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil, // 100.90.199.68 is NOT in 100.64.0.0/12\n\t\t\t\t\"tagged-server\": nil, // 100.108.74.26 is NOT in 100.64.0.0/12\n\t\t\t\t\"tagged-client\": nil, // 100.80.238.75 is NOT in 100.64.0.0/12\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// NOTE: Tailscale uses partitioned CGNAT 
CIDRs, Headscale uses full 100.64.0.0/10\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.64.0.0/12\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": nil, // 100.94.92.91 is NOT in 100.64.0.0/12\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tpol, err := unmarshalPolicy([]byte(tt.policy))\n\t\t\trequire.NoError(t, err, \"failed to parse policy\")\n\n\t\t\terr = pol.validate()\n\t\t\trequire.NoError(t, err, \"policy validation failed\")\n\n\t\t\tfor nodeName, wantFilters := range tt.wantFilters {\n\t\t\t\tnode := findNodeByGivenName(nodes, nodeName)\n\t\t\t\trequire.NotNil(t, node, \"node %s not found\", nodeName)\n\n\t\t\t\tcompiledFilters, err := pol.compileFilterRulesForNode(users, node.View(), nodes.ViewSlice())\n\t\t\t\trequire.NoError(t, err, \"failed to compile filters for node %s\", nodeName)\n\n\t\t\t\tgotFilters := policyutil.ReduceFilterRules(node.View(), compiledFilters)\n\n\t\t\t\tif len(wantFilters) == 0 && len(gotFilters) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif diff := cmp.Diff(wantFilters, gotFilters, cmpOptions()...); diff != \"\" {\n\t\t\t\t\tt.Errorf(\"node %s filters mismatch (-want +got):\\n%s\", nodeName, diff)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestTailscaleCompatBasicTags tests basic tag-to-tag ACL rules.\n// These tests verify that tags are correctly expanded to node IPs\n// and that filters are distributed to the correct destination nodes.\nfunc TestTailscaleCompatBasicTags(t *testing.T) {\n\tt.Parallel()\n\n\tusers := setupTailscaleCompatUsers()\n\tnodes := setupTailscaleCompatNodes(users)\n\n\ttests := []tailscaleCompatTest{\n\t\t{\n\t\t\tname: \"tag_client_to_tag_server_port_22\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"tag_as_source_wildcard_dest\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"*:*\"]}\n\t`),\n\t\t\t// When dst is *, all nodes should receive the filter\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, 
ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiple_source_tags\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\", \"tag:web\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"tag_as_destination_only\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\t// When using wildcard source and tag destination, ONLY the tagged node receives the filter.\n\t\t\t// This is different from tag_as_source_wildcard_dest where all nodes receive the filter.\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// NOTE: Tailscale uses partitioned CGNAT CIDRs, Headscale uses full 100.64.0.0/10\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 
22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiple_destination_tags\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\", \"tag:database:5432\", \"tag:web:80\"]}\n\t`),\n\t\t\t// Multiple destination tags in a single rule.\n\t\t\t// Each tagged node receives ONLY its own destination portion.\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.94.92.91/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::ef01:5c81/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"all_tagged_nodes_as_source_to_specific_destination\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:tagged\"], \"dst\": [\"tag:database:5432\"]}\n\t`),\n\t\t\t// All tagged nodes as source (including the destination node itself).\n\t\t\t// Only the destination node receives the filter.\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: 
\"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": nil,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tpol, err := unmarshalPolicy([]byte(tt.policy))\n\t\t\trequire.NoError(t, err, \"failed to parse policy\")\n\n\t\t\terr = pol.validate()\n\t\t\trequire.NoError(t, err, \"policy validation failed\")\n\n\t\t\tfor nodeName, wantFilters := range tt.wantFilters {\n\t\t\t\tnode := findNodeByGivenName(nodes, nodeName)\n\t\t\t\trequire.NotNil(t, node, \"node %s not found\", nodeName)\n\n\t\t\t\t// Get compiled filters for this specific node\n\t\t\t\tcompiledFilters, err := pol.compileFilterRulesForNode(users, node.View(), nodes.ViewSlice())\n\t\t\t\trequire.NoError(t, err, \"failed to compile filters for node %s\", nodeName)\n\n\t\t\t\t// Reduce to only rules where this node is a destination\n\t\t\t\tgotFilters := policyutil.ReduceFilterRules(node.View(), compiledFilters)\n\n\t\t\t\t// Handle nil vs empty slice comparison\n\t\t\t\tif len(wantFilters) == 0 && len(gotFilters) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif diff := cmp.Diff(wantFilters, gotFilters, cmpOptions()...); diff != \"\" {\n\t\t\t\t\tt.Errorf(\"node %s filters mismatch (-want +got):\\n%s\", nodeName, diff)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestTailscaleCompatUsersGroups tests user and group ACL rules.\nfunc TestTailscaleCompatUsersGroups(t *testing.T) {\n\tt.Parallel()\n\n\tusers := setupTailscaleCompatUsers()\n\tnodes := setupTailscaleCompatNodes(users)\n\n\ttests := []tailscaleCompatTest{\n\t\t{\n\t\t\tname: \"user_as_source\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"kratail2tid@\"], \"dst\": [\"*:*\"]}\n\t`),\n\t\t\t// User as source expands to IPs of nodes owned by that user\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: 
tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"user_as_destination\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"kratail2tid@:*\"]}\n\t`),\n\t\t\t// User as destination - only user-owned nodes receive the filter\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"group_as_source\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"group:admins\"], \"dst\": [\"*:*\"]}\n\t`),\n\t\t\t// Group as source expands to IPs of nodes owned by group members\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"group_as_destination\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"group:admins:*\"]}\n\t`),\n\t\t\t// Group as destination - only nodes owned by group members receive the filter\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiple_destinations_different_ports\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\", \"tag:database:5432\"]}\n\t`),\n\t\t\t// Each destination node receives ONLY its own destination portion\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": nil,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tpol, err := unmarshalPolicy([]byte(tt.policy))\n\t\t\trequire.NoError(t, err, \"failed to parse policy\")\n\n\t\t\terr = pol.validate()\n\t\t\trequire.NoError(t, err, \"policy validation failed\")\n\n\t\t\tfor nodeName, wantFilters := range tt.wantFilters {\n\t\t\t\tnode := findNodeByGivenName(nodes, nodeName)\n\t\t\t\trequire.NotNil(t, node, \"node %s not found\", nodeName)\n\n\t\t\t\t// Get compiled filters for this specific node\n\t\t\t\tcompiledFilters, err := pol.compileFilterRulesForNode(users, node.View(), nodes.ViewSlice())\n\t\t\t\trequire.NoError(t, err, \"failed to compile filters for node 
%s\", nodeName)\n\n\t\t\t\t// Reduce to only rules where this node is a destination\n\t\t\t\tgotFilters := policyutil.ReduceFilterRules(node.View(), compiledFilters)\n\n\t\t\t\tif len(wantFilters) == 0 && len(gotFilters) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif diff := cmp.Diff(wantFilters, gotFilters, cmpOptions()...); diff != \"\" {\n\t\t\t\t\tt.Errorf(\"node %s filters mismatch (-want +got):\\n%s\", nodeName, diff)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestTailscaleCompatAutogroups tests autogroup ACL rules.\nfunc TestTailscaleCompatAutogroups(t *testing.T) {\n\tt.Parallel()\n\n\tusers := setupTailscaleCompatUsers()\n\tnodes := setupTailscaleCompatNodes(users)\n\n\ttests := []tailscaleCompatTest{\n\t\t{\n\t\t\tname: \"autogroup_member_as_source\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"*:*\"]}\n\t`),\n\t\t\t// autogroup:member expands to IPs of user-owned nodes only\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"autogroup_tagged_as_source\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:tagged\"], \"dst\": [\"*:*\"]}\n\t`),\n\t\t\t// autogroup:tagged expands to IPs of all tagged nodes\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"autogroup_member_plus_tag_client\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\", \"tag:client\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\t// Sources are merged into one Srcs 
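array and deduplicated.\n\t\t\t// A minimal sketch of such a merge step, assuming a hypothetical\n\t\t\t// mergeSources helper (not the actual Headscale implementation):\n\t\t\t//\n\t\t\t// \tfunc mergeSources(srcs ...[]string) []string {\n\t\t\t// \t\tseen := make(map[string]bool)\n\t\t\t// \t\tvar out []string\n\t\t\t// \t\tfor _, src := range srcs {\n\t\t\t// \t\t\tfor _, ip := range src {\n\t\t\t// \t\t\t\tif !seen[ip] {\n\t\t\t// \t\t\t\t\tseen[ip] = true\n\t\t\t// \t\t\t\t\tout = append(out, ip)\n\t\t\t// \t\t\t\t}\n\t\t\t// \t\t\t}\n\t\t\t// \t\t}\n\t\t\t// \t\tsort.Strings(out)\n\t\t\t// \t\treturn out\n\t\t\t// \t}\n\t\t\t//\n\t\t\t// The expected filter below shows the resulting merged Srcs 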
array\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"autogroup_self_as_destination\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"autogroup:self:*\"]}\n\t`),\n\t\t\t// autogroup:self allows a node to access ITSELF.\n\t\t\t// The source wildcard `*` is narrowed to the node's own IP for autogroup:self.\n\t\t\t// KEY INSIGHT: Tagged nodes do NOT receive autogroup:self filters.\n\t\t\t// Only user-owned nodes can use autogroup:self.\n\t\t\t// NOTE: For autogroup:self destinations, both Tailscale and Headscale narrow\n\t\t\t// the wildcard source to only the same-user untagged nodes.\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// Source is narrowed to the node's own IPs for autogroup:self.\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// Tailscale uses CIDR format: \"100.90.199.68/32\" and \"fd7a:115c:a1e0::2d01:c747/128\"\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": nil, // Tagged nodes do NOT receive autogroup:self filters\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"autogroup_internet_as_destination\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"autogroup:internet:*\"]}\n\t`),\n\t\t\t// autogroup:internet produces NO PacketFilter entries.\n\t\t\t// This autogroup relates to exit node routing, not direct node-to-node filters.\n\t\t\t// It controls what traffic can be routed through exit nodes to the internet.\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"autogroup_member_as_destination\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"autogroup:member:*\"]}\n\t`),\n\t\t\t// autogroup:member as destination - only user-owned nodes receive the filter.\n\t\t\t// Tagged nodes do NOT receive this filter.\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// NOTE: Tailscale uses partitioned CGNAT CIDRs, Headscale uses full 100.64.0.0/10\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"autogroup_self_mixed_with_tag\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"autogroup:self:*\", \"tag:server:22\"]}\n\t`),\n\t\t\t// KEY FINDING: Mixed destinations create SEPARATE filter entries with different Srcs!\n\t\t\t// - autogroup:self narrows Srcs to the user's own IPs\n\t\t\t// - tag:server keeps Srcs as full wildcard\n\t\t\t// user1 gets ONLY the self filter (narrowed Srcs to user1's IPs)\n\t\t\t// tagged-server gets ONLY the tag filter (full wildcard Srcs)\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// autogroup:self narrows Srcs to user's own IPs\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// tag:server keeps full wildcard Srcs\n\t\t\t\t\t\t// NOTE: Tailscale uses partitioned CGNAT CIDRs, Headscale uses full 100.64.0.0/10\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil, // Not in destination\n\t\t\t\t\"tagged-db\":     nil, // Not in destination\n\t\t\t\t\"tagged-web\":    nil, // Not in destination\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"autogroup_tagged_as_destination\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"autogroup:tagged:*\"]}\n\t`),\n\t\t\t// autogroup:tagged as destination - all tagged nodes receive the filter.\n\t\t\t// User-owned nodes do NOT receive this filter.\n\t\t\t// KEY INSIGHT: ReduceFilterRules filters DstPorts to only the current node's IPs.\n\t\t\t// So each tagged node only sees its OWN IPs in DstPorts after reduction.\n\t\t\t// TODO: Tailscale includes ALL tagged nodes' IPs in DstPorts for each node.\n\t\t\t// Headscale only includes the current node's IPs after ReduceFilterRules.\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// NOTE: Tailscale uses partitioned CGNAT CIDRs, Headscale uses full 100.64.0.0/10\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// TODO: Tailscale includes ALL tagged nodes' IPs:\n\t\t\t\t\t\t// DstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t// \t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t// \t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t// \t{IP: \"100.80.238.75/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t// \t{IP: \"100.94.92.91/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t// \t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t// \t{IP: \"fd7a:115c:a1e0::7901:ee86/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t// \t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t// \t{IP: \"fd7a:115c:a1e0::ef01:5c81/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t// },\n\t\t\t\t\t\t// Headscale: After ReduceFilterRules, only this node's IPs are in DstPorts\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// TODO: Tailscale includes ALL tagged nodes' IPs (see tagged-server comment)\n\t\t\t\t\t\t// Headscale: Only this node's IPs after ReduceFilterRules\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.80.238.75/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::7901:ee86/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// TODO: Tailscale includes ALL tagged nodes' IPs (see tagged-server comment)\n\t\t\t\t\t\t// Headscale: Only this node's IPs after ReduceFilterRules\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// TODO: Tailscale includes ALL tagged nodes' IPs (see tagged-server comment)\n\t\t\t\t\t\t// Headscale: Only this node's IPs after ReduceFilterRules\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.94.92.91/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::ef01:5c81/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tpol, err := unmarshalPolicy([]byte(tt.policy))\n\t\t\trequire.NoError(t, err, \"failed to parse policy\")\n\n\t\t\terr = 
pol.validate()\n\t\t\trequire.NoError(t, err, \"policy validation failed\")\n\n\t\t\tfor nodeName, wantFilters := range tt.wantFilters {\n\t\t\t\tnode := findNodeByGivenName(nodes, nodeName)\n\t\t\t\trequire.NotNil(t, node, \"node %s not found\", nodeName)\n\n\t\t\t\t// Get compiled filters for this specific node\n\t\t\t\tcompiledFilters, err := pol.compileFilterRulesForNode(users, node.View(), nodes.ViewSlice())\n\t\t\t\trequire.NoError(t, err, \"failed to compile filters for node %s\", nodeName)\n\n\t\t\t\t// Reduce to only rules where this node is a destination\n\t\t\t\tgotFilters := policyutil.ReduceFilterRules(node.View(), compiledFilters)\n\n\t\t\t\tif len(wantFilters) == 0 && len(gotFilters) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif diff := cmp.Diff(wantFilters, gotFilters, cmpOptions()...); diff != \"\" {\n\t\t\t\t\tt.Errorf(\"node %s filters mismatch (-want +got):\\n%s\", nodeName, diff)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestTailscaleCompatHosts tests host alias ACL rules.\nfunc TestTailscaleCompatHosts(t *testing.T) {\n\tt.Parallel()\n\n\tusers := setupTailscaleCompatUsers()\n\tnodes := setupTailscaleCompatNodes(users)\n\n\ttests := []tailscaleCompatTest{\n\t\t{\n\t\t\tname: \"host_as_destination\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"webserver:80\"]}\n\t`),\n\t\t\t// Host reference webserver = 100.108.74.26 = tagged-server\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// NOTE: Tailscale uses partitioned CGNAT CIDRs, Headscale uses full 100.64.0.0/10\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// TODO: Tailscale only includes the literal IPv4 for host aliases:\n\t\t\t\t\t\t// DstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t// \t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t// },\n\t\t\t\t\t\t// Headscale: Resolves host alias to node and includes ALL node IPs (IPv4+IPv6)\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"host_as_source\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"webserver\"], \"dst\": [\"*:*\"]}\n\t`),\n\t\t\t// Host as source resolves to the defined IP\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// TODO: Tailscale only includes the literal IPv4 for host aliases:\n\t\t\t\t\t\t// SrcIPs: []string{\"100.108.74.26/32\"},\n\t\t\t\t\t\t// Headscale: Resolves host alias to node and includes ALL node IPs (IPv4+IPv6)\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// TODO: Tailscale only 
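resolves a host alias to its literal IPv4.\n\t\t\t\t\t\t// A minimal sketch of the Headscale-style resolution, assuming a\n\t\t\t\t\t\t// hypothetical nodeByIP helper (not the real API):\n\t\t\t\t\t\t//\n\t\t\t\t\t\t// \taddr := netip.MustParseAddr(\"100.108.74.26\") // hosts[\"webserver\"]\n\t\t\t\t\t\t// \tif node, ok := nodeByIP(nodes, addr); ok {\n\t\t\t\t\t\t// \t\tsrcs = append(srcs, node.IPs()...) // IPv4 /32 and IPv6 /128\n\t\t\t\t\t\t// \t}\n\t\t\t\t\t\t//\n\t\t\t\t\t\t// Tailscale, by contrast, only 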
includes the literal IPv4 for host aliases (see user1 comment)\n\t\t\t\t\t\t// Headscale: Resolves host alias to node and includes ALL node IPs (IPv4+IPv6)\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// TODO: Tailscale only includes the literal IPv4 for host aliases (see user1 comment)\n\t\t\t\t\t\t// Headscale: Resolves host alias to node and includes ALL node IPs (IPv4+IPv6)\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// TODO: Tailscale only includes the literal IPv4 for host aliases (see user1 comment)\n\t\t\t\t\t\t// Headscale: Resolves host alias to node and includes ALL node IPs (IPv4+IPv6)\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// TODO: Tailscale only includes the literal IPv4 for host aliases (see user1 comment)\n\t\t\t\t\t\t// Headscale: Resolves host alias to node and includes ALL node IPs (IPv4+IPv6)\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"cidr_host_as_source\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"internal\"], \"dst\": [\"*:*\"]}\n\t`),\n\t\t\t// CIDR host definition (10.0.0.0/8) is passed through unchanged\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"10.0.0.0/8\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"10.0.0.0/8\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"10.0.0.0/8\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, 
ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"10.0.0.0/8\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"10.0.0.0/8\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tpol, err := unmarshalPolicy([]byte(tt.policy))\n\t\t\trequire.NoError(t, err, \"failed to parse policy\")\n\n\t\t\terr = pol.validate()\n\t\t\trequire.NoError(t, err, \"policy validation failed\")\n\n\t\t\tfor nodeName, wantFilters := range tt.wantFilters {\n\t\t\t\tnode := findNodeByGivenName(nodes, nodeName)\n\t\t\t\trequire.NotNil(t, node, \"node %s not found\", nodeName)\n\n\t\t\t\t// Get compiled filters for this specific node\n\t\t\t\tcompiledFilters, err := pol.compileFilterRulesForNode(users, node.View(), nodes.ViewSlice())\n\t\t\t\trequire.NoError(t, err, \"failed to compile filters for node %s\", nodeName)\n\n\t\t\t\t// Reduce to only rules where this node is a destination\n\t\t\t\tgotFilters := policyutil.ReduceFilterRules(node.View(), compiledFilters)\n\n\t\t\t\tif len(wantFilters) == 0 && len(gotFilters) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif diff := cmp.Diff(wantFilters, gotFilters, cmpOptions()...); diff != \"\" {\n\t\t\t\t\tt.Errorf(\"node %s filters mismatch (-want +got):\\n%s\", nodeName, diff)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestTailscaleCompatProtocolsPorts tests protocol and port ACL rules.\nfunc TestTailscaleCompatProtocolsPorts(t *testing.T) {\n\tt.Parallel()\n\n\tusers := setupTailscaleCompatUsers()\n\tnodes := setupTailscaleCompatNodes(users)\n\n\ttests := []tailscaleCompatTest{\n\t\t{\n\t\t\tname: \"tcp_only_protocol\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"proto\": \"tcp\", \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"udp_only_protocol\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"proto\": \"udp\", \"dst\": [\"tag:server:53\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 
53, Last: 53}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 53, Last: 53}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolUDP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"icmp_numeric_protocol\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"proto\": \"1\", \"dst\": [\"tag:server:*\"]}\n\t`),\n\t\t\t// Numeric protocol values work (e.g., \"1\" for ICMP)\n\t\t\t// Even for ICMP (which doesn't use ports), the ports field is 0-65535\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// NOTE: Tailscale uses partitioned CGNAT CIDRs, Headscale uses full 100.64.0.0/10\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"port_range\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"tag:server:80-443\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiple_comma_separated_ports\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"tag:server:22,80,443\"]}\n\t`),\n\t\t\t// Comma-separated ports expand into separate DstPorts entries\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": 
nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"wildcard_port\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"tag:server:*\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tpol, err := unmarshalPolicy([]byte(tt.policy))\n\t\t\trequire.NoError(t, err, \"failed to parse policy\")\n\n\t\t\terr = pol.validate()\n\t\t\trequire.NoError(t, err, \"policy validation failed\")\n\n\t\t\tfor nodeName, wantFilters := range tt.wantFilters {\n\t\t\t\tnode := findNodeByGivenName(nodes, nodeName)\n\t\t\t\trequire.NotNil(t, node, \"node %s not found\", nodeName)\n\n\t\t\t\t// Get compiled filters for this specific node\n\t\t\t\tcompiledFilters, err := pol.compileFilterRulesForNode(users, node.View(), nodes.ViewSlice())\n\t\t\t\trequire.NoError(t, err, \"failed to compile filters for node %s\", nodeName)\n\n\t\t\t\t// Reduce to only rules where this node is a destination\n\t\t\t\tgotFilters := policyutil.ReduceFilterRules(node.View(), compiledFilters)\n\n\t\t\t\tif len(wantFilters) == 0 && len(gotFilters) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif diff := cmp.Diff(wantFilters, gotFilters, cmpOptions()...); diff != \"\" {\n\t\t\t\t\tt.Errorf(\"node %s filters mismatch (-want +got):\\n%s\", nodeName, diff)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestTailscaleCompatMixedSources tests mixing different source types in a single rule.\n// From findings/09-mixed-scenarios.md - Category 1: Mixed Sources (Single Rule).\nfunc TestTailscaleCompatMixedSources(t *testing.T) {\n\tt.Parallel()\n\n\tusers := setupTailscaleCompatUsers()\n\tnodes := setupTailscaleCompatNodes(users)\n\n\ttests := []tailscaleCompatTest{\n\t\t{\n\t\t\tname: \"autogroup_tagged_plus_autogroup_member_full_tailnet\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:tagged\", \"autogroup:member\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\t// Full tailnet coverage: autogroup:tagged (all 4 tagged) + autogroup:member (user1)\n\t\t\t// All 5 nodes' IPv4 and IPv6 addresses should be in Srcs (10 total entries)\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: 
[]tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"group_plus_tag\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"group:admins\", \"tag:client\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\t// group:admins → user1's IPs + tag:client → tagged-client's IPs\n\t\t\t// Both merged into single Srcs array (4 IPs total)\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"explicit_user_plus_tag\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"kratail2tid@\", \"tag:client\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\t// Explicit user kratail2tid@ → user1's IPs + tag:client → tagged-client's IPs\n\t\t\t// Both merged into single Srcs array (4 IPs total)\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"cidr_plus_tag\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"10.0.0.0/8\", \"tag:client\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\t// CIDR 10.0.0.0/8 + tag:client IPs merged into single Srcs array\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"10.0.0.0/8\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 
22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"host_plus_tag\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"internal\", \"tag:client\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\t// Host alias \"internal\" (10.0.0.0/8) + tag:client IPs merged into single Srcs array\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"10.0.0.0/8\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"webserver_host_plus_tag\",\n\t\t\t// Test 1.5: webserver (host) + tag:client\n\t\t\t// In Tailscale, host aliases are IPv4-only while tags include IPv6; Headscale expands both to all node IPs.\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"webserver\", \"tag:client\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// TODO: Tailscale: webserver host = 100.108.74.26/32 (IPv4 only)\n\t\t\t\t\t\t// Tailscale Srcs: [\"100.108.74.26/32\", \"100.80.238.75/32\", \"fd7a:115c:a1e0::7901:ee86/128\"]\n\t\t\t\t\t\t// Headscale: Host resolves to node and includes ALL node IPs\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"raw_ip_plus_tag\",\n\t\t\t// Test 1.6: 100.90.199.68 (raw IP) + tag:client\n\t\t\t// In Tailscale, raw IPs are treated as literal CIDRs\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"100.90.199.68\", \"tag:client\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// Raw IP 100.90.199.68 resolves to user1 node - Headscale includes all node IPs\n\t\t\t\t\t\t// tag:client expands to tagged-client's IPs\n\t\t\t\t\t\t// TODO: Tailscale may treat raw IP as literal /32 only without IPv6\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\", // user1 IPv6 added by 
Headscale\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"same_user_three_ways\",\n\t\t\t// Test 1.7: autogroup:member + group:admins + kratail2tid@ (same user 3 ways)\n\t\t\t// All three resolve to user1, should deduplicate to just user1's IPs\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\", \"group:admins\", \"kratail2tid@\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// All three sources resolve to user1 - should be deduplicated\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"same_ip_two_ways_as_source\",\n\t\t\t// Test 1.8: tag:server + webserver (same IP via tag and host)\n\t\t\t// Both reference tagged-server's IP - should deduplicate\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:server\", \"webserver\"], \"dst\": [\"tag:database:5432\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// TODO: Tailscale: webserver host only adds IPv4\n\t\t\t\t\t\t// Tailscale Srcs: [\"100.108.74.26/32\", \"fd7a:115c:a1e0::b901:4a87/128\"]\n\t\t\t\t\t\t// Headscale: Both tag:server and webserver resolve to all node IPs\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": nil,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tpol, err := unmarshalPolicy([]byte(tt.policy))\n\t\t\trequire.NoError(t, err, \"failed to parse policy\")\n\n\t\t\terr = pol.validate()\n\t\t\trequire.NoError(t, err, \"policy validation failed\")\n\n\t\t\tfor nodeName, wantFilters := range tt.wantFilters {\n\t\t\t\tnode := findNodeByGivenName(nodes, nodeName)\n\t\t\t\trequire.NotNil(t, node, 
\"node %s not found\", nodeName)\n\n\t\t\t\t// Get compiled filters for this specific node\n\t\t\t\tcompiledFilters, err := pol.compileFilterRulesForNode(users, node.View(), nodes.ViewSlice())\n\t\t\t\trequire.NoError(t, err, \"failed to compile filters for node %s\", nodeName)\n\n\t\t\t\t// Reduce to only rules where this node is a destination\n\t\t\t\tgotFilters := policyutil.ReduceFilterRules(node.View(), compiledFilters)\n\n\t\t\t\tif len(wantFilters) == 0 && len(gotFilters) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif diff := cmp.Diff(wantFilters, gotFilters, cmpOptions()...); diff != \"\" {\n\t\t\t\t\tt.Errorf(\"node %s filters mismatch (-want +got):\\n%s\", nodeName, diff)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestTailscaleCompatComplexScenarios tests complex ACL rule combinations.\nfunc TestTailscaleCompatComplexScenarios(t *testing.T) {\n\tt.Parallel()\n\n\tusers := setupTailscaleCompatUsers()\n\tnodes := setupTailscaleCompatNodes(users)\n\n\ttests := []tailscaleCompatTest{\n\t\t{\n\t\t\tname: \"empty_group_produces_no_filter\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"group:empty\"], \"dst\": [\"*:*\"]}\n\t`),\n\t\t\t// Empty groups produce no filter entries\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiple_rules_same_source_merged\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:80,443\"]}\n\t`),\n\t\t\t// KEY INSIGHT: In Tailscale, multiple rules with the SAME source are MERGED into a\n\t\t\t// single filter entry with all destination ports combined.\n\t\t\t// Headscale now merges rules with identical SrcIPs and IPProto.\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t// Merged: Both ACL rules combined into single filter entry\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"different_sources_same_destination_separate\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:web\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:database\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\t// KEY 
INSIGHT: Different sources are NEVER merged - always separate filter entries.\n\t\t\t// Each source gets its own filter entry even with identical destinations.\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"mixed_overlapping_rules\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:80\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:web\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:web\"], \"dst\": [\"tag:server:443\"]}\n\t`),\n\t\t\t// In Tailscale: 4 rules → 2 filter entries (merged per-source)\n\t\t\t// - tag:client rules merged (ports 22, 80)\n\t\t\t// - tag:web rules merged (ports 22, 443)\n\t\t\t// Headscale now merges rules with identical SrcIPs and IPProto.\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t// Merged: tag:client rules (ports 22, 80)\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t// Merged: tag:web rules (ports 22, 443)\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiple_tag_destinations_distributed\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\", \"tag:database:5432\"]}\n\t`),\n\t\t\t// Multiple tag destinations are distributed to their respective nodes.\n\t\t\t// tagged-server gets port 22, tagged-db gets port 5432.\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"same_node_different_ports_via_tag_and_host\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\", \"webserver:80\"]}\n\t`),\n\t\t\t// KEY FINDING: Same IP can appear multiple times in Dsts with different ports\n\t\t\t// when referenced via different aliases (tag vs host).\n\t\t\t// - tag:server adds both IPv4 and IPv6 (port 22)\n\t\t\t// - webserver host adds only IPv4 (port 80) in Tailscale; Headscale expands it to IPv4+IPv6\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// TODO: Tailscale includes webserver:80 BEFORE tag:server:22 in Dsts:\n\t\t\t\t\t\t// DstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t//   {IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t//   {IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t//   {IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 
22}},\n\t\t\t\t\t\t// },\n\t\t\t\t\t\t// Headscale: tag destinations come first, then host destinations\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t// Host alias \"webserver\" expands to node's IPs (IPv4 + IPv6)\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"group_and_tag_destinations_distributed\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"group:admins:22\", \"tag:server:80\"]}\n\t`),\n\t\t\t// Group:admins → user1, tag:server → tagged-server\n\t\t\t// Each destination type distributed to its respective nodes.\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"wildcard_mixed_with_specific_source\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"tag:server:80\"]}\n\t`),\n\t\t\t// Wildcard `*` is NOT merged with specific sources.\n\t\t\t// Each remains a separate filter entry.\n\t\t\t// Wildcard expands to CIDR ranges, specific tag expands to node IP.\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t// NOTE: Tailscale 
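and Headscale expand the wildcard source differently.\n\t\t\t\t\t\t// A minimal sketch of the Headscale-style expansion (tsaddr is\n\t\t\t\t\t\t// tailscale.com/net/tsaddr; shown for illustration only):\n\t\t\t\t\t\t//\n\t\t\t\t\t\t// \twildcard := []string{\n\t\t\t\t\t\t// \t\ttsaddr.CGNATRange().String(),        // \"100.64.0.0/10\"\n\t\t\t\t\t\t// \t\ttsaddr.TailscaleULARange().String(), // \"fd7a:115c:a1e0::/48\"\n\t\t\t\t\t\t// \t}\n\t\t\t\t\t\t//\n\t\t\t\t\t\t// NOTE: Tailscale 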
uses partitioned CGNAT CIDRs, Headscale uses full 100.64.0.0/10\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"same_src_different_dest_ports_merged\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:80\"]}\n\t`),\n\t\t\t// KEY FINDING: Same source, same dest node, different ports = MERGED\n\t\t\t// 2 rules → 1 filter entry with all ports combined (4 Dsts: 2 ports × 2 IPs)\n\t\t\t// Headscale now merges rules with identical SrcIPs and IPProto.\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t// Merged: Both rules combined\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"same_src_different_dest_nodes_separate\",\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:database:5432\"]}\n\t`),\n\t\t\t// Same source, different destination nodes = separate filter entries per node.\n\t\t\t// Each destination node only receives its relevant filter.\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 
5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": nil,\n\t\t\t},\n\t\t},\n\t\t// Category 2: Mixed Destinations - Additional tests\n\t\t{\n\t\t\tname: \"tag_plus_raw_ip_same_node_different_ports\",\n\t\t\t// Test 2.3: tag:server:22 + 100.108.74.26:80 (tag + raw IP, same node)\n\t\t\t// Same behavior as Test 2.2 - same IP can appear multiple times with different ports\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\", \"100.108.74.26:80\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// tag:server adds both IPv4+IPv6 for port 22\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t// Headscale resolves raw IP to node and includes all IPs (IPv4+IPv6)\n\t\t\t\t\t\t\t// TODO: Tailscale adds only IPv4 for raw IP destinations\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"user_via_email_and_group_different_ports\",\n\t\t\t// Test 2.6: kratail2tid@:22 + group:admins:80 (same user via email + group)\n\t\t\t// Same user referenced via email and group creates separate Dst entries per port\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"kratail2tid@:22\", \"group:admins:80\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// Same user via email and group with different ports - 4 Dst entries total\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiple_host_destinations\",\n\t\t\t// Test 2.7: webserver:22 + database:5432 (multiple hosts)\n\t\t\t// Host destinations are properly distributed to matching nodes\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": 
[\"tag:client\"], \"dst\": [\"webserver:22\", \"database:5432\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// Headscale resolves host alias to node and includes all IPs (IPv4+IPv6)\n\t\t\t\t\t\t// TODO: Tailscale host alias is IPv4-only\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// Headscale resolves host alias to node and includes all IPs (IPv4+IPv6)\n\t\t\t\t\t\t// TODO: Tailscale host alias is IPv4-only\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": nil,\n\t\t\t},\n\t\t},\n\t\t// Category 3: Overlapping References - Same entity via different names\n\t\t{\n\t\t\tname: \"same_ip_via_tag_and_host_source\",\n\t\t\t// Test 3.1: src: [tag:server, webserver] - same IP via tag and host\n\t\t\t// Duplicate IPs should be deduplicated in Srcs\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:server\", \"webserver\"], \"dst\": [\"tag:client:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// tag:server gives IPv4+IPv6, webserver adds IPv4 again (but deduplicated)\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.80.238.75/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::7901:ee86/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\":  nil,\n\t\t\t\t\"tagged-web\": nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"same_ip_port_via_tag_and_host_dest\",\n\t\t\t// Test 3.3: dst: [tag:server:22, webserver:22] - same IP:port via tag and host\n\t\t\t// Destinations are NOT deduplicated - same IP:port can appear multiple times\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\", \"webserver:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// Destinations NOT deduplicated - same IP can appear 
twice\n\t\t\t\t\t\t// tag:server adds IPv4:22 + IPv6:22\n\t\t\t\t\t\t// webserver adds IPv4:22 again + Headscale adds IPv6 too\n\t\t\t\t\t\t// TODO: Tailscale: webserver adds IPv4:22 only (duplicated with tag:server)\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"same_ip_port_via_tag_and_raw_ip_dest\",\n\t\t\t// Test 3.4: dst: [tag:server:22, 100.108.74.26:22] - tag + raw IP (identical)\n\t\t\t// Same behavior as Test 3.3 - Dsts not deduplicated\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\", \"100.108.74.26:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// Destinations NOT deduplicated\n\t\t\t\t\t\t// tag:server adds IPv4:22 + IPv6:22\n\t\t\t\t\t\t// Raw IP adds IPv4:22 again + Headscale adds IPv6 too\n\t\t\t\t\t\t// TODO: Tailscale: raw IP adds IPv4:22 only (duplicated)\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"tag_database_plus_host_database_source\",\n\t\t\t// Test 3.5: src: [tag:database, database] - tag:database + host database (same node)\n\t\t\t// Sources ARE deduplicated\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:database\", \"database\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// Sources deduplicated: tag:database (IPv4+IPv6) + database host (IPv4)\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    
nil,\n\t\t\t},\n\t\t},\n\t\t// Category 4: Cross-Type Source→Destination Combinations\n\t\t{\n\t\t\tname: \"autogroup_tagged_to_user\",\n\t\t\t// Test 4.2: autogroup:tagged → kratail2tid@:22\n\t\t\t// Tagged nodes → user-owned nodes\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:tagged\"], \"dst\": [\"kratail2tid@:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// All 4 tagged nodes (8 IPs) can access user1:22\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"group_to_host_alias\",\n\t\t\t// Test 4.3: group:admins → webserver:22\n\t\t\t// Group → host alias\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"group:admins\"], \"dst\": [\"webserver:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// Headscale resolves host alias to node and adds IPv6 too\n\t\t\t\t\t\t// TODO: Tailscale host alias is IPv4-only\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t// Category 5: Order Effects - Order does NOT affect output\n\t\t{\n\t\t\tname: \"source_order_independence\",\n\t\t\t// Test 5.1: Order of sources doesn't affect output - they are sorted\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:web\", \"tag:client\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// Sources are sorted: IPv4 first (ascending), then IPv6 (ascending)\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 
22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t// Category 6: Edge Cases\n\t\t{\n\t\t\tname: \"cidr_host_as_source\",\n\t\t\t// Test 6.5: internal (10.0.0.0/8) → tag:server:22\n\t\t\t// CIDR host definitions work as sources\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"internal\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// CIDR host goes directly into SrcIPs\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"10.0.0.0/8\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"cidr_host_as_destination_no_matching_nodes\",\n\t\t\t// Test 6.6: tag:client → internal:22 (CIDR host as destination)\n\t\t\t// No nodes in 10.0.0.0/8 range, so no filters generated for any tailnet nodes\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"internal:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t// Category 7: Maximum Combinations\n\t\t{\n\t\t\tname: \"multiple_tags_as_sources\",\n\t\t\t// Test 7.x: Multiple tags as sources\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\", \"tag:web\", \"tag:database\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// All 3 tags' IPs\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"tag_to_multiple_destinations_ports\",\n\t\t\t// Test 7.x: tag:client → multiple destinations with different ports\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\", \"tag:database:5432\", \"tag:web:80\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.94.92.91/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::ef01:5c81/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Category 8: Redundancy Stress Tests\n\t\t{\n\t\t\tname: \"user1_referenced_multiple_ways_as_source\",\n\t\t\t// Test 8.1: user1 referenced 5 ways - all deduplicated\n\t\t\t// autogroup:member, kratail2tid@, group:admins, group:developers, 100.90.199.68\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\", \"kratail2tid@\", \"group:admins\", \"group:developers\", \"100.90.199.68\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// All 5 references resolve to user1 - deduplicated\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t// Category 9: All Tags + All Autogroups\n\t\t{\n\t\t\tname: \"all_four_tags_as_sources\",\n\t\t\t// Test 9.1: All 4 tags as sources\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:server\", \"tag:client\", \"tag:database\", \"tag:web\"], \"dst\": [\"kratail2tid@:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// All 4 tagged nodes (8 IPs total)\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"all_four_tags_as_destinations\",\n\t\t\t// Test 9.2: All 4 tags as destinations\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"kratail2tid@\"], \"dst\": [\"tag:server:22\", \"tag:client:22\", \"tag:database:22\", \"tag:web:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.80.238.75/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::7901:ee86/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.94.92.91/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::ef01:5c81/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"both_autogroups_as_sources\",\n\t\t\t// Test 9.3: autogroup:member + autogroup:tagged 
as sources (full tailnet)\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\", \"autogroup:tagged\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// All 5 nodes (10 IPs)\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t// Category 10: Multiple Rules with Mixed Types\n\t\t{\n\t\t\tname: \"cross_type_separate_rules\",\n\t\t\t// Test 10.1: Different source types in separate rules\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:database:5432\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": nil,\n\t\t\t},\n\t\t},\n\t\t// Category 11: Port Variations with Mixed Types\n\t\t{\n\t\t\tname: \"mixed_sources_with_port_range\",\n\t\t\t// Test 11.2: Mixed sources with port range\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\", \"tag:client\"], \"dst\": [\"tag:server:80-443\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t// Category 14: Multi-Rule Compounding\n\t\t{\n\t\t\tname: \"same_src_different_dests_two_rules\",\n\t\t\t// Test 14.1: Same src, different dests (2 rules)\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:database:5432\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"different_srcs_same_dest_two_rules\",\n\t\t\t// Test 14.6: Different srcs, same dest (2 rules)\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t// Two separate filter rules for each ACL rule\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: 
\"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t// Category 12: CIDR Host Combinations\n\t\t{\n\t\t\tname: \"cidr_host_plus_tag_as_sources\",\n\t\t\t// Test 12.1: CIDR host + tag as sources\n\t\t\t// internal (10.0.0.0/8) + tag:client\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"internal\", \"tag:client\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// CIDR host appears as-is in Srcs + tag:client IPs\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"10.0.0.0/8\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiple_cidr_hosts_as_sources\",\n\t\t\t// Test 12.2: Multiple CIDR hosts as sources\n\t\t\t// internal (10.0.0.0/8) + subnet24 (192.168.1.0/24)\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"internal\", \"subnet24\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// Both CIDR hosts appear in Srcs\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"10.0.0.0/8\",\n\t\t\t\t\t\t\t\"192.168.1.0/24\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"same_cidr_via_host_and_raw\",\n\t\t\t// Test 12.4: Same CIDR referenced via host alias and raw CIDR\n\t\t\t// internal (10.0.0.0/8) + 10.0.0.0/8 - should deduplicate\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"internal\", \"10.0.0.0/8\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// Same CIDR referenced 2 ways should deduplicate\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"10.0.0.0/8\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, 
ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t// Category 13: autogroup:self Deep Dive - Tests where autogroup:self works\n\t\t{\n\t\t\tname: \"wildcard_to_autogroup_self\",\n\t\t\t// Test 13.1: * → autogroup:self:*\n\t\t\t// CRITICAL: autogroup:self NARROWS Srcs even when source is wildcard\n\t\t\t// Only user-owned nodes receive filters; tagged nodes get empty\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"autogroup:self:*\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// Srcs narrowed to user1's own IPs (NOT wildcard CIDRs)\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// Dsts = user1's own IPs with all ports\n\t\t\t\t\t\t// NOTE: Tailscale emits plain IPs for autogroup:self destinations, Headscale uses CIDR notation\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// Tagged nodes receive NO filters for autogroup:self\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"wildcard_to_autogroup_self_specific_port\",\n\t\t\t// Test 13.2: * → autogroup:self:22\n\t\t\t// Specific port with autogroup:self\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"autogroup:self:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"autogroup_member_to_self\",\n\t\t\t// Test 13.5: autogroup:member → autogroup:self:*\n\t\t\t// autogroup:member is a valid source for autogroup:self\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"autogroup:self:*\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"specific_user_to_self\",\n\t\t\t// Test 13.8: kratail2tid@ → autogroup:self:*\n\t\t\t// Specific user email is a valid source for autogroup:self\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"kratail2tid@\"], \"dst\": [\"autogroup:self:*\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"group_to_self\",\n\t\t\t// Test 13.9: group:admins → autogroup:self:*\n\t\t\t// Groups are valid sources for autogroup:self\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"group:admins\"], \"dst\": [\"autogroup:self:*\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"wildcard_to_self_plus_tag\",\n\t\t\t// Test 13.16: * → [autogroup:self:*, tag:server:22]\n\t\t\t// Mixed destinations with autogroup:self - different Srcs for each\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"autogroup:self:*\", \"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// Self filter gets narrowed Srcs (user1's IPs only)\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// NOTE: Tailscale emits plain IPs for autogroup:self destinations, Headscale uses CIDR notation\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// Tag filter gets full wildcard Srcs\n\t\t\t\t\t\t// NOTE: Tailscale uses partitioned CGNAT CIDRs, Headscale uses full 100.64.0.0/10\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// Tag destinations use 
CIDR notation\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t// Category 14: More Multi-Rule Compounding\n\t\t{\n\t\t\tname: \"same_src_same_dest_different_ports_two_rules\",\n\t\t\t// Test 14.2: Same src, same dest, different ports (2 rules)\n\t\t\t// In Tailscale: MERGED into single filter entry with combined Dsts\n\t\t\t// Headscale now merges rules with identical SrcIPs and IPProto.\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:80\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t// Merged: Both rules combined\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"three_different_srcs_same_dest_different_ports\",\n\t\t\t// Test 14.21: 3 different sources → same dest, different ports\n\t\t\t// Each rule becomes a separate filter entry\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:web\"], \"dst\": [\"tag:server:80\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:database\"], \"dst\": [\"tag:server:443\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: 
[]int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"overlapping_dests_different_srcs_two_rules\",\n\t\t\t// Test 10.2: Overlapping destinations, different sources (2 rules)\n\t\t\t// Each rule creates its own filter entry on destination nodes\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"group:admins\"], \"dst\": [\"tag:server:*\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:tagged\"], \"dst\": [\"tag:server:*\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// Rule 1: group:admins → tag:server:*\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t// Rule 2: autogroup:tagged → tag:server:*\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"mixed_sources_comma_ports\",\n\t\t\t// Test 11.1: Mixed sources with comma-separated ports\n\t\t\t// Each port becomes a separate Dst entry\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\", \"tag:client\"], \"dst\": [\"tag:server:22,80,443\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// Each port is a separate Dst entry (6 total: 3 ports × 2 IPs)\n\t\t\t\t\t\tDstPorts: 
[]tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"full_autogroups_with_wildcard_and_specific_port\",\n\t\t\t// Test 11.4: Both autogroups with wildcard and specific port destinations\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:tagged\", \"autogroup:member\"], \"dst\": [\"tag:server:*\", \"tag:database:5432\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// All 5 nodes (10 IPs) as sources\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// Wildcard port → 0-65535\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": nil,\n\t\t\t},\n\t\t},\n\t\t// Category 13: More autogroup:self tests\n\t\t{\n\t\t\tname: \"wildcard_to_self_comma_ports\",\n\t\t\t// Test 13.3: * → autogroup:self:22,80,443\n\t\t\t// Comma-separated ports create separate Dsts entries\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": 
[\"autogroup:self:22,80,443\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// 6 Dsts: 3 ports × 2 IPs (autogroup:self uses plain IPs)\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"wildcard_to_self_port_range\",\n\t\t\t// Test 13.4: * → autogroup:self:80-443\n\t\t\t// Port range preserved as First/Last\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"autogroup:self:80-443\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// Port range preserved (autogroup:self uses plain IPs)\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 80, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 80, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"self_twice_separate_rules_merged\",\n\t\t\t// Test 13.36: Self twice in separate rules (merged)\n\t\t\t// * → autogroup:self:22\n\t\t\t// * → autogroup:self:80\n\t\t\t// Tailscale MERGES these into a single filter entry with 4 Dsts\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"autogroup:self:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"autogroup:self:80\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t// Merged: Both rules combined into 1 filter entry with 4 Dsts\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: 
[]int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t// Category 14: More Multi-Rule Compounding\n\t\t{\n\t\t\tname: \"same_src_different_dests_two_rules_distributed\",\n\t\t\t// Test 14.1: Same src, different dests (2 rules)\n\t\t\t// Rules distributed to different destination nodes\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:database:5432\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"different_srcs_same_dest_two_rules\",\n\t\t\t// Test 14.6: Different srcs, same dest (2 rules)\n\t\t\t// Creates 2 SEPARATE filter entries (not merged)\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:web\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    
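\n\t\t// Merge semantics exercised by the cases above and below, as asserted\n\t\t// in this table (a summary of the observed behavior, not a spec): rules\n\t\t// collapse into a single filter entry only when their resolved SrcIPs\n\t\t// and IPProto are identical; within an entry, SrcIPs are deduplicated\n\t\t// while DstPorts entries may repeat.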
nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"group_and_user_same_person_same_dest\",\n\t\t\t// Test 14.8: Group + user (same person) → same dest (2 rules)\n\t\t\t// Srcs DEDUPLICATED but Dsts NOT deduplicated\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"group:admins\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"kratail2tid@\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t// Merged: 1 filter entry with Srcs deduplicated and 4 Dsts (duplicated)\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"wildcard_to_self_plus_group\",\n\t\t\t// Test 13.20: * → [autogroup:self:*, group:admins:22]\n\t\t\t// user1 gets TWO filter entries (different Srcs)\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"autogroup:self:*\", \"group:admins:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t// Entry 1: autogroup:self with narrowed Srcs\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t// Entry 2: group:admins with full wildcard Srcs\n\t\t\t\t\t// NOTE: Tailscale uses partitioned CGNAT CIDRs, Headscale uses full 100.64.0.0/10\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"same_src_same_dest_different_ports_two_rules_merged\",\n\t\t\t// Test 14.2: Same src, same dest, different ports (2 rules)\n\t\t\t// MERGED into single filter entry with 4 Dsts\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": 
[\"tag:client\"], \"dst\": [\"tag:server:80\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t// Merged: Both rules combined\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"three_different_srcs_same_dest_different_ports\",\n\t\t\t// Test 14.21: 3 different srcs → same dest, different ports (3 rules)\n\t\t\t// Creates 3 SEPARATE filter entries\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:web\"], \"dst\": [\"tag:server:80\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:database\"], \"dst\": [\"tag:server:443\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"three_refs_same_user_same_dest_port\",\n\t\t\t// Test 14.22: 3 refs to same user → same dest:port (3 rules)\n\t\t\t// Srcs DEDUPLICATED, Dsts NOT deduplicated (6 entries)\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": 
[\"autogroup:member\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"group:admins\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"kratail2tid@\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t// Merged: 1 filter entry with Srcs deduplicated and 6 Dsts (not deduplicated)\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"same_src_three_different_dests\",\n\t\t\t// Test 14.23: Same src → 3 different dests (3 rules)\n\t\t\t// Each destination node receives its own filter entry\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:database:5432\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:web:80\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.94.92.91/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::ef01:5c81/128\", Ports: tailcfg.PortRange{First: 80, Last: 
80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"full_wildcard_plus_specific_rule\",\n\t\t\t// Test 14.36: Full wildcard + specific rule\n\t\t\t// BOTH rules create filter entries (wildcard does NOT subsume specific)\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t// Wildcard rule only\n\t\t\t\t\t{\n\t\t\t\t\t\t// NOTE: Tailscale uses partitioned CGNAT CIDRs and IPProto [0] (any).\n\t\t\t\t\t\t// Headscale uses full 100.64.0.0/10 and explicit IPProto list.\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t// TODO: Tailscale produces 2 entries: wildcard (IPProto [0]) + specific (IPProto [6,17,1,58])\n\t\t\t\t\t// Headscale produces 2 entries but with same IPProto\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"both_autogroups_to_wildcard\",\n\t\t\t// Test 14.42: Both autogroups → wildcard (full 
network)\n\t\t\t// Different Srcs = separate entries, even with identical Dsts\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:tagged\"], \"dst\": [\"*:*\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"*:*\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t// Entry 1: autogroup:tagged Srcs\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t// Entry 2: autogroup:member Srcs\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": 
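/* identical pair of entries on every node, since both rules target *:* */ 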
{\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"triple_src_ref_each_rule\",\n\t\t\t// Test 14.45: Triple src ref each rule\n\t\t\t// Sources deduplicated within each rule\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\", \"group:admins\", \"kratail2tid@\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:server\", \"webserver\", \"100.108.74.26\"], \"dst\": [\"group:admins:80\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t// Rule 2: tag:server + webserver + raw IP → group:admins (user1)\n\t\t\t\t\t{\n\t\t\t\t\t\t// Srcs deduplicated to 1 IP + IPv6 (all resolve to same tagged-server)\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t// Rule 1: autogroup:member + group:admins + user → tag:server\n\t\t\t\t\t{\n\t\t\t\t\t\t// Srcs deduplicated to user1's IPs (all 3 resolve to same user)\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"same_src_four_dests\",\n\t\t\t// Test 14.47: Same src → 4 dests\n\t\t\t// Same Srcs across 4 rules = merged into single filter entry per destination node\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"tag:database:5432\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"tag:web:80\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"webserver:443\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.94.92.91/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::ef01:5c81/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"overlapping_destinations_different_sources\",\n\t\t\t// Test 10.2: Overlapping destinations, different sources\n\t\t\t// Rules with same destination create SEPARATE filter entries, NOT merged\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"group:admins\"], \"dst\": [\"*:*\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:tagged\"], \"dst\": 
[\"*:*\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t// Entry 1: group:admins → *:*\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t// Entry 2: autogroup:tagged → *:*\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: 
tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"same_dest_node_via_tag_vs_host_source\",\n\t\t\t// Test 10.3: Same dest node via tag vs host source\n\t\t\t// Same destination with different sources = separate entries\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"webserver\"], \"dst\": [\"tag:server:80\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t// Entry 1: tag:client → :22\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t// Entry 2: webserver → :80 (host source expands to node IPs)\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, 
ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"three_rules_same_dest_different_sources\",\n\t\t\t// Test 10.4: 3 rules, same dest, different sources\n\t\t\t// 3 separate filter entries on the same destination node\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:80\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"tag:server:443\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t// Entry 1: * → :22\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t// Entry 2: tag:client → :80\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t// Entry 3: autogroup:member → :443\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"mixed_sources_in_multiple_rules\",\n\t\t\t// Test 10.5: Mixed sources in multiple rules\n\t\t\t// Sources within a rule are deduplicated\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\", \"tag:web\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\", \"group:admins\"], \"dst\": [\"tag:database:5432\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t// Rule 1: [tag:client, tag:web] → tag:server:22\n\t\t\t\t\t// Sources merged and deduplicated\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: 
tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t// Rule 2: [autogroup:member, group:admins] → tag:database:5432\n\t\t\t\t\t// Both resolve to user1, deduplicated\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"mixed_sources_with_port_range_11_2\",\n\t\t\t// Test 11.2: Mixed sources with port range\n\t\t\t// Port range preserved as First/Last\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"group:admins\", \"webserver\"], \"dst\": [\"tag:server:80-443\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// group:admins (IPv4+IPv6) + webserver (node IPs) = 4 Srcs\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"same_dest_node_different_ports_via_different_refs_2_2\",\n\t\t\t// Test 2.2: Same node referenced via tag and host with different ports\n\t\t\t// Same IP can appear multiple times in Dsts with different ports\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\", \"webserver:80\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// tag:server:22 adds IPv4 and IPv6\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t// webserver:80 expands to node IPs (both IPv4 and IPv6)\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: 
\"same_user_different_ports_via_email_and_group_2_6\",\n\t\t\t// Test 2.6: Same user referenced via email and group with different ports\n\t\t\t// Destinations are NOT deduplicated when ports differ\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"kratail2tid@:22\", \"group:admins:80\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// 4 entries: user1's IPv4 and IPv6 for EACH port (22 and 80)\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"diff_srcs_same_dest_14_6\",\n\t\t\t// Test 14.6: Different srcs, same dest (2 rules)\n\t\t\t// Different sources, same destination = 2 SEPARATE filter entries\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:web\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t// Entry 1: tag:client → :22\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t// Entry 2: tag:web → :22\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"group_plus_user_same_person_same_dest_14_8\",\n\t\t\t// Test 14.8: Group + user (same person) → same dest (2 rules)\n\t\t\t// Same person via group + user email = 1 filter entry, Srcs MERGED, Dsts NOT merged\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"group:admins\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"kratail2tid@\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: 
map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t// Merged: 1 filter entry with 4 Dsts (duplicated)\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"self_overlap_with_explicit_user_13_86\",\n\t\t\t// Test 13.86: self:22 + user:22 (overlap on same node)\n\t\t\t// Different Srcs for self vs explicit user = separate entries\n\t\t\t// NOTE: Tailscale produces 2 entries, one with wildcard CGNAT Srcs, one with user1's IPs.\n\t\t\t// Headscale produces similar with full CGNAT range (100.64.0.0/10).\n\t\t\t// In Headscale, autogroup:self entry comes FIRST, explicit user SECOND.\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"autogroup:self:22\", \"kratail2tid@:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t// Entry 1: * → autogroup:self:22 (Srcs narrowed to user1's IPs, no CIDR in DstPorts)\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t// Entry 2: * → kratail2tid@:22 (wildcard Srcs, CIDR in DstPorts)\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"self_twice_different_ports_13_36\",\n\t\t\t// Test 13.36: Self twice in separate rules (merged)\n\t\t\t// Multiple self rules with same source = MERGED into single filter entry\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"autogroup:self:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"autogroup:self:80\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t// Merged: 1 filter entry with 4 Dsts\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"six_rules_mixing_all_patterns\",\n\t\t\t// Test 14.50: 6 rules mixing all patterns\n\t\t\t// Self-referential rules work, different Srcs create separate entries\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:server\"], \"dst\": [\"tag:server:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:client:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:database\"], \"dst\": [\"tag:database:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"tag:web\"], \"dst\": [\"tag:web:22\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"*:80\"]},\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"autogroup:member:443\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t// Entry 1: autogroup:member → *:80\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t// Entry 2: * → autogroup:member:443 (user1 is in autogroup:member)\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t// Entry 1: tag:server → tag:server:22 (self-reference)\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t// Entry 2: autogroup:member → *:80\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: 
tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t// Entry 1: tag:client → tag:client:22 (self-reference)\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.80.238.75/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::7901:ee86/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t// Entry 2: autogroup:member → *:80\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t// Entry 1: tag:database → tag:database:22 (self-reference)\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t// Entry 2: autogroup:member → *:80\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t// Entry 1: tag:web → tag:web:22 (self-reference)\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.94.92.91/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::ef01:5c81/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t// Entry 2: autogroup:member → *:80\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Category 1: Mixed Sources\n\t\t{\n\t\t\tname: \"autogroup_member_plus_tag_client_1_1\",\n\t\t\t// Test 1.1: autogroup:member + tag:client\n\t\t\t// Sources are merged into single Srcs array\n\t\t\tpolicy: 
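/* makePolicy: suite helper, presumably wrapping these raw ACL entries in a complete policy document */ 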
makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\", \"tag:client\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// autogroup:member (user1) + tag:client = merged\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"group_admins_plus_tag_client_1_3\",\n\t\t\t// Test 1.3: group:admins + tag:client\n\t\t\t// Sources are merged into single Srcs array\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"group:admins\", \"tag:client\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// group:admins (user1) + tag:client = merged\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"user_email_plus_tag_client_1_4\",\n\t\t\t// Test 1.4: kratail2tid@ + tag:client\n\t\t\t// User email expanded to IPs + tag = merged\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"kratail2tid@\", \"tag:client\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"host_plus_tag_client_1_5\",\n\t\t\t// Test 1.5: webserver (host) + tag:client\n\t\t\t// Host expands to node IPs + tag = merged\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": 
\"accept\", \"src\": [\"webserver\", \"tag:client\"], \"dst\": [\"tag:database:5432\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// webserver (tagged-server IPs) + tag:client = merged\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"raw_ip_plus_tag_client_1_6\",\n\t\t\t// Test 1.6: 100.90.199.68 (raw IP) + tag:client\n\t\t\t// Raw IP expands to node's both IPs + tag = merged\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"100.90.199.68\", \"tag:client\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// Raw IP expands to user1's IPs + tag:client = merged (4 IPs)\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"user1_three_ways_1_7\",\n\t\t\t// Test 1.7: autogroup:member + group:admins + kratail2tid@\n\t\t\t// Same user referenced 3 ways = deduplicated to 2 IPs\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\", \"group:admins\", \"kratail2tid@\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// All 3 references resolve to user1's IPs, deduplicated\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Category 2: Mixed Destinations\n\t\t{\n\t\t\tname: \"tag_server_22_plus_tag_database_5432_2_1\",\n\t\t\t// Test 2.1: tag:server:22 + tag:database:5432\n\t\t\t// 
Multiple destinations in same rule, distributed to each node\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\", \"tag:database:5432\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"tag_server_22_plus_raw_ip_80_2_3\",\n\t\t\t// Test 2.3: tag:server:22 + 100.108.74.26:80 (tag + raw IP, same node)\n\t\t\t// Same node via tag and raw IP, different ports = NOT deduplicated in Dsts\n\t\t\t// Raw IP destination expands to include node's IPv6\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\", \"100.108.74.26:80\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// tag:server:22 adds IPv4 and IPv6\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t// raw IP:80 expands to both IPs\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"group_admins_22_plus_tag_server_80_2_4\",\n\t\t\t// Test 2.4: group:admins:22 + tag:server:80\n\t\t\t// User destination on port 22, tag destination on port 80\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"group:admins:22\", \"tag:server:80\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"webserver_22_plus_database_5432_2_7\",\n\t\t\t// Test 2.7: webserver:22 + database:5432 (multiple hosts)\n\t\t\t// Multiple host destinations\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"webserver:22\", \"database:5432\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// webserver host expands to tagged-server's IPs\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// database host expands to tagged-db's IPs\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Category 3: Overlapping References\n\t\t{\n\t\t\tname: \"user1_three_ways_source_3_2\",\n\t\t\t// Test 3.2: user1 referenced 3 ways as source\n\t\t\t// All resolve to same IPs, deduplicated\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\", \"kratail2tid@\", \"group:admins\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// All 3 references resolve to user1, deduplicated to 2 IPs\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"same_ip_port_tag_and_host_dest_3_3\",\n\t\t\t// Test 3.3: Same IP:port via tag and host as dest\n\t\t\t// Same IP:port referenced two ways = NOT deduplicated\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\", \"webserver:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// tag:server:22 adds IPv4 and IPv6\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t// webserver:22 also expands to same IPs - NOT deduplicated\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"same_ip_port_tag_and_raw_ip_dest_3_4\",\n\t\t\t// Test 3.4: Same IP:port via tag and raw IP\n\t\t\t// Raw IP also expands to both IPs when matching a node\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\", \"100.108.74.26:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// tag:server:22 adds IPv4 and IPv6\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t// raw IP also expands to both IPs (NOT deduplicated)\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Category 4: Cross-Type Source→Destination Combinations\n\t\t{\n\t\t\tname: \"raw_ip_to_tag_server_4_7\",\n\t\t\t// Test 4.7: 100.90.199.68 → tag:server:22\n\t\t\t// Raw IP as source, tag as destination\n\t\t\t// In Headscale, raw IP that matches a node expands to include 
IPv6\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"100.90.199.68\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"tag_client_to_raw_ip_4_8\",\n\t\t\t// Test 4.8: tag:client → 100.108.74.26:22\n\t\t\t// Tag as source, raw IP as destination\n\t\t\t// In Headscale, raw IP destination that matches a node expands to include IPv6\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"100.108.74.26:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Category 7: Maximum Combinations (\"Kitchen Sink\")\n\t\t{\n\t\t\tname: \"all_source_types_to_tag_server_7_1\",\n\t\t\t// Test 7.1: ALL source types → tag:server:22\n\t\t\t// Mix of all source types in one rule\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\", \"autogroup:tagged\", \"group:admins\", \"tag:client\", \"webserver\", \"100.74.60.128\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// All sources merged: user1, all tagged, webserver, database IP\n\t\t\t\t\t\t// (deduplicated and sorted: IPv4 block first, then IPv6, as in 5.1a)\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, 
ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Category 8: Redundancy Stress Tests\n\t\t{\n\t\t\tname: \"user1_referenced_5_ways_8_1\",\n\t\t\t// Test 8.1: user1 referenced 5 ways\n\t\t\t// All references deduplicated to user1's 2 IPs\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\", \"group:admins\", \"group:developers\", \"kratail2tid@\", \"100.90.199.68\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// All 5 references resolve to user1's node; the raw IP also\n\t\t\t\t\t\t// expands to both of its addresses, so dedup leaves just 2 IPs\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"tagged_server_3_ways_source_8_2\",\n\t\t\t// Test 8.2: tagged-server referenced 3 ways as source\n\t\t\t// tag:server + webserver + raw IP = deduplicated\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:server\", \"webserver\", \"100.108.74.26\"], \"dst\": [\"tag:database:5432\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// All 3 references resolve to tagged-server's IPs, deduplicated\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"same_ip_port_3_ways_dest_8_5\",\n\t\t\t// Test 8.5: Same IP:port referenced 3 ways as destination\n\t\t\t// tag:server:22 + webserver:22 + 100.108.74.26:22\n\t\t\t// Destinations are NOT deduplicated, raw IP also expands\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\", \"webserver:22\", \"100.108.74.26:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// tag:server:22 adds both IPs\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, 
Last: 22}},\n\t\t\t\t\t\t\t// webserver:22 also adds both IPs (NOT deduplicated)\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t// raw IP also adds both IPs (NOT deduplicated)\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Category 12: CIDR Host Combinations\n\t\t{\n\t\t\tname: \"cidr_subnet_plus_tag_as_sources_12_3\",\n\t\t\t// Test 12.3: internal (CIDR host) + tag as sources\n\t\t\t// External CIDR doesn't match nodes, tag does\n\t\t\tpolicy: makePolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"internal\", \"tag:client\"], \"dst\": [\"tag:server:22\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// internal (10.0.0.0/8) + tag:client IPs\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"10.0.0.0/8\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// ===========================================\n\t\t// Category 5: Order Effects\n\t\t// ===========================================\n\t\t// Test 5.1a: Source Order - [tag:client, tag:web]\n\t\t{\n\t\t\tname: \"source_order_client_web_5_1a\",\n\t\t\t// Test that order of sources doesn't affect output\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"tag:client\", \"tag:web\"], \"dst\": [\"tag:server:22\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// Sources merged and sorted: IPv4 first (sorted), then IPv6 (sorted)\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 5.1b: Source Order Reversed - [tag:web, tag:client]\n\t\t{\n\t\t\tname: \"source_order_web_client_5_1b\",\n\t\t\t// Same as 5.1a but reversed order - should produce identical output\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"tag:web\", \"tag:client\"], \"dst\": 
[\"tag:server:22\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// Should be identical to 5.1a - order doesn't matter\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 5.2a: Destination Order - [tag:server:22, tag:database:80]\n\t\t{\n\t\t\tname: \"dest_order_server_db_5_2a\",\n\t\t\t// Test destination order - each node should get only its portion\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\", \"tag:database:80\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 5.2b: Destination Order Reversed - [tag:database:80, tag:server:22]\n\t\t{\n\t\t\tname: \"dest_order_db_server_5_2b\",\n\t\t\t// Same as 5.2a but reversed - should produce identical per-node filters\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:database:80\", \"tag:server:22\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, 
ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 5.3a: Mixed Source Types Order - [autogroup:member, tag:client]\n\t\t{\n\t\t\tname: \"mixed_source_order_member_client_5_3a\",\n\t\t\t// Test mixed source types order\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"autogroup:member\", \"tag:client\"], \"dst\": [\"tag:server:22\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// Sources sorted: IPv4 first, then IPv6\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 5.3b: Mixed Source Types Order Reversed - [tag:client, autogroup:member]\n\t\t{\n\t\t\tname: \"mixed_source_order_client_member_5_3b\",\n\t\t\t// Same as 5.3a but reversed - should produce identical output\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"tag:client\", \"autogroup:member\"], \"dst\": [\"tag:server:22\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// Should be identical to 5.3a\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// ===========================================\n\t\t// Category 6: Edge Cases\n\t\t// ===========================================\n\t\t// Test 6.3: Empty group as source - no filters expected\n\t\t{\n\t\t\tname: \"empty_group_source_6_3\",\n\t\t\t// group:empty has no members, so no filters should be generated\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"group:empty\"], \"dst\": [\"tag:server:22\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         
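// group:empty expands to zero IPs, so no node gets a filter\n\t\t\t\t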
nil,\n\t\t\t\t\"tagged-server\": nil, // No filter because source group is empty\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t// Test 6.5: CIDR host (internal = 10.0.0.0/8) as source\n\t\t{\n\t\t\tname: \"cidr_host_source_6_5\",\n\t\t\t// Host \"internal\" defined as 10.0.0.0/8 - CIDR goes directly into Srcs\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"internal\"], \"dst\": [\"tag:server:22\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"10.0.0.0/8\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 6.6: CIDR host as destination - no tailnet nodes match\n\t\t{\n\t\t\tname: \"cidr_host_dest_6_6\",\n\t\t\t// internal (10.0.0.0/8) as destination - no tailnet nodes in this range\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"internal:22\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t// No nodes match 10.0.0.0/8, so no filters generated\n\t\t\t},\n\t\t},\n\n\t\t// ===========================================\n\t\t// Category 9: All Tags + All Autogroups\n\t\t// ===========================================\n\t\t// Test 9.1: All 4 tags as sources\n\t\t{\n\t\t\tname: \"all_four_tags_sources_9_1\",\n\t\t\t// All 4 tags combined as sources\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"tag:server\", \"tag:client\", \"tag:database\", \"tag:web\"], \"dst\": [\"tag:server:22\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// 4 tags = 8 IPs (4 IPv4 + 4 IPv6, deduplicated)\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 9.2: All 4 tags as destinations\n\t\t{\n\t\t\tname: \"all_four_tags_dests_9_2\",\n\t\t\t// All 4 tags as destinations - each node gets only its own IP:port\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": 
[\"tag:server:22\", \"tag:client:22\", \"tag:database:22\", \"tag:web:22\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil, // Not a destination\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.80.238.75/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::7901:ee86/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.94.92.91/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::ef01:5c81/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 9.3: Both autogroups as sources\n\t\t{\n\t\t\tname: \"both_autogroups_sources_9_3\",\n\t\t\t// autogroup:member + autogroup:tagged = full tailnet coverage\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"autogroup:member\", \"autogroup:tagged\"], \"dst\": [\"tag:server:22\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// Full tailnet: 5 nodes = 10 IPs (5 IPv4 + 5 IPv6)\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: 
tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// ===========================================\n\t\t// Category 10: Multiple Rules with Mixed Types\n\t\t// ===========================================\n\t\t// Test 10.1: Cross-type in separate rules\n\t\t{\n\t\t\tname: \"cross_type_separate_rules_10_1\",\n\t\t\t// Rule 1: autogroup:member → tag:server:22\n\t\t\t// Rule 2: tag:client → group:admins:80\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:developers\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:empty\": []\n\t\t\t\t},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\n\t\t\t\t\t\"webserver\": \"100.108.74.26\",\n\t\t\t\t\t\"database\": \"100.74.60.128\",\n\t\t\t\t\t\"internal\": \"10.0.0.0/8\",\n\t\t\t\t\t\"subnet24\": \"192.168.1.0/24\"\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"tag:server:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"group:admins:80\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t// user1 gets filter from Rule 2 (tag:client → group:admins:80)\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// tagged-server gets filter from Rule 1 (autogroup:member → tag:server:22)\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 10.2: Overlapping destinations, different sources\n\t\t{\n\t\t\tname: \"overlapping_dests_diff_sources_10_2\",\n\t\t\t// Rule 1: group:admins → tag:server:22\n\t\t\t// Rule 2: autogroup:tagged → tag:server:22\n\t\t\t// Same destination, different sources - creates separate filter entries\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:developers\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:empty\": []\n\t\t\t\t},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": 
[\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\n\t\t\t\t\t\"webserver\": \"100.108.74.26\",\n\t\t\t\t\t\"database\": \"100.74.60.128\",\n\t\t\t\t\t\"internal\": \"10.0.0.0/8\",\n\t\t\t\t\t\"subnet24\": \"192.168.1.0/24\"\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admins\"], \"dst\": [\"tag:server:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:tagged\"], \"dst\": [\"tag:server:22\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t// tagged-server gets TWO separate filter entries (one per rule)\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t// Rule 1: group:admins\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t// Rule 2: autogroup:tagged\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 10.3: Three rules to same destination\n\t\t{\n\t\t\tname: \"three_rules_same_dest_10_3\",\n\t\t\t// Rule 1: autogroup:member → tag:server:22\n\t\t\t// Rule 2: tag:client → tag:server:22\n\t\t\t// Rule 3: group:admins → tag:server:22\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:developers\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:empty\": []\n\t\t\t\t},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\n\t\t\t\t\t\"webserver\": \"100.108.74.26\",\n\t\t\t\t\t\"database\": \"100.74.60.128\",\n\t\t\t\t\t\"internal\": \"10.0.0.0/8\",\n\t\t\t\t\t\"subnet24\": \"192.168.1.0/24\"\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"tag:server:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admins\"], \"dst\": [\"tag:server:22\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": 
nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t// tagged-server gets TWO filter entries (Rules 1+3 merged, Rule 2 separate)\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t// Rules 1+3: autogroup:member and group:admins (same SrcIPs) merged\n\t\t\t\t\t// DstPorts combined from both rules (duplicates included)\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t// Rule 2: tag:client (different SrcIPs, not merged)\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// ===========================================\n\t\t// Category 11: Port Variations with Mixed Types\n\t\t// ===========================================\n\t\t// Test 11.1: Mixed sources with comma ports\n\t\t{\n\t\t\tname: \"mixed_sources_comma_ports_11_1\",\n\t\t\t// Comma-separated ports create separate Dsts entries\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"autogroup:member\", \"tag:client\"], \"dst\": [\"tag:server:22,80,443\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// 3 ports × 2 IPs = 6 Dsts entries\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 11.2: Mixed sources with port range\n\t\t{\n\t\t\tname: \"mixed_sources_port_range_11_2\",\n\t\t\t// Port ranges preserved as First/Last in Dsts\n\t\t\tpolicy: 
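// (contrast with 11.1: a dash range stays one PortRange, not one entry per port)\n\t\t\t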
makePolicy(`{\"action\": \"accept\", \"src\": [\"group:admins\", \"webserver\"], \"dst\": [\"tag:server:80-443\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// group:admins (IPv4+IPv6) + webserver (IPv4+IPv6 since it matches tagged-server node)\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 11.4: Full autogroups with wildcard port\n\t\t{\n\t\t\tname: \"autogroups_wildcard_port_11_4\",\n\t\t\t// Wildcard port (*) expands to 0-65535\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"autogroup:tagged\", \"autogroup:member\"], \"dst\": [\"tag:server:*\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// Full tailnet: 5 nodes = 10 IPs\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// ===========================================\n\t\t// Category 13: autogroup:self Deep Dive\n\t\t// ===========================================\n\t\t// Test 13.1: Wildcard → self:*\n\t\t{\n\t\t\tname: \"wildcard_to_self_all_ports_13_1\",\n\t\t\t// autogroup:self NARROWS Srcs even when source is wildcard\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"autogroup:self:*\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t// Only user1 (user-owned) receives filter\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// Srcs NARROWED to user1's IPs only (not wildcard CIDRs!)\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: 
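// the same protocol set asserted by every accept rule in this table\n\t\t\t\t\t\t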
[]int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// Tagged nodes receive NO filters\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t// Test 13.2: Wildcard → self:22\n\t\t{\n\t\t\tname: \"wildcard_to_self_port_22_13_2\",\n\t\t\t// Specific port with self\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"autogroup:self:22\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t// Test 13.5: autogroup:member → self:*\n\t\t{\n\t\t\tname: \"member_to_self_13_5\",\n\t\t\t// autogroup:member works with autogroup:self\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"autogroup:self:*\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t// Test 13.8: Specific user → self:*\n\t\t{\n\t\t\tname: \"specific_user_to_self_13_8\",\n\t\t\t// Specific user email works with autogroup:self\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"kratail2tid@\"], \"dst\": [\"autogroup:self:*\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\t\t// Test 13.9: group:admins → self:*\n\t\t{\n\t\t\tname: \"group_to_self_13_9\",\n\t\t\t// Groups work with autogroup:self\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"group:admins\"], \"dst\": [\"autogroup:self:*\"]}`),\n\t\t\twantFilters: 
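// as in 13.5 and 13.8, only the owning user's node gets a filter\n\t\t\t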
map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t},\n\t\t},\n\n\t\t// ===========================================\n\t\t// Category 14: Multi-Rule Compounding\n\t\t// ===========================================\n\t\t// Test 14.1: Same src, different dests (2 rules)\n\t\t{\n\t\t\tname: \"same_src_diff_dests_14_1\",\n\t\t\t// Same source, different destinations = separate filter entries per dest node\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:developers\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:empty\": []\n\t\t\t\t},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\n\t\t\t\t\t\"webserver\": \"100.108.74.26\",\n\t\t\t\t\t\"database\": \"100.74.60.128\",\n\t\t\t\t\t\"internal\": \"10.0.0.0/8\",\n\t\t\t\t\t\"subnet24\": \"192.168.1.0/24\"\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:database:5432\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 14.2: Same src, same dest, different ports (2 rules)\n\t\t{\n\t\t\tname: \"same_src_same_dest_diff_ports_merged_14_2\",\n\t\t\t// Same source + dest node + different ports\n\t\t\t// MERGED into 1 filter entry with 4 Dsts\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:developers\": 
[\"kratail2tid@\"],\n\t\t\t\t\t\"group:empty\": []\n\t\t\t\t},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\n\t\t\t\t\t\"webserver\": \"100.108.74.26\",\n\t\t\t\t\t\"database\": \"100.74.60.128\",\n\t\t\t\t\t\"internal\": \"10.0.0.0/8\",\n\t\t\t\t\t\"subnet24\": \"192.168.1.0/24\"\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:80\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t// Merged: 1 entry with 4 DstPorts\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 14.6: Different srcs, same dest (2 rules)\n\t\t{\n\t\t\tname: \"diff_srcs_same_dest_14_6\",\n\t\t\t// Different sources, same dest = 2 SEPARATE filter entries\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:developers\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:empty\": []\n\t\t\t\t},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\n\t\t\t\t\t\"webserver\": \"100.108.74.26\",\n\t\t\t\t\t\"database\": \"100.74.60.128\",\n\t\t\t\t\t\"internal\": \"10.0.0.0/8\",\n\t\t\t\t\t\"subnet24\": \"192.168.1.0/24\"\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:web\"], \"dst\": [\"tag:server:22\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t// TWO separate filter entries\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t// Entry 1: tag:client\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t// Entry 2: 
tag:web\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 14.8: Group + user (same person) → same dest (2 rules)\n\t\t{\n\t\t\tname: \"group_user_same_person_same_dest_14_8\",\n\t\t\t// Group + user (same person)\n\t\t\t// MERGED into 1 filter entry (Srcs deduplicated, Dsts NOT)\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:developers\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:empty\": []\n\t\t\t\t},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\n\t\t\t\t\t\"webserver\": \"100.108.74.26\",\n\t\t\t\t\t\"database\": \"100.74.60.128\",\n\t\t\t\t\t\"internal\": \"10.0.0.0/8\",\n\t\t\t\t\t\"subnet24\": \"192.168.1.0/24\"\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admins\"], \"dst\": [\"tag:server:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"kratail2tid@\"], \"dst\": [\"tag:server:22\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t// Merged: 1 entry with deduplicated Srcs but duplicated Dsts\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// ===========================================\n\t\t// Category 7: Kitchen Sink Tests\n\t\t// ===========================================\n\t\t// Test 7.2: tag:client → ALL destination types\n\t\t{\n\t\t\tname: \"all_dest_types_7_2\",\n\t\t\t// Test ALL destination types from one source\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:developers\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:empty\": []\n\t\t\t\t},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\n\t\t\t\t\t\"webserver\": \"100.108.74.26\",\n\t\t\t\t\t\"database\": \"100.74.60.128\",\n\t\t\t\t\t\"internal\": \"10.0.0.0/8\",\n\t\t\t\t\t\"subnet24\": 
\"192.168.1.0/24\"\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\", \"tag:database:5432\", \"webserver:80\", \"database:443\", \"group:admins:8080\", \"kratail2tid@:3000\", \"100.108.74.26:9000\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t// user1 gets entries for user:3000 and group:8080\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 8080, Last: 8080}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 8080, Last: 8080}},\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 3000, Last: 3000}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 3000, Last: 3000}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// tagged-server gets tag:server:22, webserver:80, raw IP:9000\n\t\t\t\t// Note: Host aliases that match node IPs get expanded to include IPv6\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 9000, Last: 9000}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 9000, Last: 9000}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// tagged-db gets tag:database:5432 and database:443\n\t\t\t\t// Note: Host aliases that match node IPs get expanded to include IPv6\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 7.3: 10 different sources → *:*\n\t\t{\n\t\t\tname: \"ten_sources_to_wildcard_7_3\",\n\t\t\t// 10 different source types all deduplicated\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:developers\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:empty\": 
[]\n\t\t\t\t},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\n\t\t\t\t\t\"webserver\": \"100.108.74.26\",\n\t\t\t\t\t\"database\": \"100.74.60.128\",\n\t\t\t\t\t\"internal\": \"10.0.0.0/8\",\n\t\t\t\t\t\"subnet24\": \"192.168.1.0/24\"\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\", \"autogroup:tagged\", \"group:admins\", \"group:developers\", \"kratail2tid@\", \"tag:client\", \"tag:web\", \"tag:database\", \"webserver\", \"database\"], \"dst\": [\"*:*\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t// All nodes receive the deduplicated sources (including tagged-client since it's in *:*)\n\t\t\t\t// The sources are: autogroup:member, autogroup:tagged, group:admins, group:developers,\n\t\t\t\t// kratail2tid@, tag:client, tag:web, tag:database, webserver, database\n\t\t\t\t// autogroup:tagged includes ALL tagged nodes: tagged-server, tagged-client, tagged-db, tagged-web\n\t\t\t\t// All 5 nodes' IPs are included in the sources\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
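// same deduplicated source set as above: all five nodes' IPv4+IPv6 addresses\n\t\t\t\t\t\t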
[]string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// ===========================================\n\t\t// Category 12: CIDR Host Combinations\n\t\t// ===========================================\n\t\t// Test 12.1: CIDR host + tag as sources\n\t\t{\n\t\t\tname: \"cidr_host_plus_tag_sources_12_1\",\n\t\t\t// CIDR host (10.0.0.0/8) combined with tag as sources\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:developers\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:empty\": []\n\t\t\t\t},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\n\t\t\t\t\t\"webserver\": \"100.108.74.26\",\n\t\t\t\t\t\"database\": \"100.74.60.128\",\n\t\t\t\t\t\"internal\": \"10.0.0.0/8\",\n\t\t\t\t\t\"subnet24\": \"192.168.1.0/24\"\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"internal\", \"tag:client\"], \"dst\": [\"tag:server:22\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"10.0.0.0/8\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 12.2: Multiple CIDR hosts as sources\n\t\t{\n\t\t\tname: \"multiple_cidr_hosts_sources_12_2\",\n\t\t\t// Multiple CIDR hosts (10.0.0.0/8 + 192.168.1.0/24)\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": 
{\n\t\t\t\t\t\"group:admins\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:developers\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:empty\": []\n\t\t\t\t},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\n\t\t\t\t\t\"webserver\": \"100.108.74.26\",\n\t\t\t\t\t\"database\": \"100.74.60.128\",\n\t\t\t\t\t\"internal\": \"10.0.0.0/8\",\n\t\t\t\t\t\"subnet24\": \"192.168.1.0/24\"\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"internal\", \"subnet24\"], \"dst\": [\"tag:server:22\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"10.0.0.0/8\",\n\t\t\t\t\t\t\t\"192.168.1.0/24\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 12.4: Host CIDR + raw CIDR (same value) as sources\n\t\t{\n\t\t\tname: \"host_cidr_plus_raw_cidr_same_12_4\",\n\t\t\t// Same CIDR via host alias and raw value - should deduplicate\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:developers\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:empty\": []\n\t\t\t\t},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\n\t\t\t\t\t\"webserver\": \"100.108.74.26\",\n\t\t\t\t\t\"database\": \"100.74.60.128\",\n\t\t\t\t\t\"internal\": \"10.0.0.0/8\",\n\t\t\t\t\t\"subnet24\": \"192.168.1.0/24\"\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"internal\", \"10.0.0.0/8\"], \"dst\": [\"tag:server:22\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t// Deduplicated - only one 10.0.0.0/8 entry\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"10.0.0.0/8\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// ===========================================\n\t\t// Additional Missing Tests from 09-mixed-scenarios.md\n\t\t// ===========================================\n\t\t// Test 6.2: * → [webserver:22, database:5432]\n\t\t// Wildcard source + multiple host destinations\n\t\t{\n\t\t\tname:   \"wildcard_to_multiple_hosts_6_2\",\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": 
[\"*\"], \"dst\": [\"webserver:22\", \"database:5432\"]}`),\n\t\t\t// Wildcard `*` expands to the tailnet address space (Headscale uses 100.64.0.0/10 and fd7a:115c:a1e0::/48)\n\t\t\t// Host destinations are properly distributed to matching nodes\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t// tagged-server gets webserver:22 (since webserver = 100.108.74.26 = tagged-server)\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// NOTE: Tailscale uses partitioned CGNAT CIDRs, Headscale uses full 100.64.0.0/10:\n\t\t\t\t\t\t\t// \"100.115.94.0/23\", \"100.115.96.0/19\", ..., \"fd7a:115c:a1e0::/48\"\n\t\t\t\t\t\t\t// TODO: Host destination is IPv4-only in Tailscale, but Headscale\n\t\t\t\t\t\t\t// resolves host aliases to node IPs and includes both IPv4+IPv6\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// tagged-db gets database:5432 (since database = 100.74.60.128 = tagged-db)\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// TODO: Host destination is IPv4-only in Tailscale\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 7.4: * → 9 destinations (multiple per node)\n\t\t// Destinations: tag:server:22, tag:server:80, tag:server:443, tag:database:5432,\n\t\t//               tag:database:3306, tag:web:80, tag:web:443, webserver:8080, database:8080\n\t\t{\n\t\t\tname:   \"wildcard_to_9_destinations_7_4\",\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"tag:server:22\", \"tag:server:80\", \"tag:server:443\", \"tag:database:5432\", \"tag:database:3306\", \"tag:web:80\", \"tag:web:443\", \"webserver:8080\", \"database:8080\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t// tagged-server gets: tag:server:22/80/443 + webserver:8080\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// webserver:8080 (host alias - Headscale includes IPv4+IPv6)\n\t\t\t\t\t\t\t// TODO: Tailscale host destinations are IPv4-only\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 8080, Last: 8080}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 8080, Last: 8080}},\n\t\t\t\t\t\t\t// tag:server:22 (IPv4)\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t// tag:server:80 (IPv4)\n\t\t\t\t\t\t\t{IP: 
\"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t// tag:server:443 (IPv4)\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t\t// tag:server:22 (IPv6)\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t// tag:server:80 (IPv6)\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t// tag:server:443 (IPv6)\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// tagged-db gets: tag:database:5432/3306 + database:8080\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// database:8080 (host alias - Headscale includes IPv4+IPv6)\n\t\t\t\t\t\t\t// TODO: Tailscale host destinations are IPv4-only\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 8080, Last: 8080}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 8080, Last: 8080}},\n\t\t\t\t\t\t\t// tag:database:5432 (IPv4)\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t// tag:database:3306 (IPv4)\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 3306, Last: 3306}},\n\t\t\t\t\t\t\t// tag:database:5432 (IPv6)\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t// tag:database:3306 (IPv6)\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 3306, Last: 3306}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// tagged-web gets: tag:web:80/443\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// tag:web:80 (IPv4)\n\t\t\t\t\t\t\t{IP: \"100.94.92.91/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t// tag:web:443 (IPv4)\n\t\t\t\t\t\t\t{IP: \"100.94.92.91/32\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t\t// tag:web:80 (IPv6)\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::ef01:5c81/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t// tag:web:443 (IPv6)\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::ef01:5c81/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 7.5: MANY sources → MANY destinations\n\t\t// Sources: autogroup:member, group:admins, kratail2tid@, tag:client, tag:web, 100.80.238.75, 100.94.92.91\n\t\t// Destinations: tag:server:22, webserver:80, 100.108.74.26:443, group:admins:8080, kratail2tid@:9000\n\t\t{\n\t\t\tname:   \"many_sources_many_destinations_7_5\",\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"autogroup:member\", \"group:admins\", \"kratail2tid@\", \"tag:client\", \"tag:web\", \"100.80.238.75\", \"100.94.92.91\"], \"dst\": 
[\"tag:server:22\", \"webserver:80\", \"100.108.74.26:443\", \"group:admins:8080\", \"kratail2tid@:9000\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t// user1 gets: group:admins:8080 + kratail2tid@:9000\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// kratail2tid@:9000\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 9000, Last: 9000}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 9000, Last: 9000}},\n\t\t\t\t\t\t\t// group:admins:8080\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 8080, Last: 8080}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 8080, Last: 8080}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// tagged-server gets: tag:server:22 + webserver:80 + 100.108.74.26:443\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// webserver:80 (host alias matches tagged-server, includes IPv6)\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t// tag:server:22\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t// 100.108.74.26:443 (raw IP matches node, so Headscale includes IPv6)\n\t\t\t\t\t\t\t// TODO: Tailscale raw IP destinations are IPv4-only\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 8.3: tagged-db referenced 3 ways as source\n\t\t// Sources: tag:database, database (host alias), 100.74.60.128 (raw IP)\n\t\t// All 3 resolve to tagged-db - should be deduplicated in Srcs\n\t\t{\n\t\t\tname:   \"tagged_db_3_ways_source_8_3\",\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"tag:database\", \"database\", \"100.74.60.128\"], \"dst\": [\"tag:server:22\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t// tagged-server receives filter\n\t\t\t\t// Srcs should be deduplicated: tag adds IPv6, host/raw IP are IPv4-only\n\t\t\t\t\"tagged-server\": 
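// one merged entry below: the three source refs collapse to a single IPv4+IPv6 pair\n\t\t\t\t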
{\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 8.4: autogroup:tagged + all 4 tags as sources\n\t\t// Sources: autogroup:tagged, tag:server, tag:client, tag:database, tag:web\n\t\t// autogroup:tagged covers all 4 tags, so individual tags are redundant\n\t\t// Should deduplicate to just 8 IPs (4 nodes × 2 IPs each)\n\t\t{\n\t\t\tname:   \"autogroup_tagged_plus_all_4_tags_8_4\",\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"autogroup:tagged\", \"tag:server\", \"tag:client\", \"tag:database\", \"tag:web\"], \"dst\": [\"autogroup:member:22\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t// user1 (autogroup:member) receives the filter\n\t\t\t\t// Srcs = all 4 tagged nodes deduplicated = 8 IPs\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// ===========================================\n\t\t// Additional Missing Tests - Batch 2\n\t\t// ===========================================\n\t\t// Test 1.8: tag:server + webserver (same IP two ways as sources)\n\t\t{\n\t\t\tname:   \"tag_server_plus_webserver_same_ip_1_8\",\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"tag:server\", \"webserver\"], \"dst\": [\"tag:client:22\"]}`),\n\t\t\t// tag:server and webserver both resolve to tagged-server (100.108.74.26)\n\t\t\t// Sources should be deduplicated\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t// tagged-client receives the filter\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t// Deduplicated: tag:server adds IPv4+IPv6, webserver adds IPv4 only\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.80.238.75/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::7901:ee86/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, 
ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 4.3: group:admins → webserver:22\n\t\t{\n\t\t\tname:   \"group_admins_to_webserver_4_3\",\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"group:admins\"], \"dst\": [\"webserver:22\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t// tagged-server (webserver) receives the filter\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// TODO: Tailscale only includes IPv4 for host alias\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 4.4: webserver → group:admins:22\n\t\t{\n\t\t\tname:   \"webserver_to_group_admins_4_4\",\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"webserver\"], \"dst\": [\"group:admins:22\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t// user1 (group:admins member) receives the filter\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t// TODO: Tailscale only includes IPv4 for host source\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 8.6: user1:22 referenced 4 ways as destination\n\t\t// Destinations: group:admins:22, group:developers:22, kratail2tid@:22, 100.90.199.68:22\n\t\t{\n\t\t\tname:   \"user1_4_ways_dest_8_6\",\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"group:admins:22\", \"group:developers:22\", \"kratail2tid@:22\", \"100.90.199.68:22\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t// user1 receives the filter - Dsts NOT deduplicated\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// kratail2tid@:22\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t// group:admins:22\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 
22}},\n\t\t\t\t\t\t\t// group:developers:22\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t// 100.90.199.68:22 (raw IP matches node, includes IPv6)\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 8.7: Same node, 5 ports via different references\n\t\t// Destinations: tag:server:22, tag:server:80, tag:server:443, webserver:8080, 100.108.74.26:9000\n\t\t{\n\t\t\tname:   \"same_node_5_ports_different_refs_8_7\",\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\", \"tag:server:80\", \"tag:server:443\", \"webserver:8080\", \"100.108.74.26:9000\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t// tagged-server receives the filter\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// webserver:8080 (host alias - includes IPv6)\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 8080, Last: 8080}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 8080, Last: 8080}},\n\t\t\t\t\t\t\t// 100.108.74.26:9000 (raw IP - includes IPv6)\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 9000, Last: 9000}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 9000, Last: 9000}},\n\t\t\t\t\t\t\t// tag:server:22\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t// tag:server:80\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t// tag:server:443\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 9.4: Wildcard to autogroup:self\n\t\t{\n\t\t\tname:   \"wildcard_to_autogroup_self_9_4\",\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"autogroup:self:*\"]}`),\n\t\t\t// Only user1 (user-owned) receives filter; tagged nodes don't support autogroup:self\n\t\t\t// Sources narrowed to user1's own IPs (not full wildcard)\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
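// not the 100.64.0.0/10 wildcard: autogroup:self narrows sources per destination node\n\t\t\t\t\t\t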
[]string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// Note: autogroup:self destinations resolve to this node's own IPv4/IPv6 addresses\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 10.4: 3 rules, same dest, different sources\n\t\t// Rule 1: * → tag:server:22\n\t\t// Rule 2: tag:client → tag:server:80\n\t\t// Rule 3: autogroup:member → tag:server:443\n\t\t{\n\t\t\tname: \"three_rules_same_dest_different_sources_10_4\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:developers\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:empty\": []\n\t\t\t\t},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\n\t\t\t\t\t\"webserver\": \"100.108.74.26\",\n\t\t\t\t\t\"database\": \"100.74.60.128\",\n\t\t\t\t\t\"internal\": \"10.0.0.0/8\",\n\t\t\t\t\t\"subnet24\": \"192.168.1.0/24\"\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"tag:server:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:80\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"tag:server:443\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t// tagged-server receives 3 filter entries\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, 
ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 10.5: Mixed sources in multiple rules\n\t\t// Rule 1: [tag:client, tag:web] → tag:server:22\n\t\t// Rule 2: [autogroup:member, group:admins] → tag:database:5432\n\t\t{\n\t\t\tname: \"mixed_sources_multiple_rules_10_5\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:developers\": [\"kratail2tid@\"],\n\t\t\t\t\t\"group:empty\": []\n\t\t\t\t},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\n\t\t\t\t\t\"webserver\": \"100.108.74.26\",\n\t\t\t\t\t\"database\": \"100.74.60.128\",\n\t\t\t\t\t\"internal\": \"10.0.0.0/8\",\n\t\t\t\t\t\"subnet24\": \"192.168.1.0/24\"\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\", \"tag:web\"], \"dst\": [\"tag:server:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\", \"group:admins\"], \"dst\": [\"tag:database:5432\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t// tagged-server receives filter from rule 1\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// tagged-db receives filter from rule 2\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 11.3: Mixed sources with mixed port formats\n\t\t// Destinations: tag:server:22, tag:server:80-443, tag:database:5432,3306\n\t\t{\n\t\t\tname:   \"mixed_sources_mixed_port_formats_11_3\",\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"tag:client\", \"tag:web\"], \"dst\": [\"tag:server:22\", \"tag:server:80-443\", \"tag:database:5432,3306\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t// tagged-server receives :22 and :80-443\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// 
:22\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t// :80-443\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// tagged-db receives :5432,3306\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// :5432\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t// :3306\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 3306, Last: 3306}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 3306, Last: 3306}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 12.5: Multiple CIDR + tag destinations\n\t\t// Destinations: internal:22, subnet24:80, tag:server:443\n\t\t// CIDR destinations don't match tailnet nodes\n\t\t{\n\t\t\tname:   \"multiple_cidr_plus_tag_destinations_12_5\",\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"internal:22\", \"subnet24:80\", \"tag:server:443\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t// Only tag:server:443 is delivered (CIDRs don't match tailnet nodes)\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 13.4: Wildcard → self:80-443 (port range)\n\t\t{\n\t\t\tname:   \"wildcard_to_self_port_range_13_4\",\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"autogroup:self:80-443\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// Note: autogroup:self destinations resolve to this node's own IPv4/IPv6 addresses\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 80, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: 
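// the 80-443 spec parses to one contiguous PortRange\n\t\t\t\t\t\t\t\t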
tailcfg.PortRange{First: 80, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 13.16: Wildcard → self + tag:server:22 (mixed destinations)\n\t\t{\n\t\t\tname:   \"wildcard_to_self_plus_tag_server_13_16\",\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"autogroup:self:*\", \"tag:server:22\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t// user1: receives narrowed Srcs for autogroup:self\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// Note: autogroup:self destinations resolve to this node's own IPv4/IPv6 addresses\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// tagged-server: receives full wildcard Srcs for tag:server:22\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 13.20: Wildcard → self + group:admins:22 (same dest node)\n\t\t{\n\t\t\tname:   \"wildcard_to_self_plus_group_admins_13_20\",\n\t\t\tpolicy: makePolicy(`{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"autogroup:self:*\", \"group:admins:22\"]}`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t// user1 gets 2 filter entries:\n\t\t\t\t// Entry 1: autogroup:self:* with narrowed Srcs (processed first due to autogroup:self splitting)\n\t\t\t\t// Entry 2: group:admins:22 with full wildcard\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// Note: autogroup:self destinations resolve to this node's own IPv4/IPv6 addresses\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 0, Last: 65535}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, 
Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// ===========================================\n\t\t// Category 14: Multi-Rule Compounding (continued)\n\t\t// ===========================================\n\t\t// Test 14.21: 3 different srcs → same dest, different ports (3 rules)\n\t\t{\n\t\t\tname: \"three_diff_srcs_same_dest_diff_ports_14_21\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\"group:admins\": [\"kratail2tid@\"]},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\"webserver\": \"100.108.74.26\", \"database\": \"100.74.60.128\"},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:web\"], \"dst\": [\"tag:server:80\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:database\"], \"dst\": [\"tag:server:443\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t// tagged-server: receives 3 separate filter entries (different Srcs = separate)\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 14.22: 3 refs to same user → same dest:port (3 rules)\n\t\t// MERGED into 1 entry with 6 Dsts (not deduplicated)\n\t\t{\n\t\t\tname: \"three_refs_same_user_same_dest_14_22\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\"group:admins\": [\"kratail2tid@\"]},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\"webserver\": \"100.108.74.26\", \"database\": \"100.74.60.128\"},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": 
\"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"tag:server:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admins\"], \"dst\": [\"tag:server:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"kratail2tid@\"], \"dst\": [\"tag:server:22\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t// Merged: 1 entry with 6 Dsts (not deduplicated)\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 14.23: Same src → 3 different dests (3 rules)\n\t\t{\n\t\t\tname: \"same_src_three_diff_dests_14_23\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\"group:admins\": [\"kratail2tid@\"]},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\"webserver\": \"100.108.74.26\", \"database\": \"100.74.60.128\"},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:database:5432\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:web:80\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t// Each destination node receives its own filter (same Srcs per node)\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, 
ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.94.92.91/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::ef01:5c81/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 14.26: Same entity as both src and dst in 2 rules\n\t\t// MERGED into 1 entry with 4 Dsts (not deduplicated)\n\t\t{\n\t\t\tname: \"same_entity_src_and_dst_14_26\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\"group:admins\": [\"kratail2tid@\"]},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\"webserver\": \"100.108.74.26\", \"database\": \"100.74.60.128\"},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"autogroup:member:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admins\"], \"dst\": [\"group:admins:22\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"user1\": {\n\t\t\t\t\t// Merged: 1 entry with 4 Dsts (not deduplicated)\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 14.27: User→user:22, group→user:80 (same Srcs, different ports)\n\t\t// MERGED into 1 entry with 4 Dsts\n\t\t{\n\t\t\tname: \"user_to_user_22_group_to_user_80_14_27\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\"group:admins\": [\"kratail2tid@\"]},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\"webserver\": \"100.108.74.26\", \"database\": \"100.74.60.128\"},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"kratail2tid@\"], \"dst\": [\"kratail2tid@:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:admins\"], \"dst\": [\"kratail2tid@:80\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"user1\": {\n\t\t\t\t\t// Merged: 1 entry with 4 Dsts\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 14.29: tagged→tagged:22, specific tags→tagged:80\n\t\t{\n\t\t\tname: \"tagged_to_tagged_specific_tags_14_29\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\"group:admins\": [\"kratail2tid@\"]},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\"webserver\": \"100.108.74.26\", \"database\": \"100.74.60.128\"},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:tagged\"], \"dst\": [\"autogroup:tagged:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\", \"tag:web\"], \"dst\": [\"autogroup:tagged:80\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\": nil,\n\t\t\t\t// Each tagged node receives 2 filter entries (different Srcs = separate)\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.80.238.75/32\", 
Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::7901:ee86/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.80.238.75/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::7901:ee86/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.94.92.91/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::ef01:5c81/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.94.92.91/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::ef01:5c81/128\", Ports: tailcfg.PortRange{First: 80, Last: 
80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 14.42: Both autogroups → wildcard (full network)\n\t\t{\n\t\t\tname: \"both_autogroups_to_wildcard_14_42\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\"group:admins\": [\"kratail2tid@\"]},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\"webserver\": \"100.108.74.26\", \"database\": \"100.74.60.128\"},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:tagged\"], \"dst\": [\"*:*\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"*:*\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t// All nodes receive 2 filter entries (different Srcs = separate entries)\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: 
[]tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 14.45: Triple src ref each rule\n\t\t{\n\t\t\tname: \"triple_src_ref_each_rule_14_45\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\"group:admins\": [\"kratail2tid@\"]},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\"webserver\": \"100.108.74.26\", \"database\": \"100.74.60.128\"},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\", \"group:admins\", \"kratail2tid@\"], \"dst\": [\"tag:server:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:server\", \"webserver\", \"100.108.74.26\"], \"dst\": [\"group:admins:80\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"tagged-client\": 
nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t// tagged-server: receives filter from rule 1 (triple user ref deduplicated to 1 IP)\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// user1: receives filter from rule 2 (triple ref deduplicated to tag:server IP)\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 14.47: Same src → 4 dests (4 rules)\n\t\t{\n\t\t\tname: \"same_src_four_dests_14_47\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\"group:admins\": [\"kratail2tid@\"]},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\"webserver\": \"100.108.74.26\", \"database\": \"100.74.60.128\"},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"tag:server:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"tag:database:5432\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"tag:web:80\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"webserver:443\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t// tagged-server: merged entry for :22 and :443 (same SrcIPs)\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 
5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.94.92.91/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::ef01:5c81/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 14.50: 6 rules mixing all patterns\n\t\t{\n\t\t\tname: \"six_rules_mixed_patterns_14_50\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\"group:admins\": [\"kratail2tid@\"]},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\"webserver\": \"100.108.74.26\", \"database\": \"100.74.60.128\"},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:server\"], \"dst\": [\"tag:server:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:client:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:database\"], \"dst\": [\"tag:database:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:web\"], \"dst\": [\"tag:web:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"*:80\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"autogroup:member:443\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t// user1: receives 2 entries: member→*:80 and *→user1:443\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// tagged-server: receives self-ref + member→*:80\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, 
ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// tagged-client: receives self-ref + member→*:80\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.80.238.75/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::7901:ee86/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// tagged-db: receives self-ref + member→*:80\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// tagged-web: receives self-ref + member→*:80\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.94.92.91/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::ef01:5c81/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 14.17: Wildcard → group and user (same person):22\n\t\t// Test 14.17: * → group:admins:22 and * → kratail2tid@:22\n\t\t// MERGED into 1 
entry with 4 Dsts (duplicated)\n\t\t{\n\t\t\tname: \"wildcard_to_group_and_user_same_14_17\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\"group:admins\": [\"kratail2tid@\"]},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\"webserver\": \"100.108.74.26\", \"database\": \"100.74.60.128\"},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"group:admins:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"kratail2tid@:22\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"user1\": {\n\t\t\t\t\t// Merged: 1 entry with 4 Dsts (duplicated)\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 14.18: Tag → member and group (same):22\n\t\t// MERGED into 1 entry with 4 Dsts (duplicated)\n\t\t{\n\t\t\tname: \"tag_to_member_and_group_same_14_18\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\"group:admins\": [\"kratail2tid@\"]},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\"webserver\": \"100.108.74.26\", \"database\": \"100.74.60.128\"},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"autogroup:member:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"group:admins:22\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"tagged-server\": nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t\"tagged-db\":     nil,\n\t\t\t\t\"tagged-web\":    nil,\n\t\t\t\t\"user1\": {\n\t\t\t\t\t// Merged: 1 entry with 4 Dsts (duplicated)\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 14.20: Two rules with multi-dest, partial dest overlap\n\t\t{\n\t\t\tname: 
\"two_rules_multi_dest_partial_overlap_14_20\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\"group:admins\": [\"kratail2tid@\"]},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\"webserver\": \"100.108.74.26\", \"database\": \"100.74.60.128\"},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"tag:server:22\", \"tag:database:5432\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:80\", \"tag:web:443\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t// tagged-server: receives both wildcard:22 and tag:client:80\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// tagged-db: receives wildcard:5432\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// tagged-web: receives tag:client:443\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.94.92.91/32\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::ef01:5c81/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 14.30: All→all subset, wildcard→wildcard\n\t\t{\n\t\t\tname: \"all_to_all_subset_wildcard_wildcard_14_30\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\"group:admins\": [\"kratail2tid@\"]},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": 
[\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\"webserver\": \"100.108.74.26\", \"database\": \"100.74.60.128\"},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\", \"autogroup:tagged\"], \"dst\": [\"autogroup:member:22\", \"autogroup:tagged:80\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:443\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t// user1: receives member:22 (first rule dst) + *:443 (second rule)\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// tagged-web: receives tagged:80 (first rule dst) + *:443 (second rule)\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.94.92.91/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::ef01:5c81/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// Other tagged nodes: same pattern - tagged:80 + *:443\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.80.238.75/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::7901:ee86/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.108.74.26/32\",\n\t\t\t\t\t\t\t\"100.74.60.128/32\",\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"100.94.92.91/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2f01:3c9c/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::b901:4a87/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::ef01:5c81/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: 
\"*\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 14.37: Multiple wildcard src rules\n\t\t// Rules with same SrcIPs going to the same node are MERGED\n\t\t{\n\t\t\tname: \"multiple_wildcard_src_rules_14_37\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\"group:admins\": [\"kratail2tid@\"]},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\"webserver\": \"100.108.74.26\", \"database\": \"100.74.60.128\"},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"tag:server:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"tag:database:5432\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:80\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// tagged-server: receives rule 1 (:22) and rule 3 (:80) - MERGED\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// tagged-db: receives rule 2 (:5432) and rule 3 (:80) - MERGED\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: 
tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 14.38: Wildcard dest + specific dest\n\t\t// TODO: Tailscale subsumes specific into wildcard (1 entry), Headscale creates 2 separate entries\n\t\t{\n\t\t\tname: \"wildcard_dest_plus_specific_dest_14_38\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\"group:admins\": [\"kratail2tid@\"]},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\"webserver\": \"100.108.74.26\", \"database\": \"100.74.60.128\"},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"*:*\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t// tagged-client: receives only wildcard (tag:server:22 doesn't apply to tagged-client)\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// tagged-server: receives both wildcard and specific (specific is subset)\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 14.40: Wildcard in different 
positions\n\t\t{\n\t\t\tname: \"wildcard_in_different_positions_14_40\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\"group:admins\": [\"kratail2tid@\"]},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\"webserver\": \"100.108.74.26\", \"database\": \"100.74.60.128\"},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"tag:server:22\", \"tag:database:5432\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:80\", \"*:443\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t// user1: receives only *:443 from rule 2\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// tagged-server: receives wildcard:22 and tag:client:80 and tag:client:443\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// tagged-db: receives wildcard:5432 and tag:client:443\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 5432, Last: 5432}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// tagged-web: receives only tag:client:443\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// tagged-client: receives only tag:client:443\n\t\t\t\t\"tagged-client\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Test 14.49: Same src → 5 dests (some overlap)\n\t\t// TODO: Tailscale merges, Headscale creates separate entries but may deduplicate destinations\n\t\t{\n\t\t\tname: \"same_src_five_dests_overlap_14_49\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\"group:admins\": [\"kratail2tid@\"]},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:database\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:web\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\"webserver\": \"100.108.74.26\", \"database\": \"100.74.60.128\"},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:server:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:database:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"tag:web:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"webserver:22\"]},\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"database:22\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"tagged-client\": nil,\n\t\t\t\t// tagged-server: receives rules 1 and 4 (tag:server:22 and webserver:22 resolve to same node)\n\t\t\t\t// Note: Host alias (webserver) also resolves to both IPv4 and IPv6 when it matches a node\n\t\t\t\t\"tagged-server\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.108.74.26/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::b901:4a87/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// tagged-db: receives rules 2 and 5 (tag:database:22 and database:22 resolve to same node)\n\t\t\t\t\"tagged-db\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 22, Last: 
22}},\n\t\t\t\t\t\t\t{IP: \"100.74.60.128/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2f01:3c9c/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// tagged-web: receives rule 3 only\n\t\t\t\t\"tagged-web\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.80.238.75/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7901:ee86/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.94.92.91/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::ef01:5c81/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tpol, err := unmarshalPolicy([]byte(tt.policy))\n\t\t\trequire.NoError(t, err, \"failed to parse policy\")\n\n\t\t\terr = pol.validate()\n\t\t\trequire.NoError(t, err, \"policy validation failed\")\n\n\t\t\tfor nodeName, wantFilters := range tt.wantFilters {\n\t\t\t\tnode := findNodeByGivenName(nodes, nodeName)\n\t\t\t\trequire.NotNil(t, node, \"node %s not found\", nodeName)\n\n\t\t\t\t// Get compiled filters for this specific node\n\t\t\t\tcompiledFilters, err := pol.compileFilterRulesForNode(users, node.View(), nodes.ViewSlice())\n\t\t\t\trequire.NoError(t, err, \"failed to compile filters for node %s\", nodeName)\n\n\t\t\t\t// Reduce to only rules where this node is a destination\n\t\t\t\tgotFilters := policyutil.ReduceFilterRules(node.View(), compiledFilters)\n\n\t\t\t\tif len(wantFilters) == 0 && len(gotFilters) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif diff := cmp.Diff(wantFilters, gotFilters, cmpOptions()...); diff != \"\" {\n\t\t\t\t\tt.Errorf(\"node %s filters mismatch (-want +got):\\n%s\", nodeName, diff)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestTailscaleCompatErrorCases tests ACL configurations that should produce validation errors.\n// These tests verify that Headscale correctly rejects invalid policies, matching Tailscale's behavior\n// where the coordination server rejects the policy at update time (400 Bad Request).\n//\n// Reference: /home/kradalby/acl-explore/findings/09-mixed-scenarios.md.\nfunc TestTailscaleCompatErrorCases(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname      string\n\t\tpolicy    string\n\t\twantErr   string\n\t\treference string // Test case reference from findings\n\t}{\n\t\t// Test 6.4: tag:nonexistent → tag:server:22 (ERROR)\n\t\t// Tailscale error: \"src=tag not found: \\\"tag:nonexistent\\\" (400)\"\n\t\t{\n\t\t\tname: \"undefined_tag_source_6_4\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:nonexistent\"], \"dst\": [\"tag:server:22\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantErr:   `tag not defined in policy: \"tag:nonexistent\"`,\n\t\t\treference: \"Test 6.4: tag:nonexistent → tag:server:22\",\n\t\t},\n\n\t\t// Test 13.41: autogroup:self as SOURCE (ERROR)\n\t\t// Tailscale error: \"\\\"autogroup:self\\\" not valid on the src side of a rule (400)\"\n\t\t{\n\t\t\tname: 
\"self_as_source_13_41\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:self\"], \"dst\": [\"tag:server:22\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantErr:   `autogroup:self can only be used in ACL destinations`,\n\t\t\treference: \"Test 13.41: autogroup:self as SOURCE\",\n\t\t},\n\n\t\t// Test 13.43: autogroup:self without port (ERROR)\n\t\t// Tailscale error: \"dst=\\\"autogroup:self\\\": port range \\\"self\\\": invalid first integer (400)\"\n\t\t{\n\t\t\tname: \"self_without_port_13_43\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"autogroup:self\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\twantErr:   `invalid port number`,\n\t\t\treference: \"Test 13.43: autogroup:self without port\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tpol, err := unmarshalPolicy([]byte(tt.policy))\n\t\t\t// Check for parsing errors (some errors occur at parse time)\n\t\t\tif err != nil {\n\t\t\t\trequire.ErrorContains(t, err, tt.wantErr,\n\t\t\t\t\t\"test %s (%s): expected parse error containing %q, got %q\",\n\t\t\t\t\ttt.name, tt.reference, tt.wantErr, err.Error())\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Check for validation errors\n\t\t\terr = pol.validate()\n\t\t\trequire.Error(t, err, \"test %s (%s): expected validation error, got none\", tt.name, tt.reference)\n\t\t\trequire.ErrorContains(t, err, tt.wantErr,\n\t\t\t\t\"test %s (%s): expected error containing %q, got %q\",\n\t\t\t\ttt.name, tt.reference, tt.wantErr, err.Error())\n\t\t})\n\t}\n}\n\n// TestTailscaleCompatErrorCasesHeadscaleDiffers validates that Headscale correctly rejects\n// policies that Tailscale also rejects. These tests verify that autogroup:self destination\n// validation for ACL rules matches Tailscale's behavior.\n//\n// Tailscale validates that autogroup:self can only be used when ALL sources are\n// users, groups, or autogroup:member. 
Headscale now performs this same validation.\n//\n// Reference: /home/kradalby/acl-explore/findings/09-mixed-scenarios.md.\nfunc TestTailscaleCompatErrorCasesHeadscaleDiffers(t *testing.T) {\n\tt.Parallel()\n\n\t// These tests verify that Headscale rejects policies the same way Tailscale does.\n\t// Tailscale rejects these policies at validation time (400 Bad Request),\n\t// and Headscale now does the same.\n\ttests := []struct {\n\t\tname           string\n\t\tpolicy         string\n\t\ttailscaleError string // What Tailscale returns (and Headscale should match)\n\t\treference      string\n\t}{\n\t\t// Test 2.5: tag:client → autogroup:self:* + tag:server:22\n\t\t// Tailscale REJECTS this - autogroup:self requires user/group sources\n\t\t{\n\t\t\tname: \"tag_source_with_self_dest_2_5\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"],\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"autogroup:self:*\", \"tag:server:22\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\ttailscaleError: \"autogroup:self can only be used with users, groups, or supported autogroups (400)\",\n\t\t\treference:      \"Test 2.5: tag:client → autogroup:self:* + tag:server:22\",\n\t\t},\n\n\t\t// Test 4.5: tag:client → autogroup:self:*\n\t\t// Tailscale REJECTS this - autogroup:self requires user/group sources\n\t\t{\n\t\t\tname: \"tag_source_to_self_dest_only_4_5\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"autogroup:self:*\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\ttailscaleError: \"autogroup:self can only be used with users, groups, or supported autogroups (400)\",\n\t\t\treference:      \"Test 4.5: tag:client → autogroup:self:*\",\n\t\t},\n\n\t\t// Test 6.1: autogroup:tagged → autogroup:self:*\n\t\t// Tailscale REJECTS this - autogroup:tagged is NOT a valid source for autogroup:self\n\t\t{\n\t\t\tname: \"autogroup_tagged_to_self_6_1\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:tagged\"], \"dst\": [\"autogroup:self:*\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\ttailscaleError: \"autogroup:self can only be used with users, groups, or supported autogroups (400)\",\n\t\t\treference:      \"Test 6.1: autogroup:tagged → autogroup:self:*\",\n\t\t},\n\n\t\t// Test 9.5: [autogroup:member, autogroup:tagged] → [autogroup:self:*, tag:server:22]\n\t\t// Tailscale REJECTS this - ANY invalid source (autogroup:tagged) invalidates the rule\n\t\t{\n\t\t\tname: \"both_autogroups_to_self_plus_tag_9_5\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\", \"autogroup:tagged\"], \"dst\": [\"autogroup:self:*\", \"tag:server:22\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\ttailscaleError: \"autogroup:self can only be used with users, groups, or supported autogroups (400)\",\n\t\t\treference:   
   \"Test 9.5: [autogroup:member, autogroup:tagged] → [autogroup:self:*, tag:server:22]\",\n\t\t},\n\n\t\t// Test 13.6: autogroup:tagged → self:*\n\t\t// Tailscale REJECTS this - same as 6.1\n\t\t{\n\t\t\tname: \"autogroup_tagged_to_self_13_6\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:tagged\"], \"dst\": [\"autogroup:self:*\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\ttailscaleError: \"autogroup:self can only be used with users, groups, or supported autogroups (400)\",\n\t\t\treference:      \"Test 13.6: autogroup:tagged → self:*\",\n\t\t},\n\n\t\t// Test 13.10: tag:client → self:*\n\t\t// Tailscale REJECTS this - tags are not valid sources for autogroup:self\n\t\t{\n\t\t\tname: \"tag_to_self_13_10\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:client\"], \"dst\": [\"autogroup:self:*\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\ttailscaleError: \"autogroup:self can only be used with users, groups, or supported autogroups (400)\",\n\t\t\treference:      \"Test 13.10: tag:client → self:*\",\n\t\t},\n\n\t\t// Test 13.13: Host → self:*\n\t\t// Tailscale REJECTS this - hosts are not valid sources for autogroup:self\n\t\t{\n\t\t\tname: \"host_to_self_13_13\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"hosts\": {\n\t\t\t\t\t\"webserver\": \"100.108.74.26\"\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"webserver\"], \"dst\": [\"autogroup:self:*\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\ttailscaleError: \"autogroup:self can only be used with users, groups, or supported autogroups (400)\",\n\t\t\treference:      \"Test 13.13: Host → self:*\",\n\t\t},\n\n\t\t// Test 13.14: Raw IP → self:*\n\t\t// Tailscale REJECTS this - raw IPs are not valid sources for autogroup:self\n\t\t{\n\t\t\tname: \"raw_ip_to_self_13_14\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:server\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"100.90.199.68\"], \"dst\": [\"autogroup:self:*\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\ttailscaleError: \"autogroup:self can only be used with users, groups, or supported autogroups (400)\",\n\t\t\treference:      \"Test 13.14: Raw IP (user1) → self:*\",\n\t\t},\n\n\t\t// Test 13.25: [autogroup:member, tag:client] → self:*\n\t\t// Tailscale REJECTS this - ANY invalid source (tag:client) invalidates the rule\n\t\t{\n\t\t\tname: \"mixed_valid_invalid_sources_to_self_13_25\",\n\t\t\tpolicy: `{\n\t\t\t\t\"groups\": {\n\t\t\t\t\t\"group:admins\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"tagOwners\": {\n\t\t\t\t\t\"tag:client\": [\"kratail2tid@\"]\n\t\t\t\t},\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\", \"tag:client\"], \"dst\": [\"autogroup:self:*\"]}\n\t\t\t\t]\n\t\t\t}`,\n\t\t\ttailscaleError: \"autogroup:self can only be used with users, groups, or supported autogroups (400)\",\n\t\t\treference:      \"Test 13.25: 
[autogroup:member, tag:client] → self:*\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\t// unmarshalPolicy calls validate() internally, so we expect it to fail\n\t\t\t// with our validation error\n\t\t\t_, err := unmarshalPolicy([]byte(tt.policy))\n\t\t\trequire.Error(t, err,\n\t\t\t\t\"test %s (%s): should reject policy like Tailscale\",\n\t\t\t\ttt.name, tt.reference)\n\t\t\trequire.ErrorIs(t, err, ErrACLAutogroupSelfInvalidSource,\n\t\t\t\t\"test %s (%s): expected autogroup:self validation error\",\n\t\t\t\ttt.name, tt.reference)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/tailscale_routes_compat_test.go",
    "content": "package v2\n\n// This file contains compatibility tests for subnet routes and exit nodes.\n// It validates Headscale's ACL engine behavior against documented Tailscale\n// SaaS behavior. Tests document behavioral differences with TODO comments.\n//\n// Source findings: /home/kradalby/acl-explore/findings/{10,11,12,13,14,15}-*.md\n\nimport (\n\t\"net/netip\"\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/juanfont/headscale/hscontrol/policy/policyutil\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gorm.io/gorm\"\n\t\"tailscale.com/net/tsaddr\"\n\t\"tailscale.com/tailcfg\"\n)\n\n// wildcardSrcIPs represents the SrcIPs used when wildcard (*) source is specified.\n// In Tailscale, this includes the CGNAT range and IPv6 Tailscale range, plus any\n// advertised subnet routes.\nvar wildcardSrcIPs = []string{\n\t\"100.64.0.0/10\",       // CGNAT range for Tailscale IPs\n\t\"fd7a:115c:a1e0::/48\", // Tailscale IPv6 range\n}\n\n// memberSrcIPs represents the SrcIPs for autogroup:member (user-owned nodes).\n// This includes client1, client2, and user1.\nvar memberSrcIPs = []string{\n\t\"100.116.73.38/32\",\n\t\"100.89.42.23/32\",\n\t\"100.90.199.68/32\",\n\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n}\n\n// wildcardDstPorts represents wildcard destination ports using {IP: \"*\"}.\nvar wildcardDstPorts = []tailcfg.NetPortRange{\n\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n}\n\n// setupRouteCompatUsers returns the test users for route compatibility tests.\nfunc setupRouteCompatUsers() types.Users {\n\treturn types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"kratail2tid\"},\n\t}\n}\n\n// setupRouteCompatNodes returns the test nodes for route compatibility tests.\n// The node configuration includes:\n// - 2 client nodes (user-owned, no routes)\n// - 1 subnet router (tag:router, 10.33.0.0/16)\n// - 1 exit node (tag:exit, 0.0.0.0/0, ::/0)\n// - 1 multi-router (tag:router + tag:exit, 172.16.0.0/24 + exit routes)\n// - 2 HA routers (tag:ha, both advertise 192.168.1.0/24)\n// - 1 big router (tag:router, 10.0.0.0/8)\n// - 1 user-owned node (user1).\nfunc setupRouteCompatNodes(users types.Users) types.Nodes {\n\t// Node: client1 - User-owned client (no routes)\n\tnodeClient1 := &types.Node{\n\t\tID:             1,\n\t\tGivenName:      \"client1\",\n\t\tUser:           &users[0],\n\t\tUserID:         &users[0].ID,\n\t\tIPv4:           ptrAddr(\"100.116.73.38\"),\n\t\tIPv6:           ptrAddr(\"fd7a:115c:a1e0::a801:4949\"),\n\t\tHostinfo:       &tailcfg.Hostinfo{},\n\t\tApprovedRoutes: []netip.Prefix{},\n\t}\n\n\t// Node: client2 - User-owned client (no routes)\n\tnodeClient2 := &types.Node{\n\t\tID:             2,\n\t\tGivenName:      \"client2\",\n\t\tUser:           &users[0],\n\t\tUserID:         &users[0].ID,\n\t\tIPv4:           ptrAddr(\"100.89.42.23\"),\n\t\tIPv6:           ptrAddr(\"fd7a:115c:a1e0::d01:2a2e\"),\n\t\tHostinfo:       &tailcfg.Hostinfo{},\n\t\tApprovedRoutes: []netip.Prefix{},\n\t}\n\n\t// Node: subnet-router - Tagged with tag:router, advertises 10.33.0.0/16\n\tnodeSubnetRouter := &types.Node{\n\t\tID:        3,\n\t\tGivenName: \"subnet-router\",\n\t\tIPv4:      ptrAddr(\"100.119.139.79\"),\n\t\tIPv6:      ptrAddr(\"fd7a:115c:a1e0::4001:8ba0\"),\n\t\tTags:      []string{\"tag:router\"},\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tRoutableIPs: 
[]netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.33.0.0/16\"),\n\t\t\t},\n\t\t},\n\t\tApprovedRoutes: []netip.Prefix{\n\t\t\tnetip.MustParsePrefix(\"10.33.0.0/16\"),\n\t\t},\n\t}\n\n\t// Node: exit-node - Tagged with tag:exit, advertises exit routes\n\tnodeExitNode := &types.Node{\n\t\tID:        4,\n\t\tGivenName: \"exit-node\",\n\t\tIPv4:      ptrAddr(\"100.121.32.1\"),\n\t\tIPv6:      ptrAddr(\"fd7a:115c:a1e0::7f01:2004\"),\n\t\tTags:      []string{\"tag:exit\"},\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tRoutableIPs: tsaddr.ExitRoutes(),\n\t\t},\n\t\tApprovedRoutes: tsaddr.ExitRoutes(),\n\t}\n\n\t// Node: multi-router - Tagged with tag:router AND tag:exit\n\t// Advertises both subnet (172.16.0.0/24) and exit routes\n\tmultiRouterRoutes := append([]netip.Prefix{\n\t\tnetip.MustParsePrefix(\"172.16.0.0/24\"),\n\t}, tsaddr.ExitRoutes()...)\n\tnodeMultiRouter := &types.Node{\n\t\tID:        5,\n\t\tGivenName: \"multi-router\",\n\t\tIPv4:      ptrAddr(\"100.74.117.7\"),\n\t\tIPv6:      ptrAddr(\"fd7a:115c:a1e0::c401:7508\"),\n\t\tTags:      []string{\"tag:router\", \"tag:exit\"},\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tRoutableIPs: multiRouterRoutes,\n\t\t},\n\t\tApprovedRoutes: multiRouterRoutes,\n\t}\n\n\t// Node: ha-router1 - Tagged with tag:ha, advertises 192.168.1.0/24\n\tnodeHARouter1 := &types.Node{\n\t\tID:        6,\n\t\tGivenName: \"ha-router1\",\n\t\tIPv4:      ptrAddr(\"100.85.37.108\"),\n\t\tIPv6:      ptrAddr(\"fd7a:115c:a1e0::f101:2597\"),\n\t\tTags:      []string{\"tag:ha\"},\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tRoutableIPs: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"192.168.1.0/24\"),\n\t\t\t},\n\t\t},\n\t\tApprovedRoutes: []netip.Prefix{\n\t\t\tnetip.MustParsePrefix(\"192.168.1.0/24\"),\n\t\t},\n\t}\n\n\t// Node: ha-router2 - Tagged with tag:ha, advertises same 192.168.1.0/24\n\tnodeHARouter2 := &types.Node{\n\t\tID:        7,\n\t\tGivenName: \"ha-router2\",\n\t\tIPv4:      ptrAddr(\"100.119.130.32\"),\n\t\tIPv6:      ptrAddr(\"fd7a:115c:a1e0::4501:82a9\"),\n\t\tTags:      []string{\"tag:ha\"},\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tRoutableIPs: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"192.168.1.0/24\"),\n\t\t\t},\n\t\t},\n\t\tApprovedRoutes: []netip.Prefix{\n\t\t\tnetip.MustParsePrefix(\"192.168.1.0/24\"),\n\t\t},\n\t}\n\n\t// Node: big-router - Tagged with tag:router, advertises 10.0.0.0/8\n\tnodeBigRouter := &types.Node{\n\t\tID:        8,\n\t\tGivenName: \"big-router\",\n\t\tIPv4:      ptrAddr(\"100.100.100.1\"),\n\t\tIPv6:      ptrAddr(\"fd7a:115c:a1e0::6401:6401\"),\n\t\tTags:      []string{\"tag:router\"},\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tRoutableIPs: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"10.0.0.0/8\"),\n\t\t\t},\n\t\t},\n\t\tApprovedRoutes: []netip.Prefix{\n\t\t\tnetip.MustParsePrefix(\"10.0.0.0/8\"),\n\t\t},\n\t}\n\n\t// Node: user1 - User-owned node (no routes)\n\tnodeUser1 := &types.Node{\n\t\tID:             9,\n\t\tGivenName:      \"user1\",\n\t\tUser:           &users[0],\n\t\tUserID:         &users[0].ID,\n\t\tIPv4:           ptrAddr(\"100.90.199.68\"),\n\t\tIPv6:           ptrAddr(\"fd7a:115c:a1e0::2d01:c747\"),\n\t\tHostinfo:       &tailcfg.Hostinfo{},\n\t\tApprovedRoutes: []netip.Prefix{},\n\t}\n\n\treturn types.Nodes{\n\t\tnodeClient1,\n\t\tnodeClient2,\n\t\tnodeSubnetRouter,\n\t\tnodeExitNode,\n\t\tnodeMultiRouter,\n\t\tnodeHARouter1,\n\t\tnodeHARouter2,\n\t\tnodeBigRouter,\n\t\tnodeUser1,\n\t}\n}\n\n// routesPolicyPrefix provides the standard groups, tagOwners, and hosts\n// for route compatibility tests.\nconst 
routesPolicyPrefix = `{\n\t\"groups\": {\n\t\t\"group:admins\": [\"kratail2tid@\"],\n\t\t\"group:empty\": []\n\t},\n\t\"tagOwners\": {\n\t\t\"tag:router\": [\"kratail2tid@\"],\n\t\t\"tag:exit\": [\"kratail2tid@\"],\n\t\t\"tag:ha\": [\"kratail2tid@\"]\n\t},\n\t\"hosts\": {\n\t\t\"internal\": \"10.0.0.0/8\",\n\t\t\"subnet24\": \"192.168.1.0/24\"\n\t},\n\t\"acls\": [`\n\nconst routesPolicySuffix = `\n\t]\n}`\n\n// makeRoutesPolicy creates a full policy from just the ACL rules portion.\nfunc makeRoutesPolicy(aclRules string) string {\n\treturn routesPolicyPrefix + aclRules + routesPolicySuffix\n}\n\n// routesCompatTest defines a test case for routes compatibility testing.\ntype routesCompatTest struct {\n\tname        string                          // Test name\n\tpolicy      string                          // HuJSON policy as multiline raw string\n\twantFilters map[string][]tailcfg.FilterRule // node GivenName -> expected filters\n}\n\n// TestTailscaleRoutesCompatSubnetBasics tests basic subnet route behavior (Category A).\n// These tests verify that subnet routes are correctly included in SrcIPs for wildcard rules,\n// that tag-based ACLs resolve to node IPs (not routes), and that explicit subnet filters\n// are placed on the correct destination nodes.\nfunc TestTailscaleRoutesCompatSubnetBasics(t *testing.T) {\n\tt.Parallel()\n\n\tusers := setupRouteCompatUsers()\n\tnodes := setupRouteCompatNodes(users)\n\n\ttests := []routesCompatTest{\n\t\t// A1: Wildcard ACL includes subnet routes in SrcIPs\n\t\t{\n\t\t\tname: \"A1_wildcard_acl_includes_routes_in_srcips\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}\n\t`),\n\t\t\t// When using * -> *:*, SrcIPs should include advertised subnet routes\n\t\t\t// (but NOT exit routes 0.0.0.0/0, ::/0).\n\t\t\t// TODO: Verify Tailscale includes subnet routes 10.33.0.0/16, 172.16.0.0/24,\n\t\t\t// 192.168.1.0/24, 10.0.0.0/8 in SrcIPs but NOT 0.0.0.0/0, ::/0\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t\t// TODO: Tailscale also includes these subnet routes:\n\t\t\t\t\t\t\t// \"10.0.0.0/8\",\n\t\t\t\t\t\t\t// \"10.33.0.0/16\",\n\t\t\t\t\t\t\t// \"172.16.0.0/24\",\n\t\t\t\t\t\t\t// \"192.168.1.0/24\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"client2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": 
{\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Fix DstPorts expansion and exit node coverage for tag-based ACLs\n\t\t//\n\t\t// A2: Tag-based ACL resolves to node IPs only, NOT routes\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - tag:router includes: subnet-router, multi-router, big-router\n\t\t// - Each tag:router node receives filter with ALL tag:router IPs in DstPorts\n\t\t// - exit-node (tag:exit only) does NOT receive any filter\n\t\t// - DstPorts contains ONLY node IPs, NOT advertised routes\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - INCORRECT: Each node only gets ITS OWN IPs in DstPorts (should be ALL tag IPs)\n\t\t// - INCORRECT: exit-node receives a filter because multi-router has exit routes\n\t\t//   and Headscale treats 0.0.0.0/0 as covering node IPs for filter distribution\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// 1. Filter compilation only adds destinations for the current node being filtered,\n\t\t//    not all nodes matching the tag\n\t\t// 2. Exit routes (0.0.0.0/0, ::/0) are treated as covering all destinations\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// 1. When dst is a tag, include ALL IPs of nodes with that tag in DstPorts\n\t\t// 2. 
Exclude exit routes from filter coverage calculations\n\t\t{\n\t\t\tname: \"A2_tag_based_acl_excludes_routes\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:router\"], \"dst\": [\"tag:router:*\"]}\n\t`),\n\t\t\t// EXPECTED (Tailscale) - commented out:\n\t\t\t// wantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t//     \"client1\":    nil,\n\t\t\t//     \"client2\":    nil,\n\t\t\t//     \"exit-node\":  nil,  // tag:exit only, not tag:router\n\t\t\t//     \"ha-router1\": nil,\n\t\t\t//     \"ha-router2\": nil,\n\t\t\t//     \"user1\":      nil,\n\t\t\t//     \"subnet-router\": {\n\t\t\t//         {\n\t\t\t//             SrcIPs: []string{\n\t\t\t//                 \"100.100.100.1/32\", \"100.119.139.79/32\", \"100.74.117.7/32\",\n\t\t\t//                 \"fd7a:115c:a1e0::4001:8ba0/128\", \"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t//                 \"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t//             },\n\t\t\t//             DstPorts: []tailcfg.NetPortRange{\n\t\t\t//                 // ALL tag:router IPs, not just this node's IP\n\t\t\t//                 {IP: \"100.100.100.1/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t//                 {IP: \"100.119.139.79/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t//                 {IP: \"100.74.117.7/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t//                 {IP: \"fd7a:115c:a1e0::4001:8ba0/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t//                 {IP: \"fd7a:115c:a1e0::6401:6401/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t//                 {IP: \"fd7a:115c:a1e0::c401:7508/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t//             },\n\t\t\t//             IPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t//         },\n\t\t\t//     },\n\t\t\t//     // Same for multi-router and big-router...\n\t\t\t// },\n\t\t\t//\n\t\t\t// ACTUAL (Headscale) - current behavior:\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t// INCORRECT: subnet-router only gets its OWN IPs in DstPorts\n\t\t\t\t// Tailscale includes ALL tag:router IPs\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// INCORRECT: Only this node's IPs, should be ALL tag:router IPs\n\t\t\t\t\t\t\t{IP: \"100.119.139.79/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::4001:8ba0/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// multi-router has BOTH tag:router AND tag:exit\n\t\t\t\t// Because of exit routes, filter merging includes all tag:router destinations\n\t\t\t\t// This is actually the CORRECT Tailscale behavior for DstPorts (but wrong filter distribution)\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// All tag:router IPs due to exit route coverage + filter merging\n\t\t\t\t\t\t\t{IP: \"100.74.117.7/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.100.100.1/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.119.139.79/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::c401:7508/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::4001:8ba0/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::6401:6401/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: big-router only gets its OWN IPs in DstPorts\n\t\t\t\t// Tailscale includes ALL tag:router IPs\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// INCORRECT: Only this node's IPs, should be ALL tag:router IPs\n\t\t\t\t\t\t\t{IP: \"100.100.100.1/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::6401:6401/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: exit-node receives filter due to multi-router having exit routes\n\t\t\t\t// and Headscale treating 0.0.0.0/0 as covering node IPs\n\t\t\t\t// Tailscale would return nil here\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.74.117.7/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.100.100.1/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.119.139.79/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::4001:8ba0/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::6401:6401/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::c401:7508/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Fix exit node route coverage to match Tailscale behavior\n\t\t//\n\t\t// A3: Explicit subnet destination - filter goes to router only\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - Filter placed ONLY on nodes whose routes cover the destination\n\t\t// - subnet-router (10.33.0.0/16) gets filter - exact match\n\t\t// - big-router (10.0.0.0/8) gets filter - parent covers child\n\t\t// - exit-node and multi-router get NO filter - exit 
routes (0.0.0.0/0)\n\t\t//   do NOT count as \"covering\" subnet destinations for filter placement\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - Exit nodes (0.0.0.0/0) ARE treated as covering all destinations\n\t\t// - exit-node and multi-router incorrectly receive the filter\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// hscontrol/policy/v2/filter.go treats exit routes (0.0.0.0/0, ::/0) as\n\t\t// covering all destinations, but Tailscale only uses exit routes for\n\t\t// actual traffic routing, not for filter distribution.\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// When determining which nodes receive a filter based on route coverage,\n\t\t// exclude exit routes (0.0.0.0/0 and ::/0) from the coverage check.\n\t\t// Exit nodes should only receive filters when explicitly targeted.\n\t\t{\n\t\t\tname: \"A3_explicit_subnet_filter_to_router\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.33.0.0/16:*\"]}\n\t`),\n\t\t\t// EXPECTED (Tailscale) - commented out:\n\t\t\t// wantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t//     \"client1\":      nil,\n\t\t\t//     \"client2\":      nil,\n\t\t\t//     \"exit-node\":    nil,  // Exit route does NOT cover for filter placement\n\t\t\t//     \"ha-router1\":   nil,\n\t\t\t//     \"ha-router2\":   nil,\n\t\t\t//     \"user1\":        nil,\n\t\t\t//     \"multi-router\": nil,  // Exit route does NOT cover for filter placement\n\t\t\t//     \"subnet-router\": {\n\t\t\t//         {\n\t\t\t//             SrcIPs:   []string{\"100.64.0.0/10\", \"fd7a:115c:a1e0::/48\"},\n\t\t\t//             DstPorts: []tailcfg.NetPortRange{{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny}},\n\t\t\t//             IPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t//         },\n\t\t\t//     },\n\t\t\t//     \"big-router\": {\n\t\t\t//         {\n\t\t\t//             SrcIPs:   []string{\"100.64.0.0/10\", \"fd7a:115c:a1e0::/48\"},\n\t\t\t//             DstPorts: []tailcfg.NetPortRange{{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny}},\n\t\t\t//             IPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t//         },\n\t\t\t//     },\n\t\t\t// },\n\t\t\t//\n\t\t\t// ACTUAL (Headscale) - current behavior:\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t// subnet-router owns 10.33.0.0/16 - exact match (CORRECT)\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// big-router owns 10.0.0.0/8 which covers 10.33.0.0/16 (CORRECT)\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: exit-node gets filter because 0.0.0.0/0 \"covers\" destination\n\t\t\t\t// Tailscale would return nil 
here\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: multi-router gets filter because 0.0.0.0/0 \"covers\" destination\n\t\t\t\t// Tailscale would return nil here\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Fix exit node route coverage to match Tailscale behavior\n\t\t//\n\t\t// A3b: autogroup:member to subnet - SrcIPs = member IPs only\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - autogroup:member = user-owned nodes only (client1, client2, user1)\n\t\t// - Filter goes to subnet-router (exact match) and big-router (parent route)\n\t\t// - exit-node and multi-router get NO filter (exit routes don't cover)\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - exit-node and multi-router incorrectly receive filters because\n\t\t//   exit routes (0.0.0.0/0) are treated as covering all destinations\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// Same as A3 - exit routes should not count for filter distribution\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// Exclude exit routes (0.0.0.0/0 and ::/0) from coverage checks\n\t\t{\n\t\t\tname: \"A3b_autogroup_member_to_subnet\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"10.33.0.0/16:*\"]}\n\t`),\n\t\t\t// EXPECTED (Tailscale) - commented out:\n\t\t\t// wantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t//     \"client1\":      nil,\n\t\t\t//     \"client2\":      nil,\n\t\t\t//     \"exit-node\":    nil,  // Exit route does NOT cover\n\t\t\t//     \"ha-router1\":   nil,\n\t\t\t//     \"ha-router2\":   nil,\n\t\t\t//     \"user1\":        nil,\n\t\t\t//     \"multi-router\": nil,  // Exit route does NOT cover\n\t\t\t//     \"subnet-router\": { ... },  // Exact match\n\t\t\t//     \"big-router\":    { ... 
},  // Parent route covers\n\t\t\t// },\n\t\t\t//\n\t\t\t// ACTUAL (Headscale) - current behavior:\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t// CORRECT: subnet-router gets filter (exact match)\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.89.42.23/32\",  // client2\n\t\t\t\t\t\t\t\"100.90.199.68/32\", // user1\n\t\t\t\t\t\t\t\"100.116.73.38/32\", // client1\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// CORRECT: big-router gets filter (parent route 10.0.0.0/8 covers)\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: exit-node receives filter due to 0.0.0.0/0 coverage\n\t\t\t\t// Tailscale would return nil here\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: multi-router receives filter due to 0.0.0.0/0 coverage\n\t\t\t\t// Tailscale would return nil here\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Fix exit node route coverage to match Tailscale behavior\n\t\t//\n\t\t// A4: Multiple routes on same router (172.16.0.0/24 destination)\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - multi-router has 172.16.0.0/24, should get filter (exact match)\n\t\t// - exit-node has 0.0.0.0/0 but does NOT cover 172.16.0.0/24 for filter placement\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - multi-router correctly gets filter\n\t\t// - exit-node 
incorrectly gets filter because 0.0.0.0/0 is treated as covering\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// Same as A3 - exit routes should not count for filter distribution\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// Exclude exit routes from coverage checks\n\t\t{\n\t\t\tname: \"A4_multiple_routes_same_router\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"172.16.0.0/24:*\"]}\n\t`),\n\t\t\t// EXPECTED (Tailscale) - commented out:\n\t\t\t// wantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t//     \"client1\":       nil,\n\t\t\t//     \"client2\":       nil,\n\t\t\t//     \"subnet-router\": nil,\n\t\t\t//     \"exit-node\":     nil,  // 0.0.0.0/0 does NOT cover for filter placement\n\t\t\t//     \"ha-router1\":    nil,\n\t\t\t//     \"ha-router2\":    nil,\n\t\t\t//     \"big-router\":    nil,\n\t\t\t//     \"user1\":         nil,\n\t\t\t//     \"multi-router\":  { ... },  // Exact match\n\t\t\t// },\n\t\t\t//\n\t\t\t// ACTUAL (Headscale) - current behavior:\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t// CORRECT: multi-router gets filter (exact match for 172.16.0.0/24)\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"172.16.0.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: exit-node receives filter due to 0.0.0.0/0 coverage\n\t\t\t\t// Tailscale would return nil here\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"172.16.0.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Fix exit node route coverage to match Tailscale behavior\n\t\t//\n\t\t// A5: Host alias to subnet (uses \"internal\" = \"10.0.0.0/8\")\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - \"internal\" resolves to 10.0.0.0/8 via hosts alias\n\t\t// - big-router (10.0.0.0/8) gets filter - exact match\n\t\t// - subnet-router (10.33.0.0/16) gets filter - child route\n\t\t// - exit-node and multi-router get NO filter (exit routes don't cover)\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - big-router and subnet-router correctly get filters\n\t\t// - exit-node and multi-router incorrectly get filters (exit route coverage)\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// Same as A3 - exit routes should not count for filter distribution\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// Exclude exit routes from coverage checks\n\t\t{\n\t\t\tname: \"A5_host_alias_to_subnet\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"internal:22\"]}\n\t`),\n\t\t\t// EXPECTED (Tailscale) - commented out:\n\t\t\t// wantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t//     \"client1\":      nil,\n\t\t\t//     \"client2\":      nil,\n\t\t\t//     \"exit-node\":    nil,  // Exit route does NOT cover\n\t\t\t//  
   \"ha-router1\":   nil,\n\t\t\t//     \"ha-router2\":   nil,\n\t\t\t//     \"user1\":        nil,\n\t\t\t//     \"multi-router\": nil,  // Exit route does NOT cover\n\t\t\t//     \"subnet-router\": { ... },  // Child route\n\t\t\t//     \"big-router\":    { ... },  // Exact match\n\t\t\t// },\n\t\t\t//\n\t\t\t// ACTUAL (Headscale) - current behavior:\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t// CORRECT: subnet-router gets filter (child route 10.33.0.0/16 within 10.0.0.0/8)\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.0.0.0/8\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// CORRECT: big-router gets filter (exact match for 10.0.0.0/8)\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.0.0.0/8\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: exit-node receives filter due to 0.0.0.0/0 coverage\n\t\t\t\t// Tailscale would return nil here\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.0.0.0/8\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: multi-router receives filter due to 0.0.0.0/0 coverage\n\t\t\t\t// Tailscale would return nil here\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.0.0.0/8\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tpol, err := unmarshalPolicy([]byte(tt.policy))\n\t\t\trequire.NoError(t, err, \"failed to parse policy\")\n\n\t\t\terr = pol.validate()\n\t\t\trequire.NoError(t, err, \"policy validation failed\")\n\n\t\t\tfor nodeName, wantFilters := range tt.wantFilters {\n\t\t\t\tnode := findNodeByGivenName(nodes, nodeName)\n\t\t\t\trequire.NotNil(t, node, \"node %s not found\", nodeName)\n\n\t\t\t\tcompiledFilters, err := pol.compileFilterRulesForNode(users, node.View(), nodes.ViewSlice())\n\t\t\t\trequire.NoError(t, err, \"failed to compile filters for node %s\", nodeName)\n\n\t\t\t\tgotFilters := policyutil.ReduceFilterRules(node.View(), compiledFilters)\n\n\t\t\t\tif len(wantFilters) == 0 && len(gotFilters) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif diff := 
cmp.Diff(wantFilters, gotFilters, cmpOptions()...); diff != \"\" {\n\t\t\t\t\tt.Errorf(\"node %s filters mismatch (-want +got):\\n%s\", nodeName, diff)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}
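\n\n// coversForFilterPlacement is a minimal sketch of the route-coverage check\n// described in the FIX REQUIRED notes above (the name and shape are\n// hypothetical, not the actual hscontrol/policy/v2/filter.go code). The idea:\n// an approved route only \"covers\" an ACL destination prefix when it is a real\n// subnet route; exit routes (0.0.0.0/0, ::/0) are for traffic routing only\n// and must never place filters on a node.\n//\n//\tfunc coversForFilterPlacement(route, dst netip.Prefix) bool {\n//\t\tif route.Bits() == 0 {\n//\t\t\t// Exit route (0.0.0.0/0 or ::/0): never counts as coverage.\n//\t\t\treturn false\n//\t\t}\n//\t\t// Exact match, parent route, or child route all count (see A3/A5).\n//\t\treturn route.Overlaps(dst)\n//\t}\n\n// TestTailscaleRoutesCompatExitNodes tests exit node behavior (Category B).\n// These tests verify that exit routes (0.0.0.0/0, ::/0) are NOT included in SrcIPs,\n// that exit nodes can cover external destinations, and autogroup:internet behavior.\nfunc TestTailscaleRoutesCompatExitNodes(t *testing.T) {\n\tt.Parallel()\n\n\tusers := setupRouteCompatUsers()\n\tnodes := setupRouteCompatNodes(users)\n\n\t// Standard wildcard filter that all nodes receive for * -> *:* ACL\n\twildcardFilter := []tailcfg.FilterRule{\n\t\t{\n\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\tDstPorts: wildcardDstPorts,\n\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t},\n\t}\n\n\ttests := []routesCompatTest{\n\t\t// TODO: Verify Tailscale includes subnet routes in SrcIPs for wildcard ACLs\n\t\t//\n\t\t// B1: Exit routes NOT in SrcIPs with wildcard ACL\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - SrcIPs includes CGNAT + IPv6 Tailscale ranges\n\t\t// - SrcIPs also includes advertised subnet routes (10.0.0.0/8, etc.)\n\t\t// - Exit routes (0.0.0.0/0, ::/0) are NOT included in SrcIPs\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - SrcIPs only includes CGNAT + IPv6 Tailscale ranges\n\t\t// - Subnet routes are NOT included in SrcIPs (might be a difference)\n\t\t// - Exit routes correctly NOT included\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// Headscale doesn't expand wildcard source to include subnet routes\n\t\t//\n\t\t// FIX REQUIRED (if needed):\n\t\t// Add subnet routes to SrcIPs when source is wildcard\n\t\t{\n\t\t\tname: \"B1_exit_routes_not_in_srcips\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}\n\t`),\n\t\t\t// All nodes receive the same wildcard filter\n\t\t\t// Key verification: exit routes NOT in SrcIPs (they're not - correct!)\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"exit-node\":     wildcardFilter,\n\t\t\t\t\"client1\":       wildcardFilter,\n\t\t\t\t\"client2\":       wildcardFilter,\n\t\t\t\t\"multi-router\":  wildcardFilter,\n\t\t\t\t\"subnet-router\": wildcardFilter,\n\t\t\t\t\"ha-router1\":    wildcardFilter,\n\t\t\t\t\"ha-router2\":    wildcardFilter,\n\t\t\t\t\"big-router\":    wildcardFilter,\n\t\t\t\t\"user1\":         wildcardFilter,\n\t\t\t},\n\t\t},\n\t\t// B2: tag:exit excludes exit routes from DstPorts\n\t\t{\n\t\t\tname: \"B2_tag_exit_excludes_exit_routes\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:exit\"], \"dst\": [\"tag:exit:*\"]}\n\t`),\n\t\t\t// tag:exit includes: exit-node, multi-router\n\t\t\t// DstPorts should contain ONLY their node IPs, NOT exit routes\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.121.32.1/32\",               // exit-node IPv4\n\t\t\t\t\t\t\t\"100.74.117.7/32\",               // multi-router IPv4\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\", // exit-node IPv6\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\", // multi-router IPv6\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// Node 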
IPs only, NOT exit routes (0.0.0.0/0, ::/0)\n\t\t\t\t\t\t\t{IP: \"100.121.32.1/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.74.117.7/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::7f01:2004/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::c401:7508/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.121.32.1/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.74.117.7/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::7f01:2004/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::c401:7508/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Verify Tailscale includes subnet routes in SrcIPs\n\t\t//\n\t\t// B4: Multi-router has both subnet and exit routes\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - multi-router has 172.16.0.0/24 (subnet) + 0.0.0.0/0,::/0 (exit)\n\t\t// - SrcIPs may include 172.16.0.0/24 but NOT 0.0.0.0/0 or ::/0\n\t\t// - Only multi-router node may receive the filter (needs verification)\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - All nodes receive the same wildcard filter\n\t\t// - SrcIPs is just CGNAT + IPv6 range, no subnet routes\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// Headscale distributes wildcard filters to all nodes\n\t\t{\n\t\t\tname: \"B4_multi_router_has_both_route_types\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}\n\t`),\n\t\t\t// EXPECTED (Tailscale) - commented out:\n\t\t\t// wantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t//     \"multi-router\": {\n\t\t\t//         {\n\t\t\t//             SrcIPs: []string{\n\t\t\t//                 \"100.64.0.0/10\",\n\t\t\t//                 \"fd7a:115c:a1e0::/48\",\n\t\t\t//                 // Tailscale may include 172.16.0.0/24 here\n\t\t\t//                 // but definitely NOT 0.0.0.0/0 or ::/0\n\t\t\t//             },\n\t\t\t//             DstPorts: []tailcfg.NetPortRange{\n\t\t\t//                 {IP: \"100.64.0.0/10\", Ports: tailcfg.PortRangeAny},\n\t\t\t//                 {IP: \"fd7a:115c:a1e0::/48\", Ports: tailcfg.PortRangeAny},\n\t\t\t//             },\n\t\t\t//             IPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t//         },\n\t\t\t//     },\n\t\t\t//     \"client1\":       nil,\n\t\t\t//     \"client2\":       nil,\n\t\t\t//     \"subnet-router\": nil,\n\t\t\t//     \"exit-node\":     nil,\n\t\t\t//     \"ha-router1\":    nil,\n\t\t\t//     \"ha-router2\":    nil,\n\t\t\t//     \"big-router\":    nil,\n\t\t\t//     \"user1\":         nil,\n\t\t\t// },\n\t\t\t//\n\t\t\t// ACTUAL (Headscale) - all nodes get wildcard filter:\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"multi-router\":  wildcardFilter,\n\t\t\t\t\"client1\":       wildcardFilter,\n\t\t\t\t\"client2\":       wildcardFilter,\n\t\t\t\t\"subnet-router\": wildcardFilter,\n\t\t\t\t\"exit-node\":     
wildcardFilter,\n\t\t\t\t\"ha-router1\":    wildcardFilter,\n\t\t\t\t\"ha-router2\":    wildcardFilter,\n\t\t\t\t\"big-router\":    wildcardFilter,\n\t\t\t\t\"user1\":         wildcardFilter,\n\t\t\t},\n\t\t},\n\t\t// B8: autogroup:internet generates no filters\n\t\t//\n\t\t// autogroup:internet is handled by exit node routing via AllowedIPs,\n\t\t// not by packet filtering. ALL nodes should get null/empty filters.\n\t\t{\n\t\t\tname: \"B8_autogroup_internet_no_filters\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"autogroup:internet:*\"]}\n\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t},\n\t\t},\n\t\t// B3: Exit node advertises exit routes (verify RoutableIPs)\n\t\t//\n\t\t// This test verifies that exit-node has 0.0.0.0/0 and ::/0 in RoutableIPs.\n\t\t// All nodes get wildcard filters with {IP: \"*\"} format matching Tailscale.\n\t\t{\n\t\t\tname: \"B3_exit_node_advertises_routes\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}\n\t\t\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       wildcardFilter,\n\t\t\t\t\"client2\":       wildcardFilter,\n\t\t\t\t\"subnet-router\": wildcardFilter,\n\t\t\t\t\"exit-node\":     wildcardFilter,\n\t\t\t\t\"multi-router\":  wildcardFilter,\n\t\t\t\t\"ha-router1\":    wildcardFilter,\n\t\t\t\t\"ha-router2\":    wildcardFilter,\n\t\t\t\t\"big-router\":    wildcardFilter,\n\t\t\t\t\"user1\":         wildcardFilter,\n\t\t\t},\n\t\t},\n\t\t// B5: Exit node with wildcard destination has ExitNodeOption\n\t\t//\n\t\t// Exit nodes should have ExitNodeOption=true in MapResponse.\n\t\t// All nodes get wildcard filters with {IP: \"*\"} format matching Tailscale.\n\t\t{\n\t\t\tname: \"B5_exit_with_wildcard_dst\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}\n\t\t\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       wildcardFilter,\n\t\t\t\t\"client2\":       wildcardFilter,\n\t\t\t\t\"subnet-router\": wildcardFilter,\n\t\t\t\t\"exit-node\":     wildcardFilter,\n\t\t\t\t\"multi-router\":  wildcardFilter,\n\t\t\t\t\"ha-router1\":    wildcardFilter,\n\t\t\t\t\"ha-router2\":    wildcardFilter,\n\t\t\t\t\"big-router\":    wildcardFilter,\n\t\t\t\t\"user1\":         wildcardFilter,\n\t\t\t},\n\t\t},\n\t\t// TODO: Verify Tailscale filter distribution for tag source with wildcard destination\n\t\t//\n\t\t// B6: ExitNodeOption field verification\n\t\t//\n\t\t// ACL: tag:exit -> *:*\n\t\t// Nodes with approved exit routes should have ExitNodeOption=true.\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - Need to verify if only exit-tagged nodes receive filters\n\t\t// - Or if ALL nodes (destinations) receive filters\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - ALL nodes receive filters (they're all destinations)\n\t\t// - SrcIPs = tag:exit node IPs\n\t\t// - DstPorts = explicit CIDR ranges (not \"*\")\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// The test expected only exit-tagged nodes to get filters, but with\n\t\t// `tag:exit -> *:*`, all nodes are destinations and should get filters.\n\t\t{\n\t\t\tname: 
\"B6_exit_node_option_field\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:exit\"], \"dst\": [\"*:*\"]}\n\t\t\t`),\n\t\t\t/* EXPECTED (Tailscale) - need verification:\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,  // Or filter?\n\t\t\t\t\"client2\":       nil,  // Or filter?\n\t\t\t\t\"subnet-router\": nil,  // Or filter?\n\t\t\t\t\"ha-router1\":    nil,  // Or filter?\n\t\t\t\t\"ha-router2\":    nil,  // Or filter?\n\t\t\t\t\"big-router\":    nil,  // Or filter?\n\t\t\t\t\"user1\":         nil,  // Or filter?\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": { ... },\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale):\n\t\t\t// All nodes receive filters (they're all destinations)\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"client2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Same as B6 - verify Tailscale filter distribution\n\t\t//\n\t\t// B7: Multiple exit nodes verification\n\t\t//\n\t\t// ACL: tag:exit -> *:*\n\t\t// Both exit-node and multi-router have tag:exit.\n\t\t// Same pattern as B6 - all nodes are destinations and receive filters.\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - Need to verify actual filter distribution\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - All nodes receive filters (same as B6)\n\t\t{\n\t\t\tname: \"B7_multiple_exit_nodes\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:exit\"], \"dst\": [\"*:*\"]}\n\t\t\t`),\n\t\t\t/* EXPECTED (Tailscale) - need verification:\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,  // Or filter?\n\t\t\t\t// ... same pattern as B6\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": { ... 
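expected to mirror \"exit-node\" above (multi-router also carries tag:exit; see B6) 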
},\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale):\n\t\t\t// All nodes receive filters (same as B6)\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"client2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// B9: Exit routes appear in peer AllowedIPs\n\t\t//\n\t\t// When viewing exit-node as a peer, AllowedIPs should include exit routes.\n\t\t// All nodes get wildcard filters with {IP: \"*\"} format matching Tailscale.\n\t\t{\n\t\t\tname: \"B9_exit_routes_in_allowedips\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}\n\t\t\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       wildcardFilter,\n\t\t\t\t\"client2\":       wildcardFilter,\n\t\t\t\t\"subnet-router\": wildcardFilter,\n\t\t\t\t\"exit-node\":     wildcardFilter,\n\t\t\t\t\"multi-router\":  wildcardFilter,\n\t\t\t\t\"ha-router1\":    wildcardFilter,\n\t\t\t\t\"ha-router2\":    wildcardFilter,\n\t\t\t\t\"big-router\":    wildcardFilter,\n\t\t\t\t\"user1\":         wildcardFilter,\n\t\t\t},\n\t\t},\n\t\t// B10: Exit routes NOT in PrimaryRoutes field\n\t\t//\n\t\t// PrimaryRoutes is for subnet routes only, not exit routes.\n\t\t// Exit routes (0.0.0.0/0, ::/0) should NOT appear in PrimaryRoutes.\n\t\t// All nodes get wildcard filters with {IP: \"*\"} format matching Tailscale.\n\t\t{\n\t\t\tname: \"B10_exit_routes_not_in_primaryroutes\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}\n\t\t\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       wildcardFilter,\n\t\t\t\t\"client2\":       wildcardFilter,\n\t\t\t\t\"subnet-router\": wildcardFilter,\n\t\t\t\t\"exit-node\":     wildcardFilter,\n\t\t\t\t\"multi-router\":  wildcardFilter,\n\t\t\t\t\"ha-router1\":    wildcardFilter,\n\t\t\t\t\"ha-router2\":    wildcardFilter,\n\t\t\t\t\"big-router\":    wildcardFilter,\n\t\t\t\t\"user1\":         wildcardFilter,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tpol, err := unmarshalPolicy([]byte(tt.policy))\n\t\t\trequire.NoError(t, err, \"failed to parse policy\")\n\n\t\t\terr = pol.validate()\n\t\t\trequire.NoError(t, err, \"policy validation failed\")\n\n\t\t\tfor nodeName, wantFilters := range tt.wantFilters {\n\t\t\t\tnode := findNodeByGivenName(nodes, nodeName)\n\t\t\t\trequire.NotNil(t, node, \"node %s not found\", nodeName)\n\n\t\t\t\tcompiledFilters, err := pol.compileFilterRulesForNode(users, node.View(), nodes.ViewSlice())\n\t\t\t\trequire.NoError(t, err, \"failed to compile filters for node %s\", nodeName)\n\n\t\t\t\tgotFilters := policyutil.ReduceFilterRules(node.View(), compiledFilters)\n\n\t\t\t\tif len(wantFilters) == 0 && len(gotFilters) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif diff := cmp.Diff(wantFilters, gotFilters, cmpOptions()...); diff != \"\" {\n\t\t\t\t\tt.Errorf(\"node %s filters mismatch (-want +got):\\n%s\", nodeName, diff)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestTailscaleRoutesCompatHARouters tests HA router behavior (Category E).\n// These tests verify that multiple routers can advertise the same subnet,\n// and that both receive filters even though only one is primary.\nfunc TestTailscaleRoutesCompatHARouters(t *testing.T) {\n\tt.Parallel()\n\n\tusers := 
setupRouteCompatUsers()\n\tnodes := setupRouteCompatNodes(users)\n\n\ttests := []routesCompatTest{\n\t\t// TODO: Fix exit node route coverage to match Tailscale behavior\n\t\t//\n\t\t// E1: Two HA routers advertise same subnet - both enabled\n\t\t//\n\t\t// ACL: * -> 192.168.1.0/24:*\n\t\t// Both ha-router1 and ha-router2 advertise 192.168.1.0/24.\n\t\t// Both should receive the filter (both are approved, one is primary).\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - Only HA routers get filters (exact route match)\n\t\t// - Exit nodes do NOT get filters (exit routes don't cover for placement)\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - HA routers correctly get filters\n\t\t// - Exit nodes also get filters because 0.0.0.0/0 \"covers\" destination\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// Same as A3 - exit routes (0.0.0.0/0) are treated as covering all destinations\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// Exclude exit routes from filter distribution coverage checks\n\t\t{\n\t\t\tname: \"E1_ha_two_routers_same_subnet\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"192.168.1.0/24:*\"]}\n\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"ha-router1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale):\n\t\t\t// HA routers correctly get filters, but exit nodes also incorrectly get them\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t// CORRECT: Both HA routers get the filter\n\t\t\t\t\"ha-router1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: Exit nodes get filters due to 
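the coverage check: every concrete\n\t\t\t\t// destination CIDR falls inside the exit node's advertised\n\t\t\t\t// 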
0.0.0.0/0 coverage\n\t\t\t\t// Tailscale would return nil here\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Fix exit node route coverage to match Tailscale behavior\n\t\t//\n\t\t// E4: HA routers with host alias\n\t\t//\n\t\t// ACL: * -> subnet24:22 (subnet24 = 192.168.1.0/24)\n\t\t// Same as E1 but uses host alias. Exit route coverage issue applies.\n\t\t{\n\t\t\tname: \"E4_ha_both_get_filters_host_alias\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"subnet24:22\"]}\n\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"ha-router1\": { ... },\n\t\t\t\t\"ha-router2\": { ... },\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"ha-router1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: Exit nodes get filters due to 0.0.0.0/0 coverage\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: 
[]tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Fix exit node route coverage to match Tailscale behavior\n\t\t//\n\t\t// E2: HA primary node appears in peer AllowedIPs\n\t\t// Same exit route coverage issue as E1.\n\t\t{\n\t\t\tname: \"E2_ha_primary_in_allowedips\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"192.168.1.0/24:*\"]}\n\t\t\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t// ... only HA routers get filters\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"ha-router1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: Exit nodes get filters\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Fix exit node route coverage to match Tailscale behavior\n\t\t//\n\t\t// E3: HA secondary does NOT have route in AllowedIPs\n\t\t// Same exit route coverage issue as E1.\n\t\t{\n\t\t\tname: \"E3_ha_secondary_no_route_in_allowedips\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"192.168.1.0/24:*\"]}\n\t\t\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t// ... 
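as in E1, 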
only HA routers get filters\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"ha-router1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: Exit nodes get filters\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Fix exit node route coverage to match Tailscale behavior\n\t\t//\n\t\t// E5: First advertiser becomes primary, both HA routers get filters\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - Only HA routers get filters (they own 192.168.1.0/24)\n\t\t// - Exit nodes do NOT get filters (exit routes don't count for coverage)\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - HA routers correctly get filters\n\t\t// - Exit nodes also get filters because 0.0.0.0/0 \"covers\" destination\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// Same as E1-E4 - exit routes (0.0.0.0/0) are treated as covering all destinations\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// Exclude exit routes from filter distribution coverage checks\n\t\t{\n\t\t\tname: \"E5_first_advertiser_is_primary\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"192.168.1.0/24:*\"]}\n\t\t\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"ha-router1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, 
ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes incorrectly get filters\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"ha-router1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: Exit nodes get filters\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\trunRoutesCompatTests(t, users, nodes, tests)\n}\n\n// TestTailscaleRoutesCompatFilterPlacement tests filter placement rules (Category F).\n// These tests verify that filters go to DESTINATION nodes (route owners),\n// not to source nodes, and that route coverage rules are applied correctly.\nfunc TestTailscaleRoutesCompatFilterPlacement(t *testing.T) {\n\tt.Parallel()\n\n\tusers := setupRouteCompatUsers()\n\tnodes := setupRouteCompatNodes(users)\n\n\ttests := []routesCompatTest{\n\t\t// TODO: Fix exit node route coverage to match Tailscale behavior\n\t\t//\n\t\t// F1: Filter goes to destination node (route owner), not source\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - Filter placed on subnet-router (owns 10.33.0.0/16) and big-router (owns 10.0.0.0/8)\n\t\t// - Source nodes (clients, user1) get null filters\n\t\t// - Exit nodes do NOT get filters (exit routes don't count for coverage)\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - Correct for subnet-router and big-router\n\t\t// - Exit nodes also get filters because 0.0.0.0/0 \"covers\" destination\n\t\t//\n\t\t// 
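A minimal sketch of the FIX REQUIRED below, assuming a\n\t\t// hypothetical helper (not Headscale's actual API) over\n\t\t// netip.Prefix values:\n\t\t//\n\t\t//\tfunc routeCoversDst(route, dst netip.Prefix) bool {\n\t\t//\t\tif route.Bits() == 0 {\n\t\t//\t\t\t// default routes (0.0.0.0/0, ::/0) never grant coverage\n\t\t//\t\t\treturn false\n\t\t//\t\t}\n\t\t//\t\treturn route.Overlaps(dst)\n\t\t//\t}\n\t\t//\n\t\t// 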
ROOT CAUSE:\n\t\t// Exit routes (0.0.0.0/0) are treated as covering all destinations\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// Exclude exit routes from filter distribution coverage checks\n\t\t{\n\t\t\tname: \"F1_filter_on_destination_not_source\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"10.33.0.0/16:22\"]}\n\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes incorrectly get filters\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: Exit nodes get filters\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Fix DstPorts expansion for autogroup:member to match Tailscale behavior\n\t\t//\n\t\t// F2: Subnet as ACL source, autogroup:member as destination\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - Each member receives a filter with DstPorts containing ALL member IPs\n\t\t// - client1's filter has DstPorts with client1, client2, user1 IPs\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - Each member receives filter with DstPorts containing ONLY its own IP\n\t\t// - client1's filter has DstPorts with only client1's IP\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// DstPorts is not expanded to include all autogroup:member IPs\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// Expand autogroup:member in DstPorts to include all member IPs, not just self\n\t\t{\n\t\t\tname: \"F2_subnet_as_acl_source\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"10.33.0.0/16\"], \"dst\": [\"autogroup:member:*\"]}\n\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"10.33.0.0/16\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.116.73.38/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.89.42.23/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::a801:4949/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::d01:2a2e/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"client2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"10.33.0.0/16\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.116.73.38/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.89.42.23/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::a801:4949/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::d01:2a2e/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: 
[]int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"10.33.0.0/16\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.116.73.38/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.89.42.23/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::a801:4949/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::d01:2a2e/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): DstPorts only contains self IP, not all member IPs\n\t\t\t// Additionally, exit-capable tagged nodes also incorrectly receive filters\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t// Members receive filters with ONLY self IP in DstPorts\n\t\t\t\t\"client1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"10.33.0.0/16\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// INCORRECT: Only client1's IPs, should include all members\n\t\t\t\t\t\t\t{IP: \"100.116.73.38/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::a801:4949/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"client2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"10.33.0.0/16\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// INCORRECT: Only client2's IPs\n\t\t\t\t\t\t\t{IP: \"100.89.42.23/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::d01:2a2e/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"10.33.0.0/16\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// INCORRECT: Only user1's IPs\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// Tagged nodes without exit routes correctly receive nil filters\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t// INCORRECT: Exit nodes get filters with all member IPs in DstPorts\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"10.33.0.0/16\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.89.42.23/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.116.73.38/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::a801:4949/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: 
\"fd7a:115c:a1e0::d01:2a2e/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"10.33.0.0/16\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.89.42.23/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.116.73.38/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::a801:4949/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::d01:2a2e/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Fix exit node route coverage to match Tailscale behavior\n\t\t//\n\t\t// F3: Wildcard source, specific subnet destination\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - Filter on subnet-router (owns 10.33.0.0/16) and big-router (owns 10.0.0.0/8)\n\t\t// - Exit nodes do NOT get filters (exit routes don't count for coverage)\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - Correct for subnet-router and big-router\n\t\t// - Exit nodes also get filters because 0.0.0.0/0 \"covers\" destination\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// Exit routes (0.0.0.0/0) are treated as covering all destinations\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// Exclude exit routes from filter distribution coverage checks\n\t\t{\n\t\t\tname: \"F3_wildcard_src_specific_dst\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.33.0.0/16:22\"]}\n\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes incorrectly get filters\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: 
tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: Exit nodes get filters\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Fix exit node route coverage to match Tailscale behavior\n\t\t//\n\t\t// F7: Filter DstPorts shows ACL CIDR, not route CIDR\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - DstPorts.IP = ACL CIDR (10.33.1.0/24), not route CIDR\n\t\t// - Only subnet-router and big-router get filters\n\t\t// - Exit nodes do NOT get filters\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - DstPorts.IP correctly uses ACL CIDR (this part works)\n\t\t// - Exit nodes also get filters because 0.0.0.0/0 \"covers\" destination\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// Exit routes (0.0.0.0/0) are treated as covering all destinations\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// Exclude exit routes from filter distribution coverage checks\n\t\t{\n\t\t\tname: \"F7_filter_dstports_shows_acl_cidr\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.33.1.0/24:22\"]}\n\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, 
ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes incorrectly get filters\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: Exit nodes get filters\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Fix wildcard destination filter distribution to match Tailscale behavior\n\t\t//\n\t\t// F4: Specific source (tag:router), wildcard destination\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - Filter sent to all non-source nodes (all nodes except tag:router nodes)\n\t\t// - Non-router nodes get filter, router nodes don't receive filter for their own traffic\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - All nodes get the filter, including the source nodes themselves\n\t\t// - DstPorts uses expanded CGNAT ranges instead of \"*\"\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// Wildcard destination distribution differs - Headscale sends to all nodes\n\t\t// DstPorts format differs - Headscale expands \"*\" to CGNAT ranges\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// Review wildcard destination distribution logic\n\t\t{\n\t\t\tname: \"F4_specific_src_wildcard_dst\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:router\"], \"dst\": [\"*:*\"]}\n\t\t\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.100.100.1/32\",  // big-router\n\t\t\t\t\t\t\t\"100.119.139.79/32\", // subnet-router\n\t\t\t\t\t\t\t\"100.74.117.7/32\",   // 
multi-router\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): All nodes get filters with expanded CGNAT ranges\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",   // multi-router\n\t\t\t\t\t\t\t\"100.100.100.1/32\",  // big-router\n\t\t\t\t\t\t\t\"100.119.139.79/32\", // subnet-router\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"client2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Fix bidirectional subnet access and DstPorts expansion to match Tailscale\n\t\t//\n\t\t// F5: Bidirectional subnet access\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - Rule 1 (member -> subnet): Filters on subnet-router and big-router only\n\t\t// - Rule 2 (subnet -> member): All members get filter with all member IPs in DstPorts\n\t\t// - Exit nodes do NOT get filters\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - All members get filters (rule 2 distribution to all)\n\t\t// - DstPorts only contains self IP, not all member IPs\n\t\t// - Exit nodes also get filters (exit route coverage issue)\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// 1. autogroup:member DstPorts expansion only includes self\n\t\t// 2. Exit routes treated as covering subnet destinations\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// 1. Expand autogroup:member in DstPorts to all member IPs\n\t\t// 2. 
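Same default-route coverage fix as F1/F3:\n\t\t//    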
Exclude exit routes from filter distribution coverage\n\t\t{\n\t\t\tname: \"F5_bidirectional_subnet_access\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"10.33.0.0/16:*\"]},\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"10.33.0.0/16\"], \"dst\": [\"autogroup:member:*\"]}\n\t\t\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"10.33.0.0/16\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.116.73.38/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.89.42.23/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::a801:4949/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::d01:2a2e/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Multiple issues\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t// All members get filters with self-only DstPorts\n\t\t\t\t\"client1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"10.33.0.0/16\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// INCORRECT: Only client1's IPs\n\t\t\t\t\t\t\t{IP: \"100.116.73.38/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::a801:4949/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"client2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"10.33.0.0/16\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.89.42.23/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::d01:2a2e/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, 
ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"10.33.0.0/16\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: Exit nodes get filters from BOTH rules\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t// First filter: from rule 1 (member -> subnet)\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t// Second filter: from rule 2 (subnet -> member)\n\t\t\t\t\t// Exit node gets this because 0.0.0.0/0 \"covers\" member IPs\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"10.33.0.0/16\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.89.42.23/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.116.73.38/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::d01:2a2e/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::a801:4949/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t// First filter: from rule 1 (member -> subnet)\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t\t// Second filter: from rule 2 (subnet -> member)\n\t\t\t\t\t// Multi-router gets this because 0.0.0.0/0 \"covers\" member IPs\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\"10.33.0.0/16\"},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.89.42.23/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.90.199.68/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.116.73.38/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::d01:2a2e/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::2d01:c747/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::a801:4949/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Fix exit node route coverage to match Tailscale behavior\n\t\t//\n\t\t// F6: Filter SrcIPs expansion with autogroup:member\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - Only subnet-router and big-router get filters\n\t\t// - Exit nodes do NOT get filters\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - Correct for subnet-router and big-router\n\t\t// - Exit nodes also get filters because 0.0.0.0/0 \"covers\" destination\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// Exit routes (0.0.0.0/0) are treated as covering all destinations\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// Exclude exit routes from filter distribution coverage checks\n\t\t{\n\t\t\tname: \"F6_filter_srcips_expansion\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"10.33.0.0/16:*\"]}\n\t\t\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", 
Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes incorrectly get filters\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: Exit nodes get filters\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Fix policy validation to allow undefined tags (matching Tailscale behavior)\n\t\t//\n\t\t// F8: Route enabled but ACL source doesn't match any nodes\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - Policy is accepted even if tag doesn't exist (no nodes have that tag)\n\t\t// - All nodes get null filters\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - Policy parsing fails with \"Tag is not defined in the Policy\"\n\t\t// - Headscale requires all tags to be defined in tagOwners\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// Headscale validates that all tags in ACLs are defined in tagOwners\n\t\t// Tailscale allows undefined tags (they just match nothing)\n\t\t//\n\t\t// FIX 
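REQUIRED (the group:empty workaround is sketched here):\n\t\t//\n\t\t// The test sidesteps the stricter validation with a group that is defined\n\t\t// but empty; illustrative HuJSON, assuming the policy helper defines it:\n\t\t//\n\t\t//\t\"groups\": { \"group:empty\": [] }\n\t\t//\n\t\t// FIX 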
REQUIRED:\n\t\t// Either relax tag validation or accept that this is a stricter policy mode\n\t\t// Using group:empty instead (defined but has no members)\n\t\t{\n\t\t\tname: \"F8_route_enabled_acl_denies\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"group:empty\"], \"dst\": [\"10.33.0.0/16:*\"]}\n\t\t\t`),\n\t\t\t// group:empty has no members, so no source IPs match\n\t\t\t// All nodes should get null filters\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t},\n\t\t},\n\t\t// TODO: Fix exit node route coverage to match Tailscale behavior\n\t\t//\n\t\t// F9: ACL allows traffic to subnet but no node has that route\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - No node has 10.99.0.0/16 route\n\t\t// - No filters should be generated for any node\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - Exit nodes get filters because 0.0.0.0/0 \"covers\" 10.99.0.0/16\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// Exit routes (0.0.0.0/0) are treated as covering all destinations\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// Exclude exit routes from filter distribution coverage checks\n\t\t{\n\t\t\tname: \"F9_route_disabled_acl_allows\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.99.0.0/16:*\"]}\n\t\t\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Routers with covering routes get filters\n\t\t\t// NOTE: big-router (10.0.0.0/8) covers 10.99.0.0/16, so it correctly gets filter\n\t\t\t// Exit nodes also incorrectly get filters due to 0.0.0.0/0 coverage\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t// big-router (10.0.0.0/8) correctly covers 10.99.0.0/16\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.99.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: Exit nodes get filters\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.99.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.99.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\trunRoutesCompatTests(t, users, nodes, tests)\n}\n\n// runRoutesCompatTests is a helper to run route compatibility tests.\nfunc runRoutesCompatTests(t *testing.T, users types.Users, nodes types.Nodes, tests []routesCompatTest) {\n\tt.Helper()\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tpol, err := unmarshalPolicy([]byte(tt.policy))\n\t\t\trequire.NoError(t, err, \"failed to parse policy\")\n\n\t\t\terr = pol.validate()\n\t\t\trequire.NoError(t, err, \"policy validation failed\")\n\n\t\t\tfor nodeName, wantFilters := range tt.wantFilters {\n\t\t\t\tnode := findNodeByGivenName(nodes, nodeName)\n\t\t\t\trequire.NotNil(t, node, \"node %s not found\", nodeName)\n\n\t\t\t\tcompiledFilters, err := pol.compileFilterRulesForNode(users, node.View(), nodes.ViewSlice())\n\t\t\t\trequire.NoError(t, err, \"failed to compile filters for node %s\", nodeName)\n\n\t\t\t\tgotFilters := policyutil.ReduceFilterRules(node.View(), compiledFilters)\n\n\t\t\t\tif len(wantFilters) == 0 && len(gotFilters) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif diff := cmp.Diff(wantFilters, gotFilters, cmpOptions()...); diff != \"\" {\n\t\t\t\t\tt.Errorf(\"node %s filters mismatch (-want +got):\\n%s\", nodeName, diff)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestTailscaleRoutesCompatRouteCoverage tests route coverage rules (Category R).\n// These tests verify that:\n// - Route coverage: R.Bits() <= D.Bits() && R.Contains(D.Addr())\n// - Exit nodes (0.0.0.0/0) receive filters for ANY destination\n// - Parent routes cover child destinations.\nfunc TestTailscaleRoutesCompatRouteCoverage(t *testing.T) {\n\tt.Parallel()\n\n\tusers := setupRouteCompatUsers()\n\tnodes := setupRouteCompatNodes(users)\n\n\ttests := []routesCompatTest{\n\t\t// R1: Exit route covers external destination\n\t\t{\n\t\t\tname: \"R1_exit_covers_external_dest\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"8.8.8.0/24:53\"]}\n\t`),\n\t\t\t// 8.8.8.0/24 is external (Google DNS range)\n\t\t\t// Exit nodes (0.0.0.0/0) should receive the filter because they cover it\n\t\t\t// TODO: Verify this is Tailscale behavior\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"subnet-router\": nil, // 10.33.0.0/16 doesn't cover 8.8.8.0/24\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"big-router\":    nil, // 10.0.0.0/8 doesn't cover 8.8.8.0/24\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t// Exit nodes cover 8.8.8.0/24\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"8.8.8.0/24\", Ports: tailcfg.PortRange{First: 53, Last: 53}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"8.8.8.0/24\", Ports: tailcfg.PortRange{First: 53, Last: 53}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// R2: Parent route covers child destination\n\t\t// TODO: Exit route coverage issue - exit nodes get filters when they shouldn't.\n\t\t// TAILSCALE BEHAVIOR: Exit nodes (0.0.0.0/0) do NOT receive filters for internal\n\t\t// subnet destinations like 10.33.1.0/24. Only subnet-router and big-router get filters.\n\t\t// HEADSCALE BEHAVIOR: Exit nodes also get filters because Headscale treats exit routes\n\t\t// (0.0.0.0/0) as covering all IPv4 destinations, including internal ranges.\n\t\t// ROOT CAUSE: routeCoversDestination() returns true for exit routes covering internal IPs.\n\t\t{\n\t\t\tname: \"R2_parent_route_covers_child_dest\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.33.1.0/24:22\"]}\n\t`),\n\t\t\t// big-router has 10.0.0.0/8 - covers 10.33.1.0/24\n\t\t\t// subnet-router has 10.33.0.0/16 - also covers 10.33.1.0/24\n\t\t\t// Both should receive the filter\n\t\t\t/*\n\t\t\t\tEXPECTED (Tailscale):\n\t\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\t\"client1\":      nil,\n\t\t\t\t\t\"client2\":      nil,\n\t\t\t\t\t\"exit-node\":    nil,\n\t\t\t\t\t\"multi-router\": nil,\n\t\t\t\t\t\"ha-router1\":   nil,\n\t\t\t\t\t\"ha-router2\":   nil,\n\t\t\t\t\t\"user1\":        nil,\n\t\t\t\t\t\"subnet-router\": { ... },\n\t\t\t\t\t\"big-router\":    { ... },\n\t\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes also get filters due to exit route coverage\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}}},\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}}},\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// Exit nodes incorrectly get filters (exit route 0.0.0.0/0 covers 10.33.1.0/24)\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}}},\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}}},\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// R3: Sibling routes don't cover each other\n\t\t// TODO: Exit route coverage issue - exit nodes get filters when they shouldn't.\n\t\t// TAILSCALE 
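route coverage follows the predicate from the doc comment\n\t\t// above, R.Bits() <= D.Bits() && R.Contains(D.Addr()); worked on this data:\n\t\t//\n\t\t//\tR=10.33.0.0/16, D=10.34.0.0/16: R.Contains(10.34.0.0) is false -> no filter\n\t\t//\tR=10.0.0.0/8,   D=10.34.0.0/16: 8 <= 16 && R.Contains(10.34.0.0) -> filter\n\t\t//\n\t\t// TAILSCALE 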
BEHAVIOR: Exit nodes do NOT receive filters for internal subnet destinations.\n\t\t// subnet-router (10.33.0.0/16) correctly does NOT get filter (sibling doesn't cover sibling).\n\t\t// HEADSCALE BEHAVIOR: Exit nodes get filters because 0.0.0.0/0 covers 10.34.0.0/16.\n\t\t// ROOT CAUSE: routeCoversDestination() returns true for exit routes covering internal IPs.\n\t\t{\n\t\t\tname: \"R3_sibling_routes_no_coverage\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.34.0.0/16:22\"]}\n\t`),\n\t\t\t// 10.34.0.0/16 is a sibling to 10.33.0.0/16 (different /16 in 10.0.0.0/8)\n\t\t\t// subnet-router (10.33.0.0/16) should NOT get filter\n\t\t\t// big-router (10.0.0.0/8) SHOULD get filter (parent covers both)\n\t\t\t/*\n\t\t\t\tEXPECTED (Tailscale):\n\t\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\t\"client1\":       nil,\n\t\t\t\t\t\"client2\":       nil,\n\t\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\t\"user1\":         nil,\n\t\t\t\t\t\"big-router\":    { ... },\n\t\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes also get filters due to exit route coverage\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"subnet-router\": nil, // 10.33.0.0/16 doesn't cover 10.34.0.0/16 (correct)\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t// Only big-router covers 10.34.0.0/16\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{{IP: \"10.34.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}}},\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// Exit nodes incorrectly get filters (exit route 0.0.0.0/0 covers 10.34.0.0/16)\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{{IP: \"10.34.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}}},\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{{IP: \"10.34.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}}},\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// R4: Exact match route\n\t\t// TODO: Exit route coverage issue - exit nodes get filters when they shouldn't.\n\t\t// TAILSCALE BEHAVIOR: Exit nodes do NOT receive filters for internal subnet destinations.\n\t\t// HEADSCALE BEHAVIOR: Exit nodes get filters because 0.0.0.0/0 covers 10.33.0.0/16.\n\t\t// ROOT CAUSE: routeCoversDestination() returns true for exit routes covering internal IPs.\n\t\t{\n\t\t\tname: \"R4_exact_match_route\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.33.0.0/16:22\"]}\n\t`),\n\t\t\t// Exact match: subnet-router has exactly 10.33.0.0/16\n\t\t\t// big-router (10.0.0.0/8) also covers it\n\t\t\t/*\n\t\t\t\tEXPECTED (Tailscale):\n\t\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\t\"client1\":      nil,\n\t\t\t\t\t\"client2\":      nil,\n\t\t\t\t\t\"exit-node\":    
nil,\n\t\t\t\t\t\"multi-router\": nil,\n\t\t\t\t\t\"ha-router1\":   nil,\n\t\t\t\t\t\"ha-router2\":   nil,\n\t\t\t\t\t\"user1\":        nil,\n\t\t\t\t\t\"subnet-router\": { ... },\n\t\t\t\t\t\"big-router\":    { ... },\n\t\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes also get filters due to exit route coverage\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}}},\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}}},\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// Exit nodes incorrectly get filters (exit route 0.0.0.0/0 covers 10.33.0.0/16)\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}}},\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}}},\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\trunRoutesCompatTests(t, users, nodes, tests)\n}\n\n// TestTailscaleRoutesCompatOverlapping tests overlapping route behavior (Category O).\n// These tests verify that multiple routers with overlapping routes all receive filters.\nfunc TestTailscaleRoutesCompatOverlapping(t *testing.T) {\n\tt.Parallel()\n\n\tusers := setupRouteCompatUsers()\n\tnodes := setupRouteCompatNodes(users)\n\n\ttests := []routesCompatTest{\n\t\t// O2: HA routers both get filter\n\t\t// TODO: Fix exit route coverage for HA route destinations\n\t\t// TAILSCALE BEHAVIOR: Only ha-router1 and ha-router2 get filters.\n\t\t// HEADSCALE BEHAVIOR: Exit nodes also get filters (0.0.0.0/0 covers 192.168.1.0/24).\n\t\t// ROOT CAUSE: Exit route coverage.\n\t\t// FIX REQUIRED: Exclude exit nodes from subnet-specific destinations.\n\t\t{\n\t\t\tname: \"O2_ha_routers_both_get_filter\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"192.168.1.0/24:*\"]}\n\t`),\n\t\t\t/*\n\t\t\t\tEXPECTED (Tailscale):\n\t\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\t\"client1\":       nil,\n\t\t\t\t\t\"client2\":       nil,\n\t\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\t\"big-router\":    nil,\n\t\t\t\t\t\"user1\":         nil,\n\t\t\t\t\t\"ha-router1\":    { filter with 192.168.1.0/24:* },\n\t\t\t\t\t\"ha-router2\":    { filter with 192.168.1.0/24:* },\n\t\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes also get filters\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       
nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"ha-router1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// Exit nodes incorrectly get filters (exit route covers)\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// O3: Parent-child routes on different nodes\n\t\t// TODO: Fix exit route coverage for subnet destinations\n\t\t// TAILSCALE BEHAVIOR: Only subnet-router and big-router get filters.\n\t\t// HEADSCALE BEHAVIOR: Exit nodes also get filters (0.0.0.0/0 covers 10.33.1.0/24).\n\t\t// ROOT CAUSE: Exit route coverage.\n\t\t// FIX REQUIRED: Exclude exit nodes from subnet-specific destinations.\n\t\t{\n\t\t\tname: \"O3_parent_child_different_nodes\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.33.1.0/24:22\"]}\n\t`),\n\t\t\t/*\n\t\t\t\tEXPECTED (Tailscale):\n\t\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\t\"client1\":      nil,\n\t\t\t\t\t\"client2\":      nil,\n\t\t\t\t\t\"exit-node\":    nil,\n\t\t\t\t\t\"multi-router\": nil,\n\t\t\t\t\t\"ha-router1\":   nil,\n\t\t\t\t\t\"ha-router2\":   nil,\n\t\t\t\t\t\"user1\":        nil,\n\t\t\t\t\t\"subnet-router\": { filter with 10.33.1.0/24:22 },\n\t\t\t\t\t\"big-router\":    { filter with 10.33.1.0/24:22 },\n\t\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes also get filters\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// Exit nodes incorrectly get filters (exit route 
covers)\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// O6: Exit route expands filter distribution\n\t\t{\n\t\t\tname: \"O6_exit_route_expands_filter_dist\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"8.8.8.0/24:53\"]}\n\t`),\n\t\t\t// Only exit nodes cover 8.8.8.0/24\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"8.8.8.0/24\", Ports: tailcfg.PortRange{First: 53, Last: 53}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"8.8.8.0/24\", Ports: tailcfg.PortRange{First: 53, Last: 53}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// O12: Filter dest is ACL CIDR, not route CIDR\n\t\t// TODO: Fix exit route coverage for subnet destinations\n\t\t// TAILSCALE BEHAVIOR: Only subnet-router and big-router get filters.\n\t\t// HEADSCALE BEHAVIOR: Exit nodes also get filters (0.0.0.0/0 covers 10.33.1.0/24).\n\t\t// ROOT CAUSE: Exit route coverage.\n\t\t// FIX REQUIRED: Exclude exit nodes from subnet-specific destinations.\n\t\t{\n\t\t\tname: \"O12_filter_dest_is_acl_cidr\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.33.1.0/24:22\"]}\n\t`),\n\t\t\t/*\n\t\t\t\tEXPECTED (Tailscale):\n\t\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\t\"client1\":      nil,\n\t\t\t\t\t\"client2\":      nil,\n\t\t\t\t\t\"exit-node\":    nil,\n\t\t\t\t\t\"multi-router\": nil,\n\t\t\t\t\t\"ha-router1\":   nil,\n\t\t\t\t\t\"ha-router2\":   nil,\n\t\t\t\t\t\"user1\":        nil,\n\t\t\t\t\t\"subnet-router\": { filter with 10.33.1.0/24:22 },\n\t\t\t\t\t\"big-router\":    { filter with 10.33.1.0/24:22 },\n\t\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes also get filters\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: 
[]tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// Must be ACL CIDR \"10.33.1.0/24\", NOT route \"10.33.0.0/16\"\n\t\t\t\t\t\t\t{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// Must be ACL CIDR \"10.33.1.0/24\", NOT route \"10.0.0.0/8\"\n\t\t\t\t\t\t\t{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// Exit nodes incorrectly get filters (exit route covers)\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\trunRoutesCompatTests(t, users, nodes, tests)\n}\n\n// TestTailscaleRoutesCompatTagResolution tests tag resolution behavior (Category T).\n// These tests verify that tags resolve to node IPs only, NOT to routes.\nfunc TestTailscaleRoutesCompatTagResolution(t *testing.T) {\n\tt.Parallel()\n\n\tusers := setupRouteCompatUsers()\n\tnodes := setupRouteCompatNodes(users)\n\n\ttests := []routesCompatTest{\n\t\t// TODO: Fix per-node DstPorts visibility and exit route coverage\n\t\t//\n\t\t// T1: Tags resolve to IPs, not routes\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - Only tag:router nodes (subnet-router, multi-router, big-router) get filters\n\t\t// - DstPorts shows ALL tag:router node IPs to each node\n\t\t// - exit-node does NOT get filter (not in tag:router)\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - Exit node also gets filter (0.0.0.0/0 route \"covers\" tag:router IPs)\n\t\t// - Per-node DstPorts visibility: each node only sees its OWN IP in DstPorts\n\t\t//   (subnet-router sees only subnet-router IPs, big-router sees only big-router IPs)\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// 1. Exit routes (0.0.0.0/0) are treated as covering all destinations\n\t\t// 2. Filter reduction logic scopes DstPorts to per-node visibility\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// 1. Exclude exit routes from tag-based filter distribution\n\t\t// 2. 
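(sketched below)\n\t\t//\n\t\t// Sketch of fix 2 with hypothetical names (not the actual reduction code):\n\t\t// when a node is a destination of the rule at all, keep the full resolved\n\t\t// destination set instead of intersecting DstPorts with its own IPs:\n\t\t//\n\t\t//\tif nodeIsDestination(node, rule) {\n\t\t//\t\trule.DstPorts = fullDstPorts // all tag:router IPs, not self-only\n\t\t//\t}\n\t\t//\n\t\t// 2. 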
Show full destination set to all destination nodes (not per-node scoped)\n\t\t{\n\t\t\tname: \"T1_tags_resolve_to_ips_not_routes\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:router\"], \"dst\": [\"tag:router:*\"]}\n\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"exit-node\":  nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"subnet-router\": { SrcIPs: all tag:router, DstPorts: ALL tag:router IPs },\n\t\t\t\t\"multi-router\":  { SrcIPs: all tag:router, DstPorts: ALL tag:router IPs },\n\t\t\t\t\"big-router\":    { SrcIPs: all tag:router, DstPorts: ALL tag:router IPs },\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit gets filter, per-node DstPorts scoped to own IPs\n\t\t\t// tag:router = subnet-router (100.119.139.79), multi-router (100.74.117.7), big-router (100.100.100.1)\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t// INCORRECT: exit-node gets filter due to 0.0.0.0/0 coverage\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.100.100.1/32\",  // big-router\n\t\t\t\t\t\t\t\"100.119.139.79/32\", // subnet-router\n\t\t\t\t\t\t\t\"100.74.117.7/32\",   // multi-router\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// exit-node sees all tag:router IPs (via 0.0.0.0/0 coverage)\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.100.100.1/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.119.139.79/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.74.117.7/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::4001:8ba0/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::6401:6401/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::c401:7508/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: subnet-router only sees its own IPs in DstPorts\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// Per-node scoped: only subnet-router's own IPs\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.119.139.79/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::4001:8ba0/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: multi-router sees ALL IPs (it has tag:router AND tag:exit with 0.0.0.0/0)\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// multi-router sees ALL tag:router IPs (it has 0.0.0.0/0 exit route coverage)\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.100.100.1/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.119.139.79/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.74.117.7/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::4001:8ba0/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::6401:6401/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::c401:7508/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: big-router only sees its own IPs in DstPorts\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// Per-node scoped: only big-router's own IPs\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.100.100.1/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::6401:6401/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// T2: tag:exit to tag:exit\n\t\t{\n\t\t\tname: \"T2_tag_to_tag_with_exit\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:exit\"], \"dst\": [\"tag:exit:*\"]}\n\t`),\n\t\t\t// tag:exit = exit-node, multi-router\n\t\t\t// DstPorts = node IPs only, NOT exit routes\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// Node IPs only - no exit routes 0.0.0.0/0, ::/0\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.121.32.1/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.74.117.7/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::7f01:2004/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::c401:7508/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: 
[]tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.121.32.1/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.74.117.7/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::7f01:2004/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::c401:7508/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// T5: Multi-tag node appears in both src and dst\n\t\t{\n\t\t\tname: \"T5_multi_tag_node_in_both\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t{\"action\": \"accept\", \"src\": [\"tag:router\"], \"dst\": [\"tag:exit:*\"]}\n\t`),\n\t\t\t// multi-router has BOTH tag:router and tag:exit\n\t\t\t// It should appear in BOTH SrcIPs (as router) and DstPorts (as exit)\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// Source: tag:router nodes\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.100.100.1/32\",  // big-router\n\t\t\t\t\t\t\t\"100.119.139.79/32\", // subnet-router\n\t\t\t\t\t\t\t\"100.74.117.7/32\",   // multi-router (has both tags)\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// Dest: tag:exit nodes (exit-node + multi-router)\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.121.32.1/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.74.117.7/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::7f01:2004/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::c401:7508/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.121.32.1/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.74.117.7/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::7f01:2004/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::c401:7508/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\trunRoutesCompatTests(t, users, nodes, tests)\n}\n\n// TestTailscaleRoutesCompatProtocolPort tests protocol and port restrictions on subnet routes.\n// Category G: Tests from 13-route-acl-interactions.md focusing on protocol/port handling.\nfunc TestTailscaleRoutesCompatProtocolPort(t *testing.T) {\n\tt.Parallel()\n\n\tusers := setupRouteCompatUsers()\n\tnodes := setupRouteCompatNodes(users)\n\n\ttests := []routesCompatTest{\n\t\t// TODO: Fix exit node route coverage to match Tailscale behavior\n\t\t//\n\t\t// G1: Port restriction on subnet (22 
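only; single-port encoding noted below)\n\t\t//\n\t\t// A single port is a degenerate tailcfg.PortRange, in contrast to the\n\t\t// wildcard used elsewhere in these tests:\n\t\t//\n\t\t//\ttailcfg.PortRange{First: 22, Last: 22} // dst \":22\"\n\t\t//\ttailcfg.PortRangeAny                   // dst \":*\"\n\t\t//\n\t\t// G1: Port restriction on subnet (22 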
only)\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - Only subnet-router and big-router get filters\n\t\t// - Exit nodes do NOT get filters for subnet destinations\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - Exit nodes also get filters because 0.0.0.0/0 \"covers\" everything\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// Exit routes (0.0.0.0/0) are treated as covering all destinations\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// Exclude exit routes from filter distribution coverage checks\n\t\t{\n\t\t\tname: \"G1_port_restriction_subnet\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"10.33.0.0/16:22\"]}\n\t\t\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\"subnet-router\": { filter with port 22 },\n\t\t\t\t\"big-router\":    { filter with port 22 },\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes also get filters\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: Exit nodes get filters due to 0.0.0.0/0 coverage\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Fix exit node route coverage to match Tailscale behavior\n\t\t//\n\t\t// G2: Port range on subnet (80-443)\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - Only subnet-router and big-router get filters\n\t\t// - Exit nodes do NOT get filters for subnet destinations\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - Exit nodes also get filters because 0.0.0.0/0 \"covers\" everything\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// Exit routes (0.0.0.0/0) are treated as covering all destinations\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// Exclude exit routes from filter distribution coverage checks\n\t\t{\n\t\t\tname: \"G2_port_range_subnet\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.33.0.0/16:80-443\"]}\n\t\t\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\"subnet-router\": { filter with port 80-443 },\n\t\t\t\t\"big-router\":    { filter with port 80-443 },\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes also get filters\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 80, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 80, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: Exit nodes get filters due to 0.0.0.0/0 coverage\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 80, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 80, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Fix exit node route coverage to match Tailscale behavior\n\t\t//\n\t\t// G7: All ports wildcard\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - Only subnet-router and big-router get filters\n\t\t// - Exit 
nodes do NOT get filters for subnet destinations\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - Exit nodes also get filters because 0.0.0.0/0 \"covers\" everything\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// Exit routes (0.0.0.0/0) are treated as covering all destinations\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// Exclude exit routes from filter distribution coverage checks\n\t\t{\n\t\t\tname: \"G7_all_ports_wildcard\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"10.33.0.0/16:*\"]}\n\t\t\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\"subnet-router\": { filter with all ports },\n\t\t\t\t\"big-router\":    { filter with all ports },\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes also get filters\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: Exit nodes get filters due to 0.0.0.0/0 coverage\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, 
ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\trunRoutesCompatTests(t, users, nodes, tests)\n}\n\n// TestTailscaleRoutesCompatIPv6 tests IPv6-specific route behavior.\n// Category I: Tests from 15-overlapping-subnets.md focusing on IPv6 handling.\nfunc TestTailscaleRoutesCompatIPv6(t *testing.T) {\n\tt.Parallel()\n\n\tusers := setupRouteCompatUsers()\n\n\t// Create nodes with IPv6 subnet routes\n\tnodeClient1 := &types.Node{\n\t\tID:             1,\n\t\tGivenName:      \"client1\",\n\t\tUser:           &users[0],\n\t\tUserID:         &users[0].ID,\n\t\tIPv4:           ptrAddr(\"100.116.73.38\"),\n\t\tIPv6:           ptrAddr(\"fd7a:115c:a1e0::a801:4949\"),\n\t\tHostinfo:       &tailcfg.Hostinfo{},\n\t\tApprovedRoutes: []netip.Prefix{},\n\t}\n\n\t// IPv6 subnet router\n\tnodeIPv6Router := &types.Node{\n\t\tID:        2,\n\t\tGivenName: \"ipv6-router\",\n\t\tIPv4:      ptrAddr(\"100.119.139.80\"),\n\t\tIPv6:      ptrAddr(\"fd7a:115c:a1e0::4001:8ba1\"),\n\t\tTags:      []string{\"tag:router\"},\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tRoutableIPs: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"fd00::/48\"),\n\t\t\t},\n\t\t},\n\t\tApprovedRoutes: []netip.Prefix{\n\t\t\tnetip.MustParsePrefix(\"fd00::/48\"),\n\t\t},\n\t}\n\n\t// IPv6 child route (more specific)\n\tnodeIPv6ChildRouter := &types.Node{\n\t\tID:        3,\n\t\tGivenName: \"ipv6-child-router\",\n\t\tIPv4:      ptrAddr(\"100.119.139.81\"),\n\t\tIPv6:      ptrAddr(\"fd7a:115c:a1e0::4001:8ba2\"),\n\t\tTags:      []string{\"tag:router\"},\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tRoutableIPs: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"fd00:1::/64\"),\n\t\t\t},\n\t\t},\n\t\tApprovedRoutes: []netip.Prefix{\n\t\t\tnetip.MustParsePrefix(\"fd00:1::/64\"),\n\t\t},\n\t}\n\n\t// IPv6 exit node (with ::/0)\n\tnodeIPv6Exit := &types.Node{\n\t\tID:        4,\n\t\tGivenName: \"ipv6-exit\",\n\t\tIPv4:      ptrAddr(\"100.121.32.2\"),\n\t\tIPv6:      ptrAddr(\"fd7a:115c:a1e0::7f01:2005\"),\n\t\tTags:      []string{\"tag:exit\"},\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tRoutableIPs: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"::/0\"),\n\t\t\t},\n\t\t},\n\t\tApprovedRoutes: []netip.Prefix{\n\t\t\tnetip.MustParsePrefix(\"::/0\"),\n\t\t},\n\t}\n\n\tnodes := types.Nodes{nodeClient1, nodeIPv6Router, nodeIPv6ChildRouter, nodeIPv6Exit}\n\n\ttests := []routesCompatTest{\n\t\t// TODO: Fix wildcard DstPorts format, SrcIPs to include subnet routes, and filter distribution\n\t\t//\n\t\t// I1: IPv6 subnet route with wildcard ACL\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - SrcIPs includes IPv6 subnet route (fd00::/48) in wildcard expansion\n\t\t// - DstPorts uses {IP: \"*\"} for wildcard destinations\n\t\t// - Only client1 receives a filter (filter placed on destination node)\n\t\t// - Other nodes (routers) do NOT receive filters for wildcard dst\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - SrcIPs doesn't include subnet routes, only CGNAT ranges\n\t\t// - DstPorts expands to CGNAT ranges instead of \"*\"\n\t\t// - ALL nodes receive filters (incorrect filter distribution)\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// 1. Headscale doesn't include subnet routes in wildcard SrcIPs\n\t\t// 2. Headscale expands \"*\" to CGNAT ranges instead of using \"*\"\n\t\t// 3. Headscale distributes filters to all nodes instead of only the destination\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// 1. Include advertised subnet routes in wildcard SrcIPs\n\t\t// 2. Use {IP: \"*\"} for wildcard destinations\n\t\t// 3. 
Fix filter distribution to only send to destination nodes\n\t\t{\n\t\t\tname: \"I1_ipv6_subnet_route\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}\n\t\t\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\t// Wildcard ACL - SrcIPs should include IPv6 route (fd00::/48)\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd00::/48\", // IPv6 subnet route\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ipv6-router\":       nil,\n\t\t\t\t\"ipv6-child-router\": nil,\n\t\t\t\t\"ipv6-exit\":         nil,\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): All nodes get filters with CGNAT DstPorts\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: All routers get filters (should be nil)\n\t\t\t\t\"ipv6-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ipv6-child-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ipv6-exit\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Fix IPv6 parent route coverage\n\t\t//\n\t\t// I4: IPv6 specific ACL targeting fd00:1::/64\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - ipv6-router (fd00::/48) covers fd00:1::/64 - should get filter\n\t\t// - ipv6-child-router (fd00:1::/64) exact match - should get filter\n\t\t// - ipv6-exit (::/0) covers everything - should get filter\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - ipv6-router (fd00::/48) does NOT get filter - Headscale doesn't recognize\n\t\t//   that fd00::/48 covers fd00:1::/64 (parent route coverage not working)\n\t\t// - ipv6-child-router (fd00:1::/64) gets filter (exact match works)\n\t\t// - ipv6-exit (::/0) gets filter (IPv6 exit route coverage works)\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// Headscale's route coverage logic doesn't properly handle IPv6 parent routes.\n\t\t// fd00::/48 should cover fd00:1::/64 but Headscale doesn't recognize this.\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// Fix IPv6 parent route coverage in filter distribution logic.\n\t\t{\n\t\t\tname: \"I4_ipv6_specific_acl\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"fd00:1::/64:443\"]}\n\t\t\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\": nil,\n\t\t\t\t// ipv6-router should get filter (fd00::/48 covers fd00:1::/64)\n\t\t\t\t\"ipv6-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: 
[]tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"fd00:1::/64\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// ipv6-child-router should also get filter (exact match)\n\t\t\t\t\"ipv6-child-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"fd00:1::/64\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// ipv6-exit should get filter (::/0 covers everything)\n\t\t\t\t\"ipv6-exit\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"fd00:1::/64\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): ipv6-router doesn't get filter (parent route coverage broken)\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\": nil,\n\t\t\t\t// INCORRECT: ipv6-router doesn't get filter (it should, based on parent coverage)\n\t\t\t\t\"ipv6-router\": nil,\n\t\t\t\t// ipv6-child-router gets filter (exact match works)\n\t\t\t\t\"ipv6-child-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"fd00:1::/64\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// ipv6-exit gets filter (::/0 covers everything)\n\t\t\t\t\"ipv6-exit\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"fd00:1::/64\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Fix IPv6 parent route coverage\n\t\t//\n\t\t// I5: IPv6 parent/child route coverage\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - ipv6-router (fd00::/48) covers fd00:1:2::/80 - should get filter\n\t\t// - ipv6-child-router (fd00:1::/64) does NOT cover fd00:1:2::/80\n\t\t//   (fd00:1::/64 = fd00:0001:0000::/64, fd00:1:2::/80 = fd00:0001:0002::/80 - different)\n\t\t// - ipv6-exit (::/0) covers everything - should get filter\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - ipv6-router (fd00::/48) does NOT get filter - Headscale doesn't recognize\n\t\t//   that fd00::/48 covers fd00:1:2::/80 (parent route coverage not working)\n\t\t// - ipv6-child-router correctly gets nil (fd00:1::/64 doesn't cover fd00:1:2::/80)\n\t\t// - ipv6-exit gets filter (::/0 covers everything)\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// Headscale's route coverage logic doesn't properly handle IPv6 parent routes.\n\t\t// fd00::/48 should cover fd00:1:2::/80 but Headscale doesn't recognize this.\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// Fix IPv6 parent route coverage in filter distribution logic (see the\n\t\t// containment sketch just below).\n
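\t\t//\n\t\t// A minimal containment check with net/netip, mirroring the hextet\n\t\t// expansion above (illustrative only; nothing here is repo API):\n\t\t//\n\t\t//\tchild := netip.MustParsePrefix(\"fd00:1::/64\")  // fd00:0001:0000::/64\n\t\t//\tdst := netip.MustParsePrefix(\"fd00:1:2::/80\")  // fd00:0001:0002::/80\n\t\t//\tchild.Contains(dst.Addr())                     // false: third hextet differs\n\t\t{\n\t\t\tname: \"I5_ipv6_parent_child_routes\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"fd00:1:2::/80:*\"]}\n\t\t\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: 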
map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\": nil,\n\t\t\t\t// ipv6-router (fd00::/48) covers fd00:1:2::/80 - should get filter\n\t\t\t\t\"ipv6-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"fd00:1:2::/80\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// ipv6-child-router (fd00:1::/64) does NOT cover fd00:1:2::/80\n\t\t\t\t\"ipv6-child-router\": nil,\n\t\t\t\t// ipv6-exit (::/0) covers everything\n\t\t\t\t\"ipv6-exit\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"fd00:1:2::/80\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): ipv6-router doesn't get filter (parent route coverage broken)\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\": nil,\n\t\t\t\t// INCORRECT: ipv6-router doesn't get filter (it should, based on parent coverage)\n\t\t\t\t\"ipv6-router\": nil,\n\t\t\t\t// ipv6-child-router correctly doesn't get filter\n\t\t\t\t// (fd00:1::/64 doesn't cover fd00:1:2::/80)\n\t\t\t\t\"ipv6-child-router\": nil,\n\t\t\t\t// ipv6-exit gets filter (::/0 covers everything)\n\t\t\t\t\"ipv6-exit\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"fd00:1:2::/80\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// I7: IPv6 exit route coverage (external IPv6 destination)\n\t\t{\n\t\t\tname: \"I7_ipv6_exit_coverage\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"2001:db8::/32:443\"]}\n\t\t\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":           nil,\n\t\t\t\t\"ipv6-router\":       nil, // fd00::/48 doesn't cover 2001:db8::/32\n\t\t\t\t\"ipv6-child-router\": nil, // fd00:1::/64 doesn't cover 2001:db8::/32\n\t\t\t\t// Only ipv6-exit (::/0) should get filter\n\t\t\t\t\"ipv6-exit\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"2001:db8::/32\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\trunRoutesCompatTests(t, users, nodes, tests)\n}\n\n
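// routeCoversDstSketch is a hedged, self-contained sketch of the coverage\n// predicate quoted in the R5 comments further below (R.Bits() <= D.Bits() &&\n// R.Contains(D.Addr())), combined with the guard several TODOs in this file\n// propose: default routes (0.0.0.0/0, ::/0) stop counting as covering\n// specific destinations. Illustrative only; it is not the\n// routeCoversDestination() helper those comments refer to.\nfunc routeCoversDstSketch(route, dst netip.Prefix) bool {\n\tif route.Bits() == 0 {\n\t\treturn false // proposed fix: an exit route never covers a specific dst\n\t}\n\treturn route.Bits() <= dst.Bits() && route.Contains(dst.Addr())\n}\n\n// TestTailscaleRoutesCompatEdgeCases tests edge cases and unusual configurations.\n// Category H: Edge cases from various findings documents.\nfunc TestTailscaleRoutesCompatEdgeCases(t *testing.T) {\n\tt.Parallel()\n\n\tusers := setupRouteCompatUsers()\n\tnodes := setupRouteCompatNodes(users)\n\n\ttests := []routesCompatTest{\n\t\t// TODO: Fix wildcard SrcIPs to include subnet routes like Tailscale\n\t\t//\n\t\t// H1: Verify wildcard SrcIPs format\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - 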
SrcIPs includes CGNAT range + all advertised subnet routes\n\t\t// - Exit nodes do NOT get filters for tag:router destination\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - SrcIPs only includes CGNAT range (no subnet routes)\n\t\t// - Exit nodes also get filters due to exit route coverage\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// 1. Headscale doesn't include subnet routes in wildcard SrcIPs\n\t\t// 2. Exit routes (0.0.0.0/0) treated as covering all destinations\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// 1. Include advertised subnet routes in wildcard SrcIPs\n\t\t// 2. Exclude exit routes from filter distribution\n\t\t{\n\t\t\tname: \"H1_wildcard_srcips_format\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"tag:router:*\"]}\n\t\t\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"exit-node\":  nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\", \"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t\t\"10.0.0.0/8\", \"10.33.0.0/16\", \"172.16.0.0/24\", \"192.168.1.0/24\", // routes!\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: ... tag:router IPs,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// ... multi-router and big-router with same SrcIPs pattern\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale):\n\t\t\t// - SrcIPs missing routes\n\t\t\t// - DstPorts only contains node's own IPs (not all tag:router IPs)\n\t\t\t// - exit-node gets filter\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t// INCORRECT: DstPorts only contains self IPs, not all tag:router IPs\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// Only subnet-router's own IPs\n\t\t\t\t\t\t\t{IP: \"100.119.139.79/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::4001:8ba0/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// multi-router has tag:router AND tag:exit, gets all tag:router IPs\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// All tag:router IPs\n\t\t\t\t\t\t\t{IP: \"100.100.100.1/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.119.139.79/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.74.117.7/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::4001:8ba0/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::6401:6401/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::c401:7508/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// Only big-router's own IPs\n\t\t\t\t\t\t\t{IP: \"100.100.100.1/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::6401:6401/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: Exit node gets filter (should be nil)\n\t\t\t\t// Exit-node has tag:exit, NOT tag:router, so shouldn't get filter\n\t\t\t\t// But due to exit route coverage, it gets ALL tag:router IPs\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.64.0.0/10\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::/48\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// All tag:router IPs (exit-node sees all because of route coverage)\n\t\t\t\t\t\t\t{IP: \"100.100.100.1/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.119.139.79/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.74.117.7/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::4001:8ba0/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::6401:6401/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::c401:7508/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// H9: Large prefix (/8) subnet route\n\t\t// TODO: Fix exit route coverage and child route coverage\n\t\t// TAILSCALE BEHAVIOR: Only big-router (has 10.0.0.0/8) gets the filter.\n\t\t//   subnet-router (10.33.0.0/16) is a CHILD of 10.0.0.0/8 - doesn't cover parent.\n\t\t//   Exit nodes do NOT get filters for specific subnet destinations.\n\t\t// HEADSCALE BEHAVIOR: Exit nodes get filters (0.0.0.0/0 covers). 
subnet-router also\n\t\t//   gets filter (Headscale incorrectly treats child routes as covering).\n\t\t// ROOT CAUSE: Two issues: (1) Exit route coverage, (2) Child route coverage.\n\t\t// FIX REQUIRED: Exclude exit nodes and fix route coverage to only include parents.\n\t\t{\n\t\t\tname: \"H9_large_prefix_works\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"10.0.0.0/8:*\"]}\n\t\t\t`),\n\t\t\t/*\n\t\t\t\tEXPECTED (Tailscale):\n\t\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\t\"client1\":       nil,\n\t\t\t\t\t\"client2\":       nil,\n\t\t\t\t\t\"user1\":         nil,\n\t\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\t\"big-router\": {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tSrcIPs: memberSrcIPs,\n\t\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t\t{IP: \"10.0.0.0/8\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes and child routes also get filters\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: memberSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.0.0.0/8\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// subnet-router incorrectly gets filter (child route coverage)\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: memberSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.0.0.0/8\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// Exit nodes incorrectly get filters (exit route covers)\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: memberSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.0.0.0/8\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: memberSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.0.0.0/8\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// H2: Wildcard DstPorts format\n\t\t// TODO: Fix wildcard DstPorts format and filter distribution\n\t\t// TAILSCALE BEHAVIOR: DstPorts uses {IP: \"*\"} for wildcard destinations.\n\t\t//   Only client1 receives a filter (filter placed on destination node).\n\t\t// HEADSCALE BEHAVIOR: DstPorts expands to CGNAT ranges (100.64.0.0/10, fd7a:115c:a1e0::/48).\n\t\t//   ALL nodes receive filters.\n\t\t// ROOT CAUSE: Two issues: (1) Headscale expands \"*\" to CGNAT ranges instead of using \"*\",\n\t\t//   (2) Headscale distributes filters to all nodes instead of only the destination.\n\t\t// FIX REQUIRED: Use {IP: \"*\"} for wildcard destinations 
and fix filter distribution.\n\t\t{\n\t\t\tname: \"H2_wildcard_dstports_format\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"*:*\"]}\n\t\t\t`),\n\t\t\t/*\n\t\t\t\tEXPECTED (Tailscale):\n\t\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\t\"client1\": {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tSrcIPs: memberSrcIPs,\n\t\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\"client2\":       nil,\n\t\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\t\"big-router\":    nil,\n\t\t\t\t\t\"user1\":         nil,\n\t\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): DstPorts expanded to CGNAT, all nodes get filters\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   memberSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"client2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   memberSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   memberSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   memberSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   memberSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   memberSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   memberSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   memberSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   memberSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// H3: CGNAT range expansion in wildcard\n\t\t// TODO: Fix filter distribution and exit route coverage for tag destinations\n\t\t// TAILSCALE BEHAVIOR: Only tag:router nodes (subnet-router, multi-router, big-router)\n\t\t//   receive filters. 
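In a sketch with hypothetical\n\t\t//   helpers (nodesWithTag and portRangesFor are not repo API), the\n\t\t//   destination set would be assembled per tag rather than per node:\n\t\t//\n\t\t//\tfor _, n := range nodesWithTag(\"tag:router\") {\n\t\t//\t\tdsts = append(dsts, portRangesFor(n)...)\n\t\t//\t}\n\t\t//\n\t\t//   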
Each receives DstPorts containing ALL tag:router node IPs.\n\t\t// HEADSCALE BEHAVIOR: Exit nodes also get filters (exit route covers tag:router IPs).\n\t\t//   Tag-only routers see just their OWN IPs in DstPorts; exit-capable nodes see all.\n\t\t// ROOT CAUSE: Two issues: (1) Exit route coverage gives filters to exit-node,\n\t\t//   (2) Per-node DstPorts filtering shows only self IPs instead of all tag:router IPs.\n\t\t// FIX REQUIRED: Exclude exit nodes from tag-based destinations, fix DstPorts to include all.\n\t\t{\n\t\t\tname: \"H3_cgnat_range_expansion\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"tag:router:*\"]}\n\t\t\t`),\n\t\t\t/*\n\t\t\t\tEXPECTED (Tailscale):\n\t\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\t\"client1\":    nil,\n\t\t\t\t\t\"client2\":    nil,\n\t\t\t\t\t\"user1\":      nil,\n\t\t\t\t\t\"exit-node\":  nil,\n\t\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t\t// All tag:router node IPs\n\t\t\t\t\t\t\t\t{IP: \"100.100.100.1/32\", Ports: tailcfg.PortRangeAny},  // big-router\n\t\t\t\t\t\t\t\t{IP: \"100.119.139.79/32\", Ports: tailcfg.PortRangeAny}, // subnet-router\n\t\t\t\t\t\t\t\t{IP: \"100.74.117.7/32\", Ports: tailcfg.PortRangeAny},   // multi-router\n\t\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::4001:8ba0/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::6401:6401/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::c401:7508/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\"multi-router\": { same filter with all tag:router IPs },\n\t\t\t\t\t\"big-router\":   { same filter with all tag:router IPs },\n\t\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): subnet-router and big-router only see their own IPs in\n\t\t\t// DstPorts; multi-router and exit-node (incorrectly) see all tag:router IPs\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// Only self IPs\n\t\t\t\t\t\t\t{IP: \"100.119.139.79/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::4001:8ba0/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// All tag:router IPs (multi-router sees all)\n\t\t\t\t\t\t\t{IP: \"100.100.100.1/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.119.139.79/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.74.117.7/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::4001:8ba0/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::6401:6401/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::c401:7508/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, 
ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// Only self IPs\n\t\t\t\t\t\t\t{IP: \"100.100.100.1/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::6401:6401/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// exit-node incorrectly gets filter (exit route covers tag:router IPs)\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.100.100.1/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.119.139.79/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.74.117.7/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::4001:8ba0/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::6401:6401/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::c401:7508/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// H4: IPv6 range in SrcIPs\n\t\t// TODO: Fix wildcard DstPorts format and filter distribution\n\t\t// TAILSCALE BEHAVIOR: DstPorts uses {IP: \"*\"} for wildcard destinations.\n\t\t//   SrcIPs includes fd7a:115c:a1e0::/48 (IPv6 Tailscale range). Only client1 receives filter.\n\t\t// HEADSCALE BEHAVIOR: DstPorts expands to CGNAT ranges. ALL nodes receive filters.\n\t\t// ROOT CAUSE: Same as H2 - Headscale expands \"*\" to CGNAT and distributes to all nodes.\n\t\t// FIX REQUIRED: Use {IP: \"*\"} for wildcard destinations and fix filter distribution.\n\t\t{\n\t\t\tname: \"H4_ipv6_range_in_srcips\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}\n\t\t\t`),\n\t\t\t/*\n\t\t\t\tEXPECTED (Tailscale):\n\t\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\t\"client1\": {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\"client2\":       nil,\n\t\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\t\"big-router\":    nil,\n\t\t\t\t\t\"user1\":         nil,\n\t\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): All nodes get filters with CGNAT DstPorts\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"client2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, 
ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// H7: Two nodes claiming same subnet - first is primary\n\t\t// TODO: Fix exit route coverage for subnet destinations\n\t\t// TAILSCALE BEHAVIOR: Only ha-router1 and ha-router2 (which have 192.168.1.0/24) get filters.\n\t\t//   Exit nodes do NOT get filters for specific subnet destinations.\n\t\t// HEADSCALE BEHAVIOR: Exit nodes also get filters (0.0.0.0/0 covers 192.168.1.0/24).\n\t\t// ROOT CAUSE: Exit route coverage gives filters to exit-node and multi-router.\n\t\t// FIX REQUIRED: Exclude exit nodes from subnet-specific destinations.\n\t\t{\n\t\t\tname: \"H7_two_nodes_same_subnet\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"192.168.1.0/24:*\"]}\n\t\t\t`),\n\t\t\t/*\n\t\t\t\tEXPECTED (Tailscale):\n\t\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\t\"client1\":       nil,\n\t\t\t\t\t\"client2\":       nil,\n\t\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\t\"big-router\":    nil,\n\t\t\t\t\t\"user1\":         nil,\n\t\t\t\t\t\"ha-router1\": {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\"ha-router2\": {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes also get filters\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"ha-router1\": 
{\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// Exit nodes incorrectly get filters (exit route covers 192.168.1.0/24)\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// H10: Very small prefix (/32)\n\t\t// TODO: Fix exit route coverage for /32 destinations\n\t\t// TAILSCALE BEHAVIOR: Only subnet-router (10.33.0.0/16) and big-router (10.0.0.0/8) get filters.\n\t\t//   These routes cover 10.33.0.100/32. Exit nodes do NOT get filters.\n\t\t// HEADSCALE BEHAVIOR: Exit nodes also get filters (0.0.0.0/0 covers 10.33.0.100/32).\n\t\t// ROOT CAUSE: Exit route coverage gives filters to exit-node and multi-router.\n\t\t// FIX REQUIRED: Exclude exit nodes from specific IP destinations.\n\t\t{\n\t\t\tname: \"H10_very_small_prefix\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.33.0.100/32:80\"]}\n\t\t\t`),\n\t\t\t/*\n\t\t\t\tEXPECTED (Tailscale):\n\t\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\t\"client1\":      nil,\n\t\t\t\t\t\"client2\":      nil,\n\t\t\t\t\t\"user1\":        nil,\n\t\t\t\t\t\"ha-router1\":   nil,\n\t\t\t\t\t\"ha-router2\":   nil,\n\t\t\t\t\t\"exit-node\":    nil,\n\t\t\t\t\t\"multi-router\": nil,\n\t\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t\t{IP: \"10.33.0.100/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\"big-router\": {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t\t{IP: \"10.33.0.100/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes also get filters\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: 
\"10.33.0.100/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.100/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// Exit nodes incorrectly get filters (exit route covers)\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.100/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.100/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\trunRoutesCompatTests(t, users, nodes, tests)\n}\n\n// TestTailscaleRoutesCompatAdditionalR tests additional route coverage scenarios (Category R).\nfunc TestTailscaleRoutesCompatAdditionalR(t *testing.T) {\n\tt.Parallel()\n\n\tusers := setupRouteCompatUsers()\n\tnodes := setupRouteCompatNodes(users)\n\n\ttests := []routesCompatTest{\n\t\t// R5: Route coverage check logic verification\n\t\t// TODO: Exit route coverage issue - exit nodes get filters when they shouldn't.\n\t\t// TAILSCALE BEHAVIOR: Exit nodes do NOT receive filters for internal subnet destinations.\n\t\t// HEADSCALE BEHAVIOR: Exit nodes get filters because 0.0.0.0/0 covers 10.33.1.0/24.\n\t\t// ROOT CAUSE: routeCoversDestination() returns true for exit routes covering internal IPs.\n\t\t{\n\t\t\tname: \"R5_route_coverage_check_logic\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.33.1.0/24:22\"]}\n\t\t\t`),\n\t\t\t// Route coverage: R.Bits() <= D.Bits() && R.Contains(D.Addr())\n\t\t\t// 10.0.0.0/8 (bits=8) <= 24 && contains 10.33.1.0 -> YES\n\t\t\t// 10.33.0.0/16 (bits=16) <= 24 && contains 10.33.1.0 -> YES\n\t\t\t/*\n\t\t\t\tEXPECTED (Tailscale):\n\t\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\t\"client1\":      nil,\n\t\t\t\t\t\"client2\":      nil,\n\t\t\t\t\t\"user1\":        nil,\n\t\t\t\t\t\"exit-node\":    nil,\n\t\t\t\t\t\"multi-router\": nil,\n\t\t\t\t\t\"ha-router1\":   nil,\n\t\t\t\t\t\"ha-router2\":   nil,\n\t\t\t\t\t\"subnet-router\": { ... },\n\t\t\t\t\t\"big-router\":    { ... 
},\n\t\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes also get filters due to exit route coverage\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}}},\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}}},\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// Exit nodes incorrectly get filters (exit route 0.0.0.0/0 covers 10.33.1.0/24)\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}}},\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}}},\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// R6: IPv6 route coverage\n\t\t// TODO: Exit route coverage issue - exit nodes get filters for IPv6 Tailscale range.\n\t\t// TAILSCALE BEHAVIOR: No nodes get filters for IPv6 addresses in the Tailscale range\n\t\t// (fd7a:115c:a1e0::/48) as these are node IPs, not routed destinations.\n\t\t// HEADSCALE BEHAVIOR: Exit nodes get filters because ::/0 covers all IPv6 addresses.\n\t\t// ROOT CAUSE: routeCoversDestination() returns true for exit routes covering all IPs.\n\t\t{\n\t\t\tname: \"R6_ipv6_route_coverage\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"fd7a:115c:a1e0::1/128:443\"]}\n\t\t\t`),\n\t\t\t// Targeting a specific IPv6 in the Tailscale range\n\t\t\t/*\n\t\t\t\tEXPECTED (Tailscale):\n\t\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\t\"client1\":       nil,\n\t\t\t\t\t\"client2\":       nil,\n\t\t\t\t\t\"user1\":         nil,\n\t\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\t\"big-router\":    nil,\n\t\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes get filters (exit route ::/0 covers all IPv6)\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t// Exit nodes incorrectly get filters (exit route ::/0 covers fd7a:115c:a1e0::1)\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{{IP: \"fd7a:115c:a1e0::1/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}}},\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, 
ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{{IP: \"fd7a:115c:a1e0::1/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}}},\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// R7: Exit node IPv6 coverage\n\t\t{\n\t\t\tname: \"R7_exit_ipv6_coverage\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"2001:db8::1/128:443\"]}\n\t\t\t`),\n\t\t\t// External IPv6 address - only exit nodes cover\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t// Exit nodes cover all destinations\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"2001:db8::1/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"2001:db8::1/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// R8: Mixed IPv4/IPv6 coverage\n\t\t// TODO: Multiple coverage issues in this test:\n\t\t// 1. Exit route coverage: exit nodes get IPv4 filters (0.0.0.0/0 covers 10.33.0.0/16)\n\t\t// 2. 
Node IP coverage: all nodes get IPv6 filters because their IPv6 addresses are in\n\t\t//    fd7a:115c:a1e0::/48 which overlaps with the destination fd7a:115c:a1e0::/64\n\t\t// TAILSCALE BEHAVIOR: Only subnet-router and big-router get filters (IPv4 only).\n\t\t// HEADSCALE BEHAVIOR:\n\t\t//   - All nodes get filters for IPv6 (node IPs are in fd7a:115c:a1e0::/48)\n\t\t//   - Exit nodes get filters for IPv4 (exit route covers 10.33.0.0/16)\n\t\t//   - subnet-router and big-router get both IPv4 and IPv6\n\t\t// ROOT CAUSE: Node IP prefixes incorrectly treated as routes covering destinations.\n\t\t{\n\t\t\tname: \"R8_mixed_ipv4_ipv6_coverage\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.33.0.0/16:*\", \"fd7a:115c:a1e0::/64:*\"]}\n\t\t\t`),\n\t\t\t// Both IPv4 and IPv6 destinations\n\t\t\t/*\n\t\t\t\tEXPECTED (Tailscale):\n\t\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\t\"client1\":      nil,\n\t\t\t\t\t\"client2\":      nil,\n\t\t\t\t\t\"user1\":        nil,\n\t\t\t\t\t\"ha-router1\":   nil,\n\t\t\t\t\t\"ha-router2\":   nil,\n\t\t\t\t\t\"exit-node\":    nil,\n\t\t\t\t\t\"multi-router\": nil,\n\t\t\t\t\t\"subnet-router\": { IPv4 only: 10.33.0.0/16 },\n\t\t\t\t\t\"big-router\":    { IPv4 only: 10.33.0.0/16 },\n\t\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Multiple issues - node IPs treated as routes, exit coverage\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t// All nodes get IPv6 filters because their IPs are in fd7a:115c:a1e0::/48\n\t\t\t\t\"client1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{{IP: \"fd7a:115c:a1e0::/64\", Ports: tailcfg.PortRangeAny}},\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"client2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{{IP: \"fd7a:115c:a1e0::/64\", Ports: tailcfg.PortRangeAny}},\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{{IP: \"fd7a:115c:a1e0::/64\", Ports: tailcfg.PortRangeAny}},\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{{IP: \"fd7a:115c:a1e0::/64\", Ports: tailcfg.PortRangeAny}},\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{{IP: \"fd7a:115c:a1e0::/64\", Ports: tailcfg.PortRangeAny}},\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// subnet-router and big-router get both IPv4 (from routes) and IPv6 (from node IPs)\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::/64\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": 
{\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::/64\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// Exit nodes get both IPv4 (exit route) and IPv6 (exit route + node IPs)\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::/64\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::/64\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\trunRoutesCompatTests(t, users, nodes, tests)\n}\n\n// TestTailscaleRoutesCompatAdditionalO tests additional overlapping route scenarios (Category O).\nfunc TestTailscaleRoutesCompatAdditionalO(t *testing.T) {\n\tt.Parallel()\n\n\tusers := setupRouteCompatUsers()\n\tnodes := setupRouteCompatNodes(users)\n\n\ttests := []routesCompatTest{\n\t\t// O1: Overlapping routes not merged\n\t\t// TODO: Fix wildcard destination handling for nodes with routes\n\t\t// TAILSCALE BEHAVIOR: Only client1 gets filters (dst *:* only goes to primary node).\n\t\t// HEADSCALE BEHAVIOR: All nodes get filters (dst *:* expands to Headscale IP ranges for all nodes).\n\t\t// ROOT CAUSE: Wildcard destination expands to Headscale IP ranges, not literal \"*\".\n\t\t// FIX REQUIRED: Limit *:* distribution to match Tailscale behavior.\n\t\t{\n\t\t\tname: \"O1_overlapping_routes_not_merged\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}\n\t\t\t`),\n\t\t\t/*\n\t\t\t\tEXPECTED (Tailscale):\n\t\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\t\"client1\": { filter with *:* },\n\t\t\t\t\t\"client2\":       nil,\n\t\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\t\"big-router\":    nil,\n\t\t\t\t\t\"user1\":         nil,\n\t\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): All nodes get filters with expanded IP ranges\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"client2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// All routers get 
filters because *:* expands to all Headscale IP ranges\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// O4: Three-way hierarchy\n\t\t// TODO: Fix exit route coverage for subnet destinations\n\t\t// TAILSCALE BEHAVIOR: Only subnet-router and big-router get filters.\n\t\t// HEADSCALE BEHAVIOR: Exit nodes also get filters (0.0.0.0/0 covers 10.33.1.128/25).\n\t\t// ROOT CAUSE: Exit route coverage.\n\t\t// FIX REQUIRED: Exclude exit nodes from subnet-specific destinations.\n\t\t{\n\t\t\tname: \"O4_three_way_hierarchy\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.33.1.128/25:22\"]}\n\t\t\t`),\n\t\t\t/*\n\t\t\t\tEXPECTED (Tailscale):\n\t\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\t\"client1\":      nil,\n\t\t\t\t\t\"client2\":      nil,\n\t\t\t\t\t\"user1\":        nil,\n\t\t\t\t\t\"ha-router1\":   nil,\n\t\t\t\t\t\"ha-router2\":   nil,\n\t\t\t\t\t\"exit-node\":    nil,\n\t\t\t\t\t\"multi-router\": nil,\n\t\t\t\t\t\"subnet-router\": { filter with 10.33.1.128/25:22 },\n\t\t\t\t\t\"big-router\":    { filter with 10.33.1.128/25:22 },\n\t\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes also get filters\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.1.128/25\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.1.128/25\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// Exit nodes incorrectly get filters (exit route 
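0.0.0.0/0 covers 10.33.1.128/25).\n\t\t\t\t//\n\t\t\t\t// The \"covers\" relation used throughout these notes is, in effect\n\t\t\t\t// (an illustrative sketch, not the actual Headscale helper):\n\t\t\t\t//\n\t\t\t\t//\tfunc covers(route, dst netip.Prefix) bool {\n\t\t\t\t//\t\treturn route.Overlaps(dst) && route.Bits() <= dst.Bits()\n\t\t\t\t//\t}\n\t\t\t\t//\n\t\t\t\t// (exit route 0.0.0.0/0 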
covers)\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.1.128/25\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.1.128/25\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// O5: Sibling routes with parent ACL\n\t\t// TODO: Fix exit route coverage and subnet-router getting parent ACL filter\n\t\t// TAILSCALE BEHAVIOR: Only big-router gets filters (exact /8 route match).\n\t\t// HEADSCALE BEHAVIOR: Exit nodes get filters (0.0.0.0/0 covers 10.0.0.0/8),\n\t\t//                     and subnet-router gets filters (10.33.0.0/16 is within /8).\n\t\t// ROOT CAUSE: Exit route coverage + child routes get parent ACL filters.\n\t\t// FIX REQUIRED: Exclude exit nodes and child routes from parent ACL.\n\t\t{\n\t\t\tname: \"O5_sibling_routes_with_parent_acl\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.0.0.0/8:*\"]}\n\t\t\t`),\n\t\t\t/*\n\t\t\t\tEXPECTED (Tailscale):\n\t\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\t\"client1\":       nil,\n\t\t\t\t\t\"client2\":       nil,\n\t\t\t\t\t\"user1\":         nil,\n\t\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\t\"big-router\":    { filter with 10.0.0.0/8:* },\n\t\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes and subnet-router also get filters\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.0.0.0/8\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// subnet-router incorrectly gets filter (child route within parent ACL)\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.0.0.0/8\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// Exit nodes incorrectly get filters (exit route covers)\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.0.0.0/8\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.0.0.0/8\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, 
ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// O7: Specific IP targeting with multiple covering routes\n\t\t// TODO: Fix exit route coverage for specific IP destinations\n\t\t// TAILSCALE BEHAVIOR: Only subnet-router and big-router get filters.\n\t\t// HEADSCALE BEHAVIOR: Exit nodes also get filters (0.0.0.0/0 covers 10.33.0.100/32).\n\t\t// ROOT CAUSE: Exit route coverage.\n\t\t// FIX REQUIRED: Exclude exit nodes from subnet-specific destinations.\n\t\t{\n\t\t\tname: \"O7_specific_ip_targeting\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.33.0.100/32:80\"]}\n\t\t\t`),\n\t\t\t/*\n\t\t\t\tEXPECTED (Tailscale):\n\t\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\t\"client1\":      nil,\n\t\t\t\t\t\"client2\":      nil,\n\t\t\t\t\t\"user1\":        nil,\n\t\t\t\t\t\"ha-router1\":   nil,\n\t\t\t\t\t\"ha-router2\":   nil,\n\t\t\t\t\t\"exit-node\":    nil,\n\t\t\t\t\t\"multi-router\": nil,\n\t\t\t\t\t\"subnet-router\": { filter with 10.33.0.100/32:80 },\n\t\t\t\t\t\"big-router\":    { filter with 10.33.0.100/32:80 },\n\t\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes also get filters\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.100/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.100/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// Exit nodes incorrectly get filters (exit route covers)\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.100/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.100/32\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// O10: ACL dest covered by multiple routes\n\t\t// TODO: Fix exit route coverage for subnet destinations\n\t\t// TAILSCALE BEHAVIOR: Only subnet-router and big-router get filters.\n\t\t// HEADSCALE BEHAVIOR: Exit nodes also get filters (0.0.0.0/0 covers 10.33.1.0/24).\n\t\t// ROOT CAUSE: Exit route coverage.\n\t\t// FIX REQUIRED: Exclude exit nodes from subnet-specific destinations.\n\t\t{\n\t\t\tname: \"O10_acl_dest_covered_by_multiple\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.33.1.0/24:22\"]}\n\t\t\t`),\n\t\t\t/*\n\t\t\t\tEXPECTED (Tailscale):\n\t\t\t\twantFilters: 
map[string][]tailcfg.FilterRule{\n\t\t\t\t\t\"client1\":      nil,\n\t\t\t\t\t\"client2\":      nil,\n\t\t\t\t\t\"user1\":        nil,\n\t\t\t\t\t\"ha-router1\":   nil,\n\t\t\t\t\t\"ha-router2\":   nil,\n\t\t\t\t\t\"exit-node\":    nil,\n\t\t\t\t\t\"multi-router\": nil,\n\t\t\t\t\t\"subnet-router\": { filter with 10.33.1.0/24:22 },\n\t\t\t\t\t\"big-router\":    { filter with 10.33.1.0/24:22 },\n\t\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes also get filters\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// Exit nodes incorrectly get filters (exit route covers)\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// O11: ACL dest not covered by any route\n\t\t// TODO: Fix exit route coverage for uncovered destinations\n\t\t// TAILSCALE BEHAVIOR: No nodes get filters (no route covers 192.168.99.0/24).\n\t\t// HEADSCALE BEHAVIOR: Exit nodes get filters (0.0.0.0/0 covers 192.168.99.0/24).\n\t\t// ROOT CAUSE: Exit route coverage.\n\t\t// FIX REQUIRED: Exclude exit nodes from uncovered destinations.\n\t\t{\n\t\t\tname: \"O11_acl_dest_not_covered\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"192.168.99.0/24:22\"]}\n\t\t\t`),\n\t\t\t/*\n\t\t\t\tEXPECTED (Tailscale):\n\t\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\t\"client1\":       nil,\n\t\t\t\t\t\"client2\":       nil,\n\t\t\t\t\t\"user1\":         nil,\n\t\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\t\"big-router\":    nil,\n\t\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes get filters\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t// Exit nodes incorrectly get filters (exit route 
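0.0.0.0/0 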
covers)\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.99.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.99.0/24\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\trunRoutesCompatTests(t, users, nodes, tests)\n}\n\n// TestTailscaleRoutesCompatAdditionalG tests additional protocol and port scenarios (Category G).\nfunc TestTailscaleRoutesCompatAdditionalG(t *testing.T) {\n\tt.Parallel()\n\n\tusers := setupRouteCompatUsers()\n\tnodes := setupRouteCompatNodes(users)\n\n\ttests := []routesCompatTest{\n\t\t// G3: Multiple ports on subnet\n\t\t// TODO: Fix exit route coverage for subnet destinations\n\t\t// TAILSCALE BEHAVIOR: Only subnet-router and big-router get filters.\n\t\t// HEADSCALE BEHAVIOR: Exit nodes also get filters (0.0.0.0/0 covers 10.33.0.0/16).\n\t\t// ROOT CAUSE: Exit route coverage.\n\t\t// FIX REQUIRED: Exclude exit nodes from subnet-specific destinations.\n\t\t{\n\t\t\tname: \"G3_multiple_ports_subnet\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.33.0.0/16:22,80,443\"]}\n\t\t\t`),\n\t\t\t/*\n\t\t\t\tEXPECTED (Tailscale):\n\t\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\t\"client1\":      nil,\n\t\t\t\t\t\"client2\":      nil,\n\t\t\t\t\t\"user1\":        nil,\n\t\t\t\t\t\"ha-router1\":   nil,\n\t\t\t\t\t\"ha-router2\":   nil,\n\t\t\t\t\t\"exit-node\":    nil,\n\t\t\t\t\t\"multi-router\": nil,\n\t\t\t\t\t\"subnet-router\": { ... },\n\t\t\t\t\t\"big-router\":   { ... 
},\n\t\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes also get filters\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// Exit nodes incorrectly get filters (exit route covers)\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// G8: Default IPProto (all protocols)\n\t\t// TODO: Fix exit route coverage for subnet destinations\n\t\t// TAILSCALE BEHAVIOR: Only subnet-router and big-router get filters.\n\t\t// HEADSCALE BEHAVIOR: Exit nodes also get filters (0.0.0.0/0 covers 10.33.0.0/16).\n\t\t// ROOT CAUSE: Exit route coverage.\n\t\t// FIX REQUIRED: Exclude exit nodes from subnet-specific destinations.\n\t\t{\n\t\t\tname: \"G8_default_ipproto\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.33.0.0/16:22\"]}\n\t\t\t`),\n\t\t\t/*\n\t\t\t\tEXPECTED (Tailscale):\n\t\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\t\"exit-node\":    nil,\n\t\t\t\t\t\"multi-router\": nil,\n\t\t\t\t\t...\n\t\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes also get filters\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: 
[]tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// TCP=6, UDP=17, ICMP=1, ICMPv6=58\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// Exit nodes incorrectly get filters (exit route covers)\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\trunRoutesCompatTests(t, users, nodes, tests)\n}\n\n// TestTailscaleRoutesCompatAdditionalT tests additional tag resolution scenarios (Category T).\nfunc TestTailscaleRoutesCompatAdditionalT(t *testing.T) {\n\tt.Parallel()\n\n\tusers := setupRouteCompatUsers()\n\tnodes := setupRouteCompatNodes(users)\n\n\ttests := []routesCompatTest{\n\t\t// TODO: Fix wildcard destination expansion and filter distribution\n\t\t//\n\t\t// T3: Tag source includes all tagged nodes\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - Only client1 gets filter (user-owned, thus a valid destination)\n\t\t// - DstPorts uses literal \"*\" for wildcard destination\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - ALL nodes get filters (wildcard destination distributed to everyone)\n\t\t// - DstPorts expands to CGNAT ranges instead of \"*\"\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// 1. Wildcard destination distributed to all nodes instead of only non-source nodes\n\t\t// 2. DstPorts expands wildcards to explicit CGNAT ranges\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// 1. Limit filter distribution for wildcard destinations\n\t\t// 2. 
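Emit Tailscale's \"*\" shorthand for wildcard destinations. A minimal\n\t\t//    sketch of fix (2); the helper name is an assumption, not existing\n\t\t//    Headscale code:\n\t\t//\n\t\t//\tfunc wildcardDst() []tailcfg.NetPortRange {\n\t\t//\t\t// Keep the literal \"*\" instead of expanding to CGNAT/ULA ranges.\n\t\t//\t\treturn []tailcfg.NetPortRange{{IP: \"*\", Ports: tailcfg.PortRangeAny}}\n\t\t//\t}\n\t\t//\n\t\t//    In short: 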
Use literal \"*\" in DstPorts for wildcard destinations\n\t\t{\n\t\t\tname: \"T3_tag_src_includes_all_tagged\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:router\"], \"dst\": [\"*:*\"]}\n\t\t\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{ tag:router IPs },\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): ALL nodes get filters, DstPorts expanded to CGNAT ranges\n\t\t\t// tag:router = subnet-router, multi-router, big-router\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t// INCORRECT: All nodes get filters, not just destination nodes\n\t\t\t\t\"client1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.100.100.1/32\",  // big-router\n\t\t\t\t\t\t\t\"100.119.139.79/32\", // subnet-router\n\t\t\t\t\t\t\t\"100.74.117.7/32\",   // multi-router\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// INCORRECT: DstPorts uses CGNAT ranges instead of \"*\"\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"client2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Fix per-node DstPorts visibility and exit route coverage\n\t\t//\n\t\t// T4: Tag destination includes all tagged nodes\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - Only ha-router1 and ha-router2 get filters (tag:ha nodes)\n\t\t// - DstPorts shows ALL tag:ha node IPs to each node\n\t\t// - exit-node and multi-router do NOT get filters\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - Exit nodes also get filter (0.0.0.0/0 route \"covers\" tag:ha IPs)\n\t\t// - Per-node DstPorts visibility: each node only sees its OWN IP in DstPorts\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// 1. Exit routes (0.0.0.0/0) are treated as covering all destinations\n\t\t// 2. Filter reduction logic scopes DstPorts to per-node visibility\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// 1. Exclude exit routes from tag-based filter distribution\n\t\t// 2. 
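Build one shared destination set from every node carrying the tag\n\t\t//    and hand the same DstPorts slice to each destination node. A hedged\n\t\t//    sketch of fix (2); helper and method names are assumptions, not the\n\t\t//    actual Headscale API:\n\t\t//\n\t\t//\tfunc sharedDstPorts(tagged []*types.Node) []tailcfg.NetPortRange {\n\t\t//\t\tvar out []tailcfg.NetPortRange\n\t\t//\t\tfor _, n := range tagged {\n\t\t//\t\t\t// Every tagged node's IPs, not just the receiving node's own.\n\t\t//\t\t\tfor _, addr := range n.IPs() {\n\t\t//\t\t\t\tout = append(out, tailcfg.NetPortRange{\n\t\t//\t\t\t\t\tIP:    netip.PrefixFrom(addr, addr.BitLen()).String(),\n\t\t//\t\t\t\t\tPorts: tailcfg.PortRangeAny,\n\t\t//\t\t\t\t})\n\t\t//\t\t\t}\n\t\t//\t\t}\n\t\t//\t\treturn out\n\t\t//\t}\n\t\t//\n\t\t//    In short: 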
Show full destination set to all destination nodes (not per-node scoped)\n\t\t{\n\t\t\tname: \"T4_tag_dst_includes_all_tagged\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"tag:ha:*\"]}\n\t\t\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"exit-node\":     nil,\n\t\t\t\t\"multi-router\":  nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"ha-router1\": { DstPorts: ALL tag:ha IPs },\n\t\t\t\t\"ha-router2\": { DstPorts: ALL tag:ha IPs },\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes get filters, per-node DstPorts scoped\n\t\t\t// tag:ha = ha-router1 (100.85.37.108), ha-router2 (100.119.130.32)\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t// INCORRECT: exit-node gets filter due to 0.0.0.0/0 coverage\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\t// exit-node sees ALL tag:ha IPs via exit route coverage\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.119.130.32/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.85.37.108/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::4501:82a9/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::f101:2597/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: multi-router gets filter due to 0.0.0.0/0 coverage\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\t// multi-router sees ALL tag:ha IPs via exit route coverage\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.119.130.32/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"100.85.37.108/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::4501:82a9/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::f101:2597/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: ha-router1 only sees its own IPs in DstPorts\n\t\t\t\t\"ha-router1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\t// Per-node scoped: only ha-router1's own IPs\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.85.37.108/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::f101:2597/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: ha-router2 only sees its own IPs in DstPorts\n\t\t\t\t\"ha-router2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\t// Per-node scoped: only ha-router2's own IPs\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.119.130.32/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::4501:82a9/128\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, 
ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\trunRoutesCompatTests(t, users, nodes, tests)\n}\n\n// TestTailscaleRoutesCompatAutoApprover tests autoApprover behavior (Category D).\n// These tests validate automatic route approval based on tags and prefixes.\n// NOTE: AutoApprover affects route ENABLING, not filter distribution.\n// The filter tests here verify filters ASSUMING routes are enabled.\nfunc TestTailscaleRoutesCompatAutoApprover(t *testing.T) {\n\tt.Parallel()\n\n\tusers := setupRouteCompatUsers()\n\tnodes := setupRouteCompatNodes(users)\n\n\ttests := []routesCompatTest{\n\t\t// D1: Basic route auto-approval with autoApprover\n\t\t// 10.0.0.0/8 -> tag:router means routes within 10.0.0.0/8\n\t\t// advertised by nodes with tag:router are auto-approved\n\t\t{\n\t\t\tname: \"D1_basic_route_auto_approval\",\n\t\t\t// This test validates that with autoApprover configured,\n\t\t\t// routes matching the prefix/tag combination are enabled.\n\t\t\t// Filter distribution follows standard rules.\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.33.0.0/16:*\"]}\n\t\t\t`),\n\t\t\t// Assuming route is auto-approved and enabled:\n\t\t\t// Filter goes to subnet-router (route owner) + big-router (parent route)\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t// subnet-router owns 10.33.0.0/16\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// big-router owns 10.0.0.0/8 (covers 10.33.0.0/16)\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// exit-node and multi-router also get filter (0.0.0.0/0 covers)\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// D2: Nested prefix approval - autoApprover for parent covers child\n\t\t{\n\t\t\tname: \"D2_nested_prefix_approval\",\n\t\t\t// autoApprover 10.0.0.0/8 covers advertised 10.33.0.0/16\n\t\t\t// This test verifies subset prefixes are approved\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.33.0.0/16:22\"]}\n\t\t\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": 
nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// D3: Exact prefix approval\n\t\t{\n\t\t\tname: \"D3_exact_prefix_approval\",\n\t\t\t// autoApprover for exactly 10.33.0.0/16 matches advertised 10.33.0.0/16\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.33.0.0/16:*\"]}\n\t\t\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// D4: Prefix not covered by autoApprover\n\t\t// 192.168.0.0/16, but node advertises 10.0.0.0/8 - NOT approved\n\t\t// Without approval, route not enabled, no filters distributed\n\t\t{\n\t\t\tname: \"D4_prefix_not_covered\",\n\t\t\t// If autoApprover is 192.168.0.0/16 but we target 10.0.0.0/8\n\t\t\t// the route would NOT be 
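auto-approved. The autoApprovers stanza this scenario assumes would\n\t\t\t// look roughly like (a sketch; the tag choice is illustrative):\n\t\t\t//\n\t\t\t//\t\"autoApprovers\": {\n\t\t\t//\t\t\"routes\": {\n\t\t\t//\t\t\t\"192.168.0.0/16\": [\"tag:ha\"]\n\t\t\t//\t\t}\n\t\t\t//\t}\n\t\t\t//\n\t\t\t// Under it, big-router's 10.0.0.0/8 would NOT be 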
auto-approved\n\t\t\t// This tests that only matching prefixes get filters\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"192.168.1.0/24:*\"]}\n\t\t\t`),\n\t\t\t// Only HA routers own 192.168.1.0/24\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t// exit-node and multi-router get filter (0.0.0.0/0 covers)\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// D5: Wrong tag not approved\n\t\t// autoApprover 10.0.0.0/8 -> tag:router, but node is tag:ha\n\t\t{\n\t\t\tname: \"D5_wrong_tag_not_approved\",\n\t\t\t// HA routers have tag:ha, not tag:router\n\t\t\t// Their 192.168.1.0/24 route would not be auto-approved\n\t\t\t// by an autoApprover for tag:router\n\t\t\t// But we can still target the route in ACL if manually enabled\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:router\"], \"dst\": [\"192.168.1.0/24:*\"]}\n\t\t\t`),\n\t\t\t// tag:router sources: subnet-router, multi-router, big-router\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"subnet-router\": nil, // Source, not destination\n\t\t\t\t\"big-router\":    nil, // Source, not destination\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.100.100.1/32\",  // big-router\n\t\t\t\t\t\t\t\"100.119.139.79/32\", // subnet-router\n\t\t\t\t\t\t\t\"100.74.117.7/32\",   // multi-router\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.100.100.1/32\",\n\t\t\t\t\t\t\t\"100.119.139.79/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::4001:8ba0/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::6401:6401/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Fix wildcard DstPorts expansion to use \"*\" instead of CGNAT ranges\n\t\t//\n\t\t// D6: Exit node auto-approval - wildcard ACL with routes\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - DstPorts uses literal \"*\" for wildcard destination\n\t\t// - All nodes get filter with DstPorts: [{IP: \"*\", Ports: 0-65535}]\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - DstPorts expands to CGNAT ranges instead of using \"*\"\n\t\t// - Uses {IP: \"100.64.0.0/10\"} and {IP: \"fd7a:115c:a1e0::/48\"}\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// Headscale expands wildcard destinations to explicit IP ranges\n\t\t// instead of using the \"*\" shorthand that Tailscale uses\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// Use literal \"*\" in DstPorts for wildcard destinations\n\t\t{\n\t\t\tname: \"D6_exit_node_auto_approval\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}\n\t\t\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// ... 
all other nodes same pattern with DstPorts: [{IP: \"*\"}]\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): DstPorts expanded to CGNAT ranges\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"client2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Fix wildcard DstPorts expansion to use \"*\" instead of CGNAT ranges\n\t\t//\n\t\t// D7: Exit auto-approval wrong tag - tag:exit to wildcard destination\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - DstPorts uses literal \"*\" for wildcard destination\n\t\t// - tag:exit (exit-node, multi-router) can access anywhere\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - DstPorts expands to CGNAT ranges instead of using \"*\"\n\t\t// - Uses {IP: \"100.64.0.0/10\"} and {IP: \"fd7a:115c:a1e0::/48\"}\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// Headscale expands wildcard destinations to explicit IP ranges\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// Use literal \"*\" in DstPorts for wildcard destinations\n\t\t{\n\t\t\tname: \"D7_exit_auto_approval_wrong_tag\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"tag:exit\"], \"dst\": [\"*:*\"]}\n\t\t\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.121.32.1/32\", \"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\", \"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: 
[]tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// ... all other nodes same pattern with DstPorts: [{IP: \"*\"}]\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): DstPorts expanded to CGNAT ranges\n\t\t\t// tag:exit = exit-node, multi-router\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.121.32.1/32\", // exit-node\n\t\t\t\t\t\t\t\"100.74.117.7/32\", // multi-router\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"client2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.121.32.1/32\",\n\t\t\t\t\t\t\t\"100.74.117.7/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::7f01:2004/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::c401:7508/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// D8: Auto-approval enables route, but ACL still enforced\n\t\t// Route is enabled via autoApprover, but restrictive ACL limits access\n\t\t{\n\t\t\tname: \"D8_auto_approval_acl_interaction\",\n\t\t\t// Route auto-approved, but ACL only allows specific source\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"10.33.0.0/16:22\"]}\n\t\t\t`),\n\t\t\t// Only autogroup:member sources (user-owned nodes)\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil, // Source, not destination\n\t\t\t\t\"client2\":    nil, // Source, not destination\n\t\t\t\t\"user1\":      nil, // Source, not destination\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\", // client1\n\t\t\t\t\t\t\t\"100.89.42.23/32\",  // client2\n\t\t\t\t\t\t\t\"100.90.199.68/32\", // user1\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: 
[]string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// D9: Auto-approval triggers on advertise\n\t\t// Policy exists first, then node advertises - triggers approval\n\t\t// This is a state/timing test - filter distribution is the same\n\t\t{\n\t\t\tname: \"D9_auto_approval_triggers_on_advertise\",\n\t\t\t// Same as D1 - validates consistent behavior\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.33.0.0/16:*\"]}\n\t\t\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// D10: Auto-approval retroactive\n\t\t// Node advertised first, policy added later - requires re-advertisement\n\t\t// Same filter distribution as D1 when route is enabled\n\t\t{\n\t\t\tname: \"D10_auto_approval_retroactive\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.33.0.0/16:443\"]}\n\t\t\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: 
[]tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// D11: Overlapping auto-approvers\n\t\t// 10.0.0.0/8 -> tag:router, 10.33.0.0/16 -> tag:special\n\t\t// Both are valid for their respective tags\n\t\t{\n\t\t\tname: \"D11_overlapping_auto_approvers\",\n\t\t\t// Both big-router (10.0.0.0/8) and subnet-router (10.33.0.0/16)\n\t\t\t// can be approved by different autoApprover rules\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.0.0.0/8:80\"]}\n\t\t\t`),\n\t\t\t// Targeting 10.0.0.0/8 - only big-router exact match + exit nodes\n\t\t\t// subnet-router's 10.33.0.0/16 is WITHIN 10.0.0.0/8 so also gets filter\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.0.0.0/8\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.0.0.0/8\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.0.0.0/8\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.0.0.0/8\", Ports: tailcfg.PortRange{First: 80, Last: 80}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\trunRoutesCompatTests(t, users, nodes, tests)\n}\n\n// TestTailscaleRoutesCompatAdditionalProtocol tests additional protocol restrictions (G4-G6).\nfunc TestTailscaleRoutesCompatAdditionalProtocol(t *testing.T) {\n\tt.Parallel()\n\n\tusers := setupRouteCompatUsers()\n\tnodes := setupRouteCompatNodes(users)\n\n\ttests := []routesCompatTest{\n\t\t// G4: Protocol ICMP only\n\t\t// proto:icmp results in 
IPProto=[1] (ICMP only)\n\t\t// NOTE: Exit nodes still get filters due to exit route coverage issue (separate TODO)\n\t\t{\n\t\t\tname: \"G4_protocol_icmp_subnet\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.33.0.0/16:*\"], \"proto\": \"icmp\"}\n\t\t\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// Exit nodes also get filters (exit route coverage issue)\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// G5: Protocol TCP only\n\t\t{\n\t\t\tname: \"G5_protocol_tcp_only\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.33.0.0/16:22\"], \"proto\": \"tcp\"}\n\t\t\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// TCP only\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// G6: Protocol UDP only\n\t\t{\n\t\t\tname: \"G6_protocol_udp_only\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": 
[\"10.33.0.0/16:53\"], \"proto\": \"udp\"}\n\t\t\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 53, Last: 53}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// UDP only\n\t\t\t\t\t\tIPProto: []int{ProtocolUDP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 53, Last: 53}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolUDP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 53, Last: 53}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolUDP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRange{First: 53, Last: 53}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolUDP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\trunRoutesCompatTests(t, users, nodes, tests)\n}\n\n// TestTailscaleRoutesCompatAdditionalEdgeCases tests additional edge cases (H5, H6, H8, H11).\nfunc TestTailscaleRoutesCompatAdditionalEdgeCases(t *testing.T) {\n\tt.Parallel()\n\n\tusers := setupRouteCompatUsers()\n\tnodes := setupRouteCompatNodes(users)\n\n\ttests := []routesCompatTest{\n\t\t// H5: Subnet overlaps CGNAT - cannot be enabled\n\t\t// Route 100.64.0.0/24 overlaps with Tailscale CGNAT range\n\t\t{\n\t\t\tname: \"H5_subnet_overlaps_cgnat\",\n\t\t\t// A route overlapping CGNAT cannot be enabled\n\t\t\t// This test verifies no filters are distributed for such routes\n\t\t\t// Using a normal subnet route as baseline\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"100.64.0.0/24:*\"]}\n\t\t\t`),\n\t\t\t// TODO: Tailscale blocks routes overlapping CGNAT\n\t\t\t// Headscale behavior may differ\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t// Exit nodes might still get filter since 0.0.0.0/0 covers everything\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.64.0.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.64.0.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// H6: Loopback routes not distributed\n\t\t// Route 127.0.0.1/32 can be advertised but NOT in peer 
AllowedIPs\n\t\t{\n\t\t\tname: \"H6_loopback_routes_not_distributed\",\n\t\t\t// Loopback routes are not practical but test edge case handling\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"127.0.0.1/32:*\"]}\n\t\t\t`),\n\t\t\t// TODO: Tailscale allows advertising loopback but doesn't distribute\n\t\t\t// Verify Headscale behavior\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t// Exit nodes might get filter\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"127.0.0.1/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"127.0.0.1/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// H8: CGNAT overlap blocked\n\t\t// TODO: Fix CGNAT overlap route handling\n\t\t// TAILSCALE BEHAVIOR: Routes overlapping CGNAT (100.64.0.0/10) are blocked.\n\t\t//   Only exit nodes get filters for destinations in the blocked range.\n\t\t// HEADSCALE BEHAVIOR: big-router gets filter because its IP (100.100.100.1)\n\t\t//   is within the destination range 100.100.0.0/16.\n\t\t// ROOT CAUSE: Headscale checks if node IPs are in destination range,\n\t\t//   not just if advertised routes cover the destination.\n\t\t// FIX REQUIRED: May need to exclude nodes whose IPs are in destination range.\n\t\t{\n\t\t\tname: \"H8_cgnat_overlap_blocked\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"100.100.0.0/16:*\"]}\n\t\t\t`),\n\t\t\t/*\n\t\t\t\tEXPECTED (Tailscale):\n\t\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\t\"client1\":       nil,\n\t\t\t\t\t\"client2\":       nil,\n\t\t\t\t\t\"user1\":         nil,\n\t\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\t\"big-router\":    nil, // No filter expected\n\t\t\t\t\t\"exit-node\":     { ... },\n\t\t\t\t\t\"multi-router\":  { ... 
},\n\t\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): big-router gets filter (its IP 100.100.100.1 is in 100.100.0.0/16)\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t// big-router gets filter because its IP (100.100.100.1) is in destination range\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.100.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.100.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.100.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// H11: IPv6 small prefix /128\n\t\t{\n\t\t\tname: \"H11_ipv6_small_prefix\",\n\t\t\t// /128 is a single IPv6 address - smallest possible prefix\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"fd00::1/128:443\"]}\n\t\t\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil,\n\t\t\t\t\"client2\":       nil,\n\t\t\t\t\"user1\":         nil,\n\t\t\t\t\"ha-router1\":    nil,\n\t\t\t\t\"ha-router2\":    nil,\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t// Exit nodes with ::/0 cover all IPv6\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"fd00::1/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"fd00::1/128\", Ports: tailcfg.PortRange{First: 443, Last: 443}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\trunRoutesCompatTests(t, users, nodes, tests)\n}\n\n// TestTailscaleRoutesCompatAdditionalIPv6 tests additional IPv6 scenarios (I2, I3, I6).\nfunc TestTailscaleRoutesCompatAdditionalIPv6(t *testing.T) {\n\tt.Parallel()\n\n\tusers := setupRouteCompatUsers()\n\tnodes := setupRouteCompatNodes(users)\n\n\ttests := []routesCompatTest{\n\t\t// TODO: Fix wildcard DstPorts format\n\t\t//\n\t\t// I2: IPv6 exit route ::/0 - verifies ::/0 NOT in SrcIPs\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - DstPorts uses {IP: \"*\"} for wildcard destinations\n\t\t// - ::/0 does NOT appear in SrcIPs (exit routes excluded)\n\t\t// - Filter distributed to all nodes\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - DstPorts expands to CGNAT ranges instead of \"*\"\n\t\t// - ::/0 correctly excluded from 
SrcIPs\n\t\t// - Filter distributed to all nodes (same as Tailscale)\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// Headscale expands the \"*\" destination into explicit CGNAT ranges instead of emitting {IP: \"*\"}.\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// Use {IP: \"*\"} for wildcard destinations.\n\t\t{\n\t\t\tname: \"I2_ipv6_exit_route\",\n\t\t\t// ::/0 is the IPv6 exit route (like 0.0.0.0/0 for IPv4)\n\t\t\t// Should NOT appear in SrcIPs\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}\n\t\t\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"*\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// ... same for all nodes with {IP: \"*\"}\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): DstPorts expanded to CGNAT ranges\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"client2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"user1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs:   wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: wildcardDstPorts,\n\t\t\t\t\t\tIPProto:  []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Fix exit route coverage and per-node DstPorts filtering\n\t\t//\n\t\t// I3: IPv6 in wildcard SrcIPs\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - SrcIPs includes fd7a:115c:a1e0::/48 (IPv6 Tailscale range) - CORRECT\n\t\t// - Only tag:router nodes receive filters (subnet-router, multi-router, big-router)\n\t\t// - Exit-node 
(tag:exit only) does NOT get filter\n\t\t// - Each tag:router node sees ALL tag:router IPs in DstPorts\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - SrcIPs correctly includes fd7a:115c:a1e0::/48 (IPv6 range works)\n\t\t// - Exit-node incorrectly gets filter (exit route covers all tag:router IPs)\n\t\t// - Each node only sees its OWN IPs in DstPorts (not all tag:router IPs)\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// 1. Exit route coverage: exit-node's 0.0.0.0/0 + ::/0 covers tag:router IPs\n\t\t// 2. Per-node DstPorts: Headscale only includes self IPs in DstPorts\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// 1. Exclude exit nodes from tag-based destinations\n\t\t// 2. Include all matching tag IPs in DstPorts for each destination node\n\t\t{\n\t\t\tname: \"I3_ipv6_in_wildcard_srcips\",\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"tag:router:22\"]}\n\t\t\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\": nil,\n\t\t\t\t\"client2\": nil,\n\t\t\t\t\"user1\":   nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"exit-node\": nil, // tag:exit, NOT tag:router\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// ALL tag:router IPs\n\t\t\t\t\t\t\t{IP: \"100.100.100.1/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.119.139.79/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.74.117.7/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::4001:8ba0/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::6401:6401/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::c401:7508/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// ... 
multi-router and big-router with same ALL tag:router IPs\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): exit-node gets filter, each node sees only self IPs\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t// INCORRECT: subnet-router only sees its own IPs\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// Only self IPs (missing big-router and multi-router IPs)\n\t\t\t\t\t\t\t{IP: \"100.119.139.79/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::4001:8ba0/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// multi-router has tag:router AND tag:exit, gets all tag:router IPs\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// All tag:router IPs (multi-router sees all)\n\t\t\t\t\t\t\t{IP: \"100.100.100.1/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.119.139.79/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.74.117.7/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::4001:8ba0/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::6401:6401/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::c401:7508/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: big-router only sees its own IPs\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t// Only self IPs (missing subnet-router and multi-router IPs)\n\t\t\t\t\t\t\t{IP: \"100.100.100.1/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::6401:6401/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: Exit-node gets filter (should be nil - tag:exit not tag:router)\n\t\t\t\t// Due to exit route coverage, it sees all tag:router IPs\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"100.100.100.1/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.119.139.79/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"100.74.117.7/32\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::4001:8ba0/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::6401:6401/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t\t{IP: \"fd7a:115c:a1e0::c401:7508/128\", Ports: tailcfg.PortRange{First: 22, Last: 22}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: Fix exit route coverage for subnet 
destinations\n\t\t//\n\t\t// I6: Dual-stack node - targeting both IPv4 and IPv6 subnets\n\t\t//\n\t\t// TAILSCALE BEHAVIOR:\n\t\t// - Only subnet-router (10.33.0.0/16) and big-router (10.0.0.0/8) get IPv4 filter\n\t\t// - No node has fd00:1::/64 route, so no node gets IPv6 filter\n\t\t// - Exit nodes do NOT get filters for specific subnet destinations\n\t\t// - Multiple rules with same SrcIPs kept as separate rules\n\t\t//\n\t\t// HEADSCALE BEHAVIOR:\n\t\t// - Exit nodes get filters (exit route covers both subnets)\n\t\t// - Rules with same SrcIPs and IPProto are MERGED into single rule\n\t\t//   with combined DstPorts\n\t\t// - No node owns fd00:1::/64, but exit nodes cover it via ::/0\n\t\t//\n\t\t// ROOT CAUSE:\n\t\t// 1. Exit route coverage: 0.0.0.0/0 and ::/0 cover all subnets\n\t\t// 2. Filter rule merging: Headscale merges rules with identical SrcIPs/IPProto\n\t\t//\n\t\t// FIX REQUIRED:\n\t\t// Exclude exit nodes from specific subnet destinations.\n\t\t{\n\t\t\tname: \"I6_dual_stack_node\",\n\t\t\t// Target both IPv4 and IPv6 subnets\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.33.0.0/16:*\"]},\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"fd00:1::/64:*\"]}\n\t\t\t`),\n\t\t\t/* EXPECTED (Tailscale):\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"exit-node\":  nil, // Exit nodes shouldn't get subnet filters\n\t\t\t\t\"multi-router\": nil, // Only has 172.16.0.0/24 + exit routes\n\t\t\t\t// subnet-router owns 10.33.0.0/16\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// big-router covers 10.33.0.0/16 via 10.0.0.0/8\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t*/\n\t\t\t// ACTUAL (Headscale): Exit nodes cover both, rules merged\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t// subnet-router owns 10.33.0.0/16\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// big-router covers 10.33.0.0/16 via 10.0.0.0/8\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: Exit-node gets MERGED filter covering both subnets\n\t\t\t\t\"exit-node\": 
{\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\t// Both destinations merged into single rule\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd00:1::/64\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// INCORRECT: multi-router gets MERGED filter covering both subnets\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\t// Both destinations merged into single rule\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.0.0/16\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t{IP: \"fd00:1::/64\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\trunRoutesCompatTests(t, users, nodes, tests)\n}\n\n// TestTailscaleRoutesCompatAdditionalOverlapping tests additional overlapping route scenarios (O8, O9).\nfunc TestTailscaleRoutesCompatAdditionalOverlapping(t *testing.T) {\n\tt.Parallel()\n\n\tusers := setupRouteCompatUsers()\n\tnodes := setupRouteCompatNodes(users)\n\n\ttests := []routesCompatTest{\n\t\t// O8: Same node overlapping routes\n\t\t// Node with 10.0.0.0/8, 10.33.0.0/16, 10.33.1.0/24 - NOT merged\n\t\t{\n\t\t\tname: \"O8_same_node_overlapping_routes\",\n\t\t\t// If a single node advertises multiple overlapping routes,\n\t\t\t// they should all appear separately, not merged\n\t\t\t// big-router has 10.0.0.0/8\n\t\t\t// Let's target a specific child prefix\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"10.33.1.0/24:*\"]}\n\t\t\t`),\n\t\t\t// big-router (10.0.0.0/8) and subnet-router (10.33.0.0/16) both cover\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":    nil,\n\t\t\t\t\"client2\":    nil,\n\t\t\t\t\"user1\":      nil,\n\t\t\t\t\"ha-router1\": nil,\n\t\t\t\t\"ha-router2\": nil,\n\t\t\t\t\"subnet-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"big-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: wildcardSrcIPs,\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"10.33.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// O9: Different nodes same route\n\t\t// Two nodes with 192.168.1.0/24 - only first is primary\n\t\t{\n\t\t\tname: 
\"O9_different_nodes_same_route\",\n\t\t\t// ha-router1 and ha-router2 both have 192.168.1.0/24\n\t\t\t// Both should receive filters, but only one is primary\n\t\t\tpolicy: makeRoutesPolicy(`\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"192.168.1.0/24:*\"]}\n\t\t\t`),\n\t\t\twantFilters: map[string][]tailcfg.FilterRule{\n\t\t\t\t\"client1\":       nil, // Source\n\t\t\t\t\"client2\":       nil, // Source\n\t\t\t\t\"user1\":         nil, // Source\n\t\t\t\t\"subnet-router\": nil,\n\t\t\t\t\"big-router\":    nil,\n\t\t\t\t// Both HA routers get filter despite sharing route\n\t\t\t\t\"ha-router1\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"ha-router2\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// Exit nodes also cover\n\t\t\t\t\"exit-node\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"multi-router\": {\n\t\t\t\t\t{\n\t\t\t\t\t\tSrcIPs: []string{\n\t\t\t\t\t\t\t\"100.116.73.38/32\",\n\t\t\t\t\t\t\t\"100.89.42.23/32\",\n\t\t\t\t\t\t\t\"100.90.199.68/32\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::a801:4949/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::d01:2a2e/128\",\n\t\t\t\t\t\t\t\"fd7a:115c:a1e0::2d01:c747/128\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t\t{IP: \"192.168.1.0/24\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIPProto: []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\trunRoutesCompatTests(t, users, nodes, tests)\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/tailscale_ssh_data_compat_test.go",
    "content": "// This file is \"generated\" by Claude.\n// It contains a data-driven test that reads SSH-*.json test files captured\n// from Tailscale SaaS. Each file contains:\n//   - The SSH section of the policy\n//   - The expected SSHPolicy rules for each of 5 test nodes\n//\n// The test loads each JSON file, constructs a full policy from the SSH section,\n// applies it through headscale's SSH policy compilation, and compares the output\n// against Tailscale's actual behavior.\n//\n// Tests that are known to fail due to unimplemented features or known\n// differences are skipped with a TODO comment explaining the root cause.\n// As headscale's SSH implementation improves, tests should be removed\n// from the skip list.\n//\n// Test data source: testdata/ssh_results/SSH-*.json\n// Captured from: Tailscale SaaS API + tailscale debug localapi\n\npackage v2\n\nimport (\n\t\"encoding/json\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/google/go-cmp/cmp/cmpopts\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gorm.io/gorm\"\n\t\"tailscale.com/tailcfg\"\n)\n\n// sshTestFile represents the JSON structure of a captured SSH test file.\ntype sshTestFile struct {\n\tTestID     string                    `json:\"test_id\"`\n\tPolicyFile string                    `json:\"policy_file\"`\n\tSSHSection json.RawMessage           `json:\"ssh_section\"`\n\tNodes      map[string]sshNodeCapture `json:\"nodes\"`\n}\n\n// sshNodeCapture represents the expected SSH rules for a single node.\ntype sshNodeCapture struct {\n\tRules json.RawMessage `json:\"rules\"`\n}\n\n// setupSSHDataCompatUsers returns the 3 test users for SSH data-driven\n// compatibility tests. The user configuration matches the Tailscale test\n// environment with email domains preserved for localpart matching:\n//   - kratail2tid@example.com (converted from @passkey)\n//   - kristoffer@dalby.cc (kept as-is — different domain for localpart exclusion)\n//   - monitorpasskeykradalby@example.com (converted from @passkey)\nfunc setupSSHDataCompatUsers() types.Users {\n\treturn types.Users{\n\t\t{\n\t\t\tModel: gorm.Model{ID: 1},\n\t\t\tName:  \"kratail2tid\",\n\t\t\tEmail: \"kratail2tid@example.com\",\n\t\t},\n\t\t{\n\t\t\tModel: gorm.Model{ID: 2},\n\t\t\tName:  \"kristoffer\",\n\t\t\tEmail: \"kristoffer@dalby.cc\",\n\t\t},\n\t\t{\n\t\t\tModel: gorm.Model{ID: 3},\n\t\t\tName:  \"monitorpasskeykradalby\",\n\t\t\tEmail: \"monitorpasskeykradalby@example.com\",\n\t\t},\n\t}\n}\n\n// setupSSHDataCompatNodes returns the 5 test nodes for SSH data-driven\n// compatibility tests. 
Node GivenNames match the keys in the JSON files:\n//   - user1 (owned by kratail2tid)\n//   - user-kris (owned by kristoffer)\n//   - user-mon (owned by monitorpasskeykradalby)\n//   - tagged-server (tag:server)\n//   - tagged-prod (tag:prod)\nfunc setupSSHDataCompatNodes(users types.Users) types.Nodes {\n\treturn types.Nodes{\n\t\t&types.Node{\n\t\t\tID:        1,\n\t\t\tGivenName: \"user1\",\n\t\t\tUser:      &users[0],\n\t\t\tUserID:    &users[0].ID,\n\t\t\tIPv4:      ptrAddr(\"100.90.199.68\"),\n\t\t\tIPv6:      ptrAddr(\"fd7a:115c:a1e0::2d01:c747\"),\n\t\t\tHostinfo:  &tailcfg.Hostinfo{},\n\t\t},\n\t\t&types.Node{\n\t\t\tID:        2,\n\t\t\tGivenName: \"user-kris\",\n\t\t\tUser:      &users[1],\n\t\t\tUserID:    &users[1].ID,\n\t\t\tIPv4:      ptrAddr(\"100.110.121.96\"),\n\t\t\tIPv6:      ptrAddr(\"fd7a:115c:a1e0::1737:7960\"),\n\t\t\tHostinfo:  &tailcfg.Hostinfo{},\n\t\t},\n\t\t&types.Node{\n\t\t\tID:        3,\n\t\t\tGivenName: \"user-mon\",\n\t\t\tUser:      &users[2],\n\t\t\tUserID:    &users[2].ID,\n\t\t\tIPv4:      ptrAddr(\"100.103.90.82\"),\n\t\t\tIPv6:      ptrAddr(\"fd7a:115c:a1e0::9e37:5a52\"),\n\t\t\tHostinfo:  &tailcfg.Hostinfo{},\n\t\t},\n\t\t&types.Node{\n\t\t\tID:        4,\n\t\t\tGivenName: \"tagged-server\",\n\t\t\tIPv4:      ptrAddr(\"100.108.74.26\"),\n\t\t\tIPv6:      ptrAddr(\"fd7a:115c:a1e0::b901:4a87\"),\n\t\t\tTags:      []string{\"tag:server\"},\n\t\t\tHostinfo:  &tailcfg.Hostinfo{},\n\t\t},\n\t\t&types.Node{\n\t\t\tID:        5,\n\t\t\tGivenName: \"tagged-prod\",\n\t\t\tIPv4:      ptrAddr(\"100.103.8.15\"),\n\t\t\tIPv6:      ptrAddr(\"fd7a:115c:a1e0::5b37:80f\"),\n\t\t\tTags:      []string{\"tag:prod\"},\n\t\t\tHostinfo:  &tailcfg.Hostinfo{},\n\t\t},\n\t}\n}\n\n// convertSSHPolicyEmails converts Tailscale SaaS email domains to\n// headscale-compatible format in the raw policy JSON.\n//\n// Tailscale uses provider-specific email formats:\n//   - kratail2tid@passkey (passkey auth)\n//   - kristoffer@dalby.cc (email auth — kept as-is)\n//   - monitorpasskeykradalby@passkey (passkey auth)\n//\n// The @passkey domain is converted to @example.com. 
The @dalby.cc domain\n// is kept as-is to preserve localpart matching semantics (kristoffer should\n// NOT match localpart:*@example.com, just as it doesn't match\n// localpart:*@passkey in Tailscale SaaS).\nfunc convertSSHPolicyEmails(s string) string {\n\ts = strings.ReplaceAll(s, \"@passkey\", \"@example.com\")\n\n\treturn s\n}\n\n// constructSSHFullPolicy builds a complete headscale policy from the\n// ssh_section captured from Tailscale SaaS.\n//\n// The base policy includes:\n//   - groups matching the Tailscale test environment\n//   - tagOwners for tag:server and tag:prod\n//   - A permissive ACL allowing all traffic (matches the grants wildcard\n//     in the original Tailscale policy)\n//   - The SSH section from the test file\nfunc constructSSHFullPolicy(sshSection json.RawMessage) string {\n\t// Base policy template with groups, tagOwners, and ACLs\n\t// User references match the converted email addresses.\n\tconst basePolicyPrefix = `{\n\t\"groups\": {\n\t\t\"group:admins\": [\"kratail2tid@example.com\"],\n\t\t\"group:developers\": [\"kristoffer@dalby.cc\", \"kratail2tid@example.com\"],\n\t\t\"group:empty\": []\n\t},\n\t\"tagOwners\": {\n\t\t\"tag:server\": [\"kratail2tid@example.com\"],\n\t\t\"tag:prod\": [\"kratail2tid@example.com\"]\n\t},\n\t\"acls\": [{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}]`\n\n\t// Handle null or empty SSH section\n\tif len(sshSection) == 0 || string(sshSection) == \"null\" {\n\t\t// No SSH section at all (like SSH-E4)\n\t\treturn basePolicyPrefix + \"\\n}\"\n\t}\n\n\tsshStr := string(sshSection)\n\n\t// Convert Tailscale email domains\n\tsshStr = convertSSHPolicyEmails(sshStr)\n\n\treturn basePolicyPrefix + `,\n\t\"ssh\": ` + sshStr + \"\\n}\"\n}\n\n// loadSSHTestFile loads and parses a single SSH test JSON file.\nfunc loadSSHTestFile(t *testing.T, path string) sshTestFile {\n\tt.Helper()\n\n\tcontent, err := os.ReadFile(path)\n\trequire.NoError(t, err, \"failed to read test file %s\", path)\n\n\tvar tf sshTestFile\n\n\terr = json.Unmarshal(content, &tf)\n\trequire.NoError(t, err, \"failed to parse test file %s\", path)\n\n\treturn tf\n}\n\n// sshSkipReasons documents why each skipped test fails and what needs to be\n// fixed. Tests are grouped by root cause to identify high-impact changes.\n//\n// 37 of 39 tests are expected to pass.\nvar sshSkipReasons = map[string]string{\n\t// user:*@domain source alias not yet implemented.\n\t// These tests use \"src\": [\"user:*@passkey\"] which requires UserWildcard\n\t// alias type support. 
Will be added in a follow-up PR that implements\n\t// user:*@domain across all contexts (ACLs, grants, tagOwners, autoApprovers).\n\t\"SSH-B5\":  \"user:*@domain source alias not yet implemented\",\n\t\"SSH-D10\": \"user:*@domain source alias not yet implemented\",\n}\n\n// TestSSHDataCompat is a data-driven test that loads all SSH-*.json test files\n// captured from Tailscale SaaS and compares headscale's SSH policy compilation\n// against the real Tailscale behavior.\n//\n// Each JSON file contains:\n//   - The SSH section of the policy\n//   - Expected SSH rules per node (5 nodes)\n//\n// The test constructs a full headscale policy from the SSH section, converts\n// Tailscale user email formats to headscale format, and runs the policy\n// through unmarshalPolicy and compileSSHPolicy.\nfunc TestSSHDataCompat(t *testing.T) {\n\tt.Parallel()\n\n\tfiles, err := filepath.Glob(\n\t\tfilepath.Join(\"testdata\", \"ssh_results\", \"SSH-*.json\"),\n\t)\n\trequire.NoError(t, err, \"failed to glob test files\")\n\trequire.NotEmpty(\n\t\tt,\n\t\tfiles,\n\t\t\"no SSH-*.json test files found in testdata/ssh_results/\",\n\t)\n\n\tt.Logf(\"Loaded %d SSH test files\", len(files))\n\n\tusers := setupSSHDataCompatUsers()\n\tnodes := setupSSHDataCompatNodes(users)\n\n\tfor _, file := range files {\n\t\ttf := loadSSHTestFile(t, file)\n\n\t\tt.Run(tf.TestID, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\t// Check if this test is in the skip list\n\t\t\tif reason, ok := sshSkipReasons[tf.TestID]; ok {\n\t\t\t\tt.Skipf(\n\t\t\t\t\t\"TODO: %s — see sshSkipReasons comments for details\",\n\t\t\t\t\treason,\n\t\t\t\t)\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Construct full policy from SSH section\n\t\t\tpolicyJSON := constructSSHFullPolicy(tf.SSHSection)\n\n\t\t\tpol, err := unmarshalPolicy([]byte(policyJSON))\n\t\t\trequire.NoError(\n\t\t\t\tt,\n\t\t\t\terr,\n\t\t\t\t\"%s: policy should parse successfully\\nPolicy:\\n%s\",\n\t\t\t\ttf.TestID,\n\t\t\t\tpolicyJSON,\n\t\t\t)\n\n\t\t\tfor nodeName, capture := range tf.Nodes {\n\t\t\t\tt.Run(nodeName, func(t *testing.T) {\n\t\t\t\t\tnode := findNodeByGivenName(nodes, nodeName)\n\t\t\t\t\trequire.NotNilf(\n\t\t\t\t\t\tt,\n\t\t\t\t\t\tnode,\n\t\t\t\t\t\t\"node %s not found in test setup\",\n\t\t\t\t\t\tnodeName,\n\t\t\t\t\t)\n\n\t\t\t\t\t// Compile headscale SSH policy for this node\n\t\t\t\t\tgotSSH, err := pol.compileSSHPolicy(\n\t\t\t\t\t\t\"unused-server-url\",\n\t\t\t\t\t\tusers,\n\t\t\t\t\t\tnode.View(),\n\t\t\t\t\t\tnodes.ViewSlice(),\n\t\t\t\t\t)\n\t\t\t\t\trequire.NoError(\n\t\t\t\t\t\tt,\n\t\t\t\t\t\terr,\n\t\t\t\t\t\t\"%s/%s: failed to compile SSH policy\",\n\t\t\t\t\t\ttf.TestID,\n\t\t\t\t\t\tnodeName,\n\t\t\t\t\t)\n\n\t\t\t\t\t// Parse expected rules from JSON capture\n\t\t\t\t\tvar wantRules []*tailcfg.SSHRule\n\t\t\t\t\tif len(capture.Rules) > 0 &&\n\t\t\t\t\t\tstring(capture.Rules) != \"null\" {\n\t\t\t\t\t\terr = json.Unmarshal(capture.Rules, &wantRules)\n\t\t\t\t\t\trequire.NoError(\n\t\t\t\t\t\t\tt,\n\t\t\t\t\t\t\terr,\n\t\t\t\t\t\t\t\"%s/%s: failed to unmarshal expected rules\",\n\t\t\t\t\t\t\ttf.TestID,\n\t\t\t\t\t\t\tnodeName,\n\t\t\t\t\t\t)\n\t\t\t\t\t}\n\n\t\t\t\t\t// Build expected SSHPolicy from the rules\n\t\t\t\t\tvar wantSSH *tailcfg.SSHPolicy\n\t\t\t\t\tif len(wantRules) > 0 {\n\t\t\t\t\t\twantSSH = &tailcfg.SSHPolicy{Rules: wantRules}\n\t\t\t\t\t}\n\n\t\t\t\t\t// Normalize: treat empty-rules SSHPolicy as nil\n\t\t\t\t\tif gotSSH != nil && len(gotSSH.Rules) == 0 {\n\t\t\t\t\t\tgotSSH = nil\n\t\t\t\t\t}\n\n\t\t\t\t\t// Compare headscale 
output against Tailscale expected.\n\t\t\t\t\t// EquateEmpty treats nil and empty slices as equal.\n\t\t\t\t\t// Sort principals within rules (order doesn't matter).\n\t\t\t\t\t// Do NOT sort rules — order matters (first-match-wins).\n\t\t\t\t\topts := cmp.Options{\n\t\t\t\t\t\tcmpopts.SortSlices(func(a, b *tailcfg.SSHPrincipal) bool {\n\t\t\t\t\t\t\treturn a.NodeIP < b.NodeIP\n\t\t\t\t\t\t}),\n\t\t\t\t\t\tcmpopts.EquateEmpty(),\n\t\t\t\t\t}\n\t\t\t\t\tif diff := cmp.Diff(wantSSH, gotSSH, opts...); diff != \"\" {\n\t\t\t\t\t\tt.Errorf(\n\t\t\t\t\t\t\t\"%s/%s: SSH policy mismatch (-tailscale +headscale):\\n%s\",\n\t\t\t\t\t\t\ttf.TestID,\n\t\t\t\t\t\t\tnodeName,\n\t\t\t\t\t\t\tdiff,\n\t\t\t\t\t\t)\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-A1.json",
    "content": "{\n  \"test_id\": \"SSH-A1\",\n  \"policy_file\": \"ssh_policies/ssh_a1.json\",\n  \"ssh_section\": [{ \"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"autogroup:self\"], \"users\": [\"root\"] }],\n  \"nodes\": {\n    \"user1\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-kris\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-mon\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-server\": { \"rules\": [] },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-A2.json",
    "content": "{\n  \"test_id\": \"SSH-A2\",\n  \"policy_file\": \"ssh_policies/ssh_a2.json\",\n  \"ssh_section\": [\n    { \"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"autogroup:self\"], \"users\": [\"autogroup:nonroot\"] }\n  ],\n  \"nodes\": {\n    \"user1\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-kris\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-mon\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-server\": { \"rules\": [] },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-A3.json",
    "content": "{\n  \"test_id\": \"SSH-A3\",\n  \"policy_file\": \"ssh_policies/ssh_a3.json\",\n  \"ssh_section\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\"autogroup:member\"],\n      \"dst\": [\"autogroup:self\"],\n      \"users\": [\"root\", \"autogroup:nonroot\"]\n    }\n  ],\n  \"nodes\": {\n    \"user1\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-kris\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-mon\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-server\": { \"rules\": [] },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-A4.json",
    "content": "{\n  \"test_id\": \"SSH-A4\",\n  \"policy_file\": \"ssh_policies/ssh_a4.json\",\n  \"ssh_section\": [{ \"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"tag:server\"], \"users\": [\"ubuntu\"] }],\n  \"nodes\": {\n    \"user1\": { \"rules\": [] },\n    \"user-kris\": { \"rules\": [] },\n    \"user-mon\": { \"rules\": [] },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [\n            { \"nodeIP\": \"100.103.90.82\" },\n            { \"nodeIP\": \"100.110.121.96\" },\n            { \"nodeIP\": \"100.90.199.68\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }\n          ],\n          \"sshUsers\": { \"root\": \"\", \"ubuntu\": \"ubuntu\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-A5.json",
    "content": "{\n  \"test_id\": \"SSH-A5\",\n  \"policy_file\": \"ssh_policies/ssh_a5.json\",\n  \"ssh_section\": [\n    { \"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"tag:server\"], \"users\": [\"root\", \"ubuntu\"] }\n  ],\n  \"nodes\": {\n    \"user1\": { \"rules\": [] },\n    \"user-kris\": { \"rules\": [] },\n    \"user-mon\": { \"rules\": [] },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [\n            { \"nodeIP\": \"100.103.90.82\" },\n            { \"nodeIP\": \"100.110.121.96\" },\n            { \"nodeIP\": \"100.90.199.68\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }\n          ],\n          \"sshUsers\": { \"root\": \"root\", \"ubuntu\": \"ubuntu\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-A6.json",
    "content": "{\n  \"test_id\": \"SSH-A6\",\n  \"policy_file\": \"ssh_policies/ssh_a6.json\",\n  \"ssh_section\": [\n    {\n      \"action\": \"check\",\n      \"src\": [\n        \"autogroup:member\"\n      ],\n      \"dst\": [\n        \"autogroup:self\"\n      ],\n      \"users\": [\n        \"root\"\n      ]\n    }\n  ],\n  \"nodes\": {\n    \"user1\": {\n      \"rules\": [\n        {\n          \"principals\": [\n            {\n              \"nodeIP\": \"100.90.199.68\"\n            },\n            {\n              \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\"\n            }\n          ],\n          \"sshUsers\": {\n            \"root\": \"root\"\n          },\n          \"action\": {\n            \"holdAndDelegate\": \"unused-server-url/machine/ssh/action/from/$SRC_NODE_ID/to/$DST_NODE_ID?ssh_user=$SSH_USER&local_user=$LOCAL_USER\",\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true,\n            \"sessionDuration\": 43200000000000\n          }\n        }\n      ]\n    },\n    \"user-kris\": {\n      \"rules\": [\n        {\n          \"principals\": [\n            {\n              \"nodeIP\": \"100.110.121.96\"\n            },\n            {\n              \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\"\n            }\n          ],\n          \"sshUsers\": {\n            \"root\": \"root\"\n          },\n          \"action\": {\n            \"holdAndDelegate\": \"unused-server-url/machine/ssh/action/from/$SRC_NODE_ID/to/$DST_NODE_ID?ssh_user=$SSH_USER&local_user=$LOCAL_USER\",\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true,\n            \"sessionDuration\": 43200000000000\n          }\n        }\n      ]\n    },\n    \"user-mon\": {\n      \"rules\": [\n        {\n          \"principals\": [\n            {\n              \"nodeIP\": \"100.103.90.82\"\n            },\n            {\n              \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\"\n            }\n          ],\n          \"sshUsers\": {\n            \"root\": \"root\"\n          },\n          \"action\": {\n            \"holdAndDelegate\": \"unused-server-url/machine/ssh/action/from/$SRC_NODE_ID/to/$DST_NODE_ID?ssh_user=$SSH_USER&local_user=$LOCAL_USER\",\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true,\n            \"sessionDuration\": 43200000000000\n          }\n        }\n      ]\n    },\n    \"tagged-server\": {\n      \"rules\": []\n    },\n    \"tagged-prod\": {\n      \"rules\": []\n    }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-A7.json",
    "content": "{\n  \"test_id\": \"SSH-A7\",\n  \"policy_file\": \"ssh_policies/ssh_a7.json\",\n  \"ssh_section\": [\n    {\n      \"action\": \"check\",\n      \"src\": [\n        \"autogroup:member\"\n      ],\n      \"dst\": [\n        \"autogroup:self\"\n      ],\n      \"users\": [\n        \"root\"\n      ],\n      \"checkPeriod\": \"1h\"\n    }\n  ],\n  \"nodes\": {\n    \"user1\": {\n      \"rules\": [\n        {\n          \"principals\": [\n            {\n              \"nodeIP\": \"100.90.199.68\"\n            },\n            {\n              \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\"\n            }\n          ],\n          \"sshUsers\": {\n            \"root\": \"root\"\n          },\n          \"action\": {\n            \"holdAndDelegate\": \"unused-server-url/machine/ssh/action/from/$SRC_NODE_ID/to/$DST_NODE_ID?ssh_user=$SSH_USER&local_user=$LOCAL_USER\",\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true,\n            \"sessionDuration\": 3600000000000\n          }\n        }\n      ]\n    },\n    \"user-kris\": {\n      \"rules\": [\n        {\n          \"principals\": [\n            {\n              \"nodeIP\": \"100.110.121.96\"\n            },\n            {\n              \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\"\n            }\n          ],\n          \"sshUsers\": {\n            \"root\": \"root\"\n          },\n          \"action\": {\n            \"holdAndDelegate\": \"unused-server-url/machine/ssh/action/from/$SRC_NODE_ID/to/$DST_NODE_ID?ssh_user=$SSH_USER&local_user=$LOCAL_USER\",\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true,\n            \"sessionDuration\": 3600000000000\n          }\n        }\n      ]\n    },\n    \"user-mon\": {\n      \"rules\": [\n        {\n          \"principals\": [\n            {\n              \"nodeIP\": \"100.103.90.82\"\n            },\n            {\n              \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\"\n            }\n          ],\n          \"sshUsers\": {\n            \"root\": \"root\"\n          },\n          \"action\": {\n            \"holdAndDelegate\": \"unused-server-url/machine/ssh/action/from/$SRC_NODE_ID/to/$DST_NODE_ID?ssh_user=$SSH_USER&local_user=$LOCAL_USER\",\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true,\n            \"sessionDuration\": 3600000000000\n          }\n        }\n      ]\n    },\n    \"tagged-server\": {\n      \"rules\": []\n    },\n    \"tagged-prod\": {\n      \"rules\": []\n    }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-A8.json",
    "content": "{\n  \"test_id\": \"SSH-A8\",\n  \"policy_file\": \"ssh_policies/ssh_a8.json\",\n  \"ssh_section\": [\n    {\n      \"action\": \"check\",\n      \"src\": [\n        \"autogroup:member\"\n      ],\n      \"dst\": [\n        \"autogroup:self\"\n      ],\n      \"users\": [\n        \"root\"\n      ],\n      \"checkPeriod\": \"always\"\n    }\n  ],\n  \"nodes\": {\n    \"user1\": {\n      \"rules\": [\n        {\n          \"principals\": [\n            {\n              \"nodeIP\": \"100.90.199.68\"\n            },\n            {\n              \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\"\n            }\n          ],\n          \"sshUsers\": {\n            \"root\": \"root\"\n          },\n          \"action\": {\n            \"holdAndDelegate\": \"unused-server-url/machine/ssh/action/from/$SRC_NODE_ID/to/$DST_NODE_ID?ssh_user=$SSH_USER&local_user=$LOCAL_USER\",\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true,\n            \"sessionDuration\": 0\n          }\n        }\n      ]\n    },\n    \"user-kris\": {\n      \"rules\": [\n        {\n          \"principals\": [\n            {\n              \"nodeIP\": \"100.110.121.96\"\n            },\n            {\n              \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\"\n            }\n          ],\n          \"sshUsers\": {\n            \"root\": \"root\"\n          },\n          \"action\": {\n            \"holdAndDelegate\": \"unused-server-url/machine/ssh/action/from/$SRC_NODE_ID/to/$DST_NODE_ID?ssh_user=$SSH_USER&local_user=$LOCAL_USER\",\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true,\n            \"sessionDuration\": 0\n          }\n        }\n      ]\n    },\n    \"user-mon\": {\n      \"rules\": [\n        {\n          \"principals\": [\n            {\n              \"nodeIP\": \"100.103.90.82\"\n            },\n            {\n              \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\"\n            }\n          ],\n          \"sshUsers\": {\n            \"root\": \"root\"\n          },\n          \"action\": {\n            \"holdAndDelegate\": \"unused-server-url/machine/ssh/action/from/$SRC_NODE_ID/to/$DST_NODE_ID?ssh_user=$SSH_USER&local_user=$LOCAL_USER\",\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true,\n            \"sessionDuration\": 0\n          }\n        }\n      ]\n    },\n    \"tagged-server\": {\n      \"rules\": []\n    },\n    \"tagged-prod\": {\n      \"rules\": []\n    }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-B1.json",
    "content": "{\n  \"test_id\": \"SSH-B1\",\n  \"policy_file\": \"ssh_policies/ssh_b1.json\",\n  \"ssh_section\": [{ \"action\": \"accept\", \"src\": [\"kristoffer@dalby.cc\"], \"dst\": [\"tag:server\"], \"users\": [\"root\"] }],\n  \"nodes\": {\n    \"user1\": { \"rules\": [] },\n    \"user-kris\": { \"rules\": [] },\n    \"user-mon\": { \"rules\": [] },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-B2.json",
    "content": "{\n  \"test_id\": \"SSH-B2\",\n  \"policy_file\": \"ssh_policies/ssh_b2.json\",\n  \"ssh_section\": [{ \"action\": \"accept\", \"src\": [\"group:developers\"], \"dst\": [\"tag:server\"], \"users\": [\"root\"] }],\n  \"nodes\": {\n    \"user1\": { \"rules\": [] },\n    \"user-kris\": { \"rules\": [] },\n    \"user-mon\": { \"rules\": [] },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [\n            { \"nodeIP\": \"100.110.121.96\" },\n            { \"nodeIP\": \"100.90.199.68\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }\n          ],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-B3.json",
    "content": "{\n  \"test_id\": \"SSH-B3\",\n  \"policy_file\": \"ssh_policies/ssh_b3.json\",\n  \"ssh_section\": [{ \"action\": \"accept\", \"src\": [\"tag:prod\"], \"dst\": [\"tag:server\"], \"users\": [\"root\"] }],\n  \"nodes\": {\n    \"user1\": { \"rules\": [] },\n    \"user-kris\": { \"rules\": [] },\n    \"user-mon\": { \"rules\": [] },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.8.15\" }, { \"nodeIP\": \"fd7a:115c:a1e0::5b37:80f\" }],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-B5.json",
    "content": "{\n  \"test_id\": \"SSH-B5\",\n  \"policy_file\": \"ssh_policies/ssh_b5.json\",\n  \"ssh_section\": [{ \"action\": \"accept\", \"src\": [\"user:*@passkey\"], \"dst\": [\"tag:server\"], \"users\": [\"root\"] }],\n  \"nodes\": {\n    \"user1\": { \"rules\": [] },\n    \"user-kris\": { \"rules\": [] },\n    \"user-mon\": { \"rules\": [] },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [\n            { \"nodeIP\": \"100.103.90.82\" },\n            { \"nodeIP\": \"100.90.199.68\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }\n          ],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-B6.json",
    "content": "{\n  \"test_id\": \"SSH-B6\",\n  \"policy_file\": \"ssh_policies/ssh_b6.json\",\n  \"ssh_section\": [{ \"action\": \"accept\", \"src\": [\"autogroup:tagged\"], \"dst\": [\"tag:server\"], \"users\": [\"root\"] }],\n  \"nodes\": {\n    \"user1\": { \"rules\": [] },\n    \"user-kris\": { \"rules\": [] },\n    \"user-mon\": { \"rules\": [] },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [\n            { \"nodeIP\": \"100.103.8.15\" },\n            { \"nodeIP\": \"100.108.74.26\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::5b37:80f\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::b901:4a87\" }\n          ],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-C1.json",
    "content": "{\n  \"test_id\": \"SSH-C1\",\n  \"policy_file\": \"ssh_policies/ssh_c1.json\",\n  \"ssh_section\": [{ \"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"autogroup:self\"], \"users\": [\"root\"] }],\n  \"nodes\": {\n    \"user1\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-kris\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-mon\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-server\": { \"rules\": [] },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-C2.json",
    "content": "{\n  \"test_id\": \"SSH-C2\",\n  \"policy_file\": \"ssh_policies/ssh_c2.json\",\n  \"ssh_section\": [{ \"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"tag:server\"], \"users\": [\"root\"] }],\n  \"nodes\": {\n    \"user1\": { \"rules\": [] },\n    \"user-kris\": { \"rules\": [] },\n    \"user-mon\": { \"rules\": [] },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [\n            { \"nodeIP\": \"100.103.90.82\" },\n            { \"nodeIP\": \"100.110.121.96\" },\n            { \"nodeIP\": \"100.90.199.68\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }\n          ],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-C3.json",
    "content": "{\n  \"test_id\": \"SSH-C3\",\n  \"policy_file\": \"ssh_policies/ssh_c3.json\",\n  \"ssh_section\": [\n    { \"action\": \"accept\", \"src\": [\"kristoffer@dalby.cc\"], \"dst\": [\"kristoffer@dalby.cc\"], \"users\": [\"root\"] }\n  ],\n  \"nodes\": {\n    \"user1\": { \"rules\": [] },\n    \"user-kris\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-mon\": { \"rules\": [] },\n    \"tagged-server\": { \"rules\": [] },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-C4.json",
    "content": "{\n  \"test_id\": \"SSH-C4\",\n  \"policy_file\": \"ssh_policies/ssh_c4.json\",\n  \"ssh_section\": [\n    { \"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"tag:server\", \"tag:prod\"], \"users\": [\"root\"] }\n  ],\n  \"nodes\": {\n    \"user1\": { \"rules\": [] },\n    \"user-kris\": { \"rules\": [] },\n    \"user-mon\": { \"rules\": [] },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [\n            { \"nodeIP\": \"100.103.90.82\" },\n            { \"nodeIP\": \"100.110.121.96\" },\n            { \"nodeIP\": \"100.90.199.68\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }\n          ],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-prod\": {\n      \"rules\": [\n        {\n          \"principals\": [\n            { \"nodeIP\": \"100.103.90.82\" },\n            { \"nodeIP\": \"100.110.121.96\" },\n            { \"nodeIP\": \"100.90.199.68\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }\n          ],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-D10.json",
    "content": "{\n  \"test_id\": \"SSH-D10\",\n  \"policy_file\": \"ssh_policies/ssh_d10.json\",\n  \"ssh_section\": [\n    { \"action\": \"accept\", \"src\": [\"user:*@passkey\"], \"dst\": [\"tag:server\"], \"users\": [\"localpart:*@passkey\"] }\n  ],\n  \"nodes\": {\n    \"user1\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-kris\": { \"rules\": [] },\n    \"user-mon\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          \"action\": {\n            \"accept\": true,\n            
\"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-D11.json",
    "content": "{\n  \"test_id\": \"SSH-D11\",\n  \"policy_file\": \"ssh_policies/ssh_d11.json\",\n  \"ssh_section\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\"autogroup:member\"],\n      \"dst\": [\"tag:server\"],\n      \"users\": [\"localpart:*@passkey\", \"ubuntu\"]\n    }\n  ],\n  \"nodes\": {\n    \"user1\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"root\": \"\", \"ubuntu\": \"ubuntu\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-kris\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"root\": \"\", \"ubuntu\": \"ubuntu\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-mon\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"root\": \"\", \"ubuntu\": \"ubuntu\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"root\": \"\", \"ubuntu\": \"ubuntu\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": 
\"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"root\": \"\", \"ubuntu\": \"ubuntu\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"root\": \"\", \"ubuntu\": \"ubuntu\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-D12.json",
    "content": "{\n  \"test_id\": \"SSH-D12\",\n  \"policy_file\": \"ssh_policies/ssh_d12.json\",\n  \"ssh_section\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\"autogroup:member\"],\n      \"dst\": [\"tag:server\"],\n      \"users\": [\"localpart:*@passkey\", \"ubuntu\"]\n    }\n  ],\n  \"nodes\": {\n    \"user1\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"root\": \"\", \"ubuntu\": \"ubuntu\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-kris\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"root\": \"\", \"ubuntu\": \"ubuntu\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-mon\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"root\": \"\", \"ubuntu\": \"ubuntu\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"root\": \"\", \"ubuntu\": \"ubuntu\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": 
\"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"root\": \"\", \"ubuntu\": \"ubuntu\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"root\": \"\", \"ubuntu\": \"ubuntu\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-D2.json",
    "content": "{\n  \"test_id\": \"SSH-D2\",\n  \"policy_file\": \"ssh_policies/ssh_d2.json\",\n  \"ssh_section\": [\n    { \"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"tag:server\"], \"users\": [\"localpart:*@passkey\"] }\n  ],\n  \"nodes\": {\n    \"user1\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-kris\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-mon\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            
\"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-D3.json",
    "content": "{\n  \"test_id\": \"SSH-D3\",\n  \"policy_file\": \"ssh_policies/ssh_d3.json\",\n  \"ssh_section\": [\n    { \"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"tag:server\"], \"users\": [\"localpart:*@passkey\", \"root\"] }\n  ],\n  \"nodes\": {\n    \"user1\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-kris\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-mon\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n 
           \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-D4.json",
    "content": "{\n  \"test_id\": \"SSH-D4\",\n  \"policy_file\": \"ssh_policies/ssh_d4.json\",\n  \"ssh_section\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\"autogroup:member\"],\n      \"dst\": [\"tag:server\"],\n      \"users\": [\"localpart:*@passkey\", \"autogroup:nonroot\"]\n    }\n  ],\n  \"nodes\": {\n    \"user1\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-kris\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-mon\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          
\"sshUsers\": { \"*\": \"=\", \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-D5.json",
    "content": "{\n  \"test_id\": \"SSH-D5\",\n  \"policy_file\": \"ssh_policies/ssh_d5.json\",\n  \"ssh_section\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\"autogroup:member\"],\n      \"dst\": [\"tag:server\"],\n      \"users\": [\"localpart:*@passkey\", \"root\", \"autogroup:nonroot\"]\n    }\n  ],\n  \"nodes\": {\n    \"user1\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-kris\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-mon\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": 
\"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-D6.json",
    "content": "{\n  \"test_id\": \"SSH-D6\",\n  \"policy_file\": \"ssh_policies/ssh_d6.json\",\n  \"ssh_section\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\"autogroup:member\"],\n      \"dst\": [\"tag:server\"],\n      \"users\": [\"localpart:*@passkey\", \"autogroup:nonroot\"]\n    }\n  ],\n  \"nodes\": {\n    \"user1\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-kris\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-mon\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          
\"sshUsers\": { \"*\": \"=\", \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-D7.json",
    "content": "{\n  \"test_id\": \"SSH-D7\",\n  \"policy_file\": \"ssh_policies/ssh_d7.json\",\n  \"ssh_section\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\"autogroup:member\"],\n      \"dst\": [\"tag:server\"],\n      \"users\": [\"localpart:*@passkey\", \"root\", \"autogroup:nonroot\"]\n    }\n  ],\n  \"nodes\": {\n    \"user1\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-kris\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-mon\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": 
\"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-D8.json",
    "content": "{\n  \"test_id\": \"SSH-D8\",\n  \"policy_file\": \"ssh_policies/ssh_d8.json\",\n  \"ssh_section\": [\n    { \"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"autogroup:self\"], \"users\": [\"localpart:*@passkey\"] }\n  ],\n  \"nodes\": {\n    \"user1\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-kris\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-mon\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-server\": { \"rules\": [] },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-D9.json",
    "content": "{\n  \"test_id\": \"SSH-D9\",\n  \"policy_file\": \"ssh_policies/ssh_d9.json\",\n  \"ssh_section\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\"autogroup:member\"],\n      \"dst\": [\"autogroup:self\"],\n      \"users\": [\"localpart:*@passkey\", \"root\"]\n    }\n  ],\n  \"nodes\": {\n    \"user1\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-kris\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-mon\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-server\": { \"rules\": [] },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-E3.json",
    "content": "{\n  \"test_id\": \"SSH-E3\",\n  \"policy_file\": \"ssh_policies/ssh_e3.json\",\n  \"ssh_section\": [],\n  \"nodes\": {\n    \"user1\": { \"rules\": [] },\n    \"user-kris\": { \"rules\": [] },\n    \"user-mon\": { \"rules\": [] },\n    \"tagged-server\": { \"rules\": [] },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-E4.json",
    "content": "{\n  \"test_id\": \"SSH-E4\",\n  \"policy_file\": \"ssh_policies/ssh_e4.json\",\n  \"ssh_section\": null,\n  \"nodes\": {\n    \"user1\": { \"rules\": [] },\n    \"user-kris\": { \"rules\": [] },\n    \"user-mon\": { \"rules\": [] },\n    \"tagged-server\": { \"rules\": [] },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-E5.json",
    "content": "{\n  \"test_id\": \"SSH-E5\",\n  \"policy_file\": \"ssh_policies/ssh_e5.json\",\n  \"ssh_section\": [{ \"action\": \"accept\", \"src\": [\"tag:prod\"], \"dst\": [\"tag:server\"], \"users\": [\"localpart:*@passkey\"] }],\n  \"nodes\": {\n    \"user1\": { \"rules\": [] },\n    \"user-kris\": { \"rules\": [] },\n    \"user-mon\": { \"rules\": [] },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.8.15\" }, { \"nodeIP\": \"fd7a:115c:a1e0::5b37:80f\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-prod\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.8.15\" }, { \"nodeIP\": \"fd7a:115c:a1e0::5b37:80f\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-E6.json",
    "content": "{\n  \"test_id\": \"SSH-E6\",\n  \"policy_file\": \"ssh_policies/ssh_e6.json\",\n  \"ssh_section\": [\n    {\n      \"action\": \"check\",\n      \"src\": [\n        \"autogroup:member\"\n      ],\n      \"dst\": [\n        \"tag:server\"\n      ],\n      \"users\": [\n        \"localpart:*@passkey\"\n      ],\n      \"checkPeriod\": \"1h\"\n    }\n  ],\n  \"nodes\": {\n    \"user1\": {\n      \"rules\": [\n        {\n          \"principals\": [\n            {\n              \"nodeIP\": \"100.90.199.68\"\n            },\n            {\n              \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\"\n            }\n          ],\n          \"sshUsers\": {\n            \"root\": \"\"\n          },\n          \"action\": {\n            \"holdAndDelegate\": \"unused-server-url/machine/ssh/action/from/$SRC_NODE_ID/to/$DST_NODE_ID?ssh_user=$SSH_USER&local_user=$LOCAL_USER\",\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true,\n            \"sessionDuration\": 3600000000000\n          }\n        },\n        {\n          \"principals\": [\n            {\n              \"nodeIP\": \"100.90.199.68\"\n            },\n            {\n              \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\"\n            }\n          ],\n          \"sshUsers\": {\n            \"kratail2tid\": \"kratail2tid\"\n          },\n          \"action\": {\n            \"holdAndDelegate\": \"unused-server-url/machine/ssh/action/from/$SRC_NODE_ID/to/$DST_NODE_ID?ssh_user=$SSH_USER&local_user=$LOCAL_USER\",\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true,\n            \"sessionDuration\": 3600000000000\n          }\n        }\n      ]\n    },\n    \"user-kris\": {\n      \"rules\": [\n        {\n          \"principals\": [\n            {\n              \"nodeIP\": \"100.110.121.96\"\n            },\n            {\n              \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\"\n            }\n          ],\n          \"sshUsers\": {\n            \"root\": \"\"\n          },\n          \"action\": {\n            \"holdAndDelegate\": \"unused-server-url/machine/ssh/action/from/$SRC_NODE_ID/to/$DST_NODE_ID?ssh_user=$SSH_USER&local_user=$LOCAL_USER\",\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true,\n            \"sessionDuration\": 3600000000000\n          }\n        }\n      ]\n    },\n    \"user-mon\": {\n      \"rules\": [\n        {\n          \"principals\": [\n            {\n              \"nodeIP\": \"100.103.90.82\"\n            },\n            {\n              \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\"\n            }\n          ],\n          \"sshUsers\": {\n            \"root\": \"\"\n          },\n          \"action\": {\n            \"holdAndDelegate\": \"unused-server-url/machine/ssh/action/from/$SRC_NODE_ID/to/$DST_NODE_ID?ssh_user=$SSH_USER&local_user=$LOCAL_USER\",\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true,\n            \"sessionDuration\": 3600000000000\n          }\n        },\n        {\n          \"principals\": [\n            {\n              \"nodeIP\": \"100.103.90.82\"\n            },\n            {\n              \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\"\n            }\n          ],\n          \"sshUsers\": {\n            
\"monitorpasskeykradalby\": \"monitorpasskeykradalby\"\n          },\n          \"action\": {\n            \"holdAndDelegate\": \"unused-server-url/machine/ssh/action/from/$SRC_NODE_ID/to/$DST_NODE_ID?ssh_user=$SSH_USER&local_user=$LOCAL_USER\",\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true,\n            \"sessionDuration\": 3600000000000\n          }\n        }\n      ]\n    },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [\n            {\n              \"nodeIP\": \"100.90.199.68\"\n            },\n            {\n              \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\"\n            }\n          ],\n          \"sshUsers\": {\n            \"root\": \"\"\n          },\n          \"action\": {\n            \"holdAndDelegate\": \"unused-server-url/machine/ssh/action/from/$SRC_NODE_ID/to/$DST_NODE_ID?ssh_user=$SSH_USER&local_user=$LOCAL_USER\",\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true,\n            \"sessionDuration\": 3600000000000\n          }\n        },\n        {\n          \"principals\": [\n            {\n              \"nodeIP\": \"100.90.199.68\"\n            },\n            {\n              \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\"\n            }\n          ],\n          \"sshUsers\": {\n            \"kratail2tid\": \"kratail2tid\"\n          },\n          \"action\": {\n            \"holdAndDelegate\": \"unused-server-url/machine/ssh/action/from/$SRC_NODE_ID/to/$DST_NODE_ID?ssh_user=$SSH_USER&local_user=$LOCAL_USER\",\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true,\n            \"sessionDuration\": 3600000000000\n          }\n        },\n        {\n          \"principals\": [\n            {\n              \"nodeIP\": \"100.110.121.96\"\n            },\n            {\n              \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\"\n            }\n          ],\n          \"sshUsers\": {\n            \"root\": \"\"\n          },\n          \"action\": {\n            \"holdAndDelegate\": \"unused-server-url/machine/ssh/action/from/$SRC_NODE_ID/to/$DST_NODE_ID?ssh_user=$SSH_USER&local_user=$LOCAL_USER\",\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true,\n            \"sessionDuration\": 3600000000000\n          }\n        },\n        {\n          \"principals\": [\n            {\n              \"nodeIP\": \"100.103.90.82\"\n            },\n            {\n              \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\"\n            }\n          ],\n          \"sshUsers\": {\n            \"root\": \"\"\n          },\n          \"action\": {\n            \"holdAndDelegate\": \"unused-server-url/machine/ssh/action/from/$SRC_NODE_ID/to/$DST_NODE_ID?ssh_user=$SSH_USER&local_user=$LOCAL_USER\",\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true,\n            \"sessionDuration\": 3600000000000\n          }\n        },\n        {\n          \"principals\": [\n            {\n              \"nodeIP\": \"100.103.90.82\"\n            },\n            {\n              \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\"\n            }\n          ],\n          \"sshUsers\": {\n            \"monitorpasskeykradalby\": 
\"monitorpasskeykradalby\"\n          },\n          \"action\": {\n            \"holdAndDelegate\": \"unused-server-url/machine/ssh/action/from/$SRC_NODE_ID/to/$DST_NODE_ID?ssh_user=$SSH_USER&local_user=$LOCAL_USER\",\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true,\n            \"sessionDuration\": 3600000000000\n          }\n        }\n      ]\n    },\n    \"tagged-prod\": {\n      \"rules\": []\n    }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-F1.json",
    "content": "{\n  \"test_id\": \"SSH-F1\",\n  \"policy_file\": \"ssh_policies/ssh_f1.json\",\n  \"ssh_section\": [\n    { \"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"autogroup:self\"], \"users\": [\"root\"] },\n    { \"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"tag:server\"], \"users\": [\"autogroup:nonroot\"] }\n  ],\n  \"nodes\": {\n    \"user1\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-kris\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-mon\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [\n            { \"nodeIP\": \"100.103.90.82\" },\n            { \"nodeIP\": \"100.110.121.96\" },\n            { \"nodeIP\": \"100.90.199.68\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }\n          ],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-F2.json",
    "content": "{\n  \"test_id\": \"SSH-F2\",\n  \"policy_file\": \"ssh_policies/ssh_f2.json\",\n  \"ssh_section\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"autogroup:member\"\n      ],\n      \"dst\": [\n        \"tag:server\"\n      ],\n      \"users\": [\n        \"root\"\n      ]\n    },\n    {\n      \"action\": \"check\",\n      \"src\": [\n        \"autogroup:member\"\n      ],\n      \"dst\": [\n        \"tag:server\"\n      ],\n      \"users\": [\n        \"root\"\n      ]\n    }\n  ],\n  \"nodes\": {\n    \"user1\": {\n      \"rules\": []\n    },\n    \"user-kris\": {\n      \"rules\": []\n    },\n    \"user-mon\": {\n      \"rules\": []\n    },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [\n            {\n              \"nodeIP\": \"100.103.90.82\"\n            },\n            {\n              \"nodeIP\": \"100.110.121.96\"\n            },\n            {\n              \"nodeIP\": \"100.90.199.68\"\n            },\n            {\n              \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\"\n            },\n            {\n              \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\"\n            },\n            {\n              \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\"\n            }\n          ],\n          \"sshUsers\": {\n            \"root\": \"root\"\n          },\n          \"action\": {\n            \"holdAndDelegate\": \"unused-server-url/machine/ssh/action/from/$SRC_NODE_ID/to/$DST_NODE_ID?ssh_user=$SSH_USER&local_user=$LOCAL_USER\",\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true,\n            \"sessionDuration\": 43200000000000\n          }\n        },\n        {\n          \"principals\": [\n            {\n              \"nodeIP\": \"100.103.90.82\"\n            },\n            {\n              \"nodeIP\": \"100.110.121.96\"\n            },\n            {\n              \"nodeIP\": \"100.90.199.68\"\n            },\n            {\n              \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\"\n            },\n            {\n              \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\"\n            },\n            {\n              \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\"\n            }\n          ],\n          \"sshUsers\": {\n            \"root\": \"root\"\n          },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-prod\": {\n      \"rules\": []\n    }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-F3.json",
    "content": "{\n  \"test_id\": \"SSH-F3\",\n  \"policy_file\": \"ssh_policies/ssh_f3.json\",\n  \"ssh_section\": [\n    { \"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"tag:server\"], \"users\": [\"localpart:*@passkey\"] },\n    { \"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"tag:server\"], \"users\": [\"root\"] }\n  ],\n  \"nodes\": {\n    \"user1\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-kris\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-mon\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          
\"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [\n            { \"nodeIP\": \"100.103.90.82\" },\n            { \"nodeIP\": \"100.110.121.96\" },\n            { \"nodeIP\": \"100.90.199.68\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }\n          ],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-F4.json",
    "content": "{\n  \"test_id\": \"SSH-F4\",\n  \"policy_file\": \"ssh_policies/ssh_f4.json\",\n  \"ssh_section\": [\n    { \"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"tag:server\"], \"users\": [\"localpart:*@passkey\"] },\n    { \"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"tag:server\"], \"users\": [\"autogroup:nonroot\"] }\n  ],\n  \"nodes\": {\n    \"user1\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-kris\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-mon\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n   
       \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [\n            { \"nodeIP\": \"100.103.90.82\" },\n            { \"nodeIP\": \"100.110.121.96\" },\n            { \"nodeIP\": \"100.90.199.68\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }\n          ],\n          \"sshUsers\": { \"*\": \"=\", \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-F5.json",
    "content": "{\n  \"test_id\": \"SSH-F5\",\n  \"policy_file\": \"ssh_policies/ssh_f5.json\",\n  \"ssh_section\": [\n    { \"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"autogroup:self\"], \"users\": [\"localpart:*@passkey\"] },\n    { \"action\": \"accept\", \"src\": [\"autogroup:member\"], \"dst\": [\"tag:server\"], \"users\": [\"localpart:*@passkey\"] }\n  ],\n  \"nodes\": {\n    \"user1\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-kris\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"user-mon\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          
\"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.90.199.68\" }, { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" }],\n          \"sshUsers\": { \"kratail2tid\": \"kratail2tid\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.110.121.96\" }, { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"root\": \"\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        },\n        {\n          \"principals\": [{ \"nodeIP\": \"100.103.90.82\" }, { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }],\n          \"sshUsers\": { \"monitorpasskeykradalby\": \"monitorpasskeykradalby\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          }\n        }\n      ]\n    },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-G1.json",
    "content": "{\n  \"test_id\": \"SSH-G1\",\n  \"policy_file\": \"ssh_policies/ssh_g1.json\",\n  \"ssh_section\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\"autogroup:member\"],\n      \"dst\": [\"tag:server\"],\n      \"users\": [\"root\"],\n      \"acceptEnv\": [\"GIT_EDITOR\", \"TERM\"]\n    }\n  ],\n  \"nodes\": {\n    \"user1\": { \"rules\": [] },\n    \"user-kris\": { \"rules\": [] },\n    \"user-mon\": { \"rules\": [] },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [\n            { \"nodeIP\": \"100.103.90.82\" },\n            { \"nodeIP\": \"100.110.121.96\" },\n            { \"nodeIP\": \"100.90.199.68\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }\n          ],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          },\n          \"acceptEnv\": [\"GIT_EDITOR\", \"TERM\"]\n        }\n      ]\n    },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/testdata/ssh_results/SSH-G2.json",
    "content": "{\n  \"test_id\": \"SSH-G2\",\n  \"policy_file\": \"ssh_policies/ssh_g2.json\",\n  \"ssh_section\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\"autogroup:member\"],\n      \"dst\": [\"tag:server\"],\n      \"users\": [\"root\"],\n      \"acceptEnv\": [\"GIT_*\", \"CUSTOM_VAR_?\"]\n    }\n  ],\n  \"nodes\": {\n    \"user1\": { \"rules\": [] },\n    \"user-kris\": { \"rules\": [] },\n    \"user-mon\": { \"rules\": [] },\n    \"tagged-server\": {\n      \"rules\": [\n        {\n          \"principals\": [\n            { \"nodeIP\": \"100.103.90.82\" },\n            { \"nodeIP\": \"100.110.121.96\" },\n            { \"nodeIP\": \"100.90.199.68\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::1737:7960\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::2d01:c747\" },\n            { \"nodeIP\": \"fd7a:115c:a1e0::9e37:5a52\" }\n          ],\n          \"sshUsers\": { \"root\": \"root\" },\n          \"action\": {\n            \"accept\": true,\n            \"allowAgentForwarding\": true,\n            \"allowLocalPortForwarding\": true,\n            \"allowRemotePortForwarding\": true\n          },\n          \"acceptEnv\": [\"GIT_*\", \"CUSTOM_VAR_?\"]\n        }\n      ]\n    },\n    \"tagged-prod\": { \"rules\": [] }\n  }\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/types.go",
    "content": "package v2\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net/netip\"\n\t\"slices\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/go-json-experiment/json\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/prometheus/common/model\"\n\t\"github.com/tailscale/hujson\"\n\t\"go4.org/netipx\"\n\t\"tailscale.com/net/tsaddr\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/views\"\n\t\"tailscale.com/util/multierr\"\n\t\"tailscale.com/util/slicesx\"\n)\n\n// Global JSON options for consistent parsing across all struct unmarshaling.\nvar policyJSONOpts = []json.Options{\n\tjson.DefaultOptionsV2(),\n\tjson.MatchCaseInsensitiveNames(true),\n\tjson.RejectUnknownMembers(true),\n}\n\nconst Wildcard = Asterix(0)\n\nvar ErrAutogroupSelfRequiresPerNodeResolution = errors.New(\"autogroup:self requires per-node resolution and cannot be resolved in this context\")\n\nvar ErrCircularReference = errors.New(\"circular reference detected\")\n\nvar ErrUndefinedTagReference = errors.New(\"references undefined tag\")\n\n// SSH validation errors.\nvar (\n\tErrSSHTagSourceToUserDest             = errors.New(\"tags in SSH source cannot access user-owned devices\")\n\tErrSSHUserDestRequiresSameUser        = errors.New(\"user destination requires source to contain only that same user\")\n\tErrSSHAutogroupSelfRequiresUserSource = errors.New(\"autogroup:self destination requires source to contain only users or groups, not tags or autogroup:tagged\")\n\tErrSSHTagSourceToAutogroupMember      = errors.New(\"tags in SSH source cannot access autogroup:member (user-owned devices)\")\n\tErrSSHWildcardDestination             = errors.New(\"wildcard (*) is not supported as SSH destination\")\n\tErrSSHCheckPeriodBelowMin             = errors.New(\"checkPeriod below minimum of 1 minute\")\n\tErrSSHCheckPeriodAboveMax             = errors.New(\"checkPeriod above maximum of 168 hours (1 week)\")\n\tErrSSHCheckPeriodOnNonCheck           = errors.New(\"checkPeriod is only valid with action \\\"check\\\"\")\n\tErrInvalidLocalpart                   = errors.New(\"invalid localpart format, must be localpart:*@<domain>\")\n)\n\n// SSH check period constants per Tailscale docs:\n// https://tailscale.com/kb/1193/tailscale-ssh\nconst (\n\tSSHCheckPeriodDefault = 12 * time.Hour\n\tSSHCheckPeriodMin     = time.Minute\n\tSSHCheckPeriodMax     = 168 * time.Hour\n)\n\n// ACL validation errors.\nvar (\n\tErrACLAutogroupSelfInvalidSource = errors.New(\"autogroup:self destination requires sources to be users, groups, or autogroup:member only\")\n)\n\n// Policy validation errors.\nvar (\n\tErrUnknownAliasType            = errors.New(\"unknown alias type\")\n\tErrUnknownAutoApprover         = errors.New(\"unknown auto approver type\")\n\tErrUnknownOwnerType            = errors.New(\"unknown owner type\")\n\tErrInvalidUsername             = errors.New(\"username must contain @\")\n\tErrUserNotFound                = errors.New(\"user not found\")\n\tErrMultipleUsersFound          = errors.New(\"multiple users found\")\n\tErrInvalidGroupFormat          = errors.New(\"group must start with 'group:'\")\n\tErrInvalidTagFormat            = errors.New(\"tag must start with 'tag:'\")\n\tErrInvalidHostname             = errors.New(\"invalid hostname\")\n\tErrHostResolve                 = errors.New(\"error resolving host\")\n\tErrInvalidPrefix               = errors.New(\"invalid prefix\")\n\tErrInvalidAutogroup            = errors.New(\"invalid 
autogroup\")\n\tErrUnknownAutogroup            = errors.New(\"unknown autogroup\")\n\tErrHostportMissingColon        = errors.New(\"hostport must contain a colon\")\n\tErrTypeNotSupported            = errors.New(\"type not supported\")\n\tErrInvalidAlias                = errors.New(\"invalid alias format\")\n\tErrInvalidAutoApprover         = errors.New(\"invalid auto approver format\")\n\tErrInvalidOwner                = errors.New(\"invalid owner format\")\n\tErrGroupNotDefined             = errors.New(\"group not defined in policy\")\n\tErrInvalidGroupMember          = errors.New(\"invalid group member type\")\n\tErrGroupValueNotArray          = errors.New(\"group value must be an array of users\")\n\tErrNestedGroups                = errors.New(\"nested groups are not allowed\")\n\tErrInvalidHostIP               = errors.New(\"hostname contains invalid IP address\")\n\tErrTagNotDefined               = errors.New(\"tag not defined in policy\")\n\tErrAutoApproverNotAlias        = errors.New(\"auto approver is not an alias\")\n\tErrInvalidACLAction            = errors.New(\"invalid ACL action\")\n\tErrInvalidSSHAction            = errors.New(\"invalid SSH action\")\n\tErrInvalidProtocolNumber       = errors.New(\"invalid protocol number\")\n\tErrProtocolLeadingZero         = errors.New(\"leading 0 not permitted in protocol number\")\n\tErrProtocolOutOfRange          = errors.New(\"protocol number out of range (0-255)\")\n\tErrAutogroupNotSupported       = errors.New(\"autogroup not supported in headscale\")\n\tErrAutogroupInternetSrc        = errors.New(\"autogroup:internet can only be used in ACL destinations\")\n\tErrAutogroupSelfSrc            = errors.New(\"autogroup:self can only be used in ACL destinations\")\n\tErrAutogroupNotSupportedACLSrc = errors.New(\"autogroup not supported for ACL sources\")\n\tErrAutogroupNotSupportedACLDst = errors.New(\"autogroup not supported for ACL destinations\")\n\tErrAutogroupNotSupportedSSHSrc = errors.New(\"autogroup not supported for SSH sources\")\n\tErrAutogroupNotSupportedSSHDst = errors.New(\"autogroup not supported for SSH destinations\")\n\tErrAutogroupNotSupportedSSHUsr = errors.New(\"autogroup not supported for SSH user\")\n\tErrHostNotDefined              = errors.New(\"host not defined in policy\")\n\tErrSSHSourceAliasNotSupported  = errors.New(\"alias not supported for SSH source\")\n\tErrSSHDestAliasNotSupported    = errors.New(\"alias not supported for SSH destination\")\n\tErrUnknownSSHDestAlias         = errors.New(\"unknown SSH destination alias type\")\n\tErrUnknownSSHSrcAlias          = errors.New(\"unknown SSH source alias type\")\n\tErrUnknownField                = errors.New(\"unknown field\")\n\tErrProtocolNoSpecificPorts     = errors.New(\"protocol does not support specific ports\")\n)\n\ntype Asterix int\n\nfunc (a Asterix) Validate() error {\n\treturn nil\n}\n\nfunc (a Asterix) String() string {\n\treturn \"*\"\n}\n\n// MarshalJSON marshals the Asterix to JSON.\nfunc (a Asterix) MarshalJSON() ([]byte, error) {\n\treturn []byte(`\"*\"`), nil\n}\n\n// MarshalJSON marshals the AliasWithPorts to JSON.\nfunc (a AliasWithPorts) MarshalJSON() ([]byte, error) {\n\tif a.Alias == nil {\n\t\treturn []byte(`\"\"`), nil\n\t}\n\n\tvar alias string\n\n\tswitch v := a.Alias.(type) {\n\tcase *Username:\n\t\talias = string(*v)\n\tcase *Group:\n\t\talias = string(*v)\n\tcase *Tag:\n\t\talias = string(*v)\n\tcase *Host:\n\t\talias = string(*v)\n\tcase *Prefix:\n\t\talias = v.String()\n\tcase *AutoGroup:\n\t\talias = string(*v)\n\tcase 
Asterix:\n\t\talias = \"*\"\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"%w: %T\", ErrUnknownAliasType, v)\n\t}\n\n\t// If no ports are specified\n\tif len(a.Ports) == 0 {\n\t\treturn json.Marshal(alias)\n\t}\n\n\t// Check if it's the wildcard port range\n\tif len(a.Ports) == 1 && a.Ports[0].First == 0 && a.Ports[0].Last == 65535 {\n\t\treturn json.Marshal(alias + \":*\")\n\t}\n\n\t// Otherwise, format as \"alias:ports\"\n\tvar ports []string\n\n\tfor _, port := range a.Ports {\n\t\tif port.First == port.Last {\n\t\t\tports = append(ports, strconv.FormatUint(uint64(port.First), 10))\n\t\t} else {\n\t\t\tports = append(ports, fmt.Sprintf(\"%d-%d\", port.First, port.Last))\n\t\t}\n\t}\n\n\treturn json.Marshal(fmt.Sprintf(\"%s:%s\", alias, strings.Join(ports, \",\")))\n}\n\nfunc (a Asterix) UnmarshalJSON(b []byte) error {\n\treturn nil\n}\n\nfunc (a Asterix) Resolve(_ *Policy, _ types.Users, nodes views.Slice[types.NodeView]) (*netipx.IPSet, error) {\n\tvar ips netipx.IPSetBuilder\n\n\t// Use Tailscale's CGNAT range for IPv4 and ULA range for IPv6.\n\t// This matches Tailscale's behavior where wildcard (*) refers to\n\t// \"any node in the tailnet\" which uses these address ranges.\n\tips.AddPrefix(tsaddr.CGNATRange())\n\tips.AddPrefix(tsaddr.TailscaleULARange())\n\n\treturn ips.IPSet()\n}\n\n// Username is a string that represents a username; it must contain an @.\ntype Username string\n\nfunc (u *Username) Validate() error {\n\tif isUser(string(*u)) {\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"%w, got: %q\", ErrInvalidUsername, *u)\n}\n\nfunc (u *Username) String() string {\n\treturn string(*u)\n}\n\n// MarshalJSON marshals the Username to JSON.\nfunc (u *Username) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(string(*u))\n}\n\n// MarshalJSON marshals the Prefix to JSON.\nfunc (p *Prefix) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(p.String())\n}\n\nfunc (u *Username) UnmarshalJSON(b []byte) error {\n\t*u = Username(strings.Trim(string(b), `\"`))\n\n\terr := u.Validate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (u *Username) CanBeTagOwner() bool {\n\treturn true\n}\n\nfunc (u *Username) CanBeAutoApprover() bool {\n\treturn true\n}\n\n// resolveUser attempts to find a user in the provided [types.Users] slice that matches the Username.\n// It prioritizes matching the ProviderIdentifier, and if not found, it falls back to matching the Email or Name.\n// If no matching user is found, it returns an error indicating that no matching user was found.\n// If multiple matching users are found, it returns an error indicating that multiple users were found.\n// It returns the matched types.User and a nil error if exactly one match is found.\nfunc (u *Username) resolveUser(users types.Users) (types.User, error) {\n\tvar potentialUsers types.Users\n\n\t// At parse time, we require all usernames to contain an \"@\" character; if the\n\t// username token does not naturally do so (like email), the user has to\n\t// add it to the end of the username. 
We strip it here as we do not expect the\n\t// usernames to be stored with the \"@\".\n\tuTrimmed := strings.TrimSuffix(u.String(), \"@\")\n\n\tfor _, user := range users {\n\t\tif user.ProviderIdentifier.Valid && user.ProviderIdentifier.String == uTrimmed {\n\t\t\t// Prioritize ProviderIdentifier match and exit early\n\t\t\treturn user, nil\n\t\t}\n\n\t\tif user.Email == uTrimmed || user.Name == uTrimmed {\n\t\t\tpotentialUsers = append(potentialUsers, user)\n\t\t}\n\t}\n\n\tif len(potentialUsers) == 0 {\n\t\treturn types.User{}, fmt.Errorf(\"%w: token %q\", ErrUserNotFound, u.String())\n\t}\n\n\tif len(potentialUsers) > 1 {\n\t\treturn types.User{}, fmt.Errorf(\"%w: token %q found: %s\", ErrMultipleUsersFound, u.String(), potentialUsers.String())\n\t}\n\n\treturn potentialUsers[0], nil\n}\n\nfunc (u *Username) Resolve(_ *Policy, users types.Users, nodes views.Slice[types.NodeView]) (*netipx.IPSet, error) {\n\tvar (\n\t\tips  netipx.IPSetBuilder\n\t\terrs []error\n\t)\n\n\tuser, err := u.resolveUser(users)\n\tif err != nil {\n\t\terrs = append(errs, err)\n\t}\n\n\tfor _, node := range nodes.All() {\n\t\t// Skip tagged nodes - they are identified by tags, not users\n\t\tif node.IsTagged() {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Skip nodes without a user (defensive check for tests)\n\t\tif !node.User().Valid() {\n\t\t\tcontinue\n\t\t}\n\n\t\tif node.User().ID() == user.ID {\n\t\t\tnode.AppendToIPSet(&ips)\n\t\t}\n\t}\n\n\treturn buildIPSetMultiErr(&ips, errs)\n}\n\n// Group is a special string which is always prefixed with `group:`.\ntype Group string\n\nfunc (g *Group) Validate() error {\n\tif isGroup(string(*g)) {\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"%w, got: %q\", ErrInvalidGroupFormat, *g)\n}\n\nfunc (g *Group) UnmarshalJSON(b []byte) error {\n\t*g = Group(strings.Trim(string(b), `\"`))\n\n\terr := g.Validate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (g *Group) CanBeTagOwner() bool {\n\treturn true\n}\n\nfunc (g *Group) CanBeAutoApprover() bool {\n\treturn true\n}\n\n// String returns the string representation of the Group.\nfunc (g *Group) String() string {\n\treturn string(*g)\n}\n\nfunc (h *Host) String() string {\n\treturn string(*h)\n}\n\n// MarshalJSON marshals the Host to JSON.\nfunc (h *Host) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(string(*h))\n}\n\n// MarshalJSON marshals the Group to JSON.\nfunc (g *Group) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(string(*g))\n}\n\nfunc (g *Group) Resolve(p *Policy, users types.Users, nodes views.Slice[types.NodeView]) (*netipx.IPSet, error) {\n\tvar (\n\t\tips  netipx.IPSetBuilder\n\t\terrs []error\n\t)\n\n\tfor _, user := range p.Groups[*g] {\n\t\tuips, err := user.Resolve(nil, users, nodes)\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\n\t\tips.AddSet(uips)\n\t}\n\n\treturn buildIPSetMultiErr(&ips, errs)\n}\n\n// Tag is a special string which is always prefixed with `tag:`.\ntype Tag string\n\nfunc (t *Tag) Validate() error {\n\tif isTag(string(*t)) {\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"%w, got: %q\", ErrInvalidTagFormat, *t)\n}\n\nfunc (t *Tag) UnmarshalJSON(b []byte) error {\n\t*t = Tag(strings.Trim(string(b), `\"`))\n\n\terr := t.Validate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (t *Tag) Resolve(p *Policy, users types.Users, nodes views.Slice[types.NodeView]) (*netipx.IPSet, error) {\n\tvar ips netipx.IPSetBuilder\n\n\tfor _, node := range nodes.All() {\n\t\t// Check if node has this tag\n\t\tif node.HasTag(string(*t)) 
{\n\t\t\tnode.AppendToIPSet(&ips)\n\t\t}\n\t}\n\n\treturn ips.IPSet()\n}\n\nfunc (t *Tag) CanBeAutoApprover() bool {\n\treturn true\n}\n\nfunc (t *Tag) CanBeTagOwner() bool {\n\treturn true\n}\n\nfunc (t *Tag) String() string {\n\treturn string(*t)\n}\n\n// MarshalJSON marshals the Tag to JSON.\nfunc (t *Tag) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(string(*t))\n}\n\n// Host is a string that represents a hostname.\ntype Host string\n\nfunc (h *Host) Validate() error {\n\tif isHost(string(*h)) {\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"%w: %q\", ErrInvalidHostname, *h)\n}\n\nfunc (h *Host) UnmarshalJSON(b []byte) error {\n\t*h = Host(strings.Trim(string(b), `\"`))\n\n\terr := h.Validate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (h *Host) Resolve(p *Policy, _ types.Users, nodes views.Slice[types.NodeView]) (*netipx.IPSet, error) {\n\tvar (\n\t\tips  netipx.IPSetBuilder\n\t\terrs []error\n\t)\n\n\tpref, ok := p.Hosts[*h]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"%w: %q\", ErrHostResolve, *h)\n\t}\n\n\terr := pref.Validate()\n\tif err != nil {\n\t\terrs = append(errs, err)\n\t}\n\n\tips.AddPrefix(netip.Prefix(pref))\n\n\t// If the IP is a single host, look for a node to ensure we add all the IPs of\n\t// the node to the IPSet.\n\tappendIfNodeHasIP(nodes, &ips, netip.Prefix(pref))\n\n\t// TODO(kradalby): I am a bit unsure what is the correct way to do this,\n\t// should a host with a non single IP be able to resolve the full host (inc all IPs).\n\tipsTemp, err := ips.IPSet()\n\tif err != nil {\n\t\terrs = append(errs, err)\n\t}\n\n\tfor _, node := range nodes.All() {\n\t\tif node.InIPSet(ipsTemp) {\n\t\t\tnode.AppendToIPSet(&ips)\n\t\t}\n\t}\n\n\treturn buildIPSetMultiErr(&ips, errs)\n}\n\ntype Prefix netip.Prefix\n\nfunc (p *Prefix) Validate() error {\n\tif netip.Prefix(*p).IsValid() {\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"%w: %s\", ErrInvalidPrefix, p.String())\n}\n\nfunc (p *Prefix) String() string {\n\treturn netip.Prefix(*p).String()\n}\n\nfunc (p *Prefix) parseString(addr string) error {\n\tif !strings.Contains(addr, \"/\") {\n\t\taddr, err := netip.ParseAddr(addr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\taddrPref, err := addr.Prefix(addr.BitLen())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t*p = Prefix(addrPref)\n\n\t\treturn nil\n\t}\n\n\tpref, err := netip.ParsePrefix(addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t*p = Prefix(pref)\n\n\treturn nil\n}\n\nfunc (p *Prefix) UnmarshalJSON(b []byte) error {\n\terr := p.parseString(strings.Trim(string(b), `\"`))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := p.Validate(); err != nil { //nolint:noinlineerr\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// Resolve resolves the Prefix to an IPSet. The IPSet will contain all the IP\n// addresses that the Prefix represents within Headscale. 
It is the product\n// of the Prefix and the Policy, Users, and Nodes.\n//\n// See [Policy], [types.Users], and [types.Nodes] for more details.\nfunc (p *Prefix) Resolve(_ *Policy, _ types.Users, nodes views.Slice[types.NodeView]) (*netipx.IPSet, error) {\n\tvar (\n\t\tips  netipx.IPSetBuilder\n\t\terrs []error\n\t)\n\n\tips.AddPrefix(netip.Prefix(*p))\n\t// If the IP is a single host, look for a node to ensure we add all the IPs of\n\t// the node to the IPSet.\n\tappendIfNodeHasIP(nodes, &ips, netip.Prefix(*p))\n\n\treturn buildIPSetMultiErr(&ips, errs)\n}\n\n// appendIfNodeHasIP appends the IPs of the nodes to the IPSet if the node has the\n// IP address in the prefix.\nfunc appendIfNodeHasIP(nodes views.Slice[types.NodeView], ips *netipx.IPSetBuilder, pref netip.Prefix) {\n\tif !pref.IsSingleIP() && !tsaddr.IsTailscaleIP(pref.Addr()) {\n\t\treturn\n\t}\n\n\tfor _, node := range nodes.All() {\n\t\tif node.HasIP(pref.Addr()) {\n\t\t\tnode.AppendToIPSet(ips)\n\t\t}\n\t}\n}\n\n// AutoGroup is a special string which is always prefixed with `autogroup:`.\ntype AutoGroup string\n\nconst (\n\tAutoGroupInternet AutoGroup = \"autogroup:internet\"\n\tAutoGroupMember   AutoGroup = \"autogroup:member\"\n\tAutoGroupNonRoot  AutoGroup = \"autogroup:nonroot\"\n\tAutoGroupTagged   AutoGroup = \"autogroup:tagged\"\n\tAutoGroupSelf     AutoGroup = \"autogroup:self\"\n)\n\nvar autogroups = []AutoGroup{\n\tAutoGroupInternet,\n\tAutoGroupMember,\n\tAutoGroupNonRoot,\n\tAutoGroupTagged,\n\tAutoGroupSelf,\n}\n\nfunc (ag *AutoGroup) Validate() error {\n\tif slices.Contains(autogroups, *ag) {\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"%w: got %q, must be one of %v\", ErrInvalidAutogroup, *ag, autogroups)\n}\n\nfunc (ag *AutoGroup) UnmarshalJSON(b []byte) error {\n\t*ag = AutoGroup(strings.Trim(string(b), `\"`))\n\n\terr := ag.Validate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (ag *AutoGroup) String() string {\n\treturn string(*ag)\n}\n\n// MarshalJSON marshals the AutoGroup to JSON.\nfunc (ag *AutoGroup) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(string(*ag))\n}\n\nfunc (ag *AutoGroup) Resolve(p *Policy, users types.Users, nodes views.Slice[types.NodeView]) (*netipx.IPSet, error) {\n\tvar build netipx.IPSetBuilder\n\n\tswitch *ag {\n\tcase AutoGroupInternet:\n\t\treturn util.TheInternet(), nil\n\n\tcase AutoGroupMember:\n\t\tfor _, node := range nodes.All() {\n\t\t\t// Skip if node is tagged\n\t\t\tif node.IsTagged() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Node is a member if it is not tagged\n\t\t\tnode.AppendToIPSet(&build)\n\t\t}\n\n\t\treturn build.IPSet()\n\n\tcase AutoGroupTagged:\n\t\tfor _, node := range nodes.All() {\n\t\t\t// Include if node is tagged\n\t\t\tif !node.IsTagged() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tnode.AppendToIPSet(&build)\n\t\t}\n\n\t\treturn build.IPSet()\n\n\tcase AutoGroupSelf:\n\t\t// autogroup:self represents all devices owned by the same user.\n\t\t// This cannot be resolved in the general context and should be handled\n\t\t// specially during policy compilation per-node for security.\n\t\treturn nil, ErrAutogroupSelfRequiresPerNodeResolution\n\n\tcase AutoGroupNonRoot:\n\t\t// autogroup:nonroot represents non-root users on multi-user devices.\n\t\t// This is not supported in headscale and requires OS-level user detection.\n\t\treturn nil, fmt.Errorf(\"%w: %q\", ErrUnknownAutogroup, *ag)\n\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"%w: %q\", ErrUnknownAutogroup, *ag)\n\t}\n}\n\nfunc (ag *AutoGroup) Is(c AutoGroup) bool {\n\tif ag == nil 
{\n\t\treturn false\n\t}\n\n\treturn *ag == c\n}\n\ntype Alias interface {\n\tValidate() error\n\tUnmarshalJSON(b []byte) error\n\n\t// Resolve resolves the Alias to an IPSet. The IPSet will contain all the IP\n\t// addresses that the Alias represents within Headscale. It is the product\n\t// of the Alias and the Policy, Users, and Nodes.\n\t// This is an interface definition and the implementation is independent of\n\t// the Alias type.\n\tResolve(pol *Policy, users types.Users, nodes views.Slice[types.NodeView]) (*netipx.IPSet, error)\n}\n\ntype AliasWithPorts struct {\n\tAlias\n\n\tPorts []tailcfg.PortRange\n}\n\nfunc (ve *AliasWithPorts) UnmarshalJSON(b []byte) error {\n\tvar v any\n\n\terr := json.Unmarshal(b, &v)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch vs := v.(type) {\n\tcase string:\n\t\tvar (\n\t\t\tportsPart string\n\t\t\terr       error\n\t\t)\n\n\t\tif strings.Contains(vs, \":\") {\n\t\t\tvs, portsPart, err = splitDestinationAndPort(vs)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tports, err := parsePortRange(portsPart)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tve.Ports = ports\n\t\t} else {\n\t\t\treturn ErrHostportMissingColon\n\t\t}\n\n\t\tve.Alias, err = parseAlias(vs)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := ve.Validate(); err != nil { //nolint:noinlineerr\n\t\t\treturn err\n\t\t}\n\n\tdefault:\n\t\treturn fmt.Errorf(\"%w: %T\", ErrTypeNotSupported, vs)\n\t}\n\n\treturn nil\n}\n\nfunc isWildcard(str string) bool {\n\treturn str == \"*\"\n}\n\nfunc isUser(str string) bool {\n\treturn strings.Contains(str, \"@\")\n}\n\nfunc isGroup(str string) bool {\n\treturn strings.HasPrefix(str, \"group:\")\n}\n\nfunc isTag(str string) bool {\n\treturn strings.HasPrefix(str, \"tag:\")\n}\n\nfunc isAutoGroup(str string) bool {\n\treturn strings.HasPrefix(str, \"autogroup:\")\n}\n\nfunc isHost(str string) bool {\n\treturn !isUser(str) && !strings.Contains(str, \":\")\n}\n\nfunc parseAlias(vs string) (Alias, error) {\n\tvar pref Prefix\n\n\terr := pref.parseString(vs)\n\tif err == nil {\n\t\treturn &pref, nil\n\t}\n\n\tswitch {\n\tcase isWildcard(vs):\n\t\treturn Wildcard, nil\n\tcase isUser(vs):\n\t\treturn new(Username(vs)), nil\n\tcase isGroup(vs):\n\t\treturn new(Group(vs)), nil\n\tcase isTag(vs):\n\t\treturn new(Tag(vs)), nil\n\tcase isAutoGroup(vs):\n\t\treturn new(AutoGroup(vs)), nil\n\t}\n\n\tif isHost(vs) {\n\t\treturn new(Host(vs)), nil\n\t}\n\n\treturn nil, fmt.Errorf(\"%w: %q\", ErrInvalidAlias, vs)\n}\n\n// AliasEnc is used to deserialize an Alias.\ntype AliasEnc struct{ Alias }\n\nfunc (ve *AliasEnc) UnmarshalJSON(b []byte) error {\n\tptr, err := unmarshalPointer(\n\t\tb,\n\t\tparseAlias,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tve.Alias = ptr\n\n\treturn nil\n}\n\ntype Aliases []Alias\n\nfunc (a *Aliases) UnmarshalJSON(b []byte) error {\n\tvar aliases []AliasEnc\n\n\terr := json.Unmarshal(b, &aliases, policyJSONOpts...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t*a = make([]Alias, len(aliases))\n\tfor i, alias := range aliases {\n\t\t(*a)[i] = alias.Alias\n\t}\n\n\treturn nil\n}\n\n// MarshalJSON marshals the Aliases to JSON.\nfunc (a *Aliases) MarshalJSON() ([]byte, error) {\n\tif *a == nil {\n\t\treturn []byte(\"[]\"), nil\n\t}\n\n\taliases := make([]string, len(*a))\n\tfor i, alias := range *a {\n\t\tswitch v := alias.(type) {\n\t\tcase *Username:\n\t\t\taliases[i] = string(*v)\n\t\tcase *Group:\n\t\t\taliases[i] = string(*v)\n\t\tcase *Tag:\n\t\t\taliases[i] = string(*v)\n\t\tcase 
*Host:\n\t\t\taliases[i] = string(*v)\n\t\tcase *Prefix:\n\t\t\taliases[i] = v.String()\n\t\tcase *AutoGroup:\n\t\t\taliases[i] = string(*v)\n\t\tcase Asterix:\n\t\t\taliases[i] = \"*\"\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"%w: %T\", ErrUnknownAliasType, v)\n\t\t}\n\t}\n\n\treturn json.Marshal(aliases)\n}\n\nfunc (a *Aliases) Resolve(p *Policy, users types.Users, nodes views.Slice[types.NodeView]) (*netipx.IPSet, error) {\n\tvar (\n\t\tips  netipx.IPSetBuilder\n\t\terrs []error\n\t)\n\n\tfor _, alias := range *a {\n\t\taips, err := alias.Resolve(p, users, nodes)\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\n\t\tips.AddSet(aips)\n\t}\n\n\treturn buildIPSetMultiErr(&ips, errs)\n}\n\nfunc buildIPSetMultiErr(ipBuilder *netipx.IPSetBuilder, errs []error) (*netipx.IPSet, error) {\n\tips, err := ipBuilder.IPSet()\n\treturn ips, multierr.New(append(errs, err)...)\n}\n\n// Helper function to unmarshal a JSON string into either an AutoApprover or Owner pointer.\nfunc unmarshalPointer[T any](\n\tb []byte,\n\tparseFunc func(string) (T, error),\n) (T, error) {\n\tvar s string\n\n\terr := json.Unmarshal(b, &s)\n\tif err != nil {\n\t\tvar t T\n\t\treturn t, err\n\t}\n\n\treturn parseFunc(s)\n}\n\ntype AutoApprover interface {\n\tCanBeAutoApprover() bool\n\tUnmarshalJSON(b []byte) error\n\tString() string\n}\n\ntype AutoApprovers []AutoApprover\n\nfunc (aa *AutoApprovers) UnmarshalJSON(b []byte) error {\n\tvar autoApprovers []AutoApproverEnc\n\n\terr := json.Unmarshal(b, &autoApprovers, policyJSONOpts...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t*aa = make([]AutoApprover, len(autoApprovers))\n\tfor i, autoApprover := range autoApprovers {\n\t\t(*aa)[i] = autoApprover.AutoApprover\n\t}\n\n\treturn nil\n}\n\n// MarshalJSON marshals the AutoApprovers to JSON.\nfunc (aa AutoApprovers) MarshalJSON() ([]byte, error) {\n\tif aa == nil {\n\t\treturn []byte(\"[]\"), nil\n\t}\n\n\tapprovers := make([]string, len(aa))\n\tfor i, approver := range aa {\n\t\tswitch v := approver.(type) {\n\t\tcase *Username:\n\t\t\tapprovers[i] = string(*v)\n\t\tcase *Tag:\n\t\t\tapprovers[i] = string(*v)\n\t\tcase *Group:\n\t\t\tapprovers[i] = string(*v)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"%w: %T\", ErrUnknownAutoApprover, v)\n\t\t}\n\t}\n\n\treturn json.Marshal(approvers)\n}\n\nfunc parseAutoApprover(s string) (AutoApprover, error) {\n\tswitch {\n\tcase isUser(s):\n\t\treturn new(Username(s)), nil\n\tcase isGroup(s):\n\t\treturn new(Group(s)), nil\n\tcase isTag(s):\n\t\treturn new(Tag(s)), nil\n\t}\n\n\treturn nil, fmt.Errorf(\"%w: %q\", ErrInvalidAutoApprover, s)\n}\n\n// AutoApproverEnc is used to deserialize an AutoApprover.\ntype AutoApproverEnc struct{ AutoApprover }\n\nfunc (ve *AutoApproverEnc) UnmarshalJSON(b []byte) error {\n\tptr, err := unmarshalPointer(\n\t\tb,\n\t\tparseAutoApprover,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tve.AutoApprover = ptr\n\n\treturn nil\n}\n\ntype Owner interface {\n\tCanBeTagOwner() bool\n\tUnmarshalJSON(b []byte) error\n\tString() string\n}\n\n// OwnerEnc is used to deserialize an Owner.\ntype OwnerEnc struct{ Owner }\n\nfunc (ve *OwnerEnc) UnmarshalJSON(b []byte) error {\n\tptr, err := unmarshalPointer(\n\t\tb,\n\t\tparseOwner,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tve.Owner = ptr\n\n\treturn nil\n}\n\ntype Owners []Owner\n\nfunc (o *Owners) UnmarshalJSON(b []byte) error {\n\tvar owners []OwnerEnc\n\n\terr := json.Unmarshal(b, &owners, policyJSONOpts...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t*o = make([]Owner, len(owners))\n\tfor 
i, owner := range owners {\n\t\t(*o)[i] = owner.Owner\n\t}\n\n\treturn nil\n}\n\n// MarshalJSON marshals the Owners to JSON.\nfunc (o Owners) MarshalJSON() ([]byte, error) {\n\tif o == nil {\n\t\treturn []byte(\"[]\"), nil\n\t}\n\n\towners := make([]string, len(o))\n\tfor i, owner := range o {\n\t\tswitch v := owner.(type) {\n\t\tcase *Username:\n\t\t\towners[i] = string(*v)\n\t\tcase *Group:\n\t\t\towners[i] = string(*v)\n\t\tcase *Tag:\n\t\t\towners[i] = string(*v)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"%w: %T\", ErrUnknownOwnerType, v)\n\t\t}\n\t}\n\n\treturn json.Marshal(owners)\n}\n\nfunc parseOwner(s string) (Owner, error) {\n\tswitch {\n\tcase isUser(s):\n\t\treturn new(Username(s)), nil\n\tcase isGroup(s):\n\t\treturn new(Group(s)), nil\n\tcase isTag(s):\n\t\treturn new(Tag(s)), nil\n\t}\n\n\treturn nil, fmt.Errorf(\"%w: %q\", ErrInvalidOwner, s)\n}\n\ntype Usernames []Username\n\n// Groups are a map of Group to a list of Username.\ntype Groups map[Group]Usernames\n\nfunc (g *Groups) Contains(group *Group) error {\n\tif group == nil {\n\t\treturn nil\n\t}\n\n\tfor defined := range map[Group]Usernames(*g) {\n\t\tif defined == *group {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn fmt.Errorf(\"%w: %q\", ErrGroupNotDefined, group)\n}\n\n// UnmarshalJSON overrides the default JSON unmarshalling for Groups to ensure\n// that each group name is validated using the isGroup function. This ensures\n// that all group names conform to the expected format, which is always prefixed\n// with \"group:\". If any group name is invalid, an error is returned.\nfunc (g *Groups) UnmarshalJSON(b []byte) error {\n\t// First unmarshal into a generic map so the group names can be validated\n\tvar rawMap map[string]any\n\n\terr := json.Unmarshal(b, &rawMap)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Validate group names first before checking data types\n\tfor key := range rawMap {\n\t\tgroup := Group(key)\n\n\t\terr := group.Validate()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Then validate that each field can be converted to []string\n\trawGroups := make(map[string][]string)\n\n\tfor key, value := range rawMap {\n\t\tswitch v := value.(type) {\n\t\tcase []any:\n\t\t\t// Convert []interface{} to []string\n\t\t\tvar stringSlice []string\n\n\t\t\tfor _, item := range v {\n\t\t\t\tif str, ok := item.(string); ok {\n\t\t\t\t\tstringSlice = append(stringSlice, str)\n\t\t\t\t} else {\n\t\t\t\t\treturn fmt.Errorf(\"%w: group %q expected string but got %T\", ErrInvalidGroupMember, key, item)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\trawGroups[key] = stringSlice\n\t\tcase string:\n\t\t\treturn fmt.Errorf(\"%w: group %q got string: %q\", ErrGroupValueNotArray, key, v)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"%w: group %q got %T\", ErrGroupValueNotArray, key, v)\n\t\t}\n\t}\n\n\t*g = make(Groups)\n\n\tfor key, value := range rawGroups {\n\t\tgroup := Group(key)\n\t\t// Group name already validated above\n\t\tvar usernames Usernames\n\n\t\tfor _, u := range value {\n\t\t\tusername := Username(u)\n\n\t\t\terr := username.Validate()\n\t\t\tif err != nil {\n\t\t\t\tif isGroup(u) {\n\t\t\t\t\treturn fmt.Errorf(\"%w: found %q inside %q\", ErrNestedGroups, u, group)\n\t\t\t\t}\n\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tusernames = append(usernames, username)\n\t\t}\n\n\t\t(*g)[group] = usernames\n\t}\n\n\treturn nil\n}\n\n// Hosts are aliases for IP addresses or subnets.\ntype Hosts map[Host]Prefix\n\nfunc (h *Hosts) UnmarshalJSON(b []byte) error {\n\tvar rawHosts map[string]string\n\n\terr := json.Unmarshal(b, &rawHosts, 
policyJSONOpts...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t*h = make(Hosts)\n\n\tfor key, value := range rawHosts {\n\t\thost := Host(key)\n\n\t\terr := host.Validate()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvar prefix Prefix\n\n\t\terr = prefix.parseString(value)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%w: hostname %q address %q\", ErrInvalidHostIP, key, value)\n\t\t}\n\n\t\t(*h)[host] = prefix\n\t}\n\n\treturn nil\n}\n\n// MarshalJSON marshals the Hosts to JSON.\nfunc (h *Hosts) MarshalJSON() ([]byte, error) {\n\tif *h == nil {\n\t\treturn []byte(\"{}\"), nil\n\t}\n\n\trawHosts := make(map[string]string)\n\tfor host, prefix := range *h {\n\t\trawHosts[string(host)] = prefix.String()\n\t}\n\n\treturn json.Marshal(rawHosts)\n}\n\nfunc (h *Hosts) exist(name Host) bool {\n\t_, ok := (*h)[name]\n\treturn ok\n}\n\n// MarshalJSON marshals the TagOwners to JSON.\nfunc (to TagOwners) MarshalJSON() ([]byte, error) {\n\tif to == nil {\n\t\treturn []byte(\"{}\"), nil\n\t}\n\n\trawTagOwners := make(map[string][]string)\n\n\tfor tag, owners := range to {\n\t\ttagStr := string(tag)\n\t\townerStrs := make([]string, len(owners))\n\n\t\tfor i, owner := range owners {\n\t\t\tswitch v := owner.(type) {\n\t\t\tcase *Username:\n\t\t\t\townerStrs[i] = string(*v)\n\t\t\tcase *Group:\n\t\t\t\townerStrs[i] = string(*v)\n\t\t\tcase *Tag:\n\t\t\t\townerStrs[i] = string(*v)\n\t\t\tdefault:\n\t\t\t\treturn nil, fmt.Errorf(\"%w: %T\", ErrUnknownOwnerType, v)\n\t\t\t}\n\t\t}\n\n\t\trawTagOwners[tagStr] = ownerStrs\n\t}\n\n\treturn json.Marshal(rawTagOwners)\n}\n\n// TagOwners are a map of Tag to a list of the UserEntities that own the tag.\ntype TagOwners map[Tag]Owners\n\nfunc (to TagOwners) Contains(tagOwner *Tag) error {\n\tif tagOwner == nil {\n\t\treturn nil\n\t}\n\n\tfor defined := range map[Tag]Owners(to) {\n\t\tif defined == *tagOwner {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn fmt.Errorf(\"%w: %q\", ErrTagNotDefined, tagOwner)\n}\n\ntype AutoApproverPolicy struct {\n\tRoutes   map[netip.Prefix]AutoApprovers `json:\"routes,omitempty\"`\n\tExitNode AutoApprovers                  `json:\"exitNode,omitempty\"`\n}\n\n// MarshalJSON marshals the AutoApproverPolicy to JSON.\nfunc (ap AutoApproverPolicy) MarshalJSON() ([]byte, error) {\n\t// Marshal empty policies as empty object\n\tif ap.Routes == nil && ap.ExitNode == nil {\n\t\treturn []byte(\"{}\"), nil\n\t}\n\n\ttype Alias AutoApproverPolicy\n\n\t// Create a new object to avoid marshalling nil slices as null instead of empty arrays\n\tobj := Alias(ap)\n\n\t// Initialize empty maps/slices to ensure they're marshalled as empty objects/arrays instead of null\n\tif obj.Routes == nil {\n\t\tobj.Routes = make(map[netip.Prefix]AutoApprovers)\n\t}\n\n\tif obj.ExitNode == nil {\n\t\tobj.ExitNode = AutoApprovers{}\n\t}\n\n\treturn json.Marshal(&obj)\n}\n\n// resolveAutoApprovers resolves the AutoApprovers to a map of netip.Prefix to netipx.IPSet.\n// The resulting map can be used to quickly look up if a node can self-approve a route.\n// It is intended for internal use in a PolicyManager.\nfunc resolveAutoApprovers(p *Policy, users types.Users, nodes views.Slice[types.NodeView]) (map[netip.Prefix]*netipx.IPSet, *netipx.IPSet, error) {\n\tif p == nil {\n\t\treturn nil, nil, nil\n\t}\n\n\tvar err error\n\n\troutes := make(map[netip.Prefix]*netipx.IPSetBuilder)\n\n\tfor prefix, autoApprovers := range p.AutoApprovers.Routes {\n\t\tif _, ok := routes[prefix]; !ok {\n\t\t\troutes[prefix] = new(netipx.IPSetBuilder)\n\t\t}\n\n\t\tfor _, autoApprover := 
range autoApprovers {\n\t\t\taa, ok := autoApprover.(Alias)\n\t\t\tif !ok {\n\t\t\t\t// Should never happen\n\t\t\t\treturn nil, nil, fmt.Errorf(\"%w: %v\", ErrAutoApproverNotAlias, autoApprover)\n\t\t\t}\n\t\t\t// If it does not resolve, that means the autoApprover is not associated with any IP addresses.\n\t\t\tips, _ := aa.Resolve(p, users, nodes)\n\t\t\troutes[prefix].AddSet(ips)\n\t\t}\n\t}\n\n\tvar exitNodeSetBuilder netipx.IPSetBuilder\n\n\tif len(p.AutoApprovers.ExitNode) > 0 {\n\t\tfor _, autoApprover := range p.AutoApprovers.ExitNode {\n\t\t\taa, ok := autoApprover.(Alias)\n\t\t\tif !ok {\n\t\t\t\t// Should never happen\n\t\t\t\treturn nil, nil, fmt.Errorf(\"%w: %v\", ErrAutoApproverNotAlias, autoApprover)\n\t\t\t}\n\t\t\t// If it does not resolve, that means the autoApprover is not associated with any IP addresses.\n\t\t\tips, _ := aa.Resolve(p, users, nodes)\n\t\t\texitNodeSetBuilder.AddSet(ips)\n\t\t}\n\t}\n\n\tret := make(map[netip.Prefix]*netipx.IPSet)\n\n\tfor prefix, builder := range routes {\n\t\tipSet, err := builder.IPSet()\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\tret[prefix] = ipSet\n\t}\n\n\tvar exitNodeSet *netipx.IPSet\n\tif len(p.AutoApprovers.ExitNode) > 0 {\n\t\texitNodeSet, err = exitNodeSetBuilder.IPSet()\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\n\treturn ret, exitNodeSet, nil\n}\n\n// Action represents the action to take for an ACL rule.\ntype Action string\n\nconst (\n\tActionAccept Action = \"accept\"\n)\n\n// SSHAction represents the action to take for an SSH rule.\ntype SSHAction string\n\nconst (\n\tSSHActionAccept SSHAction = \"accept\"\n\tSSHActionCheck  SSHAction = \"check\"\n)\n\n// String returns the string representation of the Action.\nfunc (a *Action) String() string {\n\treturn string(*a)\n}\n\n// UnmarshalJSON implements JSON unmarshaling for Action.\nfunc (a *Action) UnmarshalJSON(b []byte) error {\n\tstr := strings.Trim(string(b), `\"`)\n\tswitch str {\n\tcase \"accept\":\n\t\t*a = ActionAccept\n\tdefault:\n\t\treturn fmt.Errorf(\"%w: %q, must be %q\", ErrInvalidACLAction, str, ActionAccept)\n\t}\n\n\treturn nil\n}\n\n// MarshalJSON implements JSON marshaling for Action.\nfunc (a *Action) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(string(*a))\n}\n\n// String returns the string representation of the SSHAction.\nfunc (a *SSHAction) String() string {\n\treturn string(*a)\n}\n\n// UnmarshalJSON implements JSON unmarshaling for SSHAction.\nfunc (a *SSHAction) UnmarshalJSON(b []byte) error {\n\tstr := strings.Trim(string(b), `\"`)\n\tswitch str {\n\tcase \"accept\":\n\t\t*a = SSHActionAccept\n\tcase \"check\":\n\t\t*a = SSHActionCheck\n\tdefault:\n\t\treturn fmt.Errorf(\"%w: %q, must be one of: accept, check\", ErrInvalidSSHAction, str)\n\t}\n\n\treturn nil\n}\n\n// MarshalJSON implements JSON marshaling for SSHAction.\nfunc (a *SSHAction) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(string(*a))\n}\n\n// Protocol represents a network protocol with its IANA number and descriptions.\ntype Protocol string\n\nconst (\n\tProtocolNameICMP     Protocol = \"icmp\"\n\tProtocolNameIGMP     Protocol = \"igmp\"\n\tProtocolNameIPv4     Protocol = \"ipv4\"\n\tProtocolNameIPInIP   Protocol = \"ip-in-ip\"\n\tProtocolNameTCP      Protocol = \"tcp\"\n\tProtocolNameEGP      Protocol = \"egp\"\n\tProtocolNameIGP      Protocol = \"igp\"\n\tProtocolNameUDP      Protocol = \"udp\"\n\tProtocolNameGRE      Protocol = \"gre\"\n\tProtocolNameESP      Protocol = \"esp\"\n\tProtocolNameAH       Protocol = 
\"ah\"\n\tProtocolNameIPv6ICMP Protocol = \"ipv6-icmp\"\n\tProtocolNameSCTP     Protocol = \"sctp\"\n\tProtocolNameFC       Protocol = \"fc\"\n\tProtocolNameWildcard Protocol = \"*\"\n)\n\n// String returns the string representation of the Protocol.\nfunc (p *Protocol) String() string {\n\treturn string(*p)\n}\n\n// Description returns the human-readable description of the Protocol.\nfunc (p *Protocol) Description() string {\n\tswitch *p {\n\tcase ProtocolNameICMP:\n\t\treturn \"Internet Control Message Protocol\"\n\tcase ProtocolNameIGMP:\n\t\treturn \"Internet Group Management Protocol\"\n\tcase ProtocolNameIPv4:\n\t\treturn \"IPv4 encapsulation\"\n\tcase ProtocolNameTCP:\n\t\treturn \"Transmission Control Protocol\"\n\tcase ProtocolNameEGP:\n\t\treturn \"Exterior Gateway Protocol\"\n\tcase ProtocolNameIGP:\n\t\treturn \"Interior Gateway Protocol\"\n\tcase ProtocolNameUDP:\n\t\treturn \"User Datagram Protocol\"\n\tcase ProtocolNameGRE:\n\t\treturn \"Generic Routing Encapsulation\"\n\tcase ProtocolNameESP:\n\t\treturn \"Encapsulating Security Payload\"\n\tcase ProtocolNameAH:\n\t\treturn \"Authentication Header\"\n\tcase ProtocolNameIPv6ICMP:\n\t\treturn \"Internet Control Message Protocol for IPv6\"\n\tcase ProtocolNameSCTP:\n\t\treturn \"Stream Control Transmission Protocol\"\n\tcase ProtocolNameFC:\n\t\treturn \"Fibre Channel\"\n\tcase ProtocolNameIPInIP:\n\t\treturn \"IP-in-IP Encapsulation\"\n\tcase ProtocolNameWildcard:\n\t\treturn \"Wildcard (not supported - use specific protocol)\"\n\tdefault:\n\t\treturn \"Unknown Protocol\"\n\t}\n}\n\n// parseProtocol converts a Protocol to its IANA protocol numbers.\n// Since validation happens during UnmarshalJSON, this method should not fail for valid Protocol values.\nfunc (p *Protocol) parseProtocol() []int {\n\tswitch *p {\n\tcase \"\":\n\t\t// Empty protocol applies to TCP, UDP, ICMP, and ICMPv6 traffic\n\t\t// This matches Tailscale's behavior for protocol defaults\n\t\treturn []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP}\n\tcase ProtocolNameWildcard:\n\t\t// Wildcard protocol - defensive handling (should not reach here due to validation)\n\t\treturn nil\n\tcase ProtocolNameIGMP:\n\t\treturn []int{ProtocolIGMP}\n\tcase ProtocolNameIPv4, ProtocolNameIPInIP:\n\t\treturn []int{ProtocolIPv4}\n\tcase ProtocolNameTCP:\n\t\treturn []int{ProtocolTCP}\n\tcase ProtocolNameEGP:\n\t\treturn []int{ProtocolEGP}\n\tcase ProtocolNameIGP:\n\t\treturn []int{ProtocolIGP}\n\tcase ProtocolNameUDP:\n\t\treturn []int{ProtocolUDP}\n\tcase ProtocolNameGRE:\n\t\treturn []int{ProtocolGRE}\n\tcase ProtocolNameESP:\n\t\treturn []int{ProtocolESP}\n\tcase ProtocolNameAH:\n\t\treturn []int{ProtocolAH}\n\tcase ProtocolNameSCTP:\n\t\treturn []int{ProtocolSCTP}\n\tcase ProtocolNameICMP:\n\t\t// ICMP only - use \"ipv6-icmp\" or protocol number 58 for ICMPv6\n\t\treturn []int{ProtocolICMP}\n\tcase ProtocolNameIPv6ICMP:\n\t\treturn []int{ProtocolIPv6ICMP}\n\tcase ProtocolNameFC:\n\t\treturn []int{ProtocolFC}\n\tdefault:\n\t\t// Try to parse as a numeric protocol number\n\t\t// This should not fail since validation happened during unmarshaling\n\t\tprotocolNumber, _ := strconv.Atoi(string(*p))\n\t\treturn []int{protocolNumber}\n\t}\n}\n\n// UnmarshalJSON implements JSON unmarshaling for Protocol.\nfunc (p *Protocol) UnmarshalJSON(b []byte) error {\n\tstr := strings.Trim(string(b), `\"`)\n\n\t// Normalize to lowercase for case-insensitive matching\n\t*p = Protocol(strings.ToLower(str))\n\n\t// Validate the protocol\n\terr := p.validate()\n\tif err != nil 
{\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// validate checks if the Protocol is valid.\nfunc (p *Protocol) validate() error {\n\tswitch *p {\n\tcase \"\", ProtocolNameICMP, ProtocolNameIGMP, ProtocolNameIPv4, ProtocolNameIPInIP,\n\t\tProtocolNameTCP, ProtocolNameEGP, ProtocolNameIGP, ProtocolNameUDP, ProtocolNameGRE,\n\t\tProtocolNameESP, ProtocolNameAH, ProtocolNameSCTP, ProtocolNameIPv6ICMP, ProtocolNameFC:\n\t\treturn nil\n\tcase ProtocolNameWildcard:\n\t\t// Wildcard \"*\" is not allowed - Tailscale rejects it\n\t\treturn errUnknownProtocolWildcard\n\tdefault:\n\t\t// Try to parse as a numeric protocol number\n\t\tstr := string(*p)\n\n\t\t// Check for leading zeros (not allowed by Tailscale)\n\t\tif str == \"0\" || (len(str) > 1 && str[0] == '0') {\n\t\t\treturn fmt.Errorf(\"%w: %q\", ErrProtocolLeadingZero, str)\n\t\t}\n\n\t\tprotocolNumber, err := strconv.Atoi(str)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%w: %q must be a known protocol name or valid protocol number 0-255\", ErrInvalidProtocolNumber, *p)\n\t\t}\n\n\t\tif protocolNumber < 0 || protocolNumber > 255 {\n\t\t\treturn fmt.Errorf(\"%w: %d\", ErrProtocolOutOfRange, protocolNumber)\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\n// MarshalJSON implements JSON marshaling for Protocol.\nfunc (p *Protocol) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(string(*p))\n}\n\n// Protocol constants matching the IANA numbers.\nconst (\n\tProtocolICMP     = 1   // Internet Control Message\n\tProtocolIGMP     = 2   // Internet Group Management\n\tProtocolIPv4     = 4   // IPv4 encapsulation\n\tProtocolTCP      = 6   // Transmission Control\n\tProtocolEGP      = 8   // Exterior Gateway Protocol\n\tProtocolIGP      = 9   // any private interior gateway (used by Cisco for their IGRP)\n\tProtocolUDP      = 17  // User Datagram\n\tProtocolGRE      = 47  // Generic Routing Encapsulation\n\tProtocolESP      = 50  // Encap Security Payload\n\tProtocolAH       = 51  // Authentication Header\n\tProtocolIPv6ICMP = 58  // ICMP for IPv6\n\tProtocolSCTP     = 132 // Stream Control Transmission Protocol\n\tProtocolFC       = 133 // Fibre Channel\n)\n\ntype ACL struct {\n\tAction       Action           `json:\"action\"`\n\tProtocol     Protocol         `json:\"proto\"`\n\tSources      Aliases          `json:\"src\"`\n\tDestinations []AliasWithPorts `json:\"dst\"`\n}\n\n// UnmarshalJSON implements custom unmarshalling for ACL that ignores fields starting with '#'.\n// headscale-admin uses # in some field names to add metadata, so we will ignore\n// those to ensure it doesn't break.\n// https://github.com/GoodiesHQ/headscale-admin/blob/214a44a9c15c92d2b42383f131b51df10c84017c/src/lib/common/acl.svelte.ts#L38\nfunc (a *ACL) UnmarshalJSON(b []byte) error {\n\t// First unmarshal into a map to filter out comment fields\n\tvar raw map[string]any\n\tif err := json.Unmarshal(b, &raw, policyJSONOpts...); err != nil { //nolint:noinlineerr\n\t\treturn err\n\t}\n\n\t// Remove any fields that start with '#'\n\tfiltered := make(map[string]any)\n\n\tfor key, value := range raw {\n\t\tif !strings.HasPrefix(key, \"#\") {\n\t\t\tfiltered[key] = value\n\t\t}\n\t}\n\n\t// Marshal the filtered map back to JSON\n\tfilteredBytes, err := json.Marshal(filtered)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Create a type alias to avoid infinite recursion\n\ttype aclAlias ACL\n\n\tvar temp aclAlias\n\n\t// Unmarshal into the temporary struct using the v2 JSON options\n\tif err := json.Unmarshal(filteredBytes, &temp, policyJSONOpts...); err != nil { 
//nolint:noinlineerr\n\t\treturn err\n\t}\n\n\t// Copy the result back to the original struct\n\t*a = ACL(temp)\n\n\treturn nil\n}\n\n// Policy represents a Tailscale Network Policy.\n// TODO(kradalby):\n// Add validation method checking:\n// All users exist\n// All groups and users are valid TagOwners\n// Everything referred to in ACLs exists in other\n// entities.\ntype Policy struct {\n\t// validated is set if the policy has been validated.\n\t// The policy is not safe to use before it has been validated,\n\t// and callers that encounter an unvalidated policy should panic.\n\tvalidated bool `json:\"-\"`\n\n\tGroups        Groups             `json:\"groups,omitempty\"`\n\tHosts         Hosts              `json:\"hosts,omitempty\"`\n\tTagOwners     TagOwners          `json:\"tagOwners,omitempty\"`\n\tACLs          []ACL              `json:\"acls,omitempty\"`\n\tAutoApprovers AutoApproverPolicy `json:\"autoApprovers\"`\n\tSSHs          []SSH              `json:\"ssh,omitempty\"`\n}\n\n// MarshalJSON is deliberately not implemented for Policy.\n// We use the default JSON marshalling behavior provided by the Go runtime.\n\nvar (\n\t// TODO(kradalby): Add these checks for tagOwners and autoApprovers.\n\tautogroupForSrc       = []AutoGroup{AutoGroupMember, AutoGroupTagged}\n\tautogroupForDst       = []AutoGroup{AutoGroupInternet, AutoGroupMember, AutoGroupTagged, AutoGroupSelf}\n\tautogroupForSSHSrc    = []AutoGroup{AutoGroupMember, AutoGroupTagged}\n\tautogroupForSSHDst    = []AutoGroup{AutoGroupMember, AutoGroupTagged, AutoGroupSelf}\n\tautogroupForSSHUser   = []AutoGroup{AutoGroupNonRoot}\n\tautogroupNotSupported = []AutoGroup{}\n\n\terrUnknownProtocolWildcard = errors.New(\"proto name \\\"*\\\" not known; use protocol number 0-255 or protocol name (icmp, tcp, udp, etc.)\")\n)\n\nfunc validateAutogroupSupported(ag *AutoGroup) error {\n\tif ag == nil {\n\t\treturn nil\n\t}\n\n\tif slices.Contains(autogroupNotSupported, *ag) {\n\t\treturn fmt.Errorf(\"%w: %q\", ErrAutogroupNotSupported, *ag)\n\t}\n\n\treturn nil\n}\n\nfunc validateAutogroupForSrc(src *AutoGroup) error {\n\tif src == nil {\n\t\treturn nil\n\t}\n\n\tif src.Is(AutoGroupInternet) {\n\t\treturn ErrAutogroupInternetSrc\n\t}\n\n\tif src.Is(AutoGroupSelf) {\n\t\treturn ErrAutogroupSelfSrc\n\t}\n\n\tif !slices.Contains(autogroupForSrc, *src) {\n\t\treturn fmt.Errorf(\"%w: %q, can be %v\", ErrAutogroupNotSupportedACLSrc, *src, autogroupForSrc)\n\t}\n\n\treturn nil\n}\n\nfunc validateAutogroupForDst(dst *AutoGroup) error {\n\tif dst == nil {\n\t\treturn nil\n\t}\n\n\tif !slices.Contains(autogroupForDst, *dst) {\n\t\treturn fmt.Errorf(\"%w: %q, can be %v\", ErrAutogroupNotSupportedACLDst, *dst, autogroupForDst)\n\t}\n\n\treturn nil\n}\n\nfunc validateAutogroupForSSHSrc(src *AutoGroup) error {\n\tif src == nil {\n\t\treturn nil\n\t}\n\n\tif src.Is(AutoGroupInternet) {\n\t\treturn ErrAutogroupInternetSrc\n\t}\n\n\tif !slices.Contains(autogroupForSSHSrc, *src) {\n\t\treturn fmt.Errorf(\"%w: %q, can be %v\", ErrAutogroupNotSupportedSSHSrc, *src, autogroupForSSHSrc)\n\t}\n\n\treturn nil\n}\n\nfunc validateAutogroupForSSHDst(dst *AutoGroup) error {\n\tif dst == nil {\n\t\treturn nil\n\t}\n\n\tif dst.Is(AutoGroupInternet) {\n\t\t// autogroup:internet is not allowed as an SSH destination either;\n\t\t// the same sentinel error as for sources is reused here.\n\t\treturn ErrAutogroupInternetSrc\n\t}\n\n\tif !slices.Contains(autogroupForSSHDst, *dst) {\n\t\treturn fmt.Errorf(\"%w: %q, can be %v\", ErrAutogroupNotSupportedSSHDst, *dst, autogroupForSSHDst)\n\t}\n\n\treturn nil\n}\n\nfunc validateAutogroupForSSHUser(user *AutoGroup) error {\n\tif user == nil {\n\t\treturn nil\n\t}\n\n\tif 
!slices.Contains(autogroupForSSHUser, *user) {\n\t\treturn fmt.Errorf(\"%w: %q, can be %v\", ErrAutogroupNotSupportedSSHUsr, *user, autogroupForSSHUser)\n\t}\n\n\treturn nil\n}\n\n// validateSSHSrcDstCombination validates that SSH source/destination combinations\n// follow Tailscale's security model:\n// - Destination can be: tags, autogroup:self (if source is users/groups), or same-user\n// - Tags/autogroup:tagged CANNOT SSH to user destinations\n// - Username destinations require the source to be that same single user only.\nfunc validateSSHSrcDstCombination(sources SSHSrcAliases, destinations SSHDstAliases) error {\n\t// Categorize source types\n\tsrcHasTaggedEntities := false\n\tsrcHasGroups := false\n\tsrcUsernames := make(map[string]bool)\n\n\tfor _, src := range sources {\n\t\tswitch v := src.(type) {\n\t\tcase *Tag:\n\t\t\tsrcHasTaggedEntities = true\n\t\tcase *AutoGroup:\n\t\t\tif v.Is(AutoGroupTagged) {\n\t\t\t\tsrcHasTaggedEntities = true\n\t\t\t} else if v.Is(AutoGroupMember) {\n\t\t\t\tsrcHasGroups = true // autogroup:member is like a group of users\n\t\t\t}\n\t\tcase *Group:\n\t\t\tsrcHasGroups = true\n\t\tcase *Username:\n\t\t\tsrcUsernames[string(*v)] = true\n\t\t}\n\t}\n\n\t// Check destinations against source constraints\n\tfor _, dst := range destinations {\n\t\tswitch v := dst.(type) {\n\t\tcase *Username:\n\t\t\t// Rule: Tags/autogroup:tagged CANNOT SSH to user destinations\n\t\t\tif srcHasTaggedEntities {\n\t\t\t\treturn fmt.Errorf(\"%w (%s); use autogroup:tagged or specific tags as destinations instead\",\n\t\t\t\t\tErrSSHTagSourceToUserDest, *v)\n\t\t\t}\n\t\t\t// Rule: Username destination requires source to be that same single user only\n\t\t\tif srcHasGroups || len(srcUsernames) != 1 || !srcUsernames[string(*v)] {\n\t\t\t\treturn fmt.Errorf(\"%w %q; use autogroup:self instead for same-user SSH access\",\n\t\t\t\t\tErrSSHUserDestRequiresSameUser, *v)\n\t\t\t}\n\t\tcase *AutoGroup:\n\t\t\t// Rule: autogroup:self requires source to NOT contain tags\n\t\t\tif v.Is(AutoGroupSelf) && srcHasTaggedEntities {\n\t\t\t\treturn ErrSSHAutogroupSelfRequiresUserSource\n\t\t\t}\n\t\t\t// Rule: autogroup:member (user-owned devices) cannot be accessed by tagged entities\n\t\t\tif v.Is(AutoGroupMember) && srcHasTaggedEntities {\n\t\t\t\treturn ErrSSHTagSourceToAutogroupMember\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// validateACLSrcDstCombination validates that ACL source/destination combinations\n// follow Tailscale's security model:\n// - autogroup:self destinations require ALL sources to be users, groups, autogroup:member, or wildcard (*)\n// - Tags, autogroup:tagged, hosts, and raw IPs are NOT valid sources for autogroup:self\n// - Wildcard (*) is allowed because autogroup:self evaluation narrows it per-node to the node's own IPs.\nfunc validateACLSrcDstCombination(sources Aliases, destinations []AliasWithPorts) error {\n\t// Check if any destination is autogroup:self\n\thasAutogroupSelf := false\n\n\tfor _, dst := range destinations {\n\t\tif ag, ok := dst.Alias.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {\n\t\t\thasAutogroupSelf = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !hasAutogroupSelf {\n\t\treturn nil // No autogroup:self, no validation needed\n\t}\n\n\t// Validate all sources are valid for autogroup:self\n\tfor _, src := range sources {\n\t\tswitch v := src.(type) {\n\t\tcase *Username, *Group, Asterix:\n\t\t\t// Valid sources - users, groups, and wildcard (*) are allowed\n\t\t\t// Wildcard is allowed because autogroup:self evaluation narrows it 
per-node\n\t\t\tcontinue\n\t\tcase *AutoGroup:\n\t\t\tif v.Is(AutoGroupMember) {\n\t\t\t\tcontinue // autogroup:member is valid\n\t\t\t}\n\t\t\t// autogroup:tagged and others are NOT valid\n\t\t\treturn ErrACLAutogroupSelfInvalidSource\n\t\tcase *Tag, *Host, *Prefix:\n\t\t\t// Tags, hosts, and IPs are NOT valid sources for autogroup:self\n\t\t\treturn ErrACLAutogroupSelfInvalidSource\n\t\tdefault:\n\t\t\t// Unknown type - be conservative and reject\n\t\t\treturn ErrACLAutogroupSelfInvalidSource\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// validate reports if there are any errors in a policy after\n// the unmarshaling process.\n// It runs through all rules and checks if there are any inconsistencies\n// in the policy that needs to be addressed before it can be used.\n//\n//nolint:gocyclo // comprehensive policy validation\nfunc (p *Policy) validate() error {\n\tif p == nil {\n\t\tpanic(\"passed nil policy\")\n\t}\n\n\t// All errors are collected and presented to the user,\n\t// when adding more validation, please add to the list of errors.\n\tvar errs []error\n\n\tfor _, acl := range p.ACLs {\n\t\tfor _, src := range acl.Sources {\n\t\t\tswitch src := src.(type) {\n\t\t\tcase *Host:\n\t\t\t\th := src\n\t\t\t\tif !p.Hosts.exist(*h) {\n\t\t\t\t\terrs = append(errs, fmt.Errorf(\"%w: %q\", ErrHostNotDefined, *h))\n\t\t\t\t}\n\t\t\tcase *AutoGroup:\n\t\t\t\tag := src\n\n\t\t\t\terr := validateAutogroupSupported(ag)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\terr = validateAutogroupForSrc(ag)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase *Group:\n\t\t\t\tg := src\n\n\t\t\t\terr := p.Groups.Contains(g)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t}\n\t\t\tcase *Tag:\n\t\t\t\ttagOwner := src\n\n\t\t\t\terr := p.TagOwners.Contains(tagOwner)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor _, dst := range acl.Destinations {\n\t\t\tswitch h := dst.Alias.(type) {\n\t\t\tcase *Host:\n\t\t\t\tif !p.Hosts.exist(*h) {\n\t\t\t\t\terrs = append(errs, fmt.Errorf(\"%w: %q\", ErrHostNotDefined, *h))\n\t\t\t\t}\n\t\t\tcase *AutoGroup:\n\t\t\t\terr := validateAutogroupSupported(h)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\terr = validateAutogroupForDst(h)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase *Group:\n\t\t\t\terr := p.Groups.Contains(h)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t}\n\t\t\tcase *Tag:\n\t\t\t\terr := p.TagOwners.Contains(h)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Validate protocol-port compatibility\n\t\tif err := validateProtocolPortCompatibility(acl.Protocol, acl.Destinations); err != nil { //nolint:noinlineerr\n\t\t\terrs = append(errs, err)\n\t\t}\n\n\t\t// Validate ACL source/destination combinations follow Tailscale's security model\n\t\terr := validateACLSrcDstCombination(acl.Sources, acl.Destinations)\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\n\tfor _, ssh := range p.SSHs {\n\t\tfor _, user := range ssh.Users {\n\t\t\tif strings.HasPrefix(string(user), \"autogroup:\") {\n\t\t\t\tmaybeAuto := AutoGroup(user)\n\n\t\t\t\terr := validateAutogroupForSSHUser(&maybeAuto)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif 
user.IsLocalpart() {\n\t\t\t\t_, err := user.ParseLocalpart()\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor _, src := range ssh.Sources {\n\t\t\tswitch src := src.(type) {\n\t\t\tcase *AutoGroup:\n\t\t\t\tag := src\n\n\t\t\t\terr := validateAutogroupSupported(ag)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\terr = validateAutogroupForSSHSrc(ag)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase *Group:\n\t\t\t\tg := src\n\n\t\t\t\terr := p.Groups.Contains(g)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t}\n\t\t\tcase *Tag:\n\t\t\t\ttagOwner := src\n\n\t\t\t\terr := p.TagOwners.Contains(tagOwner)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor _, dst := range ssh.Destinations {\n\t\t\tswitch dst := dst.(type) {\n\t\t\tcase *AutoGroup:\n\t\t\t\tag := dst\n\n\t\t\t\terr := validateAutogroupSupported(ag)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\terr = validateAutogroupForSSHDst(ag)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase *Tag:\n\t\t\t\ttagOwner := dst\n\n\t\t\t\terr := p.TagOwners.Contains(tagOwner)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Validate SSH source/destination combinations follow Tailscale's security model\n\t\terr := validateSSHSrcDstCombination(ssh.Sources, ssh.Destinations)\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\n\t\t// Validate checkPeriod\n\t\tif ssh.CheckPeriod != nil {\n\t\t\tswitch {\n\t\t\tcase ssh.Action != SSHActionCheck:\n\t\t\t\terrs = append(errs, ErrSSHCheckPeriodOnNonCheck)\n\t\t\tdefault:\n\t\t\t\terr := ssh.CheckPeriod.Validate()\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, tagOwners := range p.TagOwners {\n\t\tfor _, tagOwner := range tagOwners {\n\t\t\tswitch tagOwner := tagOwner.(type) {\n\t\t\tcase *Group:\n\t\t\t\tg := tagOwner\n\n\t\t\t\terr := p.Groups.Contains(g)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t}\n\t\t\tcase *Tag:\n\t\t\t\tt := tagOwner\n\n\t\t\t\terr := p.TagOwners.Contains(t)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Validate tag ownership chains for circular references and undefined tags.\n\t_, err := flattenTagOwners(p.TagOwners)\n\tif err != nil {\n\t\terrs = append(errs, err)\n\t}\n\n\tfor _, approvers := range p.AutoApprovers.Routes {\n\t\tfor _, approver := range approvers {\n\t\t\tswitch approver := approver.(type) {\n\t\t\tcase *Group:\n\t\t\t\tg := approver\n\n\t\t\t\terr := p.Groups.Contains(g)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t}\n\t\t\tcase *Tag:\n\t\t\t\ttagOwner := approver\n\n\t\t\t\terr := p.TagOwners.Contains(tagOwner)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, approver := range p.AutoApprovers.ExitNode {\n\t\tswitch approver := approver.(type) {\n\t\tcase *Group:\n\t\t\tg := approver\n\n\t\t\terr := p.Groups.Contains(g)\n\t\t\tif err != nil {\n\t\t\t\terrs = append(errs, err)\n\t\t\t}\n\t\tcase *Tag:\n\t\t\ttagOwner := approver\n\n\t\t\terr := p.TagOwners.Contains(tagOwner)\n\t\t\tif err != nil {\n\t\t\t\terrs = append(errs, 
err)\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(errs) > 0 {\n\t\treturn multierr.New(errs...)\n\t}\n\n\tp.validated = true\n\n\treturn nil\n}\n\n// SSHCheckPeriod represents the check period for SSH \"check\" mode rules.\n// nil means not specified (runtime default of 12h applies).\n// Always=true means \"always\" (check on every request).\n// Duration is an explicit period (min 1m, max 168h).\ntype SSHCheckPeriod struct {\n\tAlways   bool\n\tDuration time.Duration\n}\n\n// UnmarshalJSON implements JSON unmarshaling for SSHCheckPeriod.\nfunc (p *SSHCheckPeriod) UnmarshalJSON(b []byte) error {\n\tstr := strings.Trim(string(b), `\"`)\n\tif str == \"always\" {\n\t\tp.Always = true\n\n\t\treturn nil\n\t}\n\n\td, err := model.ParseDuration(str)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"parsing checkPeriod %q: %w\", str, err)\n\t}\n\n\tp.Duration = time.Duration(d)\n\n\treturn nil\n}\n\n// MarshalJSON implements JSON marshaling for SSHCheckPeriod.\nfunc (p SSHCheckPeriod) MarshalJSON() ([]byte, error) {\n\tif p.Always {\n\t\treturn []byte(`\"always\"`), nil\n\t}\n\n\treturn fmt.Appendf(nil, \"%q\", p.Duration.String()), nil\n}\n\n// Validate checks that the SSHCheckPeriod is within allowed bounds.\nfunc (p *SSHCheckPeriod) Validate() error {\n\tif p.Always {\n\t\treturn nil\n\t}\n\n\tif p.Duration < SSHCheckPeriodMin {\n\t\treturn fmt.Errorf(\n\t\t\t\"%w: got %s\",\n\t\t\tErrSSHCheckPeriodBelowMin,\n\t\t\tp.Duration,\n\t\t)\n\t}\n\n\tif p.Duration > SSHCheckPeriodMax {\n\t\treturn fmt.Errorf(\n\t\t\t\"%w: got %s\",\n\t\t\tErrSSHCheckPeriodAboveMax,\n\t\t\tp.Duration,\n\t\t)\n\t}\n\n\treturn nil\n}\n\n// SSH controls who can ssh into which machines.\ntype SSH struct {\n\tAction       SSHAction       `json:\"action\"`\n\tSources      SSHSrcAliases   `json:\"src\"`\n\tDestinations SSHDstAliases   `json:\"dst\"`\n\tUsers        SSHUsers        `json:\"users\"`\n\tCheckPeriod  *SSHCheckPeriod `json:\"checkPeriod,omitempty\"`\n\tAcceptEnv    []string        `json:\"acceptEnv,omitempty\"`\n}\n\n// SSHSrcAliases is a list of aliases that can be used as sources in an SSH rule.\n// It can be a list of usernames, groups, tags or autogroups.\ntype SSHSrcAliases []Alias\n\n// MarshalJSON marshals the Groups to JSON.\nfunc (g *Groups) MarshalJSON() ([]byte, error) {\n\tif *g == nil {\n\t\treturn []byte(\"{}\"), nil\n\t}\n\n\traw := make(map[string][]string)\n\tfor group, usernames := range *g {\n\t\tusers := make([]string, len(usernames))\n\t\tfor i, username := range usernames {\n\t\t\tusers[i] = string(username)\n\t\t}\n\n\t\traw[string(group)] = users\n\t}\n\n\treturn json.Marshal(raw)\n}\n\nfunc (a *SSHSrcAliases) UnmarshalJSON(b []byte) error {\n\tvar aliases []AliasEnc\n\n\terr := json.Unmarshal(b, &aliases, policyJSONOpts...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t*a = make([]Alias, len(aliases))\n\tfor i, alias := range aliases {\n\t\tswitch alias.Alias.(type) {\n\t\tcase *Username, *Group, *Tag, *AutoGroup:\n\t\t\t(*a)[i] = alias.Alias\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"%w: %T\", ErrSSHSourceAliasNotSupported, alias.Alias)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (a *SSHDstAliases) UnmarshalJSON(b []byte) error {\n\tvar aliases []AliasEnc\n\n\terr := json.Unmarshal(b, &aliases, policyJSONOpts...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t*a = make([]Alias, len(aliases))\n\tfor i, alias := range aliases {\n\t\tswitch alias.Alias.(type) {\n\t\tcase *Username, *Tag, *AutoGroup, *Host:\n\t\t\t(*a)[i] = alias.Alias\n\t\tcase Asterix:\n\t\t\treturn fmt.Errorf(\"%w; use 'autogroup:member' for 
user-owned devices, \"+\n\t\t\t\t\"'autogroup:tagged' for tagged devices, or specific tags/users\",\n\t\t\t\tErrSSHWildcardDestination)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"%w: %T\", ErrSSHDestAliasNotSupported, alias.Alias)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// MarshalJSON marshals the SSHDstAliases to JSON.\nfunc (a SSHDstAliases) MarshalJSON() ([]byte, error) {\n\tif a == nil {\n\t\treturn []byte(\"[]\"), nil\n\t}\n\n\taliases := make([]string, len(a))\n\tfor i, alias := range a {\n\t\tswitch v := alias.(type) {\n\t\tcase *Username:\n\t\t\taliases[i] = string(*v)\n\t\tcase *Tag:\n\t\t\taliases[i] = string(*v)\n\t\tcase *AutoGroup:\n\t\t\taliases[i] = string(*v)\n\t\tcase *Host:\n\t\t\taliases[i] = string(*v)\n\t\tcase Asterix:\n\t\t\t// Marshal wildcard as \"*\" so it gets rejected during unmarshal\n\t\t\t// with a proper error message explaining alternatives\n\t\t\taliases[i] = \"*\"\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"%w: %T\", ErrUnknownSSHDestAlias, v)\n\t\t}\n\t}\n\n\treturn json.Marshal(aliases)\n}\n\n// MarshalJSON marshals the SSHSrcAliases to JSON.\nfunc (a *SSHSrcAliases) MarshalJSON() ([]byte, error) {\n\tif a == nil || *a == nil {\n\t\treturn []byte(\"[]\"), nil\n\t}\n\n\taliases := make([]string, len(*a))\n\tfor i, alias := range *a {\n\t\tswitch v := alias.(type) {\n\t\tcase *Username:\n\t\t\taliases[i] = string(*v)\n\t\tcase *Group:\n\t\t\taliases[i] = string(*v)\n\t\tcase *Tag:\n\t\t\taliases[i] = string(*v)\n\t\tcase *AutoGroup:\n\t\t\taliases[i] = string(*v)\n\t\tcase Asterix:\n\t\t\taliases[i] = \"*\"\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"%w: %T\", ErrUnknownSSHSrcAlias, v)\n\t\t}\n\t}\n\n\treturn json.Marshal(aliases)\n}\n\nfunc (a *SSHSrcAliases) Resolve(p *Policy, users types.Users, nodes views.Slice[types.NodeView]) (*netipx.IPSet, error) {\n\tvar (\n\t\tips  netipx.IPSetBuilder\n\t\terrs []error\n\t)\n\n\tfor _, alias := range *a {\n\t\taips, err := alias.Resolve(p, users, nodes)\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\n\t\tips.AddSet(aips)\n\t}\n\n\treturn buildIPSetMultiErr(&ips, errs)\n}\n\n// SSHDstAliases is a list of aliases that can be used as destinations in an SSH rule.\n// It can be a list of usernames, tags or autogroups.\ntype SSHDstAliases []Alias\n\ntype SSHUsers []SSHUser\n\n// SSHUserLocalpartPrefix is the prefix for localpart SSH user entries.\n// Format: localpart:*@<domain>\n// See: https://tailscale.com/docs/features/tailscale-ssh#users\nconst SSHUserLocalpartPrefix = \"localpart:\"\n\nfunc (u SSHUsers) ContainsRoot() bool {\n\treturn slices.Contains(u, \"root\")\n}\n\nfunc (u SSHUsers) ContainsNonRoot() bool {\n\treturn slices.Contains(u, SSHUser(AutoGroupNonRoot))\n}\n\n// ContainsLocalpart returns true if any entry has the localpart: prefix.\nfunc (u SSHUsers) ContainsLocalpart() bool {\n\treturn slices.ContainsFunc(u, func(user SSHUser) bool {\n\t\treturn user.IsLocalpart()\n\t})\n}\n\n// NormalUsers returns all SSH users that are not root, autogroup:nonroot,\n// or localpart: entries.\nfunc (u SSHUsers) NormalUsers() []SSHUser {\n\treturn slicesx.Filter(nil, u, func(user SSHUser) bool {\n\t\treturn user != \"root\" && user != SSHUser(AutoGroupNonRoot) && !user.IsLocalpart()\n\t})\n}\n\n// LocalpartEntries returns only the localpart: prefixed entries.\nfunc (u SSHUsers) LocalpartEntries() []SSHUser {\n\treturn slicesx.Filter(nil, u, func(user SSHUser) bool {\n\t\treturn user.IsLocalpart()\n\t})\n}\n\ntype SSHUser string\n\nfunc (u SSHUser) String() string {\n\treturn string(u)\n}\n\n// IsLocalpart 
returns true if the SSHUser has the localpart: prefix.\nfunc (u SSHUser) IsLocalpart() bool {\n\treturn strings.HasPrefix(string(u), SSHUserLocalpartPrefix)\n}\n\n// ParseLocalpart validates and extracts the domain from a localpart: entry.\n// The expected format is localpart:*@<domain>.\n// Returns the domain part or an error if the format is invalid.\nfunc (u SSHUser) ParseLocalpart() (string, error) {\n\tif !u.IsLocalpart() {\n\t\treturn \"\", fmt.Errorf(\"%w: missing prefix %q in %q\", ErrInvalidLocalpart, SSHUserLocalpartPrefix, u)\n\t}\n\n\tpattern := strings.TrimPrefix(string(u), SSHUserLocalpartPrefix)\n\n\t// Must be *@<domain>\n\tatIdx := strings.LastIndex(pattern, \"@\")\n\tif atIdx < 0 {\n\t\treturn \"\", fmt.Errorf(\"%w: missing @ in %q\", ErrInvalidLocalpart, u)\n\t}\n\n\tlocalPart := pattern[:atIdx]\n\tdomain := pattern[atIdx+1:]\n\n\tif localPart != \"*\" {\n\t\treturn \"\", fmt.Errorf(\"%w: local part must be *, got %q in %q\", ErrInvalidLocalpart, localPart, u)\n\t}\n\n\tif domain == \"\" {\n\t\treturn \"\", fmt.Errorf(\"%w: empty domain in %q\", ErrInvalidLocalpart, u)\n\t}\n\n\treturn domain, nil\n}\n\n// MarshalJSON marshals the SSHUser to JSON.\nfunc (u SSHUser) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(string(u))\n}\n\n// unmarshalPolicy takes a byte slice and unmarshals it into a Policy struct.\n// In addition to unmarshalling, it will also validate the policy.\n// This is the only entrypoint of reading a policy from a file or other source.\nfunc unmarshalPolicy(b []byte) (*Policy, error) {\n\tif len(b) == 0 {\n\t\treturn nil, nil //nolint:nilnil // intentional: no policy when empty input\n\t}\n\n\tvar policy Policy\n\n\tast, err := hujson.Parse(b)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"parsing HuJSON: %w\", err)\n\t}\n\n\tast.Standardize()\n\n\tif err = json.Unmarshal(ast.Pack(), &policy, policyJSONOpts...); err != nil { //nolint:noinlineerr\n\t\tif serr, ok := errors.AsType[*json.SemanticError](err); ok && errors.Is(serr.Err, json.ErrUnknownName) {\n\t\t\tptr := serr.JSONPointer\n\t\t\tname := ptr.LastToken()\n\n\t\t\treturn nil, fmt.Errorf(\"%w: %q\", ErrUnknownField, name)\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"parsing policy from bytes: %w\", err)\n\t}\n\n\tif err := policy.validate(); err != nil { //nolint:noinlineerr\n\t\treturn nil, err\n\t}\n\n\treturn &policy, nil\n}\n\n// validateProtocolPortCompatibility checks that only TCP, UDP, and SCTP protocols\n// can have specific ports. 
All other protocols should only use wildcard ports.\nfunc validateProtocolPortCompatibility(protocol Protocol, destinations []AliasWithPorts) error {\n\t// Only TCP, UDP, and SCTP support specific ports\n\tsupportsSpecificPorts := protocol == ProtocolNameTCP || protocol == ProtocolNameUDP || protocol == ProtocolNameSCTP || protocol == \"\"\n\n\tif supportsSpecificPorts {\n\t\treturn nil // No validation needed for these protocols\n\t}\n\n\t// For all other protocols, check that all destinations use wildcard ports\n\tfor _, dst := range destinations {\n\t\tfor _, portRange := range dst.Ports {\n\t\t\t// Check if it's not a wildcard port (0-65535)\n\t\t\tif portRange.First != 0 || portRange.Last != 65535 {\n\t\t\t\treturn fmt.Errorf(\"%w: %q, only \\\"*\\\" is allowed\", ErrProtocolNoSpecificPorts, protocol)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// usesAutogroupSelf checks if the policy uses autogroup:self in any ACL or SSH rules.\nfunc (p *Policy) usesAutogroupSelf() bool {\n\tif p == nil {\n\t\treturn false\n\t}\n\n\t// Check ACL rules\n\tfor _, acl := range p.ACLs {\n\t\tfor _, src := range acl.Sources {\n\t\t\tif ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\tfor _, dest := range acl.Destinations {\n\t\t\tif ag, ok := dest.Alias.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\t// Check SSH rules\n\tfor _, ssh := range p.SSHs {\n\t\tfor _, src := range ssh.Sources {\n\t\t\tif ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\tfor _, dest := range ssh.Destinations {\n\t\t\tif ag, ok := dest.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}\n"
  },
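  {
    "path": "hscontrol/policy/v2/types_sketch_test.go",
    "content": "package v2\n\n// NOTE: This file is an illustrative sketch added for documentation purposes\n// and is not part of the upstream headscale tree; the file name and test\n// names are hypothetical. Each test only exercises behaviour that is visible\n// in types.go in this package: protocol validation and expansion, single-IP\n// Prefix parsing, the localpart: SSH user syntax, and the SSH src/dst\n// security model.\n\nimport (\n\t\"errors\"\n\t\"net/netip\"\n\t\"slices\"\n\t\"testing\"\n)\n\n// An empty ACL \"proto\" expands to TCP, UDP, ICMP, and ICMPv6; named\n// protocols map to a single IANA number; wildcards, leading zeros, and\n// out-of-range numbers are rejected by validate().\nfunc TestProtocolSketch(t *testing.T) {\n\tempty := Protocol(\"\")\n\tif got := empty.parseProtocol(); !slices.Equal(got, []int{ProtocolTCP, ProtocolUDP, ProtocolICMP, ProtocolIPv6ICMP}) {\n\t\tt.Errorf(\"empty proto expanded to %v\", got)\n\t}\n\n\ttcp := Protocol(\"tcp\")\n\tif got := tcp.parseProtocol(); !slices.Equal(got, []int{ProtocolTCP}) {\n\t\tt.Errorf(\"tcp expanded to %v\", got)\n\t}\n\n\tfor _, bad := range []Protocol{\"*\", \"017\", \"256\", \"bogus\"} {\n\t\tif err := bad.validate(); err == nil {\n\t\t\tt.Errorf(\"expected %q to fail validation\", bad)\n\t\t}\n\t}\n}\n\n// A bare address parses into a full-length (/32 or /128) Prefix, which is\n// what lets Host and Prefix resolution expand a single node IP to all of\n// that node's addresses.\nfunc TestPrefixSingleIPSketch(t *testing.T) {\n\tvar p Prefix\n\tif err := p.parseString(\"100.64.0.1\"); err != nil {\n\t\tt.Fatalf(\"parseString: %v\", err)\n\t}\n\n\tif !netip.Prefix(p).IsSingleIP() {\n\t\tt.Errorf(\"expected single-IP prefix, got %s\", p.String())\n\t}\n}\n\n// SSH users may use the localpart:*@<domain> form; the local part must be\n// exactly \"*\" and the domain must be non-empty.\nfunc TestLocalpartSketch(t *testing.T) {\n\tdomain, err := SSHUser(\"localpart:*@example.com\").ParseLocalpart()\n\tif err != nil || domain != \"example.com\" {\n\t\tt.Errorf(\"got (%q, %v)\", domain, err)\n\t}\n\n\tfor _, bad := range []SSHUser{\"root\", \"localpart:alice@example.com\", \"localpart:*@\"} {\n\t\tif _, err := bad.ParseLocalpart(); err == nil {\n\t\t\tt.Errorf(\"expected %q to be rejected\", bad)\n\t\t}\n\t}\n}\n\n// Tagged sources must not SSH to user destinations, while a user may SSH\n// to their own devices when they are the only source.\nfunc TestSSHSrcDstSketch(t *testing.T) {\n\ttag := Tag(\"tag:server\")\n\tuser := Username(\"alice@\")\n\n\terr := validateSSHSrcDstCombination(SSHSrcAliases{&tag}, SSHDstAliases{&user})\n\tif !errors.Is(err, ErrSSHTagSourceToUserDest) {\n\t\tt.Errorf(\"expected ErrSSHTagSourceToUserDest, got %v\", err)\n\t}\n\n\tif err := validateSSHSrcDstCombination(SSHSrcAliases{&user}, SSHDstAliases{&user}); err != nil {\n\t\tt.Errorf(\"same-user SSH should be allowed, got %v\", err)\n\t}\n}\n"
  },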
  {
    "path": "hscontrol/policy/v2/types_test.go",
    "content": "package v2\n\nimport (\n\t\"encoding/json\"\n\t\"net/netip\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/google/go-cmp/cmp/cmpopts\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"go4.org/netipx\"\n\txmaps \"golang.org/x/exp/maps\"\n\t\"gorm.io/gorm\"\n\t\"tailscale.com/net/tsaddr\"\n\t\"tailscale.com/tailcfg\"\n)\n\n// TestUnmarshalPolicy tests the unmarshalling of JSON into Policy objects and the marshalling\n// back to JSON (round-trip). It ensures that:\n// 1. JSON can be correctly unmarshalled into a Policy object\n// 2. A Policy object can be correctly marshalled back to JSON\n// 3. The unmarshalled Policy matches the expected Policy\n// 4. The marshalled and then unmarshalled Policy is semantically equivalent to the original\n//    (accounting for nil vs empty map/slice differences)\n//\n// This test also verifies that all the required struct fields are properly marshalled and\n// unmarshalled, maintaining semantic equivalence through a complete JSON round-trip.\n\n// TestMarshalJSON tests explicit marshalling of Policy objects to JSON.\n// This test ensures our custom MarshalJSON methods properly encode\n// the various data structures used in the Policy.\nfunc TestMarshalJSON(t *testing.T) {\n\t// Create a complex test policy\n\tpolicy := &Policy{\n\t\tGroups: Groups{\n\t\t\tGroup(\"group:example\"): []Username{Username(\"user@example.com\")},\n\t\t},\n\t\tHosts: Hosts{\n\t\t\t\"host-1\": Prefix(mp(\"100.100.100.100/32\")),\n\t\t},\n\t\tTagOwners: TagOwners{\n\t\t\tTag(\"tag:test\"): Owners{up(\"user@example.com\")},\n\t\t},\n\t\tACLs: []ACL{\n\t\t\t{\n\t\t\t\tAction:   \"accept\",\n\t\t\t\tProtocol: \"tcp\",\n\t\t\t\tSources: Aliases{\n\t\t\t\t\tnew(Username(\"user@example.com\")),\n\t\t\t\t},\n\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t{\n\t\t\t\t\t\tAlias: new(Username(\"other@example.com\")),\n\t\t\t\t\t\tPorts: []tailcfg.PortRange{{First: 80, Last: 80}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// Marshal the policy to JSON\n\tmarshalled, err := json.MarshalIndent(policy, \"\", \"  \")\n\trequire.NoError(t, err)\n\n\t// Make sure all expected fields are present in the JSON\n\tjsonString := string(marshalled)\n\tassert.Contains(t, jsonString, \"group:example\")\n\tassert.Contains(t, jsonString, \"user@example.com\")\n\tassert.Contains(t, jsonString, \"host-1\")\n\tassert.Contains(t, jsonString, \"100.100.100.100/32\")\n\tassert.Contains(t, jsonString, \"tag:test\")\n\tassert.Contains(t, jsonString, \"accept\")\n\tassert.Contains(t, jsonString, \"tcp\")\n\tassert.Contains(t, jsonString, \"80\")\n\n\t// Unmarshal back to verify round trip\n\tvar roundTripped Policy\n\n\terr = json.Unmarshal(marshalled, &roundTripped)\n\trequire.NoError(t, err)\n\n\t// Compare the original and round-tripped policies\n\tcmps := append(util.Comparers,\n\t\tcmp.Comparer(func(x, y Prefix) bool {\n\t\t\treturn x == y\n\t\t}),\n\t\tcmpopts.IgnoreUnexported(Policy{}),\n\t\tcmpopts.EquateEmpty(),\n\t)\n\n\tif diff := cmp.Diff(policy, &roundTripped, cmps...); diff != \"\" {\n\t\tt.Fatalf(\"round trip policy (-original +roundtripped):\\n%s\", diff)\n\t}\n}\n\nfunc TestUnmarshalPolicy(t *testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\tinput   string\n\t\twant    *Policy\n\t\twantErr string\n\t}{\n\t\t{\n\t\t\tname:  \"empty\",\n\t\t\tinput: \"{}\",\n\t\t\twant:  
&Policy{},\n\t\t},\n\t\t{\n\t\t\tname: \"groups\",\n\t\t\tinput: `\n{\n\t\"groups\": {\n\t\t\"group:example\": [\n\t\t\t\"derp@headscale.net\",\n\t\t],\n\t},\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tGroups: Groups{\n\t\t\t\t\tGroup(\"group:example\"): []Username{Username(\"derp@headscale.net\")},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"basic-types\",\n\t\t\tinput: `\n{\n\t\"groups\": {\n\t\t\"group:example\": [\n\t\t\t\"testuser@headscale.net\",\n\t\t],\n\t\t\"group:other\": [\n\t\t\t\"otheruser@headscale.net\",\n\t\t],\n\t\t\"group:noat\": [\n\t\t\t\"noat@\",\n\t\t],\n\t},\n\n\t\"tagOwners\": {\n\t\t\"tag:user\": [\"testuser@headscale.net\"],\n\t\t\"tag:group\": [\"group:other\"],\n\t\t\"tag:userandgroup\": [\"testuser@headscale.net\", \"group:other\"],\n\t},\n\n\t\"hosts\": {\n\t\t\"host-1\": \"100.100.100.100\",\n\t\t\"subnet-1\": \"100.100.101.100/24\",\n\t\t\"outside\": \"192.168.0.0/16\",\n\t},\n\n\t\"acls\": [\n\t    // All\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"proto\": \"tcp\",\n\t\t\t\"src\": [\"*\"],\n\t\t\t\"dst\": [\"*:*\"],\n\t\t},\n\t\t// Users\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"proto\": \"tcp\",\n\t\t\t\"src\": [\"testuser@headscale.net\"],\n\t\t\t\"dst\": [\"otheruser@headscale.net:80\"],\n\t\t},\n\t\t// Groups\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"proto\": \"tcp\",\n\t\t\t\"src\": [\"group:example\"],\n\t\t\t\"dst\": [\"group:other:80\"],\n\t\t},\n\t\t// Tailscale IP\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"proto\": \"tcp\",\n\t\t\t\"src\": [\"100.101.102.103\"],\n\t\t\t\"dst\": [\"100.101.102.104:80\"],\n\t\t},\n\t\t// Subnet\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"proto\": \"udp\",\n\t\t\t\"src\": [\"10.0.0.0/8\"],\n\t\t\t\"dst\": [\"172.16.0.0/16:80\"],\n\t\t},\n\t\t// Hosts\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"proto\": \"tcp\",\n\t\t\t\"src\": [\"subnet-1\"],\n\t\t\t\"dst\": [\"host-1:80-88\"],\n\t\t},\n\t\t// Tags\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"proto\": \"tcp\",\n\t\t\t\"src\": [\"tag:group\"],\n\t\t\t\"dst\": [\"tag:user:80,443\"],\n\t\t},\n\t\t// Autogroup\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"proto\": \"tcp\",\n\t\t\t\"src\": [\"tag:group\"],\n\t\t\t\"dst\": [\"autogroup:internet:80\"],\n\t\t},\n\t],\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tGroups: Groups{\n\t\t\t\t\tGroup(\"group:example\"): []Username{Username(\"testuser@headscale.net\")},\n\t\t\t\t\tGroup(\"group:other\"):   []Username{Username(\"otheruser@headscale.net\")},\n\t\t\t\t\tGroup(\"group:noat\"):    []Username{Username(\"noat@\")},\n\t\t\t\t},\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:user\"):         Owners{up(\"testuser@headscale.net\")},\n\t\t\t\t\tTag(\"tag:group\"):        Owners{gp(\"group:other\")},\n\t\t\t\t\tTag(\"tag:userandgroup\"): Owners{up(\"testuser@headscale.net\"), gp(\"group:other\")},\n\t\t\t\t},\n\t\t\t\tHosts: Hosts{\n\t\t\t\t\t\"host-1\":   Prefix(mp(\"100.100.100.100/32\")),\n\t\t\t\t\t\"subnet-1\": Prefix(mp(\"100.100.101.100/24\")),\n\t\t\t\t\t\"outside\":  Prefix(mp(\"192.168.0.0/16\")),\n\t\t\t\t},\n\t\t\t\tACLs: []ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:   \"accept\",\n\t\t\t\t\t\tProtocol: \"tcp\",\n\t\t\t\t\t\tSources: Aliases{\n\t\t\t\t\t\t\tWildcard,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t// TODO(kradalby): Should this be host?\n\t\t\t\t\t\t\t\t// It is:\n\t\t\t\t\t\t\t\t// Includes any destination (no restrictions).\n\t\t\t\t\t\t\t\tAlias: Wildcard,\n\t\t\t\t\t\t\t\tPorts: 
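// the \"*\" port wildcard parses to tailcfg.PortRangeAny\n\t\t\t\t\t\t\t\t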
[]tailcfg.PortRange{tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:   \"accept\",\n\t\t\t\t\t\tProtocol: \"tcp\",\n\t\t\t\t\t\tSources: Aliases{\n\t\t\t\t\t\t\tnew(Username(\"testuser@headscale.net\")),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tAlias: new(Username(\"otheruser@headscale.net\")),\n\t\t\t\t\t\t\t\tPorts: []tailcfg.PortRange{{First: 80, Last: 80}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:   \"accept\",\n\t\t\t\t\t\tProtocol: \"tcp\",\n\t\t\t\t\t\tSources: Aliases{\n\t\t\t\t\t\t\tgp(\"group:example\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tAlias: gp(\"group:other\"),\n\t\t\t\t\t\t\t\tPorts: []tailcfg.PortRange{{First: 80, Last: 80}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:   \"accept\",\n\t\t\t\t\t\tProtocol: \"tcp\",\n\t\t\t\t\t\tSources: Aliases{\n\t\t\t\t\t\t\tpp(\"100.101.102.103/32\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tAlias: pp(\"100.101.102.104/32\"),\n\t\t\t\t\t\t\t\tPorts: []tailcfg.PortRange{{First: 80, Last: 80}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:   \"accept\",\n\t\t\t\t\t\tProtocol: \"udp\",\n\t\t\t\t\t\tSources: Aliases{\n\t\t\t\t\t\t\tpp(\"10.0.0.0/8\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tAlias: pp(\"172.16.0.0/16\"),\n\t\t\t\t\t\t\t\tPorts: []tailcfg.PortRange{{First: 80, Last: 80}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:   \"accept\",\n\t\t\t\t\t\tProtocol: \"tcp\",\n\t\t\t\t\t\tSources: Aliases{\n\t\t\t\t\t\t\thp(\"subnet-1\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tAlias: hp(\"host-1\"),\n\t\t\t\t\t\t\t\tPorts: []tailcfg.PortRange{{First: 80, Last: 88}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:   \"accept\",\n\t\t\t\t\t\tProtocol: \"tcp\",\n\t\t\t\t\t\tSources: Aliases{\n\t\t\t\t\t\t\ttp(\"tag:group\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tAlias: tp(\"tag:user\"),\n\t\t\t\t\t\t\t\tPorts: []tailcfg.PortRange{\n\t\t\t\t\t\t\t\t\t{First: 80, Last: 80},\n\t\t\t\t\t\t\t\t\t{First: 443, Last: 443},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:   \"accept\",\n\t\t\t\t\t\tProtocol: \"tcp\",\n\t\t\t\t\t\tSources: Aliases{\n\t\t\t\t\t\t\ttp(\"tag:group\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tAlias: agp(\"autogroup:internet\"),\n\t\t\t\t\t\t\t\tPorts: []tailcfg.PortRange{\n\t\t\t\t\t\t\t\t\t{First: 80, Last: 80},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"2652-asterix-error-better-explain\",\n\t\t\tinput: `\n{\n\t\"ssh\": [\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"src\": [\n\t\t\t\t\"*\"\n\t\t\t],\n\t\t\t\"dst\": [\n\t\t\t\t\"*\"\n\t\t\t],\n\t\t\t\"users\": [\"root\"]\n\t\t}\n\t]\n}\n\t\t\t`,\n\t\t\twantErr: \"alias not supported for SSH source: v2.Asterix\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid-username\",\n\t\t\tinput: `\n{\n\t\"groups\": {\n\t\t\"group:example\": [\n\t\t\t\"valid@\",\n\t\t\t\"invalid\",\n\t\t],\n\t},\n}\n`,\n\t\t\twantErr: `username must contain @, 
got: \"invalid\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"invalid-group\",\n\t\t\tinput: `\n{\n\t\"groups\": {\n\t\t\"grou:example\": [\n\t\t\t\"valid@\",\n\t\t],\n\t},\n}\n`,\n\t\t\twantErr: `group must start with 'group:', got: \"grou:example\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"group-in-group\",\n\t\t\tinput: `\n{\n\t\"groups\": {\n\t\t\"group:inner\": [],\n\t\t\"group:example\": [\n\t\t\t\"group:inner\",\n\t\t],\n\t},\n}\n`,\n\t\t\t// wantErr: `username must contain @, got: \"group:inner\"`,\n\t\t\twantErr: `nested groups are not allowed: found \"group:inner\" inside \"group:example\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"invalid-addr\",\n\t\t\tinput: `\n{\n\t\"hosts\": {\n\t\t\"derp\": \"10.0\",\n\t},\n}\n`,\n\t\t\twantErr: `hostname contains invalid IP address: hostname \"derp\" address \"10.0\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"invalid-prefix\",\n\t\t\tinput: `\n{\n\t\t\t\"hosts\": {\n\t\t\t\t\"derp\": \"10.0/42\",\n\t\t\t},\n}\n`,\n\t\t\twantErr: `hostname contains invalid IP address: hostname \"derp\" address \"10.0/42\"`,\n\t\t},\n\t\t// TODO(kradalby): Figure out why this doesn't work.\n\t\t// \t\t{\n\t\t// \t\t\tname: \"invalid-hostname\",\n\t\t// \t\t\tinput: `\n\t\t// {\n\t\t// \t\t\t\"hosts\": {\n\t\t// \t\t\t\t\"derp:merp\": \"10.0.0.0/31\",\n\t\t// \t\t\t},\n\t\t// }\n\t\t// `,\n\t\t// \t\t\twantErr: `Hostname \"derp:merp\" is invalid`,\n\t\t// \t\t},\n\t\t{\n\t\t\tname: \"invalid-auto-group\",\n\t\t\tinput: `\n{\n\t\"acls\": [\n\t\t// Autogroup\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"proto\": \"tcp\",\n\t\t\t\"src\": [\"tag:group\"],\n\t\t\t\"dst\": [\"autogroup:invalid:80\"],\n\t\t},\n\t],\n}\n`,\n\t\t\twantErr: `invalid autogroup: got \"autogroup:invalid\", must be one of [autogroup:internet autogroup:member autogroup:nonroot autogroup:tagged autogroup:self]`,\n\t\t},\n\t\t{\n\t\t\tname: \"undefined-hostname-errors-2490\",\n\t\t\tinput: `\n{\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"user1\"\n      ],\n      \"dst\": [\n        \"user1:*\"\n      ]\n    }\n  ]\n}\n`,\n\t\t\twantErr: `host not defined in policy: \"user1\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"defined-hostname-does-not-err-2490\",\n\t\t\tinput: `\n{\n  \"hosts\": {\n\t\t\"user1\": \"100.100.100.100\",\n  },\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"user1\"\n      ],\n      \"dst\": [\n        \"user1:*\"\n      ]\n    }\n  ]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tHosts: Hosts{\n\t\t\t\t\t\"user1\": Prefix(mp(\"100.100.100.100/32\")),\n\t\t\t\t},\n\t\t\t\tACLs: []ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction: \"accept\",\n\t\t\t\t\t\tSources: Aliases{\n\t\t\t\t\t\t\thp(\"user1\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tAlias: hp(\"user1\"),\n\t\t\t\t\t\t\t\tPorts: []tailcfg.PortRange{tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"autogroup:internet-in-dst-allowed\",\n\t\t\tinput: `\n{\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"10.0.0.1\"\n      ],\n      \"dst\": [\n        \"autogroup:internet:*\"\n      ]\n    }\n  ]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tACLs: []ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction: \"accept\",\n\t\t\t\t\t\tSources: Aliases{\n\t\t\t\t\t\t\tpp(\"10.0.0.1/32\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tAlias: new(AutoGroup(\"autogroup:internet\")),\n\t\t\t\t\t\t\t\tPorts: 
[]tailcfg.PortRange{tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"autogroup:internet-in-src-not-allowed\",\n\t\t\tinput: `\n{\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"autogroup:internet\"\n      ],\n      \"dst\": [\n        \"10.0.0.1:*\"\n      ]\n    }\n  ]\n}\n`,\n\t\t\twantErr: `autogroup:internet can only be used in ACL destinations`,\n\t\t},\n\t\t{\n\t\t\tname: \"autogroup:internet-in-ssh-src-not-allowed\",\n\t\t\tinput: `\n{\n  \"ssh\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"autogroup:internet\"\n      ],\n      \"dst\": [\n        \"tag:test\"\n      ]\n    }\n  ]\n}\n`,\n\t\t\twantErr: `tag not defined in policy: \"tag:test\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"autogroup:internet-in-ssh-dst-not-allowed\",\n\t\t\tinput: `\n{\n  \"ssh\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"tag:test\"\n      ],\n      \"dst\": [\n        \"autogroup:internet\"\n      ]\n    }\n  ]\n}\n`,\n\t\t\twantErr: `autogroup:internet can only be used in ACL destinations`,\n\t\t},\n\t\t{\n\t\t\tname: \"ssh-basic\",\n\t\t\tinput: `\n{\n  \"groups\": {\n    \"group:admins\": [\"admin@example.com\"]\n  },\n  \"tagOwners\": {\n    \"tag:servers\": [\"group:admins\"]\n  },\n  \"ssh\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"group:admins\"\n      ],\n      \"dst\": [\n        \"tag:servers\"\n      ],\n      \"users\": [\"root\", \"admin\"]\n    }\n  ]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tGroups: Groups{\n\t\t\t\t\tGroup(\"group:admins\"): []Username{Username(\"admin@example.com\")},\n\t\t\t\t},\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:servers\"): Owners{gp(\"group:admins\")},\n\t\t\t\t},\n\t\t\t\tSSHs: []SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction: \"accept\",\n\t\t\t\t\t\tSources: SSHSrcAliases{\n\t\t\t\t\t\t\tgp(\"group:admins\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: SSHDstAliases{\n\t\t\t\t\t\t\ttp(\"tag:servers\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tUsers: []SSHUser{\n\t\t\t\t\t\t\tSSHUser(\"root\"),\n\t\t\t\t\t\t\tSSHUser(\"admin\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ssh-with-tag-and-user\",\n\t\t\tinput: `\n{\n  \"tagOwners\": {\n    \"tag:web\": [\"admin@example.com\"],\n    \"tag:server\": [\"admin@example.com\"]\n  },\n  \"ssh\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"tag:web\"\n      ],\n      \"dst\": [\n        \"tag:server\"\n      ],\n      \"users\": [\"*\"]\n    }\n  ]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:web\"):    Owners{new(Username(\"admin@example.com\"))},\n\t\t\t\t\tTag(\"tag:server\"): Owners{new(Username(\"admin@example.com\"))},\n\t\t\t\t},\n\t\t\t\tSSHs: []SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction: \"accept\",\n\t\t\t\t\t\tSources: SSHSrcAliases{\n\t\t\t\t\t\t\ttp(\"tag:web\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: SSHDstAliases{\n\t\t\t\t\t\t\ttp(\"tag:server\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tUsers: []SSHUser{\n\t\t\t\t\t\t\tSSHUser(\"*\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ssh-with-check-period\",\n\t\t\tinput: `\n{\n  \"groups\": {\n    \"group:admins\": [\"admin@example.com\"]\n  },\n  \"ssh\": [\n    {\n      \"action\": \"check\",\n      \"src\": [\n        \"group:admins\"\n      ],\n      \"dst\": [\n        \"autogroup:self\"\n      ],\n      \"users\": [\"root\"],\n      \"checkPeriod\": \"24h\"\n    }\n  
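// \"24h\" is parsed as a duration into CheckPeriod\n  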
]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tGroups: Groups{\n\t\t\t\t\tGroup(\"group:admins\"): []Username{Username(\"admin@example.com\")},\n\t\t\t\t},\n\t\t\t\tSSHs: []SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction: \"check\",\n\t\t\t\t\t\tSources: SSHSrcAliases{\n\t\t\t\t\t\t\tgp(\"group:admins\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: SSHDstAliases{\n\t\t\t\t\t\t\tagp(\"autogroup:self\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tUsers: []SSHUser{\n\t\t\t\t\t\t\tSSHUser(\"root\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tCheckPeriod: &SSHCheckPeriod{Duration: 24 * time.Hour},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"group-must-be-defined-acl-src\",\n\t\t\tinput: `\n{\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"group:notdefined\"\n      ],\n      \"dst\": [\n        \"autogroup:internet:*\"\n      ]\n    }\n  ]\n}\n`,\n\t\t\twantErr: `group not defined in policy: \"group:notdefined\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"group-must-be-defined-acl-dst\",\n\t\t\tinput: `\n{\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"*\"\n      ],\n      \"dst\": [\n        \"group:notdefined:*\"\n      ]\n    }\n  ]\n}\n`,\n\t\t\twantErr: `group not defined in policy: \"group:notdefined\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"group-must-be-defined-acl-ssh-src\",\n\t\t\tinput: `\n{\n  \"ssh\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"group:notdefined\"\n      ],\n      \"dst\": [\n        \"user@\"\n      ]\n    }\n  ]\n}\n`,\n\t\t\twantErr: `user destination requires source to contain only that same user \"user@\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"group-must-be-defined-acl-tagOwner\",\n\t\t\tinput: `\n{\n  \"tagOwners\": {\n    \"tag:test\": [\"group:notdefined\"],\n  },\n}\n`,\n\t\t\twantErr: `group not defined in policy: \"group:notdefined\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"group-must-be-defined-acl-autoapprover-route\",\n\t\t\tinput: `\n{\n  \"autoApprovers\": {\n    \"routes\": {\n      \"10.0.0.0/16\": [\"group:notdefined\"]\n    }\n  },\n}\n`,\n\t\t\twantErr: `group not defined in policy: \"group:notdefined\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"group-must-be-defined-acl-autoapprover-exitnode\",\n\t\t\tinput: `\n{\n  \"autoApprovers\": {\n    \"exitNode\": [\"group:notdefined\"]\n   },\n}\n`,\n\t\t\twantErr: `group not defined in policy: \"group:notdefined\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"tag-must-be-defined-acl-src\",\n\t\t\tinput: `\n{\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"tag:notdefined\"\n      ],\n      \"dst\": [\n        \"autogroup:internet:*\"\n      ]\n    }\n  ]\n}\n`,\n\t\t\twantErr: `tag not defined in policy: \"tag:notdefined\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"tag-must-be-defined-acl-dst\",\n\t\t\tinput: `\n{\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"*\"\n      ],\n      \"dst\": [\n        \"tag:notdefined:*\"\n      ]\n    }\n  ]\n}\n`,\n\t\t\twantErr: `tag not defined in policy: \"tag:notdefined\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"tag-must-be-defined-acl-ssh-src\",\n\t\t\tinput: `\n{\n  \"ssh\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"tag:notdefined\"\n      ],\n      \"dst\": [\n        \"user@\"\n      ]\n    }\n  ]\n}\n`,\n\t\t\twantErr: `tag not defined in policy: \"tag:notdefined\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"tag-must-be-defined-acl-ssh-dst\",\n\t\t\tinput: `\n{\n  \"groups\": {\n  \t\"group:defined\": [\"user@\"],\n  },\n  \"ssh\": [\n    {\n      \"action\": \"accept\",\n      \"src\": 
[\n        \"group:defined\"\n      ],\n      \"dst\": [\n        \"tag:notdefined\",\n      ],\n    }\n  ]\n}\n`,\n\t\t\twantErr: `tag not defined in policy: \"tag:notdefined\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"tag-must-be-defined-acl-autoapprover-route\",\n\t\t\tinput: `\n{\n  \"autoApprovers\": {\n    \"routes\": {\n      \"10.0.0.0/16\": [\"tag:notdefined\"]\n    }\n  },\n}\n`,\n\t\t\twantErr: `tag not defined in policy: \"tag:notdefined\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"tag-must-be-defined-acl-autoapprover-exitnode\",\n\t\t\tinput: `\n{\n  \"autoApprovers\": {\n    \"exitNode\": [\"tag:notdefined\"]\n   },\n}\n`,\n\t\t\twantErr: `tag not defined in policy: \"tag:notdefined\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"missing-dst-port-is-err\",\n\t\t\tinput: `\n\t\t\t{\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"*\"\n      ],\n      \"dst\": [\n        \"100.64.0.1\"\n      ]\n    }\n  ]\n}\n`,\n\t\t\twantErr: `hostport must contain a colon`,\n\t\t},\n\t\t{\n\t\t\tname: \"dst-port-zero-is-err\",\n\t\t\tinput: `\n\t\t\t{\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\n        \"*\"\n      ],\n      \"dst\": [\n        \"100.64.0.1:0\"\n      ]\n    }\n  ]\n}\n`,\n\t\t\twantErr: `first port must be >0, or use '*' for wildcard`,\n\t\t},\n\t\t{\n\t\t\tname: \"disallow-unsupported-fields\",\n\t\t\tinput: `\n{\n  // rules doesnt exists, we have \"acls\"\n  \"rules\": [\n  ]\n}\n`,\n\t\t\twantErr: `unknown field: \"rules\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"disallow-unsupported-fields-nested\",\n\t\t\tinput: `\n{\n    \"acls\": [\n        { \"action\": \"accept\", \"BAD\": [\"FOO:BAR:FOO:BAR\"], \"NOT\": [\"BAD:BAD:BAD:BAD\"] }\n      ]\n}\n`,\n\t\t\twantErr: `unknown field`,\n\t\t},\n\t\t{\n\t\t\tname: \"invalid-group-name\",\n\t\t\tinput: `\n{\n  \"groups\": {\n    \"group:test\": [\"user@example.com\"],\n    \"INVALID_GROUP_FIELD\": [\"user@example.com\"]\n  }\n}\n`,\n\t\t\twantErr: `group must start with 'group:', got: \"INVALID_GROUP_FIELD\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"invalid-group-datatype\",\n\t\t\tinput: `\n{\n  \"groups\": {\n    \"group:test\": [\"user@example.com\"],\n    \"group:invalid\": \"should fail\"\n  }\n}\n`,\n\t\t\twantErr: `group value must be an array of users: group \"group:invalid\" got string: \"should fail\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"invalid-group-name-and-datatype-fails-on-name-first\",\n\t\t\tinput: `\n{\n  \"groups\": {\n    \"group:test\": [\"user@example.com\"],\n    \"INVALID_GROUP_FIELD\": \"should fail\"\n  }\n}\n`,\n\t\t\twantErr: `group must start with 'group:', got: \"INVALID_GROUP_FIELD\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"disallow-unsupported-fields-hosts-level\",\n\t\t\tinput: `\n{\n  \"hosts\": {\n    \"host1\": \"10.0.0.1\",\n    \"INVALID_HOST_FIELD\": \"should fail\"\n  }\n}\n`,\n\t\t\twantErr: `hostname contains invalid IP address: hostname \"INVALID_HOST_FIELD\" address \"should fail\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"disallow-unsupported-fields-tagowners-level\",\n\t\t\tinput: `\n{\n  \"tagOwners\": {\n    \"tag:test\": [\"user@example.com\"],\n    \"INVALID_TAG_FIELD\": \"should fail\"\n  }\n}\n`,\n\t\t\twantErr: `tag must start with 'tag:', got: \"INVALID_TAG_FIELD\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"disallow-unsupported-fields-acls-level\",\n\t\t\tinput: `\n{\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"proto\": \"tcp\",\n      \"src\": [\"*\"],\n      \"dst\": [\"*:*\"],\n      \"INVALID_ACL_FIELD\": \"should fail\"\n    }\n  ]\n}\n`,\n\t\t\twantErr: `unknown field: 
\"INVALID_ACL_FIELD\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"disallow-unsupported-fields-ssh-level\",\n\t\t\tinput: `\n{\n  \"ssh\": [\n    {\n      \"action\": \"accept\",\n      \"src\": [\"user@example.com\"],\n      \"dst\": [\"user@example.com\"],\n      \"users\": [\"root\"],\n      \"INVALID_SSH_FIELD\": \"should fail\"\n    }\n  ]\n}\n`,\n\t\t\twantErr: `unknown field: \"INVALID_SSH_FIELD\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"disallow-unsupported-fields-policy-level\",\n\t\t\tinput: `\n{\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"proto\": \"tcp\",\n      \"src\": [\"*\"],\n      \"dst\": [\"*:*\"]\n    }\n  ],\n  \"INVALID_POLICY_FIELD\": \"should fail at policy level\"\n}\n`,\n\t\t\twantErr: `unknown field: \"INVALID_POLICY_FIELD\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"disallow-unsupported-fields-autoapprovers-level\",\n\t\t\tinput: `\n{\n  \"autoApprovers\": {\n    \"routes\": {\n      \"10.0.0.0/8\": [\"user@example.com\"]\n    },\n    \"exitNode\": [\"user@example.com\"],\n    \"INVALID_AUTO_APPROVER_FIELD\": \"should fail\"\n  }\n}\n`,\n\t\t\twantErr: `unknown field: \"INVALID_AUTO_APPROVER_FIELD\"`,\n\t\t},\n\t\t// headscale-admin uses # in some field names to add metadata, so we will ignore\n\t\t// those to ensure it doesnt break.\n\t\t// https://github.com/GoodiesHQ/headscale-admin/blob/214a44a9c15c92d2b42383f131b51df10c84017c/src/lib/common/acl.svelte.ts#L38\n\t\t{\n\t\t\tname: \"hash-fields-are-allowed-but-ignored\",\n\t\t\tinput: `\n{\n  \"acls\": [\n    {\n      \"#ha-test\": \"SOME VALUE\",\n      \"action\": \"accept\",\n      \"src\": [\n        \"10.0.0.1\"\n      ],\n      \"dst\": [\n        \"autogroup:internet:*\"\n      ]\n    }\n  ]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tACLs: []ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction: \"accept\",\n\t\t\t\t\t\tSources: Aliases{\n\t\t\t\t\t\t\tpp(\"10.0.0.1/32\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tAlias: new(AutoGroup(\"autogroup:internet\")),\n\t\t\t\t\t\t\t\tPorts: []tailcfg.PortRange{tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ssh-asterix-invalid-acl-input\",\n\t\t\tinput: `\n{\n\t\"ssh\": [\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"src\": [\n\t\t\t\t\"user@example.com\"\n\t\t\t],\n\t\t\t\"dst\": [\n\t\t\t\t\"user@example.com\"\n\t\t\t],\n\t\t\t\"users\": [\"root\"],\n\t\t\t\"proto\": \"tcp\"\n\t\t}\n\t]\n}\n`,\n\t\t\twantErr: `unknown field: \"proto\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"protocol-wildcard-not-allowed\",\n\t\t\tinput: `\n{\n\t\"acls\": [\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"proto\": \"*\",\n\t\t\t\"src\": [\"*\"],\n\t\t\t\"dst\": [\"*:*\"]\n\t\t}\n\t]\n}\n`,\n\t\t\twantErr: `proto name \"*\" not known; use protocol number 0-255 or protocol name (icmp, tcp, udp, etc.)`,\n\t\t},\n\t\t{\n\t\t\tname: \"protocol-case-insensitive-uppercase\",\n\t\t\tinput: `\n{\n\t\"acls\": [\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"proto\": \"ICMP\",\n\t\t\t\"src\": [\"*\"],\n\t\t\t\"dst\": [\"*:*\"]\n\t\t}\n\t]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tACLs: []ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:   \"accept\",\n\t\t\t\t\t\tProtocol: \"icmp\",\n\t\t\t\t\t\tSources: Aliases{\n\t\t\t\t\t\t\tWildcard,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tAlias: Wildcard,\n\t\t\t\t\t\t\t\tPorts: []tailcfg.PortRange{tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: 
\"protocol-case-insensitive-mixed\",\n\t\t\tinput: `\n{\n\t\"acls\": [\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"proto\": \"IcmP\",\n\t\t\t\"src\": [\"*\"],\n\t\t\t\"dst\": [\"*:*\"]\n\t\t}\n\t]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tACLs: []ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:   \"accept\",\n\t\t\t\t\t\tProtocol: \"icmp\",\n\t\t\t\t\t\tSources: Aliases{\n\t\t\t\t\t\t\tWildcard,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tAlias: Wildcard,\n\t\t\t\t\t\t\t\tPorts: []tailcfg.PortRange{tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"protocol-leading-zero-not-permitted\",\n\t\t\tinput: `\n{\n\t\"acls\": [\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"proto\": \"0\",\n\t\t\t\"src\": [\"*\"],\n\t\t\t\"dst\": [\"*:*\"]\n\t\t}\n\t]\n}\n`,\n\t\t\twantErr: `leading 0 not permitted in protocol number: \"0\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"protocol-empty-applies-to-tcp-udp-only\",\n\t\t\tinput: `\n{\n\t\"acls\": [\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"src\": [\"*\"],\n\t\t\t\"dst\": [\"*:80\"]\n\t\t}\n\t]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tACLs: []ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:   \"accept\",\n\t\t\t\t\t\tProtocol: \"\",\n\t\t\t\t\t\tSources: Aliases{\n\t\t\t\t\t\t\tWildcard,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tAlias: Wildcard,\n\t\t\t\t\t\t\t\tPorts: []tailcfg.PortRange{{First: 80, Last: 80}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"protocol-icmp-with-specific-port-not-allowed\",\n\t\t\tinput: `\n{\n\t\"acls\": [\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"proto\": \"icmp\",\n\t\t\t\"src\": [\"*\"],\n\t\t\t\"dst\": [\"*:80\"]\n\t\t}\n\t]\n}\n`,\n\t\t\twantErr: `protocol does not support specific ports: \"icmp\", only \"*\" is allowed`,\n\t\t},\n\t\t{\n\t\t\tname: \"protocol-icmp-with-wildcard-port-allowed\",\n\t\t\tinput: `\n{\n\t\"acls\": [\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"proto\": \"icmp\",\n\t\t\t\"src\": [\"*\"],\n\t\t\t\"dst\": [\"*:*\"]\n\t\t}\n\t]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tACLs: []ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:   \"accept\",\n\t\t\t\t\t\tProtocol: \"icmp\",\n\t\t\t\t\t\tSources: Aliases{\n\t\t\t\t\t\t\tWildcard,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tAlias: Wildcard,\n\t\t\t\t\t\t\t\tPorts: []tailcfg.PortRange{tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"protocol-gre-with-specific-port-not-allowed\",\n\t\t\tinput: `\n{\n\t\"acls\": [\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"proto\": \"gre\",\n\t\t\t\"src\": [\"*\"],\n\t\t\t\"dst\": [\"*:443\"]\n\t\t}\n\t]\n}\n`,\n\t\t\twantErr: `protocol does not support specific ports: \"gre\", only \"*\" is allowed`,\n\t\t},\n\t\t{\n\t\t\tname: \"protocol-tcp-with-specific-port-allowed\",\n\t\t\tinput: `\n{\n\t\"acls\": [\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"proto\": \"tcp\",\n\t\t\t\"src\": [\"*\"],\n\t\t\t\"dst\": [\"*:80\"]\n\t\t}\n\t]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tACLs: []ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:   \"accept\",\n\t\t\t\t\t\tProtocol: \"tcp\",\n\t\t\t\t\t\tSources: Aliases{\n\t\t\t\t\t\t\tWildcard,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tAlias: Wildcard,\n\t\t\t\t\t\t\t\tPorts: []tailcfg.PortRange{{First: 80, Last: 
80}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"protocol-udp-with-specific-port-allowed\",\n\t\t\tinput: `\n{\n\t\"acls\": [\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"proto\": \"udp\",\n\t\t\t\"src\": [\"*\"],\n\t\t\t\"dst\": [\"*:53\"]\n\t\t}\n\t]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tACLs: []ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:   \"accept\",\n\t\t\t\t\t\tProtocol: \"udp\",\n\t\t\t\t\t\tSources: Aliases{\n\t\t\t\t\t\t\tWildcard,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tAlias: Wildcard,\n\t\t\t\t\t\t\t\tPorts: []tailcfg.PortRange{{First: 53, Last: 53}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"protocol-sctp-with-specific-port-allowed\",\n\t\t\tinput: `\n{\n\t\"acls\": [\n\t\t{\n\t\t\t\"action\": \"accept\",\n\t\t\t\"proto\": \"sctp\",\n\t\t\t\"src\": [\"*\"],\n\t\t\t\"dst\": [\"*:9000\"]\n\t\t}\n\t]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tACLs: []ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:   \"accept\",\n\t\t\t\t\t\tProtocol: \"sctp\",\n\t\t\t\t\t\tSources: Aliases{\n\t\t\t\t\t\t\tWildcard,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tAlias: Wildcard,\n\t\t\t\t\t\t\t\tPorts: []tailcfg.PortRange{{First: 9000, Last: 9000}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"tags-can-own-other-tags\",\n\t\t\tinput: `\n{\n  \"tagOwners\": {\n    \"tag:bigbrother\": [],\n    \"tag:smallbrother\": [\"tag:bigbrother\"],\n  },\n  \"acls\": [\n    {\n      \"action\": \"accept\",\n      \"proto\": \"tcp\",\n      \"src\": [\"*\"],\n      \"dst\": [\"tag:smallbrother:9000\"]\n    }\n  ]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:bigbrother\"):   {},\n\t\t\t\t\tTag(\"tag:smallbrother\"): {new(Tag(\"tag:bigbrother\"))},\n\t\t\t\t},\n\t\t\t\tACLs: []ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:   \"accept\",\n\t\t\t\t\t\tProtocol: \"tcp\",\n\t\t\t\t\t\tSources: Aliases{\n\t\t\t\t\t\t\tWildcard,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tAlias: new(Tag(\"tag:smallbrother\")),\n\t\t\t\t\t\t\t\tPorts: []tailcfg.PortRange{{First: 9000, Last: 9000}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"tag-owner-references-undefined-tag\",\n\t\t\tinput: `\n{\n  \"tagOwners\": {\n    \"tag:child\": [\"tag:nonexistent\"],\n  },\n}\n`,\n\t\t\twantErr: `tag \"tag:child\" references undefined tag \"tag:nonexistent\"`,\n\t\t},\n\t\t// SSH source/destination validation tests (#3009, #3010)\n\t\t{\n\t\t\tname: \"ssh-tag-to-user-rejected\",\n\t\t\tinput: `\n{\n  \"tagOwners\": {\"tag:server\": [\"admin@\"]},\n  \"ssh\": [{\n    \"action\": \"accept\",\n    \"src\": [\"tag:server\"],\n    \"dst\": [\"admin@\"],\n    \"users\": [\"autogroup:nonroot\"]\n  }]\n}\n`,\n\t\t\twantErr: \"tags in SSH source cannot access user-owned devices\",\n\t\t},\n\t\t{\n\t\t\tname: \"ssh-autogroup-tagged-to-user-rejected\",\n\t\t\tinput: `\n{\n  \"ssh\": [{\n    \"action\": \"accept\",\n    \"src\": [\"autogroup:tagged\"],\n    \"dst\": [\"admin@\"],\n    \"users\": [\"autogroup:nonroot\"]\n  }]\n}\n`,\n\t\t\twantErr: \"tags in SSH source cannot access user-owned devices\",\n\t\t},\n\t\t{\n\t\t\tname: \"ssh-tag-to-autogroup-self-rejected\",\n\t\t\tinput: `\n{\n  \"tagOwners\": {\"tag:server\": [\"admin@\"]},\n  \"ssh\": [{\n    \"action\": 
\"accept\",\n    \"src\": [\"tag:server\"],\n    \"dst\": [\"autogroup:self\"],\n    \"users\": [\"autogroup:nonroot\"]\n  }]\n}\n`,\n\t\t\twantErr: \"autogroup:self destination requires source to contain only users or groups\",\n\t\t},\n\t\t{\n\t\t\tname: \"ssh-group-to-user-rejected\",\n\t\t\tinput: `\n{\n  \"groups\": {\"group:admins\": [\"admin@\", \"user1@\"]},\n  \"ssh\": [{\n    \"action\": \"accept\",\n    \"src\": [\"group:admins\"],\n    \"dst\": [\"admin@\"],\n    \"users\": [\"autogroup:nonroot\"]\n  }]\n}\n`,\n\t\t\twantErr: `user destination requires source to contain only that same user \"admin@\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"ssh-same-user-to-user-allowed\",\n\t\t\tinput: `\n{\n  \"ssh\": [{\n    \"action\": \"accept\",\n    \"src\": [\"admin@\"],\n    \"dst\": [\"admin@\"],\n    \"users\": [\"autogroup:nonroot\"]\n  }]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tSSHs: []SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\t\tSources:      SSHSrcAliases{up(\"admin@\")},\n\t\t\t\t\t\tDestinations: SSHDstAliases{up(\"admin@\")},\n\t\t\t\t\t\tUsers:        []SSHUser{SSHUser(AutoGroupNonRoot)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ssh-group-to-autogroup-self-allowed\",\n\t\t\tinput: `\n{\n  \"groups\": {\"group:admins\": [\"admin@\", \"user1@\"]},\n  \"ssh\": [{\n    \"action\": \"accept\",\n    \"src\": [\"group:admins\"],\n    \"dst\": [\"autogroup:self\"],\n    \"users\": [\"autogroup:nonroot\"]\n  }]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tGroups: Groups{\n\t\t\t\t\tGroup(\"group:admins\"): []Username{Username(\"admin@\"), Username(\"user1@\")},\n\t\t\t\t},\n\t\t\t\tSSHs: []SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\t\tSources:      SSHSrcAliases{gp(\"group:admins\")},\n\t\t\t\t\t\tDestinations: SSHDstAliases{agp(\"autogroup:self\")},\n\t\t\t\t\t\tUsers:        []SSHUser{SSHUser(AutoGroupNonRoot)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ssh-autogroup-tagged-to-autogroup-member-rejected\",\n\t\t\tinput: `\n{\n  \"ssh\": [{\n    \"action\": \"accept\",\n    \"src\": [\"autogroup:tagged\"],\n    \"dst\": [\"autogroup:member\"],\n    \"users\": [\"autogroup:nonroot\"]\n  }]\n}\n`,\n\t\t\twantErr: \"tags in SSH source cannot access autogroup:member\",\n\t\t},\n\t\t{\n\t\t\tname: \"ssh-autogroup-tagged-to-autogroup-tagged-allowed\",\n\t\t\tinput: `\n{\n  \"ssh\": [{\n    \"action\": \"accept\",\n    \"src\": [\"autogroup:tagged\"],\n    \"dst\": [\"autogroup:tagged\"],\n    \"users\": [\"autogroup:nonroot\"]\n  }]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tSSHs: []SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\t\tSources:      SSHSrcAliases{agp(\"autogroup:tagged\")},\n\t\t\t\t\t\tDestinations: SSHDstAliases{agp(\"autogroup:tagged\")},\n\t\t\t\t\t\tUsers:        []SSHUser{SSHUser(AutoGroupNonRoot)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ssh-wildcard-destination-rejected\",\n\t\t\tinput: `\n{\n  \"groups\": {\"group:admins\": [\"admin@\"]},\n  \"ssh\": [{\n    \"action\": \"accept\",\n    \"src\": [\"group:admins\"],\n    \"dst\": [\"*\"],\n    \"users\": [\"autogroup:nonroot\"]\n  }]\n}\n`,\n\t\t\twantErr: \"wildcard (*) is not supported as SSH destination\",\n\t\t},\n\t\t{\n\t\t\tname: \"ssh-group-to-tag-allowed\",\n\t\t\tinput: `\n{\n  \"tagOwners\": {\"tag:server\": [\"admin@\"]},\n  \"groups\": {\"group:admins\": [\"admin@\"]},\n  \"ssh\": [{\n    \"action\": \"accept\",\n    \"src\": [\"group:admins\"],\n    \"dst\": [\"tag:server\"],\n    
\"users\": [\"autogroup:nonroot\"]\n  }]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:server\"): Owners{up(\"admin@\")},\n\t\t\t\t},\n\t\t\t\tGroups: Groups{\n\t\t\t\t\tGroup(\"group:admins\"): []Username{Username(\"admin@\")},\n\t\t\t\t},\n\t\t\t\tSSHs: []SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\t\tSources:      SSHSrcAliases{gp(\"group:admins\")},\n\t\t\t\t\t\tDestinations: SSHDstAliases{tp(\"tag:server\")},\n\t\t\t\t\t\tUsers:        []SSHUser{SSHUser(AutoGroupNonRoot)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ssh-user-to-tag-allowed\",\n\t\t\tinput: `\n{\n  \"tagOwners\": {\"tag:server\": [\"admin@\"]},\n  \"ssh\": [{\n    \"action\": \"accept\",\n    \"src\": [\"admin@\"],\n    \"dst\": [\"tag:server\"],\n    \"users\": [\"autogroup:nonroot\"]\n  }]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:server\"): Owners{up(\"admin@\")},\n\t\t\t\t},\n\t\t\t\tSSHs: []SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\t\tSources:      SSHSrcAliases{up(\"admin@\")},\n\t\t\t\t\t\tDestinations: SSHDstAliases{tp(\"tag:server\")},\n\t\t\t\t\t\tUsers:        []SSHUser{SSHUser(AutoGroupNonRoot)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ssh-autogroup-member-to-autogroup-tagged-allowed\",\n\t\t\tinput: `\n{\n  \"ssh\": [{\n    \"action\": \"accept\",\n    \"src\": [\"autogroup:member\"],\n    \"dst\": [\"autogroup:tagged\"],\n    \"users\": [\"autogroup:nonroot\"]\n  }]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tSSHs: []SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\t\tSources:      SSHSrcAliases{agp(\"autogroup:member\")},\n\t\t\t\t\t\tDestinations: SSHDstAliases{agp(\"autogroup:tagged\")},\n\t\t\t\t\t\tUsers:        []SSHUser{SSHUser(AutoGroupNonRoot)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Issue #2754: IPv6 addresses with brackets in ACL destinations.\n\t\t{\n\t\t\tname: \"2754-bracketed-ipv6-single-port\",\n\t\t\tinput: `\n{\n\t\"acls\": [{\n\t\t\"action\": \"accept\",\n\t\t\"src\": [\"alice@\"],\n\t\t\"dst\": [\"[fd7a:115c:a1e0::87e1]:443\"]\n\t}]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tACLs: []ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction: \"accept\",\n\t\t\t\t\t\tSources: Aliases{\n\t\t\t\t\t\t\tup(\"alice@\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tAlias: pp(\"fd7a:115c:a1e0::87e1/128\"),\n\t\t\t\t\t\t\t\tPorts: []tailcfg.PortRange{{First: 443, Last: 443}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ssh-localpart-valid\",\n\t\t\tinput: `\n{\n  \"tagOwners\": {\"tag:prod\": [\"admin@\"]},\n  \"ssh\": [{\n    \"action\": \"accept\",\n    \"src\": [\"autogroup:member\"],\n    \"dst\": [\"tag:prod\"],\n    \"users\": [\"localpart:*@example.com\"]\n  }]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:prod\"): Owners{up(\"admin@\")},\n\t\t\t\t},\n\t\t\t\tSSHs: []SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\t\tSources:      SSHSrcAliases{agp(\"autogroup:member\")},\n\t\t\t\t\t\tDestinations: SSHDstAliases{tp(\"tag:prod\")},\n\t\t\t\t\t\tUsers:        []SSHUser{SSHUser(\"localpart:*@example.com\")},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"2754-bracketed-ipv6-multiple-ports\",\n\t\t\tinput: `\n{\n\t\"acls\": [{\n\t\t\"action\": \"accept\",\n\t\t\"src\": [\"alice@\"],\n\t\t\"dst\": 
[\"[fd7a:115c:a1e0::87e1]:80,443\"]\n\t}]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tACLs: []ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction: \"accept\",\n\t\t\t\t\t\tSources: Aliases{\n\t\t\t\t\t\t\tup(\"alice@\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tAlias: pp(\"fd7a:115c:a1e0::87e1/128\"),\n\t\t\t\t\t\t\t\tPorts: []tailcfg.PortRange{\n\t\t\t\t\t\t\t\t\t{First: 80, Last: 80},\n\t\t\t\t\t\t\t\t\t{First: 443, Last: 443},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ssh-localpart-with-other-users\",\n\t\t\tinput: `\n{\n  \"tagOwners\": {\"tag:prod\": [\"admin@\"]},\n  \"ssh\": [{\n    \"action\": \"accept\",\n    \"src\": [\"autogroup:member\"],\n    \"dst\": [\"tag:prod\"],\n    \"users\": [\"localpart:*@example.com\", \"root\", \"autogroup:nonroot\"]\n  }]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:prod\"): Owners{up(\"admin@\")},\n\t\t\t\t},\n\t\t\t\tSSHs: []SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\t\tSources:      SSHSrcAliases{agp(\"autogroup:member\")},\n\t\t\t\t\t\tDestinations: SSHDstAliases{tp(\"tag:prod\")},\n\t\t\t\t\t\tUsers:        []SSHUser{SSHUser(\"localpart:*@example.com\"), \"root\", SSHUser(AutoGroupNonRoot)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"2754-bracketed-ipv6-wildcard-port\",\n\t\t\tinput: `\n{\n\t\"acls\": [{\n\t\t\"action\": \"accept\",\n\t\t\"src\": [\"alice@\"],\n\t\t\"dst\": [\"[fd7a:115c:a1e0::87e1]:*\"]\n\t}]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tACLs: []ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction: \"accept\",\n\t\t\t\t\t\tSources: Aliases{\n\t\t\t\t\t\t\tup(\"alice@\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tAlias: pp(\"fd7a:115c:a1e0::87e1/128\"),\n\t\t\t\t\t\t\t\tPorts: []tailcfg.PortRange{tailcfg.PortRangeAny},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"2754-bracketed-ipv6-cidr-inside-rejected\",\n\t\t\tinput: `\n{\n\t\"acls\": [{\n\t\t\"action\": \"accept\",\n\t\t\"src\": [\"alice@\"],\n\t\t\"dst\": [\"[fd7a:115c:a1e0::/48]:443\"]\n\t}]\n}\n`,\n\t\t\twantErr: \"square brackets are only valid around IPv6 addresses\",\n\t\t},\n\t\t{\n\t\t\tname: \"2754-bracketed-ipv6-port-range\",\n\t\t\tinput: `\n{\n\t\"acls\": [{\n\t\t\"action\": \"accept\",\n\t\t\"src\": [\"alice@\"],\n\t\t\"dst\": [\"[::1]:80-443\"]\n\t}]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tACLs: []ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction: \"accept\",\n\t\t\t\t\t\tSources: Aliases{\n\t\t\t\t\t\t\tup(\"alice@\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tAlias: pp(\"::1/128\"),\n\t\t\t\t\t\t\t\tPorts: []tailcfg.PortRange{{First: 80, Last: 443}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"2754-bracketed-ipv6-cidr-outside-brackets\",\n\t\t\tinput: `\n{\n\t\"acls\": [{\n\t\t\"action\": \"accept\",\n\t\t\"src\": [\"alice@\"],\n\t\t\"dst\": [\"[fd7a:115c:a1e0::2905]/128:80,443\"]\n\t}]\n}\n`,\n\t\t\twant: &Policy{\n\t\t\t\tACLs: []ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction: \"accept\",\n\t\t\t\t\t\tSources: Aliases{\n\t\t\t\t\t\t\tup(\"alice@\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tAlias: pp(\"fd7a:115c:a1e0::2905/128\"),\n\t\t\t\t\t\t\t\tPorts: []tailcfg.PortRange{\n\t\t\t\t\t\t\t\t\t{First: 80, Last: 80},\n\t\t\t\t\t\t\t\t\t{First: 
443, Last: 443},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"2754-bracketed-ipv4-rejected\",\n\t\t\tinput: `\n{\n\t\"acls\": [{\n\t\t\"action\": \"accept\",\n\t\t\"src\": [\"alice@\"],\n\t\t\"dst\": [\"[192.168.1.1]:80\"]\n\t}]\n}\n`,\n\t\t\twantErr: \"square brackets are only valid around IPv6 addresses\",\n\t\t},\n\t\t{\n\t\t\tname: \"2754-bracketed-hostname-rejected\",\n\t\t\tinput: `\n{\n\t\"acls\": [{\n\t\t\"action\": \"accept\",\n\t\t\"src\": [\"alice@\"],\n\t\t\"dst\": [\"[my-hostname]:80\"]\n\t}]\n}\n`,\n\t\t\twantErr: \"square brackets are only valid around IPv6 addresses\",\n\t\t},\n\t\t{\n\t\t\tname: \"ssh-localpart-invalid-no-at-sign\",\n\t\t\tinput: `\n{\n  \"tagOwners\": {\"tag:prod\": [\"admin@\"]},\n  \"ssh\": [{\n    \"action\": \"accept\",\n    \"src\": [\"autogroup:member\"],\n    \"dst\": [\"tag:prod\"],\n    \"users\": [\"localpart:foo\"]\n  }]\n}\n`,\n\t\t\twantErr: \"invalid localpart format\",\n\t\t},\n\t\t{\n\t\t\tname: \"ssh-localpart-invalid-non-wildcard\",\n\t\t\tinput: `\n{\n  \"tagOwners\": {\"tag:prod\": [\"admin@\"]},\n  \"ssh\": [{\n    \"action\": \"accept\",\n    \"src\": [\"autogroup:member\"],\n    \"dst\": [\"tag:prod\"],\n    \"users\": [\"localpart:alice@example.com\"]\n  }]\n}\n`,\n\t\t\twantErr: \"invalid localpart format\",\n\t\t},\n\t\t{\n\t\t\tname: \"ssh-localpart-invalid-empty-domain\",\n\t\t\tinput: `\n{\n  \"tagOwners\": {\"tag:prod\": [\"admin@\"]},\n  \"ssh\": [{\n    \"action\": \"accept\",\n    \"src\": [\"autogroup:member\"],\n    \"dst\": [\"tag:prod\"],\n    \"users\": [\"localpart:*@\"]\n  }]\n}\n`,\n\t\t\twantErr: \"invalid localpart format\",\n\t\t},\n\t}\n\n\tcmps := append(util.Comparers,\n\t\tcmp.Comparer(func(x, y Prefix) bool {\n\t\t\treturn x == y\n\t\t}),\n\t\tcmpopts.IgnoreUnexported(Policy{}),\n\t)\n\n\t// For round-trip testing, we'll normalize the policies before comparing\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\t// Test unmarshalling\n\t\t\tpolicy, err := unmarshalPolicy([]byte(tt.input))\n\t\t\tif tt.wantErr == \"\" {\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"unmarshalling: got %v; want no error\", err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err == nil {\n\t\t\t\t\tt.Fatalf(\"unmarshalling: got nil; want error %q\", tt.wantErr)\n\t\t\t\t} else if !strings.Contains(err.Error(), tt.wantErr) {\n\t\t\t\t\tt.Fatalf(\"unmarshalling: got err %v; want error %q\", err, tt.wantErr)\n\t\t\t\t}\n\n\t\t\t\treturn // Skip the rest of the test if we expected an error\n\t\t\t}\n\n\t\t\tif diff := cmp.Diff(tt.want, policy, cmps...); diff != \"\" {\n\t\t\t\tt.Fatalf(\"unexpected policy (-want +got):\\n%s\", diff)\n\t\t\t}\n\n\t\t\t// Test round-trip marshalling/unmarshalling\n\t\t\tif policy != nil {\n\t\t\t\t// Marshal the policy back to JSON\n\t\t\t\tmarshalled, err := json.MarshalIndent(policy, \"\", \"  \")\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"marshalling: %v\", err)\n\t\t\t\t}\n\n\t\t\t\t// Unmarshal it again\n\t\t\t\troundTripped, err := unmarshalPolicy(marshalled)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"round-trip unmarshalling: %v\", err)\n\t\t\t\t}\n\n\t\t\t\t// Add EquateEmpty to handle nil vs empty maps/slices\n\t\t\t\troundTripCmps := append(cmps,\n\t\t\t\t\tcmpopts.EquateEmpty(),\n\t\t\t\t\tcmpopts.IgnoreUnexported(Policy{}),\n\t\t\t\t)\n\n\t\t\t\t// Compare using the enhanced comparers for round-trip testing\n\t\t\t\tif diff := cmp.Diff(policy, roundTripped, roundTripCmps...); diff != 
\"\" {\n\t\t\t\t\tt.Fatalf(\"round trip policy (-original +roundtripped):\\n%s\", diff)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc gp(s string) *Group          { return new(Group(s)) }\nfunc up(s string) *Username       { return new(Username(s)) }\nfunc hp(s string) *Host           { return new(Host(s)) }\nfunc tp(s string) *Tag            { return new(Tag(s)) }\nfunc agp(s string) *AutoGroup     { return new(AutoGroup(s)) }\nfunc mp(pref string) netip.Prefix { return netip.MustParsePrefix(pref) }\nfunc ap(addr string) *netip.Addr  { return new(netip.MustParseAddr(addr)) }\nfunc pp(pref string) *Prefix      { return new(Prefix(mp(pref))) }\nfunc p(pref string) Prefix        { return Prefix(mp(pref)) }\n\nfunc TestResolvePolicy(t *testing.T) {\n\tusers := map[string]types.User{\n\t\t\"testuser\":   {Model: gorm.Model{ID: 1}, Name: \"testuser\"},\n\t\t\"groupuser\":  {Model: gorm.Model{ID: 2}, Name: \"groupuser\"},\n\t\t\"groupuser1\": {Model: gorm.Model{ID: 3}, Name: \"groupuser1\"},\n\t\t\"groupuser2\": {Model: gorm.Model{ID: 4}, Name: \"groupuser2\"},\n\t\t\"notme\":      {Model: gorm.Model{ID: 5}, Name: \"notme\"},\n\t\t\"testuser2\":  {Model: gorm.Model{ID: 6}, Name: \"testuser2\"},\n\t}\n\n\ttests := []struct {\n\t\tname      string\n\t\tnodes     types.Nodes\n\t\tpol       *Policy\n\t\ttoResolve Alias\n\t\twant      []netip.Prefix\n\t\twantErr   string\n\t}{\n\t\t{\n\t\t\tname:      \"prefix\",\n\t\t\ttoResolve: pp(\"100.100.101.101/32\"),\n\t\t\twant:      []netip.Prefix{mp(\"100.100.101.101/32\")},\n\t\t},\n\t\t{\n\t\t\tname: \"host\",\n\t\t\tpol: &Policy{\n\t\t\t\tHosts: Hosts{\n\t\t\t\t\t\"testhost\": p(\"100.100.101.102/32\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\ttoResolve: hp(\"testhost\"),\n\t\t\twant:      []netip.Prefix{mp(\"100.100.101.102/32\")},\n\t\t},\n\t\t{\n\t\t\tname:      \"username\",\n\t\t\ttoResolve: new(Username(\"testuser@\")),\n\t\t\tnodes: types.Nodes{\n\t\t\t\t// Not matching other user\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"notme\"]),\n\t\t\t\t\tIPv4: ap(\"100.100.101.1\"),\n\t\t\t\t},\n\t\t\t\t// Not matching forced tags\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"testuser\"]),\n\t\t\t\t\tTags: []string{\"tag:anything\"},\n\t\t\t\t\tIPv4: ap(\"100.100.101.2\"),\n\t\t\t\t},\n\t\t\t\t// not matching because it's tagged (tags copied from AuthKey)\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"testuser\"]),\n\t\t\t\t\tTags: []string{\"alsotagged\"},\n\t\t\t\t\tIPv4: ap(\"100.100.101.3\"),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"testuser\"]),\n\t\t\t\t\tIPv4: ap(\"100.100.101.103\"),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"testuser\"]),\n\t\t\t\t\tIPv4: ap(\"100.100.101.104\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []netip.Prefix{mp(\"100.100.101.103/32\"), mp(\"100.100.101.104/32\")},\n\t\t},\n\t\t{\n\t\t\tname:      \"group\",\n\t\t\ttoResolve: new(Group(\"group:testgroup\")),\n\t\t\tnodes: types.Nodes{\n\t\t\t\t// Not matching other user\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"notme\"]),\n\t\t\t\t\tIPv4: ap(\"100.100.101.4\"),\n\t\t\t\t},\n\t\t\t\t// Not matching forced tags\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"groupuser\"]),\n\t\t\t\t\tTags: []string{\"tag:anything\"},\n\t\t\t\t\tIPv4: ap(\"100.100.101.5\"),\n\t\t\t\t},\n\t\t\t\t// not matching because it's tagged (tags copied from AuthKey)\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"groupuser\"]),\n\t\t\t\t\tTags: []string{\"tag:alsotagged\"},\n\t\t\t\t\tIPv4: ap(\"100.100.101.6\"),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"groupuser\"]),\n\t\t\t\t\tIPv4: 
ap(\"100.100.101.203\"),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"groupuser\"]),\n\t\t\t\t\tIPv4: ap(\"100.100.101.204\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tpol: &Policy{\n\t\t\t\tGroups: Groups{\n\t\t\t\t\t\"group:testgroup\":  Usernames{\"groupuser\"},\n\t\t\t\t\t\"group:othergroup\": Usernames{\"notmetoo\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []netip.Prefix{mp(\"100.100.101.203/32\"), mp(\"100.100.101.204/32\")},\n\t\t},\n\t\t{\n\t\t\tname:      \"tag\",\n\t\t\ttoResolve: tp(\"tag:test\"),\n\t\t\tnodes: types.Nodes{\n\t\t\t\t// Not matching other user\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"notme\"]),\n\t\t\t\t\tIPv4: ap(\"100.100.101.9\"),\n\t\t\t\t},\n\t\t\t\t// Not matching forced tags\n\t\t\t\t{\n\t\t\t\t\tTags: []string{\"tag:anything\"},\n\t\t\t\t\tIPv4: ap(\"100.100.101.10\"),\n\t\t\t\t},\n\t\t\t\t// not matching pak tag\n\t\t\t\t{\n\t\t\t\t\tAuthKey: &types.PreAuthKey{\n\t\t\t\t\t\tTags: []string{\"tag:alsotagged\"},\n\t\t\t\t\t},\n\t\t\t\t\tIPv4: ap(\"100.100.101.11\"),\n\t\t\t\t},\n\t\t\t\t// Not matching forced tags\n\t\t\t\t{\n\t\t\t\t\tTags: []string{\"tag:test\"},\n\t\t\t\t\tIPv4: ap(\"100.100.101.234\"),\n\t\t\t\t},\n\t\t\t\t// matching tag (tags copied from AuthKey during registration)\n\t\t\t\t{\n\t\t\t\t\tTags: []string{\"tag:test\"},\n\t\t\t\t\tIPv4: ap(\"100.100.101.239\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\t// TODO(kradalby): tests handling TagOwners + hostinfo\n\t\t\tpol:  &Policy{},\n\t\t\twant: []netip.Prefix{mp(\"100.100.101.234/32\"), mp(\"100.100.101.239/32\")},\n\t\t},\n\t\t{\n\t\t\tname:      \"tag-owned-by-tag-call-child\",\n\t\t\ttoResolve: tp(\"tag:smallbrother\"),\n\t\t\tpol: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:bigbrother\"):   {},\n\t\t\t\t\tTag(\"tag:smallbrother\"): {new(Tag(\"tag:bigbrother\"))},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnodes: types.Nodes{\n\t\t\t\t// Should not match as we resolve the \"child\" tag.\n\t\t\t\t{\n\t\t\t\t\tTags: []string{\"tag:bigbrother\"},\n\t\t\t\t\tIPv4: ap(\"100.100.101.234\"),\n\t\t\t\t},\n\t\t\t\t// Should match.\n\t\t\t\t{\n\t\t\t\t\tTags: []string{\"tag:smallbrother\"},\n\t\t\t\t\tIPv4: ap(\"100.100.101.239\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []netip.Prefix{mp(\"100.100.101.239/32\")},\n\t\t},\n\t\t{\n\t\t\tname:      \"tag-owned-by-tag-call-parent\",\n\t\t\ttoResolve: tp(\"tag:bigbrother\"),\n\t\t\tpol: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:bigbrother\"):   {},\n\t\t\t\t\tTag(\"tag:smallbrother\"): {new(Tag(\"tag:bigbrother\"))},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnodes: types.Nodes{\n\t\t\t\t// Should match - we are resolving \"tag:bigbrother\" which this node has.\n\t\t\t\t{\n\t\t\t\t\tTags: []string{\"tag:bigbrother\"},\n\t\t\t\t\tIPv4: ap(\"100.100.101.234\"),\n\t\t\t\t},\n\t\t\t\t// Should not match - this node has \"tag:smallbrother\", not the tag we're resolving.\n\t\t\t\t{\n\t\t\t\t\tTags: []string{\"tag:smallbrother\"},\n\t\t\t\t\tIPv4: ap(\"100.100.101.239\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []netip.Prefix{mp(\"100.100.101.234/32\")},\n\t\t},\n\t\t{\n\t\t\tname:      \"empty-policy\",\n\t\t\ttoResolve: pp(\"100.100.101.101/32\"),\n\t\t\tpol:       &Policy{},\n\t\t\twant:      []netip.Prefix{mp(\"100.100.101.101/32\")},\n\t\t},\n\t\t{\n\t\t\tname:      \"invalid-host\",\n\t\t\ttoResolve: hp(\"invalidhost\"),\n\t\t\tpol: &Policy{\n\t\t\t\tHosts: Hosts{\n\t\t\t\t\t\"testhost\": p(\"100.100.101.102/32\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: `resolving host: \"invalidhost\"`,\n\t\t},\n\t\t{\n\t\t\tname:      \"multiple-groups\",\n\t\t\ttoResolve: 
new(Group(\"group:testgroup\")),\n\t\t\tnodes: types.Nodes{\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"groupuser1\"]),\n\t\t\t\t\tIPv4: ap(\"100.100.101.203\"),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"groupuser2\"]),\n\t\t\t\t\tIPv4: ap(\"100.100.101.204\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tpol: &Policy{\n\t\t\t\tGroups: Groups{\n\t\t\t\t\t\"group:testgroup\": Usernames{\"groupuser1@\", \"groupuser2@\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []netip.Prefix{mp(\"100.100.101.203/32\"), mp(\"100.100.101.204/32\")},\n\t\t},\n\t\t{\n\t\t\tname:      \"autogroup-internet\",\n\t\t\ttoResolve: agp(\"autogroup:internet\"),\n\t\t\twant:      util.TheInternet().Prefixes(),\n\t\t},\n\t\t{\n\t\t\tname:      \"invalid-username\",\n\t\t\ttoResolve: new(Username(\"invaliduser@\")),\n\t\t\tnodes: types.Nodes{\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"testuser\"]),\n\t\t\t\t\tIPv4: ap(\"100.100.101.103\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: `user not found: token \"invaliduser@\"`,\n\t\t},\n\t\t{\n\t\t\tname:      \"invalid-tag\",\n\t\t\ttoResolve: tp(\"tag:invalid\"),\n\t\t\tnodes: types.Nodes{\n\t\t\t\t{\n\t\t\t\t\tTags: []string{\"tag:test\"},\n\t\t\t\t\tIPv4: ap(\"100.100.101.234\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:      \"ipv6-address\",\n\t\t\ttoResolve: pp(\"fd7a:115c:a1e0::1/128\"),\n\t\t\twant:      []netip.Prefix{mp(\"fd7a:115c:a1e0::1/128\")},\n\t\t},\n\t\t{\n\t\t\tname:      \"wildcard-alias\",\n\t\t\ttoResolve: Wildcard,\n\t\t\twant:      []netip.Prefix{tsaddr.CGNATRange(), tsaddr.TailscaleULARange()},\n\t\t},\n\t\t{\n\t\t\tname:      \"autogroup-member-comprehensive\",\n\t\t\ttoResolve: new(AutoGroupMember),\n\t\t\tnodes: types.Nodes{\n\t\t\t\t// Node with no tags (should be included - is a member)\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"testuser\"]),\n\t\t\t\t\tIPv4: ap(\"100.100.101.1\"),\n\t\t\t\t},\n\t\t\t\t// Node with single tag (should be excluded - tagged nodes are not members)\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"testuser\"]),\n\t\t\t\t\tTags: []string{\"tag:test\"},\n\t\t\t\t\tIPv4: ap(\"100.100.101.2\"),\n\t\t\t\t},\n\t\t\t\t// Node with multiple tags, all defined in policy (should be excluded)\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"testuser\"]),\n\t\t\t\t\tTags: []string{\"tag:test\", \"tag:other\"},\n\t\t\t\t\tIPv4: ap(\"100.100.101.3\"),\n\t\t\t\t},\n\t\t\t\t// Node with tag not defined in policy (should be excluded - still tagged)\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"testuser\"]),\n\t\t\t\t\tTags: []string{\"tag:undefined\"},\n\t\t\t\t\tIPv4: ap(\"100.100.101.4\"),\n\t\t\t\t},\n\t\t\t\t// Node with mixed tags - some defined, some not (should be excluded)\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"testuser\"]),\n\t\t\t\t\tTags: []string{\"tag:test\", \"tag:undefined\"},\n\t\t\t\t\tIPv4: ap(\"100.100.101.5\"),\n\t\t\t\t},\n\t\t\t\t// Another untagged node from different user (should be included)\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"testuser2\"]),\n\t\t\t\t\tIPv4: ap(\"100.100.101.6\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tpol: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:test\"):  Owners{new(Username(\"testuser@\"))},\n\t\t\t\t\tTag(\"tag:other\"): Owners{new(Username(\"testuser@\"))},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []netip.Prefix{\n\t\t\t\tmp(\"100.100.101.1/32\"), // No tags - is a member\n\t\t\t\tmp(\"100.100.101.6/32\"), // No tags, different user - is a member\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:      \"autogroup-tagged\",\n\t\t\ttoResolve: new(AutoGroupTagged),\n\t\t\tnodes: types.Nodes{\n\t\t\t\t// Node with no tags (should be 
excluded - not tagged)\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"testuser\"]),\n\t\t\t\t\tIPv4: ap(\"100.100.101.1\"),\n\t\t\t\t},\n\t\t\t\t// Node with single tag defined in policy (should be included)\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"testuser\"]),\n\t\t\t\t\tTags: []string{\"tag:test\"},\n\t\t\t\t\tIPv4: ap(\"100.100.101.2\"),\n\t\t\t\t},\n\t\t\t\t// Node with multiple tags, all defined in policy (should be included)\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"testuser\"]),\n\t\t\t\t\tTags: []string{\"tag:test\", \"tag:other\"},\n\t\t\t\t\tIPv4: ap(\"100.100.101.3\"),\n\t\t\t\t},\n\t\t\t\t// Node with tag not defined in policy (should be included - still tagged)\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"testuser\"]),\n\t\t\t\t\tTags: []string{\"tag:undefined\"},\n\t\t\t\t\tIPv4: ap(\"100.100.101.4\"),\n\t\t\t\t},\n\t\t\t\t// Node with mixed tags - some defined, some not (should be included)\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"testuser\"]),\n\t\t\t\t\tTags: []string{\"tag:test\", \"tag:undefined\"},\n\t\t\t\t\tIPv4: ap(\"100.100.101.5\"),\n\t\t\t\t},\n\t\t\t\t// Another untagged node from different user (should be excluded)\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"testuser2\"]),\n\t\t\t\t\tIPv4: ap(\"100.100.101.6\"),\n\t\t\t\t},\n\t\t\t\t// Tagged node from different user (should be included)\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"testuser2\"]),\n\t\t\t\t\tTags: []string{\"tag:server\"},\n\t\t\t\t\tIPv4: ap(\"100.100.101.7\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tpol: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:test\"):   Owners{new(Username(\"testuser@\"))},\n\t\t\t\t\tTag(\"tag:other\"):  Owners{new(Username(\"testuser@\"))},\n\t\t\t\t\tTag(\"tag:server\"): Owners{new(Username(\"testuser2@\"))},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []netip.Prefix{\n\t\t\t\tmp(\"100.100.101.2/31\"), // .2, .3 consecutive tagged nodes\n\t\t\t\tmp(\"100.100.101.4/31\"), // .4, .5 consecutive tagged nodes\n\t\t\t\tmp(\"100.100.101.7/32\"), // Tagged node from different user\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:      \"autogroup-self\",\n\t\t\ttoResolve: new(AutoGroupSelf),\n\t\t\tnodes: types.Nodes{\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"testuser\"]),\n\t\t\t\t\tIPv4: ap(\"100.100.101.1\"),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"testuser2\"]),\n\t\t\t\t\tIPv4: ap(\"100.100.101.2\"),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"testuser\"]),\n\t\t\t\t\tTags: []string{\"tag:test\"},\n\t\t\t\t\tIPv4: ap(\"100.100.101.3\"),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tUser: new(users[\"testuser2\"]),\n\t\t\t\t\tTags: []string{\"tag:test\"},\n\t\t\t\t\tIPv4: ap(\"100.100.101.4\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tpol: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:test\"): Owners{new(Username(\"testuser@\"))},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: \"autogroup:self requires per-node resolution\",\n\t\t},\n\t\t{\n\t\t\tname:      \"autogroup-invalid\",\n\t\t\ttoResolve: new(AutoGroup(\"autogroup:invalid\")),\n\t\t\twantErr:   \"unknown autogroup\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tips, err := tt.toResolve.Resolve(tt.pol,\n\t\t\t\txmaps.Values(users),\n\t\t\t\ttt.nodes.ViewSlice())\n\t\t\tif tt.wantErr == \"\" {\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"got %v; want no error\", err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err == nil {\n\t\t\t\t\tt.Fatalf(\"got nil; want error %q\", tt.wantErr)\n\t\t\t\t} else if !strings.Contains(err.Error(), tt.wantErr) {\n\t\t\t\t\tt.Fatalf(\"got err %v; want error %q\", err, 
tt.wantErr)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar prefs []netip.Prefix\n\n\t\t\tif ips != nil {\n\t\t\t\tif p := ips.Prefixes(); len(p) > 0 {\n\t\t\t\t\tprefs = p\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif diff := cmp.Diff(tt.want, prefs, util.Comparers...); diff != \"\" {\n\t\t\t\tt.Fatalf(\"unexpected prefs (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestResolveAutoApprovers(t *testing.T) {\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"user1\"},\n\t\t{Model: gorm.Model{ID: 2}, Name: \"user2\"},\n\t\t{Model: gorm.Model{ID: 3}, Name: \"user3\"},\n\t}\n\n\tnodes := types.Nodes{\n\t\t{\n\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\tUser: &users[0],\n\t\t},\n\t\t{\n\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\tUser: &users[1],\n\t\t},\n\t\t{\n\t\t\tIPv4: ap(\"100.64.0.3\"),\n\t\t\tUser: &users[2],\n\t\t},\n\t\t{\n\t\t\tIPv4: ap(\"100.64.0.4\"),\n\t\t\tTags: []string{\"tag:testtag\"},\n\t\t},\n\t\t{\n\t\t\tIPv4: ap(\"100.64.0.5\"),\n\t\t\tTags: []string{\"tag:exittest\"},\n\t\t},\n\t}\n\n\ttests := []struct {\n\t\tname            string\n\t\tpolicy          *Policy\n\t\twant            map[netip.Prefix]*netipx.IPSet\n\t\twantAllIPRoutes *netipx.IPSet\n\t\twantErr         bool\n\t}{\n\t\t{\n\t\t\tname: \"single-route\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tAutoApprovers: AutoApproverPolicy{\n\t\t\t\t\tRoutes: map[netip.Prefix]AutoApprovers{\n\t\t\t\t\t\tmp(\"10.0.0.0/24\"): {new(Username(\"user1@\"))},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: map[netip.Prefix]*netipx.IPSet{\n\t\t\t\tmp(\"10.0.0.0/24\"): mustIPSet(\"100.64.0.1/32\"),\n\t\t\t},\n\t\t\twantAllIPRoutes: nil,\n\t\t\twantErr:         false,\n\t\t},\n\t\t{\n\t\t\tname: \"multiple-routes\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tAutoApprovers: AutoApproverPolicy{\n\t\t\t\t\tRoutes: map[netip.Prefix]AutoApprovers{\n\t\t\t\t\t\tmp(\"10.0.0.0/24\"): {new(Username(\"user1@\"))},\n\t\t\t\t\t\tmp(\"10.0.1.0/24\"): {new(Username(\"user2@\"))},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: map[netip.Prefix]*netipx.IPSet{\n\t\t\t\tmp(\"10.0.0.0/24\"): mustIPSet(\"100.64.0.1/32\"),\n\t\t\t\tmp(\"10.0.1.0/24\"): mustIPSet(\"100.64.0.2/32\"),\n\t\t\t},\n\t\t\twantAllIPRoutes: nil,\n\t\t\twantErr:         false,\n\t\t},\n\t\t{\n\t\t\tname: \"exit-node\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tAutoApprovers: AutoApproverPolicy{\n\t\t\t\t\tExitNode: AutoApprovers{new(Username(\"user1@\"))},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant:            map[netip.Prefix]*netipx.IPSet{},\n\t\t\twantAllIPRoutes: mustIPSet(\"100.64.0.1/32\"),\n\t\t\twantErr:         false,\n\t\t},\n\t\t{\n\t\t\tname: \"group-route\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tGroups: Groups{\n\t\t\t\t\t\"group:testgroup\": Usernames{\"user1@\", \"user2@\"},\n\t\t\t\t},\n\t\t\t\tAutoApprovers: AutoApproverPolicy{\n\t\t\t\t\tRoutes: map[netip.Prefix]AutoApprovers{\n\t\t\t\t\t\tmp(\"10.0.0.0/24\"): {new(Group(\"group:testgroup\"))},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: map[netip.Prefix]*netipx.IPSet{\n\t\t\t\tmp(\"10.0.0.0/24\"): mustIPSet(\"100.64.0.1/32\", \"100.64.0.2/32\"),\n\t\t\t},\n\t\t\twantAllIPRoutes: nil,\n\t\t\twantErr:         false,\n\t\t},\n\t\t{\n\t\t\tname: \"tag-route-and-exit\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\t\"tag:testtag\": Owners{\n\t\t\t\t\t\tnew(Username(\"user1@\")),\n\t\t\t\t\t\tnew(Username(\"user2@\")),\n\t\t\t\t\t},\n\t\t\t\t\t\"tag:exittest\": Owners{\n\t\t\t\t\t\tnew(Group(\"group:exitgroup\")),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tGroups: Groups{\n\t\t\t\t\t\"group:exitgroup\": 
Usernames{\"user2@\"},\n\t\t\t\t},\n\t\t\t\tAutoApprovers: AutoApproverPolicy{\n\t\t\t\t\tExitNode: AutoApprovers{new(Tag(\"tag:exittest\"))},\n\t\t\t\t\tRoutes: map[netip.Prefix]AutoApprovers{\n\t\t\t\t\t\tmp(\"10.0.1.0/24\"): {new(Tag(\"tag:testtag\"))},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: map[netip.Prefix]*netipx.IPSet{\n\t\t\t\tmp(\"10.0.1.0/24\"): mustIPSet(\"100.64.0.4/32\"),\n\t\t\t},\n\t\t\twantAllIPRoutes: mustIPSet(\"100.64.0.5/32\"),\n\t\t\twantErr:         false,\n\t\t},\n\t\t{\n\t\t\tname: \"mixed-routes-and-exit-nodes\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tGroups: Groups{\n\t\t\t\t\t\"group:testgroup\": Usernames{\"user1@\", \"user2@\"},\n\t\t\t\t},\n\t\t\t\tAutoApprovers: AutoApproverPolicy{\n\t\t\t\t\tRoutes: map[netip.Prefix]AutoApprovers{\n\t\t\t\t\t\tmp(\"10.0.0.0/24\"): {new(Group(\"group:testgroup\"))},\n\t\t\t\t\t\tmp(\"10.0.1.0/24\"): {new(Username(\"user3@\"))},\n\t\t\t\t\t},\n\t\t\t\t\tExitNode: AutoApprovers{new(Username(\"user1@\"))},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: map[netip.Prefix]*netipx.IPSet{\n\t\t\t\tmp(\"10.0.0.0/24\"): mustIPSet(\"100.64.0.1/32\", \"100.64.0.2/32\"),\n\t\t\t\tmp(\"10.0.1.0/24\"): mustIPSet(\"100.64.0.3/32\"),\n\t\t\t},\n\t\t\twantAllIPRoutes: mustIPSet(\"100.64.0.1/32\"),\n\t\t\twantErr:         false,\n\t\t},\n\t}\n\n\tcmps := append(util.Comparers, cmp.Comparer(ipSetComparer))\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot, gotAllIPRoutes, err := resolveAutoApprovers(tt.policy, users, nodes.ViewSlice())\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"resolveAutoApprovers() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif diff := cmp.Diff(tt.want, got, cmps...); diff != \"\" {\n\t\t\t\tt.Errorf(\"resolveAutoApprovers() mismatch (-want +got):\\n%s\", diff)\n\t\t\t}\n\n\t\t\tif tt.wantAllIPRoutes != nil {\n\t\t\t\tif gotAllIPRoutes == nil {\n\t\t\t\t\tt.Error(\"resolveAutoApprovers() expected non-nil allIPRoutes, got nil\")\n\t\t\t\t} else if diff := cmp.Diff(tt.wantAllIPRoutes, gotAllIPRoutes, cmps...); diff != \"\" {\n\t\t\t\t\tt.Errorf(\"resolveAutoApprovers() allIPRoutes mismatch (-want +got):\\n%s\", diff)\n\t\t\t\t}\n\t\t\t} else if gotAllIPRoutes != nil {\n\t\t\t\tt.Error(\"resolveAutoApprovers() expected nil allIPRoutes, got non-nil\")\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestSSHUsers_NormalUsers(t *testing.T) {\n\ttests := []struct {\n\t\tname  string\n\t\tusers SSHUsers\n\t\twant  []SSHUser\n\t}{\n\t\t{\n\t\t\tname:  \"empty users\",\n\t\t\tusers: SSHUsers{},\n\t\t\twant:  nil,\n\t\t},\n\t\t{\n\t\t\tname:  \"only root\",\n\t\t\tusers: SSHUsers{\"root\"},\n\t\t\twant:  nil,\n\t\t},\n\t\t{\n\t\t\tname:  \"only autogroup:nonroot\",\n\t\t\tusers: SSHUsers{SSHUser(AutoGroupNonRoot)},\n\t\t\twant:  nil,\n\t\t},\n\t\t{\n\t\t\tname:  \"only normal user\",\n\t\t\tusers: SSHUsers{\"ssh-it-user\"},\n\t\t\twant:  []SSHUser{\"ssh-it-user\"},\n\t\t},\n\t\t{\n\t\t\tname:  \"multiple normal users\",\n\t\t\tusers: SSHUsers{\"ubuntu\", \"admin\", \"user1\"},\n\t\t\twant:  []SSHUser{\"ubuntu\", \"admin\", \"user1\"},\n\t\t},\n\t\t{\n\t\t\tname:  \"mixed users with root\",\n\t\t\tusers: SSHUsers{\"ubuntu\", \"root\", \"admin\"},\n\t\t\twant:  []SSHUser{\"ubuntu\", \"admin\"},\n\t\t},\n\t\t{\n\t\t\tname:  \"mixed users with autogroup:nonroot\",\n\t\t\tusers: SSHUsers{\"ubuntu\", SSHUser(AutoGroupNonRoot), \"admin\"},\n\t\t\twant:  []SSHUser{\"ubuntu\", \"admin\"},\n\t\t},\n\t\t{\n\t\t\tname:  \"mixed users with both root and autogroup:nonroot\",\n\t\t\tusers: 
SSHUsers{\"ubuntu\", \"root\", SSHUser(AutoGroupNonRoot), \"admin\"},\n\t\t\twant:  []SSHUser{\"ubuntu\", \"admin\"},\n\t\t},\n\t\t{\n\t\t\tname:  \"excludes localpart entries\",\n\t\t\tusers: SSHUsers{\"ubuntu\", \"root\", SSHUser(AutoGroupNonRoot), SSHUser(\"localpart:*@example.com\"), \"admin\"},\n\t\t\twant:  []SSHUser{\"ubuntu\", \"admin\"},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot := tt.users.NormalUsers()\n\t\t\tif diff := cmp.Diff(tt.want, got); diff != \"\" {\n\t\t\t\tt.Errorf(\"NormalUsers() unexpected result (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestSSHUsers_ContainsRoot(t *testing.T) {\n\ttests := []struct {\n\t\tname     string\n\t\tusers    SSHUsers\n\t\texpected bool\n\t}{\n\t\t{\n\t\t\tname:     \"empty users\",\n\t\t\tusers:    SSHUsers{},\n\t\t\texpected: false,\n\t\t},\n\t\t{\n\t\t\tname:     \"contains root\",\n\t\t\tusers:    SSHUsers{\"root\"},\n\t\t\texpected: true,\n\t\t},\n\t\t{\n\t\t\tname:     \"does not contain root\",\n\t\t\tusers:    SSHUsers{\"ubuntu\", \"admin\"},\n\t\t\texpected: false,\n\t\t},\n\t\t{\n\t\t\tname:     \"contains root among others\",\n\t\t\tusers:    SSHUsers{\"ubuntu\", \"root\", \"admin\"},\n\t\t\texpected: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tresult := tt.users.ContainsRoot()\n\t\t\tassert.Equal(t, tt.expected, result, \"ContainsRoot() should return expected result\")\n\t\t})\n\t}\n}\n\nfunc TestSSHUsers_ContainsNonRoot(t *testing.T) {\n\ttests := []struct {\n\t\tname     string\n\t\tusers    SSHUsers\n\t\texpected bool\n\t}{\n\t\t{\n\t\t\tname:     \"empty users\",\n\t\t\tusers:    SSHUsers{},\n\t\t\texpected: false,\n\t\t},\n\t\t{\n\t\t\tname:     \"contains autogroup:nonroot\",\n\t\t\tusers:    SSHUsers{SSHUser(AutoGroupNonRoot)},\n\t\t\texpected: true,\n\t\t},\n\t\t{\n\t\t\tname:     \"does not contain autogroup:nonroot\",\n\t\t\tusers:    SSHUsers{\"ubuntu\", \"admin\", \"root\"},\n\t\t\texpected: false,\n\t\t},\n\t\t{\n\t\t\tname:     \"contains autogroup:nonroot among others\",\n\t\t\tusers:    SSHUsers{\"ubuntu\", SSHUser(AutoGroupNonRoot), \"admin\"},\n\t\t\texpected: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tresult := tt.users.ContainsNonRoot()\n\t\t\tassert.Equal(t, tt.expected, result, \"ContainsNonRoot() should return expected result\")\n\t\t})\n\t}\n}\n\nfunc TestSSHUsers_ContainsLocalpart(t *testing.T) {\n\ttests := []struct {\n\t\tname     string\n\t\tusers    SSHUsers\n\t\texpected bool\n\t}{\n\t\t{\n\t\t\tname:     \"empty users\",\n\t\t\tusers:    SSHUsers{},\n\t\t\texpected: false,\n\t\t},\n\t\t{\n\t\t\tname:     \"contains localpart\",\n\t\t\tusers:    SSHUsers{SSHUser(\"localpart:*@example.com\")},\n\t\t\texpected: true,\n\t\t},\n\t\t{\n\t\t\tname:     \"does not contain localpart\",\n\t\t\tusers:    SSHUsers{\"ubuntu\", \"admin\", \"root\"},\n\t\t\texpected: false,\n\t\t},\n\t\t{\n\t\t\tname:     \"contains localpart among others\",\n\t\t\tusers:    SSHUsers{\"ubuntu\", SSHUser(\"localpart:*@example.com\"), \"admin\"},\n\t\t\texpected: true,\n\t\t},\n\t\t{\n\t\t\tname:     \"multiple localpart entries\",\n\t\t\tusers:    SSHUsers{SSHUser(\"localpart:*@a.com\"), SSHUser(\"localpart:*@b.com\")},\n\t\t\texpected: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tresult := tt.users.ContainsLocalpart()\n\t\t\tassert.Equal(t, tt.expected, result, \"ContainsLocalpart() should 
return expected result\")\n\t\t})\n\t}\n}\n\nfunc TestSSHUsers_LocalpartEntries(t *testing.T) {\n\ttests := []struct {\n\t\tname  string\n\t\tusers SSHUsers\n\t\twant  []SSHUser\n\t}{\n\t\t{\n\t\t\tname:  \"empty users\",\n\t\t\tusers: SSHUsers{},\n\t\t\twant:  nil,\n\t\t},\n\t\t{\n\t\t\tname:  \"no localpart entries\",\n\t\t\tusers: SSHUsers{\"root\", \"ubuntu\", SSHUser(AutoGroupNonRoot)},\n\t\t\twant:  nil,\n\t\t},\n\t\t{\n\t\t\tname:  \"single localpart entry\",\n\t\t\tusers: SSHUsers{\"root\", SSHUser(\"localpart:*@example.com\"), \"ubuntu\"},\n\t\t\twant:  []SSHUser{SSHUser(\"localpart:*@example.com\")},\n\t\t},\n\t\t{\n\t\t\tname:  \"multiple localpart entries\",\n\t\t\tusers: SSHUsers{SSHUser(\"localpart:*@a.com\"), \"root\", SSHUser(\"localpart:*@b.com\")},\n\t\t\twant:  []SSHUser{SSHUser(\"localpart:*@a.com\"), SSHUser(\"localpart:*@b.com\")},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot := tt.users.LocalpartEntries()\n\t\t\tif diff := cmp.Diff(tt.want, got); diff != \"\" {\n\t\t\t\tt.Errorf(\"LocalpartEntries() unexpected result (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestSSHUser_ParseLocalpart(t *testing.T) {\n\ttests := []struct {\n\t\tname           string\n\t\tuser           SSHUser\n\t\texpectedDomain string\n\t\texpectErr      bool\n\t}{\n\t\t{\n\t\t\tname:           \"valid localpart\",\n\t\t\tuser:           SSHUser(\"localpart:*@example.com\"),\n\t\t\texpectedDomain: \"example.com\",\n\t\t},\n\t\t{\n\t\t\tname:           \"valid localpart with subdomain\",\n\t\t\tuser:           SSHUser(\"localpart:*@corp.example.com\"),\n\t\t\texpectedDomain: \"corp.example.com\",\n\t\t},\n\t\t{\n\t\t\tname:      \"missing prefix\",\n\t\t\tuser:      SSHUser(\"ubuntu\"),\n\t\t\texpectErr: true,\n\t\t},\n\t\t{\n\t\t\tname:      \"missing @ sign\",\n\t\t\tuser:      SSHUser(\"localpart:foo\"),\n\t\t\texpectErr: true,\n\t\t},\n\t\t{\n\t\t\tname:      \"non-wildcard local part\",\n\t\t\tuser:      SSHUser(\"localpart:alice@example.com\"),\n\t\t\texpectErr: true,\n\t\t},\n\t\t{\n\t\t\tname:      \"empty domain\",\n\t\t\tuser:      SSHUser(\"localpart:*@\"),\n\t\t\texpectErr: true,\n\t\t},\n\t\t{\n\t\t\tname:      \"just prefix\",\n\t\t\tuser:      SSHUser(\"localpart:\"),\n\t\t\texpectErr: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tdomain, err := tt.user.ParseLocalpart()\n\t\t\tif tt.expectErr {\n\t\t\t\trequire.Error(t, err)\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.Equal(t, tt.expectedDomain, domain)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc mustIPSet(prefixes ...string) *netipx.IPSet {\n\tvar builder netipx.IPSetBuilder\n\tfor _, p := range prefixes {\n\t\tbuilder.AddPrefix(mp(p))\n\t}\n\n\tipSet, _ := builder.IPSet()\n\n\treturn ipSet\n}\n\nfunc ipSetComparer(x, y *netipx.IPSet) bool {\n\tif x == nil || y == nil {\n\t\treturn x == y\n\t}\n\n\treturn cmp.Equal(x.Prefixes(), y.Prefixes(), util.Comparers...)\n}\n\nfunc TestNodeCanApproveRoute(t *testing.T) {\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"user1\"},\n\t\t{Model: gorm.Model{ID: 2}, Name: \"user2\"},\n\t\t{Model: gorm.Model{ID: 3}, Name: \"user3\"},\n\t}\n\n\tnodes := types.Nodes{\n\t\t{\n\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\tUser: &users[0],\n\t\t},\n\t\t{\n\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\tUser: &users[1],\n\t\t},\n\t\t{\n\t\t\tIPv4: ap(\"100.64.0.3\"),\n\t\t\tUser: &users[2],\n\t\t},\n\t}\n\n\ttests := []struct {\n\t\tname    string\n\t\tpolicy  
*Policy\n\t\tnode    *types.Node\n\t\troute   netip.Prefix\n\t\twant    bool\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"single-route-approval\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tAutoApprovers: AutoApproverPolicy{\n\t\t\t\t\tRoutes: map[netip.Prefix]AutoApprovers{\n\t\t\t\t\t\tmp(\"10.0.0.0/24\"): {new(Username(\"user1@\"))},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnode:  nodes[0],\n\t\t\troute: mp(\"10.0.0.0/24\"),\n\t\t\twant:  true,\n\t\t},\n\t\t{\n\t\t\tname: \"multiple-routes-approval\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tAutoApprovers: AutoApproverPolicy{\n\t\t\t\t\tRoutes: map[netip.Prefix]AutoApprovers{\n\t\t\t\t\t\tmp(\"10.0.0.0/24\"): {new(Username(\"user1@\"))},\n\t\t\t\t\t\tmp(\"10.0.1.0/24\"): {new(Username(\"user2@\"))},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnode:  nodes[1],\n\t\t\troute: mp(\"10.0.1.0/24\"),\n\t\t\twant:  true,\n\t\t},\n\t\t{\n\t\t\tname: \"exit-node-approval\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tAutoApprovers: AutoApproverPolicy{\n\t\t\t\t\tExitNode: AutoApprovers{new(Username(\"user1@\"))},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnode:  nodes[0],\n\t\t\troute: tsaddr.AllIPv4(),\n\t\t\twant:  true,\n\t\t},\n\t\t{\n\t\t\tname: \"group-route-approval\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tGroups: Groups{\n\t\t\t\t\t\"group:testgroup\": Usernames{\"user1@\", \"user2@\"},\n\t\t\t\t},\n\t\t\t\tAutoApprovers: AutoApproverPolicy{\n\t\t\t\t\tRoutes: map[netip.Prefix]AutoApprovers{\n\t\t\t\t\t\tmp(\"10.0.0.0/24\"): {new(Group(\"group:testgroup\"))},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnode:  nodes[1],\n\t\t\troute: mp(\"10.0.0.0/24\"),\n\t\t\twant:  true,\n\t\t},\n\t\t{\n\t\t\tname: \"mixed-routes-and-exit-nodes-approval\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tGroups: Groups{\n\t\t\t\t\t\"group:testgroup\": Usernames{\"user1@\", \"user2@\"},\n\t\t\t\t},\n\t\t\t\tAutoApprovers: AutoApproverPolicy{\n\t\t\t\t\tRoutes: map[netip.Prefix]AutoApprovers{\n\t\t\t\t\t\tmp(\"10.0.0.0/24\"): {new(Group(\"group:testgroup\"))},\n\t\t\t\t\t\tmp(\"10.0.1.0/24\"): {new(Username(\"user3@\"))},\n\t\t\t\t\t},\n\t\t\t\t\tExitNode: AutoApprovers{new(Username(\"user1@\"))},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnode:  nodes[0],\n\t\t\troute: tsaddr.AllIPv4(),\n\t\t\twant:  true,\n\t\t},\n\t\t{\n\t\t\tname: \"no-approval\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tAutoApprovers: AutoApproverPolicy{\n\t\t\t\t\tRoutes: map[netip.Prefix]AutoApprovers{\n\t\t\t\t\t\tmp(\"10.0.0.0/24\"): {new(Username(\"user2@\"))},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnode:  nodes[0],\n\t\t\troute: mp(\"10.0.0.0/24\"),\n\t\t\twant:  false,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tb, err := json.Marshal(tt.policy)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tpm, err := NewPolicyManager(b, users, nodes.ViewSlice())\n\t\t\trequire.NoErrorf(t, err, \"NewPolicyManager() error = %v\", err)\n\n\t\t\tgot := pm.NodeCanApproveRoute(tt.node.View(), tt.route)\n\t\t\tif got != tt.want {\n\t\t\t\tt.Errorf(\"NodeCanApproveRoute() = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestResolveTagOwners(t *testing.T) {\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"user1\"},\n\t\t{Model: gorm.Model{ID: 2}, Name: \"user2\"},\n\t\t{Model: gorm.Model{ID: 3}, Name: \"user3\"},\n\t}\n\n\tnodes := types.Nodes{\n\t\t{\n\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\tUser: &users[0],\n\t\t},\n\t\t{\n\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\tUser: &users[1],\n\t\t},\n\t\t{\n\t\t\tIPv4: ap(\"100.64.0.3\"),\n\t\t\tUser: &users[2],\n\t\t},\n\t}\n\n\ttests := []struct {\n\t\tname    
string\n\t\tpolicy  *Policy\n\t\twant    map[Tag]*netipx.IPSet\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"single-tag-owner\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:test\"): Owners{new(Username(\"user1@\"))},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: map[Tag]*netipx.IPSet{\n\t\t\t\tTag(\"tag:test\"): mustIPSet(\"100.64.0.1/32\"),\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"multiple-tag-owners\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:test\"): Owners{new(Username(\"user1@\")), new(Username(\"user2@\"))},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: map[Tag]*netipx.IPSet{\n\t\t\t\tTag(\"tag:test\"): mustIPSet(\"100.64.0.1/32\", \"100.64.0.2/32\"),\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"group-tag-owner\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tGroups: Groups{\n\t\t\t\t\t\"group:testgroup\": Usernames{\"user1@\", \"user2@\"},\n\t\t\t\t},\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:test\"): Owners{new(Group(\"group:testgroup\"))},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: map[Tag]*netipx.IPSet{\n\t\t\t\tTag(\"tag:test\"): mustIPSet(\"100.64.0.1/32\", \"100.64.0.2/32\"),\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"tag-owns-tag\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:bigbrother\"):   Owners{new(Username(\"user1@\"))},\n\t\t\t\t\tTag(\"tag:smallbrother\"): Owners{new(Tag(\"tag:bigbrother\"))},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: map[Tag]*netipx.IPSet{\n\t\t\t\tTag(\"tag:bigbrother\"):   mustIPSet(\"100.64.0.1/32\"),\n\t\t\t\tTag(\"tag:smallbrother\"): mustIPSet(\"100.64.0.1/32\"),\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t}\n\n\tcmps := append(util.Comparers, cmp.Comparer(ipSetComparer))\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot, err := resolveTagOwners(tt.policy, users, nodes.ViewSlice())\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"resolveTagOwners() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif diff := cmp.Diff(tt.want, got, cmps...); diff != \"\" {\n\t\t\t\tt.Errorf(\"resolveTagOwners() mismatch (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestNodeCanHaveTag(t *testing.T) {\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"user1\"},\n\t\t{Model: gorm.Model{ID: 2}, Name: \"user2\"},\n\t\t{Model: gorm.Model{ID: 3}, Name: \"user3\"},\n\t}\n\n\tnodes := types.Nodes{\n\t\t{\n\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\tUser: &users[0],\n\t\t},\n\t\t{\n\t\t\tIPv4: ap(\"100.64.0.2\"),\n\t\t\tUser: &users[1],\n\t\t},\n\t\t{\n\t\t\tIPv4: ap(\"100.64.0.3\"),\n\t\t\tUser: &users[2],\n\t\t},\n\t}\n\n\ttests := []struct {\n\t\tname    string\n\t\tpolicy  *Policy\n\t\tnode    *types.Node\n\t\ttag     string\n\t\twant    bool\n\t\twantErr string\n\t}{\n\t\t{\n\t\t\tname: \"single-tag-owner\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:test\"): Owners{new(Username(\"user1@\"))},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnode: nodes[0],\n\t\t\ttag:  \"tag:test\",\n\t\t\twant: true,\n\t\t},\n\t\t{\n\t\t\tname: \"multiple-tag-owners\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:test\"): Owners{new(Username(\"user1@\")), new(Username(\"user2@\"))},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnode: nodes[1],\n\t\t\ttag:  \"tag:test\",\n\t\t\twant: true,\n\t\t},\n\t\t{\n\t\t\tname: \"group-tag-owner\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tGroups: Groups{\n\t\t\t\t\t\"group:testgroup\": Usernames{\"user1@\", 
\"user2@\"},\n\t\t\t\t},\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:test\"): Owners{new(Group(\"group:testgroup\"))},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnode: nodes[1],\n\t\t\ttag:  \"tag:test\",\n\t\t\twant: true,\n\t\t},\n\t\t{\n\t\t\tname: \"invalid-group\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tGroups: Groups{\n\t\t\t\t\t\"group:testgroup\": Usernames{\"invalid\"},\n\t\t\t\t},\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:test\"): Owners{new(Group(\"group:testgroup\"))},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnode:    nodes[0],\n\t\t\ttag:     \"tag:test\",\n\t\t\twant:    false,\n\t\t\twantErr: \"username must contain @\",\n\t\t},\n\t\t{\n\t\t\tname: \"node-cannot-have-tag\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:test\"): Owners{new(Username(\"user2@\"))},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnode: nodes[0],\n\t\t\ttag:  \"tag:test\",\n\t\t\twant: false,\n\t\t},\n\t\t{\n\t\t\tname: \"node-with-unauthorized-tag-different-user\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:prod\"): Owners{new(Username(\"user1@\"))},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnode: nodes[2], // user3's node\n\t\t\ttag:  \"tag:prod\",\n\t\t\twant: false,\n\t\t},\n\t\t{\n\t\t\tname: \"node-with-multiple-tags-one-unauthorized\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:web\"):      Owners{new(Username(\"user1@\"))},\n\t\t\t\t\tTag(\"tag:database\"): Owners{new(Username(\"user2@\"))},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnode: nodes[0], // user1's node\n\t\t\ttag:  \"tag:database\",\n\t\t\twant: false, // user1 cannot have tag:database (owned by user2)\n\t\t},\n\t\t{\n\t\t\tname: \"empty-tagowners-map\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{},\n\t\t\t},\n\t\t\tnode: nodes[0],\n\t\t\ttag:  \"tag:test\",\n\t\t\twant: false, // No one can have tags if tagOwners is empty\n\t\t},\n\t\t{\n\t\t\tname: \"tag-not-in-tagowners\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:prod\"): Owners{new(Username(\"user1@\"))},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnode: nodes[0],\n\t\t\ttag:  \"tag:dev\", // This tag is not defined in tagOwners\n\t\t\twant: false,\n\t\t},\n\t\t// Test cases for nodes without IPs (new registration scenario)\n\t\t// These test the user-based fallback in NodeCanHaveTag\n\t\t{\n\t\t\tname: \"node-without-ip-user-owns-tag\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:test\"): Owners{new(Username(\"user1@\"))},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnode: &types.Node{\n\t\t\t\t// No IPv4 or IPv6 - simulates new node registration\n\t\t\t\tUser:   &users[0],\n\t\t\t\tUserID: new(users[0].ID),\n\t\t\t},\n\t\t\ttag:  \"tag:test\",\n\t\t\twant: true, // Should succeed via user-based fallback\n\t\t},\n\t\t{\n\t\t\tname: \"node-without-ip-user-does-not-own-tag\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:test\"): Owners{new(Username(\"user2@\"))},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnode: &types.Node{\n\t\t\t\t// No IPv4 or IPv6 - simulates new node registration\n\t\t\t\tUser:   &users[0], // user1, but tag owned by user2\n\t\t\t\tUserID: new(users[0].ID),\n\t\t\t},\n\t\t\ttag:  \"tag:test\",\n\t\t\twant: false, // user1 does not own tag:test\n\t\t},\n\t\t{\n\t\t\tname: \"node-without-ip-group-owns-tag\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tGroups: Groups{\n\t\t\t\t\t\"group:admins\": Usernames{\"user1@\", \"user2@\"},\n\t\t\t\t},\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:admin\"): 
Owners{new(Group(\"group:admins\"))},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnode: &types.Node{\n\t\t\t\t// No IPv4 or IPv6 - simulates new node registration\n\t\t\t\tUser:   &users[1], // user2 is in group:admins\n\t\t\t\tUserID: new(users[1].ID),\n\t\t\t},\n\t\t\ttag:  \"tag:admin\",\n\t\t\twant: true, // Should succeed via group membership\n\t\t},\n\t\t{\n\t\t\tname: \"node-without-ip-not-in-group\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tGroups: Groups{\n\t\t\t\t\t\"group:admins\": Usernames{\"user1@\"},\n\t\t\t\t},\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:admin\"): Owners{new(Group(\"group:admins\"))},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnode: &types.Node{\n\t\t\t\t// No IPv4 or IPv6 - simulates new node registration\n\t\t\t\tUser:   &users[1], // user2 is NOT in group:admins\n\t\t\t\tUserID: new(users[1].ID),\n\t\t\t},\n\t\t\ttag:  \"tag:admin\",\n\t\t\twant: false, // user2 is not in group:admins\n\t\t},\n\t\t{\n\t\t\tname: \"node-without-ip-no-user\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:test\"): Owners{new(Username(\"user1@\"))},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnode: &types.Node{\n\t\t\t\t// No IPv4, IPv6, or User - edge case\n\t\t\t},\n\t\t\ttag:  \"tag:test\",\n\t\t\twant: false, // No user means can't authorize via user-based fallback\n\t\t},\n\t\t{\n\t\t\tname: \"node-without-ip-mixed-owners-user-match\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tGroups: Groups{\n\t\t\t\t\t\"group:ops\": Usernames{\"user3@\"},\n\t\t\t\t},\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:server\"): Owners{\n\t\t\t\t\t\tnew(Username(\"user1@\")),\n\t\t\t\t\t\tnew(Group(\"group:ops\")),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnode: &types.Node{\n\t\t\t\tUser:   &users[0], // user1 directly owns the tag\n\t\t\t\tUserID: new(users[0].ID),\n\t\t\t},\n\t\t\ttag:  \"tag:server\",\n\t\t\twant: true,\n\t\t},\n\t\t{\n\t\t\tname: \"node-without-ip-mixed-owners-group-match\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tGroups: Groups{\n\t\t\t\t\t\"group:ops\": Usernames{\"user3@\"},\n\t\t\t\t},\n\t\t\t\tTagOwners: TagOwners{\n\t\t\t\t\tTag(\"tag:server\"): Owners{\n\t\t\t\t\t\tnew(Username(\"user1@\")),\n\t\t\t\t\t\tnew(Group(\"group:ops\")),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnode: &types.Node{\n\t\t\t\tUser:   &users[2], // user3 is in group:ops\n\t\t\t\tUserID: new(users[2].ID),\n\t\t\t},\n\t\t\ttag:  \"tag:server\",\n\t\t\twant: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tb, err := json.Marshal(tt.policy)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tpm, err := NewPolicyManager(b, users, nodes.ViewSlice())\n\t\t\tif tt.wantErr != \"\" {\n\t\t\t\trequire.ErrorContains(t, err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\n\t\t\tgot := pm.NodeCanHaveTag(tt.node.View(), tt.tag)\n\t\t\tif got != tt.want {\n\t\t\t\tt.Errorf(\"NodeCanHaveTag() = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestUserMatchesOwner(t *testing.T) {\n\tusers := types.Users{\n\t\t{Model: gorm.Model{ID: 1}, Name: \"user1\"},\n\t\t{Model: gorm.Model{ID: 2}, Name: \"user2\"},\n\t\t{Model: gorm.Model{ID: 3}, Name: \"user3\"},\n\t}\n\n\ttests := []struct {\n\t\tname   string\n\t\tpolicy *Policy\n\t\tuser   types.User\n\t\towner  Owner\n\t\twant   bool\n\t}{\n\t\t{\n\t\t\tname:   \"username-match\",\n\t\t\tpolicy: &Policy{},\n\t\t\tuser:   users[0],\n\t\t\towner:  new(Username(\"user1@\")),\n\t\t\twant:   true,\n\t\t},\n\t\t{\n\t\t\tname:   \"username-no-match\",\n\t\t\tpolicy: &Policy{},\n\t\t\tuser:   users[0],\n\t\t\towner:  
new(Username(\"user2@\")),\n\t\t\twant:   false,\n\t\t},\n\t\t{\n\t\t\tname: \"group-match\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tGroups: Groups{\n\t\t\t\t\t\"group:admins\": Usernames{\"user1@\", \"user2@\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tuser:  users[1], // user2 is in group:admins\n\t\t\towner: new(Group(\"group:admins\")),\n\t\t\twant:  true,\n\t\t},\n\t\t{\n\t\t\tname: \"group-no-match\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tGroups: Groups{\n\t\t\t\t\t\"group:admins\": Usernames{\"user1@\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tuser:  users[1], // user2 is NOT in group:admins\n\t\t\towner: new(Group(\"group:admins\")),\n\t\t\twant:  false,\n\t\t},\n\t\t{\n\t\t\tname: \"group-not-defined\",\n\t\t\tpolicy: &Policy{\n\t\t\t\tGroups: Groups{},\n\t\t\t},\n\t\t\tuser:  users[0],\n\t\t\towner: new(Group(\"group:undefined\")),\n\t\t\twant:  false,\n\t\t},\n\t\t{\n\t\t\tname:   \"nil-username-owner\",\n\t\t\tpolicy: &Policy{},\n\t\t\tuser:   users[0],\n\t\t\towner:  (*Username)(nil),\n\t\t\twant:   false,\n\t\t},\n\t\t{\n\t\t\tname:   \"nil-group-owner\",\n\t\t\tpolicy: &Policy{},\n\t\t\tuser:   users[0],\n\t\t\towner:  (*Group)(nil),\n\t\t\twant:   false,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\t// Create a minimal PolicyManager for testing\n\t\t\t// We need nodes with IPs to initialize the tagOwnerMap\n\t\t\tnodes := types.Nodes{\n\t\t\t\t{\n\t\t\t\t\tIPv4: ap(\"100.64.0.1\"),\n\t\t\t\t\tUser: &users[0],\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tb, err := json.Marshal(tt.policy)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tpm, err := NewPolicyManager(b, users, nodes.ViewSlice())\n\t\t\trequire.NoError(t, err)\n\n\t\t\tgot := pm.userMatchesOwner(tt.user.View(), tt.owner)\n\t\t\tif got != tt.want {\n\t\t\t\tt.Errorf(\"userMatchesOwner() = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestACL_UnmarshalJSON_WithCommentFields(t *testing.T) {\n\ttests := []struct {\n\t\tname     string\n\t\tinput    string\n\t\texpected ACL\n\t\twantErr  bool\n\t}{\n\t\t{\n\t\t\tname: \"basic ACL with comment fields\",\n\t\t\tinput: `{\n\t\t\t\t\"#comment\": \"This is a comment\",\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"proto\": \"tcp\",\n\t\t\t\t\"src\": [\"user1@example.com\"],\n\t\t\t\t\"dst\": [\"tag:server:80\"]\n\t\t\t}`,\n\t\t\texpected: ACL{\n\t\t\t\tAction:   \"accept\",\n\t\t\t\tProtocol: \"tcp\",\n\t\t\t\tSources:  []Alias{mustParseAlias(\"user1@example.com\")},\n\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t{\n\t\t\t\t\t\tAlias: mustParseAlias(\"tag:server\"),\n\t\t\t\t\t\tPorts: []tailcfg.PortRange{{First: 80, Last: 80}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"multiple comment fields\",\n\t\t\tinput: `{\n\t\t\t\t\"#description\": \"Allow access to web servers\",\n\t\t\t\t\"#note\": \"Created by admin\",\n\t\t\t\t\"#created_date\": \"2024-01-15\",\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"proto\": \"tcp\",\n\t\t\t\t\"src\": [\"group:developers\"],\n\t\t\t\t\"dst\": [\"10.0.0.0/24:443\"]\n\t\t\t}`,\n\t\t\texpected: ACL{\n\t\t\t\tAction:   \"accept\",\n\t\t\t\tProtocol: \"tcp\",\n\t\t\t\tSources:  []Alias{mustParseAlias(\"group:developers\")},\n\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t{\n\t\t\t\t\t\tAlias: mustParseAlias(\"10.0.0.0/24\"),\n\t\t\t\t\t\tPorts: []tailcfg.PortRange{{First: 443, Last: 443}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"comment field with complex object value\",\n\t\t\tinput: `{\n\t\t\t\t\"#metadata\": 
{\n\t\t\t\t\t\"description\": \"Complex comment object\",\n\t\t\t\t\t\"tags\": [\"web\", \"production\"],\n\t\t\t\t\t\"created_by\": \"admin\"\n\t\t\t\t},\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"proto\": \"udp\",\n\t\t\t\t\"src\": [\"*\"],\n\t\t\t\t\"dst\": [\"autogroup:internet:53\"]\n\t\t\t}`,\n\t\t\texpected: ACL{\n\t\t\t\tAction:   ActionAccept,\n\t\t\t\tProtocol: \"udp\",\n\t\t\t\tSources:  []Alias{Wildcard},\n\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t{\n\t\t\t\t\t\tAlias: mustParseAlias(\"autogroup:internet\"),\n\t\t\t\t\t\tPorts: []tailcfg.PortRange{{First: 53, Last: 53}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"invalid action should fail\",\n\t\t\tinput: `{\n\t\t\t\t\"action\": \"deny\",\n\t\t\t\t\"proto\": \"tcp\",\n\t\t\t\t\"src\": [\"*\"],\n\t\t\t\t\"dst\": [\"*:*\"]\n\t\t\t}`,\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"no comment fields\",\n\t\t\tinput: `{\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"proto\": \"icmp\",\n\t\t\t\t\"src\": [\"tag:client\"],\n\t\t\t\t\"dst\": [\"tag:server:*\"]\n\t\t\t}`,\n\t\t\texpected: ACL{\n\t\t\t\tAction:   ActionAccept,\n\t\t\t\tProtocol: \"icmp\",\n\t\t\t\tSources:  []Alias{mustParseAlias(\"tag:client\")},\n\t\t\t\tDestinations: []AliasWithPorts{\n\t\t\t\t\t{\n\t\t\t\t\t\tAlias: mustParseAlias(\"tag:server\"),\n\t\t\t\t\t\tPorts: []tailcfg.PortRange{tailcfg.PortRangeAny},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"only comment fields\",\n\t\t\tinput: `{\n\t\t\t\t\"#comment\": \"This rule is disabled\",\n\t\t\t\t\"#reason\": \"Temporary disable for maintenance\"\n\t\t\t}`,\n\t\t\texpected: ACL{\n\t\t\t\tAction:       Action(\"\"),\n\t\t\t\tProtocol:     Protocol(\"\"),\n\t\t\t\tSources:      nil,\n\t\t\t\tDestinations: nil,\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"invalid JSON\",\n\t\t\tinput: `{\n\t\t\t\t\"#comment\": \"This is a comment\",\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"proto\": \"tcp\"\n\t\t\t\t\"src\": [\"invalid json\"]\n\t\t\t}`,\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"invalid field after comment filtering\",\n\t\t\tinput: `{\n\t\t\t\t\"#comment\": \"This is a comment\",\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"proto\": \"tcp\",\n\t\t\t\t\"src\": [\"user1@example.com\"],\n\t\t\t\t\"dst\": [\"invalid-destination\"]\n\t\t\t}`,\n\t\t\twantErr: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tvar acl ACL\n\n\t\t\terr := json.Unmarshal([]byte(tt.input), &acl)\n\n\t\t\tif tt.wantErr {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expected.Action, acl.Action)\n\t\t\tassert.Equal(t, tt.expected.Protocol, acl.Protocol)\n\t\t\tassert.Len(t, acl.Sources, len(tt.expected.Sources))\n\t\t\tassert.Len(t, acl.Destinations, len(tt.expected.Destinations))\n\n\t\t\t// Compare sources\n\t\t\tfor i, expectedSrc := range tt.expected.Sources {\n\t\t\t\tif i < len(acl.Sources) {\n\t\t\t\t\tassert.Equal(t, expectedSrc, acl.Sources[i])\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Compare destinations\n\t\t\tfor i, expectedDst := range tt.expected.Destinations {\n\t\t\t\tif i < len(acl.Destinations) {\n\t\t\t\t\tassert.Equal(t, expectedDst.Alias, acl.Destinations[i].Alias)\n\t\t\t\t\tassert.Equal(t, expectedDst.Ports, acl.Destinations[i].Ports)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestACL_UnmarshalJSON_Roundtrip(t *testing.T) {\n\t// Test that marshaling and unmarshaling preserves data 
(excluding comments)\n\toriginal := ACL{\n\t\tAction:   \"accept\",\n\t\tProtocol: \"tcp\",\n\t\tSources:  []Alias{mustParseAlias(\"group:admins\")},\n\t\tDestinations: []AliasWithPorts{\n\t\t\t{\n\t\t\t\tAlias: mustParseAlias(\"tag:server\"),\n\t\t\t\tPorts: []tailcfg.PortRange{{First: 22, Last: 22}, {First: 80, Last: 80}},\n\t\t\t},\n\t\t},\n\t}\n\n\t// Marshal to JSON\n\tjsonBytes, err := json.Marshal(original)\n\trequire.NoError(t, err)\n\n\t// Unmarshal back\n\tvar unmarshaled ACL\n\n\terr = json.Unmarshal(jsonBytes, &unmarshaled)\n\trequire.NoError(t, err)\n\n\t// Should be equal\n\tassert.Equal(t, original.Action, unmarshaled.Action)\n\tassert.Equal(t, original.Protocol, unmarshaled.Protocol)\n\tassert.Len(t, unmarshaled.Sources, len(original.Sources))\n\tassert.Len(t, unmarshaled.Destinations, len(original.Destinations))\n}\n\nfunc TestACL_UnmarshalJSON_PolicyIntegration(t *testing.T) {\n\t// Test that ACL unmarshaling works within a Policy context\n\tpolicyJSON := `{\n\t\t\"groups\": {\n\t\t\t\"group:developers\": [\"user1@example.com\", \"user2@example.com\"]\n\t\t},\n\t\t\"tagOwners\": {\n\t\t\t\"tag:server\": [\"group:developers\"]\n\t\t},\n\t\t\"acls\": [\n\t\t\t{\n\t\t\t\t\"#description\": \"Allow developers to access servers\",\n\t\t\t\t\"#priority\": \"high\",\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"proto\": \"tcp\",\n\t\t\t\t\"src\": [\"group:developers\"],\n\t\t\t\t\"dst\": [\"tag:server:22,80,443\"]\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"#note\": \"Allow all other traffic\",\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"proto\": \"tcp\",\n\t\t\t\t\"src\": [\"*\"],\n\t\t\t\t\"dst\": [\"*:*\"]\n\t\t\t}\n\t\t]\n\t}`\n\n\tpolicy, err := unmarshalPolicy([]byte(policyJSON))\n\trequire.NoError(t, err)\n\trequire.NotNil(t, policy)\n\n\t// Check that ACLs were parsed correctly\n\trequire.Len(t, policy.ACLs, 2)\n\n\t// First ACL\n\tacl1 := policy.ACLs[0]\n\tassert.Equal(t, ActionAccept, acl1.Action)\n\tassert.Equal(t, Protocol(\"tcp\"), acl1.Protocol)\n\trequire.Len(t, acl1.Sources, 1)\n\trequire.Len(t, acl1.Destinations, 1)\n\n\t// Second ACL\n\tacl2 := policy.ACLs[1]\n\tassert.Equal(t, ActionAccept, acl2.Action)\n\tassert.Equal(t, Protocol(\"tcp\"), acl2.Protocol)\n\trequire.Len(t, acl2.Sources, 1)\n\trequire.Len(t, acl2.Destinations, 1)\n}\n\nfunc TestACL_UnmarshalJSON_InvalidAction(t *testing.T) {\n\t// Test that invalid actions are rejected\n\tpolicyJSON := `{\n\t\t\"acls\": [\n\t\t\t{\n\t\t\t\t\"action\": \"deny\",\n\t\t\t\t\"proto\": \"tcp\",\n\t\t\t\t\"src\": [\"*\"],\n\t\t\t\t\"dst\": [\"*:*\"]\n\t\t\t}\n\t\t]\n\t}`\n\n\t_, err := unmarshalPolicy([]byte(policyJSON))\n\trequire.Error(t, err)\n\tassert.Contains(t, err.Error(), `invalid ACL action: \"deny\"`)\n}\n\n// Helper function to parse aliases for testing.\nfunc mustParseAlias(s string) Alias {\n\talias, err := parseAlias(s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn alias\n}\n\nfunc TestFlattenTagOwners(t *testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\tinput   TagOwners\n\t\twant    TagOwners\n\t\twantErr string\n\t}{\n\t\t{\n\t\t\tname: \"tag-owns-tag\",\n\t\t\tinput: TagOwners{\n\t\t\t\tTag(\"tag:bigbrother\"):   Owners{new(Group(\"group:user1\"))},\n\t\t\t\tTag(\"tag:smallbrother\"): Owners{new(Tag(\"tag:bigbrother\"))},\n\t\t\t},\n\t\t\twant: TagOwners{\n\t\t\t\tTag(\"tag:bigbrother\"):   Owners{new(Group(\"group:user1\"))},\n\t\t\t\tTag(\"tag:smallbrother\"): Owners{new(Group(\"group:user1\"))},\n\t\t\t},\n\t\t\twantErr: \"\",\n\t\t},\n\t\t{\n\t\t\tname: \"circular-reference\",\n\t\t\tinput: 
TagOwners{\n\t\t\t\tTag(\"tag:a\"): Owners{new(Tag(\"tag:b\"))},\n\t\t\t\tTag(\"tag:b\"): Owners{new(Tag(\"tag:a\"))},\n\t\t\t},\n\t\t\twant:    nil,\n\t\t\twantErr: \"circular reference detected: tag:a -> tag:b\",\n\t\t},\n\t\t{\n\t\t\tname: \"mixed-owners\",\n\t\t\tinput: TagOwners{\n\t\t\t\tTag(\"tag:x\"): Owners{new(Username(\"user1@\")), new(Tag(\"tag:y\"))},\n\t\t\t\tTag(\"tag:y\"): Owners{new(Username(\"user2@\"))},\n\t\t\t},\n\t\t\twant: TagOwners{\n\t\t\t\tTag(\"tag:x\"): Owners{new(Username(\"user1@\")), new(Username(\"user2@\"))},\n\t\t\t\tTag(\"tag:y\"): Owners{new(Username(\"user2@\"))},\n\t\t\t},\n\t\t\twantErr: \"\",\n\t\t},\n\t\t{\n\t\t\tname: \"mixed-dupe-owners\",\n\t\t\tinput: TagOwners{\n\t\t\t\tTag(\"tag:x\"): Owners{new(Username(\"user1@\")), new(Tag(\"tag:y\"))},\n\t\t\t\tTag(\"tag:y\"): Owners{new(Username(\"user1@\"))},\n\t\t\t},\n\t\t\twant: TagOwners{\n\t\t\t\tTag(\"tag:x\"): Owners{new(Username(\"user1@\"))},\n\t\t\t\tTag(\"tag:y\"): Owners{new(Username(\"user1@\"))},\n\t\t\t},\n\t\t\twantErr: \"\",\n\t\t},\n\t\t{\n\t\t\tname: \"no-tag-owners\",\n\t\t\tinput: TagOwners{\n\t\t\t\tTag(\"tag:solo\"): Owners{new(Username(\"user1@\"))},\n\t\t\t},\n\t\t\twant: TagOwners{\n\t\t\t\tTag(\"tag:solo\"): Owners{new(Username(\"user1@\"))},\n\t\t\t},\n\t\t\twantErr: \"\",\n\t\t},\n\t\t{\n\t\t\tname: \"tag-long-owner-chain\",\n\t\t\tinput: TagOwners{\n\t\t\t\tTag(\"tag:a\"): Owners{new(Group(\"group:user1\"))},\n\t\t\t\tTag(\"tag:b\"): Owners{new(Tag(\"tag:a\"))},\n\t\t\t\tTag(\"tag:c\"): Owners{new(Tag(\"tag:b\"))},\n\t\t\t\tTag(\"tag:d\"): Owners{new(Tag(\"tag:c\"))},\n\t\t\t\tTag(\"tag:e\"): Owners{new(Tag(\"tag:d\"))},\n\t\t\t\tTag(\"tag:f\"): Owners{new(Tag(\"tag:e\"))},\n\t\t\t\tTag(\"tag:g\"): Owners{new(Tag(\"tag:f\"))},\n\t\t\t},\n\t\t\twant: TagOwners{\n\t\t\t\tTag(\"tag:a\"): Owners{new(Group(\"group:user1\"))},\n\t\t\t\tTag(\"tag:b\"): Owners{new(Group(\"group:user1\"))},\n\t\t\t\tTag(\"tag:c\"): Owners{new(Group(\"group:user1\"))},\n\t\t\t\tTag(\"tag:d\"): Owners{new(Group(\"group:user1\"))},\n\t\t\t\tTag(\"tag:e\"): Owners{new(Group(\"group:user1\"))},\n\t\t\t\tTag(\"tag:f\"): Owners{new(Group(\"group:user1\"))},\n\t\t\t\tTag(\"tag:g\"): Owners{new(Group(\"group:user1\"))},\n\t\t\t},\n\t\t\twantErr: \"\",\n\t\t},\n\t\t{\n\t\t\tname: \"tag-long-circular-chain\",\n\t\t\tinput: TagOwners{\n\t\t\t\tTag(\"tag:a\"): Owners{new(Tag(\"tag:g\"))},\n\t\t\t\tTag(\"tag:b\"): Owners{new(Tag(\"tag:a\"))},\n\t\t\t\tTag(\"tag:c\"): Owners{new(Tag(\"tag:b\"))},\n\t\t\t\tTag(\"tag:d\"): Owners{new(Tag(\"tag:c\"))},\n\t\t\t\tTag(\"tag:e\"): Owners{new(Tag(\"tag:d\"))},\n\t\t\t\tTag(\"tag:f\"): Owners{new(Tag(\"tag:e\"))},\n\t\t\t\tTag(\"tag:g\"): Owners{new(Tag(\"tag:f\"))},\n\t\t\t},\n\t\t\twantErr: \"circular reference detected: tag:a -> tag:b -> tag:c -> tag:d -> tag:e -> tag:f -> tag:g\",\n\t\t},\n\t\t{\n\t\t\tname: \"undefined-tag-reference\",\n\t\t\tinput: TagOwners{\n\t\t\t\tTag(\"tag:a\"): Owners{new(Tag(\"tag:nonexistent\"))},\n\t\t\t},\n\t\t\twantErr: `tag \"tag:a\" references undefined tag \"tag:nonexistent\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"tag-with-empty-owners-is-valid\",\n\t\t\tinput: TagOwners{\n\t\t\t\tTag(\"tag:a\"): Owners{new(Tag(\"tag:b\"))},\n\t\t\t\tTag(\"tag:b\"): Owners{}, // empty owners but exists\n\t\t\t},\n\t\t\twant: TagOwners{\n\t\t\t\tTag(\"tag:a\"): nil,\n\t\t\t\tTag(\"tag:b\"): nil,\n\t\t\t},\n\t\t\twantErr: \"\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot, err := 
flattenTagOwners(tt.input)\n\t\t\tif tt.wantErr != \"\" {\n\t\t\t\tif err == nil {\n\t\t\t\t\tt.Fatalf(\"flattenTagOwners() expected error %q, got nil\", tt.wantErr)\n\t\t\t\t}\n\n\t\t\t\tif err.Error() != tt.wantErr {\n\t\t\t\t\tt.Fatalf(\"flattenTagOwners() expected error %q, got %q\", tt.wantErr, err.Error())\n\t\t\t\t}\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"flattenTagOwners() unexpected error: %v\", err)\n\t\t\t}\n\n\t\t\tif diff := cmp.Diff(tt.want, got); diff != \"\" {\n\t\t\t\tt.Errorf(\"flattenTagOwners() mismatch (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestSSHCheckPeriodUnmarshal(t *testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\tinput   string\n\t\twant    *SSHCheckPeriod\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname:  \"always\",\n\t\t\tinput: `\"always\"`,\n\t\t\twant:  &SSHCheckPeriod{Always: true},\n\t\t},\n\t\t{\n\t\t\tname:  \"1h\",\n\t\t\tinput: `\"1h\"`,\n\t\t\twant:  &SSHCheckPeriod{Duration: time.Hour},\n\t\t},\n\t\t{\n\t\t\tname:  \"30m\",\n\t\t\tinput: `\"30m\"`,\n\t\t\twant:  &SSHCheckPeriod{Duration: 30 * time.Minute},\n\t\t},\n\t\t{\n\t\t\tname:  \"168h\",\n\t\t\tinput: `\"168h\"`,\n\t\t\twant:  &SSHCheckPeriod{Duration: 168 * time.Hour},\n\t\t},\n\t\t{\n\t\t\tname:    \"invalid\",\n\t\t\tinput:   `\"notaduration\"`,\n\t\t\twantErr: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tvar got SSHCheckPeriod\n\n\t\t\terr := json.Unmarshal([]byte(tt.input), &got)\n\t\t\tif tt.wantErr {\n\t\t\t\trequire.Error(t, err)\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, *tt.want, got)\n\t\t})\n\t}\n}\n\nfunc TestSSHCheckPeriodRoundTrip(t *testing.T) {\n\ttests := []struct {\n\t\tname  string\n\t\tinput SSHCheckPeriod\n\t}{\n\t\t{\n\t\t\tname:  \"always\",\n\t\t\tinput: SSHCheckPeriod{Always: true},\n\t\t},\n\t\t{\n\t\t\tname:  \"2h\",\n\t\t\tinput: SSHCheckPeriod{Duration: 2 * time.Hour},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tdata, err := json.Marshal(tt.input)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tvar got SSHCheckPeriod\n\n\t\t\terr = json.Unmarshal(data, &got)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tassert.Equal(t, tt.input, got)\n\t\t})\n\t}\n}\n\nfunc TestSSHCheckPeriodNilInSSH(t *testing.T) {\n\tinput := `{\n\t\t\"action\": \"check\",\n\t\t\"src\": [\"user@\"],\n\t\t\"dst\": [\"autogroup:member\"],\n\t\t\"users\": [\"root\"]\n\t}`\n\n\tvar ssh SSH\n\n\terr := json.Unmarshal([]byte(input), &ssh)\n\trequire.NoError(t, err)\n\tassert.Nil(t, ssh.CheckPeriod)\n}\n\nfunc TestSSHCheckPeriodValidate(t *testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\tperiod  SSHCheckPeriod\n\t\twantErr error\n\t}{\n\t\t{\n\t\t\tname:   \"always is valid\",\n\t\t\tperiod: SSHCheckPeriod{Always: true},\n\t\t},\n\t\t{\n\t\t\tname:   \"1m minimum valid\",\n\t\t\tperiod: SSHCheckPeriod{Duration: time.Minute},\n\t\t},\n\t\t{\n\t\t\tname:   \"168h maximum valid\",\n\t\t\tperiod: SSHCheckPeriod{Duration: 168 * time.Hour},\n\t\t},\n\t\t{\n\t\t\tname:    \"30s below minimum\",\n\t\t\tperiod:  SSHCheckPeriod{Duration: 30 * time.Second},\n\t\t\twantErr: ErrSSHCheckPeriodBelowMin,\n\t\t},\n\t\t{\n\t\t\tname:    \"169h above maximum\",\n\t\t\tperiod:  SSHCheckPeriod{Duration: 169 * time.Hour},\n\t\t\twantErr: ErrSSHCheckPeriodAboveMax,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\terr := tt.period.Validate()\n\t\t\tif tt.wantErr != nil 
{\n\t\t\t\trequire.ErrorIs(t, err, tt.wantErr)\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\t\t})\n\t}\n}\n\nfunc TestSSHCheckPeriodPolicyValidation(t *testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\tssh     SSH\n\t\twantErr error\n\t}{\n\t\t{\n\t\t\tname: \"check with nil period is valid\",\n\t\t\tssh: SSH{\n\t\t\t\tAction:       SSHActionCheck,\n\t\t\t\tSources:      SSHSrcAliases{up(\"user@\")},\n\t\t\t\tDestinations: SSHDstAliases{agp(\"autogroup:member\")},\n\t\t\t\tUsers:        SSHUsers{\"root\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"check with always is valid\",\n\t\t\tssh: SSH{\n\t\t\t\tAction:       SSHActionCheck,\n\t\t\t\tSources:      SSHSrcAliases{up(\"user@\")},\n\t\t\t\tDestinations: SSHDstAliases{agp(\"autogroup:member\")},\n\t\t\t\tUsers:        SSHUsers{\"root\"},\n\t\t\t\tCheckPeriod:  &SSHCheckPeriod{Always: true},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"check with 1h is valid\",\n\t\t\tssh: SSH{\n\t\t\t\tAction:       SSHActionCheck,\n\t\t\t\tSources:      SSHSrcAliases{up(\"user@\")},\n\t\t\t\tDestinations: SSHDstAliases{agp(\"autogroup:member\")},\n\t\t\t\tUsers:        SSHUsers{\"root\"},\n\t\t\t\tCheckPeriod:  &SSHCheckPeriod{Duration: time.Hour},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"accept with checkPeriod is invalid\",\n\t\t\tssh: SSH{\n\t\t\t\tAction:       SSHActionAccept,\n\t\t\t\tSources:      SSHSrcAliases{up(\"user@\")},\n\t\t\t\tDestinations: SSHDstAliases{agp(\"autogroup:member\")},\n\t\t\t\tUsers:        SSHUsers{\"root\"},\n\t\t\t\tCheckPeriod:  &SSHCheckPeriod{Duration: time.Hour},\n\t\t\t},\n\t\t\twantErr: ErrSSHCheckPeriodOnNonCheck,\n\t\t},\n\t\t{\n\t\t\tname: \"check with 30s is invalid\",\n\t\t\tssh: SSH{\n\t\t\t\tAction:       SSHActionCheck,\n\t\t\t\tSources:      SSHSrcAliases{up(\"user@\")},\n\t\t\t\tDestinations: SSHDstAliases{agp(\"autogroup:member\")},\n\t\t\t\tUsers:        SSHUsers{\"root\"},\n\t\t\t\tCheckPeriod:  &SSHCheckPeriod{Duration: 30 * time.Second},\n\t\t\t},\n\t\t\twantErr: ErrSSHCheckPeriodBelowMin,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tpol := &Policy{SSHs: []SSH{tt.ssh}}\n\t\t\terr := pol.validate()\n\n\t\t\tif tt.wantErr != nil {\n\t\t\t\trequire.ErrorIs(t, err, tt.wantErr)\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/utils.go",
    "content": "package v2\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net/netip\"\n\t\"slices\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"tailscale.com/tailcfg\"\n)\n\n// Port parsing errors.\nvar (\n\tErrInputMissingColon      = errors.New(\"input must contain a colon character separating destination and port\")\n\tErrInputStartsWithColon   = errors.New(\"input cannot start with a colon character\")\n\tErrInputEndsWithColon     = errors.New(\"input cannot end with a colon character\")\n\tErrInvalidPortRangeFormat = errors.New(\"invalid port range format\")\n\tErrPortRangeInverted      = errors.New(\"invalid port range: first port is greater than last port\")\n\tErrPortMustBePositive     = errors.New(\"first port must be >0, or use '*' for wildcard\")\n\tErrInvalidPortNumber      = errors.New(\"invalid port number\")\n\tErrPortNumberOutOfRange   = errors.New(\"port number out of range\")\n\tErrBracketsNotIPv6        = errors.New(\"square brackets are only valid around IPv6 addresses\")\n)\n\n// splitDestinationAndPort takes an input string and returns the destination and port as a tuple, or an error if the input is invalid.\n// It supports two bracketed IPv6 forms:\n//   - \"[addr]:port\" (RFC 3986, e.g. \"[::1]:80\")\n//   - \"[addr]/prefix:port\" (e.g. \"[fd7a::1]/128:80,443\")\n//\n// Brackets are only accepted around IPv6 addresses, not IPv4, hostnames, or other alias types.\n// Bracket stripping reduces both forms to bare \"addr:port\" or \"addr/prefix:port\",\n// which the normal LastIndex(\":\") split handles correctly because port strings\n// never contain colons.\nfunc splitDestinationAndPort(input string) (string, string, error) {\n\t// Handle RFC 3986 bracketed IPv6 (e.g. \"[::1]:80\" or \"[fd7a::1]/128:80,443\").\n\t// Strip brackets after validation and fall through to normal parsing.\n\tif strings.HasPrefix(input, \"[\") {\n\t\tcloseBracket := strings.Index(input, \"]\")\n\t\tif closeBracket == -1 {\n\t\t\treturn \"\", \"\", ErrBracketsNotIPv6\n\t\t}\n\n\t\thost := input[1:closeBracket]\n\n\t\taddr, err := netip.ParseAddr(host)\n\t\tif err != nil || !addr.Is6() {\n\t\t\treturn \"\", \"\", fmt.Errorf(\"%w: %q\", ErrBracketsNotIPv6, host)\n\t\t}\n\n\t\trest := input[closeBracket+1:]\n\t\tif len(rest) == 0 || (rest[0] != ':' && rest[0] != '/') {\n\t\t\treturn \"\", \"\", fmt.Errorf(\"%w: %q\", ErrBracketsNotIPv6, input)\n\t\t}\n\n\t\t// Strip brackets: \"[addr]:port\" → \"addr:port\",\n\t\t// \"[addr]/prefix:port\" → \"addr/prefix:port\".\n\t\tinput = host + rest\n\t}\n\n\t// Find the last occurrence of the colon character\n\tlastColonIndex := strings.LastIndex(input, \":\")\n\n\t// Check if the colon character is present and not at the beginning or end of the string\n\tif lastColonIndex == -1 {\n\t\treturn \"\", \"\", ErrInputMissingColon\n\t}\n\n\tif lastColonIndex == 0 {\n\t\treturn \"\", \"\", ErrInputStartsWithColon\n\t}\n\n\tif lastColonIndex == len(input)-1 {\n\t\treturn \"\", \"\", ErrInputEndsWithColon\n\t}\n\n\t// Split the string into destination and port based on the last colon\n\tdestination := input[:lastColonIndex]\n\tport := input[lastColonIndex+1:]\n\n\treturn destination, port, nil\n}\n\n// parsePortRange parses a port definition string and returns a slice of PortRange structs.\nfunc parsePortRange(portDef string) ([]tailcfg.PortRange, error) {\n\tif portDef == \"*\" {\n\t\treturn []tailcfg.PortRange{tailcfg.PortRangeAny}, nil\n\t}\n\n\tvar portRanges []tailcfg.PortRange\n\n\tparts := strings.SplitSeq(portDef, \",\")\n\n\tfor part := range parts {\n\t\tif 
strings.Contains(part, \"-\") {\n\t\t\trangeParts := strings.Split(part, \"-\")\n\n\t\t\trangeParts = slices.DeleteFunc(rangeParts, func(e string) bool {\n\t\t\t\treturn e == \"\"\n\t\t\t})\n\t\t\tif len(rangeParts) != 2 {\n\t\t\t\treturn nil, ErrInvalidPortRangeFormat\n\t\t\t}\n\n\t\t\tfirst, err := parsePort(rangeParts[0])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tlast, err := parsePort(rangeParts[1])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif first > last {\n\t\t\t\treturn nil, ErrPortRangeInverted\n\t\t\t}\n\n\t\t\tportRanges = append(portRanges, tailcfg.PortRange{First: first, Last: last})\n\t\t} else {\n\t\t\tport, err := parsePort(part)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif port < 1 {\n\t\t\t\treturn nil, ErrPortMustBePositive\n\t\t\t}\n\n\t\t\tportRanges = append(portRanges, tailcfg.PortRange{First: port, Last: port})\n\t\t}\n\t}\n\n\treturn portRanges, nil\n}\n\n// parsePort parses a single port number from a string.\nfunc parsePort(portStr string) (uint16, error) {\n\tport, err := strconv.Atoi(portStr)\n\tif err != nil {\n\t\treturn 0, ErrInvalidPortNumber\n\t}\n\n\tif port < 0 || port > 65535 {\n\t\treturn 0, ErrPortNumberOutOfRange\n\t}\n\n\treturn uint16(port), nil\n}\n"
  },
  {
    "path": "hscontrol/policy/v2/utils_test.go",
    "content": "package v2\n\nimport (\n\t\"errors\"\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"tailscale.com/tailcfg\"\n)\n\n// TestParseDestinationAndPort tests the splitDestinationAndPort function using table-driven tests.\nfunc TestParseDestinationAndPort(t *testing.T) {\n\ttestCases := []struct {\n\t\tinput       string\n\t\twantDst     string\n\t\twantPort    string\n\t\twantErrIs   error\n\t\twantNoError bool\n\t}{\n\t\t// --- Non-bracketed inputs (existing behavior, unchanged) ---\n\n\t\t// Hostnames and tags\n\t\t{\"git-server:*\", \"git-server\", \"*\", nil, true},\n\t\t{\"example-host-1:*\", \"example-host-1\", \"*\", nil, true},\n\t\t{\"hostname:80-90\", \"hostname\", \"80-90\", nil, true},\n\t\t{\"tag:montreal-webserver:80,443\", \"tag:montreal-webserver\", \"80,443\", nil, true},\n\t\t{\"tag:api-server:443\", \"tag:api-server\", \"443\", nil, true},\n\n\t\t// IPv4 and IPv4 CIDR\n\t\t{\"192.168.1.0/24:22\", \"192.168.1.0/24\", \"22\", nil, true},\n\t\t{\"10.0.0.1:443\", \"10.0.0.1\", \"443\", nil, true},\n\n\t\t// Bare IPv6 (no brackets) — last colon splits correctly\n\t\t{\"fd7a:115c:a1e0::2:22\", \"fd7a:115c:a1e0::2\", \"22\", nil, true},\n\t\t{\"fd7a:115c:a1e0::2/128:22\", \"fd7a:115c:a1e0::2/128\", \"22\", nil, true},\n\n\t\t// --- Bracketed IPv6: [addr]:port ---\n\n\t\t// Single port\n\t\t{\"[fd7a:115c:a1e0::87e1]:22\", \"fd7a:115c:a1e0::87e1\", \"22\", nil, true},\n\t\t{\"[::1]:80\", \"::1\", \"80\", nil, true},\n\t\t{\"[2001:db8::1]:443\", \"2001:db8::1\", \"443\", nil, true},\n\t\t{\"[fe80::1]:22\", \"fe80::1\", \"22\", nil, true},\n\n\t\t// Multiple ports\n\t\t{\"[fd7a:115c:a1e0::87e1]:80,443\", \"fd7a:115c:a1e0::87e1\", \"80,443\", nil, true},\n\t\t{\"[::1]:22,80,443\", \"::1\", \"22,80,443\", nil, true},\n\n\t\t// Port range\n\t\t{\"[fd7a:115c:a1e0::2]:80-90\", \"fd7a:115c:a1e0::2\", \"80-90\", nil, true},\n\n\t\t// Wildcard port\n\t\t{\"[fd7a:115c:a1e0::87e1]:*\", \"fd7a:115c:a1e0::87e1\", \"*\", nil, true},\n\n\t\t// Unspecified address [::]\n\t\t{\"[::]:80\", \"::\", \"80\", nil, true},\n\t\t{\"[::]:*\", \"::\", \"*\", nil, true},\n\n\t\t// Full-length IPv6\n\t\t{\"[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:443\", \"2001:0db8:85a3:0000:0000:8a2e:0370:7334\", \"443\", nil, true},\n\n\t\t// --- Bracketed IPv6 CIDR: [addr]/prefix:port ---\n\n\t\t{\"[fd7a:115c:a1e0::2905]/128:80,443\", \"fd7a:115c:a1e0::2905/128\", \"80,443\", nil, true},\n\t\t{\"[fd7a:115c:a1e0::1]/128:22\", \"fd7a:115c:a1e0::1/128\", \"22\", nil, true},\n\t\t{\"[2001:db8::1]/32:443\", \"2001:db8::1/32\", \"443\", nil, true},\n\t\t{\"[::1]/128:*\", \"::1/128\", \"*\", nil, true},\n\t\t{\"[fd7a:115c:a1e0::2]/64:80-90\", \"fd7a:115c:a1e0::2/64\", \"80-90\", nil, true},\n\t\t{\"[::]/0:*\", \"::/0\", \"*\", nil, true},\n\n\t\t// --- Errors: brackets around non-IPv6 ---\n\n\t\t// IPv4 in brackets\n\t\t{\"[192.168.1.1]:80\", \"\", \"\", ErrBracketsNotIPv6, false},\n\t\t{\"[10.0.0.1]:443\", \"\", \"\", ErrBracketsNotIPv6, false},\n\t\t{\"[192.168.1.1]/32:80\", \"\", \"\", ErrBracketsNotIPv6, false},\n\n\t\t// IPv4 CIDR inside brackets\n\t\t{\"[10.0.0.0/8]:80\", \"\", \"\", ErrBracketsNotIPv6, false},\n\n\t\t// Hostnames in brackets\n\t\t{\"[my-hostname]:80\", \"\", \"\", ErrBracketsNotIPv6, false},\n\t\t{\"[git-server]:*\", \"\", \"\", ErrBracketsNotIPv6, false},\n\n\t\t// Tags in brackets\n\t\t{\"[tag:server]:80\", \"\", \"\", ErrBracketsNotIPv6, false},\n\n\t\t// --- Errors: CIDR inside brackets (must use [addr]/prefix:port) ---\n\n\t\t{\"[fd7a:115c:a1e0::2/128]:22\", \"\", \"\", 
ErrBracketsNotIPv6, false},\n\t\t{\"[2001:db8::/32]:443\", \"\", \"\", ErrBracketsNotIPv6, false},\n\t\t{\"[::1/128]:80\", \"\", \"\", ErrBracketsNotIPv6, false},\n\n\t\t// --- Errors: malformed bracket syntax ---\n\n\t\t// No port after brackets\n\t\t{\"[::1]\", \"\", \"\", ErrBracketsNotIPv6, false},\n\t\t{\"[2001:db8::1]\", \"\", \"\", ErrBracketsNotIPv6, false},\n\n\t\t// Empty brackets\n\t\t{\"[]:80\", \"\", \"\", ErrBracketsNotIPv6, false},\n\n\t\t// Missing close bracket\n\t\t{\"[::1\", \"\", \"\", ErrBracketsNotIPv6, false},\n\t\t{\"[2001:db8::1:80\", \"\", \"\", ErrBracketsNotIPv6, false},\n\n\t\t// Empty port after colon\n\t\t{\"[fd7a:115c:a1e0::1]:\", \"\", \"\", ErrInputEndsWithColon, false},\n\t\t{\"[::1]:\", \"\", \"\", ErrInputEndsWithColon, false},\n\t\t{\"[fd7a::1]/128:\", \"\", \"\", ErrInputEndsWithColon, false},\n\n\t\t// Junk after close bracket (not : or /)\n\t\t{\"[::1]blah\", \"\", \"\", ErrBracketsNotIPv6, false},\n\t\t{\"[::1] :80\", \"\", \"\", ErrBracketsNotIPv6, false},\n\n\t\t// --- Errors: non-bracketed malformed input (unchanged) ---\n\n\t\t{\"invalidinput\", \"\", \"\", ErrInputMissingColon, false},\n\t\t{\":invalid\", \"\", \"\", ErrInputStartsWithColon, false},\n\t\t{\"invalid:\", \"\", \"\", ErrInputEndsWithColon, false},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.input, func(t *testing.T) {\n\t\t\tdst, port, err := splitDestinationAndPort(tc.input)\n\n\t\t\tif tc.wantNoError {\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"splitDestinationAndPort(%q) unexpected error: %v\", tc.input, err)\n\t\t\t\t}\n\n\t\t\t\tif dst != tc.wantDst {\n\t\t\t\t\tt.Errorf(\"splitDestinationAndPort(%q) dst = %q, want %q\", tc.input, dst, tc.wantDst)\n\t\t\t\t}\n\n\t\t\t\tif port != tc.wantPort {\n\t\t\t\t\tt.Errorf(\"splitDestinationAndPort(%q) port = %q, want %q\", tc.input, port, tc.wantPort)\n\t\t\t\t}\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif err == nil {\n\t\t\t\tt.Fatalf(\"splitDestinationAndPort(%q) = (%q, %q, nil), want error wrapping %v\", tc.input, dst, port, tc.wantErrIs)\n\t\t\t}\n\n\t\t\tif !errors.Is(err, tc.wantErrIs) {\n\t\t\t\tt.Errorf(\"splitDestinationAndPort(%q) error = %v, want error wrapping %v\", tc.input, err, tc.wantErrIs)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestParsePort(t *testing.T) {\n\ttests := []struct {\n\t\tinput    string\n\t\texpected uint16\n\t\terr      string\n\t}{\n\t\t{\"80\", 80, \"\"},\n\t\t{\"0\", 0, \"\"},\n\t\t{\"65535\", 65535, \"\"},\n\t\t{\"-1\", 0, \"port number out of range\"},\n\t\t{\"65536\", 0, \"port number out of range\"},\n\t\t{\"abc\", 0, \"invalid port number\"},\n\t\t{\"\", 0, \"invalid port number\"},\n\t}\n\n\tfor _, test := range tests {\n\t\tresult, err := parsePort(test.input)\n\t\tif err != nil && err.Error() != test.err {\n\t\t\tt.Errorf(\"parsePort(%q) error = %v, expected error = %v\", test.input, err, test.err)\n\t\t}\n\n\t\tif err == nil && test.err != \"\" {\n\t\t\tt.Errorf(\"parsePort(%q) expected error = %v, got nil\", test.input, test.err)\n\t\t}\n\n\t\tif result != test.expected {\n\t\t\tt.Errorf(\"parsePort(%q) = %v, expected %v\", test.input, result, test.expected)\n\t\t}\n\t}\n}\n\nfunc TestParsePortRange(t *testing.T) {\n\ttests := []struct {\n\t\tinput    string\n\t\texpected []tailcfg.PortRange\n\t\terr      string\n\t}{\n\t\t{\"80\", []tailcfg.PortRange{{First: 80, Last: 80}}, \"\"},\n\t\t{\"80-90\", []tailcfg.PortRange{{First: 80, Last: 90}}, \"\"},\n\t\t{\"80,90\", []tailcfg.PortRange{{First: 80, Last: 80}, {First: 90, Last: 90}}, \"\"},\n\t\t{\"80-91,92,93-95\", 
[]tailcfg.PortRange{{First: 80, Last: 91}, {First: 92, Last: 92}, {First: 93, Last: 95}}, \"\"},\n\t\t{\"*\", []tailcfg.PortRange{tailcfg.PortRangeAny}, \"\"},\n\t\t{\"80-\", nil, \"invalid port range format\"},\n\t\t{\"-90\", nil, \"invalid port range format\"},\n\t\t{\"80-90,\", nil, \"invalid port number\"},\n\t\t{\"80,90-\", nil, \"invalid port range format\"},\n\t\t{\"80-90,abc\", nil, \"invalid port number\"},\n\t\t{\"80-90,65536\", nil, \"port number out of range\"},\n\t\t{\"80-90,90-80\", nil, \"invalid port range: first port is greater than last port\"},\n\t}\n\n\tfor _, test := range tests {\n\t\tresult, err := parsePortRange(test.input)\n\t\tif err != nil && err.Error() != test.err {\n\t\t\tt.Errorf(\"parsePortRange(%q) error = %v, expected error = %v\", test.input, err, test.err)\n\t\t}\n\n\t\tif err == nil && test.err != \"\" {\n\t\t\tt.Errorf(\"parsePortRange(%q) expected error = %v, got nil\", test.input, test.err)\n\t\t}\n\n\t\tif diff := cmp.Diff(result, test.expected); diff != \"\" {\n\t\t\tt.Errorf(\"parsePortRange(%q) mismatch (-want +got):\\n%s\", test.input, diff)\n\t\t}\n\t}\n}\n"
  },
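  {
    "path": "docs/examples/splitdst/main.go",
    "content": "// Package main is an illustrative, self-contained sketch of the\n// destination/port splitting behaviour exercised by the policy v2 tests\n// above. It is NOT the headscale implementation: the real\n// splitDestinationAndPort and its error values live in hscontrol/policy/v2,\n// and this file (including its path) is hypothetical, added only to make\n// the bracket-handling rules concrete.\npackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net/netip\"\n\t\"strings\"\n)\n\nvar (\n\terrMissingColon  = errors.New(\"input must contain a colon\")\n\terrBracketsNotV6 = errors.New(\"brackets must contain a valid IPv6 address\")\n\terrStartsColon   = errors.New(\"input must not start with a colon\")\n\terrEndsColon     = errors.New(\"input must not end with a colon\")\n)\n\n// splitDestinationAndPort splits \"dst:port\" where dst may be a hostname,\n// a tag, an IPv4 address or CIDR, a bare IPv6 address, or a bracketed\n// IPv6 address with an optional \"/prefix\" after the closing bracket.\nfunc splitDestinationAndPort(s string) (dst, port string, err error) {\n\tif strings.HasPrefix(s, \"[\") {\n\t\tend := strings.Index(s, \"]\")\n\t\tif end < 0 {\n\t\t\treturn \"\", \"\", errBracketsNotV6 // missing close bracket\n\t\t}\n\n\t\t// Brackets may contain only a plain IPv6 address; CIDRs, IPv4\n\t\t// addresses, hostnames and tags are rejected.\n\t\taddr, perr := netip.ParseAddr(s[1:end])\n\t\tif perr != nil || !addr.Is6() {\n\t\t\treturn \"\", \"\", errBracketsNotV6\n\t\t}\n\n\t\tdst = s[1:end]\n\t\trest := s[end+1:]\n\n\t\tif strings.HasPrefix(rest, \"/\") {\n\t\t\t// [addr]/prefix:port - the prefix length belongs to dst.\n\t\t\ti := strings.Index(rest, \":\")\n\t\t\tif i < 0 {\n\t\t\t\treturn \"\", \"\", errMissingColon\n\t\t\t}\n\t\t\tdst += rest[:i]\n\t\t\trest = rest[i:]\n\t\t}\n\n\t\tif !strings.HasPrefix(rest, \":\") {\n\t\t\treturn \"\", \"\", errBracketsNotV6 // junk or nothing after \"]\"\n\t\t}\n\n\t\tif port = rest[1:]; port == \"\" {\n\t\t\treturn \"\", \"\", errEndsColon\n\t\t}\n\n\t\treturn dst, port, nil\n\t}\n\n\t// Non-bracketed input: split on the LAST colon so bare IPv6\n\t// destinations such as fd7a:115c:a1e0::2:22 stay intact.\n\tswitch i := strings.LastIndex(s, \":\"); {\n\tcase i < 0:\n\t\treturn \"\", \"\", errMissingColon\n\tcase i == 0:\n\t\treturn \"\", \"\", errStartsColon\n\tcase i == len(s)-1:\n\t\treturn \"\", \"\", errEndsColon\n\tdefault:\n\t\treturn s[:i], s[i+1:], nil\n\t}\n}\n\nfunc main() {\n\tfor _, in := range []string{\n\t\t\"[fd7a:115c:a1e0::2905]/128:80,443\",\n\t\t\"fd7a:115c:a1e0::2:22\",\n\t\t\"tag:api-server:443\",\n\t\t\"[10.0.0.1]:443\",\n\t} {\n\t\tdst, port, err := splitDestinationAndPort(in)\n\t\tfmt.Printf(\"%q => dst=%q port=%q err=%v\\n\", in, dst, port, err)\n\t}\n}\n"
  },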
  {
    "path": "hscontrol/poll.go",
    "content": "package hscontrol\n\nimport (\n\t\"context\"\n\t\"encoding/binary\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"math/rand/v2\"\n\t\"net/http\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/types/change\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/juanfont/headscale/hscontrol/util/zlog/zf\"\n\t\"github.com/rs/zerolog\"\n\t\"github.com/rs/zerolog/log\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/util/zstdframe\"\n)\n\nconst (\n\tkeepAliveInterval = 50 * time.Second\n)\n\ntype contextKey string\n\nconst nodeNameContextKey = contextKey(\"nodeName\")\n\ntype mapSession struct {\n\th      *Headscale\n\treq    tailcfg.MapRequest\n\tctx    context.Context //nolint:containedctx\n\tcapVer tailcfg.CapabilityVersion\n\n\tch             chan *tailcfg.MapResponse\n\tcancelCh       chan struct{}\n\tcancelChClosed atomic.Bool\n\n\tkeepAlive       time.Duration\n\tkeepAliveTicker *time.Ticker\n\n\tnode *types.Node\n\tw    http.ResponseWriter\n\n\tlog zerolog.Logger\n}\n\nfunc (h *Headscale) newMapSession(\n\tctx context.Context,\n\treq tailcfg.MapRequest,\n\tw http.ResponseWriter,\n\tnode *types.Node,\n) *mapSession {\n\tka := keepAliveInterval + (time.Duration(rand.IntN(9000)) * time.Millisecond) //nolint:gosec // weak random is fine for jitter\n\n\treturn &mapSession{\n\t\th:      h,\n\t\tctx:    ctx,\n\t\treq:    req,\n\t\tw:      w,\n\t\tnode:   node,\n\t\tcapVer: req.Version,\n\n\t\tch:       make(chan *tailcfg.MapResponse, h.cfg.Tuning.NodeMapSessionBufferedChanSize),\n\t\tcancelCh: make(chan struct{}),\n\n\t\tkeepAlive:       ka,\n\t\tkeepAliveTicker: nil,\n\n\t\tlog: log.With().\n\t\t\tStr(zf.Component, \"poll\").\n\t\t\tEmbedObject(node).\n\t\t\tBool(zf.OmitPeers, req.OmitPeers).\n\t\t\tBool(zf.Stream, req.Stream).\n\t\t\tLogger(),\n\t}\n}\n\nfunc (m *mapSession) isStreaming() bool {\n\treturn m.req.Stream\n}\n\nfunc (m *mapSession) isEndpointUpdate() bool {\n\treturn !m.req.Stream && m.req.OmitPeers\n}\n\nfunc (m *mapSession) resetKeepAlive() {\n\tm.keepAliveTicker.Reset(m.keepAlive)\n}\n\nfunc (m *mapSession) stopFromBatcher() {\n\tif m.cancelChClosed.CompareAndSwap(false, true) {\n\t\tclose(m.cancelCh)\n\t}\n}\n\nfunc (m *mapSession) beforeServeLongPoll() {\n\tif m.node.IsEphemeral() {\n\t\tm.h.ephemeralGC.Cancel(m.node.ID)\n\t}\n}\n\n// afterServeLongPoll is called when a long-polling session ends and the node\n// is disconnected.\nfunc (m *mapSession) afterServeLongPoll() {\n\tif m.node.IsEphemeral() {\n\t\tm.h.ephemeralGC.Schedule(m.node.ID, m.h.cfg.EphemeralNodeInactivityTimeout)\n\t}\n}\n\n// serve handles non-streaming requests.\nfunc (m *mapSession) serve() {\n\t// This is the mechanism where the node gives us information about its\n\t// current configuration.\n\t//\n\t// Process the MapRequest to update node state (endpoints, hostinfo, etc.)\n\tc, err := m.h.state.UpdateNodeFromMapRequest(m.node.ID, m.req)\n\tif err != nil {\n\t\thttpError(m.w, err)\n\t\treturn\n\t}\n\n\tm.h.Change(c)\n\n\t// If OmitPeers is true and Stream is false\n\t// then the server will let clients update their endpoints without\n\t// breaking existing long-polling (Stream == true) connections.\n\t// In this case, the server can omit the entire response; the client\n\t// only checks the HTTP response status code.\n\t//\n\t// This is what Tailscale calls a Lite update, the client ignores\n\t// the response and just wants a 200.\n\t// !req.stream && req.OmitPeers\n\tif m.isEndpointUpdate() 
{\n\t\tm.w.WriteHeader(http.StatusOK)\n\t\tmapResponseEndpointUpdates.WithLabelValues(\"ok\").Inc()\n\t}\n}\n\n// serveLongPoll ensures the node gets the appropriate updates from either\n// polling or immediate responses.\n//\n//nolint:gocyclo\nfunc (m *mapSession) serveLongPoll() {\n\tm.beforeServeLongPoll()\n\n\tm.log.Trace().Caller().Msg(\"long poll session started\")\n\n\t// connectGen is set by Connect() below and captured by the deferred cleanup closure.\n\t// It allows Disconnect() to reject stale calls from old sessions — if a newer session\n\t// has called Connect() (incrementing the generation), the old session's Disconnect()\n\t// sees a mismatched generation and becomes a no-op.\n\tvar connectGen uint64\n\n\t// Clean up the session when the client disconnects\n\tdefer func() {\n\t\tm.stopFromBatcher()\n\n\t\tstillConnected := m.h.mapBatcher.RemoveNode(m.node.ID, m.ch)\n\n\t\t// If another session already exists for this node (reconnect\n\t\t// happened before this cleanup ran), skip the grace period\n\t\t// entirely — the node is not actually disconnecting.\n\t\tif stillConnected {\n\t\t\treturn\n\t\t}\n\n\t\t// When a node disconnects, it might rapidly reconnect (e.g. mobile clients, network weather).\n\t\t// Instead of immediately marking the node as offline, we wait a few seconds to see if it reconnects.\n\t\t// If it does reconnect, the existing mapSession will be replaced and the node remains online.\n\t\t// If it doesn't reconnect within the timeout, we mark it as offline.\n\t\t//\n\t\t// This avoids flapping nodes in the UI and unnecessary churn in the network.\n\t\t// This is not my favourite solution, but it kind of works in our eventually consistent world.\n\t\tticker := time.NewTicker(time.Second)\n\t\tdefer ticker.Stop()\n\n\t\tdisconnected := true\n\t\t// Wait up to 10 seconds for the node to reconnect.\n\t\t// 10 seconds was arbitrarily chosen as a reasonable time to reconnect.\n\t\tfor range 10 {\n\t\t\tif m.h.mapBatcher.IsConnected(m.node.ID) {\n\t\t\t\tdisconnected = false\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t<-ticker.C\n\t\t}\n\n\t\tif disconnected {\n\t\t\t// Pass the generation from our Connect() call. If a newer session has\n\t\t\t// connected since (bumping the generation), Disconnect() will detect\n\t\t\t// the mismatch and skip the state update, preventing the race where\n\t\t\t// an old grace period goroutine overwrites a newer session's online status.\n\t\t\tdisconnectChanges, err := m.h.state.Disconnect(m.node.ID, connectGen)\n\t\t\tif err != nil {\n\t\t\t\tm.log.Error().Caller().Err(err).Msg(\"failed to disconnect node\")\n\t\t\t}\n\n\t\t\tm.h.Change(disconnectChanges...)\n\t\t\tm.afterServeLongPoll()\n\t\t\tm.log.Info().Caller().Str(zf.Chan, fmt.Sprintf(\"%p\", m.ch)).Msg(\"node has disconnected\")\n\t\t}\n\t}()\n\n\t// Set up the client stream\n\tm.h.clientStreamsOpen.Add(1)\n\tdefer m.h.clientStreamsOpen.Done()\n\n\tctx, cancel := context.WithCancel(context.WithValue(m.ctx, nodeNameContextKey, m.node.Hostname))\n\tdefer cancel()\n\n\tm.keepAliveTicker = time.NewTicker(m.keepAlive)\n\n\t// Process the initial MapRequest to update node state (endpoints, hostinfo, etc.)\n\t// This must be done BEFORE calling Connect() to ensure routes are properly synchronized.\n\t// When nodes reconnect, they send their hostinfo with announced routes in the MapRequest.\n\t// We need this data in NodeStore before Connect() sets up the primary routes, because\n\t// SubnetRoutes() calculates the intersection of announced and approved routes. 
If we\n\t// call Connect() first, SubnetRoutes() returns empty (no announced routes yet), causing\n\t// the node to be incorrectly removed from AvailableRoutes.\n\tmapReqChange, err := m.h.state.UpdateNodeFromMapRequest(m.node.ID, m.req)\n\tif err != nil {\n\t\tm.log.Error().Caller().Err(err).Msg(\"failed to update node from initial MapRequest\")\n\t\treturn\n\t}\n\n\t// Connect the node after its state has been updated.\n\t// We send two separate change notifications because these are distinct operations:\n\t// 1. UpdateNodeFromMapRequest: processes the client's reported state (routes, endpoints, hostinfo)\n\t// 2. Connect: marks the node online and recalculates primary routes based on the updated state\n\t// While this results in two notifications, it ensures route data is synchronized before\n\t// primary route selection occurs, which is critical for proper HA subnet router failover.\n\tvar connectChanges []change.Change\n\n\tconnectChanges, connectGen = m.h.state.Connect(m.node.ID)\n\n\tm.log.Info().Caller().Str(zf.Chan, fmt.Sprintf(\"%p\", m.ch)).Msg(\"node has connected\")\n\n\t// TODO(kradalby): Redo the comments here\n\t// Add node to batcher so it can receive updates,\n\t// adding it before connecting to the state ensures that\n\t// it does not miss any updates that might be sent in the window\n\t// between the node connecting and the batcher being ready.\n\tif err := m.h.mapBatcher.AddNode(m.node.ID, m.ch, m.capVer, m.stopFromBatcher); err != nil { //nolint:noinlineerr\n\t\tm.log.Error().Caller().Err(err).Msg(\"failed to add node to batcher\")\n\t\treturn\n\t}\n\n\tm.log.Debug().Caller().Msg(\"node added to batcher\")\n\n\tm.h.Change(mapReqChange)\n\tm.h.Change(connectChanges...)\n\n\t// Loop through updates and continuously send them to the\n\t// client.\n\tfor {\n\t\t// consume channels with update, keep alives or \"batch\" blocking signals\n\t\tselect {\n\t\tcase <-m.cancelCh:\n\t\t\tm.log.Trace().Caller().Msg(\"poll cancel received\")\n\t\t\tmapResponseEnded.WithLabelValues(\"cancelled\").Inc()\n\n\t\t\treturn\n\n\t\tcase <-ctx.Done():\n\t\t\tm.log.Trace().Caller().Str(zf.Chan, fmt.Sprintf(\"%p\", m.ch)).Msg(\"poll context done\")\n\t\t\tmapResponseEnded.WithLabelValues(\"done\").Inc()\n\n\t\t\treturn\n\n\t\t// Consume updates sent to node\n\t\tcase update, ok := <-m.ch:\n\t\t\tm.log.Trace().Caller().Bool(zf.OK, ok).Msg(\"received update from channel\")\n\n\t\t\tif !ok {\n\t\t\t\tm.log.Trace().Caller().Msg(\"update channel closed, streaming session is likely being replaced\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\terr := m.writeMap(update)\n\t\t\tif err != nil {\n\t\t\t\tm.log.Error().Caller().Err(err).Msg(\"cannot write update to client\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tm.log.Trace().Caller().Msg(\"update sent\")\n\t\t\tm.resetKeepAlive()\n\n\t\tcase <-m.keepAliveTicker.C:\n\t\t\terr := m.writeMap(&keepAlive)\n\t\t\tif err != nil {\n\t\t\t\tm.log.Error().Caller().Err(err).Msg(\"cannot write keep alive\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif debugHighCardinalityMetrics {\n\t\t\t\tmapResponseLastSentSeconds.WithLabelValues(\"keepalive\", m.node.ID.String()).Set(float64(time.Now().Unix()))\n\t\t\t}\n\n\t\t\tmapResponseSent.WithLabelValues(\"ok\", \"keepalive\").Inc()\n\t\t\tm.resetKeepAlive()\n\t\t}\n\t}\n}\n\n// writeMap writes the map response to the client.\n// It handles compression if requested and any headers that need to be set.\n// It also handles flushing the response if the ResponseWriter\n// implements http.Flusher.\nfunc (m *mapSession) writeMap(msg 
*tailcfg.MapResponse) error {\n\tjsonBody, err := json.Marshal(msg)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"marshalling map response: %w\", err)\n\t}\n\n\tif m.req.Compress == util.ZstdCompression {\n\t\tjsonBody = zstdframe.AppendEncode(nil, jsonBody, zstdframe.FastestCompression)\n\t}\n\n\tdata := make([]byte, reservedResponseHeaderSize, reservedResponseHeaderSize+len(jsonBody))\n\t//nolint:gosec // G115: JSON response size will not exceed uint32 max\n\tbinary.LittleEndian.PutUint32(data, uint32(len(jsonBody)))\n\tdata = append(data, jsonBody...)\n\n\tstartWrite := time.Now()\n\n\t_, err = m.w.Write(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif m.isStreaming() {\n\t\tif f, ok := m.w.(http.Flusher); ok {\n\t\t\tf.Flush()\n\t\t} else {\n\t\t\tm.log.Error().Caller().Msg(\"responseWriter does not implement http.Flusher, cannot flush\")\n\t\t}\n\t}\n\n\tm.log.Trace().\n\t\tCaller().\n\t\tStr(zf.Chan, fmt.Sprintf(\"%p\", m.ch)).\n\t\tTimeDiff(\"timeSpent\", time.Now(), startWrite).\n\t\tStr(zf.MachineKey, m.node.MachineKey.String()).\n\t\tBool(\"keepalive\", msg.KeepAlive).\n\t\tMsg(\"finished writing mapresp to node\")\n\n\treturn nil\n}\n\nvar keepAlive = tailcfg.MapResponse{\n\tKeepAlive: true,\n}\n"
  },
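  {
    "path": "docs/examples/mapframing/main.go",
    "content": "// Package main is an illustrative sketch of the wire framing used by\n// mapSession.writeMap in hscontrol/poll.go: every MapResponse is sent as a\n// little-endian length header followed by the (optionally zstd-compressed)\n// JSON body. The reader side shown here is an assumption for illustration\n// (headscale only implements the writer side; the real reader lives in the\n// Tailscale client), and a 4-byte header is assumed.\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n)\n\n// writeFrame mirrors the server side: a uint32 length prefix, then the payload.\nfunc writeFrame(w io.Writer, payload []byte) error {\n\tvar hdr [4]byte\n\tbinary.LittleEndian.PutUint32(hdr[:], uint32(len(payload)))\n\n\tif _, err := w.Write(hdr[:]); err != nil {\n\t\treturn err\n\t}\n\n\t_, err := w.Write(payload)\n\n\treturn err\n}\n\n// readFrame reads exactly one length-prefixed frame from r.\nfunc readFrame(r io.Reader) ([]byte, error) {\n\tvar hdr [4]byte\n\tif _, err := io.ReadFull(r, hdr[:]); err != nil {\n\t\treturn nil, err\n\t}\n\n\tbody := make([]byte, binary.LittleEndian.Uint32(hdr[:]))\n\tif _, err := io.ReadFull(r, body); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn body, nil\n}\n\nfunc main() {\n\tpayload, err := json.Marshal(map[string]bool{\"KeepAlive\": true})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tvar buf bytes.Buffer\n\tif err := writeFrame(&buf, payload); err != nil {\n\t\tpanic(err)\n\t}\n\n\tframe, err := readFrame(&buf)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Printf(\"decoded frame: %s\\n\", frame)\n}\n"
  },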
  {
    "path": "hscontrol/poll_test.go",
    "content": "package hscontrol\n\nimport (\n\t\"context\"\n\t\"net/http\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/mapper\"\n\t\"github.com/juanfont/headscale/hscontrol/state\"\n\t\"github.com/juanfont/headscale/hscontrol/types/change\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/tailcfg\"\n)\n\ntype delayedSuccessResponseWriter struct {\n\theader http.Header\n\n\tfirstWriteDelay time.Duration\n\n\tfirstWriteStarted     chan struct{}\n\tfirstWriteStartedOnce sync.Once\n\n\tfirstWriteFinished     chan struct{}\n\tfirstWriteFinishedOnce sync.Once\n\n\tmu         sync.Mutex\n\twriteCount int\n}\n\nfunc newDelayedSuccessResponseWriter(firstWriteDelay time.Duration) *delayedSuccessResponseWriter {\n\treturn &delayedSuccessResponseWriter{\n\t\theader:             make(http.Header),\n\t\tfirstWriteDelay:    firstWriteDelay,\n\t\tfirstWriteStarted:  make(chan struct{}),\n\t\tfirstWriteFinished: make(chan struct{}),\n\t}\n}\n\nfunc (w *delayedSuccessResponseWriter) Header() http.Header {\n\treturn w.header\n}\n\nfunc (w *delayedSuccessResponseWriter) WriteHeader(int) {}\n\nfunc (w *delayedSuccessResponseWriter) Write(data []byte) (int, error) {\n\tw.mu.Lock()\n\tw.writeCount++\n\twriteCount := w.writeCount\n\tw.mu.Unlock()\n\n\tif writeCount == 1 {\n\t\t// Only the first write is delayed. This simulates a transiently wedged map response:\n\t\t// long enough to make the batcher time out future sends,\n\t\t// but short enough that the old session can still recover if we leave it alive\n\t\tw.firstWriteStartedOnce.Do(func() {\n\t\t\tclose(w.firstWriteStarted)\n\t\t})\n\n\t\ttimer := time.NewTimer(w.firstWriteDelay)\n\t\tdefer timer.Stop()\n\n\t\t<-timer.C\n\n\t\tw.firstWriteFinishedOnce.Do(func() {\n\t\t\tclose(w.firstWriteFinished)\n\t\t})\n\t}\n\n\treturn len(data), nil\n}\n\nfunc (w *delayedSuccessResponseWriter) Flush() {}\n\nfunc (w *delayedSuccessResponseWriter) FirstWriteStarted() <-chan struct{} {\n\treturn w.firstWriteStarted\n}\n\nfunc (w *delayedSuccessResponseWriter) FirstWriteFinished() <-chan struct{} {\n\treturn w.firstWriteFinished\n}\n\nfunc (w *delayedSuccessResponseWriter) WriteCount() int {\n\tw.mu.Lock()\n\tdefer w.mu.Unlock()\n\n\treturn w.writeCount\n}\n\n// TestGitHubIssue3129_TransientlyBlockedWriteDoesNotLeaveLiveStaleSession\n// tests the scenario reported in\n// https://github.com/juanfont/headscale/issues/3129.\n//\n// Scenario:\n//  1. Start a real long-poll session for one node.\n//  2. Block the first map write long enough for the session to stop draining\n//     its buffered map-response channel.\n//  3. While that write is blocked, queue enough updates to fill the buffered\n//     channel and make the next batcher send hit the stale-send timeout.\n//  4. That stale-send path removes the session from the batcher, so without an\n//     explicit teardown hook the old serveLongPoll goroutine would stay alive\n//     but stop receiving future updates.\n//  5. 
Release the blocked write and verify the batcher-side stop signal makes\n//     that stale session exit instead of lingering as an orphaned goroutine.\nfunc TestGitHubIssue3129_TransientlyBlockedWriteDoesNotLeaveLiveStaleSession(t *testing.T) {\n\tt.Parallel()\n\n\tapp := createTestApp(t)\n\tuser := app.state.CreateUserForTest(\"poll-stale-session-user\")\n\tcreatedNode := app.state.CreateRegisteredNodeForTest(user, \"poll-stale-session-node\")\n\trequire.NoError(t, app.state.UpdatePolicyManagerUsersForTest())\n\n\tapp.cfg.Tuning.BatchChangeDelay = 20 * time.Millisecond\n\tapp.cfg.Tuning.NodeMapSessionBufferedChanSize = 1\n\n\tapp.mapBatcher.Close()\n\trequire.NoError(t, app.state.Close())\n\n\treloadedState, err := state.NewState(app.cfg)\n\trequire.NoError(t, err)\n\n\tapp.state = reloadedState\n\n\tapp.mapBatcher = mapper.NewBatcherAndMapper(app.cfg, app.state)\n\tapp.mapBatcher.Start()\n\n\tt.Cleanup(func() {\n\t\tapp.mapBatcher.Close()\n\t\trequire.NoError(t, app.state.Close())\n\t})\n\n\tnodeView, ok := app.state.GetNodeByID(createdNode.ID)\n\trequire.True(t, ok, \"expected node to be present in NodeStore after reload\")\n\trequire.True(t, nodeView.Valid(), \"expected valid node view after reload\")\n\tnode := nodeView.AsStruct()\n\n\tctx, cancel := context.WithCancel(context.Background())\n\twriter := newDelayedSuccessResponseWriter(250 * time.Millisecond)\n\tsession := app.newMapSession(ctx, tailcfg.MapRequest{\n\t\tStream:  true,\n\t\tVersion: tailcfg.CapabilityVersion(100),\n\t}, writer, node)\n\n\tserveDone := make(chan struct{})\n\n\tgo func() {\n\t\tsession.serveLongPoll()\n\t\tclose(serveDone)\n\t}()\n\n\tt.Cleanup(func() {\n\t\tdummyCh := make(chan *tailcfg.MapResponse, 1)\n\t\t_ = app.mapBatcher.AddNode(node.ID, dummyCh, tailcfg.CapabilityVersion(100), nil)\n\n\t\tcancel()\n\n\t\tselect {\n\t\tcase <-serveDone:\n\t\tcase <-time.After(2 * time.Second):\n\t\t}\n\n\t\t_ = app.mapBatcher.RemoveNode(node.ID, dummyCh)\n\t})\n\n\tselect {\n\tcase <-writer.FirstWriteStarted():\n\tcase <-time.After(2 * time.Second):\n\t\tt.Fatal(\"expected initial map write to start\")\n\t}\n\n\tstreamsClosed := make(chan struct{})\n\n\tgo func() {\n\t\tapp.clientStreamsOpen.Wait()\n\t\tclose(streamsClosed)\n\t}()\n\n\t// One update fills the buffered session channel while the first write is blocked.\n\t// The second update then hits the 50ms stale-send timeout, so the batcher prunes\n\t// the stale connection and triggers its stop hook.\n\tapp.mapBatcher.AddWork(change.SelfUpdate(node.ID), change.SelfUpdate(node.ID))\n\n\tselect {\n\tcase <-writer.FirstWriteFinished():\n\tcase <-time.After(2 * time.Second):\n\t\tt.Fatal(\"expected the blocked write to eventually complete\")\n\t}\n\n\tassert.Eventually(t, func() bool {\n\t\tselect {\n\t\tcase <-streamsClosed:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}, time.Second, 20*time.Millisecond, \"after stale-send cleanup, the stale session should exit\")\n}\n"
  },
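  {
    "path": "docs/examples/closeonce/main.go",
    "content": "// Package main is an illustrative sketch (assumed names, not headscale\n// code) of the idempotent stop pattern used by mapSession.stopFromBatcher\n// in hscontrol/poll.go and exercised by poll_test.go: an atomic\n// CompareAndSwap guarantees close(ch) runs at most once even when the\n// batcher's stale-send cleanup and the HTTP handler race to stop the same\n// session.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"sync/atomic\"\n)\n\ntype session struct {\n\tcancelCh     chan struct{}\n\tcancelClosed atomic.Bool\n}\n\n// stop closes cancelCh exactly once; every later call is a no-op.\n// Without the CompareAndSwap guard, a second close would panic.\nfunc (s *session) stop() {\n\tif s.cancelClosed.CompareAndSwap(false, true) {\n\t\tclose(s.cancelCh)\n\t}\n}\n\nfunc main() {\n\ts := &session{cancelCh: make(chan struct{})}\n\n\t// Several goroutines racing to stop the same session.\n\tvar wg sync.WaitGroup\n\tfor range 8 {\n\t\twg.Add(1)\n\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\ts.stop()\n\t\t}()\n\t}\n\n\twg.Wait()\n\n\t<-s.cancelCh // already closed, so the receive returns immediately\n\tfmt.Println(\"stopped exactly once\")\n}\n"
  },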
  {
    "path": "hscontrol/routes/primary.go",
    "content": "package routes\n\nimport (\n\t\"fmt\"\n\t\"net/netip\"\n\t\"slices\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/juanfont/headscale/hscontrol/util/zlog/zf\"\n\t\"github.com/rs/zerolog/log\"\n\txmaps \"golang.org/x/exp/maps\"\n\t\"tailscale.com/net/tsaddr\"\n\t\"tailscale.com/util/set\"\n)\n\ntype PrimaryRoutes struct {\n\tmu sync.Mutex\n\n\t// routes is a map of prefixes that are adverties and approved and available\n\t// in the global headscale state.\n\troutes map[types.NodeID]set.Set[netip.Prefix]\n\n\t// primaries is a map of prefixes to the node that is the primary for that prefix.\n\tprimaries map[netip.Prefix]types.NodeID\n\tisPrimary map[types.NodeID]bool\n}\n\nfunc New() *PrimaryRoutes {\n\treturn &PrimaryRoutes{\n\t\troutes:    make(map[types.NodeID]set.Set[netip.Prefix]),\n\t\tprimaries: make(map[netip.Prefix]types.NodeID),\n\t\tisPrimary: make(map[types.NodeID]bool),\n\t}\n}\n\n// updatePrimaryLocked recalculates the primary routes and updates the internal state.\n// It returns true if the primary routes have changed.\n// It is assumed that the caller holds the lock.\n// The algorithm is as follows:\n// 1. Reset the primaries map.\n// 2. Iterate over the routes and count the number of times a prefix is advertised.\n// 3. If a prefix is advertised by at least two nodes, it is a primary route.\n// 4. If the primary routes have changed, update the internal state and return true.\n// 5. Otherwise, return false.\nfunc (pr *PrimaryRoutes) updatePrimaryLocked() bool {\n\tlog.Debug().Caller().Msg(\"updatePrimaryLocked starting\")\n\n\t// reset the primaries map, as we are going to recalculate it.\n\tallPrimaries := make(map[netip.Prefix][]types.NodeID)\n\tpr.isPrimary = make(map[types.NodeID]bool)\n\tchanged := false\n\n\t// sort the node ids so we can iterate over them in a deterministic order.\n\t// this is important so the same node is chosen two times in a row\n\t// as the primary route.\n\tids := types.NodeIDs(xmaps.Keys(pr.routes))\n\tsort.Sort(ids)\n\n\t// Create a map of prefixes to nodes that serve them so we\n\t// can determine the primary route for each prefix.\n\tfor _, id := range ids {\n\t\troutes := pr.routes[id]\n\t\tfor route := range routes {\n\t\t\tif _, ok := allPrimaries[route]; !ok {\n\t\t\t\tallPrimaries[route] = []types.NodeID{id}\n\t\t\t} else {\n\t\t\t\tallPrimaries[route] = append(allPrimaries[route], id)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Go through all prefixes and determine the primary route for each.\n\t// If the number of routes is below the minimum, remove the primary.\n\t// If the current primary is still available, continue.\n\t// If the current primary is not available, select a new one.\n\tfor prefix, nodes := range allPrimaries {\n\t\tlog.Debug().\n\t\t\tCaller().\n\t\t\tStr(zf.Prefix, prefix.String()).\n\t\t\tUints64(\"availableNodes\", func() []uint64 {\n\t\t\t\tids := make([]uint64, len(nodes))\n\t\t\t\tfor i, id := range nodes {\n\t\t\t\t\tids[i] = id.Uint64()\n\t\t\t\t}\n\n\t\t\t\treturn ids\n\t\t\t}()).\n\t\t\tMsg(\"processing prefix for primary route selection\")\n\n\t\tif node, ok := pr.primaries[prefix]; ok {\n\t\t\t// If the current primary is still available, continue.\n\t\t\tif slices.Contains(nodes, node) {\n\t\t\t\tlog.Debug().\n\t\t\t\t\tCaller().\n\t\t\t\t\tStr(zf.Prefix, prefix.String()).\n\t\t\t\t\tUint64(\"currentPrimary\", node.Uint64()).\n\t\t\t\t\tMsg(\"current primary still available, keeping 
it\")\n\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tlog.Debug().\n\t\t\t\t\tCaller().\n\t\t\t\t\tStr(zf.Prefix, prefix.String()).\n\t\t\t\t\tUint64(\"oldPrimary\", node.Uint64()).\n\t\t\t\t\tMsg(\"current primary no longer available\")\n\t\t\t}\n\t\t}\n\n\t\tif len(nodes) >= 1 {\n\t\t\tpr.primaries[prefix] = nodes[0]\n\t\t\tchanged = true\n\n\t\t\tlog.Debug().\n\t\t\t\tCaller().\n\t\t\t\tStr(zf.Prefix, prefix.String()).\n\t\t\t\tUint64(\"newPrimary\", nodes[0].Uint64()).\n\t\t\t\tMsg(\"selected new primary for prefix\")\n\t\t}\n\t}\n\n\t// Clean up any remaining primaries that are no longer valid.\n\tfor prefix := range pr.primaries {\n\t\tif _, ok := allPrimaries[prefix]; !ok {\n\t\t\tlog.Debug().\n\t\t\t\tCaller().\n\t\t\t\tStr(zf.Prefix, prefix.String()).\n\t\t\t\tMsg(\"cleaning up primary route that no longer has available nodes\")\n\t\t\tdelete(pr.primaries, prefix)\n\n\t\t\tchanged = true\n\t\t}\n\t}\n\n\t// Populate the quick lookup index for primary routes\n\tfor _, nodeID := range pr.primaries {\n\t\tpr.isPrimary[nodeID] = true\n\t}\n\n\tlog.Debug().\n\t\tCaller().\n\t\tBool(zf.Changes, changed).\n\t\tStr(zf.FinalState, pr.stringLocked()).\n\t\tMsg(\"updatePrimaryLocked completed\")\n\n\treturn changed\n}\n\n// SetRoutes sets the routes for a given Node ID and recalculates the primary routes\n// of the headscale.\n// It returns true if there was a change in primary routes.\n// All exit routes are ignored as they are not used in primary route context.\nfunc (pr *PrimaryRoutes) SetRoutes(node types.NodeID, prefixes ...netip.Prefix) bool {\n\tpr.mu.Lock()\n\tdefer pr.mu.Unlock()\n\n\tnlog := log.With().Uint64(zf.NodeID, node.Uint64()).Logger()\n\n\tnlog.Debug().\n\t\tCaller().\n\t\tStrs(\"prefixes\", util.PrefixesToString(prefixes)).\n\t\tMsg(\"PrimaryRoutes.SetRoutes called\")\n\n\t// If no routes are being set, remove the node from the routes map.\n\tif len(prefixes) == 0 {\n\t\twasPresent := false\n\n\t\tif _, ok := pr.routes[node]; ok {\n\t\t\tdelete(pr.routes, node)\n\n\t\t\twasPresent = true\n\n\t\t\tnlog.Debug().\n\t\t\t\tCaller().\n\t\t\t\tMsg(\"removed node from primary routes (no prefixes)\")\n\t\t}\n\n\t\tchanged := pr.updatePrimaryLocked()\n\t\tnlog.Debug().\n\t\t\tCaller().\n\t\t\tBool(\"wasPresent\", wasPresent).\n\t\t\tBool(zf.Changes, changed).\n\t\t\tStr(zf.NewState, pr.stringLocked()).\n\t\t\tMsg(\"SetRoutes completed (remove)\")\n\n\t\treturn changed\n\t}\n\n\trs := make(set.Set[netip.Prefix], len(prefixes))\n\tfor _, prefix := range prefixes {\n\t\tif !tsaddr.IsExitRoute(prefix) {\n\t\t\trs.Add(prefix)\n\t\t}\n\t}\n\n\tif rs.Len() != 0 {\n\t\tpr.routes[node] = rs\n\t\tnlog.Debug().\n\t\t\tCaller().\n\t\t\tStrs(\"routes\", util.PrefixesToString(rs.Slice())).\n\t\t\tMsg(\"updated node routes in primary route manager\")\n\t} else {\n\t\tdelete(pr.routes, node)\n\t\tnlog.Debug().\n\t\t\tCaller().\n\t\t\tMsg(\"removed node from primary routes (only exit routes)\")\n\t}\n\n\tchanged := pr.updatePrimaryLocked()\n\tnlog.Debug().\n\t\tCaller().\n\t\tBool(zf.Changes, changed).\n\t\tStr(zf.NewState, pr.stringLocked()).\n\t\tMsg(\"SetRoutes completed (update)\")\n\n\treturn changed\n}\n\nfunc (pr *PrimaryRoutes) PrimaryRoutes(id types.NodeID) []netip.Prefix {\n\tif pr == nil {\n\t\treturn nil\n\t}\n\n\tpr.mu.Lock()\n\tdefer pr.mu.Unlock()\n\n\t// Short circuit if the node is not a primary for any route.\n\tif _, ok := pr.isPrimary[id]; !ok {\n\t\treturn nil\n\t}\n\n\tvar routes []netip.Prefix\n\n\tfor prefix, node := range pr.primaries {\n\t\tif node == id {\n\t\t\troutes = 
append(routes, prefix)\n\t\t}\n\t}\n\n\tslices.SortFunc(routes, netip.Prefix.Compare)\n\n\treturn routes\n}\n\nfunc (pr *PrimaryRoutes) String() string {\n\tpr.mu.Lock()\n\tdefer pr.mu.Unlock()\n\n\treturn pr.stringLocked()\n}\n\nfunc (pr *PrimaryRoutes) stringLocked() string {\n\tvar sb strings.Builder\n\n\tfmt.Fprintln(&sb, \"Available routes:\")\n\n\tids := types.NodeIDs(xmaps.Keys(pr.routes))\n\tsort.Sort(ids)\n\n\tfor _, id := range ids {\n\t\tprefixes := pr.routes[id]\n\t\tfmt.Fprintf(&sb, \"\\nNode %d: %s\", id, strings.Join(util.PrefixesToString(prefixes.Slice()), \", \"))\n\t}\n\n\tfmt.Fprintln(&sb, \"\\n\\nCurrent primary routes:\")\n\n\tfor route, nodeID := range pr.primaries {\n\t\tfmt.Fprintf(&sb, \"\\nRoute %s: %d\", route, nodeID)\n\t}\n\n\treturn sb.String()\n}\n\n// DebugRoutes represents the primary routes state in a structured format for JSON serialization.\ntype DebugRoutes struct {\n\t// AvailableRoutes maps node IDs to their advertised routes\n\t// In the context of primary routes, this represents the routes that are available\n\t// for each node. A route will only be available if it is advertised by the node\n\t// AND approved.\n\t// Only routes by nodes currently connected to the headscale server are included.\n\tAvailableRoutes map[types.NodeID][]netip.Prefix `json:\"available_routes\"`\n\n\t// PrimaryRoutes maps route prefixes to the primary node serving them\n\tPrimaryRoutes map[string]types.NodeID `json:\"primary_routes\"`\n}\n\n// DebugJSON returns a structured representation of the primary routes state suitable for JSON serialization.\nfunc (pr *PrimaryRoutes) DebugJSON() DebugRoutes {\n\tpr.mu.Lock()\n\tdefer pr.mu.Unlock()\n\n\tdebug := DebugRoutes{\n\t\tAvailableRoutes: make(map[types.NodeID][]netip.Prefix),\n\t\tPrimaryRoutes:   make(map[string]types.NodeID),\n\t}\n\n\t// Populate available routes\n\tfor nodeID, routes := range pr.routes {\n\t\tprefixes := routes.Slice()\n\t\tslices.SortFunc(prefixes, netip.Prefix.Compare)\n\t\tdebug.AvailableRoutes[nodeID] = prefixes\n\t}\n\n\t// Populate primary routes\n\tfor prefix, nodeID := range pr.primaries {\n\t\tdebug.PrimaryRoutes[prefix.String()] = nodeID\n\t}\n\n\treturn debug\n}\n"
  },
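  {
    "path": "docs/examples/primaryroutes/main.go",
    "content": "// Package main is a usage sketch for hscontrol/routes.PrimaryRoutes,\n// mirroring the failover cases in primary_test.go. The file path is\n// hypothetical; the API calls are the real ones from this repository.\n// The first advertiser (lowest node ID) stays primary until it withdraws\n// its routes, at which point the next advertising node takes over.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"net/netip\"\n\n\t\"github.com/juanfont/headscale/hscontrol/routes\"\n)\n\nfunc main() {\n\tpr := routes.New()\n\tprefix := netip.MustParsePrefix(\"192.168.1.0/24\")\n\n\tpr.SetRoutes(1, prefix) // node 1 becomes primary\n\tpr.SetRoutes(2, prefix) // node 2 is standby; the primary is unchanged\n\n\tfmt.Println(pr.PrimaryRoutes(1)) // [192.168.1.0/24]\n\tfmt.Println(pr.PrimaryRoutes(2)) // []\n\n\tpr.SetRoutes(1) // node 1 withdraws all routes; failover to node 2\n\n\tfmt.Println(pr.PrimaryRoutes(2)) // [192.168.1.0/24]\n}\n"
  },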
  {
    "path": "hscontrol/routes/primary_test.go",
    "content": "package routes\n\nimport (\n\t\"net/netip\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/google/go-cmp/cmp/cmpopts\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"tailscale.com/util/set\"\n)\n\n// mp is a helper function that wraps netip.MustParsePrefix.\nfunc mp(prefix string) netip.Prefix {\n\treturn netip.MustParsePrefix(prefix)\n}\n\nfunc TestPrimaryRoutes(t *testing.T) {\n\ttests := []struct {\n\t\tname              string\n\t\toperations        func(pr *PrimaryRoutes) bool\n\t\texpectedRoutes    map[types.NodeID]set.Set[netip.Prefix]\n\t\texpectedPrimaries map[netip.Prefix]types.NodeID\n\t\texpectedIsPrimary map[types.NodeID]bool\n\t\texpectedChange    bool\n\n\t\t// primaries is a map of prefixes to the node that is the primary for that prefix.\n\t\tprimaries map[netip.Prefix]types.NodeID\n\t\tisPrimary map[types.NodeID]bool\n\t}{\n\t\t{\n\t\t\tname: \"single-node-registers-single-route\",\n\t\t\toperations: func(pr *PrimaryRoutes) bool {\n\t\t\t\treturn pr.SetRoutes(1, mp(\"192.168.1.0/24\"))\n\t\t\t},\n\t\t\texpectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{\n\t\t\t\t1: {\n\t\t\t\t\tmp(\"192.168.1.0/24\"): {},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPrimaries: map[netip.Prefix]types.NodeID{\n\t\t\t\tmp(\"192.168.1.0/24\"): 1,\n\t\t\t},\n\t\t\texpectedIsPrimary: map[types.NodeID]bool{\n\t\t\t\t1: true,\n\t\t\t},\n\t\t\texpectedChange: true,\n\t\t},\n\t\t{\n\t\t\tname: \"multiple-nodes-register-different-routes\",\n\t\t\toperations: func(pr *PrimaryRoutes) bool {\n\t\t\t\tpr.SetRoutes(1, mp(\"192.168.1.0/24\"))\n\t\t\t\treturn pr.SetRoutes(2, mp(\"192.168.2.0/24\"))\n\t\t\t},\n\t\t\texpectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{\n\t\t\t\t1: {\n\t\t\t\t\tmp(\"192.168.1.0/24\"): {},\n\t\t\t\t},\n\t\t\t\t2: {\n\t\t\t\t\tmp(\"192.168.2.0/24\"): {},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPrimaries: map[netip.Prefix]types.NodeID{\n\t\t\t\tmp(\"192.168.1.0/24\"): 1,\n\t\t\t\tmp(\"192.168.2.0/24\"): 2,\n\t\t\t},\n\t\t\texpectedIsPrimary: map[types.NodeID]bool{\n\t\t\t\t1: true,\n\t\t\t\t2: true,\n\t\t\t},\n\t\t\texpectedChange: true,\n\t\t},\n\t\t{\n\t\t\tname: \"multiple-nodes-register-overlapping-routes\",\n\t\t\toperations: func(pr *PrimaryRoutes) bool {\n\t\t\t\tpr.SetRoutes(1, mp(\"192.168.1.0/24\"))        // true\n\t\t\t\treturn pr.SetRoutes(2, mp(\"192.168.1.0/24\")) // false\n\t\t\t},\n\t\t\texpectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{\n\t\t\t\t1: {\n\t\t\t\t\tmp(\"192.168.1.0/24\"): {},\n\t\t\t\t},\n\t\t\t\t2: {\n\t\t\t\t\tmp(\"192.168.1.0/24\"): {},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPrimaries: map[netip.Prefix]types.NodeID{\n\t\t\t\tmp(\"192.168.1.0/24\"): 1,\n\t\t\t},\n\t\t\texpectedIsPrimary: map[types.NodeID]bool{\n\t\t\t\t1: true,\n\t\t\t},\n\t\t\texpectedChange: false,\n\t\t},\n\t\t{\n\t\t\tname: \"node-deregisters-a-route\",\n\t\t\toperations: func(pr *PrimaryRoutes) bool {\n\t\t\t\tpr.SetRoutes(1, mp(\"192.168.1.0/24\"))\n\t\t\t\treturn pr.SetRoutes(1) // Deregister by setting no routes\n\t\t\t},\n\t\t\texpectedRoutes:    nil,\n\t\t\texpectedPrimaries: nil,\n\t\t\texpectedIsPrimary: nil,\n\t\t\texpectedChange:    true,\n\t\t},\n\t\t{\n\t\t\tname: \"node-deregisters-one-of-multiple-routes\",\n\t\t\toperations: func(pr *PrimaryRoutes) bool {\n\t\t\t\tpr.SetRoutes(1, mp(\"192.168.1.0/24\"), mp(\"192.168.2.0/24\"))\n\t\t\t\treturn pr.SetRoutes(1, mp(\"192.168.2.0/24\")) // Deregister one route by setting the remaining 
route\n\t\t\t},\n\t\t\texpectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{\n\t\t\t\t1: {\n\t\t\t\t\tmp(\"192.168.2.0/24\"): {},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPrimaries: map[netip.Prefix]types.NodeID{\n\t\t\t\tmp(\"192.168.2.0/24\"): 1,\n\t\t\t},\n\t\t\texpectedIsPrimary: map[types.NodeID]bool{\n\t\t\t\t1: true,\n\t\t\t},\n\t\t\texpectedChange: true,\n\t\t},\n\t\t{\n\t\t\tname: \"node-registers-and-deregisters-routes-in-sequence\",\n\t\t\toperations: func(pr *PrimaryRoutes) bool {\n\t\t\t\tpr.SetRoutes(1, mp(\"192.168.1.0/24\"))\n\t\t\t\tpr.SetRoutes(2, mp(\"192.168.2.0/24\"))\n\t\t\t\tpr.SetRoutes(1) // Deregister by setting no routes\n\n\t\t\t\treturn pr.SetRoutes(1, mp(\"192.168.3.0/24\"))\n\t\t\t},\n\t\t\texpectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{\n\t\t\t\t1: {\n\t\t\t\t\tmp(\"192.168.3.0/24\"): {},\n\t\t\t\t},\n\t\t\t\t2: {\n\t\t\t\t\tmp(\"192.168.2.0/24\"): {},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPrimaries: map[netip.Prefix]types.NodeID{\n\t\t\t\tmp(\"192.168.2.0/24\"): 2,\n\t\t\t\tmp(\"192.168.3.0/24\"): 1,\n\t\t\t},\n\t\t\texpectedIsPrimary: map[types.NodeID]bool{\n\t\t\t\t1: true,\n\t\t\t\t2: true,\n\t\t\t},\n\t\t\texpectedChange: true,\n\t\t},\n\t\t{\n\t\t\tname: \"multiple-nodes-register-same-route\",\n\t\t\toperations: func(pr *PrimaryRoutes) bool {\n\t\t\t\tpr.SetRoutes(1, mp(\"192.168.1.0/24\")) // true, 1 primary\n\t\t\t\tpr.SetRoutes(2, mp(\"192.168.1.0/24\")) // false, 1 primary\n\n\t\t\t\treturn pr.SetRoutes(3, mp(\"192.168.1.0/24\")) // false, 1 primary\n\t\t\t},\n\t\t\texpectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{\n\t\t\t\t1: {\n\t\t\t\t\tmp(\"192.168.1.0/24\"): {},\n\t\t\t\t},\n\t\t\t\t2: {\n\t\t\t\t\tmp(\"192.168.1.0/24\"): {},\n\t\t\t\t},\n\t\t\t\t3: {\n\t\t\t\t\tmp(\"192.168.1.0/24\"): {},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPrimaries: map[netip.Prefix]types.NodeID{\n\t\t\t\tmp(\"192.168.1.0/24\"): 1,\n\t\t\t},\n\t\t\texpectedIsPrimary: map[types.NodeID]bool{\n\t\t\t\t1: true,\n\t\t\t},\n\t\t\texpectedChange: false,\n\t\t},\n\t\t{\n\t\t\tname: \"register-multiple-routes-shift-primary-check-primary\",\n\t\t\toperations: func(pr *PrimaryRoutes) bool {\n\t\t\t\tpr.SetRoutes(1, mp(\"192.168.1.0/24\")) // true, 1 primary\n\t\t\t\tpr.SetRoutes(2, mp(\"192.168.1.0/24\")) // false, 1 primary\n\t\t\t\tpr.SetRoutes(3, mp(\"192.168.1.0/24\")) // false, 1 primary\n\n\t\t\t\treturn pr.SetRoutes(1) // true, 2 primary\n\t\t\t},\n\t\t\texpectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{\n\t\t\t\t2: {\n\t\t\t\t\tmp(\"192.168.1.0/24\"): {},\n\t\t\t\t},\n\t\t\t\t3: {\n\t\t\t\t\tmp(\"192.168.1.0/24\"): {},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPrimaries: map[netip.Prefix]types.NodeID{\n\t\t\t\tmp(\"192.168.1.0/24\"): 2,\n\t\t\t},\n\t\t\texpectedIsPrimary: map[types.NodeID]bool{\n\t\t\t\t2: true,\n\t\t\t},\n\t\t\texpectedChange: true,\n\t\t},\n\t\t{\n\t\t\tname: \"primary-route-map-is-cleared-up-no-primary\",\n\t\t\toperations: func(pr *PrimaryRoutes) bool {\n\t\t\t\tpr.SetRoutes(1, mp(\"192.168.1.0/24\")) // true, 1 primary\n\t\t\t\tpr.SetRoutes(2, mp(\"192.168.1.0/24\")) // false, 1 primary\n\t\t\t\tpr.SetRoutes(3, mp(\"192.168.1.0/24\")) // false, 1 primary\n\t\t\t\tpr.SetRoutes(1)                       // true, 2 primary\n\n\t\t\t\treturn pr.SetRoutes(2) // true, 3 primary\n\t\t\t},\n\t\t\texpectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{\n\t\t\t\t3: {\n\t\t\t\t\tmp(\"192.168.1.0/24\"): {},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPrimaries: map[netip.Prefix]types.NodeID{\n\t\t\t\tmp(\"192.168.1.0/24\"): 3,\n\t\t\t},\n\t\t\texpectedIsPrimary: map[types.NodeID]bool{\n\t\t\t\t3: 
true,\n\t\t\t},\n\t\t\texpectedChange: true,\n\t\t},\n\t\t{\n\t\t\tname: \"primary-route-map-is-cleared-up-all-no-primary\",\n\t\t\toperations: func(pr *PrimaryRoutes) bool {\n\t\t\t\tpr.SetRoutes(1, mp(\"192.168.1.0/24\")) // true, 1 primary\n\t\t\t\tpr.SetRoutes(2, mp(\"192.168.1.0/24\")) // false, 1 primary\n\t\t\t\tpr.SetRoutes(3, mp(\"192.168.1.0/24\")) // false, 1 primary\n\t\t\t\tpr.SetRoutes(1)                       // true, 2 primary\n\t\t\t\tpr.SetRoutes(2)                       // true, 3 primary\n\n\t\t\t\treturn pr.SetRoutes(3) // true, no primary\n\t\t\t},\n\t\t\texpectedChange: true,\n\t\t},\n\t\t{\n\t\t\tname: \"primary-route-map-is-cleared-up\",\n\t\t\toperations: func(pr *PrimaryRoutes) bool {\n\t\t\t\tpr.SetRoutes(1, mp(\"192.168.1.0/24\")) // true, 1 primary\n\t\t\t\tpr.SetRoutes(2, mp(\"192.168.1.0/24\")) // false, 1 primary\n\t\t\t\tpr.SetRoutes(3, mp(\"192.168.1.0/24\")) // false, 1 primary\n\t\t\t\tpr.SetRoutes(1)                       // true, 2 primary\n\n\t\t\t\treturn pr.SetRoutes(2) // true, 3 primary\n\t\t\t},\n\t\t\texpectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{\n\t\t\t\t3: {\n\t\t\t\t\tmp(\"192.168.1.0/24\"): {},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPrimaries: map[netip.Prefix]types.NodeID{\n\t\t\t\tmp(\"192.168.1.0/24\"): 3,\n\t\t\t},\n\t\t\texpectedIsPrimary: map[types.NodeID]bool{\n\t\t\t\t3: true,\n\t\t\t},\n\t\t\texpectedChange: true,\n\t\t},\n\t\t{\n\t\t\tname: \"primary-route-no-flake\",\n\t\t\toperations: func(pr *PrimaryRoutes) bool {\n\t\t\t\tpr.SetRoutes(1, mp(\"192.168.1.0/24\")) // true, 1 primary\n\t\t\t\tpr.SetRoutes(2, mp(\"192.168.1.0/24\")) // false, 1 primary\n\t\t\t\tpr.SetRoutes(3, mp(\"192.168.1.0/24\")) // false, 1 primary\n\t\t\t\tpr.SetRoutes(1)                       // true, 2 primary\n\n\t\t\t\treturn pr.SetRoutes(1, mp(\"192.168.1.0/24\")) // false, 2 primary\n\t\t\t},\n\t\t\texpectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{\n\t\t\t\t1: {\n\t\t\t\t\tmp(\"192.168.1.0/24\"): {},\n\t\t\t\t},\n\t\t\t\t2: {\n\t\t\t\t\tmp(\"192.168.1.0/24\"): {},\n\t\t\t\t},\n\t\t\t\t3: {\n\t\t\t\t\tmp(\"192.168.1.0/24\"): {},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPrimaries: map[netip.Prefix]types.NodeID{\n\t\t\t\tmp(\"192.168.1.0/24\"): 2,\n\t\t\t},\n\t\t\texpectedIsPrimary: map[types.NodeID]bool{\n\t\t\t\t2: true,\n\t\t\t},\n\t\t\texpectedChange: false,\n\t\t},\n\t\t{\n\t\t\tname: \"primary-route-no-flake-check-old-primary\",\n\t\t\toperations: func(pr *PrimaryRoutes) bool {\n\t\t\t\tpr.SetRoutes(1, mp(\"192.168.1.0/24\")) // true, 1 primary\n\t\t\t\tpr.SetRoutes(2, mp(\"192.168.1.0/24\")) // false, 1 primary\n\t\t\t\tpr.SetRoutes(3, mp(\"192.168.1.0/24\")) // false, 1 primary\n\t\t\t\tpr.SetRoutes(1)                       // true, 2 primary\n\n\t\t\t\treturn pr.SetRoutes(1, mp(\"192.168.1.0/24\")) // false, 2 primary\n\t\t\t},\n\t\t\texpectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{\n\t\t\t\t1: {\n\t\t\t\t\tmp(\"192.168.1.0/24\"): {},\n\t\t\t\t},\n\t\t\t\t2: {\n\t\t\t\t\tmp(\"192.168.1.0/24\"): {},\n\t\t\t\t},\n\t\t\t\t3: {\n\t\t\t\t\tmp(\"192.168.1.0/24\"): {},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPrimaries: map[netip.Prefix]types.NodeID{\n\t\t\t\tmp(\"192.168.1.0/24\"): 2,\n\t\t\t},\n\t\t\texpectedIsPrimary: map[types.NodeID]bool{\n\t\t\t\t2: true,\n\t\t\t},\n\t\t\texpectedChange: false,\n\t\t},\n\t\t{\n\t\t\tname: \"primary-route-no-flake-full-integration\",\n\t\t\toperations: func(pr *PrimaryRoutes) bool {\n\t\t\t\tpr.SetRoutes(1, mp(\"192.168.1.0/24\")) // true, 1 primary\n\t\t\t\tpr.SetRoutes(2, mp(\"192.168.1.0/24\")) // false, 1 primary\n\t\t\t\tpr.SetRoutes(3, 
mp(\"192.168.1.0/24\")) // false, 1 primary\n\t\t\t\tpr.SetRoutes(1)                       // true, 2 primary\n\t\t\t\tpr.SetRoutes(2)                       // true, 3 primary\n\t\t\t\tpr.SetRoutes(1, mp(\"192.168.1.0/24\")) // false, 3 primary\n\t\t\t\tpr.SetRoutes(2, mp(\"192.168.1.0/24\")) // false, 3 primary\n\t\t\t\tpr.SetRoutes(1)                       // false, 3 primary\n\n\t\t\t\treturn pr.SetRoutes(1, mp(\"192.168.1.0/24\")) // false, 3 primary\n\t\t\t},\n\t\t\texpectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{\n\t\t\t\t1: {\n\t\t\t\t\tmp(\"192.168.1.0/24\"): {},\n\t\t\t\t},\n\t\t\t\t2: {\n\t\t\t\t\tmp(\"192.168.1.0/24\"): {},\n\t\t\t\t},\n\t\t\t\t3: {\n\t\t\t\t\tmp(\"192.168.1.0/24\"): {},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPrimaries: map[netip.Prefix]types.NodeID{\n\t\t\t\tmp(\"192.168.1.0/24\"): 3,\n\t\t\t},\n\t\t\texpectedIsPrimary: map[types.NodeID]bool{\n\t\t\t\t3: true,\n\t\t\t},\n\t\t\texpectedChange: false,\n\t\t},\n\t\t{\n\t\t\tname: \"multiple-nodes-register-same-route-and-exit\",\n\t\t\toperations: func(pr *PrimaryRoutes) bool {\n\t\t\t\tpr.SetRoutes(1, mp(\"0.0.0.0/0\"), mp(\"192.168.1.0/24\"))\n\t\t\t\treturn pr.SetRoutes(2, mp(\"192.168.1.0/24\"))\n\t\t\t},\n\t\t\texpectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{\n\t\t\t\t1: {\n\t\t\t\t\tmp(\"192.168.1.0/24\"): {},\n\t\t\t\t},\n\t\t\t\t2: {\n\t\t\t\t\tmp(\"192.168.1.0/24\"): {},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPrimaries: map[netip.Prefix]types.NodeID{\n\t\t\t\tmp(\"192.168.1.0/24\"): 1,\n\t\t\t},\n\t\t\texpectedIsPrimary: map[types.NodeID]bool{\n\t\t\t\t1: true,\n\t\t\t},\n\t\t\texpectedChange: false,\n\t\t},\n\t\t{\n\t\t\tname: \"deregister-non-existent-route\",\n\t\t\toperations: func(pr *PrimaryRoutes) bool {\n\t\t\t\treturn pr.SetRoutes(1) // Deregister by setting no routes\n\t\t\t},\n\t\t\texpectedRoutes: nil,\n\t\t\texpectedChange: false,\n\t\t},\n\t\t{\n\t\t\tname: \"register-empty-prefix-list\",\n\t\t\toperations: func(pr *PrimaryRoutes) bool {\n\t\t\t\treturn pr.SetRoutes(1)\n\t\t\t},\n\t\t\texpectedRoutes: nil,\n\t\t\texpectedChange: false,\n\t\t},\n\t\t{\n\t\t\tname: \"exit-nodes\",\n\t\t\toperations: func(pr *PrimaryRoutes) bool {\n\t\t\t\tpr.SetRoutes(1, mp(\"10.0.0.0/16\"), mp(\"0.0.0.0/0\"), mp(\"::/0\"))\n\t\t\t\tpr.SetRoutes(3, mp(\"0.0.0.0/0\"), mp(\"::/0\"))\n\n\t\t\t\treturn pr.SetRoutes(2, mp(\"0.0.0.0/0\"), mp(\"::/0\"))\n\t\t\t},\n\t\t\texpectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{\n\t\t\t\t1: {\n\t\t\t\t\tmp(\"10.0.0.0/16\"): {},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPrimaries: map[netip.Prefix]types.NodeID{\n\t\t\t\tmp(\"10.0.0.0/16\"): 1,\n\t\t\t},\n\t\t\texpectedIsPrimary: map[types.NodeID]bool{\n\t\t\t\t1: true,\n\t\t\t},\n\t\t\texpectedChange: false,\n\t\t},\n\t\t{\n\t\t\tname: \"concurrent-access\",\n\t\t\toperations: func(pr *PrimaryRoutes) bool {\n\t\t\t\tvar wg sync.WaitGroup\n\t\t\t\twg.Add(2)\n\n\t\t\t\tvar change1, change2 bool\n\n\t\t\t\tgo func() {\n\t\t\t\t\tdefer wg.Done()\n\n\t\t\t\t\tchange1 = pr.SetRoutes(1, mp(\"192.168.1.0/24\"))\n\t\t\t\t}()\n\t\t\t\tgo func() {\n\t\t\t\t\tdefer wg.Done()\n\n\t\t\t\t\tchange2 = pr.SetRoutes(2, mp(\"192.168.2.0/24\"))\n\t\t\t\t}()\n\n\t\t\t\twg.Wait()\n\n\t\t\t\treturn change1 || change2\n\t\t\t},\n\t\t\texpectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{\n\t\t\t\t1: {\n\t\t\t\t\tmp(\"192.168.1.0/24\"): {},\n\t\t\t\t},\n\t\t\t\t2: {\n\t\t\t\t\tmp(\"192.168.2.0/24\"): {},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPrimaries: map[netip.Prefix]types.NodeID{\n\t\t\t\tmp(\"192.168.1.0/24\"): 
1,\n\t\t\t\tmp(\"192.168.2.0/24\"): 2,\n\t\t\t},\n\t\t\texpectedIsPrimary: map[types.NodeID]bool{\n\t\t\t\t1: true,\n\t\t\t\t2: true,\n\t\t\t},\n\t\t\texpectedChange: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tpr := New()\n\n\t\t\tchange := tt.operations(pr)\n\t\t\tif change != tt.expectedChange {\n\t\t\t\tt.Errorf(\"change = %v, want %v\", change, tt.expectedChange)\n\t\t\t}\n\n\t\t\tcomps := append(util.Comparers, cmpopts.EquateEmpty())\n\t\t\tif diff := cmp.Diff(tt.expectedRoutes, pr.routes, comps...); diff != \"\" {\n\t\t\t\tt.Errorf(\"routes mismatch (-want +got):\\n%s\", diff)\n\t\t\t}\n\n\t\t\tif diff := cmp.Diff(tt.expectedPrimaries, pr.primaries, comps...); diff != \"\" {\n\t\t\t\tt.Errorf(\"primaries mismatch (-want +got):\\n%s\", diff)\n\t\t\t}\n\n\t\t\tif diff := cmp.Diff(tt.expectedIsPrimary, pr.isPrimary, comps...); diff != \"\" {\n\t\t\t\tt.Errorf(\"isPrimary mismatch (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
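  {
    "path": "docs/examples/exitroutes/main.go",
    "content": "// Package main is a small sketch of why exit routes never show up in the\n// PrimaryRoutes tests above: SetRoutes in hscontrol/routes drops every\n// prefix for which tsaddr.IsExitRoute reports true (0.0.0.0/0 and ::/0),\n// so only subnet routes take part in primary election. The file path is\n// hypothetical.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"net/netip\"\n\n\t\"tailscale.com/net/tsaddr\"\n)\n\nfunc main() {\n\tfor _, s := range []string{\"0.0.0.0/0\", \"::/0\", \"10.0.0.0/16\"} {\n\t\tp := netip.MustParsePrefix(s)\n\t\tfmt.Printf(\"%-12s exit route: %v\\n\", s, tsaddr.IsExitRoute(p))\n\t}\n}\n"
  },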
  {
    "path": "hscontrol/servertest/assertions.go",
    "content": "package servertest\n\nimport (\n\t\"net/netip\"\n\t\"testing\"\n\t\"time\"\n)\n\n// AssertMeshComplete verifies that every client in the slice sees\n// exactly (len(clients) - 1) peers, i.e. a fully connected mesh.\nfunc AssertMeshComplete(tb testing.TB, clients []*TestClient) {\n\ttb.Helper()\n\n\texpected := len(clients) - 1\n\tfor _, c := range clients {\n\t\tnm := c.Netmap()\n\t\tif nm == nil {\n\t\t\ttb.Errorf(\"AssertMeshComplete: %s has no netmap\", c.Name)\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif got := len(nm.Peers); got != expected {\n\t\t\ttb.Errorf(\"AssertMeshComplete: %s has %d peers, want %d (peers: %v)\",\n\t\t\t\tc.Name, got, expected, c.PeerNames())\n\t\t}\n\t}\n}\n\n// AssertSymmetricVisibility checks that peer visibility is symmetric:\n// if client A sees client B, then client B must also see client A.\nfunc AssertSymmetricVisibility(tb testing.TB, clients []*TestClient) {\n\ttb.Helper()\n\n\tfor _, a := range clients {\n\t\tfor _, b := range clients {\n\t\t\tif a == b {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t_, aSeesB := a.PeerByName(b.Name)\n\n\t\t\t_, bSeesA := b.PeerByName(a.Name)\n\t\t\tif aSeesB != bSeesA {\n\t\t\t\ttb.Errorf(\"AssertSymmetricVisibility: %s sees %s = %v, but %s sees %s = %v\",\n\t\t\t\t\ta.Name, b.Name, aSeesB, b.Name, a.Name, bSeesA)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// AssertPeerOnline checks that the observer sees peerName as online.\nfunc AssertPeerOnline(tb testing.TB, observer *TestClient, peerName string) {\n\ttb.Helper()\n\n\tpeer, ok := observer.PeerByName(peerName)\n\tif !ok {\n\t\ttb.Errorf(\"AssertPeerOnline: %s does not see peer %s\", observer.Name, peerName)\n\n\t\treturn\n\t}\n\n\tisOnline, known := peer.Online().GetOk()\n\tif !known || !isOnline {\n\t\ttb.Errorf(\"AssertPeerOnline: %s sees peer %s but Online=%v (known=%v), want true\",\n\t\t\tobserver.Name, peerName, isOnline, known)\n\t}\n}\n\n// AssertPeerOffline checks that the observer sees peerName as offline.\nfunc AssertPeerOffline(tb testing.TB, observer *TestClient, peerName string) {\n\ttb.Helper()\n\n\tpeer, ok := observer.PeerByName(peerName)\n\tif !ok {\n\t\t// Peer gone entirely counts as \"offline\" for this assertion.\n\t\treturn\n\t}\n\n\tisOnline, known := peer.Online().GetOk()\n\tif known && isOnline {\n\t\ttb.Errorf(\"AssertPeerOffline: %s sees peer %s as online, want offline\",\n\t\t\tobserver.Name, peerName)\n\t}\n}\n\n// AssertPeerGone checks that the observer does NOT have peerName in\n// its peer list at all.\nfunc AssertPeerGone(tb testing.TB, observer *TestClient, peerName string) {\n\ttb.Helper()\n\n\t_, ok := observer.PeerByName(peerName)\n\tif ok {\n\t\ttb.Errorf(\"AssertPeerGone: %s still sees peer %s\", observer.Name, peerName)\n\t}\n}\n\n// AssertPeerHasAllowedIPs checks that a peer has the expected\n// AllowedIPs prefixes.\nfunc AssertPeerHasAllowedIPs(tb testing.TB, observer *TestClient, peerName string, want []netip.Prefix) {\n\ttb.Helper()\n\n\tpeer, ok := observer.PeerByName(peerName)\n\tif !ok {\n\t\ttb.Errorf(\"AssertPeerHasAllowedIPs: %s does not see peer %s\", observer.Name, peerName)\n\n\t\treturn\n\t}\n\n\tgot := make([]netip.Prefix, 0, peer.AllowedIPs().Len())\n\tfor i := range peer.AllowedIPs().Len() {\n\t\tgot = append(got, peer.AllowedIPs().At(i))\n\t}\n\n\tif len(got) != len(want) {\n\t\ttb.Errorf(\"AssertPeerHasAllowedIPs: %s sees %s with AllowedIPs %v, want %v\",\n\t\t\tobserver.Name, peerName, got, want)\n\n\t\treturn\n\t}\n\n\t// Build a set for comparison.\n\twantSet := make(map[netip.Prefix]bool, len(want))\n\tfor _, p := range 
want {\n\t\twantSet[p] = true\n\t}\n\n\tfor _, p := range got {\n\t\tif !wantSet[p] {\n\t\t\ttb.Errorf(\"AssertPeerHasAllowedIPs: %s sees %s with unexpected AllowedIP %v (want %v)\",\n\t\t\t\tobserver.Name, peerName, p, want)\n\t\t}\n\t}\n}\n\n// AssertConsistentState checks that all clients agree on peer\n// properties: every connected client should see the same set of\n// peer hostnames.\nfunc AssertConsistentState(tb testing.TB, clients []*TestClient) {\n\ttb.Helper()\n\n\tfor _, c := range clients {\n\t\tnm := c.Netmap()\n\t\tif nm == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tpeerNames := make(map[string]bool, len(nm.Peers))\n\t\tfor _, p := range nm.Peers {\n\t\t\thi := p.Hostinfo()\n\t\t\tif hi.Valid() {\n\t\t\t\tpeerNames[hi.Hostname()] = true\n\t\t\t}\n\t\t}\n\n\t\t// Check that c sees all other connected clients.\n\t\tfor _, other := range clients {\n\t\t\tif other == c || other.Netmap() == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif !peerNames[other.Name] {\n\t\t\t\ttb.Errorf(\"AssertConsistentState: %s does not see %s (peers: %v)\",\n\t\t\t\t\tc.Name, other.Name, c.PeerNames())\n\t\t\t}\n\t\t}\n\t}\n}\n\n// AssertDERPMapPresent checks that the netmap contains a DERP map.\nfunc AssertDERPMapPresent(tb testing.TB, client *TestClient) {\n\ttb.Helper()\n\n\tnm := client.Netmap()\n\tif nm == nil {\n\t\ttb.Errorf(\"AssertDERPMapPresent: %s has no netmap\", client.Name)\n\n\t\treturn\n\t}\n\n\tif nm.DERPMap == nil {\n\t\ttb.Errorf(\"AssertDERPMapPresent: %s has nil DERPMap\", client.Name)\n\n\t\treturn\n\t}\n\n\tif len(nm.DERPMap.Regions) == 0 {\n\t\ttb.Errorf(\"AssertDERPMapPresent: %s has empty DERPMap regions\", client.Name)\n\t}\n}\n\n// AssertSelfHasAddresses checks that the self node has at least one address.\nfunc AssertSelfHasAddresses(tb testing.TB, client *TestClient) {\n\ttb.Helper()\n\n\tnm := client.Netmap()\n\tif nm == nil {\n\t\ttb.Errorf(\"AssertSelfHasAddresses: %s has no netmap\", client.Name)\n\n\t\treturn\n\t}\n\n\tif !nm.SelfNode.Valid() {\n\t\ttb.Errorf(\"AssertSelfHasAddresses: %s self node is invalid\", client.Name)\n\n\t\treturn\n\t}\n\n\tif nm.SelfNode.Addresses().Len() == 0 {\n\t\ttb.Errorf(\"AssertSelfHasAddresses: %s self node has no addresses\", client.Name)\n\t}\n}\n\n// EventuallyAssertMeshComplete retries AssertMeshComplete up to\n// timeout, useful when waiting for state to propagate.\nfunc EventuallyAssertMeshComplete(tb testing.TB, clients []*TestClient, timeout time.Duration) {\n\ttb.Helper()\n\n\texpected := len(clients) - 1\n\tdeadline := time.After(timeout)\n\n\tfor {\n\t\tallGood := true\n\n\t\tfor _, c := range clients {\n\t\t\tnm := c.Netmap()\n\t\t\tif nm == nil || len(nm.Peers) < expected {\n\t\t\t\tallGood = false\n\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif allGood {\n\t\t\t// Final strict check.\n\t\t\tAssertMeshComplete(tb, clients)\n\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-deadline:\n\t\t\t// Report the failure with details.\n\t\t\tfor _, c := range clients {\n\t\t\t\tnm := c.Netmap()\n\n\t\t\t\tgot := 0\n\t\t\t\tif nm != nil {\n\t\t\t\t\tgot = len(nm.Peers)\n\t\t\t\t}\n\n\t\t\t\tif got != expected {\n\t\t\t\t\ttb.Errorf(\"EventuallyAssertMeshComplete: %s has %d peers, want %d (timeout %v)\",\n\t\t\t\t\t\tc.Name, got, expected, timeout)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn\n\t\tcase <-time.After(100 * time.Millisecond):\n\t\t\t// Poll again.\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "hscontrol/servertest/client.go",
    "content": "package servertest\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"tailscale.com/control/controlclient\"\n\t\"tailscale.com/health\"\n\t\"tailscale.com/net/netmon\"\n\t\"tailscale.com/net/tsdial\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/key\"\n\t\"tailscale.com/types/netmap\"\n\t\"tailscale.com/types/persist\"\n\t\"tailscale.com/util/eventbus\"\n)\n\n// TestClient wraps a Tailscale controlclient.Direct connected to a\n// TestServer. It tracks all received NetworkMap updates, providing\n// helpers to wait for convergence and inspect the client's view of\n// the network.\ntype TestClient struct {\n\t// Name is a human-readable identifier for this client.\n\tName string\n\n\tserver  *TestServer\n\tdirect  *controlclient.Direct\n\tauthKey string\n\tuser    *types.User\n\n\t// Connection lifecycle.\n\tpollCtx    context.Context //nolint:containedctx // test-only; context stored for cancel control\n\tpollCancel context.CancelFunc\n\tpollDone   chan struct{}\n\n\t// Accumulated state from MapResponse callbacks.\n\tmu      sync.RWMutex\n\tnetmap  *netmap.NetworkMap\n\thistory []*netmap.NetworkMap\n\n\t// updates is a buffered channel that receives a signal\n\t// each time a new NetworkMap arrives.\n\tupdates chan *netmap.NetworkMap\n\n\tbus     *eventbus.Bus\n\tdialer  *tsdial.Dialer\n\ttracker *health.Tracker\n}\n\n// ClientOption configures a TestClient.\ntype ClientOption func(*clientConfig)\n\ntype clientConfig struct {\n\tephemeral bool\n\thostname  string\n\ttags      []string\n\tuser      *types.User\n}\n\n// WithEphemeral makes the client register as an ephemeral node.\nfunc WithEphemeral() ClientOption {\n\treturn func(c *clientConfig) { c.ephemeral = true }\n}\n\n// WithHostname sets the client's hostname in Hostinfo.\nfunc WithHostname(name string) ClientOption {\n\treturn func(c *clientConfig) { c.hostname = name }\n}\n\n// WithTags sets ACL tags on the pre-auth key.\nfunc WithTags(tags ...string) ClientOption {\n\treturn func(c *clientConfig) { c.tags = tags }\n}\n\n// WithUser sets the user for the client. 
If not set, the harness\n// creates a default user.\nfunc WithUser(user *types.User) ClientOption {\n\treturn func(c *clientConfig) { c.user = user }\n}\n\n// NewClient creates a TestClient, registers it with the TestServer\n// using a pre-auth key, and starts long-polling for map updates.\nfunc NewClient(tb testing.TB, server *TestServer, name string, opts ...ClientOption) *TestClient {\n\ttb.Helper()\n\n\tcc := &clientConfig{\n\t\thostname: name,\n\t}\n\tfor _, o := range opts {\n\t\to(cc)\n\t}\n\n\t// Resolve user.\n\tuser := cc.user\n\tif user == nil {\n\t\t// Create a per-client user if none specified.\n\t\tuser = server.CreateUser(tb, \"user-\"+name)\n\t}\n\n\t// Create pre-auth key.\n\tuid := types.UserID(user.ID)\n\n\tvar authKey string\n\tif cc.ephemeral {\n\t\tauthKey = server.CreateEphemeralPreAuthKey(tb, uid)\n\t} else {\n\t\tauthKey = server.CreatePreAuthKey(tb, uid)\n\t}\n\n\t// Set up Tailscale client infrastructure.\n\tbus := eventbus.New()\n\ttracker := health.NewTracker(bus)\n\tdialer := tsdial.NewDialer(netmon.NewStatic())\n\tdialer.SetBus(bus)\n\n\tmachineKey := key.NewMachine()\n\n\tdirect, err := controlclient.NewDirect(controlclient.Options{\n\t\tPersist:              persist.Persist{},\n\t\tGetMachinePrivateKey: func() (key.MachinePrivate, error) { return machineKey, nil },\n\t\tServerURL:            server.URL,\n\t\tAuthKey:              authKey,\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tBackendLogID: \"servertest-\" + name,\n\t\t\tHostname:     cc.hostname,\n\t\t},\n\t\tDiscoPublicKey: key.NewDisco().Public(),\n\t\tLogf:           tb.Logf,\n\t\tHealthTracker:  tracker,\n\t\tDialer:         dialer,\n\t\tBus:            bus,\n\t})\n\tif err != nil {\n\t\ttb.Fatalf(\"servertest: NewDirect(%s): %v\", name, err)\n\t}\n\n\ttc := &TestClient{\n\t\tName:    name,\n\t\tserver:  server,\n\t\tdirect:  direct,\n\t\tauthKey: authKey,\n\t\tuser:    user,\n\t\tupdates: make(chan *netmap.NetworkMap, 64),\n\t\tbus:     bus,\n\t\tdialer:  dialer,\n\t\ttracker: tracker,\n\t}\n\n\ttb.Cleanup(func() {\n\t\ttc.cleanup()\n\t})\n\n\t// Register with the server.\n\ttc.register(tb)\n\n\t// Start long-polling in the background.\n\ttc.startPoll(tb)\n\n\treturn tc\n}\n\n// register performs the initial TryLogin to register the client.\nfunc (c *TestClient) register(tb testing.TB) {\n\ttb.Helper()\n\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\n\turl, err := c.direct.TryLogin(ctx, controlclient.LoginDefault)\n\tif err != nil {\n\t\ttb.Fatalf(\"servertest: TryLogin(%s): %v\", c.Name, err)\n\t}\n\n\tif url != \"\" {\n\t\ttb.Fatalf(\"servertest: TryLogin(%s): unexpected auth URL: %s (expected auto-auth with preauth key)\", c.Name, url)\n\t}\n}\n\n// startPoll begins the long-poll MapRequest loop.\nfunc (c *TestClient) startPoll(tb testing.TB) {\n\ttb.Helper()\n\n\tc.pollCtx, c.pollCancel = context.WithCancel(context.Background())\n\tc.pollDone = make(chan struct{})\n\n\tgo func() {\n\t\tdefer close(c.pollDone)\n\t\t// PollNetMap blocks until ctx is cancelled or the server closes\n\t\t// the connection.\n\t\t_ = c.direct.PollNetMap(c.pollCtx, c)\n\t}()\n}\n\n// UpdateFullNetmap implements controlclient.NetmapUpdater.\n// Called by controlclient.Direct when a new NetworkMap is received.\nfunc (c *TestClient) UpdateFullNetmap(nm *netmap.NetworkMap) {\n\tc.mu.Lock()\n\tc.netmap = nm\n\tc.history = append(c.history, nm)\n\tc.mu.Unlock()\n\n\t// Non-blocking send to the updates channel.\n\tselect {\n\tcase c.updates <- nm:\n\tdefault:\n\t}\n}\n\n// 
cleanup releases all resources.\nfunc (c *TestClient) cleanup() {\n\tif c.pollCancel != nil {\n\t\tc.pollCancel()\n\t}\n\n\tif c.pollDone != nil {\n\t\t// Wait for PollNetMap to exit, but don't hang.\n\t\tselect {\n\t\tcase <-c.pollDone:\n\t\tcase <-time.After(5 * time.Second):\n\t\t}\n\t}\n\n\tif c.direct != nil {\n\t\tc.direct.Close()\n\t}\n\n\tif c.dialer != nil {\n\t\tc.dialer.Close()\n\t}\n\n\tif c.bus != nil {\n\t\tc.bus.Close()\n\t}\n}\n\n// --- Lifecycle methods ---\n\n// Disconnect cancels the long-poll context, simulating a clean\n// client disconnect.\nfunc (c *TestClient) Disconnect(tb testing.TB) {\n\ttb.Helper()\n\n\tif c.pollCancel != nil {\n\t\tc.pollCancel()\n\t\t<-c.pollDone\n\t}\n}\n\n// Reconnect registers and starts a new long-poll session.\n// Call Disconnect first, or this will disconnect automatically.\nfunc (c *TestClient) Reconnect(tb testing.TB) {\n\ttb.Helper()\n\n\t// Cancel any existing poll.\n\tif c.pollCancel != nil {\n\t\tc.pollCancel()\n\n\t\tselect {\n\t\tcase <-c.pollDone:\n\t\tcase <-time.After(5 * time.Second):\n\t\t\ttb.Fatalf(\"servertest: Reconnect(%s): old poll did not exit\", c.Name)\n\t\t}\n\t}\n\n\t// Clear stale netmap data so that callers like WaitForPeers\n\t// actually wait for the new session's map instead of returning\n\t// immediately based on the old session's cached state.\n\tc.mu.Lock()\n\tc.netmap = nil\n\tc.mu.Unlock()\n\n\t// Drain any pending updates from the old session so they\n\t// don't satisfy a subsequent WaitForPeers/WaitForUpdate.\n\tfor {\n\t\tselect {\n\t\tcase <-c.updates:\n\t\tdefault:\n\t\t\tgoto drained\n\t\t}\n\t}\n\ndrained:\n\n\t// Re-register and start polling again.\n\tc.register(tb)\n\n\tc.startPoll(tb)\n}\n\n// ReconnectAfter disconnects, waits for d, then reconnects.\n// The timer works correctly with testing/synctest for\n// time-controlled tests.\nfunc (c *TestClient) ReconnectAfter(tb testing.TB, d time.Duration) {\n\ttb.Helper()\n\tc.Disconnect(tb)\n\n\ttimer := time.NewTimer(d)\n\tdefer timer.Stop()\n\n\t<-timer.C\n\tc.Reconnect(tb)\n}\n\n// --- State accessors ---\n\n// Netmap returns the latest NetworkMap, or nil if none received yet.\nfunc (c *TestClient) Netmap() *netmap.NetworkMap {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\treturn c.netmap\n}\n\n// WaitForPeers blocks until the client sees at least n peers,\n// or until timeout expires.\nfunc (c *TestClient) WaitForPeers(tb testing.TB, n int, timeout time.Duration) {\n\ttb.Helper()\n\n\tdeadline := time.After(timeout)\n\n\tfor {\n\t\tif nm := c.Netmap(); nm != nil && len(nm.Peers) >= n {\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-c.updates:\n\t\t\t// Check again.\n\t\tcase <-deadline:\n\t\t\tnm := c.Netmap()\n\n\t\t\tgot := 0\n\t\t\tif nm != nil {\n\t\t\t\tgot = len(nm.Peers)\n\t\t\t}\n\n\t\t\ttb.Fatalf(\"servertest: WaitForPeers(%s, %d): timeout after %v (got %d peers)\", c.Name, n, timeout, got)\n\t\t}\n\t}\n}\n\n// WaitForUpdate blocks until the next netmap update arrives or timeout.\nfunc (c *TestClient) WaitForUpdate(tb testing.TB, timeout time.Duration) *netmap.NetworkMap {\n\ttb.Helper()\n\n\tselect {\n\tcase nm := <-c.updates:\n\t\treturn nm\n\tcase <-time.After(timeout):\n\t\ttb.Fatalf(\"servertest: WaitForUpdate(%s): timeout after %v\", c.Name, timeout)\n\n\t\treturn nil\n\t}\n}\n\n// Peers returns the current peer list, or nil.\nfunc (c *TestClient) Peers() []tailcfg.NodeView {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\tif c.netmap == nil {\n\t\treturn nil\n\t}\n\n\treturn c.netmap.Peers\n}\n\n// PeerByName finds a peer by 
hostname. Returns the peer and true\n// if found, zero value and false otherwise.\nfunc (c *TestClient) PeerByName(hostname string) (tailcfg.NodeView, bool) {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\tif c.netmap == nil {\n\t\treturn tailcfg.NodeView{}, false\n\t}\n\n\tfor _, p := range c.netmap.Peers {\n\t\thi := p.Hostinfo()\n\t\tif hi.Valid() && hi.Hostname() == hostname {\n\t\t\treturn p, true\n\t\t}\n\t}\n\n\treturn tailcfg.NodeView{}, false\n}\n\n// PeerNames returns the hostnames of all current peers.\nfunc (c *TestClient) PeerNames() []string {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\tif c.netmap == nil {\n\t\treturn nil\n\t}\n\n\tnames := make([]string, 0, len(c.netmap.Peers))\n\tfor _, p := range c.netmap.Peers {\n\t\thi := p.Hostinfo()\n\t\tif hi.Valid() {\n\t\t\tnames = append(names, hi.Hostname())\n\t\t}\n\t}\n\n\treturn names\n}\n\n// UpdateCount returns the total number of full netmap updates received.\nfunc (c *TestClient) UpdateCount() int {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\treturn len(c.history)\n}\n\n// History returns a copy of all NetworkMap snapshots in order.\nfunc (c *TestClient) History() []*netmap.NetworkMap {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\tout := make([]*netmap.NetworkMap, len(c.history))\n\tcopy(out, c.history)\n\n\treturn out\n}\n\n// SelfName returns the self node's hostname from the latest netmap.\nfunc (c *TestClient) SelfName() string {\n\tnm := c.Netmap()\n\tif nm == nil || !nm.SelfNode.Valid() {\n\t\treturn \"\"\n\t}\n\n\treturn nm.SelfNode.Hostinfo().Hostname()\n}\n\n// WaitForPeerCount blocks until the client sees exactly n peers.\nfunc (c *TestClient) WaitForPeerCount(tb testing.TB, n int, timeout time.Duration) {\n\ttb.Helper()\n\n\tdeadline := time.After(timeout)\n\n\tfor {\n\t\tif nm := c.Netmap(); nm != nil && len(nm.Peers) == n {\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-c.updates:\n\t\t\t// Check again.\n\t\tcase <-deadline:\n\t\t\tnm := c.Netmap()\n\n\t\t\tgot := 0\n\t\t\tif nm != nil {\n\t\t\t\tgot = len(nm.Peers)\n\t\t\t}\n\n\t\t\ttb.Fatalf(\"servertest: WaitForPeerCount(%s, %d): timeout after %v (got %d peers)\", c.Name, n, timeout, got)\n\t\t}\n\t}\n}\n\n// WaitForCondition blocks until condFn returns true on the latest\n// netmap, or until timeout expires. This is useful for waiting for\n// specific state changes (e.g., peer going offline).\nfunc (c *TestClient) WaitForCondition(tb testing.TB, desc string, timeout time.Duration, condFn func(*netmap.NetworkMap) bool) {\n\ttb.Helper()\n\n\tdeadline := time.After(timeout)\n\n\tfor {\n\t\tif nm := c.Netmap(); nm != nil && condFn(nm) {\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-c.updates:\n\t\t\t// Check again.\n\t\tcase <-deadline:\n\t\t\ttb.Fatalf(\"servertest: WaitForCondition(%s, %q): timeout after %v\", c.Name, desc, timeout)\n\t\t}\n\t}\n}\n\n// Direct returns the underlying controlclient.Direct for\n// advanced operations like SetHostinfo or SendUpdate.\nfunc (c *TestClient) Direct() *controlclient.Direct {\n\treturn c.direct\n}\n\n// String implements fmt.Stringer for debug output.\nfunc (c *TestClient) String() string {\n\tnm := c.Netmap()\n\tif nm == nil {\n\t\treturn fmt.Sprintf(\"TestClient(%s, no netmap)\", c.Name)\n\t}\n\n\treturn fmt.Sprintf(\"TestClient(%s, %d peers)\", c.Name, len(nm.Peers))\n}\n"
  },
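  {
    "path": "hscontrol/servertest/client_example_test.go",
    "content": "package servertest_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/servertest\"\n)\n\n// TestClientUsageExample is an illustrative sketch of the TestServer\n// and TestClient API, added as documentation. It only uses calls that\n// the other tests in this package already exercise: NewServer,\n// CreateUser, NewClient, WithUser, WaitForPeers, PeerByName,\n// Disconnect and Reconnect.\nfunc TestClientUsageExample(t *testing.T) {\n\tt.Parallel()\n\n\t// Start an in-process control server and create a user.\n\tsrv := servertest.NewServer(t)\n\tuser := srv.CreateUser(t, \"example-user\")\n\n\t// Register two clients for the same user; NewClient also starts\n\t// the long-poll loop in the background.\n\tc1 := servertest.NewClient(t, srv, \"example-node1\",\n\t\tservertest.WithUser(user))\n\tc2 := servertest.NewClient(t, srv, \"example-node2\",\n\t\tservertest.WithUser(user))\n\n\t// Each client should eventually see the other as a peer.\n\tc1.WaitForPeers(t, 1, 10*time.Second)\n\tc2.WaitForPeers(t, 1, 10*time.Second)\n\n\tif _, found := c1.PeerByName(\"example-node2\"); !found {\n\t\tt.Fatalf(\"example-node1 should see example-node2\")\n\t}\n\n\t// Simulate a disconnect followed by a reconnect; the mesh\n\t// should recover.\n\tc1.Disconnect(t)\n\tc1.Reconnect(t)\n\tc1.WaitForPeers(t, 1, 15*time.Second)\n}\n"
  },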
  {
    "path": "hscontrol/servertest/consistency_test.go",
    "content": "package servertest_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/servertest\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\n// TestConsistency verifies that all nodes converge to the same\n// view of the network and that no updates are lost during various\n// operations.\nfunc TestConsistency(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"all_nodes_converge\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 5)\n\t\tservertest.AssertMeshComplete(t, h.Clients())\n\t\tservertest.AssertConsistentState(t, h.Clients())\n\t\tservertest.AssertSymmetricVisibility(t, h.Clients())\n\t})\n\n\tt.Run(\"self_node_has_correct_hostname\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\th := servertest.NewHarness(t, 3)\n\t\tfor _, c := range h.Clients() {\n\t\t\tassert.Equal(t, c.Name, c.SelfName(),\n\t\t\t\t\"client %s self name should match\", c.Name)\n\t\t}\n\t})\n\n\tt.Run(\"update_count_positive\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 3)\n\t\t// After mesh formation, each client should have received\n\t\t// at least one update.\n\t\tfor _, c := range h.Clients() {\n\t\t\tassert.Positive(t, c.UpdateCount(),\n\t\t\t\t\"client %s should have received at least one update\", c.Name)\n\t\t}\n\t})\n\n\tt.Run(\"new_node_visible_to_all\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 3)\n\n\t\tnewClient := h.AddClient(t)\n\t\th.WaitForMeshComplete(t, 10*time.Second)\n\n\t\t// Verify every original client sees the new node.\n\t\tfor _, c := range h.Clients() {\n\t\t\tif c == newClient {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t_, found := c.PeerByName(newClient.Name)\n\t\t\tassert.True(t, found,\n\t\t\t\t\"client %s should see new client %s\", c.Name, newClient.Name)\n\t\t}\n\n\t\t// And the new node sees all others.\n\t\tfor _, c := range h.Clients() {\n\t\t\tif c == newClient {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t_, found := newClient.PeerByName(c.Name)\n\t\t\tassert.True(t, found,\n\t\t\t\t\"new client %s should see %s\", newClient.Name, c.Name)\n\t\t}\n\t})\n\n\tt.Run(\"interleaved_join_and_leave\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 5)\n\n\t\t// Disconnect 2 nodes.\n\t\th.Client(0).Disconnect(t)\n\t\th.Client(1).Disconnect(t)\n\n\t\t// Add 3 new nodes while 2 are disconnected.\n\t\tc5 := h.AddClient(t)\n\t\tc6 := h.AddClient(t)\n\t\tc7 := h.AddClient(t)\n\n\t\t// Wait for new nodes to see at least all other connected\n\t\t// clients (they may also see the disconnected nodes during\n\t\t// the grace period, so we check >= not ==).\n\t\tconnected := h.ConnectedClients()\n\t\tminPeers := len(connected) - 1\n\n\t\tfor _, c := range connected {\n\t\t\tc.WaitForPeers(t, minPeers, 30*time.Second)\n\t\t}\n\n\t\t// Verify the new nodes can see each other.\n\t\tfor _, a := range []*servertest.TestClient{c5, c6, c7} {\n\t\t\tfor _, b := range []*servertest.TestClient{c5, c6, c7} {\n\t\t\t\tif a == b {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t_, found := a.PeerByName(b.Name)\n\t\t\t\tassert.True(t, found,\n\t\t\t\t\t\"new client %s should see %s\", a.Name, b.Name)\n\t\t\t}\n\t\t}\n\n\t\t// Verify all connected clients see each other (consistent state).\n\t\tservertest.AssertConsistentState(t, connected)\n\t})\n}\n"
  },
  {
    "path": "hscontrol/servertest/content_test.go",
    "content": "package servertest_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/servertest\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/types/netmap\"\n)\n\n// TestContentVerification exercises the correctness of MapResponse\n// content: that the self node, peers, DERP map, and other fields\n// are populated correctly.\nfunc TestContentVerification(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"self_node\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tt.Run(\"has_addresses\", func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\th := servertest.NewHarness(t, 1)\n\t\t\tservertest.AssertSelfHasAddresses(t, h.Client(0))\n\t\t})\n\n\t\tt.Run(\"has_machine_authorized\", func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\th := servertest.NewHarness(t, 1)\n\t\t\tnm := h.Client(0).Netmap()\n\t\t\trequire.NotNil(t, nm)\n\t\t\trequire.True(t, nm.SelfNode.Valid())\n\t\t\tassert.True(t, nm.SelfNode.MachineAuthorized(),\n\t\t\t\t\"self node should be machine-authorized\")\n\t\t})\n\t})\n\n\tt.Run(\"derp_map\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tt.Run(\"present_in_netmap\", func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\th := servertest.NewHarness(t, 1)\n\t\t\tservertest.AssertDERPMapPresent(t, h.Client(0))\n\t\t})\n\n\t\tt.Run(\"has_test_region\", func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\th := servertest.NewHarness(t, 1)\n\t\t\tnm := h.Client(0).Netmap()\n\t\t\trequire.NotNil(t, nm)\n\t\t\trequire.NotNil(t, nm.DERPMap)\n\t\t\t_, ok := nm.DERPMap.Regions[900]\n\t\t\tassert.True(t, ok, \"DERPMap should contain test region 900\")\n\t\t})\n\t})\n\n\tt.Run(\"peers\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tt.Run(\"have_addresses\", func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\th := servertest.NewHarness(t, 3)\n\n\t\t\tfor _, c := range h.Clients() {\n\t\t\t\tnm := c.Netmap()\n\t\t\t\trequire.NotNil(t, nm, \"client %s has no netmap\", c.Name)\n\n\t\t\t\tfor _, peer := range nm.Peers {\n\t\t\t\t\tassert.Positive(t, peer.Addresses().Len(),\n\t\t\t\t\t\t\"client %s: peer %d should have addresses\",\n\t\t\t\t\t\tc.Name, peer.ID())\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"have_allowed_ips\", func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\th := servertest.NewHarness(t, 3)\n\n\t\t\tfor _, c := range h.Clients() {\n\t\t\t\tnm := c.Netmap()\n\t\t\t\trequire.NotNil(t, nm)\n\n\t\t\t\tfor _, peer := range nm.Peers {\n\t\t\t\t\t// AllowedIPs should at least contain the peer's addresses.\n\t\t\t\t\tassert.Positive(t, peer.AllowedIPs().Len(),\n\t\t\t\t\t\t\"client %s: peer %d should have AllowedIPs\",\n\t\t\t\t\t\tc.Name, peer.ID())\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"online_status\", func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\th := servertest.NewHarness(t, 3)\n\n\t\t\t// Wait for online status to propagate (it may take an\n\t\t\t// extra update cycle after initial mesh formation).\n\t\t\tfor _, c := range h.Clients() {\n\t\t\t\tc.WaitForCondition(t, \"all peers online\",\n\t\t\t\t\t15*time.Second,\n\t\t\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\t\t\tfor _, peer := range nm.Peers {\n\t\t\t\t\t\t\tisOnline, known := peer.Online().GetOk()\n\t\t\t\t\t\t\tif !known || !isOnline {\n\t\t\t\t\t\t\t\treturn false\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\treturn len(nm.Peers) >= 2\n\t\t\t\t\t})\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"hostnames_match\", func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\th := servertest.NewHarness(t, 3)\n\n\t\t\tfor _, c := range h.Clients() {\n\t\t\t\tfor _, other := 
range h.Clients() {\n\t\t\t\t\tif c == other {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tpeer, found := c.PeerByName(other.Name)\n\t\t\t\t\trequire.True(t, found,\n\t\t\t\t\t\t\"client %s should see peer %s\", c.Name, other.Name)\n\n\t\t\t\t\thi := peer.Hostinfo()\n\t\t\t\t\tassert.True(t, hi.Valid())\n\t\t\t\t\tassert.Equal(t, other.Name, hi.Hostname())\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t})\n\n\tt.Run(\"update_history\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tt.Run(\"monotonic_peer_count_growth\", func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\t// Connect nodes one at a time and verify the first\n\t\t\t// node's history shows monotonic peer count growth.\n\t\t\tsrv := servertest.NewServer(t)\n\t\t\tuser := srv.CreateUser(t, \"hist-user\")\n\n\t\t\tc0 := servertest.NewClient(t, srv, \"hist-0\", servertest.WithUser(user))\n\t\t\tc0.WaitForUpdate(t, 10*time.Second)\n\n\t\t\t// Add second node.\n\t\t\tservertest.NewClient(t, srv, \"hist-1\", servertest.WithUser(user))\n\t\t\tc0.WaitForPeers(t, 1, 10*time.Second)\n\n\t\t\t// Add third node.\n\t\t\tservertest.NewClient(t, srv, \"hist-2\", servertest.WithUser(user))\n\t\t\tc0.WaitForPeers(t, 2, 10*time.Second)\n\n\t\t\t// Verify update history is monotonically increasing in peer count.\n\t\t\thistory := c0.History()\n\t\t\trequire.Greater(t, len(history), 1,\n\t\t\t\t\"should have multiple netmap updates\")\n\n\t\t\tmaxPeers := 0\n\t\t\tfor _, nm := range history {\n\t\t\t\tif len(nm.Peers) > maxPeers {\n\t\t\t\t\tmaxPeers = len(nm.Peers)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tassert.Equal(t, 2, maxPeers,\n\t\t\t\t\"max peer count should be 2 (for 3 total nodes)\")\n\t\t})\n\n\t\tt.Run(\"self_node_consistent_across_updates\", func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\th := servertest.NewHarness(t, 2)\n\n\t\t\thistory := h.Client(0).History()\n\t\t\trequire.NotEmpty(t, history)\n\n\t\t\t// All updates should have the same self node key.\n\t\t\tfirstKey := history[0].NodeKey\n\t\t\tfor i, nm := range history {\n\t\t\t\tassert.Equal(t, firstKey, nm.NodeKey,\n\t\t\t\t\t\"update %d: NodeKey should be consistent\", i)\n\t\t\t}\n\t\t})\n\t})\n\n\tt.Run(\"domain\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 1)\n\t\tnm := h.Client(0).Netmap()\n\t\trequire.NotNil(t, nm)\n\t\t// The domain might be empty in test mode, but shouldn't panic.\n\t\t_ = nm.Domain\n\t})\n\n\tt.Run(\"user_profiles\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 2)\n\t\tnm := h.Client(0).Netmap()\n\t\trequire.NotNil(t, nm)\n\t\t// User profiles should be populated for at least the self node.\n\t\tif nm.SelfNode.Valid() {\n\t\t\tuserID := nm.SelfNode.User()\n\t\t\t_, hasProfile := nm.UserProfiles[userID]\n\t\t\tassert.True(t, hasProfile,\n\t\t\t\t\"UserProfiles should contain the self node's user\")\n\t\t}\n\t})\n\n\tt.Run(\"peers_have_key\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 2)\n\n\t\t// Each client's peer should have a non-zero node key.\n\t\tnm := h.Client(0).Netmap()\n\t\trequire.NotNil(t, nm)\n\t\trequire.Len(t, nm.Peers, 1)\n\t\tassert.False(t, nm.Peers[0].Key().IsZero(),\n\t\t\t\"peer should have a non-zero node key\")\n\t})\n\n\tt.Run(\"endpoint_update_propagates\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 2)\n\n\t\t// Record initial update count on client 1.\n\t\tinitialCount := h.Client(1).UpdateCount()\n\n\t\t// Client 0 sends a non-streaming endpoint update\n\t\t// (this triggers a state update on the 
server). The update send itself is exercised via\n\t\t// Direct().SendUpdate in issues_test.go; this test only checks\n\t\t// the update-count bookkeeping.\n\t\th.Client(0).WaitForCondition(t, \"has netmap\", 5*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\treturn nm.SelfNode.Valid()\n\t\t\t})\n\n\t\t// Mesh formation has already delivered at least one update to\n\t\t// client 1, and UpdateCount is monotonic, so the count must not\n\t\t// be lower than the one recorded above.\n\t\tassert.GreaterOrEqual(t, h.Client(1).UpdateCount(), initialCount,\n\t\t\t\"client 1 should have received updates\")\n\t})\n}\n"
  },
  {
    "path": "hscontrol/servertest/ephemeral_test.go",
    "content": "package servertest_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/servertest\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/types/netmap\"\n)\n\n// TestEphemeralNodes tests the lifecycle of ephemeral nodes,\n// which should be automatically cleaned up when they disconnect.\nfunc TestEphemeralNodes(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"ephemeral_connects_and_sees_peers\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t,\n\t\t\tservertest.WithEphemeralTimeout(5*time.Second))\n\t\tuser := srv.CreateUser(t, \"eph-user\")\n\n\t\tregular := servertest.NewClient(t, srv, \"eph-regular\",\n\t\t\tservertest.WithUser(user))\n\t\tephemeral := servertest.NewClient(t, srv, \"eph-ephemeral\",\n\t\t\tservertest.WithUser(user), servertest.WithEphemeral())\n\n\t\t// Both should see each other.\n\t\tregular.WaitForPeers(t, 1, 10*time.Second)\n\t\tephemeral.WaitForPeers(t, 1, 10*time.Second)\n\n\t\t_, found := regular.PeerByName(\"eph-ephemeral\")\n\t\tassert.True(t, found, \"regular should see ephemeral peer\")\n\n\t\t_, found = ephemeral.PeerByName(\"eph-regular\")\n\t\tassert.True(t, found, \"ephemeral should see regular peer\")\n\t})\n\n\tt.Run(\"ephemeral_cleanup_after_disconnect\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\t// Use a short ephemeral timeout so the test doesn't take long.\n\t\tsrv := servertest.NewServer(t,\n\t\t\tservertest.WithEphemeralTimeout(3*time.Second))\n\t\tuser := srv.CreateUser(t, \"eph-cleanup-user\")\n\n\t\tregular := servertest.NewClient(t, srv, \"eph-cleanup-regular\",\n\t\t\tservertest.WithUser(user))\n\t\tephemeral := servertest.NewClient(t, srv, \"eph-cleanup-ephemeral\",\n\t\t\tservertest.WithUser(user), servertest.WithEphemeral())\n\n\t\tregular.WaitForPeers(t, 1, 10*time.Second)\n\n\t\t// Verify ephemeral peer is present before disconnect.\n\t\t_, found := regular.PeerByName(\"eph-cleanup-ephemeral\")\n\t\trequire.True(t, found, \"ephemeral peer should be visible before disconnect\")\n\n\t\t// Ensure the ephemeral node's long-poll session is fully\n\t\t// established on the server before disconnecting. 
Without\n\t\t// this, the Disconnect may cancel a PollNetMap that hasn't\n\t\t// yet reached serveLongPoll, so no grace period or ephemeral\n\t\t// GC would ever be scheduled.\n\t\tephemeral.WaitForPeers(t, 1, 10*time.Second)\n\n\t\t// Disconnect the ephemeral node.\n\t\tephemeral.Disconnect(t)\n\n\t\t// After the grace period (10s) + ephemeral timeout (3s),\n\t\t// the ephemeral node should be deleted from the server and\n\t\t// disappear from the regular node's peer list entirely.\n\t\t// Unlike non-ephemeral nodes which go offline but stay in\n\t\t// the peer list, ephemeral nodes should be garbage collected.\n\t\tregular.WaitForCondition(t, \"ephemeral peer removed from peer list\",\n\t\t\t60*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\tfor _, p := range nm.Peers {\n\t\t\t\t\thi := p.Hostinfo()\n\t\t\t\t\tif hi.Valid() && hi.Hostname() == \"eph-cleanup-ephemeral\" {\n\t\t\t\t\t\treturn false // still present\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn true // gone\n\t\t\t})\n\t})\n\n\tt.Run(\"ephemeral_and_regular_mixed\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t,\n\t\t\tservertest.WithEphemeralTimeout(5*time.Second))\n\t\tuser := srv.CreateUser(t, \"mix-user\")\n\n\t\tr1 := servertest.NewClient(t, srv, \"mix-regular-1\",\n\t\t\tservertest.WithUser(user))\n\t\tr2 := servertest.NewClient(t, srv, \"mix-regular-2\",\n\t\t\tservertest.WithUser(user))\n\t\te1 := servertest.NewClient(t, srv, \"mix-eph-1\",\n\t\t\tservertest.WithUser(user), servertest.WithEphemeral())\n\n\t\t// All three should see each other.\n\t\tr1.WaitForPeers(t, 2, 15*time.Second)\n\t\tr2.WaitForPeers(t, 2, 15*time.Second)\n\t\te1.WaitForPeers(t, 2, 15*time.Second)\n\n\t\tservertest.AssertMeshComplete(t,\n\t\t\t[]*servertest.TestClient{r1, r2, e1})\n\t})\n\n\tt.Run(\"ephemeral_reconnect_prevents_cleanup\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t,\n\t\t\tservertest.WithEphemeralTimeout(5*time.Second))\n\t\tuser := srv.CreateUser(t, \"eph-recon-user\")\n\n\t\tregular := servertest.NewClient(t, srv, \"eph-recon-regular\",\n\t\t\tservertest.WithUser(user))\n\t\tephemeral := servertest.NewClient(t, srv, \"eph-recon-ephemeral\",\n\t\t\tservertest.WithUser(user), servertest.WithEphemeral())\n\n\t\tregular.WaitForPeers(t, 1, 10*time.Second)\n\n\t\t// Ensure the ephemeral node's long-poll is established.\n\t\tephemeral.WaitForPeers(t, 1, 10*time.Second)\n\n\t\t// Disconnect and quickly reconnect.\n\t\tephemeral.Disconnect(t)\n\t\tephemeral.Reconnect(t)\n\n\t\t// After reconnecting, the ephemeral node should still be visible.\n\t\tregular.WaitForPeers(t, 1, 15*time.Second)\n\n\t\t_, found := regular.PeerByName(\"eph-recon-ephemeral\")\n\t\tassert.True(t, found,\n\t\t\t\"ephemeral node should still be visible after quick reconnect\")\n\t})\n}\n"
  },
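  {
    "path": "hscontrol/servertest/example_mutation_test.go",
    "content": "package servertest_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/servertest\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/types/netmap\"\n)\n\n// TestServerMutationExample is an illustrative sketch of the\n// server-side mutation pattern used throughout this package: mutate\n// state via srv.State(), then distribute the resulting change set\n// with srv.App.Change so connected clients receive a map update.\n// It reuses the findNodeID helper from issues_test.go.\nfunc TestServerMutationExample(t *testing.T) {\n\tt.Parallel()\n\n\tsrv := servertest.NewServer(t)\n\tuser := srv.CreateUser(t, \"mutation-user\")\n\n\tc1 := servertest.NewClient(t, srv, \"mutation-node1\",\n\t\tservertest.WithUser(user))\n\tc2 := servertest.NewClient(t, srv, \"mutation-node2\",\n\t\tservertest.WithUser(user))\n\n\tc1.WaitForPeers(t, 1, 10*time.Second)\n\tc2.WaitForPeers(t, 1, 10*time.Second)\n\n\t// Rename node1 on the server and propagate the change.\n\tnodeID := findNodeID(t, srv, \"mutation-node1\")\n\t_, renameChange, err := srv.State().RenameNode(nodeID, \"mutation-renamed\")\n\trequire.NoError(t, err)\n\tsrv.App.Change(renameChange)\n\n\t// node2 should observe the new name in a subsequent update.\n\tc2.WaitForCondition(t, \"renamed peer visible\", 10*time.Second,\n\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\tfor _, p := range nm.Peers {\n\t\t\t\tif p.Name() == \"mutation-renamed\" {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn false\n\t\t})\n}\n"
  },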
  {
    "path": "hscontrol/servertest/harness.go",
    "content": "package servertest\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n)\n\n// TestHarness orchestrates a TestServer with multiple TestClients,\n// providing a convenient setup for multi-node control plane tests.\ntype TestHarness struct {\n\tServer  *TestServer\n\tclients []*TestClient\n\n\t// Default user shared by all clients unless overridden.\n\tdefaultUser *types.User\n}\n\n// HarnessOption configures a TestHarness.\ntype HarnessOption func(*harnessConfig)\n\ntype harnessConfig struct {\n\tserverOpts     []ServerOption\n\tclientOpts     []ClientOption\n\tconvergenceMax time.Duration\n}\n\nfunc defaultHarnessConfig() *harnessConfig {\n\treturn &harnessConfig{\n\t\tconvergenceMax: 30 * time.Second,\n\t}\n}\n\n// WithServerOptions passes ServerOptions through to the underlying\n// TestServer.\nfunc WithServerOptions(opts ...ServerOption) HarnessOption {\n\treturn func(c *harnessConfig) { c.serverOpts = append(c.serverOpts, opts...) }\n}\n\n// WithDefaultClientOptions applies ClientOptions to every client\n// created by NewHarness.\nfunc WithDefaultClientOptions(opts ...ClientOption) HarnessOption {\n\treturn func(c *harnessConfig) { c.clientOpts = append(c.clientOpts, opts...) }\n}\n\n// WithConvergenceTimeout sets how long WaitForMeshComplete waits.\nfunc WithConvergenceTimeout(d time.Duration) HarnessOption {\n\treturn func(c *harnessConfig) { c.convergenceMax = d }\n}\n\n// NewHarness creates a TestServer and numClients connected clients.\n// All clients share a default user and are registered with reusable\n// pre-auth keys. The harness waits for all clients to form a\n// complete mesh before returning.\nfunc NewHarness(tb testing.TB, numClients int, opts ...HarnessOption) *TestHarness {\n\ttb.Helper()\n\n\thc := defaultHarnessConfig()\n\tfor _, o := range opts {\n\t\to(hc)\n\t}\n\n\tserver := NewServer(tb, hc.serverOpts...)\n\n\t// Create a shared default user.\n\tuser := server.CreateUser(tb, \"harness-default\")\n\n\th := &TestHarness{\n\t\tServer:      server,\n\t\tdefaultUser: user,\n\t}\n\n\t// Create and connect clients.\n\tfor i := range numClients {\n\t\tname := clientName(i)\n\n\t\tcopts := append([]ClientOption{WithUser(user)}, hc.clientOpts...)\n\t\tc := NewClient(tb, server, name, copts...)\n\t\th.clients = append(h.clients, c)\n\t}\n\n\t// Wait for the mesh to converge.\n\tif numClients > 1 {\n\t\th.WaitForMeshComplete(tb, hc.convergenceMax)\n\t} else if numClients == 1 {\n\t\t// Single node: just wait for the first netmap.\n\t\th.clients[0].WaitForUpdate(tb, hc.convergenceMax)\n\t}\n\n\treturn h\n}\n\n// Client returns the i-th client (0-indexed).\nfunc (h *TestHarness) Client(i int) *TestClient {\n\treturn h.clients[i]\n}\n\n// Clients returns all clients.\nfunc (h *TestHarness) Clients() []*TestClient {\n\treturn h.clients\n}\n\n// ConnectedClients returns clients that currently have an active\n// long-poll session (pollDone channel is still open).\nfunc (h *TestHarness) ConnectedClients() []*TestClient {\n\tvar out []*TestClient\n\n\tfor _, c := range h.clients {\n\t\tselect {\n\t\tcase <-c.pollDone:\n\t\t\t// Poll has ended, client is disconnected.\n\t\tdefault:\n\t\t\tout = append(out, c)\n\t\t}\n\t}\n\n\treturn out\n}\n\n// AddClient creates and connects a new client to the existing mesh.\nfunc (h *TestHarness) AddClient(tb testing.TB, opts ...ClientOption) *TestClient {\n\ttb.Helper()\n\n\tname := clientName(len(h.clients))\n\tcopts := append([]ClientOption{WithUser(h.defaultUser)}, 
opts...)\n\tc := NewClient(tb, h.Server, name, copts...)\n\th.clients = append(h.clients, c)\n\n\treturn c\n}\n\n// WaitForMeshComplete blocks until every connected client sees\n// (connectedCount - 1) peers.\nfunc (h *TestHarness) WaitForMeshComplete(tb testing.TB, timeout time.Duration) {\n\ttb.Helper()\n\n\tconnected := h.ConnectedClients()\n\n\texpectedPeers := max(len(connected)-1, 0)\n\n\tfor _, c := range connected {\n\t\tc.WaitForPeers(tb, expectedPeers, timeout)\n\t}\n}\n\n// WaitForConvergence waits until all connected clients have a\n// non-nil NetworkMap and their peer counts have stabilised.\nfunc (h *TestHarness) WaitForConvergence(tb testing.TB, timeout time.Duration) {\n\ttb.Helper()\n\th.WaitForMeshComplete(tb, timeout)\n}\n\n// ChangePolicy sets an ACL policy on the server and propagates changes\n// to all connected nodes. The policy should be a valid HuJSON policy document.\nfunc (h *TestHarness) ChangePolicy(tb testing.TB, policy []byte) {\n\ttb.Helper()\n\n\tchanged, err := h.Server.State().SetPolicy(policy)\n\tif err != nil {\n\t\ttb.Fatalf(\"servertest: ChangePolicy: %v\", err)\n\t}\n\n\tif changed {\n\t\tchanges, err := h.Server.State().ReloadPolicy()\n\t\tif err != nil {\n\t\t\ttb.Fatalf(\"servertest: ReloadPolicy: %v\", err)\n\t\t}\n\n\t\th.Server.App.Change(changes...)\n\t}\n}\n\n// DefaultUser returns the shared user for adding more clients.\nfunc (h *TestHarness) DefaultUser() *types.User {\n\treturn h.defaultUser\n}\n\nfunc clientName(index int) string {\n\treturn fmt.Sprintf(\"node-%d\", index)\n}\n"
  },
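  {
    "path": "hscontrol/servertest/harness_example_test.go",
    "content": "package servertest_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/servertest\"\n\t\"tailscale.com/types/netmap\"\n)\n\n// TestHarnessUsageExample is an illustrative sketch of the TestHarness\n// API, added as documentation. It combines the harness options and the\n// ChangePolicy helper the same way the other tests in this package do.\nfunc TestHarnessUsageExample(t *testing.T) {\n\tt.Parallel()\n\n\t// Three clients sharing one default user, with a generous\n\t// convergence timeout for loaded CI environments.\n\th := servertest.NewHarness(t, 3,\n\t\tservertest.WithConvergenceTimeout(60*time.Second))\n\n\t// NewHarness has already waited for a complete mesh, so every\n\t// client should see the other two.\n\tservertest.AssertMeshComplete(t, h.Clients())\n\n\t// Apply an allow-all policy and wait for a resulting update.\n\tbefore := h.Client(0).UpdateCount()\n\th.ChangePolicy(t, []byte(`{\n\t\t\"acls\": [\n\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}\n\t\t]\n\t}`))\n\n\th.Client(0).WaitForCondition(t, \"update after policy change\",\n\t\t10*time.Second,\n\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\treturn h.Client(0).UpdateCount() > before\n\t\t})\n}\n"
  },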
  {
    "path": "hscontrol/servertest/issues_test.go",
    "content": "package servertest_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/netip\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/servertest\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/netmap\"\n)\n\n// These tests are intentionally strict about expected behavior.\n// Failures surface real issues in the control plane.\n\n// TestIssuesMapContent tests issues with MapResponse content correctness.\nfunc TestIssuesMapContent(t *testing.T) {\n\tt.Parallel()\n\n\t// After mesh formation, all peers should have a known Online status.\n\t// The Online field is set when Connect() sends a NodeOnline PeerChange\n\t// patch. The initial MapResponse (from auth handler) may have Online=nil\n\t// because Connect() hasn't run yet, so we wait for the status to propagate.\n\tt.Run(\"initial_map_should_include_peer_online_status\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 3)\n\n\t\tfor _, c := range h.Clients() {\n\t\t\tc.WaitForCondition(t, \"all peers have known Online status\",\n\t\t\t\t10*time.Second,\n\t\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\t\tif len(nm.Peers) < 2 {\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\n\t\t\t\t\tfor _, peer := range nm.Peers {\n\t\t\t\t\t\tif _, known := peer.Online().GetOk(); !known {\n\t\t\t\t\t\t\treturn false\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\treturn true\n\t\t\t\t})\n\t\t}\n\t})\n\n\t// DiscoPublicKey set by the client should be visible to peers.\n\tt.Run(\"disco_key_should_propagate_to_peers\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 2)\n\n\t\t// The DiscoKey is sent in the first MapRequest (not the RegisterRequest),\n\t\t// so it may take an extra map update to propagate to peers. 
Wait for\n\t\t// the condition rather than checking the initial netmap.\n\t\th.Client(0).WaitForCondition(t, \"peer has non-zero DiscoKey\",\n\t\t\t10*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\tif len(nm.Peers) < 1 {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\n\t\t\t\treturn !nm.Peers[0].DiscoKey().IsZero()\n\t\t\t})\n\t})\n\n\t// All peers should reference a valid DERP region.\n\tt.Run(\"peers_have_valid_derp_region\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 3)\n\n\t\tfor _, c := range h.Clients() {\n\t\t\tnm := c.Netmap()\n\t\t\trequire.NotNil(t, nm)\n\t\t\trequire.NotNil(t, nm.DERPMap)\n\n\t\t\tfor _, peer := range nm.Peers {\n\t\t\t\tderpRegion := peer.HomeDERP()\n\n\t\t\t\tif derpRegion != 0 {\n\t\t\t\t\t_, regionExists := nm.DERPMap.Regions[derpRegion]\n\t\t\t\t\tassert.True(t, regionExists,\n\t\t\t\t\t\t\"client %s: peer %d has HomeDERP=%d which is not in DERPMap\",\n\t\t\t\t\t\tc.Name, peer.ID(), derpRegion)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\n\t// Each peer should have a valid user profile in the netmap.\n\tt.Run(\"all_peers_have_user_profiles\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser1 := srv.CreateUser(t, \"profile-user1\")\n\t\tuser2 := srv.CreateUser(t, \"profile-user2\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"profile-node1\",\n\t\t\tservertest.WithUser(user1))\n\t\tc2 := servertest.NewClient(t, srv, \"profile-node2\",\n\t\t\tservertest.WithUser(user2))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\t\tc2.WaitForPeers(t, 1, 10*time.Second)\n\n\t\tnm := c1.Netmap()\n\t\trequire.NotNil(t, nm)\n\n\t\tselfUserID := nm.SelfNode.User()\n\t\tselfProfile, hasSelf := nm.UserProfiles[selfUserID]\n\t\tassert.True(t, hasSelf, \"should have self user profile\")\n\n\t\tif hasSelf {\n\t\t\tassert.NotEmpty(t, selfProfile.DisplayName(),\n\t\t\t\t\"self user profile should have a display name\")\n\t\t}\n\n\t\trequire.Len(t, nm.Peers, 1)\n\t\tpeerUserID := nm.Peers[0].User()\n\n\t\tpeerProfile, hasPeer := nm.UserProfiles[peerUserID]\n\t\tassert.True(t, hasPeer,\n\t\t\t\"should have peer's user profile (user %d)\", peerUserID)\n\n\t\tif hasPeer {\n\t\t\tassert.NotEmpty(t, peerProfile.DisplayName(),\n\t\t\t\t\"peer user profile should have a display name\")\n\t\t}\n\t})\n}\n\n// TestIssuesRoutes tests issues with route propagation.\nfunc TestIssuesRoutes(t *testing.T) {\n\tt.Parallel()\n\n\t// Approving a route via API without the node announcing it must NOT\n\t// make the route visible in AllowedIPs. 
Tailscale uses a strict\n\t// advertise-then-approve model: routes are only distributed when the\n\t// node advertises them (Hostinfo.RoutableIPs) AND they are approved.\n\t// An approval without advertisement is a dormant pre-approval that\n\t// activates once the node starts advertising.\n\tt.Run(\"approved_route_without_announcement_not_distributed\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"noannounce-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"noannounce-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tc2 := servertest.NewClient(t, srv, \"noannounce-node2\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\n\t\tnodeID := findNodeID(t, srv, \"noannounce-node1\")\n\t\troute := netip.MustParsePrefix(\"10.0.0.0/24\")\n\n\t\t// The API should accept the approval without error — the route\n\t\t// is stored but dormant because the node is not advertising it.\n\t\t_, routeChange, err := srv.State().SetApprovedRoutes(\n\t\t\tnodeID, []netip.Prefix{route})\n\t\trequire.NoError(t, err)\n\t\tsrv.App.Change(routeChange)\n\n\t\t// Wait for any updates triggered by the route change to propagate,\n\t\t// then verify the route does NOT appear in AllowedIPs.\n\t\ttimer := time.NewTimer(3 * time.Second)\n\t\tdefer timer.Stop()\n\n\t\t<-timer.C\n\n\t\tnm := c2.Netmap()\n\t\trequire.NotNil(t, nm)\n\n\t\tfor _, p := range nm.Peers {\n\t\t\thi := p.Hostinfo()\n\t\t\tif hi.Valid() && hi.Hostname() == \"noannounce-node1\" {\n\t\t\t\tfor i := range p.AllowedIPs().Len() {\n\t\t\t\t\tassert.NotEqual(t, route, p.AllowedIPs().At(i),\n\t\t\t\t\t\t\"approved-but-not-announced route should not appear in AllowedIPs\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\n\t// When the server approves routes for a node, that node\n\t// should receive a self-update reflecting the change.\n\tt.Run(\"self_update_after_route_approval\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"selfup-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"selfup-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tservertest.NewClient(t, srv, \"selfup-node2\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\n\t\tnodeID := findNodeID(t, srv, \"selfup-node1\")\n\t\troute := netip.MustParsePrefix(\"10.77.0.0/24\")\n\n\t\tcountBefore := c1.UpdateCount()\n\n\t\t_, routeChange, err := srv.State().SetApprovedRoutes(\n\t\t\tnodeID, []netip.Prefix{route})\n\t\trequire.NoError(t, err)\n\t\tsrv.App.Change(routeChange)\n\n\t\tc1.WaitForCondition(t, \"self-update after route approval\",\n\t\t\t10*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\treturn c1.UpdateCount() > countBefore\n\t\t\t})\n\t})\n\n\t// Hostinfo route advertisement should be stored on server.\n\tt.Run(\"hostinfo_route_advertisement_stored_on_server\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"histore-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"histore-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tc2 := servertest.NewClient(t, srv, \"histore-node2\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\n\t\troute := netip.MustParsePrefix(\"10.99.0.0/24\")\n\n\t\tc1.Direct().SetHostinfo(&tailcfg.Hostinfo{\n\t\t\tBackendLogID: \"servertest-histore-node1\",\n\t\t\tHostname:     \"histore-node1\",\n\t\t\tRoutableIPs:  []netip.Prefix{route},\n\t\t})\n\n\t\tctx, cancel := 
context.WithTimeout(context.Background(), 5*time.Second)\n\t\tdefer cancel()\n\n\t\t_ = c1.Direct().SendUpdate(ctx)\n\n\t\tc2.WaitForCondition(t, \"route in peer hostinfo\", 10*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\tfor _, p := range nm.Peers {\n\t\t\t\t\thi := p.Hostinfo()\n\t\t\t\t\tif hi.Valid() && hi.Hostname() == \"histore-node1\" {\n\t\t\t\t\t\treturn hi.RoutableIPs().Len() > 0\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn false\n\t\t\t})\n\n\t\tnodeID := findNodeID(t, srv, \"histore-node1\")\n\t\tnv, ok := srv.State().GetNodeByID(nodeID)\n\t\trequire.True(t, ok, \"node should exist in server state\")\n\n\t\tannounced := nv.AnnouncedRoutes()\n\t\tassert.Contains(t, announced, route,\n\t\t\t\"server should store the advertised route as announced\")\n\t})\n}\n\n// TestIssuesIPAllocation tests IP address allocation correctness.\nfunc TestIssuesIPAllocation(t *testing.T) {\n\tt.Parallel()\n\n\t// Every node should get unique IPs.\n\tt.Run(\"ip_addresses_are_unique_across_nodes\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"ipuniq-user\")\n\n\t\tconst n = 10\n\n\t\tclients := make([]*servertest.TestClient, n)\n\t\tfor i := range n {\n\t\t\tclients[i] = servertest.NewClient(t, srv,\n\t\t\t\tfmt.Sprintf(\"ipuniq-%d\", i),\n\t\t\t\tservertest.WithUser(user))\n\t\t}\n\n\t\tfor _, c := range clients {\n\t\t\tc.WaitForUpdate(t, 15*time.Second)\n\t\t}\n\n\t\tseen := make(map[netip.Prefix]string)\n\n\t\tfor _, c := range clients {\n\t\t\tnm := c.Netmap()\n\t\t\trequire.NotNil(t, nm)\n\t\t\trequire.True(t, nm.SelfNode.Valid())\n\n\t\t\tfor i := range nm.SelfNode.Addresses().Len() {\n\t\t\t\taddr := nm.SelfNode.Addresses().At(i)\n\t\t\t\tif other, exists := seen[addr]; exists {\n\t\t\t\t\tt.Errorf(\"IP collision: %v assigned to both %s and %s\",\n\t\t\t\t\t\taddr, other, c.Name)\n\t\t\t\t}\n\n\t\t\t\tseen[addr] = c.Name\n\t\t\t}\n\t\t}\n\t})\n\n\t// After reconnect, IP addresses should be stable.\n\tt.Run(\"reconnect_preserves_ip_addresses\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 2)\n\n\t\tnm := h.Client(0).Netmap()\n\t\trequire.NotNil(t, nm)\n\t\trequire.True(t, nm.SelfNode.Valid())\n\n\t\taddrsBefore := make([]netip.Prefix, 0, nm.SelfNode.Addresses().Len())\n\t\tfor i := range nm.SelfNode.Addresses().Len() {\n\t\t\taddrsBefore = append(addrsBefore, nm.SelfNode.Addresses().At(i))\n\t\t}\n\n\t\trequire.NotEmpty(t, addrsBefore)\n\n\t\th.Client(0).Disconnect(t)\n\t\th.Client(0).Reconnect(t)\n\t\th.Client(0).WaitForPeers(t, 1, 15*time.Second)\n\n\t\tnmAfter := h.Client(0).Netmap()\n\t\trequire.NotNil(t, nmAfter)\n\t\trequire.True(t, nmAfter.SelfNode.Valid())\n\n\t\taddrsAfter := make([]netip.Prefix, 0, nmAfter.SelfNode.Addresses().Len())\n\t\tfor i := range nmAfter.SelfNode.Addresses().Len() {\n\t\t\taddrsAfter = append(addrsAfter, nmAfter.SelfNode.Addresses().At(i))\n\t\t}\n\n\t\tassert.Equal(t, addrsBefore, addrsAfter,\n\t\t\t\"IP addresses should be stable across reconnect\")\n\t})\n\n\t// New peers should have addresses immediately.\n\tt.Run(\"new_peer_has_addresses_immediately\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"newaddr-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"newaddr-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tc1.WaitForUpdate(t, 10*time.Second)\n\n\t\tservertest.NewClient(t, srv, \"newaddr-node2\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 
10*time.Second)\n\n\t\tnm := c1.Netmap()\n\t\trequire.NotNil(t, nm)\n\t\trequire.Len(t, nm.Peers, 1)\n\n\t\tassert.Positive(t, nm.Peers[0].Addresses().Len(),\n\t\t\t\"new peer should have addresses in the first update that includes it\")\n\t})\n}\n\n// TestIssuesServerMutations tests that server-side mutations propagate correctly.\nfunc TestIssuesServerMutations(t *testing.T) {\n\tt.Parallel()\n\n\t// Renaming a node via API should propagate to peers.\n\tt.Run(\"node_rename_propagates_to_peers\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"rename-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"rename-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tc2 := servertest.NewClient(t, srv, \"rename-node2\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\n\t\tnodeID := findNodeID(t, srv, \"rename-node1\")\n\n\t\t_, renameChange, err := srv.State().RenameNode(nodeID, \"renamed-node1\")\n\t\trequire.NoError(t, err)\n\t\tsrv.App.Change(renameChange)\n\n\t\tc2.WaitForCondition(t, \"renamed peer visible\", 10*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\tfor _, p := range nm.Peers {\n\t\t\t\t\tif p.Name() == \"renamed-node1\" {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn false\n\t\t\t})\n\t})\n\n\t// Deleting a node via API should remove it from all peers.\n\tt.Run(\"node_delete_removes_from_all_peers\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"del-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"del-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tservertest.NewClient(t, srv, \"del-node2\",\n\t\t\tservertest.WithUser(user))\n\t\tc3 := servertest.NewClient(t, srv, \"del-node3\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 2, 15*time.Second)\n\n\t\tnodeID2 := findNodeID(t, srv, \"del-node2\")\n\t\tnode2View, ok := srv.State().GetNodeByID(nodeID2)\n\t\trequire.True(t, ok)\n\n\t\tdeleteChange, err := srv.State().DeleteNode(node2View)\n\t\trequire.NoError(t, err)\n\t\tsrv.App.Change(deleteChange)\n\n\t\tc1.WaitForCondition(t, \"deleted peer gone\", 10*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\tfor _, p := range nm.Peers {\n\t\t\t\t\thi := p.Hostinfo()\n\t\t\t\t\tif hi.Valid() && hi.Hostname() == \"del-node2\" {\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn true\n\t\t\t})\n\n\t\tc3.WaitForCondition(t, \"deleted peer gone from c3\", 10*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\tfor _, p := range nm.Peers {\n\t\t\t\t\thi := p.Hostinfo()\n\t\t\t\t\tif hi.Valid() && hi.Hostname() == \"del-node2\" {\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn true\n\t\t\t})\n\n\t\tassert.Len(t, c1.Peers(), 1)\n\t\tassert.Len(t, c3.Peers(), 1)\n\t})\n\n\t// Hostinfo changes should propagate to peers.\n\tt.Run(\"hostinfo_changes_propagate_to_peers\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"hichange-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"hichange-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tc2 := servertest.NewClient(t, srv, \"hichange-node2\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\n\t\tc1.Direct().SetHostinfo(&tailcfg.Hostinfo{\n\t\t\tBackendLogID: \"servertest-hichange-node1\",\n\t\t\tHostname:     \"hichange-node1\",\n\t\t\tOS:           \"TestOS\",\n\t\t})\n\n\t\tctx, cancel := 
context.WithTimeout(context.Background(), 5*time.Second)\n\t\tdefer cancel()\n\n\t\t_ = c1.Direct().SendUpdate(ctx)\n\n\t\tc2.WaitForCondition(t, \"OS change visible\", 10*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\tfor _, p := range nm.Peers {\n\t\t\t\t\thi := p.Hostinfo()\n\t\t\t\t\tif hi.Valid() && hi.Hostname() == \"hichange-node1\" {\n\t\t\t\t\t\treturn hi.OS() == \"TestOS\"\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn false\n\t\t\t})\n\t})\n}\n\n// TestIssuesNodeStoreConsistency tests NodeStore + DB consistency.\nfunc TestIssuesNodeStoreConsistency(t *testing.T) {\n\tt.Parallel()\n\n\t// NodeStore and DB should agree after mutations.\n\tt.Run(\"nodestore_db_consistency_after_operations\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"consist-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"consist-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tservertest.NewClient(t, srv, \"consist-node2\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\n\t\tnodeID1 := findNodeID(t, srv, \"consist-node1\")\n\n\t\troute := netip.MustParsePrefix(\"10.50.0.0/24\")\n\t\t_, routeChange, err := srv.State().SetApprovedRoutes(\n\t\t\tnodeID1, []netip.Prefix{route})\n\t\trequire.NoError(t, err)\n\t\tsrv.App.Change(routeChange)\n\n\t\tnsView, ok := srv.State().GetNodeByID(nodeID1)\n\t\trequire.True(t, ok, \"node should be in NodeStore\")\n\n\t\tdbNode, err := srv.State().DB().GetNodeByID(nodeID1)\n\t\trequire.NoError(t, err, \"node should be in database\")\n\n\t\tnsRoutes := nsView.ApprovedRoutes().AsSlice()\n\t\tdbRoutes := dbNode.ApprovedRoutes\n\n\t\tassert.Equal(t, nsRoutes, dbRoutes,\n\t\t\t\"NodeStore and DB should agree on approved routes\")\n\t})\n\n\t// After rapid reconnect, NodeStore should reflect correct state.\n\tt.Run(\"nodestore_correct_after_rapid_reconnect\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"nsrecon-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"nsrecon-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tservertest.NewClient(t, srv, \"nsrecon-node2\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\n\t\tnodeID1 := findNodeID(t, srv, \"nsrecon-node1\")\n\n\t\tfor range 5 {\n\t\t\tc1.Disconnect(t)\n\t\t\tc1.Reconnect(t)\n\t\t}\n\n\t\tc1.WaitForPeers(t, 1, 15*time.Second)\n\n\t\tnv, ok := srv.State().GetNodeByID(nodeID1)\n\t\trequire.True(t, ok)\n\n\t\tisOnline, known := nv.IsOnline().GetOk()\n\t\tassert.True(t, known, \"NodeStore should know online status after reconnect\")\n\t\tassert.True(t, isOnline, \"NodeStore should show node as online after reconnect\")\n\t})\n}\n\n// TestIssuesGracePeriod tests the disconnect grace period behavior.\nfunc TestIssuesGracePeriod(t *testing.T) {\n\tt.Parallel()\n\n\t// Offline status should arrive promptly after grace period.\n\tt.Run(\"offline_status_arrives_within_grace_period_plus_margin\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 2)\n\n\t\tpeerName := h.Client(1).Name\n\n\t\th.Client(0).WaitForCondition(t, \"peer online\", 15*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\tfor _, p := range nm.Peers {\n\t\t\t\t\thi := p.Hostinfo()\n\t\t\t\t\tif hi.Valid() && hi.Hostname() == peerName {\n\t\t\t\t\t\tisOnline, known := p.Online().GetOk()\n\n\t\t\t\t\t\treturn known && isOnline\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn false\n\t\t\t})\n\n\t\tdisconnectTime := 
time.Now()\n\n\t\th.Client(1).Disconnect(t)\n\n\t\th.Client(0).WaitForCondition(t, \"peer offline\", 20*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\tfor _, p := range nm.Peers {\n\t\t\t\t\thi := p.Hostinfo()\n\t\t\t\t\tif hi.Valid() && hi.Hostname() == peerName {\n\t\t\t\t\t\tisOnline, known := p.Online().GetOk()\n\n\t\t\t\t\t\treturn known && !isOnline\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn false\n\t\t\t})\n\n\t\telapsed := time.Since(disconnectTime)\n\t\tt.Logf(\"offline status arrived after %v\", elapsed)\n\n\t\tassert.Greater(t, elapsed, 8*time.Second,\n\t\t\t\"offline status arrived too quickly -- grace period may not be working\")\n\t\tassert.Less(t, elapsed, 20*time.Second,\n\t\t\t\"offline status took too long -- propagation delay issue\")\n\t})\n\n\t// Ephemeral nodes should be fully deleted.\n\tt.Run(\"ephemeral_node_deleted_not_just_offline\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t,\n\t\t\tservertest.WithEphemeralTimeout(3*time.Second))\n\t\tuser := srv.CreateUser(t, \"eph-del-user\")\n\n\t\tregular := servertest.NewClient(t, srv, \"eph-del-regular\",\n\t\t\tservertest.WithUser(user))\n\t\tephemeral := servertest.NewClient(t, srv, \"eph-del-ephemeral\",\n\t\t\tservertest.WithUser(user), servertest.WithEphemeral())\n\n\t\tregular.WaitForPeers(t, 1, 10*time.Second)\n\n\t\t_, found := regular.PeerByName(\"eph-del-ephemeral\")\n\t\trequire.True(t, found)\n\n\t\t// Ensure the ephemeral node's long-poll session is fully\n\t\t// established on the server before disconnecting. Without\n\t\t// this, the Disconnect may cancel a PollNetMap that hasn't\n\t\t// yet reached serveLongPoll, so no grace period or ephemeral\n\t\t// GC would ever be scheduled.\n\t\tephemeral.WaitForPeers(t, 1, 10*time.Second)\n\n\t\tephemeral.Disconnect(t)\n\n\t\t// Grace period (10s) + ephemeral GC timeout (3s) + propagation.\n\t\t// Use a generous timeout for CI environments under load.\n\t\tregular.WaitForCondition(t, \"ephemeral peer removed\", 60*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\tfor _, p := range nm.Peers {\n\t\t\t\t\thi := p.Hostinfo()\n\t\t\t\t\tif hi.Valid() && hi.Hostname() == \"eph-del-ephemeral\" {\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn true\n\t\t\t})\n\n\t\tnodes := srv.State().ListNodes()\n\t\tfor i := range nodes.Len() {\n\t\t\tn := nodes.At(i)\n\t\t\tassert.NotEqual(t, \"eph-del-ephemeral\", n.Hostname(),\n\t\t\t\t\"ephemeral node should be deleted from server state\")\n\t\t}\n\t})\n}\n\n// TestIssuesScale tests behavior under scale and rapid changes.\nfunc TestIssuesScale(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"simultaneous_connect_all_see_all\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"simul-user\")\n\n\t\tconst n = 10\n\n\t\tclients := make([]*servertest.TestClient, n)\n\t\tfor i := range n {\n\t\t\tclients[i] = servertest.NewClient(t, srv,\n\t\t\t\tfmt.Sprintf(\"simul-node-%d\", i),\n\t\t\t\tservertest.WithUser(user))\n\t\t}\n\n\t\tfor _, c := range clients {\n\t\t\tc.WaitForPeers(t, n-1, 30*time.Second)\n\t\t}\n\n\t\tservertest.AssertMeshComplete(t, clients)\n\t\tservertest.AssertSymmetricVisibility(t, clients)\n\t})\n\n\t// Many rapid additions should all be delivered.\n\tt.Run(\"rapid_sequential_additions\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"rapid-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, 
\"rapid-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tc1.WaitForUpdate(t, 10*time.Second)\n\n\t\tfor i := range 5 {\n\t\t\tservertest.NewClient(t, srv,\n\t\t\t\tfmt.Sprintf(\"rapid-node-%d\", i+2),\n\t\t\t\tservertest.WithUser(user))\n\t\t}\n\n\t\tc1.WaitForPeers(t, 5, 30*time.Second)\n\t\tassert.Len(t, c1.Peers(), 5)\n\t})\n\n\t// Reconnect should give a complete map.\n\tt.Run(\"reconnect_gets_complete_map\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 3)\n\n\t\th.Client(0).Disconnect(t)\n\t\th.Client(0).Reconnect(t)\n\t\th.Client(0).WaitForPeers(t, 2, 15*time.Second)\n\n\t\tnm := h.Client(0).Netmap()\n\t\trequire.NotNil(t, nm)\n\t\tassert.Len(t, nm.Peers, 2)\n\t\tassert.True(t, nm.SelfNode.Valid())\n\t\tassert.Positive(t, nm.SelfNode.Addresses().Len())\n\t})\n}\n\n// TestIssuesIdentity tests node identity and naming behavior.\nfunc TestIssuesIdentity(t *testing.T) {\n\tt.Parallel()\n\n\t// Cross-user visibility with default policy.\n\tt.Run(\"cross_user_visibility_default_policy\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser1 := srv.CreateUser(t, \"xuser1\")\n\t\tuser2 := srv.CreateUser(t, \"xuser2\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"xuser-node1\",\n\t\t\tservertest.WithUser(user1))\n\t\tc2 := servertest.NewClient(t, srv, \"xuser-node2\",\n\t\t\tservertest.WithUser(user2))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\t\tc2.WaitForPeers(t, 1, 10*time.Second)\n\n\t\t_, found := c1.PeerByName(\"xuser-node2\")\n\t\tassert.True(t, found, \"user1's node should see user2's node\")\n\n\t\t_, found = c2.PeerByName(\"xuser-node1\")\n\t\tassert.True(t, found, \"user2's node should see user1's node\")\n\t})\n\n\t// Multiple nodes same user should be distinct.\n\tt.Run(\"multiple_nodes_same_user_distinct\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"sameuser\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"sameuser-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tc2 := servertest.NewClient(t, srv, \"sameuser-node2\",\n\t\t\tservertest.WithUser(user))\n\t\tc3 := servertest.NewClient(t, srv, \"sameuser-node3\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 2, 15*time.Second)\n\t\tc2.WaitForPeers(t, 2, 15*time.Second)\n\t\tc3.WaitForPeers(t, 2, 15*time.Second)\n\n\t\tnm1 := c1.Netmap()\n\t\tnm2 := c2.Netmap()\n\t\tnm3 := c3.Netmap()\n\n\t\trequire.NotNil(t, nm1)\n\t\trequire.NotNil(t, nm2)\n\t\trequire.NotNil(t, nm3)\n\n\t\tids := map[tailcfg.NodeID]string{\n\t\t\tnm1.SelfNode.ID(): c1.Name,\n\t\t\tnm2.SelfNode.ID(): c2.Name,\n\t\t\tnm3.SelfNode.ID(): c3.Name,\n\t\t}\n\t\tassert.Len(t, ids, 3,\n\t\t\t\"three nodes with same user should have distinct node IDs\")\n\t})\n\n\t// Same hostname should get unique GivenNames.\n\tt.Run(\"same_hostname_gets_unique_given_names\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"samename-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"samename\",\n\t\t\tservertest.WithUser(user))\n\t\tc2 := servertest.NewClient(t, srv, \"samename\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\t\tc2.WaitForPeers(t, 1, 10*time.Second)\n\n\t\tnm1 := c1.Netmap()\n\t\tnm2 := c2.Netmap()\n\n\t\trequire.NotNil(t, nm1)\n\t\trequire.NotNil(t, nm2)\n\t\trequire.True(t, nm1.SelfNode.Valid())\n\t\trequire.True(t, nm2.SelfNode.Valid())\n\n\t\tname1 := nm1.SelfNode.Name()\n\t\tname2 := nm2.SelfNode.Name()\n\n\t\tassert.NotEqual(t, 
name1, name2,\n\t\t\t\"nodes with same hostname should get distinct Name (GivenName): %q vs %q\",\n\t\t\tname1, name2)\n\t})\n\n\t// Policy change during connect should still converge.\n\tt.Run(\"policy_change_during_connect\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"polcon-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"polcon-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tc1.WaitForUpdate(t, 10*time.Second)\n\n\t\tchanged, err := srv.State().SetPolicy([]byte(`{\n\t\t\t\"acls\": [\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}\n\t\t\t]\n\t\t}`))\n\t\trequire.NoError(t, err)\n\n\t\tif changed {\n\t\t\tchanges, err := srv.State().ReloadPolicy()\n\t\t\trequire.NoError(t, err)\n\t\t\tsrv.App.Change(changes...)\n\t\t}\n\n\t\tc2 := servertest.NewClient(t, srv, \"polcon-node2\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 15*time.Second)\n\t\tc2.WaitForPeers(t, 1, 15*time.Second)\n\n\t\tfor _, c := range []*servertest.TestClient{c1, c2} {\n\t\t\tnm := c.Netmap()\n\t\t\trequire.NotNil(t, nm)\n\t\t\tassert.NotNil(t, nm.PacketFilter,\n\t\t\t\t\"client %s should have packet filter after policy change\", c.Name)\n\t\t}\n\t})\n}\n\nfunc findNodeID(tb testing.TB, srv *servertest.TestServer, hostname string) types.NodeID {\n\ttb.Helper()\n\n\tnodes := srv.State().ListNodes()\n\tfor i := range nodes.Len() {\n\t\tn := nodes.At(i)\n\t\tif n.Hostname() == hostname {\n\t\t\treturn n.ID()\n\t\t}\n\t}\n\n\ttb.Fatalf(\"node %q not found in server state\", hostname)\n\n\treturn 0\n}\n"
  },
  {
    "path": "hscontrol/servertest/lifecycle_test.go",
    "content": "package servertest_test\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/servertest\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"tailscale.com/types/netmap\"\n)\n\n// TestConnectionLifecycle exercises the core node lifecycle:\n// connecting, seeing peers, joining mid-session, departing, and\n// reconnecting.\nfunc TestConnectionLifecycle(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"single_node\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 1)\n\t\tnm := h.Client(0).Netmap()\n\t\tassert.NotNil(t, nm, \"single node should receive a netmap\")\n\t\tassert.Empty(t, nm.Peers, \"single node should have no peers\")\n\t})\n\n\tt.Run(\"new_node_joins_mesh\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 3)\n\n\t\t// Add a 4th client mid-test.\n\t\th.AddClient(t)\n\t\th.WaitForMeshComplete(t, 10*time.Second)\n\t\tservertest.AssertMeshComplete(t, h.Clients())\n\t\tservertest.AssertSymmetricVisibility(t, h.Clients())\n\t})\n\n\tt.Run(\"node_departs_peer_goes_offline\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 3)\n\n\t\tdepartingName := h.Client(2).Name\n\n\t\t// First verify the departing node is online (may need a moment\n\t\t// for Online status to propagate after mesh formation).\n\t\th.Client(0).WaitForCondition(t, \"peer initially online\", 15*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\tfor _, p := range nm.Peers {\n\t\t\t\t\thi := p.Hostinfo()\n\t\t\t\t\tif hi.Valid() && hi.Hostname() == departingName {\n\t\t\t\t\t\tisOnline, known := p.Online().GetOk()\n\n\t\t\t\t\t\treturn known && isOnline\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn false\n\t\t\t})\n\n\t\th.Client(2).Disconnect(t)\n\n\t\t// After the 10-second grace period, the remaining clients\n\t\t// should see the departed node as offline. The peer stays\n\t\t// in the peer list (non-ephemeral nodes are not removed).\n\t\th.Client(0).WaitForCondition(t, \"peer goes offline\", 30*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\tfor _, p := range nm.Peers {\n\t\t\t\t\thi := p.Hostinfo()\n\t\t\t\t\tif hi.Valid() && hi.Hostname() == departingName {\n\t\t\t\t\t\tisOnline, known := p.Online().GetOk()\n\n\t\t\t\t\t\treturn known && !isOnline\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn false\n\t\t\t})\n\t})\n\n\tt.Run(\"reconnect_restores_mesh\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 2)\n\n\t\t// Disconnect and reconnect.\n\t\th.Client(0).Disconnect(t)\n\t\th.Client(0).Reconnect(t)\n\n\t\t// Mesh should recover.\n\t\th.WaitForMeshComplete(t, 15*time.Second)\n\t\tservertest.AssertMeshComplete(t, h.Clients())\n\t})\n\n\tt.Run(\"session_replacement\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 2)\n\n\t\t// Reconnect without explicitly waiting for the old session to\n\t\t// fully drain. 
This tests that Headscale correctly replaces\n\t\t// the old map session for the same node.\n\t\th.Client(0).Reconnect(t)\n\t\th.WaitForMeshComplete(t, 15*time.Second)\n\t\tservertest.AssertMeshComplete(t, h.Clients())\n\t})\n\n\tt.Run(\"multiple_nodes_join_sequentially\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsizes := []int{2, 5, 10}\n\t\tfor _, n := range sizes {\n\t\t\tt.Run(fmt.Sprintf(\"%d_nodes\", n), func(t *testing.T) {\n\t\t\t\tt.Parallel()\n\t\t\t\th := servertest.NewHarness(t, n)\n\t\t\t\tservertest.AssertMeshComplete(t, h.Clients())\n\t\t\t\tservertest.AssertSymmetricVisibility(t, h.Clients())\n\t\t\t})\n\t\t}\n\t})\n}\n"
  },
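  {
    "path": "hscontrol/servertest/node_reconnect_example_test.go",
    "content": "package servertest_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/servertest\"\n)\n\n// TestReconnectAfterExample is an illustrative sketch for the\n// ReconnectAfter helper from client.go, which none of the other tests\n// exercise directly: it disconnects, waits for the given duration,\n// then re-registers and resumes polling.\nfunc TestReconnectAfterExample(t *testing.T) {\n\tt.Parallel()\n\th := servertest.NewHarness(t, 2)\n\n\t// Drop node-0 for two seconds (well inside the 10s disconnect\n\t// grace period), then reconnect and wait for the mesh to recover.\n\th.Client(0).ReconnectAfter(t, 2*time.Second)\n\th.WaitForMeshComplete(t, 15*time.Second)\n\tservertest.AssertMeshComplete(t, h.Clients())\n}\n"
  },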
  {
    "path": "hscontrol/servertest/policy_test.go",
    "content": "package servertest_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/servertest\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/types/netmap\"\n)\n\n// TestPolicyChanges verifies that ACL policy changes propagate\n// correctly to all connected nodes, affecting peer visibility\n// and packet filters.\nfunc TestPolicyChanges(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"default_allow_all\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\t// With no explicit policy (database mode), the default\n\t\t// is to allow all traffic. All nodes should see each other.\n\t\th := servertest.NewHarness(t, 3)\n\t\tservertest.AssertMeshComplete(t, h.Clients())\n\t})\n\n\tt.Run(\"explicit_allow_all_policy\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 2)\n\n\t\t// Record update counts before policy change.\n\t\tcountBefore := h.Client(0).UpdateCount()\n\n\t\t// Set an allow-all policy explicitly.\n\t\th.ChangePolicy(t, []byte(`{\n\t\t\t\"acls\": [\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}\n\t\t\t]\n\t\t}`))\n\n\t\t// Both clients should receive an update after the policy change.\n\t\th.Client(0).WaitForCondition(t, \"update after policy\",\n\t\t\t10*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\treturn h.Client(0).UpdateCount() > countBefore\n\t\t\t})\n\t})\n\n\tt.Run(\"policy_with_allow_all_has_packet_filter\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"pf-user\")\n\n\t\t// Set a valid allow-all policy.\n\t\tchanged, err := srv.State().SetPolicy([]byte(`{\n\t\t\t\"acls\": [\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}\n\t\t\t]\n\t\t}`))\n\t\trequire.NoError(t, err)\n\n\t\tif changed {\n\t\t\tchanges, err := srv.State().ReloadPolicy()\n\t\t\trequire.NoError(t, err)\n\t\t\tsrv.App.Change(changes...)\n\t\t}\n\n\t\tc := servertest.NewClient(t, srv, \"pf-node\", servertest.WithUser(user))\n\t\tc.WaitForUpdate(t, 15*time.Second)\n\n\t\tnm := c.Netmap()\n\t\trequire.NotNil(t, nm)\n\n\t\t// The netmap should have packet filter rules from the\n\t\t// allow-all policy.\n\t\tassert.NotNil(t, nm.PacketFilter,\n\t\t\t\"PacketFilter should be populated with allow-all rules\")\n\t})\n\n\tt.Run(\"policy_change_triggers_update_on_all_nodes\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 3)\n\n\t\tcounts := make([]int, len(h.Clients()))\n\t\tfor i, c := range h.Clients() {\n\t\t\tcounts[i] = c.UpdateCount()\n\t\t}\n\n\t\t// Change policy.\n\t\th.ChangePolicy(t, []byte(`{\n\t\t\t\"acls\": [\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}\n\t\t\t]\n\t\t}`))\n\n\t\t// All clients should receive at least one more update.\n\t\tfor i, c := range h.Clients() {\n\t\t\tc.WaitForCondition(t, \"update after policy change\",\n\t\t\t\t10*time.Second,\n\t\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\t\treturn c.UpdateCount() > counts[i]\n\t\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"multiple_policy_changes\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 2)\n\n\t\t// Apply policy twice and verify updates arrive both times.\n\t\tfor round := range 2 {\n\t\t\tcountBefore := h.Client(0).UpdateCount()\n\n\t\t\th.ChangePolicy(t, []byte(`{\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}\n\t\t\t\t]\n\t\t\t}`))\n\n\t\t\th.Client(0).WaitForCondition(t, 
\"update after policy change\",\n\t\t\t\t10*time.Second,\n\t\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\t\treturn h.Client(0).UpdateCount() > countBefore\n\t\t\t\t})\n\n\t\t\tt.Logf(\"round %d: update received\", round)\n\t\t}\n\t})\n\n\tt.Run(\"policy_with_multiple_users\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser1 := srv.CreateUser(t, \"multi-user1\")\n\t\tuser2 := srv.CreateUser(t, \"multi-user2\")\n\t\tuser3 := srv.CreateUser(t, \"multi-user3\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"multi-node1\", servertest.WithUser(user1))\n\t\tc2 := servertest.NewClient(t, srv, \"multi-node2\", servertest.WithUser(user2))\n\t\tc3 := servertest.NewClient(t, srv, \"multi-node3\", servertest.WithUser(user3))\n\n\t\t// With default allow-all, all should see each other.\n\t\tc1.WaitForPeers(t, 2, 15*time.Second)\n\t\tc2.WaitForPeers(t, 2, 15*time.Second)\n\t\tc3.WaitForPeers(t, 2, 15*time.Second)\n\n\t\tservertest.AssertMeshComplete(t,\n\t\t\t[]*servertest.TestClient{c1, c2, c3})\n\t})\n}\n"
  },
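  {
    "path": "hscontrol/servertest/policy_util_test.go",
    "content": "package servertest_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/servertest\"\n\t\"github.com/stretchr/testify/require\"\n)\n\n// mustChangePolicy is a hypothetical helper, not servertest API: a\n// minimal sketch distilling the SetPolicy -> ReloadPolicy ->\n// App.Change sequence that the policy tests above repeat inline.\n// SetPolicy persists the policy and reports whether it changed;\n// ReloadPolicy recomputes derived state (packet filters etc.) and\n// returns the change set that App.Change broadcasts to all\n// connected nodes.\nfunc mustChangePolicy(t *testing.T, srv *servertest.TestServer, policy []byte) {\n\tt.Helper()\n\n\tchanged, err := srv.State().SetPolicy(policy)\n\trequire.NoError(t, err)\n\n\tif !changed {\n\t\t// Identical policy: nothing to reload or broadcast.\n\t\treturn\n\t}\n\n\tchanges, err := srv.State().ReloadPolicy()\n\trequire.NoError(t, err)\n\tsrv.App.Change(changes...)\n}\n"
  },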
  {
    "path": "hscontrol/servertest/poll_race_test.go",
    "content": "package servertest_test\n\nimport (\n\t\"fmt\"\n\t\"net/netip\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/servertest\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/types/netmap\"\n)\n\n// TestPollRace targets logical race conditions specifically in the\n// poll.go session lifecycle and the batcher's handling of concurrent\n// sessions for the same node.\n\nfunc TestPollRace(t *testing.T) {\n\tt.Parallel()\n\n\t// The core race: when a node disconnects, poll.go starts a\n\t// grace period goroutine (10s ticker loop). If the node\n\t// reconnects during this period, the new session calls\n\t// Connect() to mark the node online. But the old grace period\n\t// goroutine is still running and may call Disconnect() AFTER\n\t// the new Connect(), setting IsOnline=false incorrectly.\n\t//\n\t// This test verifies the exact symptom: after reconnect within\n\t// the grace period, the server-side node state should be online.\n\tt.Run(\"server_state_online_after_reconnect_within_grace\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"gracerace-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"gracerace-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tservertest.NewClient(t, srv, \"gracerace-node2\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\n\t\tnodeID := findNodeID(t, srv, \"gracerace-node1\")\n\n\t\t// Disconnect and immediately reconnect.\n\t\tc1.Disconnect(t)\n\t\tc1.Reconnect(t)\n\t\tc1.WaitForPeers(t, 1, 15*time.Second)\n\n\t\t// Check server-side state immediately.\n\t\tnv, ok := srv.State().GetNodeByID(nodeID)\n\t\trequire.True(t, ok)\n\n\t\tisOnline, known := nv.IsOnline().GetOk()\n\t\tassert.True(t, known,\n\t\t\t\"server should know online status after reconnect\")\n\t\tassert.True(t, isOnline,\n\t\t\t\"server should show node as online after reconnect within grace period\")\n\t})\n\n\t// Same test but wait a few seconds after reconnect. The old\n\t// grace period goroutine may still be running.\n\tt.Run(\"server_state_online_2s_after_reconnect\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"gracewait-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"gracewait-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tservertest.NewClient(t, srv, \"gracewait-node2\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\n\t\tnodeID := findNodeID(t, srv, \"gracewait-node1\")\n\n\t\tc1.Disconnect(t)\n\t\tc1.Reconnect(t)\n\t\tc1.WaitForPeers(t, 1, 15*time.Second)\n\n\t\t// Wait 2 seconds for the old grace period to potentially fire.\n\t\ttimer := time.NewTimer(2 * time.Second)\n\t\tdefer timer.Stop()\n\n\t\t<-timer.C\n\n\t\tnv, ok := srv.State().GetNodeByID(nodeID)\n\t\trequire.True(t, ok)\n\n\t\tisOnline, known := nv.IsOnline().GetOk()\n\t\tassert.True(t, known,\n\t\t\t\"server should know online status 2s after reconnect\")\n\t\tassert.True(t, isOnline,\n\t\t\t\"server should STILL show node as online 2s after reconnect (grace period goroutine should not overwrite)\")\n\t})\n\n\t// Wait the full grace period (10s) after reconnect. 
The old\n\t// grace period goroutine should have checked IsConnected\n\t// and found the node connected, so should NOT have called\n\t// Disconnect().\n\tt.Run(\"server_state_online_12s_after_reconnect\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"gracelong-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"gracelong-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tservertest.NewClient(t, srv, \"gracelong-node2\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\n\t\tnodeID := findNodeID(t, srv, \"gracelong-node1\")\n\n\t\tc1.Disconnect(t)\n\t\tc1.Reconnect(t)\n\t\tc1.WaitForPeers(t, 1, 15*time.Second)\n\n\t\t// Wait past the full grace period.\n\t\ttimer := time.NewTimer(12 * time.Second)\n\t\tdefer timer.Stop()\n\n\t\t<-timer.C\n\n\t\tnv, ok := srv.State().GetNodeByID(nodeID)\n\t\trequire.True(t, ok)\n\n\t\tisOnline, known := nv.IsOnline().GetOk()\n\t\tassert.True(t, known,\n\t\t\t\"server should know online status after grace period expires\")\n\t\tassert.True(t, isOnline,\n\t\t\t\"server should show node as online after grace period -- the reconnect should have prevented the Disconnect() call\")\n\t})\n\n\t// Peer's view: after rapid reconnect, the peer should see\n\t// the reconnected node as online, not offline.\n\tt.Run(\"peer_sees_online_after_rapid_reconnect\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"peeronl-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"peeronl-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tc2 := servertest.NewClient(t, srv, \"peeronl-node2\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\n\t\t// Wait for online status to propagate first.\n\t\tc2.WaitForCondition(t, \"peer initially online\",\n\t\t\t15*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\tfor _, p := range nm.Peers {\n\t\t\t\t\thi := p.Hostinfo()\n\t\t\t\t\tif hi.Valid() && hi.Hostname() == \"peeronl-node1\" {\n\t\t\t\t\t\tisOnline, known := p.Online().GetOk()\n\n\t\t\t\t\t\treturn known && isOnline\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn false\n\t\t\t})\n\n\t\t// Rapid reconnect.\n\t\tc1.Disconnect(t)\n\t\tc1.Reconnect(t)\n\t\tc1.WaitForPeers(t, 1, 15*time.Second)\n\n\t\t// Wait 3 seconds for any stale updates to propagate.\n\t\ttimer := time.NewTimer(3 * time.Second)\n\t\tdefer timer.Stop()\n\n\t\t<-timer.C\n\n\t\t// At this point, c2 should see c1 as ONLINE.\n\t\t// If the grace period race is present, c2 might\n\t\t// temporarily see offline and then online again.\n\t\tnm := c2.Netmap()\n\t\trequire.NotNil(t, nm)\n\n\t\tfor _, p := range nm.Peers {\n\t\t\thi := p.Hostinfo()\n\t\t\tif hi.Valid() && hi.Hostname() == \"peeronl-node1\" {\n\t\t\t\tisOnline, known := p.Online().GetOk()\n\t\t\t\tassert.True(t, known,\n\t\t\t\t\t\"peer online status should be known\")\n\t\t\t\tassert.True(t, isOnline,\n\t\t\t\t\t\"peer should be online 3s after rapid reconnect\")\n\t\t\t}\n\t\t}\n\t})\n\n\t// The batcher's IsConnected check: when the grace period\n\t// goroutine calls IsConnected(), it should return true if\n\t// a new session has been added for the same node.\n\tt.Run(\"batcher_knows_reconnected_during_grace\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"batchknow-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"batchknow-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tc2 := servertest.NewClient(t, srv, 
\"batchknow-node2\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\t\tc2.WaitForPeers(t, 1, 10*time.Second)\n\n\t\t// Disconnect and reconnect.\n\t\tc1.Disconnect(t)\n\t\tc1.Reconnect(t)\n\t\tc1.WaitForPeers(t, 1, 15*time.Second)\n\n\t\t// The mesh should be complete with both nodes seeing\n\t\t// each other as online.\n\t\tc2.WaitForCondition(t, \"c1 online after reconnect\",\n\t\t\t15*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\tfor _, p := range nm.Peers {\n\t\t\t\t\thi := p.Hostinfo()\n\t\t\t\t\tif hi.Valid() && hi.Hostname() == \"batchknow-node1\" {\n\t\t\t\t\t\tisOnline, known := p.Online().GetOk()\n\n\t\t\t\t\t\treturn known && isOnline\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn false\n\t\t\t})\n\t})\n\n\t// Test that the update history shows a clean transition:\n\t// the peer should never appear in the history with\n\t// online=false if the reconnect was fast enough.\n\tt.Run(\"update_history_no_false_offline\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"histroff-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"histroff-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tc2 := servertest.NewClient(t, srv, \"histroff-node2\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\t\tc2.WaitForPeers(t, 1, 10*time.Second)\n\n\t\t// Record c2's update count before reconnect.\n\t\tcountBefore := c2.UpdateCount()\n\n\t\t// Rapid reconnect.\n\t\tc1.Disconnect(t)\n\t\tc1.Reconnect(t)\n\t\tc1.WaitForPeers(t, 1, 15*time.Second)\n\n\t\t// Wait a moment for all updates to arrive.\n\t\ttimer := time.NewTimer(3 * time.Second)\n\t\tdefer timer.Stop()\n\n\t\t<-timer.C\n\n\t\t// Check c2's update history for any false offline.\n\t\thistory := c2.History()\n\t\tsawOffline := false\n\n\t\tfor i := countBefore; i < len(history); i++ {\n\t\t\tnm := history[i]\n\t\t\tfor _, p := range nm.Peers {\n\t\t\t\thi := p.Hostinfo()\n\t\t\t\tif hi.Valid() && hi.Hostname() == \"histroff-node1\" {\n\t\t\t\t\tisOnline, known := p.Online().GetOk()\n\t\t\t\t\tif known && !isOnline {\n\t\t\t\t\t\tsawOffline = true\n\n\t\t\t\t\t\tt.Logf(\"update %d: saw peer offline (should not happen during rapid reconnect)\", i)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tassert.False(t, sawOffline,\n\t\t\t\"peer should never appear offline in update history during rapid reconnect\")\n\t})\n\n\t// Multiple rapid reconnects should not cause the peer count\n\t// to be wrong. After N reconnects, the reconnecting node should\n\t// still see the right number of peers and vice versa.\n\tt.Run(\"peer_count_stable_after_many_reconnects\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"peercount-user\")\n\n\t\tconst n = 4\n\n\t\tclients := make([]*servertest.TestClient, n)\n\t\tfor i := range n {\n\t\t\tclients[i] = servertest.NewClient(t, srv,\n\t\t\t\tfmt.Sprintf(\"peercount-%d\", i),\n\t\t\t\tservertest.WithUser(user))\n\t\t}\n\n\t\tfor _, c := range clients {\n\t\t\tc.WaitForPeers(t, n-1, 20*time.Second)\n\t\t}\n\n\t\t// Reconnect client 0 five times.\n\t\tfor range 5 {\n\t\t\tclients[0].Disconnect(t)\n\t\t\tclients[0].Reconnect(t)\n\t\t}\n\n\t\t// All clients should still see n-1 peers.\n\t\tfor _, c := range clients {\n\t\t\tc.WaitForPeers(t, n-1, 15*time.Second)\n\t\t}\n\n\t\tservertest.AssertMeshComplete(t, clients)\n\t})\n\n\t// Route approval during reconnect: approve a route while a\n\t// node is reconnecting. 
Both the reconnecting node and peers\n\t// should eventually see the correct state.\n\tt.Run(\"route_approval_during_reconnect\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"rtrecon-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"rtrecon-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tservertest.NewClient(t, srv, \"rtrecon-node2\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\n\t\tnodeID1 := findNodeID(t, srv, \"rtrecon-node1\")\n\n\t\t// Disconnect c1.\n\t\tc1.Disconnect(t)\n\n\t\t// While c1 is disconnected, approve a route for it.\n\t\troute := netip.MustParsePrefix(\"10.55.0.0/24\")\n\t\t_, routeChange, err := srv.State().SetApprovedRoutes(\n\t\t\tnodeID1, []netip.Prefix{route})\n\t\trequire.NoError(t, err)\n\t\tsrv.App.Change(routeChange)\n\n\t\t// Reconnect c1.\n\t\tc1.Reconnect(t)\n\t\tc1.WaitForPeers(t, 1, 15*time.Second)\n\n\t\t// c1 should receive a self-update with the new route.\n\t\tc1.WaitForCondition(t, \"self-update after route+reconnect\",\n\t\t\t10*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\treturn nm != nil && nm.SelfNode.Valid()\n\t\t\t})\n\n\t\t// Verify server state is correct.\n\t\tnv, ok := srv.State().GetNodeByID(nodeID1)\n\t\trequire.True(t, ok)\n\n\t\troutes := nv.ApprovedRoutes().AsSlice()\n\t\tassert.Contains(t, routes, route,\n\t\t\t\"approved route should persist through reconnect\")\n\t})\n}\n"
  },
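  {
    "path": "hscontrol/servertest/poll_session_doc_test.go",
    "content": "package servertest_test\n\nimport (\n\t\"sync\"\n\t\"time\"\n)\n\n// This file is a toy sketch, not headscale code: it reproduces the\n// shape of the check-then-act hazard that the poll_race tests above\n// target. A grace-period goroutine that re-checks the live session\n// count before flipping a node offline closes the race window; one\n// that skips the re-check can overwrite a newer Connect(). All\n// names here are illustrative.\n\n// sessionTracker is a hypothetical stand-in for the batcher's view\n// of which nodes currently have a live poll session.\ntype sessionTracker struct {\n\tmu     sync.Mutex\n\tonline map[int]bool\n\tconns  map[int]int // live session count per node ID\n}\n\nfunc newSessionTracker() *sessionTracker {\n\treturn &sessionTracker{\n\t\tonline: make(map[int]bool),\n\t\tconns:  make(map[int]int),\n\t}\n}\n\n// connect registers a new poll session and marks the node online.\nfunc (s *sessionTracker) connect(id int) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\ts.conns[id]++\n\ts.online[id] = true\n}\n\n// disconnect drops a session and schedules the offline transition\n// after a grace period, mirroring the delayed Disconnect() that\n// poll.go's cleanup goroutine performs.\nfunc (s *sessionTracker) disconnect(id int, grace time.Duration) {\n\ts.mu.Lock()\n\ts.conns[id]--\n\ts.mu.Unlock()\n\n\tgo func() {\n\t\ttime.Sleep(grace)\n\n\t\ts.mu.Lock()\n\t\tdefer s.mu.Unlock()\n\n\t\t// The essential re-check: only mark the node offline if no\n\t\t// new session arrived during the grace period. Skipping this\n\t\t// check is the bug the tests above try to provoke.\n\t\tif s.conns[id] == 0 {\n\t\t\ts.online[id] = false\n\t\t}\n\t}()\n}\n"
  },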
  {
    "path": "hscontrol/servertest/race_test.go",
    "content": "package servertest_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/netip\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/servertest\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/netmap\"\n)\n\n// TestRace contains tests designed to trigger race conditions in\n// the control plane. Run with -race to detect data races.\n// These tests stress concurrent access patterns in poll.go,\n// the batcher, the NodeStore, and the mapper.\n\n// TestRacePollSessionReplacement tests the race between an old\n// poll session's deferred cleanup and a new session starting.\nfunc TestRacePollSessionReplacement(t *testing.T) {\n\tt.Parallel()\n\n\t// Rapidly replace the poll session by doing immediate\n\t// disconnect+reconnect. This races the old session's\n\t// deferred cleanup (RemoveNode, Disconnect, grace period\n\t// goroutine) with the new session's setup (AddNode, Connect,\n\t// initial map send).\n\tt.Run(\"immediate_session_replace_10x\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"sessrepl-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"sessrepl-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tc2 := servertest.NewClient(t, srv, \"sessrepl-node2\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\n\t\tfor range 10 {\n\t\t\tc1.Disconnect(t)\n\t\t\t// Reconnect immediately -- no sleep. This creates the\n\t\t\t// tightest possible race between old session cleanup\n\t\t\t// and new session setup.\n\t\t\tc1.Reconnect(t)\n\t\t}\n\n\t\tc1.WaitForPeers(t, 1, 15*time.Second)\n\t\tc2.WaitForPeers(t, 1, 15*time.Second)\n\n\t\t// Both clients should still have a consistent view.\n\t\tservertest.AssertMeshComplete(t,\n\t\t\t[]*servertest.TestClient{c1, c2})\n\t})\n\n\t// Two nodes rapidly reconnecting simultaneously.\n\tt.Run(\"two_nodes_reconnect_simultaneously\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"simrecon-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"simrecon-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tc2 := servertest.NewClient(t, srv, \"simrecon-node2\",\n\t\t\tservertest.WithUser(user))\n\t\tc3 := servertest.NewClient(t, srv, \"simrecon-node3\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 2, 15*time.Second)\n\n\t\tfor range 5 {\n\t\t\t// Both disconnect at the same time.\n\t\t\tc1.Disconnect(t)\n\t\t\tc2.Disconnect(t)\n\n\t\t\t// Both reconnect at the same time.\n\t\t\tc1.Reconnect(t)\n\t\t\tc2.Reconnect(t)\n\t\t}\n\n\t\t// Mesh should recover.\n\t\tc1.WaitForPeers(t, 2, 15*time.Second)\n\t\tc2.WaitForPeers(t, 2, 15*time.Second)\n\t\tc3.WaitForPeers(t, 2, 15*time.Second)\n\n\t\tservertest.AssertConsistentState(t,\n\t\t\t[]*servertest.TestClient{c1, c2, c3})\n\t})\n}\n\n// TestRaceConcurrentServerMutations tests concurrent mutations\n// on the server side while nodes are connected and polling.\nfunc TestRaceConcurrentServerMutations(t *testing.T) {\n\tt.Parallel()\n\n\t// Rename, route approval, and policy change all happening\n\t// concurrently while nodes are connected.\n\tt.Run(\"concurrent_rename_route_policy\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"conmut-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, 
\"conmut-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tc2 := servertest.NewClient(t, srv, \"conmut-node2\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\n\t\tnodeID1 := findNodeID(t, srv, \"conmut-node1\")\n\n\t\tvar wg sync.WaitGroup\n\n\t\t// Concurrent renames.\n\n\t\twg.Go(func() {\n\t\t\tfor i := range 5 {\n\t\t\t\tname := fmt.Sprintf(\"conmut-renamed-%d\", i)\n\t\t\t\tsrv.State().RenameNode(nodeID1, name) //nolint:errcheck\n\t\t\t}\n\t\t})\n\n\t\t// Concurrent route changes.\n\n\t\twg.Go(func() {\n\t\t\tfor i := range 5 {\n\t\t\t\troute := netip.MustParsePrefix(\n\t\t\t\t\tfmt.Sprintf(\"10.%d.0.0/24\", i))\n\t\t\t\t_, c, _ := srv.State().SetApprovedRoutes(\n\t\t\t\t\tnodeID1, []netip.Prefix{route})\n\t\t\t\tsrv.App.Change(c)\n\t\t\t}\n\t\t})\n\n\t\t// Concurrent policy changes.\n\n\t\twg.Go(func() {\n\t\t\tfor range 5 {\n\t\t\t\tchanged, err := srv.State().SetPolicy([]byte(`{\n\t\t\t\t\t\"acls\": [\n\t\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}\n\t\t\t\t\t]\n\t\t\t\t}`))\n\t\t\t\tif err == nil && changed {\n\t\t\t\t\tchanges, err := srv.State().ReloadPolicy()\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tsrv.App.Change(changes...)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\n\t\twg.Wait()\n\n\t\t// Server should not have panicked, and clients should still\n\t\t// be getting updates.\n\t\tc2.WaitForCondition(t, \"still receiving updates\",\n\t\t\t10*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\treturn nm != nil && len(nm.Peers) > 0\n\t\t\t})\n\t})\n\n\t// Delete a node while simultaneously changing policy.\n\tt.Run(\"delete_during_policy_change\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"delpol-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"delpol-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tservertest.NewClient(t, srv, \"delpol-node2\",\n\t\t\tservertest.WithUser(user))\n\t\tc3 := servertest.NewClient(t, srv, \"delpol-node3\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 2, 15*time.Second)\n\n\t\tnodeID2 := findNodeID(t, srv, \"delpol-node2\")\n\t\tnv2, ok := srv.State().GetNodeByID(nodeID2)\n\t\trequire.True(t, ok)\n\n\t\tvar wg sync.WaitGroup\n\n\t\t// Delete node2 and change policy simultaneously.\n\n\t\twg.Go(func() {\n\t\t\tdelChange, err := srv.State().DeleteNode(nv2)\n\t\t\tif err == nil {\n\t\t\t\tsrv.App.Change(delChange)\n\t\t\t}\n\t\t})\n\n\t\twg.Go(func() {\n\t\t\tchanged, err := srv.State().SetPolicy([]byte(`{\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}\n\t\t\t\t]\n\t\t\t}`))\n\t\t\tif err == nil && changed {\n\t\t\t\tchanges, err := srv.State().ReloadPolicy()\n\t\t\t\tif err == nil {\n\t\t\t\t\tsrv.App.Change(changes...)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\n\t\twg.Wait()\n\n\t\t// c1 and c3 should converge -- both should see each other\n\t\t// but not node2.\n\t\tc1.WaitForCondition(t, \"node2 gone from c1\", 10*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\tfor _, p := range nm.Peers {\n\t\t\t\t\thi := p.Hostinfo()\n\t\t\t\t\tif hi.Valid() && hi.Hostname() == \"delpol-node2\" {\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn true\n\t\t\t})\n\n\t\tc3.WaitForCondition(t, \"node2 gone from c3\", 10*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\tfor _, p := range nm.Peers {\n\t\t\t\t\thi := p.Hostinfo()\n\t\t\t\t\tif hi.Valid() && hi.Hostname() == \"delpol-node2\" {\n\t\t\t\t\t\treturn 
false\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn true\n\t\t\t})\n\t})\n\n\t// Many clients sending hostinfo updates simultaneously.\n\tt.Run(\"concurrent_hostinfo_updates\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"chiupd-user\")\n\n\t\tconst n = 6\n\n\t\tclients := make([]*servertest.TestClient, n)\n\t\tfor i := range n {\n\t\t\tclients[i] = servertest.NewClient(t, srv,\n\t\t\t\tfmt.Sprintf(\"chiupd-%d\", i),\n\t\t\t\tservertest.WithUser(user))\n\t\t}\n\n\t\tfor _, c := range clients {\n\t\t\tc.WaitForPeers(t, n-1, 20*time.Second)\n\t\t}\n\n\t\t// All clients update their hostinfo simultaneously.\n\t\tvar wg sync.WaitGroup\n\t\tfor i, c := range clients {\n\t\t\twg.Go(func() {\n\t\t\t\tc.Direct().SetHostinfo(&tailcfg.Hostinfo{\n\t\t\t\t\tBackendLogID: fmt.Sprintf(\"servertest-chiupd-%d\", i),\n\t\t\t\t\tHostname:     fmt.Sprintf(\"chiupd-%d\", i),\n\t\t\t\t\tOS:           fmt.Sprintf(\"ConcurrentOS-%d\", i),\n\t\t\t\t})\n\n\t\t\t\tctx, cancel := context.WithTimeout(\n\t\t\t\t\tcontext.Background(), 5*time.Second)\n\t\t\t\tdefer cancel()\n\n\t\t\t\t_ = c.Direct().SendUpdate(ctx)\n\t\t\t})\n\t\t}\n\n\t\twg.Wait()\n\n\t\t// Each client should eventually see all others' updated OS.\n\t\tfor _, observer := range clients {\n\t\t\tobserver.WaitForCondition(t, \"all OS updates visible\",\n\t\t\t\t15*time.Second,\n\t\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\t\tseenOS := 0\n\n\t\t\t\t\tfor _, p := range nm.Peers {\n\t\t\t\t\t\thi := p.Hostinfo()\n\t\t\t\t\t\tif hi.Valid() && hi.OS() != \"\" &&\n\t\t\t\t\t\t\tlen(hi.OS()) > 12 { // \"ConcurrentOS-\" prefix\n\t\t\t\t\t\t\tseenOS++\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t// Should see n-1 peers with updated OS.\n\t\t\t\t\treturn seenOS >= n-1\n\t\t\t\t})\n\t\t}\n\t})\n}\n\n// TestRaceConnectDuringGracePeriod tests connecting a new node\n// while another node is in its grace period.\nfunc TestRaceConnectDuringGracePeriod(t *testing.T) {\n\tt.Parallel()\n\n\t// A node disconnects, and during the 10-second grace period\n\t// a new node joins. The new node should see the disconnecting\n\t// node as a peer (it hasn't been removed yet).\n\tt.Run(\"new_node_during_grace_period\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"grace-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"grace-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tc2 := servertest.NewClient(t, srv, \"grace-node2\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\n\t\t// Disconnect c1 -- starts grace period.\n\t\tc1.Disconnect(t)\n\n\t\t// Immediately add a new node while c1 is in grace period.\n\t\tc3 := servertest.NewClient(t, srv, \"grace-node3\",\n\t\t\tservertest.WithUser(user))\n\n\t\t// c3 should see c2 for sure. Whether it sees c1 depends on\n\t\t// whether c1's grace period has expired. 
Either way it should\n\t\t// not panic or hang.\n\t\tc3.WaitForPeers(t, 1, 15*time.Second)\n\n\t\t// c2 should see c3.\n\t\tc2.WaitForCondition(t, \"c2 sees c3\", 10*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\t_, found := c2.PeerByName(\"grace-node3\")\n\n\t\t\t\treturn found\n\t\t\t})\n\t})\n\n\t// Multiple nodes disconnect and new ones connect simultaneously,\n\t// creating a mixed grace-period race.\n\tt.Run(\"multi_disconnect_multi_connect_race\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"mixgrace-user\")\n\n\t\tconst n = 4\n\n\t\toriginals := make([]*servertest.TestClient, n)\n\t\tfor i := range n {\n\t\t\toriginals[i] = servertest.NewClient(t, srv,\n\t\t\t\tfmt.Sprintf(\"mixgrace-orig-%d\", i),\n\t\t\t\tservertest.WithUser(user))\n\t\t}\n\n\t\tfor _, c := range originals {\n\t\t\tc.WaitForPeers(t, n-1, 20*time.Second)\n\t\t}\n\n\t\t// Disconnect half.\n\t\tfor i := range n / 2 {\n\t\t\toriginals[i].Disconnect(t)\n\t\t}\n\n\t\t// Add new nodes during grace period.\n\t\treplacements := make([]*servertest.TestClient, n/2)\n\t\tfor i := range n / 2 {\n\t\t\treplacements[i] = servertest.NewClient(t, srv,\n\t\t\t\tfmt.Sprintf(\"mixgrace-new-%d\", i),\n\t\t\t\tservertest.WithUser(user))\n\t\t}\n\n\t\t// The surviving originals + new nodes should form a mesh.\n\t\tsurviving := originals[n/2:]\n\t\tallActive := append(surviving, replacements...)\n\n\t\tfor _, c := range allActive {\n\t\t\tc.WaitForPeers(t, len(allActive)-1, 30*time.Second)\n\t\t}\n\n\t\tservertest.AssertConsistentState(t, allActive)\n\t})\n}\n\n// TestRaceBatcherContention tests race conditions in the batcher\n// when many changes arrive simultaneously.\nfunc TestRaceBatcherContention(t *testing.T) {\n\tt.Parallel()\n\n\t// Many nodes connecting at the same time generates many\n\t// concurrent Change() calls. 
The batcher must handle this\n\t// without dropping updates or panicking.\n\tt.Run(\"many_simultaneous_connects\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"batchcon-user\")\n\n\t\tconst n = 8\n\n\t\tclients := make([]*servertest.TestClient, n)\n\n\t\t// Create all clients as fast as possible.\n\t\tfor i := range n {\n\t\t\tclients[i] = servertest.NewClient(t, srv,\n\t\t\t\tfmt.Sprintf(\"batchcon-%d\", i),\n\t\t\t\tservertest.WithUser(user))\n\t\t}\n\n\t\t// All should converge.\n\t\tfor _, c := range clients {\n\t\t\tc.WaitForPeers(t, n-1, 30*time.Second)\n\t\t}\n\n\t\tservertest.AssertMeshComplete(t, clients)\n\t})\n\n\t// Rapid connect + disconnect + connect of different nodes\n\t// generates interleaved AddNode/RemoveNode/AddNode in the\n\t// batcher.\n\tt.Run(\"interleaved_add_remove_add\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"intleave-user\")\n\n\t\tobserver := servertest.NewClient(t, srv, \"intleave-obs\",\n\t\t\tservertest.WithUser(user))\n\t\tobserver.WaitForUpdate(t, 10*time.Second)\n\n\t\t// Rapidly create, disconnect, create nodes.\n\t\tfor i := range 5 {\n\t\t\tc := servertest.NewClient(t, srv,\n\t\t\t\tfmt.Sprintf(\"intleave-temp-%d\", i),\n\t\t\t\tservertest.WithUser(user))\n\t\t\tc.WaitForUpdate(t, 10*time.Second)\n\t\t\tc.Disconnect(t)\n\t\t}\n\n\t\t// Add a final persistent node.\n\t\tfinal := servertest.NewClient(t, srv, \"intleave-final\",\n\t\t\tservertest.WithUser(user))\n\n\t\t// Observer should see at least the final node.\n\t\tobserver.WaitForCondition(t, \"sees final node\",\n\t\t\t15*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\t_, found := observer.PeerByName(\"intleave-final\")\n\n\t\t\t\treturn found\n\t\t\t})\n\n\t\t// Final should see observer.\n\t\tfinal.WaitForCondition(t, \"sees observer\",\n\t\t\t15*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\t_, found := final.PeerByName(\"intleave-obs\")\n\n\t\t\t\treturn found\n\t\t\t})\n\t})\n\n\t// Route changes and node connect happening at the same time.\n\tt.Run(\"route_change_during_connect\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"rtcon-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"rtcon-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tc1.WaitForUpdate(t, 10*time.Second)\n\n\t\tnodeID1 := findNodeID(t, srv, \"rtcon-node1\")\n\n\t\t// Approve routes while c2 is connecting.\n\t\tvar wg sync.WaitGroup\n\n\t\twg.Go(func() {\n\t\t\troute := netip.MustParsePrefix(\"10.88.0.0/24\")\n\t\t\t_, c, _ := srv.State().SetApprovedRoutes(\n\t\t\t\tnodeID1, []netip.Prefix{route})\n\t\t\tsrv.App.Change(c)\n\t\t})\n\n\t\twg.Add(1)\n\n\t\tvar c2 *servertest.TestClient\n\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\n\t\t\tc2 = servertest.NewClient(t, srv, \"rtcon-node2\",\n\t\t\t\tservertest.WithUser(user))\n\t\t}()\n\n\t\twg.Wait()\n\n\t\t// Both should converge.\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\t\tc2.WaitForPeers(t, 1, 10*time.Second)\n\t})\n}\n\n// TestRaceMapResponseDuringDisconnect tests what happens when a\n// map response is being written while the session is being torn down.\nfunc TestRaceMapResponseDuringDisconnect(t *testing.T) {\n\tt.Parallel()\n\n\t// Generate a lot of updates for a node, then disconnect it\n\t// while updates are still being delivered. 
The disconnect\n\t// should be clean -- no panics, no hangs.\n\tt.Run(\"disconnect_during_update_storm\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"updstorm-user\")\n\n\t\tvictim := servertest.NewClient(t, srv, \"updstorm-victim\",\n\t\t\tservertest.WithUser(user))\n\t\tvictim.WaitForUpdate(t, 10*time.Second)\n\n\t\t// Create several nodes to generate connection updates.\n\t\tfor i := range 5 {\n\t\t\tservertest.NewClient(t, srv,\n\t\t\t\tfmt.Sprintf(\"updstorm-gen-%d\", i),\n\t\t\t\tservertest.WithUser(user))\n\t\t}\n\n\t\t// While updates are flying, disconnect the victim.\n\t\tvictim.Disconnect(t)\n\n\t\t// No panic, no hang = success. The other nodes should\n\t\t// still be working.\n\t\tremaining := servertest.NewClient(t, srv, \"updstorm-check\",\n\t\t\tservertest.WithUser(user))\n\t\tremaining.WaitForPeers(t, 5, 15*time.Second)\n\t})\n\n\t// Send a hostinfo update and disconnect almost simultaneously.\n\tt.Run(\"hostinfo_update_then_immediate_disconnect\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"hidc-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"hidc-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tc2 := servertest.NewClient(t, srv, \"hidc-node2\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\n\t\t// Fire a hostinfo update.\n\t\tc1.Direct().SetHostinfo(&tailcfg.Hostinfo{\n\t\t\tBackendLogID: \"servertest-hidc-node1\",\n\t\t\tHostname:     \"hidc-node1\",\n\t\t\tOS:           \"DisconnectOS\",\n\t\t})\n\n\t\tctx, cancel := context.WithTimeout(\n\t\t\tcontext.Background(), 5*time.Second)\n\t\tdefer cancel()\n\n\t\t_ = c1.Direct().SendUpdate(ctx)\n\n\t\t// Immediately disconnect.\n\t\tc1.Disconnect(t)\n\n\t\t// c2 might or might not see the OS update, but it should\n\t\t// not panic or hang. Verify c2 is still functional.\n\t\tc2.WaitForCondition(t, \"c2 still functional\", 10*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\treturn nm != nil\n\t\t\t})\n\t})\n}\n\n// TestRaceNodeStoreContention tests concurrent access to the NodeStore.\nfunc TestRaceNodeStoreContention(t *testing.T) {\n\tt.Parallel()\n\n\t// Many GetNodeByID calls while nodes are connecting and\n\t// disconnecting. 
This tests the NodeStore's read/write locking.\n\tt.Run(\"concurrent_reads_during_mutations\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"nsrace-user\")\n\n\t\tconst n = 4\n\n\t\tclients := make([]*servertest.TestClient, n)\n\t\tfor i := range n {\n\t\t\tclients[i] = servertest.NewClient(t, srv,\n\t\t\t\tfmt.Sprintf(\"nsrace-%d\", i),\n\t\t\t\tservertest.WithUser(user))\n\t\t}\n\n\t\tfor _, c := range clients {\n\t\t\tc.WaitForPeers(t, n-1, 15*time.Second)\n\t\t}\n\n\t\tnodeIDs := make([]types.NodeID, n)\n\t\tfor i := range n {\n\t\t\tnodeIDs[i] = findNodeID(t, srv,\n\t\t\t\tfmt.Sprintf(\"nsrace-%d\", i))\n\t\t}\n\n\t\t// Concurrently: read nodes, disconnect/reconnect, read again.\n\t\tvar wg sync.WaitGroup\n\n\t\t// Readers.\n\t\tfor range 4 {\n\t\t\twg.Go(func() {\n\t\t\t\tfor range 100 {\n\t\t\t\t\tfor _, id := range nodeIDs {\n\t\t\t\t\t\tnv, ok := srv.State().GetNodeByID(id)\n\t\t\t\t\t\tif ok {\n\t\t\t\t\t\t\t_ = nv.Hostname()\n\t\t\t\t\t\t\t_ = nv.IsOnline()\n\t\t\t\t\t\t\t_ = nv.ApprovedRoutes()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\n\t\t// Mutators: disconnect and reconnect nodes.\n\t\tfor i := range 2 {\n\t\t\twg.Go(func() {\n\t\t\t\tclients[i].Disconnect(t)\n\t\t\t\tclients[i].Reconnect(t)\n\t\t\t})\n\t\t}\n\n\t\twg.Wait()\n\n\t\t// Everything should still be working.\n\t\tfor i := 2; i < n; i++ {\n\t\t\t_, ok := srv.State().GetNodeByID(nodeIDs[i])\n\t\t\tassert.True(t, ok,\n\t\t\t\t\"node %d should still be in NodeStore\", i)\n\t\t}\n\t})\n\n\t// ListNodes while nodes are being added and removed.\n\tt.Run(\"list_nodes_during_churn\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"listrace-user\")\n\n\t\tvar wg sync.WaitGroup\n\n\t\t// Continuously list nodes.\n\t\tstop := make(chan struct{})\n\n\t\twg.Go(func() {\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-stop:\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t\tnodes := srv.State().ListNodes()\n\t\t\t\t\t// Access each node to exercise read paths.\n\t\t\t\t\tfor i := range nodes.Len() {\n\t\t\t\t\t\tn := nodes.At(i)\n\t\t\t\t\t\t_ = n.Hostname()\n\t\t\t\t\t\t_ = n.IPs()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\n\t\t// Add and remove nodes.\n\t\tfor i := range 5 {\n\t\t\tc := servertest.NewClient(t, srv,\n\t\t\t\tfmt.Sprintf(\"listrace-%d\", i),\n\t\t\t\tservertest.WithUser(user))\n\t\t\tc.WaitForUpdate(t, 10*time.Second)\n\n\t\t\tif i%2 == 0 {\n\t\t\t\tc.Disconnect(t)\n\t\t\t}\n\t\t}\n\n\t\tclose(stop)\n\t\twg.Wait()\n\t})\n}\n"
  },
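  {
    "path": "hscontrol/servertest/reconnect_util_test.go",
    "content": "package servertest_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/servertest\"\n)\n\n// reconnectN is a hypothetical convenience wrapper, not servertest\n// API: a sketch of the tight disconnect/reconnect loop that the\n// race tests above repeat inline. Each iteration tears down the\n// poll session and immediately starts a new one, racing the old\n// session's deferred cleanup against the new session's setup.\nfunc reconnectN(t *testing.T, c *servertest.TestClient, n int) {\n\tt.Helper()\n\n\tfor range n {\n\t\tc.Disconnect(t)\n\t\tc.Reconnect(t)\n\t}\n}\n"
  },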
  {
    "path": "hscontrol/servertest/routes_test.go",
    "content": "package servertest_test\n\nimport (\n\t\"context\"\n\t\"net/netip\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/servertest\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/netmap\"\n)\n\n// TestRoutes verifies that route advertisements and approvals\n// propagate correctly through the control plane to all peers.\nfunc TestRoutes(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"node_addresses_in_allowed_ips\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 2)\n\n\t\t// Each peer's AllowedIPs should contain the peer's addresses.\n\t\tfor _, c := range h.Clients() {\n\t\t\tnm := c.Netmap()\n\t\t\trequire.NotNil(t, nm)\n\n\t\t\tfor _, peer := range nm.Peers {\n\t\t\t\taddrs := make(map[netip.Prefix]bool)\n\t\t\t\tfor i := range peer.Addresses().Len() {\n\t\t\t\t\taddrs[peer.Addresses().At(i)] = true\n\t\t\t\t}\n\n\t\t\t\tfor i := range peer.AllowedIPs().Len() {\n\t\t\t\t\taip := peer.AllowedIPs().At(i)\n\t\t\t\t\tif addrs[aip] {\n\t\t\t\t\t\tdelete(addrs, aip)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tassert.Empty(t, addrs,\n\t\t\t\t\t\"client %s: peer %d AllowedIPs should contain all of Addresses\",\n\t\t\t\t\tc.Name, peer.ID())\n\t\t\t}\n\t\t}\n\t})\n\n\tt.Run(\"advertised_routes_in_hostinfo\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"advroute-user\")\n\n\t\troutePrefix := netip.MustParsePrefix(\"192.168.1.0/24\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"advroute-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tc2 := servertest.NewClient(t, srv, \"advroute-node2\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\n\t\t// Update hostinfo with advertised routes.\n\t\tc1.Direct().SetHostinfo(&tailcfg.Hostinfo{\n\t\t\tBackendLogID: \"servertest-advroute-node1\",\n\t\t\tHostname:     \"advroute-node1\",\n\t\t\tRoutableIPs:  []netip.Prefix{routePrefix},\n\t\t})\n\n\t\t// Send a non-streaming update to push the new hostinfo.\n\t\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\t\tdefer cancel()\n\n\t\t_ = c1.Direct().SendUpdate(ctx)\n\n\t\t// The observer should eventually see the advertised routes\n\t\t// in the peer's hostinfo.\n\t\tc2.WaitForCondition(t, \"advertised route in hostinfo\",\n\t\t\t15*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\tfor _, p := range nm.Peers {\n\t\t\t\t\thi := p.Hostinfo()\n\t\t\t\t\tif hi.Valid() && hi.Hostname() == \"advroute-node1\" {\n\t\t\t\t\t\tfor i := range hi.RoutableIPs().Len() {\n\t\t\t\t\t\t\tif hi.RoutableIPs().At(i) == routePrefix {\n\t\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn false\n\t\t\t})\n\t})\n\n\tt.Run(\"route_advertise_and_approve\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"fullrt-user\")\n\n\t\troute := netip.MustParsePrefix(\"10.0.0.0/24\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"fullrt-advertiser\",\n\t\t\tservertest.WithUser(user))\n\t\tc2 := servertest.NewClient(t, srv, \"fullrt-observer\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\t\tc2.WaitForPeers(t, 1, 10*time.Second)\n\n\t\t// Step 1: Advertise the route by updating hostinfo.\n\t\tc1.Direct().SetHostinfo(&tailcfg.Hostinfo{\n\t\t\tBackendLogID: \"servertest-fullrt-advertiser\",\n\t\t\tHostname:     
\"fullrt-advertiser\",\n\t\t\tRoutableIPs:  []netip.Prefix{route},\n\t\t})\n\n\t\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\t\tdefer cancel()\n\n\t\t_ = c1.Direct().SendUpdate(ctx)\n\n\t\t// Wait for the server to process the hostinfo update\n\t\t// by waiting for observer to see the advertised route.\n\t\tc2.WaitForCondition(t, \"hostinfo update propagated\",\n\t\t\t10*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\tfor _, p := range nm.Peers {\n\t\t\t\t\thi := p.Hostinfo()\n\t\t\t\t\tif hi.Valid() && hi.Hostname() == \"fullrt-advertiser\" {\n\t\t\t\t\t\treturn hi.RoutableIPs().Len() > 0\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn false\n\t\t\t})\n\n\t\t// Step 2: Approve the route on the server.\n\t\tnodeID := findNodeID(t, srv, \"fullrt-advertiser\")\n\n\t\t_, routeChange, err := srv.State().SetApprovedRoutes(\n\t\t\tnodeID, []netip.Prefix{route})\n\t\trequire.NoError(t, err)\n\t\tsrv.App.Change(routeChange)\n\n\t\t// Step 3: Observer should see the route in AllowedIPs.\n\t\tc2.WaitForCondition(t, \"approved route in AllowedIPs\",\n\t\t\t15*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\tfor _, p := range nm.Peers {\n\t\t\t\t\thi := p.Hostinfo()\n\t\t\t\t\tif hi.Valid() && hi.Hostname() == \"fullrt-advertiser\" {\n\t\t\t\t\t\tfor i := range p.AllowedIPs().Len() {\n\t\t\t\t\t\t\tif p.AllowedIPs().At(i) == route {\n\t\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn false\n\t\t\t})\n\t})\n\n\tt.Run(\"allowed_ips_superset_of_addresses\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 3)\n\n\t\tfor _, c := range h.Clients() {\n\t\t\tnm := c.Netmap()\n\t\t\trequire.NotNil(t, nm)\n\n\t\t\tfor _, peer := range nm.Peers {\n\t\t\t\tallowedSet := make(map[netip.Prefix]bool)\n\t\t\t\tfor i := range peer.AllowedIPs().Len() {\n\t\t\t\t\tallowedSet[peer.AllowedIPs().At(i)] = true\n\t\t\t\t}\n\n\t\t\t\tfor i := range peer.Addresses().Len() {\n\t\t\t\t\taddr := peer.Addresses().At(i)\n\t\t\t\t\tassert.True(t, allowedSet[addr],\n\t\t\t\t\t\t\"client %s: peer %d Address %v should be in AllowedIPs\",\n\t\t\t\t\t\tc.Name, peer.ID(), addr)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\n\tt.Run(\"addresses_are_in_cgnat_range\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 2)\n\n\t\tcgnat := netip.MustParsePrefix(\"100.64.0.0/10\")\n\t\tula := netip.MustParsePrefix(\"fd7a:115c:a1e0::/48\")\n\n\t\tfor _, c := range h.Clients() {\n\t\t\tnm := c.Netmap()\n\t\t\trequire.NotNil(t, nm)\n\t\t\trequire.True(t, nm.SelfNode.Valid())\n\n\t\t\tfor i := range nm.SelfNode.Addresses().Len() {\n\t\t\t\taddr := nm.SelfNode.Addresses().At(i)\n\t\t\t\tinCGNAT := cgnat.Contains(addr.Addr())\n\t\t\t\tinULA := ula.Contains(addr.Addr())\n\t\t\t\tassert.True(t, inCGNAT || inULA,\n\t\t\t\t\t\"client %s: address %v should be in CGNAT or ULA range\",\n\t\t\t\t\tc.Name, addr)\n\t\t\t}\n\t\t}\n\t})\n}\n\n// findNodeID is defined in issues_test.go.\n"
  },
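  {
    "path": "hscontrol/servertest/routes_util_test.go",
    "content": "package servertest_test\n\nimport (\n\t\"context\"\n\t\"net/netip\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/servertest\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/tailcfg\"\n)\n\n// advertiseAndApprove is a hypothetical helper, not servertest API:\n// a sketch of the two-step subnet route flow exercised above. The\n// client advertises the route in its hostinfo and pushes it with a\n// non-streaming update; the server then approves the route and\n// broadcasts the resulting change so peers pick it up in their\n// AllowedIPs.\nfunc advertiseAndApprove(t *testing.T, srv *servertest.TestServer, c *servertest.TestClient, nodeID types.NodeID, route netip.Prefix) {\n\tt.Helper()\n\n\t// Step 1 (client): advertise the route via hostinfo.\n\tc.Direct().SetHostinfo(&tailcfg.Hostinfo{\n\t\tBackendLogID: \"servertest-\" + c.Name,\n\t\tHostname:     c.Name,\n\t\tRoutableIPs:  []netip.Prefix{route},\n\t})\n\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\n\t_ = c.Direct().SendUpdate(ctx)\n\n\t// Step 2 (server): approve the advertised route and broadcast\n\t// the resulting change.\n\t_, routeChange, err := srv.State().SetApprovedRoutes(nodeID, []netip.Prefix{route})\n\trequire.NoError(t, err)\n\tsrv.App.Change(routeChange)\n}\n"
  },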
  {
    "path": "hscontrol/servertest/server.go",
    "content": "// Package servertest provides an in-process test harness for Headscale's\n// control plane. It wires a real Headscale server to real Tailscale\n// controlclient.Direct instances, enabling fast, deterministic tests\n// of the full control protocol without Docker or separate processes.\npackage servertest\n\nimport (\n\t\"net/http/httptest\"\n\t\"net/netip\"\n\t\"testing\"\n\t\"time\"\n\n\thscontrol \"github.com/juanfont/headscale/hscontrol\"\n\t\"github.com/juanfont/headscale/hscontrol/state\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"tailscale.com/tailcfg\"\n)\n\n// TestServer is an in-process Headscale control server suitable for\n// use with Tailscale's controlclient.Direct.\ntype TestServer struct {\n\tApp        *hscontrol.Headscale\n\tHTTPServer *httptest.Server\n\tURL        string\n\tst         *state.State\n}\n\n// ServerOption configures a TestServer.\ntype ServerOption func(*serverConfig)\n\ntype serverConfig struct {\n\tbatchDelay       time.Duration\n\tbufferedChanSize int\n\tephemeralTimeout time.Duration\n\tbatcherWorkers   int\n}\n\nfunc defaultServerConfig() *serverConfig {\n\treturn &serverConfig{\n\t\tbatchDelay:       50 * time.Millisecond,\n\t\tbufferedChanSize: 30,\n\t\tbatcherWorkers:   1,\n\t\tephemeralTimeout: 30 * time.Second,\n\t}\n}\n\n// WithBatchDelay sets the batcher's change coalescing delay.\nfunc WithBatchDelay(d time.Duration) ServerOption {\n\treturn func(c *serverConfig) { c.batchDelay = d }\n}\n\n// WithBufferedChanSize sets the per-node map session channel buffer.\nfunc WithBufferedChanSize(n int) ServerOption {\n\treturn func(c *serverConfig) { c.bufferedChanSize = n }\n}\n\n// WithEphemeralTimeout sets the ephemeral node inactivity timeout.\nfunc WithEphemeralTimeout(d time.Duration) ServerOption {\n\treturn func(c *serverConfig) { c.ephemeralTimeout = d }\n}\n\n// NewServer creates and starts a Headscale test server.\n// The server is fully functional and accepts real Tailscale control\n// protocol connections over Noise.\nfunc NewServer(tb testing.TB, opts ...ServerOption) *TestServer {\n\ttb.Helper()\n\n\tsc := defaultServerConfig()\n\tfor _, o := range opts {\n\t\to(sc)\n\t}\n\n\ttmpDir := tb.TempDir()\n\n\tprefixV4 := netip.MustParsePrefix(\"100.64.0.0/10\")\n\tprefixV6 := netip.MustParsePrefix(\"fd7a:115c:a1e0::/48\")\n\n\tcfg := types.Config{\n\t\t// Placeholder; updated below once httptest server starts.\n\t\tServerURL:                      \"http://localhost:0\",\n\t\tNoisePrivateKeyPath:            tmpDir + \"/noise_private.key\",\n\t\tEphemeralNodeInactivityTimeout: sc.ephemeralTimeout,\n\t\tPrefixV4:                       &prefixV4,\n\t\tPrefixV6:                       &prefixV6,\n\t\tIPAllocation:                   types.IPAllocationStrategySequential,\n\t\tDatabase: types.DatabaseConfig{\n\t\t\tType: \"sqlite3\",\n\t\t\tSqlite: types.SqliteConfig{\n\t\t\t\tPath: tmpDir + \"/headscale_test.db\",\n\t\t\t},\n\t\t},\n\t\tPolicy: types.PolicyConfig{\n\t\t\tMode: types.PolicyModeDB,\n\t\t},\n\t\tTuning: types.Tuning{\n\t\t\tBatchChangeDelay:               sc.batchDelay,\n\t\t\tBatcherWorkers:                 sc.batcherWorkers,\n\t\t\tNodeMapSessionBufferedChanSize: sc.bufferedChanSize,\n\t\t},\n\t}\n\n\tapp, err := hscontrol.NewHeadscale(&cfg)\n\tif err != nil {\n\t\ttb.Fatalf(\"servertest: NewHeadscale: %v\", err)\n\t}\n\n\t// Set a minimal DERP map so MapResponse generation works.\n\tapp.GetState().SetDERPMap(&tailcfg.DERPMap{\n\t\tRegions: map[int]*tailcfg.DERPRegion{\n\t\t\t900: {\n\t\t\t\tRegionID:   
900,\n\t\t\t\tRegionCode: \"test\",\n\t\t\t\tRegionName: \"Test Region\",\n\t\t\t\tNodes: []*tailcfg.DERPNode{{\n\t\t\t\t\tName:     \"test0\",\n\t\t\t\t\tRegionID: 900,\n\t\t\t\t\tHostName: \"127.0.0.1\",\n\t\t\t\t\tIPv4:     \"127.0.0.1\",\n\t\t\t\t\tDERPPort: -1, // not a real DERP, just needed for MapResponse\n\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t})\n\n\t// Start subsystems.\n\tapp.StartBatcherForTest(tb)\n\tapp.StartEphemeralGCForTest(tb)\n\n\t// Start the HTTP server with Headscale's full handler (including\n\t// /key and /ts2021 Noise upgrade).\n\tts := httptest.NewServer(app.HTTPHandler())\n\ttb.Cleanup(ts.Close)\n\n\t// Now update the config to point at the real URL so that\n\t// MapResponse.ControlURL etc. are correct.\n\tapp.SetServerURLForTest(tb, ts.URL)\n\n\treturn &TestServer{\n\t\tApp:        app,\n\t\tHTTPServer: ts,\n\t\tURL:        ts.URL,\n\t\tst:         app.GetState(),\n\t}\n}\n\n// State returns the server's state manager for creating users,\n// nodes, and pre-auth keys.\nfunc (s *TestServer) State() *state.State {\n\treturn s.st\n}\n\n// CreateUser creates a test user and returns it.\nfunc (s *TestServer) CreateUser(tb testing.TB, name string) *types.User {\n\ttb.Helper()\n\n\tu, _, err := s.st.CreateUser(types.User{Name: name})\n\tif err != nil {\n\t\ttb.Fatalf(\"servertest: CreateUser(%q): %v\", name, err)\n\t}\n\n\treturn u\n}\n\n// CreatePreAuthKey creates a reusable pre-auth key for the given user.\nfunc (s *TestServer) CreatePreAuthKey(tb testing.TB, userID types.UserID) string {\n\ttb.Helper()\n\n\tuid := userID\n\n\tpak, err := s.st.CreatePreAuthKey(&uid, true, false, nil, nil)\n\tif err != nil {\n\t\ttb.Fatalf(\"servertest: CreatePreAuthKey: %v\", err)\n\t}\n\n\treturn pak.Key\n}\n\n// CreateEphemeralPreAuthKey creates an ephemeral pre-auth key.\nfunc (s *TestServer) CreateEphemeralPreAuthKey(tb testing.TB, userID types.UserID) string {\n\ttb.Helper()\n\n\tuid := userID\n\n\tpak, err := s.st.CreatePreAuthKey(&uid, false, true, nil, nil)\n\tif err != nil {\n\t\ttb.Fatalf(\"servertest: CreateEphemeralPreAuthKey: %v\", err)\n\t}\n\n\treturn pak.Key\n}\n"
  },
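  {
    "path": "hscontrol/servertest/smoke_test.go",
    "content": "package servertest_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/servertest\"\n)\n\n// TestExampleBasicUsage is an illustrative smoke test, distilled\n// from the tests in this package, showing the minimal wiring the\n// harness needs: start a server, create a user, connect two\n// clients, and wait for them to see each other.\nfunc TestExampleBasicUsage(t *testing.T) {\n\tt.Parallel()\n\n\t// Optional tuning knobs are defined in server.go; the batch\n\t// delay below matches the default and is set only to show the\n\t// option in use.\n\tsrv := servertest.NewServer(t,\n\t\tservertest.WithBatchDelay(50*time.Millisecond))\n\n\tuser := srv.CreateUser(t, \"example-user\")\n\n\tc1 := servertest.NewClient(t, srv, \"example-node1\",\n\t\tservertest.WithUser(user))\n\tc2 := servertest.NewClient(t, srv, \"example-node2\",\n\t\tservertest.WithUser(user))\n\n\tc1.WaitForPeers(t, 1, 10*time.Second)\n\tc2.WaitForPeers(t, 1, 10*time.Second)\n\n\tservertest.AssertMeshComplete(t,\n\t\t[]*servertest.TestClient{c1, c2})\n}\n"
  },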
  {
    "path": "hscontrol/servertest/stress_test.go",
    "content": "package servertest_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/netip\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/servertest\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/netmap\"\n)\n\n// TestStress hammers the control plane with concurrent operations,\n// rapid mutations, and edge cases to surface race conditions and\n// consistency bugs.\n\n// TestStressConnectDisconnect exercises rapid connect/disconnect\n// patterns that stress the grace period, batcher, and NodeStore.\nfunc TestStressConnectDisconnect(t *testing.T) {\n\tt.Parallel()\n\n\t// A node that disconnects and reconnects faster than the\n\t// grace period should never cause a second node to see\n\t// the first node as offline.\n\tt.Run(\"rapid_reconnect_peer_never_sees_offline\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 2)\n\n\t\t// Wait for both to be online.\n\t\th.Client(0).WaitForCondition(t, \"peer online\", 15*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\tfor _, p := range nm.Peers {\n\t\t\t\t\tisOnline, known := p.Online().GetOk()\n\t\t\t\t\tif known && isOnline {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn false\n\t\t\t})\n\n\t\t// Do 10 rapid reconnects and check that client 0 never\n\t\t// sees client 1 as offline during the process.\n\t\tsawOffline := false\n\n\t\tvar offlineMu sync.Mutex\n\n\t\t// Monitor client 0's view of client 1 in the background.\n\t\tstopMonitor := make(chan struct{})\n\t\tmonitorDone := make(chan struct{})\n\n\t\tgo func() {\n\t\t\tdefer close(monitorDone)\n\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-stopMonitor:\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t}\n\n\t\t\t\tnm := h.Client(0).Netmap()\n\t\t\t\tif nm == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tfor _, p := range nm.Peers {\n\t\t\t\t\tisOnline, known := p.Online().GetOk()\n\t\t\t\t\tif known && !isOnline {\n\t\t\t\t\t\tofflineMu.Lock()\n\t\t\t\t\t\tsawOffline = true\n\t\t\t\t\t\tofflineMu.Unlock()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\n\t\tfor range 10 {\n\t\t\th.Client(1).Disconnect(t)\n\t\t\th.Client(1).Reconnect(t)\n\t\t}\n\n\t\t// Give the monitor a moment to catch up, then stop it.\n\t\th.Client(0).WaitForPeers(t, 1, 10*time.Second)\n\t\tclose(stopMonitor)\n\t\t<-monitorDone\n\n\t\tofflineMu.Lock()\n\t\tdefer offlineMu.Unlock()\n\n\t\tassert.False(t, sawOffline,\n\t\t\t\"peer should never appear offline during rapid reconnect cycles\")\n\t})\n\n\t// Delete a node while it has an active poll session. 
The poll\n\t// session should terminate cleanly and other peers should see\n\t// the node disappear.\n\tt.Run(\"delete_node_during_active_poll\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"delpoll-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"delpoll-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tc2 := servertest.NewClient(t, srv, \"delpoll-node2\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\t\tc2.WaitForPeers(t, 1, 10*time.Second)\n\n\t\t// Delete c1 while it's actively polling.\n\t\tnodeID := findNodeID(t, srv, \"delpoll-node1\")\n\t\tnv, ok := srv.State().GetNodeByID(nodeID)\n\t\trequire.True(t, ok)\n\n\t\tdeleteChange, err := srv.State().DeleteNode(nv)\n\t\trequire.NoError(t, err)\n\t\tsrv.App.Change(deleteChange)\n\n\t\t// c2 should see c1 disappear.\n\t\tc2.WaitForCondition(t, \"deleted node gone\", 10*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\tfor _, p := range nm.Peers {\n\t\t\t\t\thi := p.Hostinfo()\n\t\t\t\t\tif hi.Valid() && hi.Hostname() == \"delpoll-node1\" {\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn true\n\t\t\t})\n\n\t\tassert.Empty(t, c2.Peers(),\n\t\t\t\"c2 should have no peers after c1 is deleted\")\n\t})\n\n\t// Connect many nodes, then disconnect half simultaneously.\n\t// The remaining half should converge to see only each other.\n\tt.Run(\"disconnect_half_remaining_converge\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"halfdisc-user\")\n\n\t\tconst total = 6\n\n\t\tclients := make([]*servertest.TestClient, total)\n\t\tfor i := range total {\n\t\t\tclients[i] = servertest.NewClient(t, srv,\n\t\t\t\tfmt.Sprintf(\"halfdisc-%d\", i),\n\t\t\t\tservertest.WithUser(user))\n\t\t}\n\n\t\t// Wait for full mesh.\n\t\tfor _, c := range clients {\n\t\t\tc.WaitForPeers(t, total-1, 30*time.Second)\n\t\t}\n\n\t\t// Disconnect the first half.\n\t\tfor i := range total / 2 {\n\t\t\tclients[i].Disconnect(t)\n\t\t}\n\n\t\t// The remaining half should eventually converge.\n\t\tremaining := clients[total/2:]\n\n\t\tfor _, c := range remaining {\n\t\t\tc.WaitForCondition(t, \"remaining converge\",\n\t\t\t\t30*time.Second,\n\t\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\t\t// Should see at least the other remaining peers.\n\t\t\t\t\tonlinePeers := 0\n\n\t\t\t\t\tfor _, p := range nm.Peers {\n\t\t\t\t\t\tisOnline, known := p.Online().GetOk()\n\t\t\t\t\t\tif known && isOnline {\n\t\t\t\t\t\t\tonlinePeers++\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t// Remaining peers minus self = total/2 - 1\n\t\t\t\t\treturn onlinePeers >= len(remaining)-1\n\t\t\t\t})\n\t\t}\n\t})\n}\n\n// TestStressStateMutations tests rapid server-side state changes.\nfunc TestStressStateMutations(t *testing.T) {\n\tt.Parallel()\n\n\t// Rapidly approve and remove routes. 
The final state should\n\t// be consistent.\n\tt.Run(\"rapid_route_changes_final_state_correct\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"rapidrt-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"rapidrt-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tc2 := servertest.NewClient(t, srv, \"rapidrt-node2\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\n\t\tnodeID := findNodeID(t, srv, \"rapidrt-node1\")\n\n\t\t// Rapidly change routes 10 times.\n\t\tfor i := range 10 {\n\t\t\troute := netip.MustParsePrefix(\n\t\t\t\tfmt.Sprintf(\"10.%d.0.0/24\", i))\n\n\t\t\t_, routeChange, err := srv.State().SetApprovedRoutes(\n\t\t\t\tnodeID, []netip.Prefix{route})\n\t\t\trequire.NoError(t, err)\n\t\t\tsrv.App.Change(routeChange)\n\t\t}\n\n\t\t// Final route should be 10.9.0.0/24.\n\t\t// Verify server state is correct.\n\t\tnv, ok := srv.State().GetNodeByID(nodeID)\n\t\trequire.True(t, ok)\n\n\t\tfinalRoutes := nv.ApprovedRoutes().AsSlice()\n\t\texpected := netip.MustParsePrefix(\"10.9.0.0/24\")\n\t\tassert.Contains(t, finalRoutes, expected,\n\t\t\t\"final approved routes should contain the last route set\")\n\t\tassert.Len(t, finalRoutes, 1,\n\t\t\t\"should have exactly 1 approved route (the last one set)\")\n\n\t\t// c2 should eventually see the update.\n\t\tc2.WaitForCondition(t, \"final route update received\",\n\t\t\t10*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\treturn c2.UpdateCount() > 2\n\t\t\t})\n\t})\n\n\t// Rename a node multiple times rapidly. The final name should\n\t// be correct in the server state and visible to peers.\n\tt.Run(\"rapid_rename_final_state_correct\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"rapidname-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"rapidname-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tc2 := servertest.NewClient(t, srv, \"rapidname-node2\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\n\t\tnodeID := findNodeID(t, srv, \"rapidname-node1\")\n\n\t\t// Rename 5 times rapidly.\n\t\tvar finalName string\n\t\tfor i := range 5 {\n\t\t\tfinalName = fmt.Sprintf(\"renamed-%d\", i)\n\n\t\t\t_, renameChange, err := srv.State().RenameNode(nodeID, finalName)\n\t\t\trequire.NoError(t, err)\n\t\t\tsrv.App.Change(renameChange)\n\t\t}\n\n\t\t// Server state should have the final name.\n\t\tnv, ok := srv.State().GetNodeByID(nodeID)\n\t\trequire.True(t, ok)\n\t\tassert.Equal(t, finalName, nv.AsStruct().GivenName,\n\t\t\t\"server should have the final renamed value\")\n\n\t\t// c2 should see the final name.\n\t\tc2.WaitForCondition(t, \"final name visible\", 10*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\tfor _, p := range nm.Peers {\n\t\t\t\t\tif p.Name() == finalName {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn false\n\t\t\t})\n\t})\n\n\t// Multiple policy changes in rapid succession. 
The final\n\t// policy should be applied correctly.\n\tt.Run(\"rapid_policy_changes\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"rapidpol-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"rapidpol-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tc1.WaitForUpdate(t, 10*time.Second)\n\n\t\tcountBefore := c1.UpdateCount()\n\n\t\t// Change policy 5 times rapidly.\n\t\tfor range 5 {\n\t\t\tchanged, err := srv.State().SetPolicy([]byte(`{\n\t\t\t\t\"acls\": [\n\t\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]}\n\t\t\t\t]\n\t\t\t}`))\n\t\t\trequire.NoError(t, err)\n\n\t\t\tif changed {\n\t\t\t\tchanges, err := srv.State().ReloadPolicy()\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tsrv.App.Change(changes...)\n\t\t\t}\n\t\t}\n\n\t\t// Client should have received at least some updates.\n\t\tc1.WaitForCondition(t, \"updates after policy changes\",\n\t\t\t10*time.Second,\n\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\treturn c1.UpdateCount() > countBefore\n\t\t\t})\n\t})\n}\n\n// TestStressDataIntegrity verifies data correctness under various conditions.\nfunc TestStressDataIntegrity(t *testing.T) {\n\tt.Parallel()\n\n\t// Every node's self-addresses should match what peers see\n\t// as that node's Addresses.\n\tt.Run(\"self_addresses_match_peer_view\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"addrmatch-user\")\n\n\t\tconst n = 5\n\n\t\tclients := make([]*servertest.TestClient, n)\n\t\tfor i := range n {\n\t\t\tclients[i] = servertest.NewClient(t, srv,\n\t\t\t\tfmt.Sprintf(\"addrmatch-%d\", i),\n\t\t\t\tservertest.WithUser(user))\n\t\t}\n\n\t\tfor _, c := range clients {\n\t\t\tc.WaitForPeers(t, n-1, 20*time.Second)\n\t\t}\n\n\t\t// Build a map of hostname -> self-addresses.\n\t\tselfAddrs := make(map[string][]netip.Prefix)\n\n\t\tfor _, c := range clients {\n\t\t\tnm := c.Netmap()\n\t\t\trequire.NotNil(t, nm)\n\t\t\trequire.True(t, nm.SelfNode.Valid())\n\n\t\t\taddrs := make([]netip.Prefix, 0, nm.SelfNode.Addresses().Len())\n\t\t\tfor i := range nm.SelfNode.Addresses().Len() {\n\t\t\t\taddrs = append(addrs, nm.SelfNode.Addresses().At(i))\n\t\t\t}\n\n\t\t\tselfAddrs[c.Name] = addrs\n\t\t}\n\n\t\t// Now verify each client's peers have the same addresses\n\t\t// as those peers' self-view.\n\t\tfor _, c := range clients {\n\t\t\tnm := c.Netmap()\n\t\t\trequire.NotNil(t, nm)\n\n\t\t\tfor _, peer := range nm.Peers {\n\t\t\t\thi := peer.Hostinfo()\n\t\t\t\tif !hi.Valid() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tpeerName := hi.Hostname()\n\n\t\t\t\texpected, ok := selfAddrs[peerName]\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tpeerAddrs := make([]netip.Prefix, 0, peer.Addresses().Len())\n\t\t\t\tfor i := range peer.Addresses().Len() {\n\t\t\t\t\tpeerAddrs = append(peerAddrs, peer.Addresses().At(i))\n\t\t\t\t}\n\n\t\t\t\tassert.Equal(t, expected, peerAddrs,\n\t\t\t\t\t\"client %s: peer %s addresses should match that peer's self-view\",\n\t\t\t\t\tc.Name, peerName)\n\t\t\t}\n\t\t}\n\t})\n\n\t// After mesh formation, no peer should have Expired=true.\n\tt.Run(\"no_peers_expired_after_mesh_formation\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 3)\n\n\t\tfor _, c := range h.Clients() {\n\t\t\tnm := c.Netmap()\n\t\t\trequire.NotNil(t, nm)\n\n\t\t\tassert.False(t, nm.SelfNode.Expired(),\n\t\t\t\t\"client %s: self should not be expired\", c.Name)\n\n\t\t\tfor _, peer := range nm.Peers 
{\n\t\t\t\tassert.False(t, peer.Expired(),\n\t\t\t\t\t\"client %s: peer %d should not be expired\",\n\t\t\t\t\tc.Name, peer.ID())\n\t\t\t}\n\t\t}\n\t})\n\n\t// Self node should always be machine-authorized.\n\tt.Run(\"self_always_machine_authorized\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 2)\n\n\t\tfor _, c := range h.Clients() {\n\t\t\tnm := c.Netmap()\n\t\t\trequire.NotNil(t, nm)\n\t\t\tassert.True(t, nm.SelfNode.MachineAuthorized(),\n\t\t\t\t\"client %s: self should be machine-authorized\", c.Name)\n\t\t}\n\n\t\t// After reconnect, should still be authorized.\n\t\th.Client(0).Disconnect(t)\n\t\th.Client(0).Reconnect(t)\n\t\th.Client(0).WaitForPeers(t, 1, 10*time.Second)\n\n\t\tnm := h.Client(0).Netmap()\n\t\trequire.NotNil(t, nm)\n\t\tassert.True(t, nm.SelfNode.MachineAuthorized(),\n\t\t\t\"after reconnect: self should be machine-authorized\")\n\t})\n\n\t// Node IDs in the server state should match what clients see.\n\tt.Run(\"node_ids_consistent_between_server_and_client\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"idcheck-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"idcheck-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tc2 := servertest.NewClient(t, srv, \"idcheck-node2\",\n\t\t\tservertest.WithUser(user))\n\n\t\tc1.WaitForPeers(t, 1, 10*time.Second)\n\t\tc2.WaitForPeers(t, 1, 10*time.Second)\n\n\t\t// Get server-side node IDs.\n\t\tserverID1 := findNodeID(t, srv, \"idcheck-node1\")\n\t\tserverID2 := findNodeID(t, srv, \"idcheck-node2\")\n\n\t\t// Get client-side node IDs.\n\t\tnm1 := c1.Netmap()\n\t\tnm2 := c2.Netmap()\n\n\t\trequire.NotNil(t, nm1)\n\t\trequire.NotNil(t, nm2)\n\n\t\tclientID1 := nm1.SelfNode.ID()\n\t\tclientID2 := nm2.SelfNode.ID()\n\n\t\t//nolint:gosec // G115: test-only, IDs won't overflow\n\t\tassert.Equal(t, int64(serverID1), int64(clientID1),\n\t\t\t\"node 1: server ID should match client self ID\")\n\t\t//nolint:gosec // G115: test-only, IDs won't overflow\n\t\tassert.Equal(t, int64(serverID2), int64(clientID2),\n\t\t\t\"node 2: server ID should match client self ID\")\n\n\t\t// c1's view of c2's ID should also match.\n\t\trequire.Len(t, nm1.Peers, 1)\n\t\t//nolint:gosec // G115: test-only, IDs won't overflow\n\t\tassert.Equal(t, int64(serverID2), int64(nm1.Peers[0].ID()),\n\t\t\t\"c1's view of c2's ID should match server\")\n\t})\n\n\t// After hostinfo update, ALL peers should see the updated\n\t// hostinfo, not just some.\n\tt.Run(\"hostinfo_update_reaches_all_peers\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"hiall-user\")\n\n\t\tconst n = 5\n\n\t\tclients := make([]*servertest.TestClient, n)\n\t\tfor i := range n {\n\t\t\tclients[i] = servertest.NewClient(t, srv,\n\t\t\t\tfmt.Sprintf(\"hiall-%d\", i),\n\t\t\t\tservertest.WithUser(user))\n\t\t}\n\n\t\tfor _, c := range clients {\n\t\t\tc.WaitForPeers(t, n-1, 20*time.Second)\n\t\t}\n\n\t\t// Client 0 updates its OS.\n\t\tclients[0].Direct().SetHostinfo(&tailcfg.Hostinfo{\n\t\t\tBackendLogID: \"servertest-hiall-0\",\n\t\t\tHostname:     \"hiall-0\",\n\t\t\tOS:           \"StressTestOS\",\n\t\t})\n\n\t\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\t\tdefer cancel()\n\n\t\t_ = clients[0].Direct().SendUpdate(ctx)\n\n\t\t// ALL other clients should see the updated OS.\n\t\tfor i := 1; i < n; i++ {\n\t\t\tclients[i].WaitForCondition(t,\n\t\t\t\tfmt.Sprintf(\"client %d sees OS update\", 
i),\n\t\t\t\t15*time.Second,\n\t\t\t\tfunc(nm *netmap.NetworkMap) bool {\n\t\t\t\t\tfor _, p := range nm.Peers {\n\t\t\t\t\t\thi := p.Hostinfo()\n\t\t\t\t\t\tif hi.Valid() && hi.Hostname() == \"hiall-0\" {\n\t\t\t\t\t\t\treturn hi.OS() == \"StressTestOS\"\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\treturn false\n\t\t\t\t})\n\t\t}\n\t})\n\n\t// MachineKey should be consistent: the server should track\n\t// the same machine key the client registered with.\n\tt.Run(\"machine_key_consistent\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"mkey-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"mkey-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tc1.WaitForUpdate(t, 10*time.Second)\n\n\t\tnm := c1.Netmap()\n\t\trequire.NotNil(t, nm)\n\n\t\t// The client's MachineKey in the netmap should be non-zero.\n\t\tassert.False(t, nm.MachineKey.IsZero(),\n\t\t\t\"client's MachineKey should be non-zero\")\n\n\t\t// Server should have the same key.\n\t\tnodeID := findNodeID(t, srv, \"mkey-node1\")\n\t\tnv, ok := srv.State().GetNodeByID(nodeID)\n\t\trequire.True(t, ok)\n\n\t\tassert.Equal(t, nm.MachineKey.String(), nv.MachineKey().String(),\n\t\t\t\"client and server should agree on MachineKey\")\n\t})\n\n\t// NodeKey should be consistent between client and server.\n\tt.Run(\"node_key_consistent\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"nkey-user\")\n\n\t\tc1 := servertest.NewClient(t, srv, \"nkey-node1\",\n\t\t\tservertest.WithUser(user))\n\t\tc1.WaitForUpdate(t, 10*time.Second)\n\n\t\tnm := c1.Netmap()\n\t\trequire.NotNil(t, nm)\n\n\t\tassert.False(t, nm.NodeKey.IsZero(),\n\t\t\t\"client's NodeKey should be non-zero\")\n\n\t\tnodeID := findNodeID(t, srv, \"nkey-node1\")\n\t\tnv, ok := srv.State().GetNodeByID(nodeID)\n\t\trequire.True(t, ok)\n\n\t\tassert.Equal(t, nm.NodeKey.String(), nv.NodeKey().String(),\n\t\t\t\"client and server should agree on NodeKey\")\n\t})\n}\n\n// TestStressChurn tests behavior under sustained connect/disconnect churn.\nfunc TestStressChurn(t *testing.T) {\n\tt.Parallel()\n\n\t// Connect 5 nodes, then replace them all one by one.\n\t// Each replacement connects a new node and disconnects the old.\n\t// The remaining nodes should always see a consistent mesh.\n\tt.Run(\"rolling_replacement\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"rolling-user\")\n\n\t\tconst n = 5\n\n\t\tclients := make([]*servertest.TestClient, n)\n\t\tfor i := range n {\n\t\t\tclients[i] = servertest.NewClient(t, srv,\n\t\t\t\tfmt.Sprintf(\"rolling-%d\", i),\n\t\t\t\tservertest.WithUser(user))\n\t\t}\n\n\t\tfor _, c := range clients {\n\t\t\tc.WaitForPeers(t, n-1, 20*time.Second)\n\t\t}\n\n\t\t// Replace each node one at a time.\n\t\tfor i := range n {\n\t\t\tclients[i].Disconnect(t)\n\t\t\tclients[i] = servertest.NewClient(t, srv,\n\t\t\t\tfmt.Sprintf(\"rolling-new-%d\", i),\n\t\t\t\tservertest.WithUser(user))\n\t\t}\n\n\t\t// Wait for the new set to converge.\n\t\tfor _, c := range clients {\n\t\t\tc.WaitForPeers(t, n-1, 30*time.Second)\n\t\t}\n\n\t\tservertest.AssertSymmetricVisibility(t, clients)\n\t})\n\n\t// Add nodes one at a time and verify the mesh grows correctly\n\t// at each step.\n\tt.Run(\"incremental_mesh_growth\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"incr-user\")\n\n\t\tclients := make([]*servertest.TestClient, 0, 
8)\n\n\t\tfor i := range 8 {\n\t\t\tc := servertest.NewClient(t, srv,\n\t\t\t\tfmt.Sprintf(\"incr-%d\", i),\n\t\t\t\tservertest.WithUser(user))\n\t\t\tclients = append(clients, c)\n\n\t\t\t// After each addition, verify all existing clients see\n\t\t\t// the correct number of peers.\n\t\t\texpectedPeers := i // i-th node means i peers for existing nodes\n\t\t\tfor _, existing := range clients {\n\t\t\t\texisting.WaitForPeers(t, expectedPeers, 15*time.Second)\n\t\t\t}\n\t\t}\n\n\t\t// Final check.\n\t\tservertest.AssertMeshComplete(t, clients)\n\t})\n\n\t// Connect/disconnect the same node many times. The server\n\t// should handle this without leaking state.\n\tt.Run(\"repeated_connect_disconnect_same_node\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsrv := servertest.NewServer(t)\n\t\tuser := srv.CreateUser(t, \"repeat-user\")\n\n\t\tobserver := servertest.NewClient(t, srv, \"repeat-observer\",\n\t\t\tservertest.WithUser(user))\n\t\tflapper := servertest.NewClient(t, srv, \"repeat-flapper\",\n\t\t\tservertest.WithUser(user))\n\n\t\tobserver.WaitForPeers(t, 1, 10*time.Second)\n\n\t\tfor i := range 10 {\n\t\t\tflapper.Disconnect(t)\n\t\t\tflapper.Reconnect(t)\n\t\t\tflapper.WaitForPeers(t, 1, 10*time.Second)\n\n\t\t\tif i%3 == 0 {\n\t\t\t\tt.Logf(\"cycle %d: flapper sees %d peers, observer sees %d peers\",\n\t\t\t\t\ti, len(flapper.Peers()), len(observer.Peers()))\n\t\t\t}\n\t\t}\n\n\t\t// After all cycles, mesh should be healthy.\n\t\tobserver.WaitForPeers(t, 1, 10*time.Second)\n\n\t\t_, found := observer.PeerByName(\"repeat-flapper\")\n\t\tassert.True(t, found,\n\t\t\t\"observer should still see flapper after 10 reconnect cycles\")\n\t})\n\n\t// All nodes disconnect and reconnect simultaneously.\n\tt.Run(\"mass_reconnect\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tsizes := []int{4, 6}\n\t\tfor _, n := range sizes {\n\t\t\tt.Run(fmt.Sprintf(\"%d_nodes\", n), func(t *testing.T) {\n\t\t\t\tt.Parallel()\n\n\t\t\t\tsrv := servertest.NewServer(t)\n\t\t\t\tuser := srv.CreateUser(t, \"massrecon-user\")\n\n\t\t\t\tclients := make([]*servertest.TestClient, n)\n\t\t\t\tfor i := range n {\n\t\t\t\t\tclients[i] = servertest.NewClient(t, srv,\n\t\t\t\t\t\tfmt.Sprintf(\"massrecon-%d\", i),\n\t\t\t\t\t\tservertest.WithUser(user))\n\t\t\t\t}\n\n\t\t\t\tfor _, c := range clients {\n\t\t\t\t\tc.WaitForPeers(t, n-1, 20*time.Second)\n\t\t\t\t}\n\n\t\t\t\t// All disconnect.\n\t\t\t\tfor _, c := range clients {\n\t\t\t\t\tc.Disconnect(t)\n\t\t\t\t}\n\n\t\t\t\t// All reconnect.\n\t\t\t\tfor _, c := range clients {\n\t\t\t\t\tc.Reconnect(t)\n\t\t\t\t}\n\n\t\t\t\t// Should re-form mesh.\n\t\t\t\tfor _, c := range clients {\n\t\t\t\t\tc.WaitForPeers(t, n-1, 30*time.Second)\n\t\t\t\t}\n\n\t\t\t\tservertest.AssertMeshComplete(t, clients)\n\t\t\t\tservertest.AssertConsistentState(t, clients)\n\t\t\t})\n\t\t}\n\t})\n}\n"
  },
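  {
    "path": "hscontrol/servertest/example_mesh_sketch_test.go",
    "content": "package servertest_test\n\n// This file is an illustrative sketch, not part of the original suite: it\n// condenses the setup pattern the stress tests above rely on (create an\n// in-process server, register clients, wait for convergence, assert the\n// mesh) into one minimal, self-contained example. It uses only servertest\n// helpers already exercised elsewhere in this package; the file name itself\n// is hypothetical.\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/servertest\"\n)\n\nfunc TestExampleMinimalMesh(t *testing.T) {\n\tt.Parallel()\n\n\t// One in-process headscale server and two clients for the same user.\n\tsrv := servertest.NewServer(t)\n\tuser := srv.CreateUser(t, \"example-user\")\n\n\tc1 := servertest.NewClient(t, srv, \"example-node1\",\n\t\tservertest.WithUser(user))\n\tc2 := servertest.NewClient(t, srv, \"example-node2\",\n\t\tservertest.WithUser(user))\n\n\t// Each client should see exactly one peer once the mesh has formed.\n\tc1.WaitForPeers(t, 1, 10*time.Second)\n\tc2.WaitForPeers(t, 1, 10*time.Second)\n\n\t// The shared assertion helper verifies full, symmetric visibility.\n\tservertest.AssertMeshComplete(t, []*servertest.TestClient{c1, c2})\n}\n"
  },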
  {
    "path": "hscontrol/servertest/weather_test.go",
    "content": "package servertest_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/servertest\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\n// TestNetworkWeather exercises scenarios that simulate unstable\n// network conditions: rapid reconnects, disconnect/reconnect\n// timing, and connection flapping.\nfunc TestNetworkWeather(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"rapid_reconnect_stays_online\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\th := servertest.NewHarness(t, 2)\n\n\t\tfor range 10 {\n\t\t\th.Client(0).Disconnect(t)\n\t\t\th.Client(0).Reconnect(t)\n\t\t}\n\n\t\t// After rapid flapping, mesh should still be complete.\n\t\th.WaitForMeshComplete(t, 15*time.Second)\n\t\tservertest.AssertMeshComplete(t, h.Clients())\n\t})\n\n\tt.Run(\"reconnect_within_grace_period\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 2)\n\n\t\th.Client(0).Disconnect(t)\n\n\t\t// Reconnect quickly (well within the 10-second grace period).\n\t\th.Client(0).ReconnectAfter(t, 1*time.Second)\n\t\th.WaitForMeshComplete(t, 15*time.Second)\n\n\t\t// Peer should see us as online after reconnection.\n\t\tservertest.AssertPeerOnline(t, h.Client(1), h.Client(0).Name)\n\t})\n\n\tt.Run(\"disconnect_types\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tcases := []struct {\n\t\t\tname       string\n\t\t\tdisconnect func(c *servertest.TestClient, tb testing.TB)\n\t\t}{\n\t\t\t{\"clean_disconnect\", (*servertest.TestClient).Disconnect},\n\t\t}\n\t\tfor _, tc := range cases {\n\t\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\t\tt.Parallel()\n\t\t\t\th := servertest.NewHarness(t, 2)\n\n\t\t\t\ttc.disconnect(h.Client(1), t)\n\n\t\t\t\t// The remaining client should eventually see peer gone/offline.\n\t\t\t\tassert.Eventually(t, func() bool {\n\t\t\t\t\t_, found := h.Client(0).PeerByName(h.Client(1).Name)\n\t\t\t\t\tif found {\n\t\t\t\t\t\t// If still in peer list, check if it's marked offline.\n\t\t\t\t\t\tisOnline, known := func() (bool, bool) {\n\t\t\t\t\t\t\tpeer, ok := h.Client(0).PeerByName(h.Client(1).Name)\n\t\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\t\treturn false, false\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\treturn peer.Online().GetOk()\n\t\t\t\t\t\t}()\n\t\t\t\t\t\t// Either unknown or offline is acceptable.\n\t\t\t\t\t\treturn known && !isOnline\n\t\t\t\t\t}\n\n\t\t\t\t\treturn true // peer gone\n\t\t\t\t}, 30*time.Second, 500*time.Millisecond,\n\t\t\t\t\t\"peer should become offline or disappear\")\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"state_consistent_through_reconnection\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 3)\n\n\t\t// Disconnect and reconnect the middle node.\n\t\th.Client(1).Disconnect(t)\n\t\th.Client(1).Reconnect(t)\n\n\t\t// Wait for convergence and verify consistency.\n\t\th.WaitForMeshComplete(t, 15*time.Second)\n\t\tservertest.AssertConsistentState(t, h.Clients())\n\t})\n\n\tt.Run(\"multiple_reconnect_delays\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tdelays := []struct {\n\t\t\tname  string\n\t\t\tdelay time.Duration\n\t\t}{\n\t\t\t{\"immediate\", 0},\n\t\t\t{\"100ms\", 100 * time.Millisecond},\n\t\t\t{\"500ms\", 500 * time.Millisecond},\n\t\t\t{\"1s\", 1 * time.Second},\n\t\t}\n\t\tfor _, tc := range delays {\n\t\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\t\tt.Parallel()\n\t\t\t\th := servertest.NewHarness(t, 2)\n\n\t\t\t\tif tc.delay > 0 {\n\t\t\t\t\th.Client(0).ReconnectAfter(t, tc.delay)\n\t\t\t\t} else 
{\n\t\t\t\t\th.Client(0).Disconnect(t)\n\t\t\t\t\th.Client(0).Reconnect(t)\n\t\t\t\t}\n\n\t\t\t\th.WaitForMeshComplete(t, 15*time.Second)\n\t\t\t\tservertest.AssertMeshComplete(t, h.Clients())\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"flapping_does_not_leak_goroutines\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 2)\n\n\t\t// Do many rapid disconnect/reconnect cycles.\n\t\tfor i := range 20 {\n\t\t\th.Client(0).Disconnect(t)\n\t\t\th.Client(0).Reconnect(t)\n\n\t\t\tif i%5 == 0 {\n\t\t\t\tt.Logf(\"flap cycle %d: %s has %d peers\",\n\t\t\t\t\ti, h.Client(0).Name, len(h.Client(0).Peers()))\n\t\t\t}\n\t\t}\n\n\t\t// Mesh should still be working.\n\t\th.WaitForMeshComplete(t, 15*time.Second)\n\t\tservertest.AssertMeshComplete(t, h.Clients())\n\t})\n\n\tt.Run(\"scale_20_nodes\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\th := servertest.NewHarness(t, 20)\n\t\tservertest.AssertMeshComplete(t, h.Clients())\n\t})\n}\n"
  },
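  {
    "path": "hscontrol/servertest/example_reconnect_sketch_test.go",
    "content": "package servertest_test\n\n// Illustrative sketch (hypothetical file): the disconnect/reconnect pattern\n// from weather_test.go reduced to its essentials. Take a node offline,\n// bring it back within the grace period, and check that its peer reports it\n// online again. Only helpers already used in this package appear here.\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/servertest\"\n)\n\nfunc TestExampleReconnectVisibility(t *testing.T) {\n\tt.Parallel()\n\n\th := servertest.NewHarness(t, 2)\n\n\t// Disconnect, then reconnect after a short delay, mirroring the\n\t// reconnect_within_grace_period case above.\n\th.Client(0).Disconnect(t)\n\th.Client(0).ReconnectAfter(t, 500*time.Millisecond)\n\n\t// The mesh should re-form and the peer should see us online again.\n\th.WaitForMeshComplete(t, 15*time.Second)\n\tservertest.AssertPeerOnline(t, h.Client(1), h.Client(0).Name)\n}\n"
  },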
  {
    "path": "hscontrol/state/debug.go",
    "content": "package state\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\thsdb \"github.com/juanfont/headscale/hscontrol/db\"\n\t\"github.com/juanfont/headscale/hscontrol/routes\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"tailscale.com/tailcfg\"\n)\n\n// DebugOverviewInfo represents the state overview information in a structured format.\ntype DebugOverviewInfo struct {\n\tNodes struct {\n\t\tTotal     int `json:\"total\"`\n\t\tOnline    int `json:\"online\"`\n\t\tExpired   int `json:\"expired\"`\n\t\tEphemeral int `json:\"ephemeral\"`\n\t} `json:\"nodes\"`\n\tUsers      map[string]int `json:\"users\"` // username -> node count\n\tTotalUsers int            `json:\"total_users\"`\n\tPolicy     struct {\n\t\tMode string `json:\"mode\"`\n\t\tPath string `json:\"path,omitempty\"`\n\t} `json:\"policy\"`\n\tDERP struct {\n\t\tConfigured bool `json:\"configured\"`\n\t\tRegions    int  `json:\"regions\"`\n\t} `json:\"derp\"`\n\tPrimaryRoutes int `json:\"primary_routes\"`\n}\n\n// DebugDERPInfo represents DERP map information in a structured format.\ntype DebugDERPInfo struct {\n\tConfigured   bool                     `json:\"configured\"`\n\tTotalRegions int                      `json:\"total_regions\"`\n\tRegions      map[int]*DebugDERPRegion `json:\"regions,omitempty\"`\n}\n\n// DebugDERPRegion represents a single DERP region.\ntype DebugDERPRegion struct {\n\tRegionID   int              `json:\"region_id\"`\n\tRegionName string           `json:\"region_name\"`\n\tNodes      []*DebugDERPNode `json:\"nodes\"`\n}\n\n// DebugDERPNode represents a single DERP node.\ntype DebugDERPNode struct {\n\tName     string `json:\"name\"`\n\tHostName string `json:\"hostname\"`\n\tDERPPort int    `json:\"derp_port\"`\n\tSTUNPort int    `json:\"stun_port,omitempty\"`\n}\n\n// DebugStringInfo wraps a debug string for JSON serialization.\ntype DebugStringInfo struct {\n\tContent string `json:\"content\"`\n}\n\n// DebugOverview returns a comprehensive overview of the current state for debugging.\nfunc (s *State) DebugOverview() string {\n\tallNodes := s.nodeStore.ListNodes()\n\tusers, _ := s.ListAllUsers()\n\n\tvar sb strings.Builder\n\n\tsb.WriteString(\"=== Headscale State Overview ===\\n\\n\")\n\n\t// Node statistics\n\tsb.WriteString(fmt.Sprintf(\"Nodes: %d total\\n\", allNodes.Len()))\n\n\tuserNodeCounts := make(map[string]int)\n\tonlineCount := 0\n\texpiredCount := 0\n\tephemeralCount := 0\n\n\tnow := time.Now()\n\n\tfor _, node := range allNodes.All() {\n\t\tif node.Valid() {\n\t\t\tuserName := node.Owner().Name()\n\t\t\tuserNodeCounts[userName]++\n\n\t\t\tif node.IsOnline().Valid() && node.IsOnline().Get() {\n\t\t\t\tonlineCount++\n\t\t\t}\n\n\t\t\tif node.Expiry().Valid() && node.Expiry().Get().Before(now) {\n\t\t\t\texpiredCount++\n\t\t\t}\n\n\t\t\tif node.AuthKey().Valid() && node.AuthKey().Ephemeral() {\n\t\t\t\tephemeralCount++\n\t\t\t}\n\t\t}\n\t}\n\n\tsb.WriteString(fmt.Sprintf(\"  - Online: %d\\n\", onlineCount))\n\tsb.WriteString(fmt.Sprintf(\"  - Expired: %d\\n\", expiredCount))\n\tsb.WriteString(fmt.Sprintf(\"  - Ephemeral: %d\\n\", ephemeralCount))\n\tsb.WriteString(\"\\n\")\n\n\t// User statistics\n\tsb.WriteString(fmt.Sprintf(\"Users: %d total\\n\", len(users)))\n\n\tfor userName, nodeCount := range userNodeCounts {\n\t\tsb.WriteString(fmt.Sprintf(\"  - %s: %d nodes\\n\", userName, nodeCount))\n\t}\n\n\tsb.WriteString(\"\\n\")\n\n\t// Policy information\n\tsb.WriteString(\"Policy:\\n\")\n\tsb.WriteString(fmt.Sprintf(\"  - Mode: %s\\n\", s.cfg.Policy.Mode))\n\n\tif 
s.cfg.Policy.Mode == types.PolicyModeFile {\n\t\tsb.WriteString(fmt.Sprintf(\"  - Path: %s\\n\", s.cfg.Policy.Path))\n\t}\n\n\tsb.WriteString(\"\\n\")\n\n\t// DERP information\n\tderpMap := s.derpMap.Load()\n\tif derpMap != nil {\n\t\tsb.WriteString(fmt.Sprintf(\"DERP: %d regions configured\\n\", len(derpMap.Regions)))\n\t} else {\n\t\tsb.WriteString(\"DERP: not configured\\n\")\n\t}\n\n\tsb.WriteString(\"\\n\")\n\n\t// Route information\n\trouteCount := len(strings.Split(strings.TrimSpace(s.primaryRoutes.String()), \"\\n\"))\n\tif s.primaryRoutes.String() == \"\" {\n\t\trouteCount = 0\n\t}\n\n\tsb.WriteString(fmt.Sprintf(\"Primary Routes: %d active\\n\", routeCount))\n\tsb.WriteString(\"\\n\")\n\n\t// Registration cache\n\tsb.WriteString(\"Registration Cache: active\\n\")\n\tsb.WriteString(\"\\n\")\n\n\treturn sb.String()\n}\n\n// DebugNodeStore returns debug information about the NodeStore.\nfunc (s *State) DebugNodeStore() string {\n\treturn s.nodeStore.DebugString()\n}\n\n// DebugDERPMap returns debug information about the DERP map configuration.\nfunc (s *State) DebugDERPMap() string {\n\tderpMap := s.derpMap.Load()\n\tif derpMap == nil {\n\t\treturn \"DERP Map: not configured\\n\"\n\t}\n\n\tvar sb strings.Builder\n\n\tsb.WriteString(\"=== DERP Map Configuration ===\\n\\n\")\n\n\tsb.WriteString(fmt.Sprintf(\"Total Regions: %d\\n\\n\", len(derpMap.Regions)))\n\n\tfor regionID, region := range derpMap.Regions {\n\t\tsb.WriteString(fmt.Sprintf(\"Region %d: %s\\n\", regionID, region.RegionName))\n\t\tsb.WriteString(fmt.Sprintf(\"  - Nodes: %d\\n\", len(region.Nodes)))\n\n\t\tfor _, node := range region.Nodes {\n\t\t\tsb.WriteString(fmt.Sprintf(\"    - %s (%s:%d)\\n\",\n\t\t\t\tnode.Name, node.HostName, node.DERPPort))\n\n\t\t\tif node.STUNPort != 0 {\n\t\t\t\tsb.WriteString(fmt.Sprintf(\"      STUN: %d\\n\", node.STUNPort))\n\t\t\t}\n\t\t}\n\n\t\tsb.WriteString(\"\\n\")\n\t}\n\n\treturn sb.String()\n}\n\n// DebugSSHPolicies returns debug information about SSH policies for all nodes.\nfunc (s *State) DebugSSHPolicies() map[string]*tailcfg.SSHPolicy {\n\tnodes := s.nodeStore.ListNodes()\n\n\tsshPolicies := make(map[string]*tailcfg.SSHPolicy)\n\n\tfor _, node := range nodes.All() {\n\t\tif !node.Valid() {\n\t\t\tcontinue\n\t\t}\n\n\t\tpol, err := s.SSHPolicy(node)\n\t\tif err != nil {\n\t\t\t// Skip nodes whose SSH policy cannot be computed.\n\t\t\tcontinue\n\t\t}\n\n\t\tkey := fmt.Sprintf(\"id:%d hostname:%s givenname:%s\",\n\t\t\tnode.ID(), node.Hostname(), node.GivenName())\n\t\tsshPolicies[key] = pol\n\t}\n\n\treturn sshPolicies\n}\n\n// DebugRegistrationCache returns debug information about the registration cache.\nfunc (s *State) DebugRegistrationCache() map[string]any {\n\t// The cache doesn't expose internal statistics, so we provide basic info\n\tresult := map[string]any{\n\t\t\"type\":       \"zcache\",\n\t\t\"expiration\": registerCacheExpiration.String(),\n\t\t\"cleanup\":    registerCacheCleanup.String(),\n\t\t\"status\":     \"active\",\n\t}\n\n\treturn result\n}\n\n// DebugConfig returns debug information about the current configuration.\nfunc (s *State) DebugConfig() *types.Config {\n\treturn s.cfg\n}\n\n// DebugPolicy returns the current policy data as a string.\nfunc (s *State) DebugPolicy() (string, error) {\n\tswitch s.cfg.Policy.Mode {\n\tcase types.PolicyModeDB:\n\t\tp, err := s.GetPolicy()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\treturn p.Data, nil\n\tcase types.PolicyModeFile:\n\t\tpol, err := hsdb.PolicyBytes(s.db.DB, s.cfg)\n\t\tif err != nil {\n\t\t\treturn \"\", 
err\n\t\t}\n\n\t\treturn string(pol), nil\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"%w: %s\", ErrUnsupportedPolicyMode, s.cfg.Policy.Mode)\n\t}\n}\n\n// DebugFilter returns the current filter rules and matchers.\nfunc (s *State) DebugFilter() ([]tailcfg.FilterRule, error) {\n\tfilter, _ := s.Filter()\n\treturn filter, nil\n}\n\n// DebugRoutes returns the current primary routes information as a structured object.\nfunc (s *State) DebugRoutes() routes.DebugRoutes {\n\treturn s.primaryRoutes.DebugJSON()\n}\n\n// DebugRoutesString returns the current primary routes information as a string.\nfunc (s *State) DebugRoutesString() string {\n\treturn s.PrimaryRoutesString()\n}\n\n// DebugPolicyManager returns the policy manager debug string.\nfunc (s *State) DebugPolicyManager() string {\n\treturn s.PolicyDebugString()\n}\n\n// PolicyDebugString returns a debug representation of the current policy.\nfunc (s *State) PolicyDebugString() string {\n\treturn s.polMan.DebugString()\n}\n\n// DebugOverviewJSON returns a structured overview of the current state for debugging.\nfunc (s *State) DebugOverviewJSON() DebugOverviewInfo {\n\tallNodes := s.nodeStore.ListNodes()\n\tusers, _ := s.ListAllUsers()\n\n\tinfo := DebugOverviewInfo{\n\t\tUsers:      make(map[string]int),\n\t\tTotalUsers: len(users),\n\t}\n\n\t// Node statistics\n\tinfo.Nodes.Total = allNodes.Len()\n\tnow := time.Now()\n\n\tfor _, node := range allNodes.All() {\n\t\tif node.Valid() {\n\t\t\tuserName := node.Owner().Name()\n\t\t\tinfo.Users[userName]++\n\n\t\t\tif node.IsOnline().Valid() && node.IsOnline().Get() {\n\t\t\t\tinfo.Nodes.Online++\n\t\t\t}\n\n\t\t\tif node.Expiry().Valid() && node.Expiry().Get().Before(now) {\n\t\t\t\tinfo.Nodes.Expired++\n\t\t\t}\n\n\t\t\tif node.AuthKey().Valid() && node.AuthKey().Ephemeral() {\n\t\t\t\tinfo.Nodes.Ephemeral++\n\t\t\t}\n\t\t}\n\t}\n\n\t// Policy information\n\tinfo.Policy.Mode = string(s.cfg.Policy.Mode)\n\tif s.cfg.Policy.Mode == types.PolicyModeFile {\n\t\tinfo.Policy.Path = s.cfg.Policy.Path\n\t}\n\n\tderpMap := s.derpMap.Load()\n\tif derpMap != nil {\n\t\tinfo.DERP.Configured = true\n\t\tinfo.DERP.Regions = len(derpMap.Regions)\n\t} else {\n\t\tinfo.DERP.Configured = false\n\t\tinfo.DERP.Regions = 0\n\t}\n\n\t// Route information\n\trouteCount := len(strings.Split(strings.TrimSpace(s.primaryRoutes.String()), \"\\n\"))\n\tif s.primaryRoutes.String() == \"\" {\n\t\trouteCount = 0\n\t}\n\n\tinfo.PrimaryRoutes = routeCount\n\n\treturn info\n}\n\n// DebugDERPJSON returns structured debug information about the DERP map configuration.\nfunc (s *State) DebugDERPJSON() DebugDERPInfo {\n\tderpMap := s.derpMap.Load()\n\n\tinfo := DebugDERPInfo{\n\t\tConfigured: derpMap != nil,\n\t\tRegions:    make(map[int]*DebugDERPRegion),\n\t}\n\n\tif derpMap == nil {\n\t\treturn info\n\t}\n\n\tinfo.TotalRegions = len(derpMap.Regions)\n\n\tfor regionID, region := range derpMap.Regions {\n\t\tdebugRegion := &DebugDERPRegion{\n\t\t\tRegionID:   regionID,\n\t\t\tRegionName: region.RegionName,\n\t\t\tNodes:      make([]*DebugDERPNode, 0, len(region.Nodes)),\n\t\t}\n\n\t\tfor _, node := range region.Nodes {\n\t\t\tdebugNode := &DebugDERPNode{\n\t\t\t\tName:     node.Name,\n\t\t\t\tHostName: node.HostName,\n\t\t\t\tDERPPort: node.DERPPort,\n\t\t\t\tSTUNPort: node.STUNPort,\n\t\t\t}\n\t\t\tdebugRegion.Nodes = append(debugRegion.Nodes, debugNode)\n\t\t}\n\n\t\tinfo.Regions[regionID] = debugRegion\n\t}\n\n\treturn info\n}\n\n// DebugNodeStoreJSON returns the actual nodes map from the current NodeStore snapshot.\nfunc (s 
*State) DebugNodeStoreJSON() map[types.NodeID]types.Node {\n\tsnapshot := s.nodeStore.data.Load()\n\treturn snapshot.nodesByID\n}\n\n// DebugPolicyManagerJSON returns structured debug information about the policy manager.\nfunc (s *State) DebugPolicyManagerJSON() DebugStringInfo {\n\treturn DebugStringInfo{\n\t\tContent: s.polMan.DebugString(),\n\t}\n}\n"
  },
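  {
    "path": "hscontrol/state/debug_json_sketch_test.go",
    "content": "package state\n\n// Illustrative sketch (hypothetical file): shows the JSON shape produced by\n// the debug structs in debug.go. The key names are fixed by the struct tags\n// on DebugOverviewInfo, so this doubles as a guard against accidental tag\n// changes. The field values are arbitrary example data.\n\nimport (\n\t\"encoding/json\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestDebugOverviewInfoJSONShape(t *testing.T) {\n\tvar info DebugOverviewInfo\n\tinfo.Nodes.Total = 2\n\tinfo.Nodes.Online = 1\n\tinfo.Users = map[string]int{\"alice\": 2}\n\tinfo.TotalUsers = 1\n\tinfo.Policy.Mode = \"file\"\n\tinfo.DERP.Configured = true\n\tinfo.DERP.Regions = 1\n\n\tout, err := json.Marshal(info)\n\trequire.NoError(t, err)\n\n\t// Key names come from the json tags on DebugOverviewInfo.\n\tassert.Contains(t, string(out), `\"total_users\":1`)\n\tassert.Contains(t, string(out), `\"primary_routes\":0`)\n\tassert.Contains(t, string(out), `\"mode\":\"file\"`)\n}\n"
  },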
  {
    "path": "hscontrol/state/debug_test.go",
    "content": "package state\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestNodeStoreDebugString(t *testing.T) {\n\ttests := []struct {\n\t\tname     string\n\t\tsetupFn  func() *NodeStore\n\t\tcontains []string\n\t}{\n\t\t{\n\t\t\tname: \"empty nodestore\",\n\t\t\tsetupFn: func() *NodeStore {\n\t\t\t\treturn NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)\n\t\t\t},\n\t\t\tcontains: []string{\n\t\t\t\t\"=== NodeStore Debug Information ===\",\n\t\t\t\t\"Total Nodes: 0\",\n\t\t\t\t\"Users with Nodes: 0\",\n\t\t\t\t\"NodeKey Index: 0 entries\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"nodestore with data\",\n\t\t\tsetupFn: func() *NodeStore {\n\t\t\t\tnode1 := createTestNode(1, 1, \"user1\", \"node1\")\n\t\t\t\tnode2 := createTestNode(2, 2, \"user2\", \"node2\")\n\n\t\t\t\tstore := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)\n\t\t\t\tstore.Start()\n\n\t\t\t\t_ = store.PutNode(node1)\n\t\t\t\t_ = store.PutNode(node2)\n\n\t\t\t\treturn store\n\t\t\t},\n\t\t\tcontains: []string{\n\t\t\t\t\"Total Nodes: 2\",\n\t\t\t\t\"Users with Nodes: 2\",\n\t\t\t\t\"Peer Relationships:\",\n\t\t\t\t\"NodeKey Index: 2 entries\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tstore := tt.setupFn()\n\t\t\tif store.writeQueue != nil {\n\t\t\t\tdefer store.Stop()\n\t\t\t}\n\n\t\t\tdebugStr := store.DebugString()\n\n\t\t\tfor _, expected := range tt.contains {\n\t\t\t\tassert.Contains(t, debugStr, expected,\n\t\t\t\t\t\"Debug string should contain: %s\\nActual debug:\\n%s\", expected, debugStr)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestDebugRegistrationCache(t *testing.T) {\n\t// Create a minimal NodeStore for testing debug methods\n\tstore := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)\n\n\tdebugStr := store.DebugString()\n\n\t// Should contain basic debug information\n\tassert.Contains(t, debugStr, \"=== NodeStore Debug Information ===\")\n\tassert.Contains(t, debugStr, \"Total Nodes: 0\")\n\tassert.Contains(t, debugStr, \"Users with Nodes: 0\")\n\tassert.Contains(t, debugStr, \"NodeKey Index: 0 entries\")\n}\n"
  },
  {
    "path": "hscontrol/state/endpoint_test.go",
    "content": "package state\n\nimport (\n\t\"net/netip\"\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/tailcfg\"\n)\n\n// TestEndpointStorageInNodeStore verifies that endpoints sent in MapRequest via ApplyPeerChange\n// are correctly stored in the NodeStore and can be retrieved for sending to peers.\n// This test reproduces the issue reported in https://github.com/juanfont/headscale/issues/2846\nfunc TestEndpointStorageInNodeStore(t *testing.T) {\n\t// Create two test nodes\n\tnode1 := createTestNode(1, 1, \"test-user\", \"node1\")\n\tnode2 := createTestNode(2, 1, \"test-user\", \"node2\")\n\n\t// Create NodeStore with allow-all peers function\n\tstore := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)\n\n\tstore.Start()\n\tdefer store.Stop()\n\n\t// Add both nodes to NodeStore\n\tstore.PutNode(node1)\n\tstore.PutNode(node2)\n\n\t// Create a MapRequest with endpoints for node1\n\tendpoints := []netip.AddrPort{\n\t\tnetip.MustParseAddrPort(\"192.168.1.1:41641\"),\n\t\tnetip.MustParseAddrPort(\"10.0.0.1:41641\"),\n\t}\n\n\tmapReq := tailcfg.MapRequest{\n\t\tNodeKey:   node1.NodeKey,\n\t\tDiscoKey:  node1.DiscoKey,\n\t\tEndpoints: endpoints,\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tHostname: \"node1\",\n\t\t},\n\t}\n\n\t// Simulate what UpdateNodeFromMapRequest does: create PeerChange and apply it\n\tpeerChange := node1.PeerChangeFromMapRequest(mapReq)\n\n\t// Verify PeerChange has endpoints\n\trequire.NotNil(t, peerChange.Endpoints, \"PeerChange should contain endpoints\")\n\tassert.Len(t, peerChange.Endpoints, len(endpoints),\n\t\t\"PeerChange should have same number of endpoints as MapRequest\")\n\n\t// Apply the PeerChange via NodeStore.UpdateNode\n\tupdatedNode, ok := store.UpdateNode(node1.ID, func(n *types.Node) {\n\t\tn.ApplyPeerChange(&peerChange)\n\t})\n\trequire.True(t, ok, \"UpdateNode should succeed\")\n\trequire.True(t, updatedNode.Valid(), \"Updated node should be valid\")\n\n\t// Verify endpoints are in the updated node view\n\tstoredEndpoints := updatedNode.Endpoints().AsSlice()\n\tassert.Len(t, storedEndpoints, len(endpoints),\n\t\t\"NodeStore should have same number of endpoints as sent\")\n\n\tif len(storedEndpoints) == len(endpoints) {\n\t\tfor i, ep := range endpoints {\n\t\t\tassert.Equal(t, ep, storedEndpoints[i],\n\t\t\t\t\"Endpoint %d should match\", i)\n\t\t}\n\t}\n\n\t// Verify we can retrieve the node again and endpoints are still there\n\tretrievedNode, found := store.GetNode(node1.ID)\n\trequire.True(t, found, \"node1 should exist in NodeStore\")\n\n\tretrievedEndpoints := retrievedNode.Endpoints().AsSlice()\n\tassert.Len(t, retrievedEndpoints, len(endpoints),\n\t\t\"Retrieved node should have same number of endpoints\")\n\n\t// Verify that when we get node1 as a peer of node2, it has endpoints\n\t// This is the critical part that was failing in the bug report\n\tpeers := store.ListPeers(node2.ID)\n\trequire.Positive(t, peers.Len(), \"node2 should have at least one peer\")\n\n\t// Find node1 in the peer list\n\tvar node1Peer types.NodeView\n\n\tfoundPeer := false\n\n\tfor _, peer := range peers.All() {\n\t\tif peer.ID() == node1.ID {\n\t\t\tnode1Peer = peer\n\t\t\tfoundPeer = true\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\trequire.True(t, foundPeer, \"node1 should be in node2's peer list\")\n\n\t// Check that node1's endpoints are available in the peer view\n\tpeerEndpoints := 
node1Peer.Endpoints().AsSlice()\n\tassert.Len(t, peerEndpoints, len(endpoints),\n\t\t\"Peer view should have same number of endpoints as sent\")\n\n\tif len(peerEndpoints) == len(endpoints) {\n\t\tfor i, ep := range endpoints {\n\t\t\tassert.Equal(t, ep, peerEndpoints[i],\n\t\t\t\t\"Peer endpoint %d should match\", i)\n\t\t}\n\t}\n}\n"
  },
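  {
    "path": "hscontrol/state/endpoint_roundtrip_sketch_test.go",
    "content": "package state\n\n// Illustrative sketch (hypothetical file): a compact replay of the endpoint\n// round-trip covered in endpoint_test.go. Build a PeerChange from a\n// MapRequest that carries new endpoints, apply it through the NodeStore,\n// and confirm the stored view reflects the change.\n\nimport (\n\t\"net/netip\"\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/tailcfg\"\n)\n\nfunc TestExampleEndpointRoundTrip(t *testing.T) {\n\tnode := createTestNode(1, 1, \"example-user\", \"example-node\")\n\n\tstore := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)\n\tstore.Start()\n\tdefer store.Stop()\n\n\tstore.PutNode(node)\n\n\t// A MapRequest announcing a single new endpoint.\n\tmapReq := tailcfg.MapRequest{\n\t\tNodeKey:   node.NodeKey,\n\t\tDiscoKey:  node.DiscoKey,\n\t\tEndpoints: []netip.AddrPort{netip.MustParseAddrPort(\"203.0.113.1:41641\")},\n\t}\n\n\t// Same flow as UpdateNodeFromMapRequest: derive a PeerChange and\n\t// apply it inside an UpdateNode transaction.\n\tpeerChange := node.PeerChangeFromMapRequest(mapReq)\n\tupdated, ok := store.UpdateNode(node.ID, func(n *types.Node) {\n\t\tn.ApplyPeerChange(&peerChange)\n\t})\n\trequire.True(t, ok)\n\tassert.Equal(t, mapReq.Endpoints, updated.Endpoints().AsSlice())\n}\n"
  },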
  {
    "path": "hscontrol/state/ephemeral_test.go",
    "content": "package state\n\nimport (\n\t\"net/netip\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\n// TestEphemeralNodeDeleteWithConcurrentUpdate tests the race condition where UpdateNode and DeleteNode\n// are called concurrently and may be batched together. This reproduces the issue where ephemeral nodes\n// are not properly deleted during logout because UpdateNodeFromMapRequest returns a stale node view\n// after the node has been deleted from the NodeStore.\nfunc TestEphemeralNodeDeleteWithConcurrentUpdate(t *testing.T) {\n\t// Create a simple test node\n\tnode := createTestNode(1, 1, \"test-user\", \"test-node\")\n\n\t// Create NodeStore\n\tstore := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)\n\n\tstore.Start()\n\tdefer store.Stop()\n\n\t// Put the node in the store\n\tresultNode := store.PutNode(node)\n\trequire.True(t, resultNode.Valid(), \"initial PutNode should return valid node\")\n\n\t// Verify node exists\n\tretrievedNode, found := store.GetNode(node.ID)\n\trequire.True(t, found)\n\trequire.Equal(t, node.ID, retrievedNode.ID())\n\n\t// Test scenario: UpdateNode is called, returns a node view from the batch,\n\t// but in the same batch a DeleteNode removes the node.\n\t// This simulates what happens when:\n\t// 1. UpdateNodeFromMapRequest calls UpdateNode and gets back updatedNode\n\t// 2. At the same time, handleLogout calls DeleteNode\n\t// 3. They get batched together: [UPDATE, DELETE]\n\t// 4. UPDATE modifies the node, DELETE removes it\n\t// 5. UpdateNode returns a node view based on the state AFTER both operations\n\t// 6. If DELETE came after UPDATE, the returned node should be invalid\n\n\tdone := make(chan bool, 2)\n\n\tvar (\n\t\tupdatedNode types.NodeView\n\t\tupdateOk    bool\n\t)\n\n\t// Goroutine 1: UpdateNode (simulates UpdateNodeFromMapRequest)\n\n\tgo func() {\n\t\tupdatedNode, updateOk = store.UpdateNode(node.ID, func(n *types.Node) {\n\t\t\tn.LastSeen = new(time.Now())\n\t\t})\n\n\t\tdone <- true\n\t}()\n\n\t// Goroutine 2: DeleteNode (simulates handleLogout for ephemeral node)\n\tgo func() {\n\t\tstore.DeleteNode(node.ID)\n\n\t\tdone <- true\n\t}()\n\n\t// Wait for both operations\n\t<-done\n\t<-done\n\n\t// Verify node is eventually deleted\n\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t_, found = store.GetNode(node.ID)\n\t\tassert.False(c, found, \"node should be deleted from NodeStore\")\n\t}, 1*time.Second, 10*time.Millisecond, \"waiting for node to be deleted\")\n\n\t// If the update happened before delete in the batch, the returned node might be invalid\n\tif updateOk {\n\t\tt.Logf(\"UpdateNode returned ok=true, valid=%v\", updatedNode.Valid())\n\t\t// This is the bug scenario - UpdateNode thinks it succeeded but node is gone\n\t\tif updatedNode.Valid() {\n\t\t\tt.Logf(\"WARNING: UpdateNode returned valid node but node was deleted - this indicates the race condition bug\")\n\t\t}\n\t} else {\n\t\tt.Logf(\"UpdateNode correctly returned ok=false (node deleted in same batch)\")\n\t}\n}\n\n// TestUpdateNodeReturnsInvalidWhenDeletedInSameBatch specifically tests that when\n// UpdateNode and DeleteNode are in the same batch with DELETE after UPDATE,\n// the UpdateNode should return an invalid node view.\nfunc TestUpdateNodeReturnsInvalidWhenDeletedInSameBatch(t *testing.T) {\n\tnode := createTestNode(2, 1, \"test-user\", \"test-node-2\")\n\n\t// Use batch size of 2 to guarantee UpdateNode 
and DeleteNode batch together\n\tstore := NewNodeStore(nil, allowAllPeersFunc, 2, TestBatchTimeout)\n\n\tstore.Start()\n\tdefer store.Stop()\n\n\t// Put node in store\n\t_ = store.PutNode(node)\n\n\t// Queue UpdateNode and DeleteNode - with batch size of 2, they will batch together\n\tresultChan := make(chan struct {\n\t\tnode types.NodeView\n\t\tok   bool\n\t})\n\n\t// Start UpdateNode in goroutine - it will queue and wait for batch\n\tgo func() {\n\t\tnode, ok := store.UpdateNode(node.ID, func(n *types.Node) {\n\t\t\tn.LastSeen = new(time.Now())\n\t\t})\n\t\tresultChan <- struct {\n\t\t\tnode types.NodeView\n\t\t\tok   bool\n\t\t}{node, ok}\n\t}()\n\n\t// Start DeleteNode in goroutine - it will queue and trigger batch processing\n\t// Since batch size is 2, both operations will be processed together\n\tgo func() {\n\t\tstore.DeleteNode(node.ID)\n\t}()\n\n\t// Get the result from UpdateNode\n\tresult := <-resultChan\n\n\t// Node should be deleted\n\t_, found := store.GetNode(node.ID)\n\tassert.False(t, found, \"node should be deleted\")\n\n\t// The critical check: what did UpdateNode return?\n\t// After the commit c6b09289988f34398eb3157e31ba092eb8721a9f,\n\t// UpdateNode returns the node state from the batch.\n\t// If DELETE came after UPDATE in the batch, the node doesn't exist anymore,\n\t// so UpdateNode should return (invalid, false)\n\tt.Logf(\"UpdateNode returned: ok=%v, valid=%v\", result.ok, result.node.Valid())\n\n\t// This is the expected behavior - if node was deleted in same batch,\n\t// UpdateNode should return invalid node\n\tif result.ok && result.node.Valid() {\n\t\tt.Error(\"BUG: UpdateNode returned valid node even though it was deleted in same batch\")\n\t}\n}\n\n// TestPersistNodeToDBPreventsRaceCondition tests that persistNodeToDB correctly handles\n// the race condition where a node is deleted after UpdateNode returns but before\n// persistNodeToDB is called. 
This reproduces the ephemeral node deletion bug.\nfunc TestPersistNodeToDBPreventsRaceCondition(t *testing.T) {\n\tnode := createTestNode(3, 1, \"test-user\", \"test-node-3\")\n\n\tstore := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)\n\n\tstore.Start()\n\tdefer store.Stop()\n\n\t// Put node in store\n\t_ = store.PutNode(node)\n\n\t// Simulate UpdateNode being called\n\tupdatedNode, ok := store.UpdateNode(node.ID, func(n *types.Node) {\n\t\tn.LastSeen = new(time.Now())\n\t})\n\trequire.True(t, ok, \"UpdateNode should succeed\")\n\trequire.True(t, updatedNode.Valid(), \"UpdateNode should return valid node\")\n\n\t// Now delete the node (simulating ephemeral logout happening concurrently)\n\tstore.DeleteNode(node.ID)\n\n\t// Verify node is eventually deleted\n\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t_, found := store.GetNode(node.ID)\n\t\tassert.False(c, found, \"node should be deleted\")\n\t}, 1*time.Second, 10*time.Millisecond, \"waiting for node to be deleted\")\n\n\t// Now try to use the updatedNode from before the deletion\n\t// In the old code, this would re-insert the node into the database\n\t// With our fix, GetNode check in persistNodeToDB should prevent this\n\n\t// Simulate what persistNodeToDB does - check if node still exists\n\t_, exists := store.GetNode(updatedNode.ID())\n\tif !exists {\n\t\tt.Log(\"SUCCESS: persistNodeToDB check would prevent re-insertion of deleted node\")\n\t} else {\n\t\tt.Error(\"BUG: Node still exists in NodeStore after deletion\")\n\t}\n\n\t// The key assertion: after deletion, attempting to persist the old updatedNode\n\t// should fail because the node no longer exists in NodeStore\n\tassert.False(t, exists, \"persistNodeToDB should detect node was deleted and refuse to persist\")\n}\n\n// TestEphemeralNodeLogoutRaceCondition tests the specific race condition that occurs\n// when an ephemeral node logs out. This reproduces the bug where:\n//  1. UpdateNodeFromMapRequest calls UpdateNode and receives a node view\n//  2. Concurrently, handleLogout is called for the ephemeral node and calls DeleteNode\n//  3. UpdateNode and DeleteNode get batched together\n//  4. If UpdateNode's result is used to call persistNodeToDB after the deletion,\n//     the node could be re-inserted into the database even though it was deleted\nfunc TestEphemeralNodeLogoutRaceCondition(t *testing.T) {\n\tephemeralNode := createTestNode(4, 1, \"test-user\", \"ephemeral-node\")\n\tephemeralNode.AuthKey = &types.PreAuthKey{\n\t\tID:        1,\n\t\tKey:       \"test-key\",\n\t\tEphemeral: true,\n\t}\n\n\t// Use batch size of 2 to guarantee UpdateNode and DeleteNode batch together\n\tstore := NewNodeStore(nil, allowAllPeersFunc, 2, TestBatchTimeout)\n\n\tstore.Start()\n\tdefer store.Stop()\n\n\t// Put ephemeral node in store\n\t_ = store.PutNode(ephemeralNode)\n\n\t// Simulate concurrent operations:\n\t// 1. UpdateNode (from UpdateNodeFromMapRequest during polling)\n\t// 2. 
DeleteNode (from handleLogout when client sends logout request)\n\n\tvar (\n\t\tupdatedNode types.NodeView\n\t\tupdateOk    bool\n\t)\n\n\tdone := make(chan bool, 2)\n\n\t// Goroutine 1: UpdateNode (simulates UpdateNodeFromMapRequest)\n\tgo func() {\n\t\tupdatedNode, updateOk = store.UpdateNode(ephemeralNode.ID, func(n *types.Node) {\n\t\t\tn.LastSeen = new(time.Now())\n\t\t})\n\n\t\tdone <- true\n\t}()\n\n\t// Goroutine 2: DeleteNode (simulates handleLogout for ephemeral node)\n\tgo func() {\n\t\tstore.DeleteNode(ephemeralNode.ID)\n\n\t\tdone <- true\n\t}()\n\n\t// Wait for both operations\n\t<-done\n\t<-done\n\n\t// Verify node is eventually deleted\n\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t_, found := store.GetNode(ephemeralNode.ID)\n\t\tassert.False(c, found, \"ephemeral node should be deleted from NodeStore\")\n\t}, 1*time.Second, 10*time.Millisecond, \"waiting for ephemeral node to be deleted\")\n\n\t// Critical assertion: if UpdateNode returned before DeleteNode completed,\n\t// the updatedNode might be valid but the node is actually deleted.\n\t// This is the bug - UpdateNodeFromMapRequest would get a valid node,\n\t// then try to persist it, re-inserting the deleted ephemeral node.\n\tif updateOk && updatedNode.Valid() {\n\t\tt.Log(\"UpdateNode returned valid node, but node is deleted - this is the race condition\")\n\n\t\t// In the real code, this would cause persistNodeToDB to be called with updatedNode\n\t\t// The fix in persistNodeToDB checks if the node still exists:\n\t\t_, stillExists := store.GetNode(updatedNode.ID())\n\t\tassert.False(t, stillExists, \"persistNodeToDB should check NodeStore and find node deleted\")\n\t} else if !updateOk || !updatedNode.Valid() {\n\t\tt.Log(\"UpdateNode correctly returned invalid/not-ok result (delete happened in same batch)\")\n\t}\n}\n\n// TestUpdateNodeFromMapRequestEphemeralLogoutSequence tests the exact sequence\n// that causes ephemeral node logout failures:\n// 1. Client sends MapRequest with updated endpoint info\n// 2. UpdateNodeFromMapRequest starts processing, calls UpdateNode\n// 3. Client sends logout request (past expiry)\n// 4. handleLogout calls DeleteNode for ephemeral node\n// 5. UpdateNode and DeleteNode batch together\n// 6. UpdateNode returns a valid node (from before delete in batch)\n// 7. persistNodeToDB is called with the stale valid node\n// 8. 
Node gets re-inserted into database instead of staying deleted.\nfunc TestUpdateNodeFromMapRequestEphemeralLogoutSequence(t *testing.T) {\n\tephemeralNode := createTestNode(5, 1, \"test-user\", \"ephemeral-node-5\")\n\tephemeralNode.AuthKey = &types.PreAuthKey{\n\t\tID:        2,\n\t\tKey:       \"test-key-2\",\n\t\tEphemeral: true,\n\t}\n\n\t// Use batch size of 2 to guarantee UpdateNode and DeleteNode batch together\n\tstore := NewNodeStore(nil, allowAllPeersFunc, 2, TestBatchTimeout)\n\n\tstore.Start()\n\tdefer store.Stop()\n\n\t// Put ephemeral node in store\n\t_ = store.PutNode(ephemeralNode)\n\n\t// Step 1: UpdateNodeFromMapRequest calls UpdateNode\n\t// (simulating client sending MapRequest with endpoint updates)\n\tupdateResult := make(chan struct {\n\t\tnode types.NodeView\n\t\tok   bool\n\t})\n\n\tgo func() {\n\t\tnode, ok := store.UpdateNode(ephemeralNode.ID, func(n *types.Node) {\n\t\t\tn.LastSeen = new(time.Now())\n\t\t\tendpoint := netip.MustParseAddrPort(\"10.0.0.1:41641\")\n\t\t\tn.Endpoints = []netip.AddrPort{endpoint}\n\t\t})\n\t\tupdateResult <- struct {\n\t\t\tnode types.NodeView\n\t\t\tok   bool\n\t\t}{node, ok}\n\t}()\n\n\t// Step 2: Logout happens - handleLogout calls DeleteNode\n\t// With batch size of 2, this will trigger batch processing with UpdateNode\n\tgo func() {\n\t\tstore.DeleteNode(ephemeralNode.ID)\n\t}()\n\n\t// Step 3: Wait and verify node is eventually deleted\n\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t_, nodeExists := store.GetNode(ephemeralNode.ID)\n\t\tassert.False(c, nodeExists, \"ephemeral node must be deleted after logout\")\n\t}, 1*time.Second, 10*time.Millisecond, \"waiting for ephemeral node to be deleted\")\n\n\t// Step 4: Get the update result\n\tresult := <-updateResult\n\n\t// Simulate what happens if we try to persist the updatedNode\n\tif result.ok && result.node.Valid() {\n\t\t// This is the problematic path - UpdateNode returned a valid node\n\t\t// but the node was deleted in the same batch\n\t\tt.Log(\"UpdateNode returned valid node even though node was deleted\")\n\n\t\t// The fix: persistNodeToDB must check NodeStore before persisting\n\t\t_, checkExists := store.GetNode(result.node.ID())\n\t\tif checkExists {\n\t\t\tt.Error(\"BUG: Node still exists in NodeStore after deletion - should be impossible\")\n\t\t} else {\n\t\t\tt.Log(\"SUCCESS: persistNodeToDB would detect node is deleted and refuse to persist\")\n\t\t}\n\t} else {\n\t\tt.Log(\"UpdateNode correctly indicated node was deleted (returned invalid or not-ok)\")\n\t}\n\n\t// Final assertion: node must not exist\n\t_, finalExists := store.GetNode(ephemeralNode.ID)\n\tassert.False(t, finalExists, \"ephemeral node must remain deleted\")\n}\n\n// TestUpdateNodeDeletedInSameBatchReturnsInvalid specifically tests that when\n// UpdateNode and DeleteNode are batched together with DELETE after UPDATE,\n// UpdateNode returns ok=false to indicate the node was deleted.\nfunc TestUpdateNodeDeletedInSameBatchReturnsInvalid(t *testing.T) {\n\tnode := createTestNode(6, 1, \"test-user\", \"test-node-6\")\n\n\t// Use batch size of 2 to guarantee UpdateNode and DeleteNode batch together\n\tstore := NewNodeStore(nil, allowAllPeersFunc, 2, TestBatchTimeout)\n\n\tstore.Start()\n\tdefer store.Stop()\n\n\t// Put node in store\n\t_ = store.PutNode(node)\n\n\t// Queue UpdateNode and DeleteNode - with batch size of 2, they will batch together\n\tupdateDone := make(chan struct {\n\t\tnode\t
types.NodeView\n\t\tok   bool\n\t})\n\n\tgo func() {\n\t\tupdatedNode, ok := store.UpdateNode(node.ID, func(n *types.Node) {\n\t\t\tn.LastSeen = new(time.Now())\n\t\t})\n\t\tupdateDone <- struct {\n\t\t\tnode types.NodeView\n\t\t\tok   bool\n\t\t}{updatedNode, ok}\n\t}()\n\n\t// Queue DeleteNode - with batch size of 2, this triggers batch processing\n\tgo func() {\n\t\tstore.DeleteNode(node.ID)\n\t}()\n\n\t// Get UpdateNode result\n\tresult := <-updateDone\n\n\t// Node should be deleted\n\t_, exists := store.GetNode(node.ID)\n\tassert.False(t, exists, \"node should be deleted from store\")\n\n\t// UpdateNode should indicate the node was deleted\n\t// After c6b09289988f34398eb3157e31ba092eb8721a9f, when UPDATE and DELETE\n\t// are in the same batch with DELETE after UPDATE, UpdateNode returns\n\t// the state after the batch is applied - which means the node doesn't exist\n\tassert.False(t, result.ok, \"UpdateNode should return ok=false when node deleted in same batch\")\n\tassert.False(t, result.node.Valid(), \"UpdateNode should return invalid node when node deleted in same batch\")\n}\n\n// TestPersistNodeToDBChecksNodeStoreBeforePersist verifies that persistNodeToDB\n// checks if the node still exists in NodeStore before persisting to database.\n// This prevents the race condition where:\n// 1. UpdateNodeFromMapRequest calls UpdateNode and gets a valid node\n// 2. Ephemeral node logout calls DeleteNode\n// 3. UpdateNode and DeleteNode batch together\n// 4. UpdateNode returns a valid node (from before delete in batch)\n// 5. UpdateNodeFromMapRequest calls persistNodeToDB with the stale node\n// 6. persistNodeToDB must detect the node is deleted and refuse to persist.\nfunc TestPersistNodeToDBChecksNodeStoreBeforePersist(t *testing.T) {\n\tephemeralNode := createTestNode(7, 1, \"test-user\", \"ephemeral-node-7\")\n\tephemeralNode.AuthKey = &types.PreAuthKey{\n\t\tID:        3,\n\t\tKey:       \"test-key-3\",\n\t\tEphemeral: true,\n\t}\n\n\tstore := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)\n\n\tstore.Start()\n\tdefer store.Stop()\n\n\t// Put node\n\t_ = store.PutNode(ephemeralNode)\n\n\t// UpdateNode returns a node\n\tupdatedNode, ok := store.UpdateNode(ephemeralNode.ID, func(n *types.Node) {\n\t\tn.LastSeen = new(time.Now())\n\t})\n\trequire.True(t, ok, \"UpdateNode should succeed\")\n\trequire.True(t, updatedNode.Valid(), \"updated node should be valid\")\n\n\t// Delete the node\n\tstore.DeleteNode(ephemeralNode.ID)\n\n\t// Verify node is eventually deleted\n\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t_, exists := store.GetNode(ephemeralNode.ID)\n\t\tassert.False(c, exists, \"node should be deleted from NodeStore\")\n\t}, 1*time.Second, 10*time.Millisecond, \"waiting for node to be deleted\")\n\n\t// 4. 
Simulate what persistNodeToDB does - check if node still exists\n\t// The fix in persistNodeToDB checks NodeStore before persisting:\n\t// if !exists { return error }\n\t// This prevents re-inserting the deleted node into the database\n\n\t// Verify the node from UpdateNode is valid but node is gone from store\n\tassert.True(t, updatedNode.Valid(), \"UpdateNode returned a valid node view\")\n\t_, stillExists := store.GetNode(updatedNode.ID())\n\tassert.False(t, stillExists, \"but node should be deleted from NodeStore\")\n\n\t// This is the critical test: persistNodeToDB must check NodeStore\n\t// and refuse to persist if the node doesn't exist anymore\n\t// The actual persistNodeToDB implementation does:\n\t// _, exists := s.nodeStore.GetNode(node.ID())\n\t// if !exists { return error }\n}\n"
  },
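  {
    "path": "hscontrol/state/persist_guard_sketch_test.go",
    "content": "package state\n\n// Illustrative sketch (hypothetical file, not the production code): the\n// guard that the ephemeral-node tests above describe in prose. Before a\n// node view is written back to the database, re-check the NodeStore; if the\n// node was deleted in the meantime (for example in the same batch), refuse\n// to persist so the deleted node cannot be re-inserted. The names\n// errStaleNodeView and persistIfPresent are invented for this sketch.\n\nimport (\n\t\"errors\"\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nvar errStaleNodeView = errors.New(\"node no longer in NodeStore; refusing to persist stale view\")\n\n// persistIfPresent runs persistFn only when the node still exists in the\n// store, mirroring the existence check described for persistNodeToDB.\nfunc persistIfPresent(store *NodeStore, node types.NodeView, persistFn func(types.NodeView) error) error {\n\tif _, exists := store.GetNode(node.ID()); !exists {\n\t\treturn errStaleNodeView\n\t}\n\n\treturn persistFn(node)\n}\n\nfunc TestExamplePersistGuard(t *testing.T) {\n\tnode := createTestNode(42, 1, \"guard-user\", \"guard-node\")\n\n\tstore := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)\n\tstore.Start()\n\tdefer store.Stop()\n\n\tview := store.PutNode(node)\n\n\t// DeleteNode blocks until its batch is applied, so the node is gone\n\t// from the snapshot once it returns.\n\tstore.DeleteNode(node.ID)\n\n\t// The stale view from PutNode must not be persisted.\n\terr := persistIfPresent(store, view, func(types.NodeView) error { return nil })\n\tassert.ErrorIs(t, err, errStaleNodeView)\n}\n"
  },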
  {
    "path": "hscontrol/state/maprequest.go",
    "content": "// Package state provides pure functions for processing MapRequest data.\n// These functions are extracted from UpdateNodeFromMapRequest to improve\n// testability and maintainability.\n\npackage state\n\nimport (\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/rs/zerolog/log\"\n\t\"tailscale.com/tailcfg\"\n)\n\n// netInfoFromMapRequest determines the correct NetInfo to use.\n// Returns the NetInfo that should be used for this request.\nfunc netInfoFromMapRequest(\n\tnodeID types.NodeID,\n\tcurrentHostinfo *tailcfg.Hostinfo,\n\treqHostinfo *tailcfg.Hostinfo,\n) *tailcfg.NetInfo {\n\t// If request has NetInfo, use it\n\tif reqHostinfo != nil && reqHostinfo.NetInfo != nil {\n\t\treturn reqHostinfo.NetInfo\n\t}\n\n\t// Otherwise, use current NetInfo if available\n\tif currentHostinfo != nil && currentHostinfo.NetInfo != nil {\n\t\tlog.Debug().\n\t\t\tCaller().\n\t\t\tUint64(\"node.id\", nodeID.Uint64()).\n\t\t\tInt(\"preferredDERP\", currentHostinfo.NetInfo.PreferredDERP).\n\t\t\tMsg(\"using NetInfo from previous Hostinfo in MapRequest\")\n\n\t\treturn currentHostinfo.NetInfo\n\t}\n\n\t// No NetInfo available anywhere - log for debugging\n\tvar hostname string\n\tif reqHostinfo != nil {\n\t\thostname = reqHostinfo.Hostname\n\t} else if currentHostinfo != nil {\n\t\thostname = currentHostinfo.Hostname\n\t}\n\n\tlog.Debug().\n\t\tCaller().\n\t\tUint64(\"node.id\", nodeID.Uint64()).\n\t\tStr(\"node.hostname\", hostname).\n\t\tMsg(\"node sent update but has no NetInfo in request or database\")\n\n\treturn nil\n}\n"
  },
  {
    "path": "hscontrol/state/maprequest_test.go",
    "content": "package state\n\nimport (\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/tailcfg\"\n)\n\nfunc TestNetInfoFromMapRequest(t *testing.T) {\n\tnodeID := types.NodeID(1)\n\n\ttests := []struct {\n\t\tname            string\n\t\tcurrentHostinfo *tailcfg.Hostinfo\n\t\treqHostinfo     *tailcfg.Hostinfo\n\t\texpectNetInfo   *tailcfg.NetInfo\n\t}{\n\t\t{\n\t\t\tname:            \"no current NetInfo - return nil\",\n\t\t\tcurrentHostinfo: nil,\n\t\t\treqHostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"test-node\",\n\t\t\t},\n\t\t\texpectNetInfo: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"current has NetInfo, request has NetInfo - use request\",\n\t\t\tcurrentHostinfo: &tailcfg.Hostinfo{\n\t\t\t\tNetInfo: &tailcfg.NetInfo{PreferredDERP: 1},\n\t\t\t},\n\t\t\treqHostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"test-node\",\n\t\t\t\tNetInfo:  &tailcfg.NetInfo{PreferredDERP: 2},\n\t\t\t},\n\t\t\texpectNetInfo: &tailcfg.NetInfo{PreferredDERP: 2},\n\t\t},\n\t\t{\n\t\t\tname: \"current has NetInfo, request has no NetInfo - use current\",\n\t\t\tcurrentHostinfo: &tailcfg.Hostinfo{\n\t\t\t\tNetInfo: &tailcfg.NetInfo{PreferredDERP: 3},\n\t\t\t},\n\t\t\treqHostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"test-node\",\n\t\t\t},\n\t\t\texpectNetInfo: &tailcfg.NetInfo{PreferredDERP: 3},\n\t\t},\n\t\t{\n\t\t\tname: \"current has NetInfo, no request Hostinfo - use current\",\n\t\t\tcurrentHostinfo: &tailcfg.Hostinfo{\n\t\t\t\tNetInfo: &tailcfg.NetInfo{PreferredDERP: 4},\n\t\t\t},\n\t\t\treqHostinfo:   nil,\n\t\t\texpectNetInfo: &tailcfg.NetInfo{PreferredDERP: 4},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tresult := netInfoFromMapRequest(nodeID, tt.currentHostinfo, tt.reqHostinfo)\n\n\t\t\tif tt.expectNetInfo == nil {\n\t\t\t\tassert.Nil(t, result, \"expected nil NetInfo\")\n\t\t\t} else {\n\t\t\t\trequire.NotNil(t, result, \"expected non-nil NetInfo\")\n\t\t\t\tassert.Equal(t, tt.expectNetInfo.PreferredDERP, result.PreferredDERP, \"DERP mismatch\")\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestNetInfoPreservationInRegistrationFlow(t *testing.T) {\n\tnodeID := types.NodeID(1)\n\n\t// This test reproduces the bug in registration flows where NetInfo was lost\n\t// because we used the wrong hostinfo reference when calling NetInfoFromMapRequest\n\tt.Run(\"registration_flow_bug_reproduction\", func(t *testing.T) {\n\t\t// Simulate existing node with NetInfo (before re-registration)\n\t\texistingNodeHostinfo := &tailcfg.Hostinfo{\n\t\t\tHostname: \"test-node\",\n\t\t\tNetInfo:  &tailcfg.NetInfo{PreferredDERP: 5},\n\t\t}\n\n\t\t// Simulate new registration request (no NetInfo)\n\t\tnewRegistrationHostinfo := &tailcfg.Hostinfo{\n\t\t\tHostname: \"test-node\",\n\t\t\tOS:       \"linux\",\n\t\t\t// NetInfo is nil - this is what comes from the registration request\n\t\t}\n\n\t\t// Simulate what was happening in the bug: we passed the \"current node being modified\"\n\t\t// hostinfo (which has no NetInfo) instead of the existing node's hostinfo\n\t\tnodeBeingModifiedHostinfo := &tailcfg.Hostinfo{\n\t\t\tHostname: \"test-node\",\n\t\t\t// NetInfo is nil because this node is being modified/reset\n\t\t}\n\n\t\t// BUG: Using the node being modified (no NetInfo) instead of existing node (has NetInfo)\n\t\tbuggyResult := netInfoFromMapRequest(nodeID, nodeBeingModifiedHostinfo, newRegistrationHostinfo)\n\t\tassert.Nil(t, buggyResult, \"Bug: Should return nil 
when using wrong hostinfo reference\")\n\n\t\t// CORRECT: Using the existing node's hostinfo (has NetInfo)\n\t\tcorrectResult := netInfoFromMapRequest(nodeID, existingNodeHostinfo, newRegistrationHostinfo)\n\t\tassert.NotNil(t, correctResult, \"Fix: Should preserve NetInfo when using correct hostinfo reference\")\n\t\tassert.Equal(t, 5, correctResult.PreferredDERP, \"Should preserve the DERP region from existing node\")\n\t})\n\n\tt.Run(\"new_node_creation_for_different_user_should_preserve_netinfo\", func(t *testing.T) {\n\t\t// This test covers the scenario where:\n\t\t// 1. A node exists for user1 with NetInfo\n\t\t// 2. The same machine logs in as user2 (different user)\n\t\t// 3. A NEW node is created for user2 (pre-auth key flow)\n\t\t// 4. The new node should preserve NetInfo from the old node\n\n\t\t// Existing node for user1 with NetInfo\n\t\texistingNodeUser1Hostinfo := &tailcfg.Hostinfo{\n\t\t\tHostname: \"test-node\",\n\t\t\tNetInfo:  &tailcfg.NetInfo{PreferredDERP: 7},\n\t\t}\n\n\t\t// New registration request for user2 (no NetInfo yet)\n\t\tnewNodeUser2Hostinfo := &tailcfg.Hostinfo{\n\t\t\tHostname: \"test-node\",\n\t\t\tOS:       \"linux\",\n\t\t\t// NetInfo is nil - registration request doesn't include it\n\t\t}\n\n\t\t// When creating a new node for user2, we should preserve NetInfo from user1's node\n\t\tresult := netInfoFromMapRequest(types.NodeID(2), existingNodeUser1Hostinfo, newNodeUser2Hostinfo)\n\t\tassert.NotNil(t, result, \"New node for user2 should preserve NetInfo from user1's node\")\n\t\tassert.Equal(t, 7, result.PreferredDERP, \"Should preserve DERP region from existing node\")\n\t})\n}\n"
  },
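  {
    "path": "hscontrol/state/node_store_contract_sketch_test.go",
    "content": "package state\n\n// Illustrative sketch (hypothetical file): demonstrates the NodeStore\n// contract documented in node_store.go. Writes (PutNode, UpdateNode,\n// DeleteNode) block until their batch is committed, so a read issued after\n// a write completes always observes that write, while reads themselves\n// never block.\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestExampleWriteThenRead(t *testing.T) {\n\tstore := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)\n\tstore.Start()\n\tdefer store.Stop()\n\n\tnode := createTestNode(9, 1, \"batch-user\", \"batch-node\")\n\n\t// PutNode returns only after the batch containing it is applied...\n\t_ = store.PutNode(node)\n\n\t// ...so a subsequent read is guaranteed to observe the node.\n\tgot, found := store.GetNode(node.ID)\n\trequire.True(t, found)\n\tassert.Equal(t, node.ID, got.ID())\n\n\t// The same holds for deletes: once DeleteNode returns, the node is gone.\n\tstore.DeleteNode(node.ID)\n\n\t_, found = store.GetNode(node.ID)\n\tassert.False(t, found)\n}\n"
  },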
  {
    "path": "hscontrol/state/node_store.go",
    "content": "package state\n\nimport (\n\t\"fmt\"\n\t\"maps\"\n\t\"strings\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/promauto\"\n\t\"tailscale.com/types/key\"\n\t\"tailscale.com/types/views\"\n)\n\nconst (\n\tput             = 1\n\tdel             = 2\n\tupdate          = 3\n\trebuildPeerMaps = 4\n)\n\nconst prometheusNamespace = \"headscale\"\n\nvar (\n\tnodeStoreOperations = promauto.NewCounterVec(prometheus.CounterOpts{\n\t\tNamespace: prometheusNamespace,\n\t\tName:      \"nodestore_operations_total\",\n\t\tHelp:      \"Total number of NodeStore operations\",\n\t}, []string{\"operation\"})\n\tnodeStoreOperationDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{\n\t\tNamespace: prometheusNamespace,\n\t\tName:      \"nodestore_operation_duration_seconds\",\n\t\tHelp:      \"Duration of NodeStore operations\",\n\t\tBuckets:   prometheus.DefBuckets,\n\t}, []string{\"operation\"})\n\tnodeStoreBatchSize = promauto.NewHistogram(prometheus.HistogramOpts{\n\t\tNamespace: prometheusNamespace,\n\t\tName:      \"nodestore_batch_size\",\n\t\tHelp:      \"Size of NodeStore write batches\",\n\t\tBuckets:   []float64{1, 2, 5, 10, 20, 50, 100},\n\t})\n\tnodeStoreBatchDuration = promauto.NewHistogram(prometheus.HistogramOpts{\n\t\tNamespace: prometheusNamespace,\n\t\tName:      \"nodestore_batch_duration_seconds\",\n\t\tHelp:      \"Duration of NodeStore batch processing\",\n\t\tBuckets:   prometheus.DefBuckets,\n\t})\n\tnodeStoreSnapshotBuildDuration = promauto.NewHistogram(prometheus.HistogramOpts{\n\t\tNamespace: prometheusNamespace,\n\t\tName:      \"nodestore_snapshot_build_duration_seconds\",\n\t\tHelp:      \"Duration of NodeStore snapshot building from nodes\",\n\t\tBuckets:   prometheus.DefBuckets,\n\t})\n\tnodeStoreNodesCount = promauto.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: prometheusNamespace,\n\t\tName:      \"nodestore_nodes\",\n\t\tHelp:      \"Number of nodes in the NodeStore\",\n\t})\n\tnodeStorePeersCalculationDuration = promauto.NewHistogram(prometheus.HistogramOpts{\n\t\tNamespace: prometheusNamespace,\n\t\tName:      \"nodestore_peers_calculation_duration_seconds\",\n\t\tHelp:      \"Duration of peers calculation in NodeStore\",\n\t\tBuckets:   prometheus.DefBuckets,\n\t})\n\tnodeStoreQueueDepth = promauto.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: prometheusNamespace,\n\t\tName:      \"nodestore_queue_depth\",\n\t\tHelp:      \"Current depth of NodeStore write queue\",\n\t})\n)\n\n// NodeStore is a thread-safe store for nodes.\n// It is a copy-on-write structure, replacing the \"snapshot\"\n// when a change to the structure occurs. It is optimised for reads,\n// and while batches are not fast, they are grouped together\n// to do less of the expensive peer calculation if there are many\n// changes rapidly.\n//\n// Writes will block until committed, while reads are never\n// blocked. 
This means the caller of a write operation\n// is responsible for ensuring that an update depending on the write\n// is not issued before the write is complete.\ntype NodeStore struct {\n\tdata atomic.Pointer[Snapshot]\n\n\tpeersFunc  PeersFunc\n\twriteQueue chan work\n\n\tbatchSize    int\n\tbatchTimeout time.Duration\n}\n\nfunc NewNodeStore(allNodes types.Nodes, peersFunc PeersFunc, batchSize int, batchTimeout time.Duration) *NodeStore {\n\tnodes := make(map[types.NodeID]types.Node, len(allNodes))\n\tfor _, n := range allNodes {\n\t\tnodes[n.ID] = *n\n\t}\n\n\tsnap := snapshotFromNodes(nodes, peersFunc)\n\n\tstore := &NodeStore{\n\t\tpeersFunc:    peersFunc,\n\t\tbatchSize:    batchSize,\n\t\tbatchTimeout: batchTimeout,\n\t}\n\tstore.data.Store(&snap)\n\n\t// Initialize node count gauge\n\tnodeStoreNodesCount.Set(float64(len(nodes)))\n\n\treturn store\n}\n\n// Snapshot is the representation of the current state of the NodeStore.\n// It contains all nodes and their relationships.\n// It is a copy-on-write structure, meaning that when a write occurs,\n// a new Snapshot is created with the updated state,\n// and replaces the old one atomically.\ntype Snapshot struct {\n\t// nodesByID is the main source of truth for nodes.\n\tnodesByID map[types.NodeID]types.Node\n\n\t// calculated from nodesByID\n\tnodesByNodeKey    map[key.NodePublic]types.NodeView\n\tnodesByMachineKey map[key.MachinePublic]map[types.UserID]types.NodeView\n\tpeersByNode       map[types.NodeID][]types.NodeView\n\tnodesByUser       map[types.UserID][]types.NodeView\n\tallNodes          []types.NodeView\n}\n\n// PeersFunc is a function that takes a list of nodes and returns a map\n// with the relationships between nodes and their peers.\n// This will typically be used to calculate which nodes can see each other\n// based on the current policy.\ntype PeersFunc func(nodes []types.NodeView) map[types.NodeID][]types.NodeView\n\n// work represents a single operation to be performed on the NodeStore.\ntype work struct {\n\top         int\n\tnodeID     types.NodeID\n\tnode       types.Node\n\tupdateFn   UpdateNodeFunc\n\tresult     chan struct{}\n\tnodeResult chan types.NodeView // Channel to return the resulting node after batch application\n\t// For rebuildPeerMaps operation\n\trebuildResult chan struct{}\n}\n\n// PutNode adds or updates a node in the store.\n// If the node already exists, it will be replaced.\n// If the node does not exist, it will be added.\n// This is a blocking operation that waits for the write to complete.\n// Returns the resulting node after all modifications in the batch have been applied.\nfunc (s *NodeStore) PutNode(n types.Node) types.NodeView {\n\ttimer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues(\"put\"))\n\tdefer timer.ObserveDuration()\n\n\twork := work{\n\t\top:         put,\n\t\tnodeID:     n.ID,\n\t\tnode:       n,\n\t\tresult:     make(chan struct{}),\n\t\tnodeResult: make(chan types.NodeView, 1),\n\t}\n\n\tnodeStoreQueueDepth.Inc()\n\n\ts.writeQueue <- work\n\n\t<-work.result\n\tnodeStoreQueueDepth.Dec()\n\n\tresultNode := <-work.nodeResult\n\n\tnodeStoreOperations.WithLabelValues(\"put\").Inc()\n\n\treturn resultNode\n}\n
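\n// exampleBlockingWrite is an illustrative sketch added for documentation; it\n// is not part of the original API surface, and the node value passed in is\n// hypothetical. It shows the contract described above: PutNode blocks until\n// the batch containing the write has been applied, so the returned view (and\n// any read issued afterwards) already reflects the committed snapshot.\nfunc exampleBlockingWrite(s *NodeStore, n types.Node) types.NodeView {\n\tnv := s.PutNode(n) // blocks until the batch is applied\n\n\t// A read issued after PutNode returns observes the new snapshot.\n\tif got, ok := s.GetNode(n.ID); ok {\n\t\treturn got\n\t}\n\n\treturn nv\n}\n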
\n// UpdateNodeFunc is a function type that takes a pointer to a Node and modifies it.\ntype UpdateNodeFunc func(n *types.Node)\n\n// UpdateNode applies a function to modify a specific node in the store.\n// This is a blocking operation that waits for the write to complete.\n// It is analogous to a database \"transaction\": the caller should collect\n// all the data they want to change and then call this function once;\n// fewer calls are better.\n// Returns the resulting node after all modifications in the batch have been applied.\n//\n// TODO(kradalby): Technically we could have a version of this that modifies the node\n// in the current snapshot if _we know_ that the change will not affect the peer relationships.\n// This is because the main nodesByID map contains the struct, and every other map is using a\n// pointer to the underlying struct. The gotcha with this is that we will need to introduce\n// a lock around the nodesByID map to ensure that no other writes are happening\n// while we are modifying the node. Which means we would need to implement read-write locks\n// on all read operations.\nfunc (s *NodeStore) UpdateNode(nodeID types.NodeID, updateFn func(n *types.Node)) (types.NodeView, bool) {\n\ttimer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues(\"update\"))\n\tdefer timer.ObserveDuration()\n\n\twork := work{\n\t\top:         update,\n\t\tnodeID:     nodeID,\n\t\tupdateFn:   updateFn,\n\t\tresult:     make(chan struct{}),\n\t\tnodeResult: make(chan types.NodeView, 1),\n\t}\n\n\tnodeStoreQueueDepth.Inc()\n\n\ts.writeQueue <- work\n\n\t<-work.result\n\tnodeStoreQueueDepth.Dec()\n\n\tresultNode := <-work.nodeResult\n\n\tnodeStoreOperations.WithLabelValues(\"update\").Inc()\n\n\t// Return the node and whether it exists (is valid)\n\treturn resultNode, resultNode.Valid()\n}\n\n// DeleteNode removes a node from the store by its ID.\n// This is a blocking operation that waits for the write to complete.\nfunc (s *NodeStore) DeleteNode(id types.NodeID) {\n\ttimer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues(\"delete\"))\n\tdefer timer.ObserveDuration()\n\n\twork := work{\n\t\top:     del,\n\t\tnodeID: id,\n\t\tresult: make(chan struct{}),\n\t}\n\n\tnodeStoreQueueDepth.Inc()\n\n\ts.writeQueue <- work\n\n\t<-work.result\n\tnodeStoreQueueDepth.Dec()\n\n\tnodeStoreOperations.WithLabelValues(\"delete\").Inc()\n}\n\n// Start initializes the NodeStore and starts processing the write queue.\nfunc (s *NodeStore) Start() {\n\ts.writeQueue = make(chan work)\n\tgo s.processWrite()\n}\n\n// Stop stops the NodeStore.\nfunc (s *NodeStore) Stop() {\n\tclose(s.writeQueue)\n}\n\n// processWrite processes the write queue in batches.\nfunc (s *NodeStore) processWrite() {\n\tc := time.NewTicker(s.batchTimeout)\n\tdefer c.Stop()\n\n\tbatch := make([]work, 0, s.batchSize)\n\n\tfor {\n\t\tselect {\n\t\tcase w, ok := <-s.writeQueue:\n\t\t\tif !ok {\n\t\t\t\t// Channel closed, apply any remaining batch and exit\n\t\t\t\tif len(batch) != 0 {\n\t\t\t\t\ts.applyBatch(batch)\n\t\t\t\t}\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tbatch = append(batch, w)\n\t\t\tif len(batch) >= s.batchSize {\n\t\t\t\ts.applyBatch(batch)\n\t\t\t\tbatch = batch[:0]\n\n\t\t\t\tc.Reset(s.batchTimeout)\n\t\t\t}\n\t\tcase <-c.C:\n\t\t\tif len(batch) != 0 {\n\t\t\t\ts.applyBatch(batch)\n\t\t\t\tbatch = batch[:0]\n\t\t\t}\n\n\t\t\tc.Reset(s.batchTimeout)\n\t\t}\n\t}\n}\n
\n// applyBatch applies a batch of work to the node store.\n// This means that it takes a copy of the current nodes,\n// then applies the batch of operations to that copy,\n// runs any precomputation needed (like calculating peers),\n// and finally replaces the snapshot in the store with the new one.\n// The replacement of the snapshot is atomic, ensuring that reads\n// are never blocked by writes.\n// Each write item is blocked until the batch is applied to ensure\n// the caller knows the operation is complete and does not send any\n// updates that depend on a read that has not yet been written.\nfunc (s *NodeStore) applyBatch(batch []work) {\n\ttimer := prometheus.NewTimer(nodeStoreBatchDuration)\n\tdefer timer.ObserveDuration()\n\n\tnodeStoreBatchSize.Observe(float64(len(batch)))\n\n\tnodes := make(map[types.NodeID]types.Node)\n\tmaps.Copy(nodes, s.data.Load().nodesByID)\n\n\t// Track which work items need node results\n\tnodeResultRequests := make(map[types.NodeID][]*work)\n\n\t// Track rebuildPeerMaps operations\n\tvar rebuildOps []*work\n\n\tfor i := range batch {\n\t\tw := &batch[i]\n\t\tswitch w.op {\n\t\tcase put:\n\t\t\tnodes[w.nodeID] = w.node\n\t\t\tif w.nodeResult != nil {\n\t\t\t\tnodeResultRequests[w.nodeID] = append(nodeResultRequests[w.nodeID], w)\n\t\t\t}\n\t\tcase update:\n\t\t\t// Update the specific node identified by nodeID\n\t\t\tif n, exists := nodes[w.nodeID]; exists {\n\t\t\t\tw.updateFn(&n)\n\t\t\t\tnodes[w.nodeID] = n\n\t\t\t}\n\n\t\t\tif w.nodeResult != nil {\n\t\t\t\tnodeResultRequests[w.nodeID] = append(nodeResultRequests[w.nodeID], w)\n\t\t\t}\n\t\tcase del:\n\t\t\tdelete(nodes, w.nodeID)\n\t\t\t// For delete operations, send an invalid NodeView if requested\n\t\t\tif w.nodeResult != nil {\n\t\t\t\tnodeResultRequests[w.nodeID] = append(nodeResultRequests[w.nodeID], w)\n\t\t\t}\n\t\tcase rebuildPeerMaps:\n\t\t\t// rebuildPeerMaps doesn't modify nodes, it just forces the snapshot rebuild\n\t\t\t// below to recalculate peer relationships using the current peersFunc\n\t\t\trebuildOps = append(rebuildOps, w)\n\t\t}\n\t}\n\n\tnewSnap := snapshotFromNodes(nodes, s.peersFunc)\n\ts.data.Store(&newSnap)\n\n\t// Update node count gauge\n\tnodeStoreNodesCount.Set(float64(len(nodes)))\n\n\t// Send the resulting nodes to all work items that requested them\n\tfor nodeID, workItems := range nodeResultRequests {\n\t\tif node, exists := nodes[nodeID]; exists {\n\t\t\tnodeView := node.View()\n\t\t\tfor _, w := range workItems {\n\t\t\t\tw.nodeResult <- nodeView\n\n\t\t\t\tclose(w.nodeResult)\n\t\t\t}\n\t\t} else {\n\t\t\t// Node was deleted or doesn't exist\n\t\t\tfor _, w := range workItems {\n\t\t\t\tw.nodeResult <- types.NodeView{} // Send invalid view\n\n\t\t\t\tclose(w.nodeResult)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Signal completion for rebuildPeerMaps operations\n\tfor _, w := range rebuildOps {\n\t\tclose(w.rebuildResult)\n\t}\n\n\t// Signal completion for all other work items\n\tfor _, w := range batch {\n\t\tif w.op != rebuildPeerMaps {\n\t\t\tclose(w.result)\n\t\t}\n\t}\n}\n
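\n// exampleLockFreeRead is an illustrative sketch added for documentation; it\n// is not part of the original API surface. It demonstrates the copy-on-write\n// property described above: readers Load() an immutable snapshot and are\n// never blocked, even if applyBatch swaps in a replacement concurrently.\nfunc exampleLockFreeRead(s *NodeStore) (before, after int) {\n\tsnapBefore := s.data.Load() // pin the current immutable snapshot\n\n\t// A concurrent write batch may Store() a new snapshot here; snapBefore\n\t// is unaffected because snapshots are never mutated in place.\n\tsnapAfter := s.data.Load()\n\n\treturn len(snapBefore.nodesByID), len(snapAfter.nodesByID)\n}\n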
\n// snapshotFromNodes creates a new Snapshot from the provided nodes.\n// It builds a number of \"indexes\" to make lookups fast for data that\n// is used frequently, like nodesByNodeKey, peersByNode, and nodesByUser.\n// This is not a fast operation; it is the \"slow\" part of our copy-on-write\n// structure, but it allows us to have fast reads and efficient lookups.\nfunc snapshotFromNodes(nodes map[types.NodeID]types.Node, peersFunc PeersFunc) Snapshot {\n\ttimer := prometheus.NewTimer(nodeStoreSnapshotBuildDuration)\n\tdefer timer.ObserveDuration()\n\n\tallNodes := make([]types.NodeView, 0, len(nodes))\n\tfor _, n := range nodes {\n\t\tallNodes = append(allNodes, n.View())\n\t}\n\n\tnewSnap := Snapshot{\n\t\tnodesByID:         nodes,\n\t\tallNodes:          allNodes,\n\t\tnodesByNodeKey:    make(map[key.NodePublic]types.NodeView),\n\t\tnodesByMachineKey: make(map[key.MachinePublic]map[types.UserID]types.NodeView),\n\n\t\t// peersByNode is most likely the most expensive operation,\n\t\t// it will use the list of all nodes, combined with the\n\t\t// current policy to precalculate which nodes are peers and\n\t\t// can see each other.\n\t\tpeersByNode: func() map[types.NodeID][]types.NodeView {\n\t\t\tpeersTimer := prometheus.NewTimer(nodeStorePeersCalculationDuration)\n\t\t\tdefer peersTimer.ObserveDuration()\n\n\t\t\treturn peersFunc(allNodes)\n\t\t}(),\n\t\tnodesByUser: make(map[types.UserID][]types.NodeView),\n\t}\n\n\t// Build nodesByUser, nodesByNodeKey, and nodesByMachineKey maps\n\tfor _, n := range nodes {\n\t\tnodeView := n.View()\n\t\tuserID := n.TypedUserID()\n\n\t\t// Tagged nodes are owned by their tags, not a user,\n\t\t// so they are not indexed by user.\n\t\tif !n.IsTagged() {\n\t\t\tnewSnap.nodesByUser[userID] = append(newSnap.nodesByUser[userID], nodeView)\n\t\t}\n\n\t\tnewSnap.nodesByNodeKey[n.NodeKey] = nodeView\n\n\t\t// Build machine key index\n\t\tif newSnap.nodesByMachineKey[n.MachineKey] == nil {\n\t\t\tnewSnap.nodesByMachineKey[n.MachineKey] = make(map[types.UserID]types.NodeView)\n\t\t}\n\n\t\tnewSnap.nodesByMachineKey[n.MachineKey][userID] = nodeView\n\t}\n\n\treturn newSnap\n}\n\n// GetNode retrieves a node by its ID.\n// The bool indicates whether the node exists (analogous to a \"not found\" error).\n// The returned NodeView must still be checked with .Valid(): an invalid view\n// indicates a broken node rather than a missing one.\nfunc (s *NodeStore) GetNode(id types.NodeID) (types.NodeView, bool) {\n\ttimer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues(\"get\"))\n\tdefer timer.ObserveDuration()\n\n\tnodeStoreOperations.WithLabelValues(\"get\").Inc()\n\n\tn, exists := s.data.Load().nodesByID[id]\n\tif !exists {\n\t\treturn types.NodeView{}, false\n\t}\n\n\treturn n.View(), true\n}\n\n// GetNodeByNodeKey retrieves a node by its NodeKey.\n// The bool indicates whether the node exists (analogous to a \"not found\" error).\n// The returned NodeView must still be checked with .Valid(): an invalid view\n// indicates a broken node rather than a missing one.\nfunc (s *NodeStore) GetNodeByNodeKey(nodeKey key.NodePublic) (types.NodeView, bool) {\n\ttimer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues(\"get_by_key\"))\n\tdefer timer.ObserveDuration()\n\n\tnodeStoreOperations.WithLabelValues(\"get_by_key\").Inc()\n\n\tnodeView, exists := s.data.Load().nodesByNodeKey[nodeKey]\n\n\treturn nodeView, exists\n}\n\n// GetNodeByMachineKey returns a node by its machine key and user ID. The bool indicates if the node exists.\nfunc (s *NodeStore) GetNodeByMachineKey(machineKey key.MachinePublic, userID types.UserID) (types.NodeView, bool) {\n\ttimer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues(\"get_by_machine_key\"))\n\tdefer timer.ObserveDuration()\n\n\tnodeStoreOperations.WithLabelValues(\"get_by_machine_key\").Inc()\n\n\tsnapshot := s.data.Load()\n\tif userMap, exists := snapshot.nodesByMachineKey[machineKey]; exists {\n\t\tif node, exists := userMap[userID]; exists {\n\t\t\treturn node, true\n\t\t}\n\t}\n\n\treturn types.NodeView{}, false\n}\n
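\n// exampleLookupCheck is an illustrative sketch added for documentation; it\n// is not part of the original API surface. It spells out the two checks the\n// lookup docs above require: the bool reports existence, while Valid()\n// guards against a broken view.\nfunc exampleLookupCheck(s *NodeStore, id types.NodeID) (string, bool) {\n\tnv, ok := s.GetNode(id)\n\tif !ok {\n\t\treturn \"\", false // no node with this ID in the current snapshot\n\t}\n\n\tif !nv.Valid() {\n\t\treturn \"\", false // node present but the view is unusable\n\t}\n\n\treturn nv.Hostname(), true\n}\n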
\n// GetNodeByMachineKeyAnyUser returns the first node with the given machine key,\n// regardless of which user it belongs to. This is useful for scenarios like\n// transferring a node to a different user when re-authenticating with a\n// different user's auth key.\n// If multiple nodes exist with the same machine key (different users), the\n// first one found is returned (order is not guaranteed).\nfunc (s *NodeStore) GetNodeByMachineKeyAnyUser(machineKey key.MachinePublic) (types.NodeView, bool) {\n\ttimer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues(\"get_by_machine_key_any_user\"))\n\tdefer timer.ObserveDuration()\n\n\tnodeStoreOperations.WithLabelValues(\"get_by_machine_key_any_user\").Inc()\n\n\tsnapshot := s.data.Load()\n\tif userMap, exists := snapshot.nodesByMachineKey[machineKey]; exists {\n\t\t// Return the first node found (order not guaranteed due to map iteration)\n\t\tfor _, node := range userMap {\n\t\t\treturn node, true\n\t\t}\n\t}\n\n\treturn types.NodeView{}, false\n}\n\n// DebugString returns debug information about the NodeStore.\nfunc (s *NodeStore) DebugString() string {\n\tsnapshot := s.data.Load()\n\n\tvar sb strings.Builder\n\n\tsb.WriteString(\"=== NodeStore Debug Information ===\\n\\n\")\n\n\t// Basic counts\n\tsb.WriteString(fmt.Sprintf(\"Total Nodes: %d\\n\", len(snapshot.nodesByID)))\n\tsb.WriteString(fmt.Sprintf(\"Users with Nodes: %d\\n\", len(snapshot.nodesByUser)))\n\tsb.WriteString(\"\\n\")\n\n\t// User distribution (shows internal UserID tracking, not display owner)\n\tsb.WriteString(\"Nodes by Internal User ID:\\n\")\n\n\tfor userID, nodes := range snapshot.nodesByUser {\n\t\tif len(nodes) > 0 {\n\t\t\tuserName := \"unknown\"\n\n\t\t\tif nodes[0].Valid() && nodes[0].User().Valid() {\n\t\t\t\tuserName = nodes[0].User().Name()\n\t\t\t}\n\n\t\t\tsb.WriteString(fmt.Sprintf(\"  - User %d (%s): %d nodes\\n\", userID, userName, len(nodes)))\n\t\t}\n\t}\n\n\tsb.WriteString(\"\\n\")\n\n\t// Peer relationships summary\n\tsb.WriteString(\"Peer Relationships:\\n\")\n\n\ttotalPeers := 0\n\n\tfor nodeID, peers := range snapshot.peersByNode {\n\t\tpeerCount := len(peers)\n\n\t\ttotalPeers += peerCount\n\t\tif node, exists := snapshot.nodesByID[nodeID]; exists {\n\t\t\tsb.WriteString(fmt.Sprintf(\"  - Node %d (%s): %d peers\\n\",\n\t\t\t\tnodeID, node.Hostname, peerCount))\n\t\t}\n\t}\n\n\tif len(snapshot.peersByNode) > 0 {\n\t\tavgPeers := float64(totalPeers) / float64(len(snapshot.peersByNode))\n\t\tsb.WriteString(fmt.Sprintf(\"  - Average peers per node: %.1f\\n\", avgPeers))\n\t}\n\n\tsb.WriteString(\"\\n\")\n\n\t// Node key index\n\tsb.WriteString(fmt.Sprintf(\"NodeKey Index: %d entries\\n\", len(snapshot.nodesByNodeKey)))\n\tsb.WriteString(\"\\n\")\n\n\treturn sb.String()\n}\n\n// ListNodes returns a slice of all nodes in the store.\nfunc (s *NodeStore) ListNodes() views.Slice[types.NodeView] {\n\ttimer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues(\"list\"))\n\tdefer timer.ObserveDuration()\n\n\tnodeStoreOperations.WithLabelValues(\"list\").Inc()\n\n\treturn views.SliceOf(s.data.Load().allNodes)\n}\n\n// ListPeers returns a slice of all peers for a given node ID.\nfunc (s *NodeStore) ListPeers(id types.NodeID) views.Slice[types.NodeView] {\n\ttimer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues(\"list_peers\"))\n\tdefer timer.ObserveDuration()\n\n\tnodeStoreOperations.WithLabelValues(\"list_peers\").Inc()\n\n\treturn views.SliceOf(s.data.Load().peersByNode[id])\n}\n
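\n// examplePolicyReload is an illustrative sketch added for documentation; it\n// is not part of the original API surface, and applyPolicy is a hypothetical\n// stand-in for however the caller updates the PolicyManager. It shows the\n// ordering the RebuildPeerMaps docs below require: update the policy state\n// first, then rebuild so that peersFunc runs against the new filters.\nfunc examplePolicyReload(s *NodeStore, applyPolicy func() error) error {\n\tif err := applyPolicy(); err != nil { // swap in the new policy/filters\n\t\treturn err\n\t}\n\n\ts.RebuildPeerMaps() // recompute peer relationships under the new policy\n\n\treturn nil\n}\n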
\n// RebuildPeerMaps rebuilds the peer relationship map using the current peersFunc.\n// This must be called after policy changes because peersFunc uses PolicyManager's\n// filters to determine which nodes can see each other. Without rebuilding, the\n// peer map would use stale filter data until the next node add/delete.\nfunc (s *NodeStore) RebuildPeerMaps() {\n\tresult := make(chan struct{})\n\n\tw := work{\n\t\top:            rebuildPeerMaps,\n\t\trebuildResult: result,\n\t}\n\n\ts.writeQueue <- w\n\n\t<-result\n}\n\n// ListNodesByUser returns a slice of all nodes for a given user ID.\nfunc (s *NodeStore) ListNodesByUser(uid types.UserID) views.Slice[types.NodeView] {\n\ttimer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues(\"list_by_user\"))\n\tdefer timer.ObserveDuration()\n\n\tnodeStoreOperations.WithLabelValues(\"list_by_user\").Inc()\n\n\treturn views.SliceOf(s.data.Load().nodesByUser[uid])\n}\n"
  },
  {
    "path": "hscontrol/state/node_store_test.go",
    "content": "package state\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/netip\"\n\t\"runtime\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/types/key\"\n)\n\nfunc TestSnapshotFromNodes(t *testing.T) {\n\ttests := []struct {\n\t\tname      string\n\t\tsetupFunc func() (map[types.NodeID]types.Node, PeersFunc)\n\t\tvalidate  func(t *testing.T, nodes map[types.NodeID]types.Node, snapshot Snapshot)\n\t}{\n\t\t{\n\t\t\tname: \"empty nodes\",\n\t\t\tsetupFunc: func() (map[types.NodeID]types.Node, PeersFunc) {\n\t\t\t\tnodes := make(map[types.NodeID]types.Node)\n\t\t\t\tpeersFunc := func(nodes []types.NodeView) map[types.NodeID][]types.NodeView {\n\t\t\t\t\treturn make(map[types.NodeID][]types.NodeView)\n\t\t\t\t}\n\n\t\t\t\treturn nodes, peersFunc\n\t\t\t},\n\t\t\tvalidate: func(t *testing.T, nodes map[types.NodeID]types.Node, snapshot Snapshot) { //nolint:thelper\n\t\t\t\tassert.Empty(t, snapshot.nodesByID)\n\t\t\t\tassert.Empty(t, snapshot.allNodes)\n\t\t\t\tassert.Empty(t, snapshot.peersByNode)\n\t\t\t\tassert.Empty(t, snapshot.nodesByUser)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"single node\",\n\t\t\tsetupFunc: func() (map[types.NodeID]types.Node, PeersFunc) {\n\t\t\t\tnodes := map[types.NodeID]types.Node{\n\t\t\t\t\t1: createTestNode(1, 1, \"user1\", \"node1\"),\n\t\t\t\t}\n\n\t\t\t\treturn nodes, allowAllPeersFunc\n\t\t\t},\n\t\t\tvalidate: func(t *testing.T, nodes map[types.NodeID]types.Node, snapshot Snapshot) { //nolint:thelper\n\t\t\t\tassert.Len(t, snapshot.nodesByID, 1)\n\t\t\t\tassert.Len(t, snapshot.allNodes, 1)\n\t\t\t\tassert.Len(t, snapshot.peersByNode, 1)\n\t\t\t\tassert.Len(t, snapshot.nodesByUser, 1)\n\n\t\t\t\trequire.Contains(t, snapshot.nodesByID, types.NodeID(1))\n\t\t\t\tassert.Equal(t, nodes[1].ID, snapshot.nodesByID[1].ID)\n\t\t\t\tassert.Empty(t, snapshot.peersByNode[1]) // no other nodes, so no peers\n\t\t\t\tassert.Len(t, snapshot.nodesByUser[1], 1)\n\t\t\t\tassert.Equal(t, types.NodeID(1), snapshot.nodesByUser[1][0].ID())\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiple nodes same user\",\n\t\t\tsetupFunc: func() (map[types.NodeID]types.Node, PeersFunc) {\n\t\t\t\tnodes := map[types.NodeID]types.Node{\n\t\t\t\t\t1: createTestNode(1, 1, \"user1\", \"node1\"),\n\t\t\t\t\t2: createTestNode(2, 1, \"user1\", \"node2\"),\n\t\t\t\t}\n\n\t\t\t\treturn nodes, allowAllPeersFunc\n\t\t\t},\n\t\t\tvalidate: func(t *testing.T, nodes map[types.NodeID]types.Node, snapshot Snapshot) { //nolint:thelper\n\t\t\t\tassert.Len(t, snapshot.nodesByID, 2)\n\t\t\t\tassert.Len(t, snapshot.allNodes, 2)\n\t\t\t\tassert.Len(t, snapshot.peersByNode, 2)\n\t\t\t\tassert.Len(t, snapshot.nodesByUser, 1)\n\n\t\t\t\t// Each node sees the other as peer (but not itself)\n\t\t\t\tassert.Len(t, snapshot.peersByNode[1], 1)\n\t\t\t\tassert.Equal(t, types.NodeID(2), snapshot.peersByNode[1][0].ID())\n\t\t\t\tassert.Len(t, snapshot.peersByNode[2], 1)\n\t\t\t\tassert.Equal(t, types.NodeID(1), snapshot.peersByNode[2][0].ID())\n\t\t\t\tassert.Len(t, snapshot.nodesByUser[1], 2)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiple nodes different users\",\n\t\t\tsetupFunc: func() (map[types.NodeID]types.Node, PeersFunc) {\n\t\t\t\tnodes := map[types.NodeID]types.Node{\n\t\t\t\t\t1: createTestNode(1, 1, \"user1\", \"node1\"),\n\t\t\t\t\t2: createTestNode(2, 2, \"user2\", \"node2\"),\n\t\t\t\t\t3: createTestNode(3, 1, \"user1\", 
\"node3\"),\n\t\t\t\t}\n\n\t\t\t\treturn nodes, allowAllPeersFunc\n\t\t\t},\n\t\t\tvalidate: func(t *testing.T, nodes map[types.NodeID]types.Node, snapshot Snapshot) { //nolint:thelper\n\t\t\t\tassert.Len(t, snapshot.nodesByID, 3)\n\t\t\t\tassert.Len(t, snapshot.allNodes, 3)\n\t\t\t\tassert.Len(t, snapshot.peersByNode, 3)\n\t\t\t\tassert.Len(t, snapshot.nodesByUser, 2)\n\n\t\t\t\t// Each node should have 2 peers (all others, but not itself)\n\t\t\t\tassert.Len(t, snapshot.peersByNode[1], 2)\n\t\t\t\tassert.Len(t, snapshot.peersByNode[2], 2)\n\t\t\t\tassert.Len(t, snapshot.peersByNode[3], 2)\n\n\t\t\t\t// User groupings\n\t\t\t\tassert.Len(t, snapshot.nodesByUser[1], 2) // user1 has nodes 1,3\n\t\t\t\tassert.Len(t, snapshot.nodesByUser[2], 1) // user2 has node 2\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"odd-even peers filtering\",\n\t\t\tsetupFunc: func() (map[types.NodeID]types.Node, PeersFunc) {\n\t\t\t\tnodes := map[types.NodeID]types.Node{\n\t\t\t\t\t1: createTestNode(1, 1, \"user1\", \"node1\"),\n\t\t\t\t\t2: createTestNode(2, 2, \"user2\", \"node2\"),\n\t\t\t\t\t3: createTestNode(3, 3, \"user3\", \"node3\"),\n\t\t\t\t\t4: createTestNode(4, 4, \"user4\", \"node4\"),\n\t\t\t\t}\n\t\t\t\tpeersFunc := oddEvenPeersFunc\n\n\t\t\t\treturn nodes, peersFunc\n\t\t\t},\n\t\t\tvalidate: func(t *testing.T, nodes map[types.NodeID]types.Node, snapshot Snapshot) { //nolint:thelper\n\t\t\t\tassert.Len(t, snapshot.nodesByID, 4)\n\t\t\t\tassert.Len(t, snapshot.allNodes, 4)\n\t\t\t\tassert.Len(t, snapshot.peersByNode, 4)\n\t\t\t\tassert.Len(t, snapshot.nodesByUser, 4)\n\n\t\t\t\t// Odd nodes should only see other odd nodes as peers\n\t\t\t\trequire.Len(t, snapshot.peersByNode[1], 1)\n\t\t\t\tassert.Equal(t, types.NodeID(3), snapshot.peersByNode[1][0].ID())\n\n\t\t\t\trequire.Len(t, snapshot.peersByNode[3], 1)\n\t\t\t\tassert.Equal(t, types.NodeID(1), snapshot.peersByNode[3][0].ID())\n\n\t\t\t\t// Even nodes should only see other even nodes as peers\n\t\t\t\trequire.Len(t, snapshot.peersByNode[2], 1)\n\t\t\t\tassert.Equal(t, types.NodeID(4), snapshot.peersByNode[2][0].ID())\n\n\t\t\t\trequire.Len(t, snapshot.peersByNode[4], 1)\n\t\t\t\tassert.Equal(t, types.NodeID(2), snapshot.peersByNode[4][0].ID())\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tnodes, peersFunc := tt.setupFunc()\n\t\t\tsnapshot := snapshotFromNodes(nodes, peersFunc)\n\t\t\ttt.validate(t, nodes, snapshot)\n\t\t})\n\t}\n}\n\n// Helper functions\n\nfunc createTestNode(nodeID types.NodeID, userID uint, username, hostname string) types.Node {\n\tnow := time.Now()\n\tmachineKey := key.NewMachine()\n\tnodeKey := key.NewNode()\n\tdiscoKey := key.NewDisco()\n\n\tipv4 := netip.MustParseAddr(\"100.64.0.1\")\n\tipv6 := netip.MustParseAddr(\"fd7a:115c:a1e0::1\")\n\n\treturn types.Node{\n\t\tID:         nodeID,\n\t\tMachineKey: machineKey.Public(),\n\t\tNodeKey:    nodeKey.Public(),\n\t\tDiscoKey:   discoKey.Public(),\n\t\tHostname:   hostname,\n\t\tGivenName:  hostname,\n\t\tUserID:     new(userID),\n\t\tUser: &types.User{\n\t\t\tName:        username,\n\t\t\tDisplayName: username,\n\t\t},\n\t\tRegisterMethod: \"test\",\n\t\tIPv4:           &ipv4,\n\t\tIPv6:           &ipv6,\n\t\tCreatedAt:      now,\n\t\tUpdatedAt:      now,\n\t}\n}\n\n// Peer functions\n\nfunc allowAllPeersFunc(nodes []types.NodeView) map[types.NodeID][]types.NodeView {\n\tret := make(map[types.NodeID][]types.NodeView, len(nodes))\n\tfor _, node := range nodes {\n\t\tvar peers []types.NodeView\n\n\t\tfor _, n := range nodes 
{\n\t\t\tif n.ID() != node.ID() {\n\t\t\t\tpeers = append(peers, n)\n\t\t\t}\n\t\t}\n\n\t\tret[node.ID()] = peers\n\t}\n\n\treturn ret\n}\n\nfunc oddEvenPeersFunc(nodes []types.NodeView) map[types.NodeID][]types.NodeView {\n\tret := make(map[types.NodeID][]types.NodeView, len(nodes))\n\tfor _, node := range nodes {\n\t\tvar peers []types.NodeView\n\n\t\tnodeIsOdd := node.ID()%2 == 1\n\n\t\tfor _, n := range nodes {\n\t\t\tif n.ID() == node.ID() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tpeerIsOdd := n.ID()%2 == 1\n\n\t\t\t// Only add peer if both are odd or both are even\n\t\t\tif nodeIsOdd == peerIsOdd {\n\t\t\t\tpeers = append(peers, n)\n\t\t\t}\n\t\t}\n\n\t\tret[node.ID()] = peers\n\t}\n\n\treturn ret\n}\n\nfunc TestNodeStoreOperations(t *testing.T) {\n\ttests := []struct {\n\t\tname      string\n\t\tsetupFunc func(t *testing.T) *NodeStore\n\t\tsteps     []testStep\n\t}{\n\t\t{\n\t\t\tname: \"create empty store and add single node\",\n\t\t\tsetupFunc: func(t *testing.T) *NodeStore { //nolint:thelper\n\t\t\t\treturn NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)\n\t\t\t},\n\t\t\tsteps: []testStep{\n\t\t\t\t{\n\t\t\t\t\tname: \"verify empty store\",\n\t\t\t\t\taction: func(store *NodeStore) {\n\t\t\t\t\t\tsnapshot := store.data.Load()\n\t\t\t\t\t\tassert.Empty(t, snapshot.nodesByID)\n\t\t\t\t\t\tassert.Empty(t, snapshot.allNodes)\n\t\t\t\t\t\tassert.Empty(t, snapshot.peersByNode)\n\t\t\t\t\t\tassert.Empty(t, snapshot.nodesByUser)\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: \"add first node\",\n\t\t\t\t\taction: func(store *NodeStore) {\n\t\t\t\t\t\tnode := createTestNode(1, 1, \"user1\", \"node1\")\n\t\t\t\t\t\tresultNode := store.PutNode(node)\n\t\t\t\t\t\tassert.True(t, resultNode.Valid(), \"PutNode should return valid node\")\n\t\t\t\t\t\tassert.Equal(t, node.ID, resultNode.ID())\n\n\t\t\t\t\t\tsnapshot := store.data.Load()\n\t\t\t\t\t\tassert.Len(t, snapshot.nodesByID, 1)\n\t\t\t\t\t\tassert.Len(t, snapshot.allNodes, 1)\n\t\t\t\t\t\tassert.Len(t, snapshot.peersByNode, 1)\n\t\t\t\t\t\tassert.Len(t, snapshot.nodesByUser, 1)\n\n\t\t\t\t\t\trequire.Contains(t, snapshot.nodesByID, types.NodeID(1))\n\t\t\t\t\t\tassert.Equal(t, node.ID, snapshot.nodesByID[1].ID)\n\t\t\t\t\t\tassert.Empty(t, snapshot.peersByNode[1]) // no peers yet\n\t\t\t\t\t\tassert.Len(t, snapshot.nodesByUser[1], 1)\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"create store with initial node and add more\",\n\t\t\tsetupFunc: func(t *testing.T) *NodeStore { //nolint:thelper\n\t\t\t\tnode1 := createTestNode(1, 1, \"user1\", \"node1\")\n\t\t\t\tinitialNodes := types.Nodes{&node1}\n\n\t\t\t\treturn NewNodeStore(initialNodes, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)\n\t\t\t},\n\t\t\tsteps: []testStep{\n\t\t\t\t{\n\t\t\t\t\tname: \"verify initial state\",\n\t\t\t\t\taction: func(store *NodeStore) {\n\t\t\t\t\t\tsnapshot := store.data.Load()\n\t\t\t\t\t\tassert.Len(t, snapshot.nodesByID, 1)\n\t\t\t\t\t\tassert.Len(t, snapshot.allNodes, 1)\n\t\t\t\t\t\tassert.Len(t, snapshot.peersByNode, 1)\n\t\t\t\t\t\tassert.Len(t, snapshot.nodesByUser, 1)\n\t\t\t\t\t\tassert.Empty(t, snapshot.peersByNode[1])\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: \"add second node same user\",\n\t\t\t\t\taction: func(store *NodeStore) {\n\t\t\t\t\t\tnode2 := createTestNode(2, 1, \"user1\", \"node2\")\n\t\t\t\t\t\tresultNode := store.PutNode(node2)\n\t\t\t\t\t\tassert.True(t, resultNode.Valid(), \"PutNode should return valid node\")\n\t\t\t\t\t\tassert.Equal(t, types.NodeID(2), 
resultNode.ID())\n\n\t\t\t\t\t\tsnapshot := store.data.Load()\n\t\t\t\t\t\tassert.Len(t, snapshot.nodesByID, 2)\n\t\t\t\t\t\tassert.Len(t, snapshot.allNodes, 2)\n\t\t\t\t\t\tassert.Len(t, snapshot.peersByNode, 2)\n\t\t\t\t\t\tassert.Len(t, snapshot.nodesByUser, 1)\n\n\t\t\t\t\t\t// Now both nodes should see each other as peers\n\t\t\t\t\t\tassert.Len(t, snapshot.peersByNode[1], 1)\n\t\t\t\t\t\tassert.Equal(t, types.NodeID(2), snapshot.peersByNode[1][0].ID())\n\t\t\t\t\t\tassert.Len(t, snapshot.peersByNode[2], 1)\n\t\t\t\t\t\tassert.Equal(t, types.NodeID(1), snapshot.peersByNode[2][0].ID())\n\t\t\t\t\t\tassert.Len(t, snapshot.nodesByUser[1], 2)\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: \"add third node different user\",\n\t\t\t\t\taction: func(store *NodeStore) {\n\t\t\t\t\t\tnode3 := createTestNode(3, 2, \"user2\", \"node3\")\n\t\t\t\t\t\tresultNode := store.PutNode(node3)\n\t\t\t\t\t\tassert.True(t, resultNode.Valid(), \"PutNode should return valid node\")\n\t\t\t\t\t\tassert.Equal(t, types.NodeID(3), resultNode.ID())\n\n\t\t\t\t\t\tsnapshot := store.data.Load()\n\t\t\t\t\t\tassert.Len(t, snapshot.nodesByID, 3)\n\t\t\t\t\t\tassert.Len(t, snapshot.allNodes, 3)\n\t\t\t\t\t\tassert.Len(t, snapshot.peersByNode, 3)\n\t\t\t\t\t\tassert.Len(t, snapshot.nodesByUser, 2)\n\n\t\t\t\t\t\t// All nodes should see the other 2 as peers\n\t\t\t\t\t\tassert.Len(t, snapshot.peersByNode[1], 2)\n\t\t\t\t\t\tassert.Len(t, snapshot.peersByNode[2], 2)\n\t\t\t\t\t\tassert.Len(t, snapshot.peersByNode[3], 2)\n\n\t\t\t\t\t\t// User groupings\n\t\t\t\t\t\tassert.Len(t, snapshot.nodesByUser[1], 2) // user1 has nodes 1,2\n\t\t\t\t\t\tassert.Len(t, snapshot.nodesByUser[2], 1) // user2 has node 3\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"test node deletion\",\n\t\t\tsetupFunc: func(t *testing.T) *NodeStore { //nolint:thelper\n\t\t\t\tnode1 := createTestNode(1, 1, \"user1\", \"node1\")\n\t\t\t\tnode2 := createTestNode(2, 1, \"user1\", \"node2\")\n\t\t\t\tnode3 := createTestNode(3, 2, \"user2\", \"node3\")\n\t\t\t\tinitialNodes := types.Nodes{&node1, &node2, &node3}\n\n\t\t\t\treturn NewNodeStore(initialNodes, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)\n\t\t\t},\n\t\t\tsteps: []testStep{\n\t\t\t\t{\n\t\t\t\t\tname: \"verify initial 3 nodes\",\n\t\t\t\t\taction: func(store *NodeStore) {\n\t\t\t\t\t\tsnapshot := store.data.Load()\n\t\t\t\t\t\tassert.Len(t, snapshot.nodesByID, 3)\n\t\t\t\t\t\tassert.Len(t, snapshot.allNodes, 3)\n\t\t\t\t\t\tassert.Len(t, snapshot.peersByNode, 3)\n\t\t\t\t\t\tassert.Len(t, snapshot.nodesByUser, 2)\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: \"delete middle node\",\n\t\t\t\t\taction: func(store *NodeStore) {\n\t\t\t\t\t\tstore.DeleteNode(2)\n\n\t\t\t\t\t\tsnapshot := store.data.Load()\n\t\t\t\t\t\tassert.Len(t, snapshot.nodesByID, 2)\n\t\t\t\t\t\tassert.Len(t, snapshot.allNodes, 2)\n\t\t\t\t\t\tassert.Len(t, snapshot.peersByNode, 2)\n\t\t\t\t\t\tassert.Len(t, snapshot.nodesByUser, 2)\n\n\t\t\t\t\t\t// Node 2 should be gone\n\t\t\t\t\t\tassert.NotContains(t, snapshot.nodesByID, types.NodeID(2))\n\n\t\t\t\t\t\t// Remaining nodes should see each other as peers\n\t\t\t\t\t\tassert.Len(t, snapshot.peersByNode[1], 1)\n\t\t\t\t\t\tassert.Equal(t, types.NodeID(3), snapshot.peersByNode[1][0].ID())\n\t\t\t\t\t\tassert.Len(t, snapshot.peersByNode[3], 1)\n\t\t\t\t\t\tassert.Equal(t, types.NodeID(1), snapshot.peersByNode[3][0].ID())\n\n\t\t\t\t\t\t// User groupings updated\n\t\t\t\t\t\tassert.Len(t, snapshot.nodesByUser[1], 1) // user1 now has only node 
1\n\t\t\t\t\t\tassert.Len(t, snapshot.nodesByUser[2], 1) // user2 still has node 3\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: \"delete all remaining nodes\",\n\t\t\t\t\taction: func(store *NodeStore) {\n\t\t\t\t\t\tstore.DeleteNode(1)\n\t\t\t\t\t\tstore.DeleteNode(3)\n\n\t\t\t\t\t\tsnapshot := store.data.Load()\n\t\t\t\t\t\tassert.Empty(t, snapshot.nodesByID)\n\t\t\t\t\t\tassert.Empty(t, snapshot.allNodes)\n\t\t\t\t\t\tassert.Empty(t, snapshot.peersByNode)\n\t\t\t\t\t\tassert.Empty(t, snapshot.nodesByUser)\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"test node updates\",\n\t\t\tsetupFunc: func(t *testing.T) *NodeStore { //nolint:thelper\n\t\t\t\tnode1 := createTestNode(1, 1, \"user1\", \"node1\")\n\t\t\t\tnode2 := createTestNode(2, 1, \"user1\", \"node2\")\n\t\t\t\tinitialNodes := types.Nodes{&node1, &node2}\n\n\t\t\t\treturn NewNodeStore(initialNodes, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)\n\t\t\t},\n\t\t\tsteps: []testStep{\n\t\t\t\t{\n\t\t\t\t\tname: \"verify initial hostnames\",\n\t\t\t\t\taction: func(store *NodeStore) {\n\t\t\t\t\t\tsnapshot := store.data.Load()\n\t\t\t\t\t\tassert.Equal(t, \"node1\", snapshot.nodesByID[1].Hostname)\n\t\t\t\t\t\tassert.Equal(t, \"node2\", snapshot.nodesByID[2].Hostname)\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: \"update node hostname\",\n\t\t\t\t\taction: func(store *NodeStore) {\n\t\t\t\t\t\tresultNode, ok := store.UpdateNode(1, func(n *types.Node) {\n\t\t\t\t\t\t\tn.Hostname = \"updated-node1\"\n\t\t\t\t\t\t\tn.GivenName = \"updated-node1\"\n\t\t\t\t\t\t})\n\t\t\t\t\t\tassert.True(t, ok, \"UpdateNode should return true for existing node\")\n\t\t\t\t\t\tassert.True(t, resultNode.Valid(), \"Result node should be valid\")\n\t\t\t\t\t\tassert.Equal(t, \"updated-node1\", resultNode.Hostname())\n\t\t\t\t\t\tassert.Equal(t, \"updated-node1\", resultNode.GivenName())\n\n\t\t\t\t\t\tsnapshot := store.data.Load()\n\t\t\t\t\t\tassert.Equal(t, \"updated-node1\", snapshot.nodesByID[1].Hostname)\n\t\t\t\t\t\tassert.Equal(t, \"updated-node1\", snapshot.nodesByID[1].GivenName)\n\t\t\t\t\t\tassert.Equal(t, \"node2\", snapshot.nodesByID[2].Hostname) // unchanged\n\n\t\t\t\t\t\t// Peers should still work correctly\n\t\t\t\t\t\tassert.Len(t, snapshot.peersByNode[1], 1)\n\t\t\t\t\t\tassert.Len(t, snapshot.peersByNode[2], 1)\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"test with odd-even peers filtering\",\n\t\t\tsetupFunc: func(t *testing.T) *NodeStore { //nolint:thelper\n\t\t\t\treturn NewNodeStore(nil, oddEvenPeersFunc, TestBatchSize, TestBatchTimeout)\n\t\t\t},\n\t\t\tsteps: []testStep{\n\t\t\t\t{\n\t\t\t\t\tname: \"add nodes with odd-even filtering\",\n\t\t\t\t\taction: func(store *NodeStore) {\n\t\t\t\t\t\t// Add nodes in sequence\n\t\t\t\t\t\tn1 := store.PutNode(createTestNode(1, 1, \"user1\", \"node1\"))\n\t\t\t\t\t\tassert.True(t, n1.Valid())\n\n\t\t\t\t\t\tn2 := store.PutNode(createTestNode(2, 2, \"user2\", \"node2\"))\n\t\t\t\t\t\tassert.True(t, n2.Valid())\n\n\t\t\t\t\t\tn3 := store.PutNode(createTestNode(3, 3, \"user3\", \"node3\"))\n\t\t\t\t\t\tassert.True(t, n3.Valid())\n\n\t\t\t\t\t\tn4 := store.PutNode(createTestNode(4, 4, \"user4\", \"node4\"))\n\t\t\t\t\t\tassert.True(t, n4.Valid())\n\n\t\t\t\t\t\tsnapshot := store.data.Load()\n\t\t\t\t\t\tassert.Len(t, snapshot.nodesByID, 4)\n\n\t\t\t\t\t\t// Verify odd-even peer relationships\n\t\t\t\t\t\trequire.Len(t, snapshot.peersByNode[1], 1)\n\t\t\t\t\t\tassert.Equal(t, types.NodeID(3), 
snapshot.peersByNode[1][0].ID())\n\n\t\t\t\t\t\trequire.Len(t, snapshot.peersByNode[2], 1)\n\t\t\t\t\t\tassert.Equal(t, types.NodeID(4), snapshot.peersByNode[2][0].ID())\n\n\t\t\t\t\t\trequire.Len(t, snapshot.peersByNode[3], 1)\n\t\t\t\t\t\tassert.Equal(t, types.NodeID(1), snapshot.peersByNode[3][0].ID())\n\n\t\t\t\t\t\trequire.Len(t, snapshot.peersByNode[4], 1)\n\t\t\t\t\t\tassert.Equal(t, types.NodeID(2), snapshot.peersByNode[4][0].ID())\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: \"delete odd node and verify even nodes unaffected\",\n\t\t\t\t\taction: func(store *NodeStore) {\n\t\t\t\t\t\tstore.DeleteNode(1)\n\n\t\t\t\t\t\tsnapshot := store.data.Load()\n\t\t\t\t\t\tassert.Len(t, snapshot.nodesByID, 3)\n\n\t\t\t\t\t\t// Node 3 (odd) should now have no peers\n\t\t\t\t\t\tassert.Empty(t, snapshot.peersByNode[3])\n\n\t\t\t\t\t\t// Even nodes should still see each other\n\t\t\t\t\t\trequire.Len(t, snapshot.peersByNode[2], 1)\n\t\t\t\t\t\tassert.Equal(t, types.NodeID(4), snapshot.peersByNode[2][0].ID())\n\t\t\t\t\t\trequire.Len(t, snapshot.peersByNode[4], 1)\n\t\t\t\t\t\tassert.Equal(t, types.NodeID(2), snapshot.peersByNode[4][0].ID())\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"test batch modifications return correct node state\",\n\t\t\tsetupFunc: func(t *testing.T) *NodeStore { //nolint:thelper\n\t\t\t\tnode1 := createTestNode(1, 1, \"user1\", \"node1\")\n\t\t\t\tnode2 := createTestNode(2, 1, \"user1\", \"node2\")\n\t\t\t\tinitialNodes := types.Nodes{&node1, &node2}\n\n\t\t\t\treturn NewNodeStore(initialNodes, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)\n\t\t\t},\n\t\t\tsteps: []testStep{\n\t\t\t\t{\n\t\t\t\t\tname: \"verify initial state\",\n\t\t\t\t\taction: func(store *NodeStore) {\n\t\t\t\t\t\tsnapshot := store.data.Load()\n\t\t\t\t\t\tassert.Len(t, snapshot.nodesByID, 2)\n\t\t\t\t\t\tassert.Equal(t, \"node1\", snapshot.nodesByID[1].Hostname)\n\t\t\t\t\t\tassert.Equal(t, \"node2\", snapshot.nodesByID[2].Hostname)\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: \"concurrent updates should reflect all batch changes\",\n\t\t\t\t\taction: func(store *NodeStore) {\n\t\t\t\t\t\t// Start multiple updates that will be batched together\n\t\t\t\t\t\tdone1 := make(chan struct{})\n\t\t\t\t\t\tdone2 := make(chan struct{})\n\t\t\t\t\t\tdone3 := make(chan struct{})\n\n\t\t\t\t\t\tvar (\n\t\t\t\t\t\t\tresultNode1, resultNode2 types.NodeView\n\t\t\t\t\t\t\tnewNode3                 types.NodeView\n\t\t\t\t\t\t\tok1, ok2                 bool\n\t\t\t\t\t\t)\n\n\t\t\t\t\t\t// These should all be processed in the same batch\n\n\t\t\t\t\t\tgo func() {\n\t\t\t\t\t\t\tresultNode1, ok1 = store.UpdateNode(1, func(n *types.Node) {\n\t\t\t\t\t\t\t\tn.Hostname = \"batch-updated-node1\"\n\t\t\t\t\t\t\t\tn.GivenName = \"batch-given-1\"\n\t\t\t\t\t\t\t})\n\n\t\t\t\t\t\t\tclose(done1)\n\t\t\t\t\t\t}()\n\n\t\t\t\t\t\tgo func() {\n\t\t\t\t\t\t\tresultNode2, ok2 = store.UpdateNode(2, func(n *types.Node) {\n\t\t\t\t\t\t\t\tn.Hostname = \"batch-updated-node2\"\n\t\t\t\t\t\t\t\tn.GivenName = \"batch-given-2\"\n\t\t\t\t\t\t\t})\n\n\t\t\t\t\t\t\tclose(done2)\n\t\t\t\t\t\t}()\n\n\t\t\t\t\t\tgo func() {\n\t\t\t\t\t\t\tnode3 := createTestNode(3, 1, \"user1\", \"node3\")\n\t\t\t\t\t\t\tnewNode3 = store.PutNode(node3)\n\n\t\t\t\t\t\t\tclose(done3)\n\t\t\t\t\t\t}()\n\n\t\t\t\t\t\t// Wait for all operations to complete\n\t\t\t\t\t\t<-done1\n\t\t\t\t\t\t<-done2\n\t\t\t\t\t\t<-done3\n\n\t\t\t\t\t\t// Verify the returned nodes reflect the batch state\n\t\t\t\t\t\tassert.True(t, ok1, 
\"UpdateNode should succeed for node 1\")\n\t\t\t\t\t\tassert.True(t, ok2, \"UpdateNode should succeed for node 2\")\n\t\t\t\t\t\tassert.True(t, resultNode1.Valid())\n\t\t\t\t\t\tassert.True(t, resultNode2.Valid())\n\t\t\t\t\t\tassert.True(t, newNode3.Valid())\n\n\t\t\t\t\t\t// Check that returned nodes have the updated values\n\t\t\t\t\t\tassert.Equal(t, \"batch-updated-node1\", resultNode1.Hostname())\n\t\t\t\t\t\tassert.Equal(t, \"batch-given-1\", resultNode1.GivenName())\n\t\t\t\t\t\tassert.Equal(t, \"batch-updated-node2\", resultNode2.Hostname())\n\t\t\t\t\t\tassert.Equal(t, \"batch-given-2\", resultNode2.GivenName())\n\t\t\t\t\t\tassert.Equal(t, \"node3\", newNode3.Hostname())\n\n\t\t\t\t\t\t// Verify the snapshot also reflects all changes\n\t\t\t\t\t\tsnapshot := store.data.Load()\n\t\t\t\t\t\tassert.Len(t, snapshot.nodesByID, 3)\n\t\t\t\t\t\tassert.Equal(t, \"batch-updated-node1\", snapshot.nodesByID[1].Hostname)\n\t\t\t\t\t\tassert.Equal(t, \"batch-updated-node2\", snapshot.nodesByID[2].Hostname)\n\t\t\t\t\t\tassert.Equal(t, \"node3\", snapshot.nodesByID[3].Hostname)\n\n\t\t\t\t\t\t// Verify peer relationships are updated correctly with new node\n\t\t\t\t\t\tassert.Len(t, snapshot.peersByNode[1], 2) // sees nodes 2 and 3\n\t\t\t\t\t\tassert.Len(t, snapshot.peersByNode[2], 2) // sees nodes 1 and 3\n\t\t\t\t\t\tassert.Len(t, snapshot.peersByNode[3], 2) // sees nodes 1 and 2\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: \"update non-existent node returns invalid view\",\n\t\t\t\t\taction: func(store *NodeStore) {\n\t\t\t\t\t\tresultNode, ok := store.UpdateNode(999, func(n *types.Node) {\n\t\t\t\t\t\t\tn.Hostname = \"should-not-exist\"\n\t\t\t\t\t\t})\n\n\t\t\t\t\t\tassert.False(t, ok, \"UpdateNode should return false for non-existent node\")\n\t\t\t\t\t\tassert.False(t, resultNode.Valid(), \"Result should be invalid NodeView\")\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: \"multiple updates to same node in batch all see final state\",\n\t\t\t\t\taction: func(store *NodeStore) {\n\t\t\t\t\t\t// This test verifies that when multiple updates to the same node\n\t\t\t\t\t\t// are batched together, each returned node reflects ALL changes\n\t\t\t\t\t\t// in the batch, not just the individual update's changes.\n\t\t\t\t\t\tdone1 := make(chan struct{})\n\t\t\t\t\t\tdone2 := make(chan struct{})\n\t\t\t\t\t\tdone3 := make(chan struct{})\n\n\t\t\t\t\t\tvar (\n\t\t\t\t\t\t\tresultNode1, resultNode2, resultNode3 types.NodeView\n\t\t\t\t\t\t\tok1, ok2, ok3                         bool\n\t\t\t\t\t\t)\n\n\t\t\t\t\t\t// These updates all modify node 1 and should be batched together\n\t\t\t\t\t\t// The final state should have all three modifications applied\n\n\t\t\t\t\t\tgo func() {\n\t\t\t\t\t\t\tresultNode1, ok1 = store.UpdateNode(1, func(n *types.Node) {\n\t\t\t\t\t\t\t\tn.Hostname = \"multi-update-hostname\"\n\t\t\t\t\t\t\t})\n\n\t\t\t\t\t\t\tclose(done1)\n\t\t\t\t\t\t}()\n\n\t\t\t\t\t\tgo func() {\n\t\t\t\t\t\t\tresultNode2, ok2 = store.UpdateNode(1, func(n *types.Node) {\n\t\t\t\t\t\t\t\tn.GivenName = \"multi-update-givenname\"\n\t\t\t\t\t\t\t})\n\n\t\t\t\t\t\t\tclose(done2)\n\t\t\t\t\t\t}()\n\n\t\t\t\t\t\tgo func() {\n\t\t\t\t\t\t\tresultNode3, ok3 = store.UpdateNode(1, func(n *types.Node) {\n\t\t\t\t\t\t\t\tn.Tags = []string{\"tag1\", \"tag2\"}\n\t\t\t\t\t\t\t})\n\n\t\t\t\t\t\t\tclose(done3)\n\t\t\t\t\t\t}()\n\n\t\t\t\t\t\t// Wait for all operations to complete\n\t\t\t\t\t\t<-done1\n\t\t\t\t\t\t<-done2\n\t\t\t\t\t\t<-done3\n\n\t\t\t\t\t\t// All updates should 
succeed\n\t\t\t\t\t\tassert.True(t, ok1, \"First update should succeed\")\n\t\t\t\t\t\tassert.True(t, ok2, \"Second update should succeed\")\n\t\t\t\t\t\tassert.True(t, ok3, \"Third update should succeed\")\n\n\t\t\t\t\t\t// CRITICAL: Each returned node should reflect ALL changes from the batch\n\t\t\t\t\t\t// not just the change from its specific update call\n\n\t\t\t\t\t\t// resultNode1 (from hostname update) should also have the givenname and tags changes\n\t\t\t\t\t\tassert.Equal(t, \"multi-update-hostname\", resultNode1.Hostname())\n\t\t\t\t\t\tassert.Equal(t, \"multi-update-givenname\", resultNode1.GivenName())\n\t\t\t\t\t\tassert.Equal(t, []string{\"tag1\", \"tag2\"}, resultNode1.Tags().AsSlice())\n\n\t\t\t\t\t\t// resultNode2 (from givenname update) should also have the hostname and tags changes\n\t\t\t\t\t\tassert.Equal(t, \"multi-update-hostname\", resultNode2.Hostname())\n\t\t\t\t\t\tassert.Equal(t, \"multi-update-givenname\", resultNode2.GivenName())\n\t\t\t\t\t\tassert.Equal(t, []string{\"tag1\", \"tag2\"}, resultNode2.Tags().AsSlice())\n\n\t\t\t\t\t\t// resultNode3 (from tags update) should also have the hostname and givenname changes\n\t\t\t\t\t\tassert.Equal(t, \"multi-update-hostname\", resultNode3.Hostname())\n\t\t\t\t\t\tassert.Equal(t, \"multi-update-givenname\", resultNode3.GivenName())\n\t\t\t\t\t\tassert.Equal(t, []string{\"tag1\", \"tag2\"}, resultNode3.Tags().AsSlice())\n\n\t\t\t\t\t\t// Verify the snapshot also has all changes\n\t\t\t\t\t\tsnapshot := store.data.Load()\n\t\t\t\t\t\tfinalNode := snapshot.nodesByID[1]\n\t\t\t\t\t\tassert.Equal(t, \"multi-update-hostname\", finalNode.Hostname)\n\t\t\t\t\t\tassert.Equal(t, \"multi-update-givenname\", finalNode.GivenName)\n\t\t\t\t\t\tassert.Equal(t, []string{\"tag1\", \"tag2\"}, finalNode.Tags)\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"test UpdateNode result is immutable for database save\",\n\t\t\tsetupFunc: func(t *testing.T) *NodeStore { //nolint:thelper\n\t\t\t\tnode1 := createTestNode(1, 1, \"user1\", \"node1\")\n\t\t\t\tnode2 := createTestNode(2, 1, \"user1\", \"node2\")\n\t\t\t\tinitialNodes := types.Nodes{&node1, &node2}\n\n\t\t\t\treturn NewNodeStore(initialNodes, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)\n\t\t\t},\n\t\t\tsteps: []testStep{\n\t\t\t\t{\n\t\t\t\t\tname: \"verify returned node is complete and consistent\",\n\t\t\t\t\taction: func(store *NodeStore) {\n\t\t\t\t\t\t// Update a node and verify the returned view is complete\n\t\t\t\t\t\tresultNode, ok := store.UpdateNode(1, func(n *types.Node) {\n\t\t\t\t\t\t\tn.Hostname = \"db-save-hostname\"\n\t\t\t\t\t\t\tn.GivenName = \"db-save-given\"\n\t\t\t\t\t\t\tn.Tags = []string{\"db-tag1\", \"db-tag2\"}\n\t\t\t\t\t\t})\n\n\t\t\t\t\t\tassert.True(t, ok, \"UpdateNode should succeed\")\n\t\t\t\t\t\tassert.True(t, resultNode.Valid(), \"Result should be valid\")\n\n\t\t\t\t\t\t// Verify the returned node has all expected values\n\t\t\t\t\t\tassert.Equal(t, \"db-save-hostname\", resultNode.Hostname())\n\t\t\t\t\t\tassert.Equal(t, \"db-save-given\", resultNode.GivenName())\n\t\t\t\t\t\tassert.Equal(t, []string{\"db-tag1\", \"db-tag2\"}, resultNode.Tags().AsSlice())\n\n\t\t\t\t\t\t// Convert to struct as would be done for database save\n\t\t\t\t\t\tnodePtr := resultNode.AsStruct()\n\t\t\t\t\t\tassert.NotNil(t, nodePtr)\n\t\t\t\t\t\tassert.Equal(t, \"db-save-hostname\", nodePtr.Hostname)\n\t\t\t\t\t\tassert.Equal(t, \"db-save-given\", nodePtr.GivenName)\n\t\t\t\t\t\tassert.Equal(t, []string{\"db-tag1\", \"db-tag2\"}, 
nodePtr.Tags)\n\n\t\t\t\t\t\t// Verify the snapshot also reflects the same state\n\t\t\t\t\t\tsnapshot := store.data.Load()\n\t\t\t\t\t\tstoredNode := snapshot.nodesByID[1]\n\t\t\t\t\t\tassert.Equal(t, \"db-save-hostname\", storedNode.Hostname)\n\t\t\t\t\t\tassert.Equal(t, \"db-save-given\", storedNode.GivenName)\n\t\t\t\t\t\tassert.Equal(t, []string{\"db-tag1\", \"db-tag2\"}, storedNode.Tags)\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: \"concurrent updates all return consistent final state for DB save\",\n\t\t\t\t\taction: func(store *NodeStore) {\n\t\t\t\t\t\t// Multiple goroutines updating the same node\n\t\t\t\t\t\t// All should receive the final batch state suitable for DB save\n\t\t\t\t\t\tdone1 := make(chan struct{})\n\t\t\t\t\t\tdone2 := make(chan struct{})\n\t\t\t\t\t\tdone3 := make(chan struct{})\n\n\t\t\t\t\t\tvar (\n\t\t\t\t\t\t\tresult1, result2, result3 types.NodeView\n\t\t\t\t\t\t\tok1, ok2, ok3             bool\n\t\t\t\t\t\t)\n\n\t\t\t\t\t\t// Start concurrent updates\n\n\t\t\t\t\t\tgo func() {\n\t\t\t\t\t\t\tresult1, ok1 = store.UpdateNode(1, func(n *types.Node) {\n\t\t\t\t\t\t\t\tn.Hostname = \"concurrent-db-hostname\"\n\t\t\t\t\t\t\t})\n\n\t\t\t\t\t\t\tclose(done1)\n\t\t\t\t\t\t}()\n\n\t\t\t\t\t\tgo func() {\n\t\t\t\t\t\t\tresult2, ok2 = store.UpdateNode(1, func(n *types.Node) {\n\t\t\t\t\t\t\t\tn.GivenName = \"concurrent-db-given\"\n\t\t\t\t\t\t\t})\n\n\t\t\t\t\t\t\tclose(done2)\n\t\t\t\t\t\t}()\n\n\t\t\t\t\t\tgo func() {\n\t\t\t\t\t\t\tresult3, ok3 = store.UpdateNode(1, func(n *types.Node) {\n\t\t\t\t\t\t\t\tn.Tags = []string{\"concurrent-tag\"}\n\t\t\t\t\t\t\t})\n\n\t\t\t\t\t\t\tclose(done3)\n\t\t\t\t\t\t}()\n\n\t\t\t\t\t\t// Wait for all to complete\n\t\t\t\t\t\t<-done1\n\t\t\t\t\t\t<-done2\n\t\t\t\t\t\t<-done3\n\n\t\t\t\t\t\tassert.True(t, ok1 && ok2 && ok3, \"All updates should succeed\")\n\n\t\t\t\t\t\t// All results should be valid and suitable for database save\n\t\t\t\t\t\tassert.True(t, result1.Valid())\n\t\t\t\t\t\tassert.True(t, result2.Valid())\n\t\t\t\t\t\tassert.True(t, result3.Valid())\n\n\t\t\t\t\t\t// Convert each to struct as would be done for DB save\n\t\t\t\t\t\tnodePtr1 := result1.AsStruct()\n\t\t\t\t\t\tnodePtr2 := result2.AsStruct()\n\t\t\t\t\t\tnodePtr3 := result3.AsStruct()\n\n\t\t\t\t\t\t// All should have the complete final state\n\t\t\t\t\t\tassert.Equal(t, \"concurrent-db-hostname\", nodePtr1.Hostname)\n\t\t\t\t\t\tassert.Equal(t, \"concurrent-db-given\", nodePtr1.GivenName)\n\t\t\t\t\t\tassert.Equal(t, []string{\"concurrent-tag\"}, nodePtr1.Tags)\n\n\t\t\t\t\t\tassert.Equal(t, \"concurrent-db-hostname\", nodePtr2.Hostname)\n\t\t\t\t\t\tassert.Equal(t, \"concurrent-db-given\", nodePtr2.GivenName)\n\t\t\t\t\t\tassert.Equal(t, []string{\"concurrent-tag\"}, nodePtr2.Tags)\n\n\t\t\t\t\t\tassert.Equal(t, \"concurrent-db-hostname\", nodePtr3.Hostname)\n\t\t\t\t\t\tassert.Equal(t, \"concurrent-db-given\", nodePtr3.GivenName)\n\t\t\t\t\t\tassert.Equal(t, []string{\"concurrent-tag\"}, nodePtr3.Tags)\n\n\t\t\t\t\t\t// Verify consistency with stored state\n\t\t\t\t\t\tsnapshot := store.data.Load()\n\t\t\t\t\t\tstoredNode := snapshot.nodesByID[1]\n\t\t\t\t\t\tassert.Equal(t, nodePtr1.Hostname, storedNode.Hostname)\n\t\t\t\t\t\tassert.Equal(t, nodePtr1.GivenName, storedNode.GivenName)\n\t\t\t\t\t\tassert.Equal(t, nodePtr1.Tags, storedNode.Tags)\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: \"verify returned node preserves all fields for DB save\",\n\t\t\t\t\taction: func(store *NodeStore) {\n\t\t\t\t\t\t// Get initial 
state\n\t\t\t\t\t\tsnapshot := store.data.Load()\n\t\t\t\t\t\toriginalNode := snapshot.nodesByID[2]\n\t\t\t\t\t\toriginalIPv4 := originalNode.IPv4\n\t\t\t\t\t\toriginalIPv6 := originalNode.IPv6\n\t\t\t\t\t\toriginalCreatedAt := originalNode.CreatedAt\n\t\t\t\t\t\toriginalUser := originalNode.User\n\n\t\t\t\t\t\t// Update only hostname\n\t\t\t\t\t\tresultNode, ok := store.UpdateNode(2, func(n *types.Node) {\n\t\t\t\t\t\t\tn.Hostname = \"preserve-test-hostname\"\n\t\t\t\t\t\t})\n\n\t\t\t\t\t\tassert.True(t, ok, \"Update should succeed\")\n\n\t\t\t\t\t\t// Convert to struct for DB save\n\t\t\t\t\t\tnodeForDB := resultNode.AsStruct()\n\n\t\t\t\t\t\t// Verify all fields are preserved\n\t\t\t\t\t\tassert.Equal(t, \"preserve-test-hostname\", nodeForDB.Hostname)\n\t\t\t\t\t\tassert.Equal(t, originalIPv4, nodeForDB.IPv4)\n\t\t\t\t\t\tassert.Equal(t, originalIPv6, nodeForDB.IPv6)\n\t\t\t\t\t\tassert.Equal(t, originalCreatedAt, nodeForDB.CreatedAt)\n\t\t\t\t\t\tassert.Equal(t, originalUser.Name, nodeForDB.User.Name)\n\t\t\t\t\t\tassert.Equal(t, types.NodeID(2), nodeForDB.ID)\n\n\t\t\t\t\t\t// These fields should be suitable for direct database save\n\t\t\t\t\t\tassert.NotNil(t, nodeForDB.IPv4)\n\t\t\t\t\t\tassert.NotNil(t, nodeForDB.IPv6)\n\t\t\t\t\t\tassert.False(t, nodeForDB.CreatedAt.IsZero())\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tstore := tt.setupFunc(t)\n\n\t\t\tstore.Start()\n\t\t\tdefer store.Stop()\n\n\t\t\tfor _, step := range tt.steps {\n\t\t\t\tt.Run(step.name, func(t *testing.T) {\n\t\t\t\t\tstep.action(store)\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\ntype testStep struct {\n\tname   string\n\taction func(store *NodeStore)\n}\n\n// --- Additional NodeStore concurrency, batching, race, resource, timeout, and allocation tests ---\n\n// Helper for concurrent test nodes.\nfunc createConcurrentTestNode(id types.NodeID, hostname string) types.Node {\n\tmachineKey := key.NewMachine()\n\tnodeKey := key.NewNode()\n\n\treturn types.Node{\n\t\tID:         id,\n\t\tHostname:   hostname,\n\t\tMachineKey: machineKey.Public(),\n\t\tNodeKey:    nodeKey.Public(),\n\t\tUserID:     new(uint(1)),\n\t\tUser: &types.User{\n\t\t\tName: \"concurrent-test-user\",\n\t\t},\n\t}\n}\n\n// --- Concurrency: concurrent PutNode operations ---.\nfunc TestNodeStoreConcurrentPutNode(t *testing.T) {\n\tconst concurrentOps = 20\n\n\tstore := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)\n\n\tstore.Start()\n\tdefer store.Stop()\n\n\tvar wg sync.WaitGroup\n\n\tresults := make(chan bool, concurrentOps)\n\tfor i := range concurrentOps {\n\t\twg.Add(1)\n\n\t\tgo func(nodeID int) {\n\t\t\tdefer wg.Done()\n\n\t\t\tnode := createConcurrentTestNode(types.NodeID(nodeID), \"concurrent-node\") //nolint:gosec // safe conversion in test\n\n\t\t\tresultNode := store.PutNode(node)\n\t\t\tresults <- resultNode.Valid()\n\t\t}(i + 1)\n\t}\n\n\twg.Wait()\n\tclose(results)\n\n\tsuccessCount := 0\n\n\tfor success := range results {\n\t\tif success {\n\t\t\tsuccessCount++\n\t\t}\n\t}\n\n\trequire.Equal(t, concurrentOps, successCount, \"All concurrent PutNode operations should succeed\")\n}\n\n// --- Batching: concurrent ops fit in one batch ---.\nfunc TestNodeStoreBatchingEfficiency(t *testing.T) {\n\tconst ops = 15 // more than batchSize\n\n\tstore := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)\n\n\tstore.Start()\n\tdefer store.Stop()\n\n\tvar wg sync.WaitGroup\n\n\tresults := make(chan bool, ops)\n\tfor i := 
range ops {\n\t\twg.Add(1)\n\n\t\tgo func(nodeID int) {\n\t\t\tdefer wg.Done()\n\n\t\t\tnode := createConcurrentTestNode(types.NodeID(nodeID), \"batch-node\") //nolint:gosec // test code with small integers\n\n\t\t\tresultNode := store.PutNode(node)\n\t\t\tresults <- resultNode.Valid()\n\t\t}(i + 1)\n\t}\n\n\twg.Wait()\n\tclose(results)\n\n\tsuccessCount := 0\n\n\tfor success := range results {\n\t\tif success {\n\t\t\tsuccessCount++\n\t\t}\n\t}\n\n\trequire.Equal(t, ops, successCount, \"All batch PutNode operations should succeed\")\n}\n\n// --- Race conditions: many goroutines on same node ---.\nfunc TestNodeStoreRaceConditions(t *testing.T) {\n\tstore := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)\n\n\tstore.Start()\n\tdefer store.Stop()\n\n\tnodeID := types.NodeID(1)\n\tnode := createConcurrentTestNode(nodeID, \"race-node\")\n\tresultNode := store.PutNode(node)\n\trequire.True(t, resultNode.Valid())\n\n\tconst (\n\t\tnumGoroutines   = 30\n\t\topsPerGoroutine = 10\n\t)\n\n\tvar wg sync.WaitGroup\n\n\terrors := make(chan error, numGoroutines*opsPerGoroutine)\n\n\tfor i := range numGoroutines {\n\t\twg.Add(1)\n\n\t\tgo func(gid int) {\n\t\t\tdefer wg.Done()\n\n\t\t\tfor j := range opsPerGoroutine {\n\t\t\t\tswitch j % 3 {\n\t\t\t\tcase 0:\n\t\t\t\t\tresultNode, _ := store.UpdateNode(nodeID, func(n *types.Node) {\n\t\t\t\t\t\tn.Hostname = \"race-updated\"\n\t\t\t\t\t})\n\t\t\t\t\tif !resultNode.Valid() {\n\t\t\t\t\t\terrors <- fmt.Errorf(\"UpdateNode failed in goroutine %d, op %d\", gid, j) //nolint:err113\n\t\t\t\t\t}\n\t\t\t\tcase 1:\n\t\t\t\t\tretrieved, found := store.GetNode(nodeID)\n\t\t\t\t\tif !found || !retrieved.Valid() {\n\t\t\t\t\t\terrors <- fmt.Errorf(\"GetNode failed in goroutine %d, op %d\", gid, j) //nolint:err113\n\t\t\t\t\t}\n\t\t\t\tcase 2:\n\t\t\t\t\tnewNode := createConcurrentTestNode(nodeID, \"race-put\")\n\n\t\t\t\t\tresultNode := store.PutNode(newNode)\n\t\t\t\t\tif !resultNode.Valid() {\n\t\t\t\t\t\terrors <- fmt.Errorf(\"PutNode failed in goroutine %d, op %d\", gid, j) //nolint:err113\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}(i)\n\t}\n\n\twg.Wait()\n\tclose(errors)\n\n\terrorCount := 0\n\n\tfor err := range errors {\n\t\tt.Error(err)\n\n\t\terrorCount++\n\t}\n\n\tif errorCount > 0 {\n\t\tt.Fatalf(\"Race condition test failed with %d errors\", errorCount)\n\t}\n}\n\n// --- Resource cleanup: goroutine leak detection ---.\nfunc TestNodeStoreResourceCleanup(t *testing.T) {\n\t// initialGoroutines := runtime.NumGoroutine()\n\tstore := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)\n\n\tstore.Start()\n\tdefer store.Stop()\n\n\t// Wait for store to be ready\n\tvar afterStartGoroutines int\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tafterStartGoroutines = runtime.NumGoroutine()\n\t\tassert.Positive(c, afterStartGoroutines) // Just ensure we have a valid count\n\t}, time.Second, 10*time.Millisecond, \"store should be running\")\n\n\tconst ops = 100\n\tfor i := range ops {\n\t\tnodeID := types.NodeID(i + 1) //nolint:gosec // test code with small integers\n\t\tnode := createConcurrentTestNode(nodeID, \"cleanup-node\")\n\t\tresultNode := store.PutNode(node)\n\t\tassert.True(t, resultNode.Valid())\n\t\tstore.UpdateNode(nodeID, func(n *types.Node) {\n\t\t\tn.Hostname = \"cleanup-updated\"\n\t\t})\n\t\tretrieved, found := store.GetNode(nodeID)\n\t\tassert.True(t, found && retrieved.Valid())\n\n\t\tif i%10 == 9 {\n\t\t\tstore.DeleteNode(nodeID)\n\t\t}\n\t}\n\n\truntime.GC()\n\n\t// Wait for goroutines to settle 
and check for leaks\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tfinalGoroutines := runtime.NumGoroutine()\n\t\tassert.LessOrEqual(c, finalGoroutines, afterStartGoroutines+2,\n\t\t\t\"Potential goroutine leak: started with %d, ended with %d\", afterStartGoroutines, finalGoroutines)\n\t}, time.Second, 10*time.Millisecond, \"goroutines should not leak\")\n}\n\n// --- Timeout/deadlock: operations complete within reasonable time ---.\nfunc TestNodeStoreOperationTimeout(t *testing.T) {\n\tstore := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)\n\n\tstore.Start()\n\tdefer store.Stop()\n\n\tctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)\n\tdefer cancel()\n\n\tconst ops = 30\n\n\tvar wg sync.WaitGroup\n\n\tputResults := make([]error, ops)\n\tupdateResults := make([]error, ops)\n\n\t// Launch all PutNode operations concurrently\n\tfor i := 1; i <= ops; i++ {\n\t\tnodeID := types.NodeID(i) //nolint:gosec // test code with small integers\n\n\t\twg.Add(1)\n\n\t\tgo func(idx int, id types.NodeID) {\n\t\t\tdefer wg.Done()\n\n\t\t\tstartPut := time.Now()\n\t\t\tfmt.Printf(\"[TestNodeStoreOperationTimeout] %s: PutNode(%d) starting\\n\", startPut.Format(\"15:04:05.000\"), id)\n\t\t\tnode := createConcurrentTestNode(id, \"timeout-node\")\n\t\t\tresultNode := store.PutNode(node)\n\t\t\tendPut := time.Now()\n\t\t\tfmt.Printf(\"[TestNodeStoreOperationTimeout] %s: PutNode(%d) finished, valid=%v, duration=%v\\n\", endPut.Format(\"15:04:05.000\"), id, resultNode.Valid(), endPut.Sub(startPut))\n\n\t\t\tif !resultNode.Valid() {\n\t\t\t\tputResults[idx-1] = fmt.Errorf(\"PutNode failed for node %d\", id) //nolint:err113\n\t\t\t}\n\t\t}(i, nodeID)\n\t}\n\n\twg.Wait()\n\n\t// Launch all UpdateNode operations concurrently\n\twg = sync.WaitGroup{}\n\n\tfor i := 1; i <= ops; i++ {\n\t\tnodeID := types.NodeID(i) //nolint:gosec // test code with small integers\n\n\t\twg.Add(1)\n\n\t\tgo func(idx int, id types.NodeID) {\n\t\t\tdefer wg.Done()\n\n\t\t\tstartUpdate := time.Now()\n\t\t\tfmt.Printf(\"[TestNodeStoreOperationTimeout] %s: UpdateNode(%d) starting\\n\", startUpdate.Format(\"15:04:05.000\"), id)\n\t\t\tresultNode, ok := store.UpdateNode(id, func(n *types.Node) {\n\t\t\t\tn.Hostname = \"timeout-updated\"\n\t\t\t})\n\t\t\tendUpdate := time.Now()\n\t\t\tfmt.Printf(\"[TestNodeStoreOperationTimeout] %s: UpdateNode(%d) finished, valid=%v, ok=%v, duration=%v\\n\", endUpdate.Format(\"15:04:05.000\"), id, resultNode.Valid(), ok, endUpdate.Sub(startUpdate))\n\n\t\t\tif !ok || !resultNode.Valid() {\n\t\t\t\tupdateResults[idx-1] = fmt.Errorf(\"UpdateNode failed for node %d\", id) //nolint:err113\n\t\t\t}\n\t\t}(i, nodeID)\n\t}\n\n\tdone := make(chan struct{})\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(done)\n\t}()\n\n\tselect {\n\tcase <-done:\n\t\terrorCount := 0\n\n\t\tfor _, err := range putResults {\n\t\t\tif err != nil {\n\t\t\t\tt.Error(err)\n\n\t\t\t\terrorCount++\n\t\t\t}\n\t\t}\n\n\t\tfor _, err := range updateResults {\n\t\t\tif err != nil {\n\t\t\t\tt.Error(err)\n\n\t\t\t\terrorCount++\n\t\t\t}\n\t\t}\n\n\t\tif errorCount == 0 {\n\t\t\tt.Log(\"All concurrent operations completed successfully within timeout\")\n\t\t} else {\n\t\t\tt.Fatalf(\"Some concurrent operations failed: %d errors\", errorCount)\n\t\t}\n\tcase <-ctx.Done():\n\t\tfmt.Println(\"[TestNodeStoreOperationTimeout] Timeout reached, test failed\")\n\t\tt.Fatal(\"Operations timed out - potential deadlock or resource issue\")\n\t}\n}\n\n// --- Edge case: update non-existent node ---.\nfunc 
TestNodeStoreUpdateNonExistentNode(t *testing.T) {\n\tfor i := range 10 {\n\t\tstore := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)\n\t\tstore.Start()\n\n\t\tnonExistentID := types.NodeID(999 + i) //nolint:gosec // test code with small integers\n\t\tupdateCallCount := 0\n\n\t\tfmt.Printf(\"[TestNodeStoreUpdateNonExistentNode] UpdateNode(%d) starting\\n\", nonExistentID)\n\t\tresultNode, ok := store.UpdateNode(nonExistentID, func(n *types.Node) {\n\t\t\tupdateCallCount++\n\t\t\tn.Hostname = \"should-never-be-called\"\n\t\t})\n\t\tfmt.Printf(\"[TestNodeStoreUpdateNonExistentNode] UpdateNode(%d) finished, valid=%v, ok=%v, updateCallCount=%d\\n\", nonExistentID, resultNode.Valid(), ok, updateCallCount)\n\t\tassert.False(t, ok, \"UpdateNode should return false for non-existent node\")\n\t\tassert.False(t, resultNode.Valid(), \"UpdateNode should return invalid node for non-existent node\")\n\t\tassert.Equal(t, 0, updateCallCount, \"UpdateFn should not be called for non-existent node\")\n\t\tstore.Stop()\n\t}\n}\n\n// --- Allocation benchmark ---.\nfunc BenchmarkNodeStoreAllocations(b *testing.B) {\n\tstore := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)\n\n\tstore.Start()\n\tdefer store.Stop()\n\n\tfor i := 0; b.Loop(); i++ {\n\t\tnodeID := types.NodeID(i + 1) //nolint:gosec // benchmark code with small integers\n\t\tnode := createConcurrentTestNode(nodeID, \"bench-node\")\n\t\tstore.PutNode(node)\n\t\tstore.UpdateNode(nodeID, func(n *types.Node) {\n\t\t\tn.Hostname = \"bench-updated\"\n\t\t})\n\t\tstore.GetNode(nodeID)\n\n\t\tif i%10 == 9 {\n\t\t\tstore.DeleteNode(nodeID)\n\t\t}\n\t}\n}\n\nfunc TestNodeStoreAllocationStats(t *testing.T) {\n\tres := testing.Benchmark(BenchmarkNodeStoreAllocations)\n\tallocs := res.AllocsPerOp()\n\tt.Logf(\"NodeStore allocations per op: %.2f\", float64(allocs))\n}\n\n// TestRebuildPeerMapsWithChangedPeersFunc tests that RebuildPeerMaps correctly\n// rebuilds the peer map when the peersFunc behavior changes.\n// This simulates what happens when SetNodeTags changes node tags and the\n// PolicyManager's matchers are updated, requiring the peer map to be rebuilt.\nfunc TestRebuildPeerMapsWithChangedPeersFunc(t *testing.T) {\n\t// Create a peersFunc whose behavior is toggled via a captured variable.\n\t// Initially it returns all nodes as peers, then we flip it to return no peers.\n\tallowPeers := true\n\n\t// This simulates how PolicyManager.BuildPeerMap works - it reads state\n\t// that can change between calls\n\tdynamicPeersFunc := func(nodes []types.NodeView) map[types.NodeID][]types.NodeView {\n\t\tret := make(map[types.NodeID][]types.NodeView, len(nodes))\n\t\tif allowPeers {\n\t\t\t// Allow all peers\n\t\t\tfor _, node := range nodes {\n\t\t\t\tvar peers []types.NodeView\n\n\t\t\t\tfor _, n := range nodes {\n\t\t\t\t\tif n.ID() != node.ID() {\n\t\t\t\t\t\tpeers = append(peers, n)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tret[node.ID()] = peers\n\t\t\t}\n\t\t} else {\n\t\t\t// Allow no peers\n\t\t\tfor _, node := range nodes {\n\t\t\t\tret[node.ID()] = []types.NodeView{}\n\t\t\t}\n\t\t}\n\n\t\treturn ret\n\t}\n\n\t// Create nodes\n\tnode1 := createTestNode(1, 1, \"user1\", \"node1\")\n\tnode2 := createTestNode(2, 2, \"user2\", \"node2\")\n\tinitialNodes := types.Nodes{&node1, &node2}\n\n\t// Create store with dynamic peersFunc\n\tstore := NewNodeStore(initialNodes, dynamicPeersFunc, TestBatchSize, TestBatchTimeout)\n\n\tstore.Start()\n\tdefer store.Stop()\n\n\t// Initially, nodes should see each other as peers\n
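\t// Note: store.data holds an atomically swapped snapshot; the peer map inside\n\t// it is rebuilt on writes and on explicit RebuildPeerMaps calls, which is\n\t// exactly what this test exercises.\n\tsnapshot := 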
store.data.Load()\n\trequire.Len(t, snapshot.peersByNode[1], 1, \"node1 should have 1 peer initially\")\n\trequire.Len(t, snapshot.peersByNode[2], 1, \"node2 should have 1 peer initially\")\n\trequire.Equal(t, types.NodeID(2), snapshot.peersByNode[1][0].ID())\n\trequire.Equal(t, types.NodeID(1), snapshot.peersByNode[2][0].ID())\n\n\t// Now \"change the policy\" by disabling peers\n\tallowPeers = false\n\n\t// Call RebuildPeerMaps to rebuild with the new behavior\n\tstore.RebuildPeerMaps()\n\n\t// After rebuild, nodes should have no peers\n\tsnapshot = store.data.Load()\n\tassert.Empty(t, snapshot.peersByNode[1], \"node1 should have no peers after rebuild\")\n\tassert.Empty(t, snapshot.peersByNode[2], \"node2 should have no peers after rebuild\")\n\n\t// Verify that ListPeers returns the correct result\n\tpeers1 := store.ListPeers(1)\n\tpeers2 := store.ListPeers(2)\n\n\tassert.Equal(t, 0, peers1.Len(), \"ListPeers for node1 should return empty\")\n\tassert.Equal(t, 0, peers2.Len(), \"ListPeers for node2 should return empty\")\n\n\t// Now re-enable peers and rebuild again\n\tallowPeers = true\n\n\tstore.RebuildPeerMaps()\n\n\t// Nodes should see each other again\n\tsnapshot = store.data.Load()\n\trequire.Len(t, snapshot.peersByNode[1], 1, \"node1 should have 1 peer after re-enabling\")\n\trequire.Len(t, snapshot.peersByNode[2], 1, \"node2 should have 1 peer after re-enabling\")\n\n\tpeers1 = store.ListPeers(1)\n\tpeers2 = store.ListPeers(2)\n\n\tassert.Equal(t, 1, peers1.Len(), \"ListPeers for node1 should return 1\")\n\tassert.Equal(t, 1, peers2.Len(), \"ListPeers for node2 should return 1\")\n}\n"
  },
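  {
    "path": "hscontrol/state/node_store_example_test.go",
    "content": "package state\n\nimport (\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\n// TestNodeStoreLifecycleSketch is an illustrative walkthrough of the basic\n// NodeStore lifecycle rather than a regression test: Start, PutNode,\n// UpdateNode, GetNode, DeleteNode, Stop. It reuses helpers defined by the\n// other tests in this package (createConcurrentTestNode, allowAllPeersFunc,\n// TestBatchSize, TestBatchTimeout); hostnames and IDs are arbitrary.\nfunc TestNodeStoreLifecycleSketch(t *testing.T) {\n\tstore := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)\n\n\tstore.Start()\n\tdefer store.Stop()\n\n\t// Write: PutNode blocks until the batched snapshot rebuild completes.\n\tnode := createConcurrentTestNode(types.NodeID(1), \"sketch-node\")\n\tput := store.PutNode(node)\n\trequire.True(t, put.Valid())\n\n\t// Mutate in place: the bool reports whether the node existed.\n\tupdated, ok := store.UpdateNode(types.NodeID(1), func(n *types.Node) {\n\t\tn.Hostname = \"sketch-renamed\"\n\t})\n\trequire.True(t, ok)\n\tassert.Equal(t, \"sketch-renamed\", updated.AsStruct().Hostname)\n\n\t// Read: GetNode returns the view and whether it was found.\n\tgot, found := store.GetNode(types.NodeID(1))\n\trequire.True(t, found)\n\tassert.True(t, got.Valid())\n\n\t// Delete: subsequent lookups report not found.\n\tstore.DeleteNode(types.NodeID(1))\n\t_, found = store.GetNode(types.NodeID(1))\n\tassert.False(t, found)\n}\n"
  },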
  {
    "path": "hscontrol/state/ssh_check_test.go",
    "content": "package state\n\nimport (\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc newTestStateForSSHCheck() *State {\n\treturn &State{\n\t\tsshCheckAuth: make(map[sshCheckPair]time.Time),\n\t}\n}\n\nfunc TestSSHCheckAuth(t *testing.T) {\n\ts := newTestStateForSSHCheck()\n\n\tsrc := types.NodeID(1)\n\tdst := types.NodeID(2)\n\totherDst := types.NodeID(3)\n\totherSrc := types.NodeID(4)\n\n\t// No record initially\n\t_, ok := s.GetLastSSHAuth(src, dst)\n\trequire.False(t, ok)\n\n\t// Record auth for (src, dst)\n\ts.SetLastSSHAuth(src, dst)\n\n\t// Same src+dst: found\n\tauthTime, ok := s.GetLastSSHAuth(src, dst)\n\trequire.True(t, ok)\n\tassert.WithinDuration(t, time.Now(), authTime, time.Second)\n\n\t// Same src, different dst: not found (auth is per-pair)\n\t_, ok = s.GetLastSSHAuth(src, otherDst)\n\trequire.False(t, ok)\n\n\t// Different src: not found\n\t_, ok = s.GetLastSSHAuth(otherSrc, dst)\n\trequire.False(t, ok)\n}\n\nfunc TestSSHCheckAuthClear(t *testing.T) {\n\ts := newTestStateForSSHCheck()\n\n\ts.SetLastSSHAuth(types.NodeID(1), types.NodeID(2))\n\ts.SetLastSSHAuth(types.NodeID(1), types.NodeID(3))\n\n\t_, ok := s.GetLastSSHAuth(types.NodeID(1), types.NodeID(2))\n\trequire.True(t, ok)\n\n\t_, ok = s.GetLastSSHAuth(types.NodeID(1), types.NodeID(3))\n\trequire.True(t, ok)\n\n\t// Clear\n\ts.ClearSSHCheckAuth()\n\n\t_, ok = s.GetLastSSHAuth(types.NodeID(1), types.NodeID(2))\n\trequire.False(t, ok)\n\n\t_, ok = s.GetLastSSHAuth(types.NodeID(1), types.NodeID(3))\n\trequire.False(t, ok)\n}\n\nfunc TestSSHCheckAuthConcurrent(t *testing.T) {\n\ts := newTestStateForSSHCheck()\n\n\tvar wg sync.WaitGroup\n\n\tfor i := range 100 {\n\t\twg.Go(func() {\n\t\t\tsrc := types.NodeID(uint64(i % 10))   //nolint:gosec\n\t\t\tdst := types.NodeID(uint64(i%5 + 10)) //nolint:gosec\n\n\t\t\ts.SetLastSSHAuth(src, dst)\n\t\t\ts.GetLastSSHAuth(src, dst)\n\t\t})\n\t}\n\n\twg.Wait()\n\n\t// Clear concurrently with reads\n\twg.Add(2)\n\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\ts.ClearSSHCheckAuth()\n\t}()\n\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\ts.GetLastSSHAuth(types.NodeID(1), types.NodeID(2))\n\t}()\n\n\twg.Wait()\n}\n"
  },
  {
    "path": "hscontrol/state/state.go",
    "content": "// Package state provides core state management for Headscale, coordinating\n// between subsystems like database, IP allocation, policy management, and DERP routing.\n\npackage state\n\nimport (\n\t\"cmp\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/netip\"\n\t\"slices\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\thsdb \"github.com/juanfont/headscale/hscontrol/db\"\n\t\"github.com/juanfont/headscale/hscontrol/policy\"\n\t\"github.com/juanfont/headscale/hscontrol/policy/matcher\"\n\t\"github.com/juanfont/headscale/hscontrol/routes\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/types/change\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/juanfont/headscale/hscontrol/util/zlog/zf\"\n\t\"github.com/rs/zerolog\"\n\t\"github.com/rs/zerolog/log\"\n\t\"golang.org/x/sync/errgroup\"\n\t\"gorm.io/gorm\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/key\"\n\t\"tailscale.com/types/views\"\n\tzcache \"zgo.at/zcache/v2\"\n)\n\nconst (\n\t// registerCacheExpiration defines how long node registration entries remain in cache.\n\tregisterCacheExpiration = time.Minute * 15\n\n\t// registerCacheCleanup defines the interval for cleaning up expired cache entries.\n\tregisterCacheCleanup = time.Minute * 20\n\n\t// defaultNodeStoreBatchSize is the default number of write operations to batch\n\t// before rebuilding the in-memory node snapshot.\n\tdefaultNodeStoreBatchSize = 100\n\n\t// defaultNodeStoreBatchTimeout is the default maximum time to wait before\n\t// processing a partial batch of node operations.\n\tdefaultNodeStoreBatchTimeout = 500 * time.Millisecond\n)\n\n// ErrUnsupportedPolicyMode is returned for invalid policy modes. Valid modes are \"file\" and \"db\".\nvar ErrUnsupportedPolicyMode = errors.New(\"unsupported policy mode\")\n\n// ErrNodeNotFound is returned when a node cannot be found by its ID.\nvar ErrNodeNotFound = errors.New(\"node not found\")\n\n// ErrInvalidNodeView is returned when an invalid node view is provided.\nvar ErrInvalidNodeView = errors.New(\"invalid node view provided\")\n\n// ErrNodeNotInNodeStore is returned when a node no longer exists in the NodeStore.\nvar ErrNodeNotInNodeStore = errors.New(\"node no longer exists in NodeStore\")\n\n// ErrNodeNameNotUnique is returned when a node name is not unique.\nvar ErrNodeNameNotUnique = errors.New(\"node name is not unique\")\n\n// ErrRegistrationExpired is returned when a registration has expired.\nvar ErrRegistrationExpired = errors.New(\"registration expired\")\n\n// sshCheckPair identifies a (source, destination) node pair for\n// SSH check auth tracking.\ntype sshCheckPair struct {\n\tSrc types.NodeID\n\tDst types.NodeID\n}\n\n// State manages Headscale's core state, coordinating between database, policy management,\n// IP allocation, and DERP routing. 
All methods are thread-safe.\ntype State struct {\n\t// cfg holds the current Headscale configuration\n\tcfg *types.Config\n\n\t// nodeStore provides an in-memory cache for nodes.\n\tnodeStore *NodeStore\n\n\t// subsystem keeping state\n\t// db provides persistent storage and database operations\n\tdb *hsdb.HSDatabase\n\t// ipAlloc manages IP address allocation for nodes\n\tipAlloc *hsdb.IPAllocator\n\t// derpMap contains the current DERP relay configuration\n\tderpMap atomic.Pointer[tailcfg.DERPMap]\n\t// polMan handles policy evaluation and management\n\tpolMan policy.PolicyManager\n\n\t// authCache caches any pending authentication requests, from either auth type (Web and OIDC).\n\tauthCache *zcache.Cache[types.AuthID, types.AuthRequest]\n\n\t// primaryRoutes tracks primary route assignments for nodes\n\tprimaryRoutes *routes.PrimaryRoutes\n\n\t// connectGen tracks a per-node monotonic generation counter so stale\n\t// Disconnect() calls from old poll sessions are rejected. Connect()\n\t// increments the counter and returns the current value; Disconnect()\n\t// only proceeds when the generation it carries matches the latest.\n\tconnectGen sync.Map // types.NodeID → *atomic.Uint64\n\n\t// sshCheckAuth tracks when source nodes last completed SSH check auth.\n\t//\n\t// For rules without explicit checkPeriod (default 12h), auth covers any\n\t// destination — keyed by (src, Dst=0) where 0 is a sentinel meaning \"any\".\n\t// Ref: \"Once re-authenticated to a destination, the user can access the\n\t// device and any other device in the tailnet without re-verification\n\t// for the next 12 hours.\" — https://tailscale.com/kb/1193/tailscale-ssh\n\t//\n\t// For rules with explicit checkPeriod, auth covers only that specific\n\t// destination — keyed by (src, dst).\n\t// Ref: \"If a different check period is specified for the connection,\n\t// then the user can access specifically this device without\n\t// re-verification for the duration of the check period.\"\n\t//\n\t// Ref: https://github.com/tailscale/tailscale/issues/10480\n\t// Ref: https://github.com/tailscale/tailscale/issues/7125\n\tsshCheckAuth map[sshCheckPair]time.Time\n\tsshCheckMu   sync.RWMutex\n}\n\n// NewState creates and initializes a new State instance, setting up the database,\n// IP allocator, DERP map, policy manager, and loading existing users and nodes.\nfunc NewState(cfg *types.Config) (*State, error) {\n\tcacheExpiration := registerCacheExpiration\n\tif cfg.Tuning.RegisterCacheExpiration != 0 {\n\t\tcacheExpiration = cfg.Tuning.RegisterCacheExpiration\n\t}\n\n\tcacheCleanup := registerCacheCleanup\n\tif cfg.Tuning.RegisterCacheCleanup != 0 {\n\t\tcacheCleanup = cfg.Tuning.RegisterCacheCleanup\n\t}\n\n\tauthCache := zcache.New[types.AuthID, types.AuthRequest](\n\t\tcacheExpiration,\n\t\tcacheCleanup,\n\t)\n\n\tauthCache.OnEvicted(\n\t\tfunc(id types.AuthID, rn types.AuthRequest) {\n\t\t\trn.FinishAuth(types.AuthVerdict{Err: ErrRegistrationExpired})\n\t\t},\n\t)\n\n\tdb, err := hsdb.NewHeadscaleDatabase(\n\t\tcfg,\n\t\tauthCache,\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"initializing database: %w\", err)\n\t}\n\n\tipAlloc, err := hsdb.NewIPAllocator(db, cfg.PrefixV4, cfg.PrefixV6, cfg.IPAllocation)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"initializing IP allocator: %w\", err)\n\t}\n\n\tnodes, err := db.ListNodes()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"loading nodes: %w\", err)\n\t}\n\n\t// On startup, all nodes should be marked as offline until they reconnect\n\t// This ensures we don't 
have stale online status from previous runs\n\tfor _, node := range nodes {\n\t\tnode.IsOnline = new(false)\n\t}\n\n\tusers, err := db.ListUsers()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"loading users: %w\", err)\n\t}\n\n\tpol, err := hsdb.PolicyBytes(db.DB, cfg)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"loading policy: %w\", err)\n\t}\n\n\tpolMan, err := policy.NewPolicyManager(pol, users, nodes.ViewSlice())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"initializing policy manager: %w\", err)\n\t}\n\n\t// Apply defaults for NodeStore batch configuration if not set.\n\t// This ensures tests that create Config directly (without viper) still work.\n\tbatchSize := cfg.Tuning.NodeStoreBatchSize\n\tif batchSize == 0 {\n\t\tbatchSize = defaultNodeStoreBatchSize\n\t}\n\n\tbatchTimeout := cfg.Tuning.NodeStoreBatchTimeout\n\tif batchTimeout == 0 {\n\t\tbatchTimeout = defaultNodeStoreBatchTimeout\n\t}\n\n\t// PolicyManager.BuildPeerMap handles both global and per-node filter complexity.\n\t// This moves the complex peer relationship logic into the policy package where it belongs.\n\tnodeStore := NewNodeStore(\n\t\tnodes,\n\t\tfunc(nodes []types.NodeView) map[types.NodeID][]types.NodeView {\n\t\t\treturn polMan.BuildPeerMap(views.SliceOf(nodes))\n\t\t},\n\t\tbatchSize,\n\t\tbatchTimeout,\n\t)\n\tnodeStore.Start()\n\n\treturn &State{\n\t\tcfg: cfg,\n\n\t\tdb:            db,\n\t\tipAlloc:       ipAlloc,\n\t\tpolMan:        polMan,\n\t\tauthCache:     authCache,\n\t\tprimaryRoutes: routes.New(),\n\t\tnodeStore:     nodeStore,\n\n\t\tsshCheckAuth: make(map[sshCheckPair]time.Time),\n\t}, nil\n}\n\n// Close gracefully shuts down the State instance and releases all resources.\nfunc (s *State) Close() error {\n\ts.nodeStore.Stop()\n\n\terr := s.db.Close()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"closing database: %w\", err)\n\t}\n\n\treturn nil\n}\n\n// SetDERPMap updates the DERP relay configuration.\nfunc (s *State) SetDERPMap(dm *tailcfg.DERPMap) {\n\ts.derpMap.Store(dm)\n}\n\n// DERPMap returns the current DERP relay configuration for peer-to-peer connectivity.\nfunc (s *State) DERPMap() tailcfg.DERPMapView {\n\treturn s.derpMap.Load().View()\n}\n\n// ReloadPolicy reloads the access control policy, re-runs route auto-approval,\n// and returns the resulting change set.\nfunc (s *State) ReloadPolicy() ([]change.Change, error) {\n\tpol, err := hsdb.PolicyBytes(s.db.DB, s.cfg)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"loading policy: %w\", err)\n\t}\n\n\tpolicyChanged, err := s.polMan.SetPolicy(pol)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"setting policy: %w\", err)\n\t}\n\n\t// Clear SSH check auth times when policy changes to ensure stale\n\t// approvals don't persist if checkPeriod rules are modified or removed.\n\ts.ClearSSHCheckAuth()\n\n\t// Rebuild peer maps after policy changes because the peersFunc in NodeStore\n\t// uses the PolicyManager's filters. Without this, nodes won't see newly allowed\n\t// peers until a node is added/removed, causing autogroup:self policies to not\n\t// propagate correctly when switching between policy types.\n\ts.nodeStore.RebuildPeerMaps()\n\n\t//nolint:prealloc // cs starts with one element and may grow\n\tcs := []change.Change{change.PolicyChange()}\n\n
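\t// The returned slice always leads with a full PolicyChange; any per-node\n\t// route changes from the auto-approval pass below are appended after it.\n\n\t// Always call autoApproveNodes during policy reload, regardless of whether\n\t// the policy content has changed. 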
This ensures that routes are re-evaluated\n\t// when they might have been manually disabled but could now be auto-approved\n\t// with the current policy.\n\trcs, err := s.autoApproveNodes()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"auto approving nodes: %w\", err)\n\t}\n\n\t// TODO(kradalby): These changes can probably be safely ignored.\n\t// If the PolicyChange is happening, that will lead to a full update\n\t// meaning that we do not need to send individual route changes.\n\tcs = append(cs, rcs...)\n\n\tif len(rcs) > 0 || policyChanged {\n\t\tlog.Info().\n\t\t\tBool(\"policy.changed\", policyChanged).\n\t\t\tInt(\"route.changes\", len(rcs)).\n\t\t\tInt(\"total.changes\", len(cs)).\n\t\t\tMsg(\"Policy reload completed with changes\")\n\t}\n\n\treturn cs, nil\n}\n\n// CreateUser creates a new user and updates the policy manager.\n// Returns the created user, change set, and any error.\nfunc (s *State) CreateUser(user types.User) (*types.User, change.Change, error) {\n\tif err := s.db.DB.Save(&user).Error; err != nil { //nolint:noinlineerr\n\t\treturn nil, change.Change{}, fmt.Errorf(\"creating user: %w\", err)\n\t}\n\n\t// Check if policy manager needs updating\n\tc, err := s.updatePolicyManagerUsers()\n\tif err != nil {\n\t\t// Log the error but don't fail the user creation\n\t\treturn &user, change.Change{}, fmt.Errorf(\"updating policy manager after user creation: %w\", err)\n\t}\n\n\t// Even if the policy manager doesn't detect a filter change, SSH policies\n\t// might now be resolvable when they weren't before. If there are existing\n\t// nodes, we should send a policy change to ensure they get updated SSH policies.\n\t// TODO(kradalby): detect this, or rebuild all SSH policies so we can determine\n\t// this upstream.\n\tif c.IsEmpty() {\n\t\tc = change.PolicyChange()\n\t}\n\n\tlog.Info().Str(zf.UserName, user.Name).Msg(\"user created\")\n\n\treturn &user, c, nil\n}\n\n// UpdateUser modifies an existing user using the provided update function within a transaction.\n// Returns the updated user, change set, and any error.\nfunc (s *State) UpdateUser(userID types.UserID, updateFn func(*types.User) error) (*types.User, change.Change, error) {\n\tuser, err := hsdb.Write(s.db.DB, func(tx *gorm.DB) (*types.User, error) {\n\t\tuser, err := hsdb.GetUserByID(tx, userID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif err := updateFn(user); err != nil { //nolint:noinlineerr\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Use Updates() to only update modified fields, preserving unchanged values.\n\t\terr = tx.Updates(user).Error\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"updating user: %w\", err)\n\t\t}\n\n\t\treturn user, nil\n\t})\n\tif err != nil {\n\t\treturn nil, change.Change{}, err\n\t}\n\n\t// Check if policy manager needs updating\n\tc, err := s.updatePolicyManagerUsers()\n\tif err != nil {\n\t\treturn user, change.Change{}, fmt.Errorf(\"updating policy manager after user update: %w\", err)\n\t}\n\n\t// TODO(kradalby): We might want to update nodestore with the user data\n\n\treturn user, c, nil\n}\n\n// DeleteUser permanently removes a user and all associated data (nodes, API keys, etc).\n// This operation is irreversible.\n// It also updates the policy manager to ensure ACL policies referencing the deleted\n// user are re-evaluated immediately, fixing issue #2967.\nfunc (s *State) DeleteUser(userID types.UserID) (change.Change, error) {\n\terr := s.db.DestroyUser(userID)\n\tif err != nil {\n\t\treturn change.Change{}, err\n\t}\n\n\t// Update policy manager with 
the new user list (without the deleted user)\n\t// This ensures that if the policy references the deleted user, it gets\n\t// re-evaluated immediately rather than when some other operation triggers it.\n\tc, err := s.updatePolicyManagerUsers()\n\tif err != nil {\n\t\treturn change.Change{}, fmt.Errorf(\"updating policy after user deletion: %w\", err)\n\t}\n\n\t// If the policy manager doesn't detect changes, still return UserRemoved\n\t// to ensure peer lists are refreshed\n\tif c.IsEmpty() {\n\t\tc = change.UserRemoved()\n\t}\n\n\treturn c, nil\n}\n\n// RenameUser changes a user's name. The new name must be unique.\nfunc (s *State) RenameUser(userID types.UserID, newName string) (*types.User, change.Change, error) {\n\treturn s.UpdateUser(userID, func(user *types.User) error {\n\t\tuser.Name = newName\n\t\treturn nil\n\t})\n}\n\n// GetUserByID retrieves a user by ID.\nfunc (s *State) GetUserByID(userID types.UserID) (*types.User, error) {\n\treturn s.db.GetUserByID(userID)\n}\n\n// GetUserByName retrieves a user by name.\nfunc (s *State) GetUserByName(name string) (*types.User, error) {\n\treturn s.db.GetUserByName(name)\n}\n\n// GetUserByOIDCIdentifier retrieves a user by their OIDC identifier.\nfunc (s *State) GetUserByOIDCIdentifier(id string) (*types.User, error) {\n\treturn s.db.GetUserByOIDCIdentifier(id)\n}\n\n// ListUsersWithFilter retrieves users matching the specified filter criteria.\nfunc (s *State) ListUsersWithFilter(filter *types.User) ([]types.User, error) {\n\treturn s.db.ListUsers(filter)\n}\n\n// ListAllUsers retrieves all users in the system.\nfunc (s *State) ListAllUsers() ([]types.User, error) {\n\treturn s.db.ListUsers()\n}\n\n// persistNodeToDB saves the given node state to the database.\n// This function must receive the exact node state to save to ensure consistency between\n// NodeStore and the database. It verifies the node still exists in NodeStore to prevent\n// race conditions where a node might be deleted between UpdateNode returning and\n// persistNodeToDB being called.\nfunc (s *State) persistNodeToDB(node types.NodeView) (types.NodeView, change.Change, error) {\n\tif !node.Valid() {\n\t\treturn types.NodeView{}, change.Change{}, ErrInvalidNodeView\n\t}\n\n\t// Verify the node still exists in NodeStore before persisting to database.\n\t// Without this check, we could hit a race condition where UpdateNode returns a valid\n\t// node from a batch update, then the node gets deleted (e.g., ephemeral node logout),\n\t// and persistNodeToDB would incorrectly re-insert the deleted node into the database.\n\t_, exists := s.nodeStore.GetNode(node.ID())\n\tif !exists {\n\t\tlog.Warn().\n\t\t\tEmbedObject(node).\n\t\t\tBool(\"is_ephemeral\", node.IsEphemeral()).\n\t\t\tMsg(\"Node no longer exists in NodeStore, skipping database persist to prevent race condition\")\n\n\t\treturn types.NodeView{}, change.Change{}, fmt.Errorf(\"%w: %d\", ErrNodeNotInNodeStore, node.ID())\n\t}\n\n\tnodePtr := node.AsStruct()\n\n\t// Use Omit to prevent overwriting certain fields during MapRequest updates:\n\t// - \"expiry\": should only be updated through explicit SetNodeExpiry calls or re-registration\n\t// - \"AuthKeyID\", \"AuthKey\": prevents GORM from persisting stale PreAuthKey references that\n\t//   may exist in NodeStore after a PreAuthKey has been deleted. The database handles setting\n\t//   auth_key_id to NULL via ON DELETE SET NULL. 
Without this, Updates() would fail with a\n\t//   foreign key constraint error when trying to reference a deleted PreAuthKey.\n\t// See also: https://github.com/juanfont/headscale/issues/2862\n\terr := s.db.DB.Omit(\"expiry\", \"AuthKeyID\", \"AuthKey\").Updates(nodePtr).Error\n\tif err != nil {\n\t\treturn types.NodeView{}, change.Change{}, fmt.Errorf(\"saving node: %w\", err)\n\t}\n\n\t// Check if policy manager needs updating\n\tc, err := s.updatePolicyManagerNodes()\n\tif err != nil {\n\t\treturn nodePtr.View(), change.Change{}, fmt.Errorf(\"updating policy manager after node save: %w\", err)\n\t}\n\n\tif c.IsEmpty() {\n\t\tc = change.NodeAdded(node.ID())\n\t}\n\n\treturn node, c, nil\n}\n\n// SaveNode writes a node to the NodeStore and then persists the resulting\n// node state to the database.\nfunc (s *State) SaveNode(node types.NodeView) (types.NodeView, change.Change, error) {\n\t// Update NodeStore first\n\tnodePtr := node.AsStruct()\n\n\tresultNode := s.nodeStore.PutNode(*nodePtr)\n\n\t// Then save to database using the result from PutNode\n\treturn s.persistNodeToDB(resultNode)\n}\n\n// DeleteNode permanently removes a node and cleans up associated resources.\n// Returns the resulting change set and any error. This operation is irreversible.\nfunc (s *State) DeleteNode(node types.NodeView) (change.Change, error) {\n\ts.nodeStore.DeleteNode(node.ID())\n\n\terr := s.db.DeleteNode(node.AsStruct())\n\tif err != nil {\n\t\treturn change.Change{}, err\n\t}\n\n\ts.ipAlloc.FreeIPs(node.IPs())\n\n\tc := change.NodeRemoved(node.ID())\n\n\t// Check if policy manager needs updating after node deletion\n\tpolicyChange, err := s.updatePolicyManagerNodes()\n\tif err != nil {\n\t\treturn change.Change{}, fmt.Errorf(\"updating policy manager after node deletion: %w\", err)\n\t}\n\n\tif !policyChange.IsEmpty() {\n\t\t// Merge policy change with NodeRemoved to preserve PeersRemoved info\n\t\t// This ensures the batcher cleans up the deleted node from its state\n\t\tc = c.Merge(policyChange)\n\t}\n\n\treturn c, nil\n}\n\n// Connect marks a node as connected and updates its primary routes in the state.\n// It returns the list of changes and a generation number. The generation number\n// must be passed to Disconnect() so that stale disconnects from old poll sessions\n// are rejected (see the grace period logic in poll.go).\nfunc (s *State) Connect(id types.NodeID) ([]change.Change, uint64) {\n
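\t// Usage sketch (caller shape assumed for illustration, e.g. a poll session):\n\t//\n\t//\tchanges, gen := state.Connect(nodeID)         // session starts\n\t//\t// ... map session runs ...\n\t//\tchanges, err := state.Disconnect(nodeID, gen) // stale gen => no-op\n\t//\n\t// Increment the connect generation for this node. 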
This ensures that any\n\t// in-flight Disconnect() from a previous session will see a stale generation\n\t// and become a no-op.\n\tgen := s.nextConnectGen(id)\n\n\t// Update online status in NodeStore before creating change notification\n\t// so the NodeStore already reflects the correct state when other nodes\n\t// process the NodeCameOnline change for full map generation.\n\tnode, ok := s.nodeStore.UpdateNode(id, func(n *types.Node) {\n\t\tn.IsOnline = new(true)\n\t\t// n.LastSeen = ptr.To(now)\n\t})\n\tif !ok {\n\t\treturn nil, gen\n\t}\n\n\tc := []change.Change{change.NodeOnlineFor(node)}\n\n\tlog.Info().EmbedObject(node).Msg(\"node connected\")\n\n\t// Use the node's current routes for primary route update.\n\t// AllApprovedRoutes() returns only the intersection of announced and approved routes.\n\trouteChange := s.primaryRoutes.SetRoutes(id, node.AllApprovedRoutes()...)\n\n\tif routeChange {\n\t\tc = append(c, change.NodeAdded(id))\n\t}\n\n\treturn c, gen\n}\n\n// nextConnectGen atomically increments and returns the connect generation for a node.\nfunc (s *State) nextConnectGen(id types.NodeID) uint64 {\n\tval, _ := s.connectGen.LoadOrStore(id, &atomic.Uint64{})\n\n\tcounter, ok := val.(*atomic.Uint64)\n\tif !ok {\n\t\treturn 0\n\t}\n\n\treturn counter.Add(1)\n}\n\n// connectGeneration returns the current connect generation for a node.\nfunc (s *State) connectGeneration(id types.NodeID) uint64 {\n\tval, ok := s.connectGen.Load(id)\n\tif !ok {\n\t\treturn 0\n\t}\n\n\tcounter, ok := val.(*atomic.Uint64)\n\tif !ok {\n\t\treturn 0\n\t}\n\n\treturn counter.Load()\n}\n\n// Disconnect marks a node as disconnected and updates its primary routes in the state.\n// The gen parameter is the generation returned by Connect(). If a newer Connect() has\n// been called since the session that is disconnecting, the generation will not match\n// and this call becomes a no-op, preventing stale disconnects from overwriting the\n// online status set by a newer session.\nfunc (s *State) Disconnect(id types.NodeID, gen uint64) ([]change.Change, error) {\n\t// Check if this disconnect is stale. 
A newer Connect() will have incremented\n\t// the generation, so if ours doesn't match, a newer session owns this node.\n\tif current := s.connectGeneration(id); current != gen {\n\t\tlog.Debug().\n\t\t\tUint64(\"disconnect_gen\", gen).\n\t\t\tUint64(\"current_gen\", current).\n\t\t\tMsg(\"stale disconnect rejected, newer session active\")\n\n\t\treturn nil, nil\n\t}\n\n\tnode, ok := s.nodeStore.UpdateNode(id, func(n *types.Node) {\n\t\tnow := time.Now()\n\t\tn.LastSeen = &now\n\t\t// NodeStore is the source of truth for all node state including online status.\n\t\tn.IsOnline = new(false)\n\t})\n\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"%w: %d\", ErrNodeNotFound, id)\n\t}\n\n\tlog.Info().EmbedObject(node).Msg(\"node disconnected\")\n\n\t// Special error handling for disconnect - we log errors but continue\n\t// because NodeStore is already updated and we need to notify peers\n\t_, c, err := s.persistNodeToDB(node)\n\tif err != nil {\n\t\t// Log error but don't fail the disconnection - NodeStore is already updated\n\t\t// and we need to send change notifications to peers\n\t\tlog.Error().Err(err).EmbedObject(node).Msg(\"failed to update last seen in database\")\n\n\t\tc = change.Change{}\n\t}\n\n\t// The node is disconnecting so make sure that none of the routes it\n\t// announced are served to any nodes.\n\trouteChange := s.primaryRoutes.SetRoutes(id)\n\n\tcs := []change.Change{change.NodeOfflineFor(node), c}\n\n\t// If we have a policy change or route change, return that as it's more comprehensive\n\t// Otherwise, return the NodeOffline change to ensure nodes are notified\n\tif c.IsFull() || routeChange {\n\t\tcs = append(cs, change.PolicyChange())\n\t}\n\n\treturn cs, nil\n}\n\n// GetNodeByID retrieves a node by its ID.\n// The bool reports whether the node exists (the \"not found\" signal).\n// The returned NodeView may still be invalid and must be checked with .Valid();\n// an invalid view indicates a broken node.\nfunc (s *State) GetNodeByID(nodeID types.NodeID) (types.NodeView, bool) {\n\treturn s.nodeStore.GetNode(nodeID)\n}\n\n// GetNodeByNodeKey retrieves a node by its Tailscale public key.\n// The bool reports whether the node exists (the \"not found\" signal).\n// The returned NodeView may still be invalid and must be checked with .Valid();\n// an invalid view indicates a broken node.\nfunc (s *State) GetNodeByNodeKey(nodeKey key.NodePublic) (types.NodeView, bool) {\n\treturn s.nodeStore.GetNodeByNodeKey(nodeKey)\n}\n\n// GetNodeByMachineKey retrieves a node by its machine key and user ID.\n// The bool reports whether the node exists (the \"not found\" signal).\n// The returned NodeView may still be invalid and must be checked with .Valid();\n// an invalid view indicates a broken node.\nfunc (s *State) GetNodeByMachineKey(machineKey key.MachinePublic, userID types.UserID) (types.NodeView, bool) {\n\treturn s.nodeStore.GetNodeByMachineKey(machineKey, userID)\n}\n\n// ListNodes retrieves specific nodes by ID, or all nodes if no IDs provided.\nfunc (s *State) ListNodes(nodeIDs ...types.NodeID) views.Slice[types.NodeView] {\n\tif len(nodeIDs) == 0 {\n\t\treturn s.nodeStore.ListNodes()\n\t}\n\n\t// Filter nodes by the requested IDs\n\tallNodes := s.nodeStore.ListNodes()\n\n\tnodeIDSet := make(map[types.NodeID]struct{}, len(nodeIDs))\n\tfor _, id := range 
nodeIDs {\n\t\tnodeIDSet[id] = struct{}{}\n\t}\n\n\tvar filteredNodes []types.NodeView\n\n\tfor _, node := range allNodes.All() {\n\t\tif _, exists := nodeIDSet[node.ID()]; exists {\n\t\t\tfilteredNodes = append(filteredNodes, node)\n\t\t}\n\t}\n\n\treturn views.SliceOf(filteredNodes)\n}\n\n// ListNodesByUser retrieves all nodes belonging to a specific user.\nfunc (s *State) ListNodesByUser(userID types.UserID) views.Slice[types.NodeView] {\n\treturn s.nodeStore.ListNodesByUser(userID)\n}\n\n// ListPeers retrieves nodes that can communicate with the specified node based on policy.\nfunc (s *State) ListPeers(nodeID types.NodeID, peerIDs ...types.NodeID) views.Slice[types.NodeView] {\n\tif len(peerIDs) == 0 {\n\t\treturn s.nodeStore.ListPeers(nodeID)\n\t}\n\n\t// For specific peerIDs, filter from all nodes\n\tallNodes := s.nodeStore.ListNodes()\n\n\tnodeIDSet := make(map[types.NodeID]struct{}, len(peerIDs))\n\tfor _, id := range peerIDs {\n\t\tnodeIDSet[id] = struct{}{}\n\t}\n\n\tvar filteredNodes []types.NodeView\n\n\tfor _, node := range allNodes.All() {\n\t\tif _, exists := nodeIDSet[node.ID()]; exists {\n\t\t\tfilteredNodes = append(filteredNodes, node)\n\t\t}\n\t}\n\n\treturn views.SliceOf(filteredNodes)\n}\n\n// ListEphemeralNodes retrieves all ephemeral (temporary) nodes in the system.\nfunc (s *State) ListEphemeralNodes() views.Slice[types.NodeView] {\n\tallNodes := s.nodeStore.ListNodes()\n\n\tvar ephemeralNodes []types.NodeView\n\n\tfor _, node := range allNodes.All() {\n\t\t// Check if node is ephemeral by checking its AuthKey\n\t\tif node.AuthKey().Valid() && node.AuthKey().Ephemeral() {\n\t\t\tephemeralNodes = append(ephemeralNodes, node)\n\t\t}\n\t}\n\n\treturn views.SliceOf(ephemeralNodes)\n}\n\n// SetNodeExpiry updates the expiration time for a node.\n// If expiry is nil, the node's expiry is disabled (node will never expire).\nfunc (s *State) SetNodeExpiry(nodeID types.NodeID, expiry *time.Time) (types.NodeView, change.Change, error) {\n\t// Update NodeStore before database to ensure consistency. The NodeStore update is\n\t// blocking and will be the source of truth for the batcher. The database update must\n\t// make the exact same change. 
If the database update fails, the NodeStore change will\n\t// remain, but since we return an error, no change notification will be sent to the\n\t// batcher, preventing inconsistent state propagation.\n\tn, ok := s.nodeStore.UpdateNode(nodeID, func(node *types.Node) {\n\t\tnode.Expiry = expiry\n\t})\n\n\tif !ok {\n\t\treturn types.NodeView{}, change.Change{}, fmt.Errorf(\"%w: %d\", ErrNodeNotInNodeStore, nodeID)\n\t}\n\n\t// Persist expiry change to database directly since persistNodeToDB omits expiry.\n\terr := s.db.NodeSetExpiry(nodeID, expiry)\n\tif err != nil {\n\t\treturn types.NodeView{}, change.Change{}, fmt.Errorf(\"setting node expiry in database: %w\", err)\n\t}\n\n\t// Update policy manager and generate change notification.\n\tc, err := s.updatePolicyManagerNodes()\n\tif err != nil {\n\t\treturn n, change.Change{}, fmt.Errorf(\"updating policy manager after setting expiry: %w\", err)\n\t}\n\n\tif c.IsEmpty() {\n\t\tc = change.NodeAdded(n.ID())\n\t}\n\n\treturn n, c, nil\n}\n\n// SetNodeTags assigns tags to a node, making it a \"tagged node\".\n// Once a node is tagged, it cannot be un-tagged (only tags can be changed).\n// Setting tags clears UserID since tagged nodes are owned by their tags.\nfunc (s *State) SetNodeTags(nodeID types.NodeID, tags []string) (types.NodeView, change.Change, error) {\n\tif len(tags) == 0 {\n\t\treturn types.NodeView{}, change.Change{}, types.ErrCannotRemoveAllTags\n\t}\n\n\t// Get node for validation\n\texistingNode, exists := s.nodeStore.GetNode(nodeID)\n\tif !exists {\n\t\treturn types.NodeView{}, change.Change{}, fmt.Errorf(\"%w: %d\", ErrNodeNotFound, nodeID)\n\t}\n\n\t// Validate tags: must have correct format and exist in policy\n\tvalidatedTags := make([]string, 0, len(tags))\n\tinvalidTags := make([]string, 0)\n\n\tfor _, tag := range tags {\n\t\tif !strings.HasPrefix(tag, \"tag:\") || !s.polMan.TagExists(tag) {\n\t\t\tinvalidTags = append(invalidTags, tag)\n\n\t\t\tcontinue\n\t\t}\n\n\t\tvalidatedTags = append(validatedTags, tag)\n\t}\n\n\tif len(invalidTags) > 0 {\n\t\treturn types.NodeView{}, change.Change{}, fmt.Errorf(\"%w %v are invalid or not permitted\", ErrRequestedTagsInvalidOrNotPermitted, invalidTags)\n\t}\n\n\tslices.Sort(validatedTags)\n\tvalidatedTags = slices.Compact(validatedTags)\n\n\t// Log the operation\n\tlogTagOperation(existingNode, validatedTags)\n\n\t// Update NodeStore before database to ensure consistency. The NodeStore update is\n\t// blocking and will be the source of truth for the batcher. 
The database update must\n\t// make the exact same change.\n\tn, ok := s.nodeStore.UpdateNode(nodeID, func(node *types.Node) {\n\t\tnode.Tags = validatedTags\n\t\t// Tagged nodes are owned by their tags, not a user.\n\t\tnode.UserID = nil\n\t\tnode.User = nil\n\t})\n\n\tif !ok {\n\t\treturn types.NodeView{}, change.Change{}, fmt.Errorf(\"%w: %d\", ErrNodeNotInNodeStore, nodeID)\n\t}\n\n\tnodeView, c, err := s.persistNodeToDB(n)\n\tif err != nil {\n\t\treturn nodeView, c, err\n\t}\n\n\t// Set OriginNode so the mapper knows to include self info for this node.\n\t// When tags change, persistNodeToDB returns PolicyChange which doesn't set OriginNode,\n\t// so the mapper's self-update check fails and the node never sees its new tags.\n\t// Setting OriginNode ensures the node gets a self-update with the new tags.\n\tc.OriginNode = nodeID\n\n\treturn nodeView, c, nil\n}\n\n// SetApprovedRoutes sets the network routes that a node is approved to advertise.\nfunc (s *State) SetApprovedRoutes(nodeID types.NodeID, routes []netip.Prefix) (types.NodeView, change.Change, error) {\n\t// TODO(kradalby): In principle we should call the AutoApprove logic here\n\t// because even if the CLI removes an auto-approved route, it will be added\n\t// back automatically.\n\tn, ok := s.nodeStore.UpdateNode(nodeID, func(node *types.Node) {\n\t\tnode.ApprovedRoutes = routes\n\t})\n\n\tif !ok {\n\t\treturn types.NodeView{}, change.Change{}, fmt.Errorf(\"%w: %d\", ErrNodeNotInNodeStore, nodeID)\n\t}\n\n\t// Persist the node changes to the database\n\tnodeView, c, err := s.persistNodeToDB(n)\n\tif err != nil {\n\t\treturn types.NodeView{}, change.Change{}, err\n\t}\n\n\t// Update primary routes table based on SubnetRoutes (intersection of announced and approved).\n\t// The primary routes table is what the mapper uses to generate network maps, so updating it\n\t// here ensures that route changes are distributed to peers.\n\trouteChange := s.primaryRoutes.SetRoutes(nodeID, nodeView.AllApprovedRoutes()...)\n\n\t// If routes changed or the changeset isn't already a full update, trigger a policy change\n\t// to ensure all nodes get updated network maps\n\tif routeChange || !c.IsFull() {\n\t\tc = change.PolicyChange()\n\t}\n\n\treturn nodeView, c, nil\n}\n\n// RenameNode changes the display name of a node.\nfunc (s *State) RenameNode(nodeID types.NodeID, newName string) (types.NodeView, change.Change, error) {\n\terr := util.ValidateHostname(newName)\n\tif err != nil {\n\t\treturn types.NodeView{}, change.Change{}, fmt.Errorf(\"renaming node: %w\", err)\n\t}\n\n\t// Check name uniqueness against NodeStore\n\tallNodes := s.nodeStore.ListNodes()\n\tfor i := range allNodes.Len() {\n\t\tnode := allNodes.At(i)\n\t\tif node.ID() != nodeID && node.AsStruct().GivenName == newName {\n\t\t\treturn types.NodeView{}, change.Change{}, fmt.Errorf(\"%w: %s\", ErrNodeNameNotUnique, newName)\n\t\t}\n\t}\n\n\t// Update NodeStore before database to ensure consistency. The NodeStore update is\n\t// blocking and will be the source of truth for the batcher. 
The database update must\n\t// make the exact same change.\n\tn, ok := s.nodeStore.UpdateNode(nodeID, func(node *types.Node) {\n\t\tnode.GivenName = newName\n\t})\n\n\tif !ok {\n\t\treturn types.NodeView{}, change.Change{}, fmt.Errorf(\"%w: %d\", ErrNodeNotInNodeStore, nodeID)\n\t}\n\n\treturn s.persistNodeToDB(n)\n}\n\n// BackfillNodeIPs assigns IP addresses to nodes that don't have them.\nfunc (s *State) BackfillNodeIPs() ([]string, error) {\n\tchanges, err := s.db.BackfillNodeIPs(s.ipAlloc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Refresh NodeStore after IP changes to ensure consistency\n\tif len(changes) > 0 {\n\t\tnodes, err := s.db.ListNodes()\n\t\tif err != nil {\n\t\t\treturn changes, fmt.Errorf(\"refreshing NodeStore after IP backfill: %w\", err)\n\t\t}\n\n\t\tfor _, node := range nodes {\n\t\t\t// Preserve online status and NetInfo when refreshing from database\n\t\t\texistingNode, exists := s.nodeStore.GetNode(node.ID)\n\t\t\tif exists && existingNode.Valid() {\n\t\t\t\tnode.IsOnline = new(existingNode.IsOnline().Get())\n\n\t\t\t\t// TODO(kradalby): We should ensure we use the same hostinfo and node merge semantics\n\t\t\t\t// when a node re-registers as we do when it sends a map request (UpdateNodeFromMapRequest).\n\n\t\t\t\t// Preserve NetInfo from existing node to prevent loss during backfill\n\t\t\t\tnetInfo := netInfoFromMapRequest(node.ID, existingNode.Hostinfo().AsStruct(), node.Hostinfo)\n\t\t\t\tnode.Hostinfo = existingNode.Hostinfo().AsStruct()\n\t\t\t\tnode.Hostinfo.NetInfo = netInfo\n\t\t\t}\n\t\t\t// TODO(kradalby): This should just update the IP addresses, nothing else in the node store.\n\t\t\t// We should avoid PutNode here.\n\t\t\t_ = s.nodeStore.PutNode(*node)\n\t\t}\n\t}\n\n\treturn changes, nil\n}\n\n// ExpireExpiredNodes finds and processes expired nodes since the last check.\n// Returns next check time, state update with expired nodes, and whether any were found.\nfunc (s *State) ExpireExpiredNodes(lastCheck time.Time) (time.Time, []change.Change, bool) {\n\t// Why capture start time: We need to ensure we don't miss nodes that expire\n\t// while this function is running by using a consistent timestamp for the next check\n\tstarted := time.Now()\n\n\tvar updates []change.Change\n\n\tfor _, node := range s.nodeStore.ListNodes().All() { //nolint:unqueryvet // NodeStore.ListNodes not a SQL query\n\t\tif !node.Valid() {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Why check After(lastCheck): We only want to notify about nodes that\n\t\t// expired since the last check to avoid duplicate notifications\n\t\tif node.IsExpired() && node.Expiry().Valid() && node.Expiry().Get().After(lastCheck) {\n\t\t\tupdates = append(updates, change.KeyExpiryFor(node.ID(), node.Expiry().Get()))\n\t\t}\n\t}\n\n\tif len(updates) > 0 {\n\t\treturn started, updates, true\n\t}\n\n\treturn started, nil, false\n}\n\n// SSHPolicy returns the SSH access policy for a node.\nfunc (s *State) SSHPolicy(node types.NodeView) (*tailcfg.SSHPolicy, error) {\n\treturn s.polMan.SSHPolicy(s.cfg.ServerURL, node)\n}\n\n// SSHCheckParams resolves the SSH check period for a source-destination\n// node pair from the current policy.\nfunc (s *State) SSHCheckParams(\n\tsrcNodeID, dstNodeID types.NodeID,\n) (time.Duration, bool) {\n\treturn s.polMan.SSHCheckParams(srcNodeID, dstNodeID)\n}\n\n// Filter returns the current network filter rules and matches.\nfunc (s *State) Filter() ([]tailcfg.FilterRule, []matcher.Match) {\n\treturn s.polMan.Filter()\n}\n\n// FilterForNode returns filter rules for a specific 
node, handling autogroup:self per-node.\nfunc (s *State) FilterForNode(node types.NodeView) ([]tailcfg.FilterRule, error) {\n\treturn s.polMan.FilterForNode(node)\n}\n\n// MatchersForNode returns matchers for peer relationship determination (unreduced).\nfunc (s *State) MatchersForNode(node types.NodeView) ([]matcher.Match, error) {\n\treturn s.polMan.MatchersForNode(node)\n}\n\n// NodeCanHaveTag checks if a node is allowed to have a specific tag.\nfunc (s *State) NodeCanHaveTag(node types.NodeView, tag string) bool {\n\treturn s.polMan.NodeCanHaveTag(node, tag)\n}\n\n// SetPolicy updates the policy configuration.\nfunc (s *State) SetPolicy(pol []byte) (bool, error) {\n\tchanged, err := s.polMan.SetPolicy(pol)\n\tif err != nil {\n\t\treturn changed, err\n\t}\n\n\t// Clear SSH check auth times when policy changes.\n\ts.ClearSSHCheckAuth()\n\n\treturn changed, nil\n}\n\n// AutoApproveRoutes checks if any routes should be auto-approved for a node\n// and, if so, persists the updated approvals.\nfunc (s *State) AutoApproveRoutes(nv types.NodeView) (change.Change, error) {\n\tapproved, changed := policy.ApproveRoutesWithPolicy(s.polMan, nv, nv.ApprovedRoutes().AsSlice(), nv.AnnouncedRoutes())\n\tif changed {\n\t\tlog.Debug().\n\t\t\tEmbedObject(nv).\n\t\t\tStrs(\"routes.announced\", util.PrefixesToString(nv.AnnouncedRoutes())).\n\t\t\tStrs(\"routes.approved.old\", util.PrefixesToString(nv.ApprovedRoutes().AsSlice())).\n\t\t\tStrs(\"routes.approved.new\", util.PrefixesToString(approved)).\n\t\t\tMsg(\"Single node auto-approval detected route changes\")\n\n\t\t// Persist the auto-approved routes to database and NodeStore via SetApprovedRoutes\n\t\t// This ensures consistency between database and NodeStore\n\t\t_, c, err := s.SetApprovedRoutes(nv.ID(), approved)\n\t\tif err != nil {\n\t\t\tlog.Error().\n\t\t\t\tEmbedObject(nv).\n\t\t\t\tErr(err).\n\t\t\t\tMsg(\"Failed to persist auto-approved routes\")\n\n\t\t\treturn change.Change{}, err\n\t\t}\n\n\t\tlog.Info().EmbedObject(nv).Strs(zf.RoutesApproved, util.PrefixesToString(approved)).Msg(\"routes approved\")\n\n\t\treturn c, nil\n\t}\n\n\treturn change.Change{}, nil\n}\n\n// GetPolicy retrieves the current policy from the database.\nfunc (s *State) GetPolicy() (*types.Policy, error) {\n\treturn s.db.GetPolicy()\n}\n\n// SetPolicyInDB stores policy data in the database.\nfunc (s *State) SetPolicyInDB(data string) (*types.Policy, error) {\n\treturn s.db.SetPolicy(data)\n}\n\n// SetNodeRoutes sets the primary routes for a node.\nfunc (s *State) SetNodeRoutes(nodeID types.NodeID, routes ...netip.Prefix) change.Change {\n\tif s.primaryRoutes.SetRoutes(nodeID, routes...) 
{\n\t\t// Route changes affect packet filters for all nodes, so trigger a policy change\n\t\t// to ensure filters are regenerated across the entire network\n\t\treturn change.PolicyChange()\n\t}\n\n\treturn change.Change{}\n}\n\n// GetNodePrimaryRoutes returns the primary routes for a node.\nfunc (s *State) GetNodePrimaryRoutes(nodeID types.NodeID) []netip.Prefix {\n\treturn s.primaryRoutes.PrimaryRoutes(nodeID)\n}\n\n// PrimaryRoutesString returns a string representation of all primary routes.\nfunc (s *State) PrimaryRoutesString() string {\n\treturn s.primaryRoutes.String()\n}\n\n// ValidateAPIKey checks if an API key is valid and active.\nfunc (s *State) ValidateAPIKey(keyStr string) (bool, error) {\n\treturn s.db.ValidateAPIKey(keyStr)\n}\n\n// CreateAPIKey generates a new API key with optional expiration.\nfunc (s *State) CreateAPIKey(expiration *time.Time) (string, *types.APIKey, error) {\n\treturn s.db.CreateAPIKey(expiration)\n}\n\n// GetAPIKey retrieves an API key by its prefix.\n// Accepts both display format (hskey-api-{12chars}-***) and database format ({12chars}).\nfunc (s *State) GetAPIKey(displayPrefix string) (*types.APIKey, error) {\n\t// Parse the display prefix to extract the database prefix\n\tprefix, err := hsdb.ParseAPIKeyPrefix(displayPrefix)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.db.GetAPIKey(prefix)\n}\n\n// GetAPIKeyByID retrieves an API key by its database ID.\nfunc (s *State) GetAPIKeyByID(id uint64) (*types.APIKey, error) {\n\treturn s.db.GetAPIKeyByID(id)\n}\n\n// ExpireAPIKey marks an API key as expired.\nfunc (s *State) ExpireAPIKey(key *types.APIKey) error {\n\treturn s.db.ExpireAPIKey(key)\n}\n\n// ListAPIKeys returns all API keys in the system.\nfunc (s *State) ListAPIKeys() ([]types.APIKey, error) {\n\treturn s.db.ListAPIKeys()\n}\n\n// DestroyAPIKey permanently removes an API key.\nfunc (s *State) DestroyAPIKey(key types.APIKey) error {\n\treturn s.db.DestroyAPIKey(key)\n}\n\n// CreatePreAuthKey generates a new pre-authentication key for a user.\n// The userID parameter is now optional (can be nil) for system-created tagged keys.\nfunc (s *State) CreatePreAuthKey(userID *types.UserID, reusable bool, ephemeral bool, expiration *time.Time, aclTags []string) (*types.PreAuthKeyNew, error) {\n\treturn s.db.CreatePreAuthKey(userID, reusable, ephemeral, expiration, aclTags)\n}\n\n// Test helpers for the state layer\n\n// CreateUserForTest creates a test user. This is a convenience wrapper around the database layer.\nfunc (s *State) CreateUserForTest(name ...string) *types.User {\n\treturn s.db.CreateUserForTest(name...)\n}\n\n// CreateNodeForTest creates a test node. This is a convenience wrapper around the database layer.\nfunc (s *State) CreateNodeForTest(user *types.User, hostname ...string) *types.Node {\n\treturn s.db.CreateNodeForTest(user, hostname...)\n}\n\n// CreateRegisteredNodeForTest creates a test node with allocated IPs. This is a convenience wrapper around the database layer.\nfunc (s *State) CreateRegisteredNodeForTest(user *types.User, hostname ...string) *types.Node {\n\treturn s.db.CreateRegisteredNodeForTest(user, hostname...)\n}\n\n// CreateNodesForTest creates multiple test nodes. This is a convenience wrapper around the database layer.\nfunc (s *State) CreateNodesForTest(user *types.User, count int, namePrefix ...string) []*types.Node {\n\treturn s.db.CreateNodesForTest(user, count, namePrefix...)\n}\n\n// CreateUsersForTest creates multiple test users. 
This is a convenience wrapper around the database layer.\nfunc (s *State) CreateUsersForTest(count int, namePrefix ...string) []*types.User {\n\treturn s.db.CreateUsersForTest(count, namePrefix...)\n}\n\n// DB returns the underlying database for testing purposes.\nfunc (s *State) DB() *hsdb.HSDatabase {\n\treturn s.db\n}\n\n// GetPreAuthKey retrieves a pre-authentication key by ID.\nfunc (s *State) GetPreAuthKey(id string) (*types.PreAuthKey, error) {\n\treturn s.db.GetPreAuthKey(id)\n}\n\n// ListPreAuthKeys returns all pre-authentication keys.\nfunc (s *State) ListPreAuthKeys() ([]types.PreAuthKey, error) {\n\treturn s.db.ListPreAuthKeys()\n}\n\n// ExpirePreAuthKey marks a pre-authentication key as expired.\nfunc (s *State) ExpirePreAuthKey(id uint64) error {\n\treturn s.db.ExpirePreAuthKey(id)\n}\n\n// DeletePreAuthKey permanently deletes a pre-authentication key.\nfunc (s *State) DeletePreAuthKey(id uint64) error {\n\treturn s.db.DeletePreAuthKey(id)\n}\n\n// GetAuthCacheEntry retrieves a node registration from cache.\nfunc (s *State) GetAuthCacheEntry(id types.AuthID) (*types.AuthRequest, bool) {\n\tentry, found := s.authCache.Get(id)\n\tif !found {\n\t\treturn nil, false\n\t}\n\n\treturn &entry, true\n}\n\n// SetAuthCacheEntry stores a node registration in cache.\nfunc (s *State) SetAuthCacheEntry(id types.AuthID, entry types.AuthRequest) {\n\ts.authCache.Set(id, entry)\n}\n\n// SetLastSSHAuth records a successful SSH check authentication\n// for the given (src, dst) node pair.\nfunc (s *State) SetLastSSHAuth(src, dst types.NodeID) {\n\ts.sshCheckMu.Lock()\n\tdefer s.sshCheckMu.Unlock()\n\n\ts.sshCheckAuth[sshCheckPair{Src: src, Dst: dst}] = time.Now()\n}\n\n// GetLastSSHAuth returns when src last authenticated for SSH check\n// to dst.\nfunc (s *State) GetLastSSHAuth(src, dst types.NodeID) (time.Time, bool) {\n\ts.sshCheckMu.RLock()\n\tdefer s.sshCheckMu.RUnlock()\n\n\tt, ok := s.sshCheckAuth[sshCheckPair{Src: src, Dst: dst}]\n\n\treturn t, ok\n}\n\n// ClearSSHCheckAuth clears all recorded SSH check auth times.\n// Called when the policy changes to ensure stale auth times don't grant access.\nfunc (s *State) ClearSSHCheckAuth() {\n\ts.sshCheckMu.Lock()\n\tdefer s.sshCheckMu.Unlock()\n\n\ts.sshCheckAuth = make(map[sshCheckPair]time.Time)\n}\n\n// logHostinfoValidation logs warnings when hostinfo is nil or has empty hostname.\nfunc logHostinfoValidation(nv types.NodeView, username, hostname string) {\n\tif !nv.Hostinfo().Valid() {\n\t\tlog.Warn().\n\t\t\tCaller().\n\t\t\tEmbedObject(nv).\n\t\t\tStr(zf.UserName, username).\n\t\t\tStr(zf.GeneratedHostname, hostname).\n\t\t\tMsg(\"Registration had nil hostinfo, generated default hostname\")\n\t} else if nv.Hostinfo().Hostname() == \"\" {\n\t\tlog.Warn().\n\t\t\tCaller().\n\t\t\tEmbedObject(nv).\n\t\t\tStr(zf.UserName, username).\n\t\t\tStr(zf.GeneratedHostname, hostname).\n\t\t\tMsg(\"Registration had empty hostname, generated default\")\n\t}\n}\n\n// preserveNetInfo preserves NetInfo from an existing node for faster DERP connectivity.\n// If no existing node is provided, it creates new netinfo from the provided hostinfo.\nfunc preserveNetInfo(existingNode types.NodeView, nodeID types.NodeID, validHostinfo *tailcfg.Hostinfo) *tailcfg.NetInfo {\n\tvar existingHostinfo *tailcfg.Hostinfo\n\tif existingNode.Valid() {\n\t\texistingHostinfo = existingNode.Hostinfo().AsStruct()\n\t}\n\n\treturn netInfoFromMapRequest(nodeID, existingHostinfo, validHostinfo)\n}\n\n
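// A sketch of how newNodeParams might be populated during registration\n// (variable names and values here are hypothetical, for illustration only):\n//\n//\tparams := newNodeParams{\n//\t\tUser:           *user,\n//\t\tMachineKey:     machineKey,\n//\t\tNodeKey:        nodeKey,\n//\t\tHostname:       \"example-node\",\n//\t\tRegisterMethod: \"authkey\",\n//\t}\n//\tnv, err := s.createAndSaveNewNode(params)\n\n// newNodeParams contains parameters for creating a new node.\ntype 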
newNodeParams struct {\n\tUser           types.User\n\tMachineKey     key.MachinePublic\n\tNodeKey        key.NodePublic\n\tDiscoKey       key.DiscoPublic\n\tHostname       string\n\tHostinfo       *tailcfg.Hostinfo\n\tEndpoints      []netip.AddrPort\n\tExpiry         *time.Time\n\tRegisterMethod string\n\n\t// Optional: Pre-auth key specific fields\n\tPreAuthKey *types.PreAuthKey\n\n\t// Optional: Existing node for netinfo preservation\n\tExistingNodeForNetinfo types.NodeView\n}\n\n// authNodeUpdateParams contains parameters for updating an existing node during auth.\ntype authNodeUpdateParams struct {\n\t// Node to update; must be valid and in NodeStore.\n\tExistingNode types.NodeView\n\t// Client data: keys, hostinfo, endpoints.\n\tRegEntry *types.AuthRequest\n\t// Pre-validated hostinfo; NetInfo preserved from ExistingNode.\n\tValidHostinfo *tailcfg.Hostinfo\n\t// Hostname from hostinfo, or generated from keys if client omits it.\n\tHostname string\n\t// Auth user; may differ from ExistingNode.User() on conversion.\n\tUser *types.User\n\t// Overrides RegEntry.Node.Expiry; ignored for tagged nodes.\n\tExpiry *time.Time\n\t// Only used when IsConvertFromTag=true.\n\tRegisterMethod string\n\t// Set true for tagged->user conversion. Affects RegisterMethod and expiry.\n\tIsConvertFromTag bool\n}\n\n// applyAuthNodeUpdate applies common update logic for re-authenticating or converting\n// an existing node. It updates the node in NodeStore, processes RequestTags, and\n// persists changes to the database.\nfunc (s *State) applyAuthNodeUpdate(params authNodeUpdateParams) (types.NodeView, error) {\n\tregNv := params.RegEntry.Node()\n\t// Log the operation type\n\tif params.IsConvertFromTag {\n\t\tlog.Info().\n\t\t\tEmbedObject(params.ExistingNode).\n\t\t\tStrs(\"old.tags\", params.ExistingNode.Tags().AsSlice()).\n\t\t\tMsg(\"Converting tagged node to user-owned node\")\n\t} else {\n\t\tlog.Info().\n\t\t\tObject(\"existing\", params.ExistingNode).\n\t\t\tObject(\"incoming\", regNv).\n\t\t\tMsg(\"Updating existing node registration via reauth\")\n\t}\n\n\t// Process RequestTags during reauth (#2979)\n\t// Due to json:\",omitempty\", we treat empty/nil as \"clear tags\"\n\tvar requestTags []string\n\tif regNv.Hostinfo().Valid() {\n\t\trequestTags = regNv.Hostinfo().RequestTags().AsSlice()\n\t}\n\n\toldTags := params.ExistingNode.Tags().AsSlice()\n\n\t// Validate tags BEFORE calling UpdateNode to ensure we don't modify NodeStore\n\t// if validation fails. This maintains consistency between NodeStore and database.\n\trejectedTags := s.validateRequestTags(params.ExistingNode, requestTags)\n\tif len(rejectedTags) > 0 {\n\t\treturn types.NodeView{}, fmt.Errorf(\n\t\t\t\"%w %v are invalid or not permitted\",\n\t\t\tErrRequestedTagsInvalidOrNotPermitted,\n\t\t\trejectedTags,\n\t\t)\n\t}\n\n\t// Update existing node in NodeStore - validation passed, safe to mutate\n\tupdatedNodeView, ok := s.nodeStore.UpdateNode(params.ExistingNode.ID(), func(node *types.Node) {\n\t\tnode.NodeKey = regNv.NodeKey()\n\t\tnode.DiscoKey = regNv.DiscoKey()\n\t\tnode.Hostname = params.Hostname\n\n\t\t// Preserve NetInfo from existing node when re-registering\n\t\tnode.Hostinfo = params.ValidHostinfo\n\t\tnode.Hostinfo.NetInfo = preserveNetInfo(\n\t\t\tparams.ExistingNode,\n\t\t\tparams.ExistingNode.ID(),\n\t\t\tparams.ValidHostinfo,\n\t\t)\n\n\t\tnode.Endpoints = regNv.Endpoints().AsSlice()\n\t\t// Do NOT reset IsOnline here. Online status is managed exclusively by\n\t\t// Connect()/Disconnect() in the poll session lifecycle. 
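A node can re-register\n\t\t// while its poll session is still open. 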
Resetting it\n\t\t// during re-registration causes a false offline blip: the change\n\t\t// notification triggers a map regeneration showing the node as offline\n\t\t// to peers, even though Connect() will immediately set it back to true.\n\t\tnode.LastSeen = new(time.Now())\n\n\t\t// Set RegisterMethod - for conversion this is the new method,\n\t\t// for reauth we preserve the existing one from regEntry\n\t\tif params.IsConvertFromTag {\n\t\t\tnode.RegisterMethod = params.RegisterMethod\n\t\t} else {\n\t\t\tnode.RegisterMethod = regNv.RegisterMethod()\n\t\t}\n\n\t\t// Track tagged status BEFORE processing tags\n\t\twasTagged := node.IsTagged()\n\n\t\t// Process tags - may change node.Tags and node.UserID\n\t\t// Tags were pre-validated, so this will always succeed (no rejected tags)\n\t\t_ = s.processReauthTags(node, requestTags, params.User, oldTags)\n\n\t\t// Handle expiry AFTER tag processing, based on transition\n\t\t// This ensures expiry is correctly set/cleared based on the NEW tagged status\n\t\tisTagged := node.IsTagged()\n\n\t\tswitch {\n\t\tcase wasTagged && !isTagged:\n\t\t\t// Tagged → Personal: set expiry from client request\n\t\t\tif params.Expiry != nil {\n\t\t\t\tnode.Expiry = params.Expiry\n\t\t\t} else {\n\t\t\t\tnode.Expiry = regNv.Expiry().Clone()\n\t\t\t}\n\t\tcase !wasTagged && isTagged:\n\t\t\t// Personal → Tagged: clear expiry (tagged nodes don't expire)\n\t\t\tnode.Expiry = nil\n\t\tcase params.IsConvertFromTag:\n\t\t\t// Explicit conversion from tagged to user-owned: set expiry from client request\n\t\t\tif params.Expiry != nil {\n\t\t\t\tnode.Expiry = params.Expiry\n\t\t\t} else {\n\t\t\t\tnode.Expiry = regNv.Expiry().Clone()\n\t\t\t}\n\t\tcase !isTagged:\n\t\t\t// Personal → Personal: update expiry from client\n\t\t\tif params.Expiry != nil {\n\t\t\t\tnode.Expiry = params.Expiry\n\t\t\t} else {\n\t\t\t\tnode.Expiry = regNv.Expiry().Clone()\n\t\t\t}\n\t\t}\n\t\t// Tagged → Tagged: keep existing expiry (nil) - no action needed\n\t})\n\n\tif !ok {\n\t\treturn types.NodeView{}, fmt.Errorf(\"%w: %d\", ErrNodeNotInNodeStore, params.ExistingNode.ID())\n\t}\n\n\t// Persist to database\n\t// Omit AuthKeyID/AuthKey to prevent stale PreAuthKey references from causing FK errors.\n\t_, err := hsdb.Write(s.db.DB, func(tx *gorm.DB) (*types.Node, error) {\n\t\terr := tx.Omit(\"AuthKeyID\", \"AuthKey\").Updates(updatedNodeView.AsStruct()).Error\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"saving node: %w\", err)\n\t\t}\n\n\t\treturn nil, nil //nolint:nilnil // side-effect only write\n\t})\n\tif err != nil {\n\t\treturn types.NodeView{}, err\n\t}\n\n\t// Log completion\n\tif params.IsConvertFromTag {\n\t\tlog.Trace().\n\t\t\tEmbedObject(updatedNodeView).\n\t\t\tMsg(\"Tagged node converted to user-owned\")\n\t} else {\n\t\tlog.Trace().\n\t\t\tEmbedObject(updatedNodeView).\n\t\t\tMsg(\"Node re-authorized\")\n\t}\n\n\treturn updatedNodeView, nil\n}\n\n// createAndSaveNewNode creates a new node, allocates IPs, saves to DB, and adds to NodeStore.\n// It preserves netinfo from an existing node if one is provided (for faster DERP connectivity).\nfunc (s *State) createAndSaveNewNode(params newNodeParams) (types.NodeView, error) {\n\t// Preserve NetInfo from existing node if available\n\tif params.Hostinfo != nil {\n\t\tparams.Hostinfo.NetInfo = preserveNetInfo(\n\t\t\tparams.ExistingNodeForNetinfo,\n\t\t\ttypes.NodeID(0),\n\t\t\tparams.Hostinfo,\n\t\t)\n\t}\n\n\t// Prepare the node for registration\n\tnodeToRegister := types.Node{\n\t\tHostname:       
params.Hostname,\n\t\tMachineKey:     params.MachineKey,\n\t\tNodeKey:        params.NodeKey,\n\t\tDiscoKey:       params.DiscoKey,\n\t\tHostinfo:       params.Hostinfo,\n\t\tEndpoints:      params.Endpoints,\n\t\tLastSeen:       new(time.Now()),\n\t\tIsOnline:       new(false), // Explicitly offline until Connect() is called\n\t\tRegisterMethod: params.RegisterMethod,\n\t\tExpiry:         params.Expiry,\n\t}\n\n\t// Assign ownership based on PreAuthKey\n\tif params.PreAuthKey != nil {\n\t\tif params.PreAuthKey.IsTagged() {\n\t\t\t// Tagged nodes are owned by their tags, not a user.\n\t\t\t// UserID is intentionally left nil.\n\t\t\tnodeToRegister.Tags = params.PreAuthKey.Proto().GetAclTags()\n\n\t\t\t// Tagged nodes have key expiry disabled.\n\t\t\tnodeToRegister.Expiry = nil\n\t\t} else {\n\t\t\t// USER-OWNED NODE\n\t\t\tnodeToRegister.UserID = &params.PreAuthKey.User.ID\n\t\t\tnodeToRegister.User = params.PreAuthKey.User\n\t\t\tnodeToRegister.Tags = nil\n\t\t}\n\n\t\tnodeToRegister.AuthKey = params.PreAuthKey\n\t\tnodeToRegister.AuthKeyID = &params.PreAuthKey.ID\n\t} else {\n\t\t// Non-PreAuthKey registration (OIDC, CLI) - always user-owned\n\t\tnodeToRegister.UserID = &params.User.ID\n\t\tnodeToRegister.User = &params.User\n\t\tnodeToRegister.Tags = nil\n\t}\n\n\t// Reject advertise-tags for PreAuthKey registrations early, before any resource allocation.\n\t// PreAuthKey nodes get their tags from the key itself, not from client requests.\n\tif params.PreAuthKey != nil && params.Hostinfo != nil && len(params.Hostinfo.RequestTags) > 0 {\n\t\treturn types.NodeView{}, fmt.Errorf(\"%w %v are invalid or not permitted\", ErrRequestedTagsInvalidOrNotPermitted, params.Hostinfo.RequestTags)\n\t}\n\n\t// Process RequestTags (from tailscale up --advertise-tags) ONLY for non-PreAuthKey registrations.\n\t// Validate early before IP allocation to avoid resource leaks on failure.\n\tif params.PreAuthKey == nil && params.Hostinfo != nil && len(params.Hostinfo.RequestTags) > 0 {\n\t\t// Validate all tags before applying - reject if any tag is not permitted\n\t\trejectedTags := s.validateRequestTags(nodeToRegister.View(), params.Hostinfo.RequestTags)\n\t\tif len(rejectedTags) > 0 {\n\t\t\treturn types.NodeView{}, fmt.Errorf(\"%w %v are invalid or not permitted\", ErrRequestedTagsInvalidOrNotPermitted, rejectedTags)\n\t\t}\n\n\t\t// All tags are approved - apply them\n\t\tapprovedTags := params.Hostinfo.RequestTags\n\t\tif len(approvedTags) > 0 {\n\t\t\tnodeToRegister.Tags = approvedTags\n\t\t\tslices.Sort(nodeToRegister.Tags)\n\t\t\tnodeToRegister.Tags = slices.Compact(nodeToRegister.Tags)\n\n\t\t\t// Node is now tagged, so clear user ownership.\n\t\t\t// Tagged nodes are owned by their tags, not a user.\n\t\t\tnodeToRegister.UserID = nil\n\t\t\tnodeToRegister.User = nil\n\n\t\t\t// Tagged nodes have key expiry disabled.\n\t\t\tnodeToRegister.Expiry = nil\n\n\t\t\tlog.Info().\n\t\t\t\tStr(zf.NodeName, nodeToRegister.Hostname).\n\t\t\t\tStrs(zf.NodeTags, nodeToRegister.Tags).\n\t\t\t\tMsg(\"approved advertise-tags during registration\")\n\t\t}\n\t}\n\n\t// Validate before saving\n\terr := validateNodeOwnership(&nodeToRegister)\n\tif err != nil {\n\t\treturn types.NodeView{}, err\n\t}\n\n\t// Allocate new IPs\n\tipv4, ipv6, err := s.ipAlloc.Next()\n\tif err != nil {\n\t\treturn types.NodeView{}, fmt.Errorf(\"allocating IPs: %w\", err)\n\t}\n\n\tnodeToRegister.IPv4 = ipv4\n\tnodeToRegister.IPv6 = ipv6\n\n\t// Ensure unique given name if not set\n\tif nodeToRegister.GivenName == \"\" {\n\t\tgivenName, err := 
hsdb.EnsureUniqueGivenName(s.db.DB, nodeToRegister.Hostname)\n\t\tif err != nil {\n\t\t\treturn types.NodeView{}, fmt.Errorf(\"ensuring unique given name: %w\", err)\n\t\t}\n\n\t\tnodeToRegister.GivenName = givenName\n\t}\n\n\t// New node - database first to get ID, then NodeStore\n\tsavedNode, err := hsdb.Write(s.db.DB, func(tx *gorm.DB) (*types.Node, error) {\n\t\terr := tx.Save(&nodeToRegister).Error\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"saving node: %w\", err)\n\t\t}\n\n\t\tif params.PreAuthKey != nil && !params.PreAuthKey.Reusable {\n\t\t\terr := hsdb.UsePreAuthKey(tx, params.PreAuthKey)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"using pre auth key: %w\", err)\n\t\t\t}\n\t\t}\n\n\t\treturn &nodeToRegister, nil\n\t})\n\tif err != nil {\n\t\treturn types.NodeView{}, err\n\t}\n\n\t// Add to NodeStore after database creates the ID\n\treturn s.nodeStore.PutNode(*savedNode), nil\n}\n\n// validateRequestTags validates that the requested tags are permitted for the node.\n// This should be called BEFORE UpdateNode to ensure we don't modify NodeStore\n// if validation fails. Returns the list of rejected tags (empty if all valid).\nfunc (s *State) validateRequestTags(node types.NodeView, requestTags []string) []string {\n\t// Empty tags = clear tags, always permitted\n\tif len(requestTags) == 0 {\n\t\treturn nil\n\t}\n\n\tvar rejectedTags []string\n\n\tfor _, tag := range requestTags {\n\t\tif !s.polMan.NodeCanHaveTag(node, tag) {\n\t\t\trejectedTags = append(rejectedTags, tag)\n\t\t}\n\t}\n\n\treturn rejectedTags\n}\n\n// processReauthTags handles tag changes during node re-authentication.\n// It processes RequestTags from the client and updates node tags accordingly.\n// Returns rejected tags (if any) for post-validation error handling.\nfunc (s *State) processReauthTags(\n\tnode *types.Node,\n\trequestTags []string,\n\tuser *types.User,\n\toldTags []string,\n) []string {\n\twasAuthKeyTagged := node.AuthKey != nil && node.AuthKey.IsTagged()\n\n\tlogEvent := log.Debug().\n\t\tUint64(zf.NodeID, uint64(node.ID)).\n\t\tStr(zf.NodeName, node.Hostname).\n\t\tStrs(zf.RequestTags, requestTags).\n\t\tStrs(zf.CurrentTags, node.Tags).\n\t\tBool(zf.IsTagged, node.IsTagged()).\n\t\tBool(zf.WasAuthKeyTagged, wasAuthKeyTagged)\n\tlogEvent.Msg(\"processing RequestTags during reauth\")\n\n\t// Empty RequestTags means untag node (transition to user-owned)\n\tif len(requestTags) == 0 {\n\t\tif node.IsTagged() {\n\t\t\tlog.Info().\n\t\t\t\tUint64(zf.NodeID, uint64(node.ID)).\n\t\t\t\tStr(zf.NodeName, node.Hostname).\n\t\t\t\tStrs(zf.RemovedTags, node.Tags).\n\t\t\t\tStr(zf.UserName, user.Name).\n\t\t\t\tBool(zf.WasAuthKeyTagged, wasAuthKeyTagged).\n\t\t\t\tMsg(\"Reauth: removing all tags, returning node ownership to user\")\n\n\t\t\tnode.Tags = []string{}\n\t\t\tnode.UserID = &user.ID\n\t\t\tnode.User = user\n\t\t}\n\n\t\treturn nil\n\t}\n\n\t// Non-empty RequestTags: validate and apply\n\tvar approvedTags, rejectedTags []string\n\n\tfor _, tag := range requestTags {\n\t\tif s.polMan.NodeCanHaveTag(node.View(), tag) {\n\t\t\tapprovedTags = append(approvedTags, tag)\n\t\t} else {\n\t\t\trejectedTags = append(rejectedTags, tag)\n\t\t}\n\t}\n\n\tif len(rejectedTags) > 0 {\n\t\tlog.Warn().\n\t\t\tUint64(zf.NodeID, uint64(node.ID)).\n\t\t\tStr(zf.NodeName, node.Hostname).\n\t\t\tStrs(zf.RejectedTags, rejectedTags).\n\t\t\tMsg(\"Reauth: requested tags are not permitted\")\n\n\t\treturn rejectedTags\n\t}\n\n\tif len(approvedTags) > 0 {\n\t\tslices.Sort(approvedTags)\n\t\tapprovedTags = 
slices.Compact(approvedTags)\n\n\t\twasTagged := node.IsTagged()\n\t\tnode.Tags = approvedTags\n\n\t\t// Tagged nodes are owned by their tags, not a user.\n\t\tnode.UserID = nil\n\t\tnode.User = nil\n\n\t\tif !wasTagged {\n\t\t\tlog.Info().\n\t\t\t\tUint64(zf.NodeID, uint64(node.ID)).\n\t\t\t\tStr(zf.NodeName, node.Hostname).\n\t\t\t\tStrs(zf.NewTags, approvedTags).\n\t\t\t\tStr(zf.OldUser, user.Name).\n\t\t\t\tMsg(\"Reauth: applying tags, transferring node to tagged-devices\")\n\t\t} else {\n\t\t\tlog.Info().\n\t\t\t\tUint64(zf.NodeID, uint64(node.ID)).\n\t\t\t\tStr(zf.NodeName, node.Hostname).\n\t\t\t\tStrs(zf.OldTags, oldTags).\n\t\t\t\tStrs(zf.NewTags, approvedTags).\n\t\t\t\tMsg(\"Reauth: updating tags on already-tagged node\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// HandleNodeFromAuthPath handles node registration through authentication flow (like OIDC).\nfunc (s *State) HandleNodeFromAuthPath(\n\tauthID types.AuthID,\n\tuserID types.UserID,\n\texpiry *time.Time,\n\tregistrationMethod string,\n) (types.NodeView, change.Change, error) {\n\t// Get the registration entry from cache\n\tregEntry, ok := s.GetAuthCacheEntry(authID)\n\tif !ok {\n\t\treturn types.NodeView{}, change.Change{}, hsdb.ErrNodeNotFoundRegistrationCache\n\t}\n\n\t// Get the user\n\tuser, err := s.db.GetUserByID(userID)\n\tif err != nil {\n\t\treturn types.NodeView{}, change.Change{}, fmt.Errorf(\"finding user: %w\", err)\n\t}\n\n\t// Ensure we have a valid hostname from the registration cache entry\n\thostname := util.EnsureHostname(\n\t\tregEntry.Node().Hostinfo(),\n\t\tregEntry.Node().MachineKey().String(),\n\t\tregEntry.Node().NodeKey().String(),\n\t)\n\n\t// Ensure we have valid hostinfo\n\thostinfo := &tailcfg.Hostinfo{}\n\tif regEntry.Node().Hostinfo().Valid() {\n\t\thostinfo = regEntry.Node().Hostinfo().AsStruct()\n\t}\n\n\thostinfo.Hostname = hostname\n\n\tlogHostinfoValidation(\n\t\tregEntry.Node(),\n\t\tuser.Name,\n\t\thostname,\n\t)\n\n\t// Lookup existing nodes\n\tmachineKey := regEntry.Node().MachineKey()\n\texistingNodeSameUser, _ := s.nodeStore.GetNodeByMachineKey(machineKey, types.UserID(user.ID))\n\texistingNodeAnyUser, _ := s.nodeStore.GetNodeByMachineKeyAnyUser(machineKey)\n\n\t// Named conditions - describe WHAT we found, not HOW we check it\n\tnodeExistsForSameUser := existingNodeSameUser.Valid()\n\tnodeExistsForAnyUser := existingNodeAnyUser.Valid()\n\texistingNodeIsTagged := nodeExistsForAnyUser && existingNodeAnyUser.IsTagged()\n\texistingNodeOwnedByOtherUser := nodeExistsForAnyUser &&\n\t\t!existingNodeIsTagged &&\n\t\texistingNodeAnyUser.UserID().Get() != user.ID\n\n\t// Create logger with common fields for all auth operations\n\tlogger := log.With().\n\t\tStr(zf.RegistrationID, authID.String()).\n\t\tStr(zf.UserName, user.Name).\n\t\tStr(zf.MachineKey, machineKey.ShortString()).\n\t\tStr(zf.Method, registrationMethod).\n\t\tLogger()\n\n\t// Common params for update operations\n\tupdateParams := authNodeUpdateParams{\n\t\tRegEntry:       regEntry,\n\t\tValidHostinfo:  hostinfo,\n\t\tHostname:       hostname,\n\t\tUser:           user,\n\t\tExpiry:         expiry,\n\t\tRegisterMethod: registrationMethod,\n\t}\n\n\tvar finalNode types.NodeView\n\n\tif nodeExistsForSameUser {\n\t\tupdateParams.ExistingNode = existingNodeSameUser\n\n\t\tfinalNode, err = s.applyAuthNodeUpdate(updateParams)\n\t\tif err != nil {\n\t\t\treturn types.NodeView{}, change.Change{}, err\n\t\t}\n\t} else if existingNodeIsTagged {\n\t\tupdateParams.ExistingNode = existingNodeAnyUser\n\t\tupdateParams.IsConvertFromTag = 
true\n\n\t\tfinalNode, err = s.applyAuthNodeUpdate(updateParams)\n\t\tif err != nil {\n\t\t\treturn types.NodeView{}, change.Change{}, err\n\t\t}\n\t} else if existingNodeOwnedByOtherUser {\n\t\toldUser := existingNodeAnyUser.User()\n\n\t\tlogger.Info().\n\t\t\tStr(zf.ExistingNodeName, existingNodeAnyUser.Hostname()).\n\t\t\tUint64(zf.ExistingNodeID, existingNodeAnyUser.ID().Uint64()).\n\t\t\tStr(zf.OldUser, oldUser.Name()).\n\t\t\tMsg(\"Creating new node for different user (same machine key exists for another user)\")\n\n\t\tfinalNode, err = s.createNewNodeFromAuth(\n\t\t\tlogger, user, regEntry, hostname, hostinfo,\n\t\t\texpiry, registrationMethod, existingNodeAnyUser,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn types.NodeView{}, change.Change{}, err\n\t\t}\n\t} else {\n\t\tfinalNode, err = s.createNewNodeFromAuth(\n\t\t\tlogger, user, regEntry, hostname, hostinfo,\n\t\t\texpiry, registrationMethod, types.NodeView{},\n\t\t)\n\t\tif err != nil {\n\t\t\treturn types.NodeView{}, change.Change{}, err\n\t\t}\n\t}\n\n\t// Signal to waiting clients\n\tregEntry.FinishAuth(types.AuthVerdict{Node: finalNode})\n\n\t// Delete from registration cache\n\ts.authCache.Delete(authID)\n\n\t// Update policy managers\n\tusersChange, err := s.updatePolicyManagerUsers()\n\tif err != nil {\n\t\treturn finalNode, change.NodeAdded(finalNode.ID()), fmt.Errorf(\"updating policy manager users: %w\", err)\n\t}\n\n\tnodesChange, err := s.updatePolicyManagerNodes()\n\tif err != nil {\n\t\treturn finalNode, change.NodeAdded(finalNode.ID()), fmt.Errorf(\"updating policy manager nodes: %w\", err)\n\t}\n\n\tvar c change.Change\n\tif !usersChange.IsEmpty() || !nodesChange.IsEmpty() {\n\t\tc = change.PolicyChange()\n\t} else {\n\t\tc = change.NodeAdded(finalNode.ID())\n\t}\n\n\treturn finalNode, c, nil\n}\n\n// createNewNodeFromAuth creates a new node during auth callback.\n// This is used for both new registrations and when a machine already has a node\n// for a different user.\nfunc (s *State) createNewNodeFromAuth(\n\tlogger zerolog.Logger,\n\tuser *types.User,\n\tregEntry *types.AuthRequest,\n\thostname string,\n\tvalidHostinfo *tailcfg.Hostinfo,\n\texpiry *time.Time,\n\tregistrationMethod string,\n\texistingNodeForNetinfo types.NodeView,\n) (types.NodeView, error) {\n\tlogger.Debug().\n\t\tInterface(\"expiry\", expiry).\n\t\tMsg(\"Registering new node from auth callback\")\n\n\treturn s.createAndSaveNewNode(newNodeParams{\n\t\tUser:                   *user,\n\t\tMachineKey:             regEntry.Node().MachineKey(),\n\t\tNodeKey:                regEntry.Node().NodeKey(),\n\t\tDiscoKey:               regEntry.Node().DiscoKey(),\n\t\tHostname:               hostname,\n\t\tHostinfo:               validHostinfo,\n\t\tEndpoints:              regEntry.Node().Endpoints().AsSlice(),\n\t\tExpiry:                 cmp.Or(expiry, regEntry.Node().Expiry().Clone()),\n\t\tRegisterMethod:         registrationMethod,\n\t\tExistingNodeForNetinfo: existingNodeForNetinfo,\n\t})\n}\n\n// findExistingNodeForPAK looks up an existing node by machine key,\n// matching the PAK's ownership. For user-owned keys it checks the\n// user's ID; for tagged keys it checks UserID(0) since tagged nodes\n// have no owning user.\nfunc (s *State) findExistingNodeForPAK(\n\tmachineKey key.MachinePublic,\n\tpak *types.PreAuthKey,\n) (types.NodeView, bool) {\n\tif pak.User != nil {\n\t\tnode, exists := s.nodeStore.GetNodeByMachineKey(machineKey, types.UserID(pak.User.ID))\n\t\tif exists {\n\t\t\treturn node, true\n\t\t}\n\t}\n\n\t// Tagged nodes have nil UserID, so they are indexed under UserID(0)\n\t// in nodesByMachineKey. Check there for tagged PAK re-registration.\n\tif pak.IsTagged() {\n\t\treturn s.nodeStore.GetNodeByMachineKey(machineKey, 0)\n\t}\n\n\treturn types.NodeView{}, false\n}\n\n// HandleNodeFromPreAuthKey handles node registration using a pre-authentication key.\nfunc (s *State) HandleNodeFromPreAuthKey(\n\tregReq tailcfg.RegisterRequest,\n\tmachineKey key.MachinePublic,\n) (types.NodeView, change.Change, error) {\n\tpak, err := s.GetPreAuthKey(regReq.Auth.AuthKey)\n\tif err != nil {\n\t\treturn types.NodeView{}, change.Change{}, err\n\t}\n\n\t// Helper to get username for logging (handles nil User for tags-only keys)\n\tpakUsername := func() string {\n\t\tif pak.User != nil {\n\t\t\treturn pak.User.Username()\n\t\t}\n\n\t\treturn types.TaggedDevices.Name\n\t}\n\n\texistingNodeSameUser, existsSameUser := s.findExistingNodeForPAK(machineKey, pak)\n\n\t// For existing nodes, skip validation if:\n\t// 1. MachineKey matches (cryptographic proof of machine identity)\n\t// 2. User/tag ownership matches (from the PAK being used)\n\t// 3. Not a NodeKey rotation (rotation requires fresh validation)\n\t//\n\t// Security: MachineKey is the cryptographic identity. If someone has the MachineKey,\n\t// they control the machine. The PAK was only needed to authorize initial join.\n\t// We don't check which specific PAK was used originally because:\n\t// - Container restarts may use different PAKs (e.g., env var changed)\n\t// - Original PAK may be deleted\n\t// - MachineKey + ownership is sufficient to prove this is the same node\n\tisExistingNodeReregistering := existsSameUser && existingNodeSameUser.Valid()\n\n\t// Check if this is a NodeKey rotation (different NodeKey)\n\tisNodeKeyRotation := existsSameUser && existingNodeSameUser.Valid() &&\n\t\texistingNodeSameUser.NodeKey() != regReq.NodeKey\n\n\tif isExistingNodeReregistering && !isNodeKeyRotation {\n\t\t// Existing node re-registering with same NodeKey: skip validation.\n\t\t// Pre-auth keys are only needed for initial authentication. 
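Requiring a valid key here\n\t\t// would break nodes whose original key has since expired or been deleted. 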
Critical for\n\t\t// containers that run \"tailscale up --authkey=KEY\" on every restart.\n\t\tlog.Debug().\n\t\t\tCaller().\n\t\t\tUint64(zf.NodeID, existingNodeSameUser.ID().Uint64()).\n\t\t\tStr(zf.NodeName, existingNodeSameUser.Hostname()).\n\t\t\tStr(zf.MachineKey, machineKey.ShortString()).\n\t\t\tStr(zf.NodeKeyExisting, existingNodeSameUser.NodeKey().ShortString()).\n\t\t\tStr(zf.NodeKeyRequest, regReq.NodeKey.ShortString()).\n\t\t\tUint64(zf.AuthKeyID, pak.ID).\n\t\t\tBool(zf.AuthKeyUsed, pak.Used).\n\t\t\tBool(zf.AuthKeyExpired, pak.Expiration != nil && pak.Expiration.Before(time.Now())).\n\t\t\tBool(zf.AuthKeyReusable, pak.Reusable).\n\t\t\tBool(zf.NodeKeyRotation, isNodeKeyRotation).\n\t\t\tMsg(\"Existing node re-registering with same NodeKey and auth key, skipping validation\")\n\t} else {\n\t\t// New node or NodeKey rotation: require valid auth key.\n\t\terr = pak.Validate()\n\t\tif err != nil {\n\t\t\treturn types.NodeView{}, change.Change{}, err\n\t\t}\n\t}\n\n\t// Ensure we have a valid hostname - handle nil/empty cases\n\thostname := util.EnsureHostname(\n\t\tregReq.Hostinfo.View(),\n\t\tmachineKey.String(),\n\t\tregReq.NodeKey.String(),\n\t)\n\n\t// Ensure we have valid hostinfo\n\tvalidHostinfo := cmp.Or(regReq.Hostinfo, &tailcfg.Hostinfo{})\n\tvalidHostinfo.Hostname = hostname\n\n\tlog.Debug().\n\t\tCaller().\n\t\tStr(zf.NodeName, hostname).\n\t\tStr(zf.MachineKey, machineKey.ShortString()).\n\t\tStr(zf.NodeKey, regReq.NodeKey.ShortString()).\n\t\tStr(zf.UserName, pakUsername()).\n\t\tMsg(\"Registering node with pre-auth key\")\n\n\tvar finalNode types.NodeView\n\n\t// If this node exists for this user, update the node in place.\n\t// Note: For tags-only keys (pak.User == nil), existsSameUser is always false.\n\tif existsSameUser && existingNodeSameUser.Valid() {\n\t\tlog.Trace().\n\t\t\tCaller().\n\t\t\tStr(zf.NodeName, existingNodeSameUser.Hostname()).\n\t\t\tUint64(zf.NodeID, existingNodeSameUser.ID().Uint64()).\n\t\t\tStr(zf.MachineKey, machineKey.ShortString()).\n\t\t\tStr(zf.NodeKey, existingNodeSameUser.NodeKey().ShortString()).\n\t\t\tStr(zf.UserName, pakUsername()).\n\t\t\tMsg(\"Node re-registering with existing machine key and user, updating in place\")\n\n\t\t// Update existing node - NodeStore first, then database\n\t\tupdatedNodeView, ok := s.nodeStore.UpdateNode(existingNodeSameUser.ID(), func(node *types.Node) {\n\t\t\tnode.NodeKey = regReq.NodeKey\n\t\t\tnode.Hostname = hostname\n\n\t\t\t// TODO(kradalby): We should ensure we use the same hostinfo and node merge semantics\n\t\t\t// when a node re-registers as we do when it sends a map request (UpdateNodeFromMapRequest).\n\n\t\t\t// Preserve NetInfo from existing node when re-registering\n\t\t\tnode.Hostinfo = validHostinfo\n\t\t\tnode.Hostinfo.NetInfo = preserveNetInfo(existingNodeSameUser, existingNodeSameUser.ID(), validHostinfo)\n\n\t\t\tnode.RegisterMethod = util.RegisterMethodAuthKey\n\n\t\t\t// Tags from PreAuthKey are only applied during initial registration.\n\t\t\t// On re-registration the node keeps its existing tags and ownership.\n\t\t\t// Only update AuthKey reference.\n\t\t\tnode.AuthKey = pak\n\t\t\tnode.AuthKeyID = &pak.ID\n\t\t\t// Do NOT reset IsOnline here. Online status is managed exclusively by\n\t\t\t// Connect()/Disconnect() in the poll session lifecycle. 
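Peers learn a node's\n\t\t\t// online state from headscale's map responses. 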
Resetting it\n\t\t\t// during re-registration causes a false offline blip to peers.\n\t\t\tnode.LastSeen = new(time.Now())\n\n\t\t\t// Tagged nodes keep their existing expiry (disabled).\n\t\t\t// User-owned nodes update expiry from the client request.\n\t\t\tif !node.IsTagged() {\n\t\t\t\tnode.Expiry = &regReq.Expiry\n\t\t\t}\n\t\t})\n\n\t\tif !ok {\n\t\t\treturn types.NodeView{}, change.Change{}, fmt.Errorf(\"%w: %d\", ErrNodeNotInNodeStore, existingNodeSameUser.ID())\n\t\t}\n\n\t\t_, err = hsdb.Write(s.db.DB, func(tx *gorm.DB) (*types.Node, error) {\n\t\t\t// Use Updates() to preserve fields not modified by UpdateNode.\n\t\t\t// Omit AuthKeyID/AuthKey to prevent stale PreAuthKey references from causing FK errors.\n\t\t\terr := tx.Omit(\"AuthKeyID\", \"AuthKey\").Updates(updatedNodeView.AsStruct()).Error\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"saving node: %w\", err)\n\t\t\t}\n\n\t\t\tif !pak.Reusable {\n\t\t\t\terr = hsdb.UsePreAuthKey(tx, pak)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"using pre auth key: %w\", err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn nil, nil //nolint:nilnil // intentional: transaction success\n\t\t})\n\t\tif err != nil {\n\t\t\treturn types.NodeView{}, change.Change{}, fmt.Errorf(\"writing node to database: %w\", err)\n\t\t}\n\n\t\tlog.Trace().\n\t\t\tCaller().\n\t\t\tStr(zf.NodeName, updatedNodeView.Hostname()).\n\t\t\tUint64(zf.NodeID, updatedNodeView.ID().Uint64()).\n\t\t\tStr(zf.MachineKey, machineKey.ShortString()).\n\t\t\tStr(zf.NodeKey, updatedNodeView.NodeKey().ShortString()).\n\t\t\tStr(zf.UserName, pakUsername()).\n\t\t\tMsg(\"Node re-authorized\")\n\n\t\tfinalNode = updatedNodeView\n\t} else {\n\t\t// Node does not exist for this user with this machine key\n\t\t// Check if node exists with this machine key for a different user\n\t\texistingNodeAnyUser, existsAnyUser := s.nodeStore.GetNodeByMachineKeyAnyUser(machineKey)\n\n\t\t// For user-owned keys, check if node exists for a different user.\n\t\t// Tags-only keys (pak.User == nil) skip this check.\n\t\t// Tagged nodes are also skipped since they have no owning user.\n\t\texistingIsUserOwned := existsAnyUser &&\n\t\t\texistingNodeAnyUser.Valid() &&\n\t\t\t!existingNodeAnyUser.IsTagged()\n\t\tbelongsToDifferentUser := pak.User != nil &&\n\t\t\texistingIsUserOwned &&\n\t\t\texistingNodeAnyUser.UserID().Get() != pak.User.ID\n\n\t\tif belongsToDifferentUser {\n\t\t\t// Node exists but belongs to a different user.\n\t\t\t// Create a new node for the new user (do not transfer).\n\t\t\toldUserName := existingNodeAnyUser.User().Name()\n\n\t\t\tlog.Info().\n\t\t\t\tCaller().\n\t\t\t\tStr(zf.ExistingNodeName, existingNodeAnyUser.Hostname()).\n\t\t\t\tUint64(zf.ExistingNodeID, existingNodeAnyUser.ID().Uint64()).\n\t\t\t\tStr(zf.MachineKey, machineKey.ShortString()).\n\t\t\t\tStr(zf.OldUser, oldUserName).\n\t\t\t\tStr(zf.NewUser, pakUsername()).\n\t\t\t\tMsg(\"Creating new node for different user (same machine key exists for another user)\")\n\t\t}\n\n\t\t// This is a new node - create it\n\t\t// For user-owned keys: create for the user\n\t\t// For tags-only keys: create as tagged node (createAndSaveNewNode handles this via PreAuthKey)\n\n\t\t// Create and save new node\n\t\t// Note: For tags-only keys, User is empty but createAndSaveNewNode uses PreAuthKey for ownership\n\t\tvar pakUser types.User\n\t\tif pak.User != nil {\n\t\t\tpakUser = *pak.User\n\t\t}\n\n\t\tvar err error\n\n\t\tfinalNode, err = s.createAndSaveNewNode(newNodeParams{\n\t\t\tUser:                   
pakUser,\n\t\t\tMachineKey:             machineKey,\n\t\t\tNodeKey:                regReq.NodeKey,\n\t\t\tDiscoKey:               key.DiscoPublic{}, // DiscoKey not available in RegisterRequest\n\t\t\tHostname:               hostname,\n\t\t\tHostinfo:               validHostinfo,\n\t\t\tEndpoints:              nil, // Endpoints not available in RegisterRequest\n\t\t\tExpiry:                 &regReq.Expiry,\n\t\t\tRegisterMethod:         util.RegisterMethodAuthKey,\n\t\t\tPreAuthKey:             pak,\n\t\t\tExistingNodeForNetinfo: cmp.Or(existingNodeAnyUser, types.NodeView{}),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn types.NodeView{}, change.Change{}, fmt.Errorf(\"creating new node: %w\", err)\n\t\t}\n\t}\n\n\t// Update policy managers\n\tusersChange, err := s.updatePolicyManagerUsers()\n\tif err != nil {\n\t\treturn finalNode, change.NodeAdded(finalNode.ID()), fmt.Errorf(\"updating policy manager users: %w\", err)\n\t}\n\n\tnodesChange, err := s.updatePolicyManagerNodes()\n\tif err != nil {\n\t\treturn finalNode, change.NodeAdded(finalNode.ID()), fmt.Errorf(\"updating policy manager nodes: %w\", err)\n\t}\n\n\tvar c change.Change\n\tif !usersChange.IsEmpty() || !nodesChange.IsEmpty() {\n\t\tc = change.PolicyChange()\n\t} else {\n\t\tc = change.NodeAdded(finalNode.ID())\n\t}\n\n\treturn finalNode, c, nil\n}\n\n// updatePolicyManagerUsers refreshes the policy manager with current user data.\n// It returns a policy change when the policy changed and notifications should be sent.\n// TODO(kradalby): This is a temporary stepping stone, ultimately we should\n// have the list already available so it could go much quicker. Alternatively\n// the policy manager could have a remove or add list for users.\nfunc (s *State) updatePolicyManagerUsers() (change.Change, error) {\n\tusers, err := s.ListAllUsers()\n\tif err != nil {\n\t\treturn change.Change{}, fmt.Errorf(\"listing users for policy update: %w\", err)\n\t}\n\n\tlog.Debug().Caller().Int(\"user.count\", len(users)).Msg(\"policy manager user update initiated because user list modification detected\")\n\n\tchanged, err := s.polMan.SetUsers(users)\n\tif err != nil {\n\t\treturn change.Change{}, fmt.Errorf(\"updating policy manager users: %w\", err)\n\t}\n\n\tlog.Debug().Caller().Bool(\"policy.changed\", changed).Msg(\"policy manager user update completed because SetUsers operation finished\")\n\n\tif changed {\n\t\treturn change.PolicyChange(), nil\n\t}\n\n\treturn change.Change{}, nil\n}\n\n// UpdatePolicyManagerUsersForTest updates the policy manager's user cache.\n// This is exposed for testing purposes to sync the policy manager after\n// creating test users via CreateUserForTest().\nfunc (s *State) UpdatePolicyManagerUsersForTest() error {\n\t_, err := s.updatePolicyManagerUsers()\n\treturn err\n}\n\n// updatePolicyManagerNodes refreshes the policy manager with current node data.\n// It returns a policy change when the policy changed and notifications should be sent.\n// TODO(kradalby): This is a temporary stepping stone, ultimately we should\n// have the list already available so it could go much quicker. Alternatively\n// the policy manager could have a remove or add list for nodes.\nfunc (s *State) updatePolicyManagerNodes() (change.Change, error) {\n\tnodes := s.ListNodes()\n\n\tchanged, err := s.polMan.SetNodes(nodes)\n\tif err != nil {\n\t\treturn change.Change{}, fmt.Errorf(\"updating policy manager nodes: %w\", err)\n\t}\n\n\tif changed {\n\t\t// Rebuild peer maps because policy-affecting node changes (tags, user, IPs)\n\t\t// affect ACL visibility. Without this, cached peer relationships use stale data.\n\t\ts.nodeStore.RebuildPeerMaps()\n\t\treturn change.PolicyChange(), nil\n\t}\n\n\treturn change.Change{}, nil\n}\n\n// PingDB checks if the database connection is healthy.\nfunc (s *State) PingDB(ctx context.Context) error {\n\treturn s.db.PingDB(ctx)\n}\n\n// autoApproveNodes mass approves routes on all nodes. It is _only_ intended for\n// use when the policy is replaced. It does not send or report any changes or\n// updates, as we send full updates after replacing the policy.\n// TODO(kradalby): This is kind of messy, maybe this is another +1\n// for an event bus. See example comments here.\nfunc (s *State) autoApproveNodes() ([]change.Change, error) {\n\tnodes := s.ListNodes()\n\n\t// Approve routes concurrently; this should make it likely\n\t// that the writes end in the same batch in the nodestore write.\n\tvar (\n\t\terrg errgroup.Group\n\t\tcs   []change.Change\n\t\tmu   sync.Mutex\n\t)\n\tfor _, nv := range nodes.All() {\n\t\terrg.Go(func() error {\n\t\t\tapproved, changed := policy.ApproveRoutesWithPolicy(s.polMan, nv, nv.ApprovedRoutes().AsSlice(), nv.AnnouncedRoutes())\n\t\t\tif changed {\n\t\t\t\tlog.Debug().\n\t\t\t\t\tUint64(zf.NodeID, nv.ID().Uint64()).\n\t\t\t\t\tStr(zf.NodeName, nv.Hostname()).\n\t\t\t\t\tStrs(zf.RoutesApprovedOld, util.PrefixesToString(nv.ApprovedRoutes().AsSlice())).\n\t\t\t\t\tStrs(zf.RoutesApprovedNew, util.PrefixesToString(approved)).\n\t\t\t\t\tMsg(\"Routes auto-approved by policy\")\n\n\t\t\t\t_, c, err := s.SetApprovedRoutes(nv.ID(), approved)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tmu.Lock()\n\n\t\t\t\tcs = append(cs, c)\n\n\t\t\t\tmu.Unlock()\n\t\t\t}\n\n\t\t\treturn nil\n\t\t})\n\t}\n\n\terr := errg.Wait()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cs, nil\n}\n\n// UpdateNodeFromMapRequest processes a MapRequest and updates the node.\n// TODO(kradalby): This is essentially a patch update that could be sent directly to nodes,\n// which means we could shortcut the whole change thing if there are no other important updates.\n// When a field is added to this function, remember to also add it to:\n// - node.PeerChangeFromMapRequest\n// - node.ApplyPeerChange\n// - logTracePeerChange in poll.go.\nfunc (s *State) UpdateNodeFromMapRequest(id types.NodeID, req tailcfg.MapRequest) (change.Change, error) {\n\tlog.Trace().\n\t\tCaller().\n\t\tUint64(zf.NodeID, id.Uint64()).\n\t\tInterface(\"request\", req).\n\t\tMsg(\"Processing MapRequest for node\")\n\n\tvar (\n\t\trouteChange        bool\n\t\thostinfoChanged    bool\n\t\tneedsRouteApproval bool\n\t\tautoApprovedRoutes []netip.Prefix\n\t\tendpointChanged    bool\n\t\tderpChanged        bool\n\t)\n\t// We need to ensure we update the node as it is in the NodeStore at\n\t// the time of the request.\n\tupdatedNode, ok := s.nodeStore.UpdateNode(id, func(currentNode *types.Node) {\n\t\tpeerChange := 
currentNode.PeerChangeFromMapRequest(req)\n\n\t\t// Track what specifically changed\n\t\tendpointChanged = peerChange.Endpoints != nil\n\t\tderpChanged = peerChange.DERPRegion != 0\n\t\thostinfoChanged = !hostinfoEqual(currentNode.View(), req.Hostinfo)\n\n\t\t// Get the correct NetInfo to use\n\t\tnetInfo := netInfoFromMapRequest(id, currentNode.Hostinfo, req.Hostinfo)\n\t\tif req.Hostinfo != nil {\n\t\t\treq.Hostinfo.NetInfo = netInfo\n\t\t} else {\n\t\t\treq.Hostinfo = &tailcfg.Hostinfo{NetInfo: netInfo}\n\t\t}\n\n\t\t// Re-check hostinfoChanged after potential NetInfo preservation\n\t\thostinfoChanged = !hostinfoEqual(currentNode.View(), req.Hostinfo)\n\n\t\t// If there are no changes and nothing to save,\n\t\t// return early.\n\t\tif peerChangeEmpty(peerChange) && !hostinfoChanged {\n\t\t\treturn\n\t\t}\n\n\t\t// Calculate route approval before NodeStore update to avoid calling View() inside callback\n\t\tvar hasNewRoutes bool\n\t\tif hi := req.Hostinfo; hi != nil {\n\t\t\thasNewRoutes = len(hi.RoutableIPs) > 0\n\t\t}\n\n\t\tneedsRouteApproval = hostinfoChanged && (routesChanged(currentNode.View(), req.Hostinfo) || (hasNewRoutes && len(currentNode.ApprovedRoutes) == 0))\n\t\tif needsRouteApproval {\n\t\t\t// Extract announced routes from request\n\t\t\tvar announcedRoutes []netip.Prefix\n\t\t\tif req.Hostinfo != nil {\n\t\t\t\tannouncedRoutes = req.Hostinfo.RoutableIPs\n\t\t\t}\n\n\t\t\t// Apply policy-based auto-approval if routes are announced\n\t\t\tif len(announcedRoutes) > 0 {\n\t\t\t\tautoApprovedRoutes, routeChange = policy.ApproveRoutesWithPolicy(\n\t\t\t\t\ts.polMan,\n\t\t\t\t\tcurrentNode.View(),\n\t\t\t\t\tcurrentNode.ApprovedRoutes,\n\t\t\t\t\tannouncedRoutes,\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\n\t\t// Log when routes change but approval doesn't\n\t\tif hostinfoChanged && !routeChange {\n\t\t\tif hi := req.Hostinfo; hi != nil {\n\t\t\t\tif routesChanged(currentNode.View(), hi) {\n\t\t\t\t\tlog.Debug().\n\t\t\t\t\t\tCaller().\n\t\t\t\t\t\tUint64(zf.NodeID, id.Uint64()).\n\t\t\t\t\t\tStrs(zf.OldAnnouncedRoutes, util.PrefixesToString(currentNode.AnnouncedRoutes())).\n\t\t\t\t\t\tStrs(zf.NewAnnouncedRoutes, util.PrefixesToString(hi.RoutableIPs)).\n\t\t\t\t\t\tStrs(zf.ApprovedRoutes, util.PrefixesToString(currentNode.ApprovedRoutes)).\n\t\t\t\t\t\tBool(zf.RouteChanged, routeChange).\n\t\t\t\t\t\tMsg(\"announced routes changed but approved routes did not\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tcurrentNode.ApplyPeerChange(&peerChange)\n\n\t\tif hostinfoChanged {\n\t\t\t// The node might not set NetInfo if it has not changed, and if\n\t\t\t// the full HostInfo object is overwritten, that information is lost.\n\t\t\t// If there is no NetInfo, keep the previous one.\n\t\t\t// From 1.66 the client only sends it if changed:\n\t\t\t// https://github.com/tailscale/tailscale/commit/e1011f138737286ecf5123ff887a7a5800d129a2\n\t\t\t// TODO(kradalby): evaluate if we need a better comparison of hostinfo\n\t\t\t// before we take the changes.\n\t\t\t// NetInfo preservation has already been handled above, before the early-return check.\n\t\t\tcurrentNode.Hostinfo = req.Hostinfo\n\t\t\tcurrentNode.ApplyHostnameFromHostInfo(req.Hostinfo)\n\n\t\t\tif routeChange {\n\t\t\t\t// Apply pre-calculated route approval\n\t\t\t\t// Always apply the route approval result to ensure consistency,\n\t\t\t\t// regardless of whether the policy evaluation detected changes.\n\t\t\t\t// This fixes the bug where routes weren't properly cleared when\n\t\t\t\t// auto-approvers were removed from the 
policy.\n\t\t\t\tlog.Info().\n\t\t\t\t\tUint64(zf.NodeID, id.Uint64()).\n\t\t\t\t\tStrs(zf.OldApprovedRoutes, util.PrefixesToString(currentNode.ApprovedRoutes)).\n\t\t\t\t\tStrs(zf.NewApprovedRoutes, util.PrefixesToString(autoApprovedRoutes)).\n\t\t\t\t\tBool(zf.RouteChanged, routeChange).\n\t\t\t\t\tMsg(\"applying route approval results\")\n\t\t\t}\n\t\t}\n\t})\n\n\tif !ok {\n\t\treturn change.Change{}, fmt.Errorf(\"%w: %d\", ErrNodeNotInNodeStore, id)\n\t}\n\n\tif routeChange {\n\t\tlog.Debug().\n\t\t\tUint64(zf.NodeID, id.Uint64()).\n\t\t\tStrs(zf.AutoApprovedRoutes, util.PrefixesToString(autoApprovedRoutes)).\n\t\t\tMsg(\"Persisting auto-approved routes from MapRequest\")\n\n\t\t// SetApprovedRoutes will update both database and PrimaryRoutes table\n\t\t_, c, err := s.SetApprovedRoutes(id, autoApprovedRoutes)\n\t\tif err != nil {\n\t\t\treturn change.Change{}, fmt.Errorf(\"persisting auto-approved routes: %w\", err)\n\t\t}\n\n\t\t// If SetApprovedRoutes resulted in a policy change, return it\n\t\tif !c.IsEmpty() {\n\t\t\treturn c, nil\n\t\t}\n\t} // Continue with the rest of the processing using the updated node\n\n\t// Handle route changes after NodeStore update.\n\t// Update routes if announced routes changed (even if approved routes stayed the same)\n\t// because SubnetRoutes is the intersection of announced AND approved routes.\n\tnodeRouteChange := s.maybeUpdateNodeRoutes(id, updatedNode, hostinfoChanged, needsRouteApproval, routeChange, req.Hostinfo)\n\n\t_, policyChange, err := s.persistNodeToDB(updatedNode)\n\tif err != nil {\n\t\treturn change.Change{}, fmt.Errorf(\"saving to database: %w\", err)\n\t}\n\n\tif policyChange.IsFull() {\n\t\treturn policyChange, nil\n\t}\n\n\tif !nodeRouteChange.IsEmpty() {\n\t\treturn nodeRouteChange, nil\n\t}\n\n\t// Determine the most specific change type based on what actually changed.\n\t// This allows us to send lightweight patch updates instead of full map responses.\n\treturn buildMapRequestChangeResponse(id, updatedNode, hostinfoChanged, endpointChanged, derpChanged)\n}\n\n// buildMapRequestChangeResponse determines the appropriate response type for a MapRequest update.\n// Hostinfo changes require a full update, while endpoint/DERP changes can use lightweight patches.\nfunc buildMapRequestChangeResponse(\n\tid types.NodeID,\n\tnode types.NodeView,\n\thostinfoChanged, endpointChanged, derpChanged bool,\n) (change.Change, error) {\n\t// Hostinfo changes require NodeAdded (full update) as they may affect many fields.\n\tif hostinfoChanged {\n\t\treturn change.NodeAdded(id), nil\n\t}\n\n\t// Return specific change types for endpoint and/or DERP updates.\n\tif endpointChanged || derpChanged {\n\t\tpatch := &tailcfg.PeerChange{NodeID: id.NodeID()}\n\n\t\tif endpointChanged {\n\t\t\tpatch.Endpoints = node.Endpoints().AsSlice()\n\t\t}\n\n\t\tif derpChanged {\n\t\t\tif hi := node.Hostinfo(); hi.Valid() {\n\t\t\t\tif ni := hi.NetInfo(); ni.Valid() {\n\t\t\t\t\tpatch.DERPRegion = ni.PreferredDERP()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn change.EndpointOrDERPUpdate(id, patch), nil\n\t}\n\n\treturn change.NodeAdded(id), nil\n}\n\nfunc hostinfoEqual(oldNode types.NodeView, newHI *tailcfg.Hostinfo) bool {\n\tif !oldNode.Valid() && newHI == nil {\n\t\treturn true\n\t}\n\n\tif !oldNode.Valid() || newHI == nil {\n\t\treturn false\n\t}\n\n\told := oldNode.AsStruct().Hostinfo\n\n\treturn old.Equal(newHI)\n}\n\nfunc routesChanged(oldNode types.NodeView, newHI *tailcfg.Hostinfo) bool {\n\tvar oldRoutes []netip.Prefix\n\tif oldNode.Valid() && 
oldNode.AsStruct().Hostinfo != nil {\n\t\toldRoutes = oldNode.AsStruct().Hostinfo.RoutableIPs\n\t}\n\n\tnewRoutes := newHI.RoutableIPs\n\tif newRoutes == nil {\n\t\tnewRoutes = []netip.Prefix{}\n\t}\n\n\tslices.SortFunc(oldRoutes, netip.Prefix.Compare)\n\tslices.SortFunc(newRoutes, netip.Prefix.Compare)\n\n\treturn !slices.Equal(oldRoutes, newRoutes)\n}\n\nfunc peerChangeEmpty(peerChange tailcfg.PeerChange) bool {\n\treturn peerChange.Key == nil &&\n\t\tpeerChange.DiscoKey == nil &&\n\t\tpeerChange.Online == nil &&\n\t\tpeerChange.Endpoints == nil &&\n\t\tpeerChange.DERPRegion == 0 &&\n\t\tpeerChange.LastSeen == nil &&\n\t\tpeerChange.KeyExpiry == nil\n}\n\n// maybeUpdateNodeRoutes updates node routes if announced routes changed but approved routes didn't.\n// This is needed because SubnetRoutes is the intersection of announced AND approved routes.\nfunc (s *State) maybeUpdateNodeRoutes(\n\tid types.NodeID,\n\tnode types.NodeView,\n\thostinfoChanged, needsRouteApproval, routeChange bool,\n\thostinfo *tailcfg.Hostinfo,\n) change.Change {\n\t// Only update if announced routes changed without approval change\n\tif !hostinfoChanged || !needsRouteApproval || routeChange || hostinfo == nil {\n\t\treturn change.Change{}\n\t}\n\n\tlog.Debug().\n\t\tCaller().\n\t\tUint64(zf.NodeID, id.Uint64()).\n\t\tMsg(\"updating routes because announced routes changed but approved routes did not\")\n\n\t// SetNodeRoutes sets the active/distributed routes using AllApprovedRoutes()\n\t// which returns only the intersection of announced AND approved routes.\n\tlog.Debug().\n\t\tCaller().\n\t\tUint64(zf.NodeID, id.Uint64()).\n\t\tStrs(zf.RoutesAnnounced, util.PrefixesToString(node.AnnouncedRoutes())).\n\t\tStrs(zf.ApprovedRoutes, util.PrefixesToString(node.ApprovedRoutes().AsSlice())).\n\t\tStrs(zf.AllApprovedRoutes, util.PrefixesToString(node.AllApprovedRoutes())).\n\t\tMsg(\"updating node routes for distribution\")\n\n\treturn s.SetNodeRoutes(id, node.AllApprovedRoutes()...)\n}\n"
  },
  {
    "path": "hscontrol/state/tags.go",
    "content": "package state\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/rs/zerolog/log\"\n)\n\nvar (\n\t// ErrNodeMarkedTaggedButHasNoTags is returned when a node is marked as tagged but has no tags.\n\tErrNodeMarkedTaggedButHasNoTags = errors.New(\"node marked as tagged but has no tags\")\n\n\t// ErrNodeHasNeitherUserNorTags is returned when a node has neither a user nor tags.\n\tErrNodeHasNeitherUserNorTags = errors.New(\"node has neither user nor tags - must be owned by user or tagged\")\n\n\t// ErrRequestedTagsInvalidOrNotPermitted is returned when requested tags are invalid or not permitted.\n\t// This message format matches Tailscale SaaS: \"requested tags [tag:xxx] are invalid or not permitted\".\n\tErrRequestedTagsInvalidOrNotPermitted = errors.New(\"requested tags\")\n)\n\n// ErrTaggedNodeHasUser is returned when a tagged node has a UserID set.\nvar ErrTaggedNodeHasUser = errors.New(\"tagged node must not have user_id set\")\n\n// validateNodeOwnership ensures proper node ownership model.\n// A node must be either user-owned or tagged, and these are mutually exclusive:\n// tagged nodes must not have a UserID, and user-owned nodes must not have tags.\nfunc validateNodeOwnership(node *types.Node) error {\n\tif node.IsTagged() {\n\t\tif len(node.Tags) == 0 {\n\t\t\treturn fmt.Errorf(\"%w: %q\", ErrNodeMarkedTaggedButHasNoTags, node.Hostname)\n\t\t}\n\n\t\tif node.UserID != nil {\n\t\t\treturn fmt.Errorf(\"%w: %q\", ErrTaggedNodeHasUser, node.Hostname)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\t// User-owned nodes must have a UserID.\n\tif node.UserID == nil {\n\t\treturn fmt.Errorf(\"%w: %q\", ErrNodeHasNeitherUserNorTags, node.Hostname)\n\t}\n\n\treturn nil\n}\n\n// logTagOperation logs tag assignment operations for audit purposes.\nfunc logTagOperation(existingNode types.NodeView, newTags []string) {\n\tif existingNode.IsTagged() {\n\t\tlog.Info().\n\t\t\tEmbedObject(existingNode).\n\t\t\tStrs(\"old.tags\", existingNode.Tags().AsSlice()).\n\t\t\tStrs(\"new.tags\", newTags).\n\t\t\tMsg(\"Updating tags on already-tagged node\")\n\t} else {\n\t\tvar userID uint\n\t\tif existingNode.UserID().Valid() {\n\t\t\tuserID = existingNode.UserID().Get()\n\t\t}\n\n\t\tlog.Info().\n\t\t\tEmbedObject(existingNode).\n\t\t\tUint(\"previous.user\", userID).\n\t\t\tStrs(\"new.tags\", newTags).\n\t\t\tMsg(\"Converting user-owned node to tagged node\")\n\t}\n}\n"
  },
  {
    "path": "hscontrol/state/test_helpers.go",
    "content": "package state\n\nimport (\n\t\"time\"\n)\n\n// Test configuration for NodeStore batching.\n// These values are optimized for test speed rather than production use.\nconst (\n\tTestBatchSize    = 5\n\tTestBatchTimeout = 5 * time.Millisecond\n)\n"
  },
  {
    "path": "hscontrol/tailsql.go",
    "content": "package hscontrol\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"os\"\n\n\t\"github.com/tailscale/tailsql/server/tailsql\"\n\t\"tailscale.com/tsnet\"\n\t\"tailscale.com/tsweb\"\n\t\"tailscale.com/types/logger\"\n)\n\n// ErrNoCertDomains is returned when no cert domains are available for HTTPS.\nvar ErrNoCertDomains = errors.New(\"no cert domains available for HTTPS\")\n\nfunc runTailSQLService(ctx context.Context, logf logger.Logf, stateDir, dbPath string) error {\n\topts := tailsql.Options{\n\t\tHostname: \"tailsql-headscale\",\n\t\tStateDir: stateDir,\n\t\tSources: []tailsql.DBSpec{\n\t\t\t{\n\t\t\t\tSource: \"headscale\",\n\t\t\t\tLabel:  \"headscale - sqlite\",\n\t\t\t\tDriver: \"sqlite\",\n\t\t\t\tURL:    fmt.Sprintf(\"file:%s?mode=ro\", dbPath),\n\t\t\t\tNamed: map[string]string{\n\t\t\t\t\t\"schema\": `select * from sqlite_schema`,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\ttsNode := &tsnet.Server{\n\t\tDir:      os.ExpandEnv(opts.StateDir),\n\t\tHostname: opts.Hostname,\n\t\tLogf:     logger.Discard,\n\t}\n\t// if *doDebugLog {\n\t// \ttsNode.Logf = logf\n\t// }\n\tdefer tsNode.Close()\n\n\tlogf(\"Starting tailscale (hostname=%q)\", opts.Hostname)\n\n\tlc, err := tsNode.LocalClient()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"connect local client: %w\", err)\n\t}\n\n\topts.LocalClient = lc // for authentication\n\n\t// Make sure the Tailscale node starts up. It might not, if it is a new node\n\t// and the user did not provide an auth key.\n\tif st, err := tsNode.Up(ctx); err != nil { //nolint:noinlineerr\n\t\treturn fmt.Errorf(\"starting tailscale: %w\", err)\n\t} else {\n\t\tlogf(\"tailscale started, node state %q\", st.BackendState)\n\t}\n\n\t// Reaching here, we have a running Tailscale node, now we can set up the\n\t// HTTP and/or HTTPS plumbing for TailSQL itself.\n\ttsql, err := tailsql.NewServer(opts)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating tailsql server: %w\", err)\n\t}\n\n\tlst, err := tsNode.Listen(\"tcp\", \":80\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"listen port 80: %w\", err)\n\t}\n\n\tif opts.ServeHTTPS {\n\t\t// When serving TLS, add a redirect from HTTP on port 80 to HTTPS on 443.\n\t\tcertDomains := tsNode.CertDomains()\n\t\tif len(certDomains) == 0 {\n\t\t\treturn ErrNoCertDomains\n\t\t}\n\n\t\tbase := \"https://\" + certDomains[0]\n\n\t\tgo func() {\n\t\t\t_ = http.Serve(lst, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { //nolint:gosec\n\t\t\t\ttarget := base + r.RequestURI\n\t\t\t\thttp.Redirect(w, r, target, http.StatusPermanentRedirect)\n\t\t\t}))\n\t\t}()\n\t\t// log.Printf(\"Redirecting HTTP to HTTPS at %q\", base)\n\n\t\t// For the real service, start a separate listener.\n\t\t// Note: Replaces the port 80 listener.\n\t\tvar err error\n\n\t\tlst, err = tsNode.ListenTLS(\"tcp\", \":443\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"listen TLS: %w\", err)\n\t\t}\n\n\t\tlogf(\"enabled serving via HTTPS\")\n\t}\n\n\tmux := tsql.NewMux()\n\ttsweb.Debugger(mux)\n\n\tgo func() {\n\t\t_ = http.Serve(lst, mux) //nolint:gosec\n\t}()\n\n\tlogf(\"TailSQL started\")\n\t<-ctx.Done()\n\tlogf(\"TailSQL shutting down...\")\n\n\treturn tsNode.Close()\n}\n"
  },
  {
    "path": "hscontrol/templates/apple.go",
    "content": "package templates\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/chasefleming/elem-go\"\n\t\"github.com/chasefleming/elem-go/attrs\"\n\t\"github.com/chasefleming/elem-go/styles\"\n)\n\nfunc Apple(url string) *elem.Element {\n\treturn HtmlStructure(\n\t\telem.Title(nil,\n\t\t\telem.Text(\"headscale - Apple\")),\n\t\tmdTypesetBody(\n\t\t\theadscaleLogo(),\n\t\t\tH1(elem.Text(\"iOS configuration\")),\n\t\t\tH2(elem.Text(\"GUI\")),\n\t\t\tOl(\n\t\t\t\telem.Li(\n\t\t\t\t\tnil,\n\t\t\t\t\telem.Text(\"Install the official Tailscale iOS client from the \"),\n\t\t\t\t\texternalLink(\"https://apps.apple.com/app/tailscale/id1470499037\", \"App Store\"),\n\t\t\t\t),\n\t\t\t\telem.Li(\n\t\t\t\t\tnil,\n\t\t\t\t\telem.Text(\"Open the \"),\n\t\t\t\t\telem.Strong(nil, elem.Text(\"Tailscale\")),\n\t\t\t\t\telem.Text(\" app\"),\n\t\t\t\t),\n\t\t\t\telem.Li(\n\t\t\t\t\tnil,\n\t\t\t\t\telem.Text(\"Click the account icon in the top-right corner and select \"),\n\t\t\t\t\telem.Strong(nil, elem.Text(\"Log in…\")),\n\t\t\t\t),\n\t\t\t\telem.Li(\n\t\t\t\t\tnil,\n\t\t\t\t\telem.Text(\"Tap the top-right options menu button and select \"),\n\t\t\t\t\telem.Strong(nil, elem.Text(\"Use custom coordination server\")),\n\t\t\t\t),\n\t\t\t\telem.Li(\n\t\t\t\t\tnil,\n\t\t\t\t\telem.Text(\"Enter your instance URL: \"),\n\t\t\t\t\tCode(elem.Text(url)),\n\t\t\t\t),\n\t\t\t\telem.Li(\n\t\t\t\t\tnil,\n\t\t\t\t\telem.Text(\n\t\t\t\t\t\t\"Enter your credentials and log in. Headscale should now be working on your iOS device\",\n\t\t\t\t\t),\n\t\t\t\t),\n\t\t\t),\n\t\t\tH1(elem.Text(\"macOS configuration\")),\n\t\t\tH2(elem.Text(\"Command line\")),\n\t\t\tP(\n\t\t\t\telem.Text(\"Use Tailscale's login command to add your profile:\"),\n\t\t\t),\n\t\t\tPre(PreCode(\"tailscale login --login-server \"+url)),\n\t\t\tH2(elem.Text(\"GUI\")),\n\t\t\tOl(\n\t\t\t\telem.Li(\n\t\t\t\t\tnil,\n\t\t\t\t\telem.Text(\"Option + Click the \"),\n\t\t\t\t\telem.Strong(nil, elem.Text(\"Tailscale\")),\n\t\t\t\t\telem.Text(\" icon in the menu and hover over the \"),\n\t\t\t\t\telem.Strong(nil, elem.Text(\"Debug\")),\n\t\t\t\t\telem.Text(\" menu\"),\n\t\t\t\t),\n\t\t\t\telem.Li(nil,\n\t\t\t\t\telem.Text(\"Under \"),\n\t\t\t\t\telem.Strong(nil, elem.Text(\"Custom Login Server\")),\n\t\t\t\t\telem.Text(\", select \"),\n\t\t\t\t\telem.Strong(nil, elem.Text(\"Add Account...\")),\n\t\t\t\t),\n\t\t\t\telem.Li(\n\t\t\t\t\tnil,\n\t\t\t\t\telem.Text(\"Enter \"),\n\t\t\t\t\tCode(elem.Text(url)),\n\t\t\t\t\telem.Text(\" of the headscale instance and press \"),\n\t\t\t\t\telem.Strong(nil, elem.Text(\"Add Account\")),\n\t\t\t\t),\n\t\t\t\telem.Li(nil,\n\t\t\t\t\telem.Text(\"Follow the login procedure in the browser\"),\n\t\t\t\t),\n\t\t\t),\n\t\t\tH2(elem.Text(\"Profiles\")),\n\t\t\tP(\n\t\t\t\telem.Text(\n\t\t\t\t\t\"Headscale can be set to the default server by installing a Headscale configuration profile:\",\n\t\t\t\t),\n\t\t\t),\n\t\t\telem.Div(attrs.Props{attrs.Style: styles.Props{styles.MarginTop: spaceL, styles.MarginBottom: spaceL}.ToInline()},\n\t\t\t\tdownloadButton(\"/apple/macos-app-store\", \"macOS AppStore profile\"),\n\t\t\t\tdownloadButton(\"/apple/macos-standalone\", \"macOS Standalone profile\"),\n\t\t\t),\n\t\t\tOl(\n\t\t\t\telem.Li(\n\t\t\t\t\tnil,\n\t\t\t\t\telem.Text(\n\t\t\t\t\t\t\"Download the profile, then open it. 
When it has been opened, there should be a notification that a profile can be installed\",\n\t\t\t\t\t),\n\t\t\t\t),\n\t\t\t\telem.Li(nil,\n\t\t\t\t\telem.Text(\"Open \"),\n\t\t\t\t\telem.Strong(nil, elem.Text(\"System Preferences\")),\n\t\t\t\t\telem.Text(\" and go to \"),\n\t\t\t\t\telem.Strong(nil, elem.Text(\"Profiles\")),\n\t\t\t\t),\n\t\t\t\telem.Li(nil,\n\t\t\t\t\telem.Text(\"Find and install the \"),\n\t\t\t\t\telem.Strong(nil, elem.Text(\"Headscale\")),\n\t\t\t\t\telem.Text(\" profile\"),\n\t\t\t\t),\n\t\t\t\telem.Li(nil,\n\t\t\t\t\telem.Text(\"Restart \"),\n\t\t\t\t\telem.Strong(nil, elem.Text(\"Tailscale.app\")),\n\t\t\t\t\telem.Text(\" and log in\"),\n\t\t\t\t),\n\t\t\t),\n\t\t\torDivider(),\n\t\t\tP(\n\t\t\t\telem.Text(\n\t\t\t\t\t\"Use your terminal to configure the default setting for Tailscale by issuing one of the following commands:\",\n\t\t\t\t),\n\t\t\t),\n\t\t\tP(elem.Text(\"For app store client:\")),\n\t\t\tPre(PreCode(\"defaults write io.tailscale.ipn.macos ControlURL \"+url)),\n\t\t\tP(elem.Text(\"For standalone client:\")),\n\t\t\tPre(PreCode(\"defaults write io.tailscale.ipn.macsys ControlURL \"+url)),\n\t\t\tP(\n\t\t\t\telem.Text(\"Restart \"),\n\t\t\t\telem.Strong(nil, elem.Text(\"Tailscale.app\")),\n\t\t\t\telem.Text(\" and log in.\"),\n\t\t\t),\n\t\t\twarningBox(\"Caution\", \"You should always download and inspect the profile before installing it.\"),\n\t\t\tP(elem.Text(\"For app store client:\")),\n\t\t\tPre(PreCode(fmt.Sprintf(`curl %s/apple/macos-app-store`, url))),\n\t\t\tP(elem.Text(\"For standalone client:\")),\n\t\t\tPre(PreCode(fmt.Sprintf(`curl %s/apple/macos-standalone`, url))),\n\t\t\tH1(elem.Text(\"tvOS configuration\")),\n\t\t\tH2(elem.Text(\"GUI\")),\n\t\t\tOl(\n\t\t\t\telem.Li(\n\t\t\t\t\tnil,\n\t\t\t\t\telem.Text(\"Install the official Tailscale tvOS client from the \"),\n\t\t\t\t\texternalLink(\"https://apps.apple.com/app/tailscale/id1470499037\", \"App Store\"),\n\t\t\t\t),\n\t\t\t\telem.Li(\n\t\t\t\t\tnil,\n\t\t\t\t\telem.Text(\"Open \"),\n\t\t\t\t\telem.Strong(nil, elem.Text(\"Settings\")),\n\t\t\t\t\telem.Text(\" (the Apple tvOS settings) > \"),\n\t\t\t\t\telem.Strong(nil, elem.Text(\"Apps\")),\n\t\t\t\t\telem.Text(\" > \"),\n\t\t\t\t\telem.Strong(nil, elem.Text(\"Tailscale\")),\n\t\t\t\t),\n\t\t\t\telem.Li(\n\t\t\t\t\tnil,\n\t\t\t\t\telem.Text(\"Enter \"),\n\t\t\t\t\tCode(elem.Text(url)),\n\t\t\t\t\telem.Text(\" under \"),\n\t\t\t\t\telem.Strong(nil, elem.Text(\"ALTERNATE COORDINATION SERVER URL\")),\n\t\t\t\t),\n\t\t\t\telem.Li(nil,\n\t\t\t\t\telem.Text(\"Return to the tvOS \"),\n\t\t\t\t\telem.Strong(nil, elem.Text(\"Home\")),\n\t\t\t\t\telem.Text(\" screen\"),\n\t\t\t\t),\n\t\t\t\telem.Li(nil,\n\t\t\t\t\telem.Text(\"Open \"),\n\t\t\t\t\telem.Strong(nil, elem.Text(\"Tailscale\")),\n\t\t\t\t),\n\t\t\t\telem.Li(nil,\n\t\t\t\t\telem.Text(\"Select \"),\n\t\t\t\t\telem.Strong(nil, elem.Text(\"Install VPN configuration\")),\n\t\t\t\t),\n\t\t\t\telem.Li(nil,\n\t\t\t\t\telem.Text(\"Select \"),\n\t\t\t\t\telem.Strong(nil, elem.Text(\"Allow\")),\n\t\t\t\t),\n\t\t\t\telem.Li(nil,\n\t\t\t\t\telem.Text(\"Scan the QR code and follow the login procedure\"),\n\t\t\t\t),\n\t\t\t\telem.Li(nil,\n\t\t\t\t\telem.Text(\"Headscale should now be working on your tvOS device\"),\n\t\t\t\t),\n\t\t\t),\n\t\t\tpageFooter(),\n\t\t),\n\t)\n}\n"
  },
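  {
    "path": "docs/examples/apple_page/main.go",
    "content": "// Illustrative sketch, not part of the headscale source tree: one way the\n// Apple template above could be served over HTTP. It assumes only that\n// templates.Apple(url) returns an *elem.Element whose Render method yields\n// the full HTML document (as the template consistency tests below show).\n// The route, port and server URL are hypothetical placeholders.\npackage main\n\nimport (\n\t\"net/http\"\n\n\t\"github.com/juanfont/headscale/hscontrol/templates\"\n)\n\n// applePage renders the iOS/macOS/tvOS setup instructions for the\n// configured headscale server URL.\nfunc applePage(serverURL string) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\t\t_, _ = w.Write([]byte(templates.Apple(serverURL).Render()))\n\t}\n}\n\nfunc main() {\n\thttp.Handle(\"/apple\", applePage(\"https://headscale.example.com\"))\n\t_ = http.ListenAndServe(\":8080\", nil)\n}\n"
  },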
  {
    "path": "hscontrol/templates/auth_success.go",
    "content": "package templates\n\nimport (\n\t\"github.com/chasefleming/elem-go\"\n)\n\n// AuthSuccessResult contains the text content for an authentication success page.\n// Each field controls a distinct piece of user-facing text so that every auth\n// flow (node registration, reauthentication, SSH check, …) can clearly\n// communicate what just happened.\ntype AuthSuccessResult struct {\n\t// Title is the browser tab / page title,\n\t// e.g. \"Headscale - Node Registered\".\n\tTitle string\n\n\t// Heading is the bold green text inside the success box,\n\t// e.g. \"Node registered\".\n\tHeading string\n\n\t// Verb is the action prefix in the body text before \"as <user>\",\n\t// e.g. \"Registered\", \"Reauthenticated\", \"Authorized\".\n\tVerb string\n\n\t// User is the display name shown in bold in the body text,\n\t// e.g. \"user@example.com\".\n\tUser string\n\n\t// Message is the follow-up instruction shown after the user name,\n\t// e.g. \"You can now close this window.\"\n\tMessage string\n}\n\n// AuthSuccess renders an authentication / authorisation success page.\n// The caller controls every user-visible string via [AuthSuccessResult] so the\n// page clearly describes what succeeded (registration, reauth, SSH check, …).\nfunc AuthSuccess(result AuthSuccessResult) *elem.Element {\n\tbox := successBox(\n\t\tresult.Heading,\n\t\telem.Text(result.Verb+\" as \"),\n\t\telem.Strong(nil, elem.Text(result.User)),\n\t\telem.Text(\". \"+result.Message),\n\t)\n\n\treturn HtmlStructure(\n\t\telem.Title(nil, elem.Text(result.Title)),\n\t\tmdTypesetBody(\n\t\t\theadscaleLogo(),\n\t\t\tbox,\n\t\t\tH2(elem.Text(\"Getting started\")),\n\t\t\tP(elem.Text(\"Check out the documentation to learn more about headscale and Tailscale:\")),\n\t\t\tUl(\n\t\t\t\telem.Li(nil,\n\t\t\t\t\texternalLink(\"https://headscale.net/stable/\", \"Headscale documentation\"),\n\t\t\t\t),\n\t\t\t\telem.Li(nil,\n\t\t\t\t\texternalLink(\"https://tailscale.com/kb/\", \"Tailscale knowledge base\"),\n\t\t\t\t),\n\t\t\t),\n\t\t\tpageFooter(),\n\t\t),\n\t)\n}\n"
  },
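  {
    "path": "docs/examples/auth_success_page/main.go",
    "content": "// Illustrative sketch, not part of the headscale source tree: shows how the\n// AuthSuccessResult fields documented above combine into one rendered page.\n// The field values mirror those used by the template consistency tests\n// below; the printed output destination is arbitrary.\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/juanfont/headscale/hscontrol/templates\"\n)\n\nfunc main() {\n\thtml := templates.AuthSuccess(templates.AuthSuccessResult{\n\t\tTitle:   \"Headscale - Node Registered\", // browser tab / page title\n\t\tHeading: \"Node registered\",             // bold green text in the success box\n\t\tVerb:    \"Registered\",                  // prefix before \"as <user>\"\n\t\tUser:    \"user@example.com\",            // rendered in bold\n\t\tMessage: \"You can now close this window.\",\n\t}).Render()\n\n\t// The success box reads:\n\t// \"Registered as user@example.com. You can now close this window.\"\n\tfmt.Println(html)\n}\n"
  },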
  {
    "path": "hscontrol/templates/auth_web.go",
    "content": "package templates\n\nimport (\n\t\"github.com/chasefleming/elem-go\"\n)\n\n// AuthWeb renders a page that instructs an administrator to run a CLI command\n// to complete an authentication or registration flow.\n// It is used by both the registration and auth-approve web handlers.\nfunc AuthWeb(title, description, command string) *elem.Element {\n\treturn HtmlStructure(\n\t\telem.Title(nil, elem.Text(title+\" - Headscale\")),\n\t\tmdTypesetBody(\n\t\t\theadscaleLogo(),\n\t\t\tH1(elem.Text(title)),\n\t\t\tP(elem.Text(description)),\n\t\t\tPre(PreCode(command)),\n\t\t\tpageFooter(),\n\t\t),\n\t)\n}\n"
  },
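  {
    "path": "docs/examples/auth_web_page/main.go",
    "content": "// Illustrative sketch, not part of the headscale source tree: AuthWeb takes\n// three plain strings, so the two flows named in its doc comment differ only\n// in their arguments. The auth ID below is a placeholder.\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/juanfont/headscale/hscontrol/templates\"\n)\n\nfunc main() {\n\t// Registration flow: instruct the admin to register the machine.\n\tregister := templates.AuthWeb(\n\t\t\"Machine registration\",\n\t\t\"Run the command below in the headscale server to add this machine to your network:\",\n\t\t\"headscale auth register --auth-id <auth-id> --user USERNAME\",\n\t)\n\n\t// Approval flow: same page structure, different title and command.\n\tapprove := templates.AuthWeb(\n\t\t\"Authentication check\",\n\t\t\"Run the command below in the headscale server to approve this authentication request:\",\n\t\t\"headscale auth approve --auth-id <auth-id>\",\n\t)\n\n\tfmt.Println(register.Render())\n\tfmt.Println(approve.Render())\n}\n"
  },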
  {
    "path": "hscontrol/templates/design.go",
    "content": "package templates\n\nimport (\n\telem \"github.com/chasefleming/elem-go\"\n\t\"github.com/chasefleming/elem-go/attrs\"\n\t\"github.com/chasefleming/elem-go/styles\"\n)\n\n// Design System Constants\n// These constants define the visual language for all Headscale HTML templates.\n// They ensure consistency across all pages and make it easy to maintain and update the design.\n\n// Color System\n// EXTRACTED FROM: https://headscale.net/stable/assets/stylesheets/main.342714a4.min.css\n// Material for MkDocs design system - exact values from official docs.\nconst (\n\t// Text colors - from --md-default-fg-color CSS variables.\n\tcolorTextPrimary   = \"#000000de\" //nolint:unused // rgba(0,0,0,0.87) - Body text\n\tcolorTextSecondary = \"#0000008a\" //nolint:unused // rgba(0,0,0,0.54) - Headings (--md-default-fg-color--light)\n\tcolorTextTertiary  = \"#00000052\" //nolint:unused // rgba(0,0,0,0.32) - Lighter text\n\tcolorTextLightest  = \"#00000012\" //nolint:unused // rgba(0,0,0,0.07) - Lightest text\n\n\t// Code colors - from --md-code-* CSS variables.\n\tcolorCodeFg = \"#36464e\" //nolint:unused // Code text color (--md-code-fg-color)\n\tcolorCodeBg = \"#f5f5f5\" //nolint:unused // Code background (--md-code-bg-color)\n\n\t// Border colors.\n\tcolorBorderLight  = \"#e5e7eb\" //nolint:unused // Light borders\n\tcolorBorderMedium = \"#d1d5db\" //nolint:unused // Medium borders\n\n\t// Background colors.\n\tcolorBackgroundPage = \"#ffffff\" //nolint:unused // Page background\n\tcolorBackgroundCard = \"#ffffff\" //nolint:unused // Card/content background\n\n\t// Accent colors - from --md-primary/accent-fg-color.\n\tcolorPrimaryAccent = \"#4051b5\" //nolint:unused // Primary accent (links)\n\tcolorAccent        = \"#526cfe\" //nolint:unused // Secondary accent\n\n\t// Success colors.\n\tcolorSuccess      = \"#059669\" //nolint:unused // Success states\n\tcolorSuccessLight = \"#d1fae5\" //nolint:unused // Success backgrounds\n)\n\n// Spacing System\n// Based on 4px/8px base unit for consistent rhythm.\n// Uses rem units for scalability with user font size preferences.\nconst (\n\tspaceXS  = \"0.25rem\" //nolint:unused // 4px - Tight spacing\n\tspaceS   = \"0.5rem\"  //nolint:unused // 8px - Small spacing\n\tspaceM   = \"1rem\"    //nolint:unused // 16px - Medium spacing (base)\n\tspaceL   = \"1.5rem\"  //nolint:unused // 24px - Large spacing\n\tspaceXL  = \"2rem\"    //nolint:unused // 32px - Extra large spacing\n\tspace2XL = \"3rem\"    //nolint:unused // 48px - 2x extra large spacing\n\tspace3XL = \"4rem\"    //nolint:unused // 64px - 3x extra large spacing\n)\n\n// Typography System\n// EXTRACTED FROM: https://headscale.net/stable/assets/stylesheets/main.342714a4.min.css\n// Material for MkDocs typography - exact values from .md-typeset CSS.\nconst (\n\t// Font families - from CSS custom properties.\n\tfontFamilySystem = `\"Roboto\", -apple-system, BlinkMacSystemFont, \"Segoe UI\", \"Helvetica Neue\", Arial, sans-serif` //nolint:unused\n\tfontFamilyCode   = `\"Roboto Mono\", \"SF Mono\", Monaco, \"Cascadia Code\", Consolas, \"Courier New\", monospace`        //nolint:unused\n\n\t// Font sizes - from .md-typeset CSS rules.\n\tfontSizeBase  = \"0.8rem\"   //nolint:unused // 12.8px - Base text (.md-typeset)\n\tfontSizeH1    = \"2em\"      //nolint:unused // 2x base - Main headings\n\tfontSizeH2    = \"1.5625em\" //nolint:unused // 1.5625x base - Section headings\n\tfontSizeH3    = \"1.25em\"   //nolint:unused // 1.25x base - Subsection headings\n\tfontSizeSmall = \"0.8em\"    
//nolint:unused // 0.8x base - Small text\n\tfontSizeCode  = \"0.85em\"   //nolint:unused // 0.85x base - Inline code\n\n\t// Line heights - from .md-typeset CSS rules.\n\tlineHeightBase = \"1.6\" //nolint:unused // Body text (.md-typeset)\n\tlineHeightH1   = \"1.3\" //nolint:unused // H1 headings\n\tlineHeightH2   = \"1.4\" //nolint:unused // H2 headings\n\tlineHeightH3   = \"1.5\" //nolint:unused // H3 headings\n\tlineHeightCode = \"1.4\" //nolint:unused // Code blocks (pre)\n)\n\n// Responsive Container Component\n// Creates a centered container with responsive padding and max-width.\n// Mobile-first approach: starts at 100% width with padding, constrains on larger screens.\n//\n//nolint:unused // Reserved for future use in Phase 4.\nfunc responsiveContainer(children ...elem.Node) *elem.Element {\n\treturn elem.Div(attrs.Props{\n\t\tattrs.Style: styles.Props{\n\t\t\tstyles.Width:    \"100%\",\n\t\t\tstyles.MaxWidth: \"min(800px, 90vw)\",         // Responsive: 90% of viewport or 800px max\n\t\t\tstyles.Margin:   \"0 auto\",                   // Center horizontally\n\t\t\tstyles.Padding:  \"clamp(1rem, 5vw, 2.5rem)\", // Fluid padding: 16px to 40px\n\t\t}.ToInline(),\n\t}, children...)\n}\n\n// Card Component\n// Reusable card for grouping related content with visual separation.\n// Parameters:\n//   - title: Optional title for the card (empty string for no title)\n//   - children: Content elements to display in the card\n//\n//nolint:unused // Reserved for future use in Phase 4.\nfunc card(title string, children ...elem.Node) *elem.Element {\n\tcardContent := children\n\tif title != \"\" {\n\t\t// Prepend title as H3 if provided\n\t\tcardContent = append([]elem.Node{\n\t\t\telem.H3(attrs.Props{\n\t\t\t\tattrs.Style: styles.Props{\n\t\t\t\t\tstyles.MarginTop:    \"0\",\n\t\t\t\t\tstyles.MarginBottom: spaceM,\n\t\t\t\t\tstyles.FontSize:     fontSizeH3,\n\t\t\t\t\tstyles.LineHeight:   lineHeightH3, // 1.5 - H3 line height\n\t\t\t\t\tstyles.Color:        colorTextSecondary,\n\t\t\t\t}.ToInline(),\n\t\t\t}, elem.Text(title)),\n\t\t}, children...)\n\t}\n\n\treturn elem.Div(attrs.Props{\n\t\tattrs.Style: styles.Props{\n\t\t\tstyles.Background:   colorBackgroundCard,\n\t\t\tstyles.Border:       \"1px solid \" + colorBorderLight,\n\t\t\tstyles.BorderRadius: \"0.5rem\",                   // 8px rounded corners\n\t\t\tstyles.Padding:      \"clamp(1rem, 3vw, 1.5rem)\", // Responsive padding\n\t\t\tstyles.MarginBottom: spaceL,\n\t\t\tstyles.BoxShadow:    \"0 1px 3px rgba(0,0,0,0.1)\", // Subtle shadow\n\t\t}.ToInline(),\n\t}, cardContent...)\n}\n\n// Code Block Component\n// EXTRACTED FROM: .md-typeset pre CSS rules\n// Exact styling from Material for MkDocs documentation.\n//\n//nolint:unused // Used across apple.go, windows.go, register_web.go templates.\nfunc codeBlock(code string) *elem.Element {\n\treturn elem.Pre(attrs.Props{\n\t\tattrs.Style: styles.Props{\n\t\t\tstyles.Display:         \"block\",\n\t\t\tstyles.Padding:         \"0.77em 1.18em\", // From .md-typeset pre\n\t\t\tstyles.Border:          \"none\",          // No border in original\n\t\t\tstyles.BorderRadius:    \"0.1rem\",        // From .md-typeset code\n\t\t\tstyles.BackgroundColor: colorCodeBg,     // #f5f5f5\n\t\t\tstyles.FontFamily:      fontFamilyCode,  // Roboto Mono\n\t\t\tstyles.FontSize:        fontSizeCode,    // 0.85em\n\t\t\tstyles.LineHeight:      lineHeightCode,  // 1.4\n\t\t\tstyles.OverflowX:       \"auto\",          // Horizontal scroll\n\t\t\t\"overflow-wrap\":        \"break-word\",    // Word 
wrapping\n\t\t\t\"word-wrap\":            \"break-word\",    // Legacy support\n\t\t\tstyles.WhiteSpace:      \"pre-wrap\",      // Preserve whitespace\n\t\t\tstyles.MarginTop:       spaceM,          // 1em\n\t\t\tstyles.MarginBottom:    spaceM,          // 1em\n\t\t\tstyles.Color:           colorCodeFg,     // #36464e\n\t\t\tstyles.BoxShadow:       \"none\",          // No shadow in original\n\t\t}.ToInline(),\n\t},\n\t\telem.Code(nil, elem.Text(code)),\n\t)\n}\n\n// Base Typeset Styles\n// Returns inline styles for the main content container that matches .md-typeset.\n// EXTRACTED FROM: .md-typeset CSS rule from Material for MkDocs.\n//\n//nolint:unused // Used in general.go for mdTypesetBody.\nfunc baseTypesetStyles() styles.Props {\n\treturn styles.Props{\n\t\tstyles.FontSize:   fontSizeBase,   // 0.8rem\n\t\tstyles.LineHeight: lineHeightBase, // 1.6\n\t\tstyles.Color:      colorTextPrimary,\n\t\tstyles.FontFamily: fontFamilySystem,\n\t\t\"overflow-wrap\":   \"break-word\",\n\t\tstyles.TextAlign:  \"left\",\n\t}\n}\n\n// H1 Styles\n// Returns inline styles for H1 headings that match .md-typeset h1.\n// EXTRACTED FROM: .md-typeset h1 CSS rule from Material for MkDocs.\n//\n//nolint:unused // Used across templates for main headings.\nfunc h1Styles() styles.Props {\n\treturn styles.Props{\n\t\tstyles.Color:      colorTextSecondary, // rgba(0, 0, 0, 0.54)\n\t\tstyles.FontSize:   fontSizeH1,         // 2em\n\t\tstyles.LineHeight: lineHeightH1,       // 1.3\n\t\tstyles.Margin:     \"0 0 1.25em\",\n\t\tstyles.FontWeight: \"300\",\n\t\t\"letter-spacing\":  \"-0.01em\",\n\t\tstyles.FontFamily: fontFamilySystem, // Roboto\n\t\t\"overflow-wrap\":   \"break-word\",\n\t}\n}\n\n// H2 Styles\n// Returns inline styles for H2 headings that match .md-typeset h2.\n// EXTRACTED FROM: .md-typeset h2 CSS rule from Material for MkDocs.\n//\n//nolint:unused // Used across templates for section headings.\nfunc h2Styles() styles.Props {\n\treturn styles.Props{\n\t\tstyles.FontSize:   fontSizeH2,   // 1.5625em\n\t\tstyles.LineHeight: lineHeightH2, // 1.4\n\t\tstyles.Margin:     \"1.6em 0 0.64em\",\n\t\tstyles.FontWeight: \"300\",\n\t\t\"letter-spacing\":  \"-0.01em\",\n\t\tstyles.Color:      colorTextSecondary, // rgba(0, 0, 0, 0.54)\n\t\tstyles.FontFamily: fontFamilySystem,   // Roboto\n\t\t\"overflow-wrap\":   \"break-word\",\n\t}\n}\n\n// H3 Styles\n// Returns inline styles for H3 headings that match .md-typeset h3.\n// EXTRACTED FROM: .md-typeset h3 CSS rule from Material for MkDocs.\n//\n//nolint:unused // Used across templates for subsection headings.\nfunc h3Styles() styles.Props {\n\treturn styles.Props{\n\t\tstyles.FontSize:   fontSizeH3,   // 1.25em\n\t\tstyles.LineHeight: lineHeightH3, // 1.5\n\t\tstyles.Margin:     \"1.6em 0 0.8em\",\n\t\tstyles.FontWeight: \"400\",\n\t\t\"letter-spacing\":  \"-0.01em\",\n\t\tstyles.Color:      colorTextSecondary, // rgba(0, 0, 0, 0.54)\n\t\tstyles.FontFamily: fontFamilySystem,   // Roboto\n\t\t\"overflow-wrap\":   \"break-word\",\n\t}\n}\n\n// Paragraph Styles\n// Returns inline styles for paragraphs that match .md-typeset p.\n// EXTRACTED FROM: .md-typeset p CSS rule from Material for MkDocs.\n//\n//nolint:unused // Used for consistent paragraph spacing.\nfunc paragraphStyles() styles.Props {\n\treturn styles.Props{\n\t\tstyles.Margin:     \"1em 0\",\n\t\tstyles.FontFamily: fontFamilySystem, // Roboto\n\t\tstyles.FontSize:   fontSizeBase,     // 0.8rem - inherited from .md-typeset\n\t\tstyles.LineHeight: lineHeightBase,   // 1.6 - inherited from 
.md-typeset\n\t\tstyles.Color:      colorTextPrimary, // rgba(0, 0, 0, 0.87)\n\t\t\"overflow-wrap\":   \"break-word\",\n\t}\n}\n\n// Ordered List Styles\n// Returns inline styles for ordered lists that match .md-typeset ol.\n// EXTRACTED FROM: .md-typeset ol CSS rule from Material for MkDocs.\n//\n//nolint:unused // Used for numbered instruction lists.\nfunc orderedListStyles() styles.Props {\n\treturn styles.Props{\n\t\tstyles.MarginBottom: \"1em\",\n\t\tstyles.MarginTop:    \"1em\",\n\t\tstyles.PaddingLeft:  \"2em\",\n\t\tstyles.FontFamily:   fontFamilySystem, // Roboto - inherited from .md-typeset\n\t\tstyles.FontSize:     fontSizeBase,     // 0.8rem - inherited from .md-typeset\n\t\tstyles.LineHeight:   lineHeightBase,   // 1.6 - inherited from .md-typeset\n\t\tstyles.Color:        colorTextPrimary, // rgba(0, 0, 0, 0.87) - inherited from .md-typeset\n\t\t\"overflow-wrap\":     \"break-word\",\n\t}\n}\n\n// Unordered List Styles\n// Returns inline styles for unordered lists that match .md-typeset ul.\n// EXTRACTED FROM: .md-typeset ul CSS rule from Material for MkDocs.\n//\n//nolint:unused // Used for bullet point lists.\nfunc unorderedListStyles() styles.Props {\n\treturn styles.Props{\n\t\tstyles.MarginBottom: \"1em\",\n\t\tstyles.MarginTop:    \"1em\",\n\t\tstyles.PaddingLeft:  \"2em\",\n\t\tstyles.FontFamily:   fontFamilySystem, // Roboto - inherited from .md-typeset\n\t\tstyles.FontSize:     fontSizeBase,     // 0.8rem - inherited from .md-typeset\n\t\tstyles.LineHeight:   lineHeightBase,   // 1.6 - inherited from .md-typeset\n\t\tstyles.Color:        colorTextPrimary, // rgba(0, 0, 0, 0.87) - inherited from .md-typeset\n\t\t\"overflow-wrap\":     \"break-word\",\n\t}\n}\n\n// Link Styles\n// Returns inline styles for links that match .md-typeset a.\n// EXTRACTED FROM: .md-typeset a CSS rule from Material for MkDocs.\n// Note: Hover states cannot be implemented with inline styles.\n//\n//nolint:unused // Used for text links.\nfunc linkStyles() styles.Props {\n\treturn styles.Props{\n\t\tstyles.Color:          colorPrimaryAccent, // #4051b5 - var(--md-primary-fg-color)\n\t\tstyles.TextDecoration: \"none\",\n\t\t\"word-break\":          \"break-word\",\n\t\tstyles.FontFamily:     fontFamilySystem, // Roboto - inherited from .md-typeset\n\t}\n}\n\n// Inline Code Styles (updated)\n// Returns inline styles for inline code that matches .md-typeset code.\n// EXTRACTED FROM: .md-typeset code CSS rule from Material for MkDocs.\n//\n//nolint:unused // Used for inline code snippets.\nfunc inlineCodeStyles() styles.Props {\n\treturn styles.Props{\n\t\tstyles.BackgroundColor: colorCodeBg, // #f5f5f5\n\t\tstyles.Color:           colorCodeFg, // #36464e\n\t\tstyles.BorderRadius:    \"0.1rem\",\n\t\tstyles.FontSize:        fontSizeCode,   // 0.85em\n\t\tstyles.FontFamily:      fontFamilyCode, // Roboto Mono\n\t\tstyles.Padding:         \"0 0.2941176471em\",\n\t\t\"word-break\":           \"break-word\",\n\t}\n}\n\n// Inline Code Component\n// For inline code snippets within text.\n//\n//nolint:unused // Reserved for future inline code usage.\nfunc inlineCode(code string) *elem.Element {\n\treturn elem.Code(attrs.Props{\n\t\tattrs.Style: inlineCodeStyles().ToInline(),\n\t}, elem.Text(code))\n}\n\n// orDivider creates a visual \"or\" divider between sections.\n// Styled with lines on either side for better visual separation.\n//\n//nolint:unused // Used in apple.go template.\nfunc orDivider() *elem.Element {\n\treturn elem.Div(attrs.Props{\n\t\tattrs.Style: styles.Props{\n\t\t\tstyles.Display:      
\"flex\",\n\t\t\tstyles.AlignItems:   \"center\",\n\t\t\tstyles.Gap:          spaceM,\n\t\t\tstyles.MarginTop:    space2XL,\n\t\t\tstyles.MarginBottom: space2XL,\n\t\t\tstyles.Width:        \"100%\",\n\t\t}.ToInline(),\n\t},\n\t\telem.Div(attrs.Props{\n\t\t\tattrs.Style: styles.Props{\n\t\t\t\tstyles.Flex:            \"1\",\n\t\t\t\tstyles.Height:          \"1px\",\n\t\t\t\tstyles.BackgroundColor: colorBorderLight,\n\t\t\t}.ToInline(),\n\t\t}),\n\t\telem.Strong(attrs.Props{\n\t\t\tattrs.Style: styles.Props{\n\t\t\t\tstyles.Color:      colorTextSecondary,\n\t\t\t\tstyles.FontSize:   fontSizeBase,\n\t\t\t\tstyles.FontWeight: \"500\",\n\t\t\t\t\"text-transform\":  \"uppercase\",\n\t\t\t\t\"letter-spacing\":  \"0.05em\",\n\t\t\t}.ToInline(),\n\t\t}, elem.Text(\"or\")),\n\t\telem.Div(attrs.Props{\n\t\t\tattrs.Style: styles.Props{\n\t\t\t\tstyles.Flex:            \"1\",\n\t\t\t\tstyles.Height:          \"1px\",\n\t\t\t\tstyles.BackgroundColor: colorBorderLight,\n\t\t\t}.ToInline(),\n\t\t}),\n\t)\n}\n\n// successBox creates a green success feedback box with a checkmark icon.\n// The heading is displayed as bold green text, and children are rendered below it.\n// Pairs with warningBox for consistent feedback styling.\n//\n//nolint:unused // Used in auth_success.go template.\nfunc successBox(heading string, children ...elem.Node) *elem.Element {\n\treturn elem.Div(attrs.Props{\n\t\tattrs.Style: styles.Props{\n\t\t\tstyles.Display:         \"flex\",\n\t\t\tstyles.AlignItems:      \"center\",\n\t\t\tstyles.Gap:             spaceM,\n\t\t\tstyles.Padding:         spaceL,\n\t\t\tstyles.BackgroundColor: colorSuccessLight,\n\t\t\tstyles.Border:          \"1px solid \" + colorSuccess,\n\t\t\tstyles.BorderRadius:    \"0.5rem\",\n\t\t\tstyles.MarginBottom:    spaceXL,\n\t\t}.ToInline(),\n\t},\n\t\tcheckboxIcon(),\n\t\telem.Div(nil,\n\t\t\tappend([]elem.Node{\n\t\t\t\telem.Strong(attrs.Props{\n\t\t\t\t\tattrs.Style: styles.Props{\n\t\t\t\t\t\tstyles.Display:      \"block\",\n\t\t\t\t\t\tstyles.Color:        colorSuccess,\n\t\t\t\t\t\tstyles.FontSize:     fontSizeH3,\n\t\t\t\t\t\tstyles.MarginBottom: spaceXS,\n\t\t\t\t\t}.ToInline(),\n\t\t\t\t}, elem.Text(heading)),\n\t\t\t}, children...)...,\n\t\t),\n\t)\n}\n\n// checkboxIcon returns the success checkbox SVG icon as raw HTML.\nfunc checkboxIcon() elem.Node {\n\treturn elem.Raw(`<svg id=\"checkbox\" aria-hidden=\"true\" xmlns=\"http://www.w3.org/2000/svg\" width=\"48\" height=\"48\" viewBox=\"0 0 512 512\">\n  <path d=\"M256 32C132.3 32 32 132.3 32 256s100.3 224 224 224 224-100.3 224-224S379.7 32 256 32zm114.9 149.1L231.8 359.6c-1.1 1.1-2.9 3.5-5.1 3.5-2.3 0-3.8-1.6-5.1-2.9-1.3-1.3-78.9-75.9-78.9-75.9l-1.5-1.5c-.6-.9-1.1-2-1.1-3.2 0-1.2.5-2.3 1.1-3.2.4-.4.7-.7 1.1-1.2 7.7-8.1 23.3-24.5 24.3-25.5 1.3-1.3 2.4-3 4.8-3 2.5 0 4.1 2.1 5.3 3.3 1.2 1.2 45 43.3 45 43.3l111.3-143c1-.8 2.2-1.4 3.5-1.4 1.3 0 2.5.5 3.5 1.3l30.6 24.1c.8 1 1.3 2.2 1.3 3.5.1 1.3-.4 2.4-1 3.3z\"></path>\n</svg>`)\n}\n\n// warningBox creates a warning message box with icon and content.\n//\n//nolint:unused // Used in apple.go template.\nfunc warningBox(title, message string) *elem.Element {\n\treturn elem.Div(attrs.Props{\n\t\tattrs.Style: styles.Props{\n\t\t\tstyles.Display:         \"flex\",\n\t\t\tstyles.AlignItems:      \"flex-start\",\n\t\t\tstyles.Gap:             spaceM,\n\t\t\tstyles.Padding:         spaceL,\n\t\t\tstyles.BackgroundColor: \"#fef3c7\",           // yellow-100\n\t\t\tstyles.Border:          \"1px solid #f59e0b\", // yellow-500\n\t\t\tstyles.BorderRadius:    
\"0.5rem\",\n\t\t\tstyles.MarginTop:       spaceL,\n\t\t\tstyles.MarginBottom:    spaceL,\n\t\t}.ToInline(),\n\t},\n\t\telem.Raw(`<svg xmlns=\"http://www.w3.org/2000/svg\" width=\"24\" height=\"24\" viewBox=\"0 0 24 24\" fill=\"none\" stroke=\"#f59e0b\" stroke-width=\"2\" stroke-linecap=\"round\" stroke-linejoin=\"round\" style=\"flex-shrink: 0; margin-top: 2px;\"><path d=\"M10.29 3.86L1.82 18a2 2 0 0 0 1.71 3h16.94a2 2 0 0 0 1.71-3L13.71 3.86a2 2 0 0 0-3.42 0z\"></path><line x1=\"12\" y1=\"9\" x2=\"12\" y2=\"13\"></line><line x1=\"12\" y1=\"17\" x2=\"12.01\" y2=\"17\"></line></svg>`),\n\t\telem.Div(nil,\n\t\t\telem.Strong(attrs.Props{\n\t\t\t\tattrs.Style: styles.Props{\n\t\t\t\t\tstyles.Display:      \"block\",\n\t\t\t\t\tstyles.Color:        \"#92400e\", // yellow-800\n\t\t\t\t\tstyles.FontSize:     fontSizeH3,\n\t\t\t\t\tstyles.MarginBottom: spaceXS,\n\t\t\t\t}.ToInline(),\n\t\t\t}, elem.Text(title)),\n\t\t\telem.Div(attrs.Props{\n\t\t\t\tattrs.Style: styles.Props{\n\t\t\t\t\tstyles.Color:    colorTextPrimary,\n\t\t\t\t\tstyles.FontSize: fontSizeBase,\n\t\t\t\t}.ToInline(),\n\t\t\t}, elem.Text(message)),\n\t\t),\n\t)\n}\n\n// downloadButton creates a nice button-style link for downloads.\n//\n//nolint:unused // Used in apple.go template.\nfunc downloadButton(href, text string) *elem.Element {\n\treturn elem.A(attrs.Props{\n\t\tattrs.Href:     href,\n\t\tattrs.Download: \"headscale_macos.mobileconfig\",\n\t\tattrs.Style: styles.Props{\n\t\t\tstyles.Display:         \"inline-block\",\n\t\t\tstyles.Padding:         \"0.75rem 1.5rem\",\n\t\t\tstyles.BackgroundColor: \"#3b82f6\", // blue-500\n\t\t\tstyles.Color:           \"#ffffff\",\n\t\t\tstyles.TextDecoration:  \"none\",\n\t\t\tstyles.BorderRadius:    \"0.5rem\",\n\t\t\tstyles.FontWeight:      \"500\",\n\t\t\tstyles.Transition:      \"background-color 0.2s\",\n\t\t\tstyles.MarginRight:     spaceM,\n\t\t\tstyles.MarginBottom:    spaceM,\n\t\t}.ToInline(),\n\t}, elem.Text(text))\n}\n\n// External Link Component\n// Creates a link with proper security attributes for external URLs.\n// Automatically adds rel=\"noreferrer noopener\" and target=\"_blank\".\n//\n//nolint:unused // Used in apple.go, oidc_callback.go templates.\nfunc externalLink(href, text string) *elem.Element {\n\treturn elem.A(attrs.Props{\n\t\tattrs.Href:   href,\n\t\tattrs.Rel:    \"noreferrer noopener\",\n\t\tattrs.Target: \"_blank\",\n\t\tattrs.Style: styles.Props{\n\t\t\tstyles.Color:          colorPrimaryAccent, // #4051b5 - base link color\n\t\t\tstyles.TextDecoration: \"none\",\n\t\t}.ToInline(),\n\t}, elem.Text(text))\n}\n\n// Instruction Step Component\n// For numbered instruction lists with consistent formatting.\n//\n//nolint:unused // Reserved for future use in Phase 4.\nfunc instructionStep(_ int, text string) *elem.Element {\n\treturn elem.Li(attrs.Props{\n\t\tattrs.Style: styles.Props{\n\t\t\tstyles.MarginBottom: spaceS,\n\t\t\tstyles.LineHeight:   lineHeightBase,\n\t\t}.ToInline(),\n\t}, elem.Text(text))\n}\n\n// Status Message Component\n// For displaying success/error/info messages with appropriate styling.\n//\n//nolint:unused // Reserved for future use in Phase 4.\nfunc statusMessage(message string, isSuccess bool) *elem.Element {\n\tbgColor := colorSuccessLight\n\ttextColor := colorSuccess\n\n\tif !isSuccess {\n\t\tbgColor = \"#fee2e2\"   // red-100\n\t\ttextColor = \"#dc2626\" // red-600\n\t}\n\n\treturn elem.Div(attrs.Props{\n\t\tattrs.Style: styles.Props{\n\t\t\tstyles.Padding:         spaceM,\n\t\t\tstyles.BackgroundColor: 
bgColor,\n\t\t\tstyles.Color:           textColor,\n\t\t\tstyles.BorderRadius:    \"0.5rem\",\n\t\t\tstyles.Border:          \"1px solid \" + textColor,\n\t\t\tstyles.MarginBottom:    spaceL,\n\t\t\tstyles.FontSize:        fontSizeBase,\n\t\t\tstyles.LineHeight:      lineHeightBase,\n\t\t}.ToInline(),\n\t}, elem.Text(message))\n}\n"
  },
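  {
    "path": "hscontrol/templates/design_sketch.go",
    "content": "// Illustrative sketch only: this file is not part of the headscale\n// repository. It is written in the templates package purely to show how the\n// unexported design-system helpers defined in design.go compose; the titles\n// and messages are made up.\npackage templates\n\nimport (\n\telem \"github.com/chasefleming/elem-go\"\n)\n\n// designSketch builds a card holding a success statusMessage and a\n// warningBox. Every visual property comes from the color, spacing and\n// typography constants defined above.\n//\n//nolint:unused // Illustrative sketch.\nfunc designSketch() *elem.Element {\n\treturn card(\"Connection status\",\n\t\tstatusMessage(\"Connected to headscale\", true),\n\t\twarningBox(\"Heads up\", \"statusMessage switches to red when isSuccess is false.\"),\n\t)\n}\n"
  },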
  {
    "path": "hscontrol/templates/general.go",
    "content": "package templates\n\nimport (\n\t\"github.com/chasefleming/elem-go\"\n\t\"github.com/chasefleming/elem-go/attrs\"\n\t\"github.com/chasefleming/elem-go/styles\"\n\t\"github.com/juanfont/headscale/hscontrol/assets\"\n)\n\n// mdTypesetBody creates a body element with md-typeset styling\n// that matches the official Headscale documentation design.\n// Uses CSS classes with styles defined in assets.CSS.\nfunc mdTypesetBody(children ...elem.Node) *elem.Element {\n\treturn elem.Body(attrs.Props{\n\t\tattrs.Style: styles.Props{\n\t\t\tstyles.MinHeight:       \"100vh\",\n\t\t\tstyles.Display:         \"flex\",\n\t\t\tstyles.FlexDirection:   \"column\",\n\t\t\tstyles.AlignItems:      \"center\",\n\t\t\tstyles.BackgroundColor: \"#ffffff\",\n\t\t\tstyles.Padding:         \"3rem 1.5rem\",\n\t\t}.ToInline(),\n\t\t\"translate\": \"no\",\n\t},\n\t\telem.Div(attrs.Props{\n\t\t\tattrs.Class: \"md-typeset\",\n\t\t\tattrs.Style: styles.Props{\n\t\t\t\tstyles.MaxWidth: \"min(800px, 90vw)\",\n\t\t\t\tstyles.Width:    \"100%\",\n\t\t\t}.ToInline(),\n\t\t}, children...),\n\t)\n}\n\n// Styled Element Wrappers\n// These functions wrap elem-go elements using CSS classes.\n// Styling is handled by the CSS in assets.CSS.\n\n// H1 creates a H1 element styled by .md-typeset h1\nfunc H1(children ...elem.Node) *elem.Element {\n\treturn elem.H1(nil, children...)\n}\n\n// H2 creates a H2 element styled by .md-typeset h2\nfunc H2(children ...elem.Node) *elem.Element {\n\treturn elem.H2(nil, children...)\n}\n\n// H3 creates a H3 element styled by .md-typeset h3\nfunc H3(children ...elem.Node) *elem.Element {\n\treturn elem.H3(nil, children...)\n}\n\n// P creates a paragraph element styled by .md-typeset p\nfunc P(children ...elem.Node) *elem.Element {\n\treturn elem.P(nil, children...)\n}\n\n// Ol creates an ordered list element styled by .md-typeset ol\nfunc Ol(children ...elem.Node) *elem.Element {\n\treturn elem.Ol(nil, children...)\n}\n\n// Ul creates an unordered list element styled by .md-typeset ul\nfunc Ul(children ...elem.Node) *elem.Element {\n\treturn elem.Ul(nil, children...)\n}\n\n// A creates a link element styled by .md-typeset a\nfunc A(href string, children ...elem.Node) *elem.Element {\n\treturn elem.A(attrs.Props{attrs.Href: href}, children...)\n}\n\n// Code creates an inline code element styled by .md-typeset code\nfunc Code(children ...elem.Node) *elem.Element {\n\treturn elem.Code(nil, children...)\n}\n\n// Pre creates a preformatted text block styled by .md-typeset pre\nfunc Pre(children ...elem.Node) *elem.Element {\n\treturn elem.Pre(nil, children...)\n}\n\n// PreCode creates a code block inside Pre styled by .md-typeset pre > code\nfunc PreCode(code string) *elem.Element {\n\treturn elem.Code(nil, elem.Text(code))\n}\n\n// Deprecated: use H1, H2, H3 instead\nfunc headerOne(text string) *elem.Element {\n\treturn H1(elem.Text(text))\n}\n\n// Deprecated: use H1, H2, H3 instead\nfunc headerTwo(text string) *elem.Element {\n\treturn H2(elem.Text(text))\n}\n\n// Deprecated: use H1, H2, H3 instead\nfunc headerThree(text string) *elem.Element {\n\treturn H3(elem.Text(text))\n}\n\n// contentContainer wraps page content with proper width.\n// Content inside is left-aligned by default.\nfunc contentContainer(children ...elem.Node) *elem.Element {\n\tcontainerStyle := styles.Props{\n\t\tstyles.MaxWidth:      \"720px\",\n\t\tstyles.Width:         \"100%\",\n\t\tstyles.Display:       \"flex\",\n\t\tstyles.FlexDirection: \"column\",\n\t\tstyles.AlignItems:    \"flex-start\", // Left-align all 
children\n\t}\n\n\treturn elem.Div(attrs.Props{attrs.Style: containerStyle.ToInline()}, children...)\n}\n\n// headscaleLogo returns the Headscale SVG logo for consistent branding across all pages.\n// The logo is styled by the .headscale-logo CSS class.\nfunc headscaleLogo() elem.Node {\n\t// Return the embedded SVG as-is\n\treturn elem.Raw(assets.SVG)\n}\n\n// pageFooter creates a consistent footer for all pages.\nfunc pageFooter() *elem.Element {\n\tfooterStyle := styles.Props{\n\t\tstyles.MarginTop:  space3XL,\n\t\tstyles.TextAlign:  \"center\",\n\t\tstyles.FontSize:   fontSizeSmall,\n\t\tstyles.Color:      colorTextSecondary,\n\t\tstyles.LineHeight: lineHeightBase,\n\t}\n\n\tlinkStyle := styles.Props{\n\t\tstyles.Color:          colorTextSecondary,\n\t\tstyles.TextDecoration: \"underline\",\n\t}\n\n\treturn elem.Div(attrs.Props{attrs.Style: footerStyle.ToInline()},\n\t\telem.Text(\"Powered by \"),\n\t\telem.A(attrs.Props{\n\t\t\tattrs.Href:   \"https://github.com/juanfont/headscale\",\n\t\t\tattrs.Rel:    \"noreferrer noopener\",\n\t\t\tattrs.Target: \"_blank\",\n\t\t\tattrs.Style:  linkStyle.ToInline(),\n\t\t}, elem.Text(\"Headscale\")),\n\t)\n}\n\n// listStyle provides consistent styling for ordered and unordered lists\n// EXTRACTED FROM: .md-typeset ol, .md-typeset ul CSS rules\nvar listStyle = styles.Props{\n\tstyles.LineHeight:   lineHeightBase,               // 1.6 - From .md-typeset\n\tstyles.MarginTop:    \"1em\",                        // From CSS: margin-top: 1em\n\tstyles.MarginBottom: \"1em\",                        // From CSS: margin-bottom: 1em\n\tstyles.PaddingLeft:  \"clamp(1.5rem, 5vw, 2.5rem)\", // Responsive indentation\n}\n\n// HtmlStructure creates a complete HTML document structure with proper meta tags\n// and semantic HTML5 structure. 
The head and body elements are passed as parameters\n// to allow for customization of each page.\n// Styling is provided via a CSS stylesheet (Material for MkDocs design system) with\n// minimal inline styles for layout and positioning.\nfunc HtmlStructure(head, body *elem.Element) *elem.Element {\n\treturn elem.Html(attrs.Props{attrs.Lang: \"en\"},\n\t\telem.Head(nil,\n\t\t\telem.Meta(attrs.Props{\n\t\t\t\tattrs.Charset: \"UTF-8\",\n\t\t\t}),\n\t\t\telem.Meta(attrs.Props{\n\t\t\t\tattrs.HTTPequiv: \"X-UA-Compatible\",\n\t\t\t\tattrs.Content:   \"IE=edge\",\n\t\t\t}),\n\t\t\telem.Meta(attrs.Props{\n\t\t\t\tattrs.Name:    \"viewport\",\n\t\t\t\tattrs.Content: \"width=device-width, initial-scale=1.0\",\n\t\t\t}),\n\t\t\telem.Link(attrs.Props{\n\t\t\t\tattrs.Rel:  \"icon\",\n\t\t\t\tattrs.Href: \"/favicon.ico\",\n\t\t\t}),\n\t\t\t// Google Fonts for Roboto and Roboto Mono\n\t\t\telem.Link(attrs.Props{\n\t\t\t\tattrs.Rel:     \"preconnect\",\n\t\t\t\tattrs.Href:    \"https://fonts.gstatic.com\",\n\t\t\t\t\"crossorigin\": \"\",\n\t\t\t}),\n\t\t\telem.Link(attrs.Props{\n\t\t\t\tattrs.Rel:  \"stylesheet\",\n\t\t\t\tattrs.Href: \"https://fonts.googleapis.com/css2?family=Roboto:wght@300;400;500;700&family=Roboto+Mono:wght@400;700&display=swap\",\n\t\t\t}),\n\t\t\t// Material for MkDocs CSS styles\n\t\t\telem.Style(attrs.Props{attrs.Type: \"text/css\"}, elem.Raw(assets.CSS)),\n\t\t\thead,\n\t\t),\n\t\tbody,\n\t)\n}\n\n// BlankPage creates a minimal blank HTML page with favicon.\n// Used for endpoints that need to return a valid HTML page with no content.\nfunc BlankPage() *elem.Element {\n\treturn elem.Html(attrs.Props{attrs.Lang: \"en\"},\n\t\telem.Head(nil,\n\t\t\telem.Meta(attrs.Props{\n\t\t\t\tattrs.Charset: \"UTF-8\",\n\t\t\t}),\n\t\t\telem.Link(attrs.Props{\n\t\t\t\tattrs.Rel:  \"icon\",\n\t\t\t\tattrs.Href: \"/favicon.ico\",\n\t\t\t}),\n\t\t),\n\t\telem.Body(nil),\n\t)\n}\n"
  },
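  {
    "path": "docs/examples/html_structure_page/main.go",
    "content": "// Illustrative sketch, not part of the headscale source tree: builds a\n// minimal page from the exported wrappers in general.go. HtmlStructure\n// supplies the meta tags, favicon, fonts and embedded CSS; the caller only\n// provides a <title> element and a <body>. The page content and URL are\n// made up.\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/chasefleming/elem-go\"\n\t\"github.com/juanfont/headscale/hscontrol/templates\"\n)\n\nfunc main() {\n\tpage := templates.HtmlStructure(\n\t\telem.Title(nil, elem.Text(\"headscale - Example\")),\n\t\telem.Body(nil,\n\t\t\ttemplates.H1(elem.Text(\"Example page\")),\n\t\t\ttemplates.P(\n\t\t\t\telem.Text(\"Connect with \"),\n\t\t\t\ttemplates.Code(elem.Text(\"tailscale login\")),\n\t\t\t\telem.Text(\".\"),\n\t\t\t),\n\t\t\ttemplates.Pre(templates.PreCode(\"tailscale login --login-server https://headscale.example.com\")),\n\t\t),\n\t)\n\n\t// Render yields the complete document, starting with <!DOCTYPE html>.\n\tfmt.Println(page.Render())\n}\n"
  },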
  {
    "path": "hscontrol/templates/windows.go",
    "content": "package templates\n\nimport (\n\t\"github.com/chasefleming/elem-go\"\n)\n\nfunc Windows(url string) *elem.Element {\n\treturn HtmlStructure(\n\t\telem.Title(nil,\n\t\t\telem.Text(\"headscale - Windows\"),\n\t\t),\n\t\tmdTypesetBody(\n\t\t\theadscaleLogo(),\n\t\t\tH1(elem.Text(\"Windows configuration\")),\n\t\t\tP(\n\t\t\t\telem.Text(\"Download \"),\n\t\t\t\texternalLink(\"https://tailscale.com/download/windows\", \"Tailscale for Windows\"),\n\t\t\t\telem.Text(\" and install it.\"),\n\t\t\t),\n\t\t\tP(\n\t\t\t\telem.Text(\"Open a Command Prompt or PowerShell and use Tailscale's login command to connect with headscale:\"),\n\t\t\t),\n\t\t\tPre(PreCode(\"tailscale login --login-server \"+url)),\n\t\t\tpageFooter(),\n\t\t),\n\t)\n}\n"
  },
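  {
    "path": "docs/examples/windows_page/main.go",
    "content": "// Illustrative sketch, not part of the headscale source tree: renders the\n// Windows page for a given server URL and checks that the login command is\n// embedded verbatim in the HTML, mirroring the consistency tests below.\n// The server URL is a placeholder.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/juanfont/headscale/hscontrol/templates\"\n)\n\nfunc main() {\n\thtml := templates.Windows(\"https://headscale.example.com\").Render()\n\n\t// The page embeds the exact CLI command users should run.\n\tfmt.Println(strings.Contains(html, \"tailscale login --login-server https://headscale.example.com\")) // true\n}\n"
  },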
  {
    "path": "hscontrol/templates_consistency_test.go",
    "content": "package hscontrol\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/templates\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestTemplateHTMLConsistency(t *testing.T) {\n\t// Test all templates produce consistent modern HTML\n\ttestCases := []struct {\n\t\tname string\n\t\thtml string\n\t}{\n\t\t{\n\t\t\tname: \"Auth Success\",\n\t\t\thtml: templates.AuthSuccess(templates.AuthSuccessResult{\n\t\t\t\tTitle:   \"Headscale - Node Registered\",\n\t\t\t\tHeading: \"Node registered\",\n\t\t\t\tVerb:    \"Registered\",\n\t\t\t\tUser:    \"test@example.com\",\n\t\t\t\tMessage: \"You can now close this window.\",\n\t\t\t}).Render(),\n\t\t},\n\t\t{\n\t\t\tname: \"Auth Web Register\",\n\t\t\thtml: templates.AuthWeb(\n\t\t\t\t\"Machine registration\",\n\t\t\t\t\"Run the command below in the headscale server to add this machine to your network:\",\n\t\t\t\t\"headscale auth register --auth-id test-key-123 --user USERNAME\",\n\t\t\t).Render(),\n\t\t},\n\t\t{\n\t\t\tname: \"Auth Web Approve\",\n\t\t\thtml: templates.AuthWeb(\n\t\t\t\t\"Authentication check\",\n\t\t\t\t\"Run the command below in the headscale server to approve this authentication request:\",\n\t\t\t\t\"headscale auth approve --auth-id test-key-123\",\n\t\t\t).Render(),\n\t\t},\n\t\t{\n\t\t\tname: \"Windows Config\",\n\t\t\thtml: templates.Windows(\"https://example.com\").Render(),\n\t\t},\n\t\t{\n\t\t\tname: \"Apple Config\",\n\t\t\thtml: templates.Apple(\"https://example.com\").Render(),\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\t// Check DOCTYPE\n\t\t\tassert.True(t, strings.HasPrefix(tc.html, \"<!DOCTYPE html>\"),\n\t\t\t\t\"%s should start with <!DOCTYPE html>\", tc.name)\n\n\t\t\t// Check HTML5 lang attribute\n\t\t\tassert.Contains(t, tc.html, `<html lang=\"en\">`,\n\t\t\t\t\"%s should have html lang=\\\"en\\\"\", tc.name)\n\n\t\t\t// Check UTF-8 charset\n\t\t\tassert.Contains(t, tc.html, `charset=\"UTF-8\"`,\n\t\t\t\t\"%s should have UTF-8 charset\", tc.name)\n\n\t\t\t// Check viewport meta tag\n\t\t\tassert.Contains(t, tc.html, `name=\"viewport\"`,\n\t\t\t\t\"%s should have viewport meta tag\", tc.name)\n\n\t\t\t// Check IE compatibility meta tag\n\t\t\tassert.Contains(t, tc.html, `X-UA-Compatible`,\n\t\t\t\t\"%s should have X-UA-Compatible meta tag\", tc.name)\n\n\t\t\t// Check closing tags\n\t\t\tassert.Contains(t, tc.html, \"</html>\",\n\t\t\t\t\"%s should have closing html tag\", tc.name)\n\t\t\tassert.Contains(t, tc.html, \"</head>\",\n\t\t\t\t\"%s should have closing head tag\", tc.name)\n\t\t\tassert.Contains(t, tc.html, \"</body>\",\n\t\t\t\t\"%s should have closing body tag\", tc.name)\n\t\t})\n\t}\n}\n\nfunc TestTemplateModernHTMLFeatures(t *testing.T) {\n\ttestCases := []struct {\n\t\tname string\n\t\thtml string\n\t}{\n\t\t{\n\t\t\tname: \"Auth Success\",\n\t\t\thtml: templates.AuthSuccess(templates.AuthSuccessResult{\n\t\t\t\tTitle:   \"Headscale - Node Registered\",\n\t\t\t\tHeading: \"Node registered\",\n\t\t\t\tVerb:    \"Registered\",\n\t\t\t\tUser:    \"test@example.com\",\n\t\t\t\tMessage: \"You can now close this window.\",\n\t\t\t}).Render(),\n\t\t},\n\t\t{\n\t\t\tname: \"Auth Web Register\",\n\t\t\thtml: templates.AuthWeb(\n\t\t\t\t\"Machine registration\",\n\t\t\t\t\"Run the command below in the headscale server to add this machine to your network:\",\n\t\t\t\t\"headscale auth register --auth-id test-key-123 --user USERNAME\",\n\t\t\t).Render(),\n\t\t},\n\t\t{\n\t\t\tname: \"Auth Web 
Approve\",\n\t\t\thtml: templates.AuthWeb(\n\t\t\t\t\"Authentication check\",\n\t\t\t\t\"Run the command below in the headscale server to approve this authentication request:\",\n\t\t\t\t\"headscale auth approve --auth-id test-key-123\",\n\t\t\t).Render(),\n\t\t},\n\t\t{\n\t\t\tname: \"Windows Config\",\n\t\t\thtml: templates.Windows(\"https://example.com\").Render(),\n\t\t},\n\t\t{\n\t\t\tname: \"Apple Config\",\n\t\t\thtml: templates.Apple(\"https://example.com\").Render(),\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\t// Check no deprecated tags\n\t\t\tassert.NotContains(t, tc.html, \"<font\",\n\t\t\t\t\"%s should not use deprecated <font> tag\", tc.name)\n\t\t\tassert.NotContains(t, tc.html, \"<center\",\n\t\t\t\t\"%s should not use deprecated <center> tag\", tc.name)\n\n\t\t\t// Check modern structure\n\t\t\tassert.Contains(t, tc.html, \"<head>\",\n\t\t\t\t\"%s should have <head> section\", tc.name)\n\t\t\tassert.Contains(t, tc.html, \"<body\",\n\t\t\t\t\"%s should have <body> section\", tc.name)\n\t\t\tassert.Contains(t, tc.html, \"<title>\",\n\t\t\t\t\"%s should have <title> tag\", tc.name)\n\t\t})\n\t}\n}\n\nfunc TestTemplateExternalLinkSecurity(t *testing.T) {\n\t// Test that all external links (http/https) have proper security attributes\n\ttestCases := []struct {\n\t\tname         string\n\t\thtml         string\n\t\texternalURLs []string // URLs that should have security attributes\n\t}{\n\t\t{\n\t\t\tname: \"Auth Success\",\n\t\t\thtml: templates.AuthSuccess(templates.AuthSuccessResult{\n\t\t\t\tTitle:   \"Headscale - Node Registered\",\n\t\t\t\tHeading: \"Node registered\",\n\t\t\t\tVerb:    \"Registered\",\n\t\t\t\tUser:    \"test@example.com\",\n\t\t\t\tMessage: \"You can now close this window.\",\n\t\t\t}).Render(),\n\t\t\texternalURLs: []string{\n\t\t\t\t\"https://headscale.net/stable/\",\n\t\t\t\t\"https://tailscale.com/kb/\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Auth Web Register\",\n\t\t\thtml: templates.AuthWeb(\n\t\t\t\t\"Machine registration\",\n\t\t\t\t\"Run the command below in the headscale server to add this machine to your network:\",\n\t\t\t\t\"headscale auth register --auth-id test-key-123 --user USERNAME\",\n\t\t\t).Render(),\n\t\t\texternalURLs: []string{}, // No external links\n\t\t},\n\t\t{\n\t\t\tname: \"Auth Web Approve\",\n\t\t\thtml: templates.AuthWeb(\n\t\t\t\t\"Authentication check\",\n\t\t\t\t\"Run the command below in the headscale server to approve this authentication request:\",\n\t\t\t\t\"headscale auth approve --auth-id test-key-123\",\n\t\t\t).Render(),\n\t\t\texternalURLs: []string{}, // No external links\n\t\t},\n\t\t{\n\t\t\tname: \"Windows Config\",\n\t\t\thtml: templates.Windows(\"https://example.com\").Render(),\n\t\t\texternalURLs: []string{\n\t\t\t\t\"https://tailscale.com/download/windows\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Apple Config\",\n\t\t\thtml: templates.Apple(\"https://example.com\").Render(),\n\t\t\texternalURLs: []string{\n\t\t\t\t\"https://apps.apple.com/app/tailscale/id1470499037\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tfor _, url := range tc.externalURLs {\n\t\t\t\t// Find the link tag containing this URL\n\t\t\t\tif !strings.Contains(tc.html, url) {\n\t\t\t\t\tt.Errorf(\"%s should contain external link %s\", tc.name, url)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// Check for rel=\"noreferrer noopener\"\n\t\t\t\t// We look for the pattern: href=\"URL\"...rel=\"noreferrer 
noopener\"\n\t\t\t\t// The attributes might be in any order, so we check within a reasonable window\n\t\t\t\tidx := strings.Index(tc.html, url)\n\t\t\t\tif idx == -1 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// Look for the closing > of the <a> tag (within 200 chars should be safe)\n\t\t\t\tendIdx := strings.Index(tc.html[idx:idx+200], \">\")\n\t\t\t\tif endIdx == -1 {\n\t\t\t\t\tendIdx = 200\n\t\t\t\t}\n\n\t\t\t\tlinkTag := tc.html[idx : idx+endIdx]\n\n\t\t\t\tassert.Contains(t, linkTag, `rel=\"noreferrer noopener\"`,\n\t\t\t\t\t\"%s external link %s should have rel=\\\"noreferrer noopener\\\"\", tc.name, url)\n\t\t\t\tassert.Contains(t, linkTag, `target=\"_blank\"`,\n\t\t\t\t\t\"%s external link %s should have target=\\\"_blank\\\"\", tc.name, url)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestTemplateAccessibilityAttributes(t *testing.T) {\n\t// Test that all templates have proper accessibility attributes\n\ttestCases := []struct {\n\t\tname string\n\t\thtml string\n\t}{\n\t\t{\n\t\t\tname: \"Auth Success\",\n\t\t\thtml: templates.AuthSuccess(templates.AuthSuccessResult{\n\t\t\t\tTitle:   \"Headscale - Node Registered\",\n\t\t\t\tHeading: \"Node registered\",\n\t\t\t\tVerb:    \"Registered\",\n\t\t\t\tUser:    \"test@example.com\",\n\t\t\t\tMessage: \"You can now close this window.\",\n\t\t\t}).Render(),\n\t\t},\n\t\t{\n\t\t\tname: \"Auth Web Register\",\n\t\t\thtml: templates.AuthWeb(\n\t\t\t\t\"Machine registration\",\n\t\t\t\t\"Run the command below in the headscale server to add this machine to your network:\",\n\t\t\t\t\"headscale auth register --auth-id test-key-123 --user USERNAME\",\n\t\t\t).Render(),\n\t\t},\n\t\t{\n\t\t\tname: \"Auth Web Approve\",\n\t\t\thtml: templates.AuthWeb(\n\t\t\t\t\"Authentication check\",\n\t\t\t\t\"Run the command below in the headscale server to approve this authentication request:\",\n\t\t\t\t\"headscale auth approve --auth-id test-key-123\",\n\t\t\t).Render(),\n\t\t},\n\t\t{\n\t\t\tname: \"Windows Config\",\n\t\t\thtml: templates.Windows(\"https://example.com\").Render(),\n\t\t},\n\t\t{\n\t\t\tname: \"Apple Config\",\n\t\t\thtml: templates.Apple(\"https://example.com\").Render(),\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\t// Check for translate=\"no\" on body tag to prevent browser translation\n\t\t\t// This is important for technical documentation with commands\n\t\t\tassert.Contains(t, tc.html, `translate=\"no\"`,\n\t\t\t\t\"%s should have translate=\\\"no\\\" attribute on body tag\", tc.name)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "hscontrol/types/api_key.go",
    "content": "package types\n\nimport (\n\t\"time\"\n\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\t\"github.com/juanfont/headscale/hscontrol/util/zlog/zf\"\n\t\"github.com/rs/zerolog\"\n\t\"google.golang.org/protobuf/types/known/timestamppb\"\n)\n\nconst (\n\t// NewAPIKeyPrefixLength is the length of the prefix for new API keys.\n\tNewAPIKeyPrefixLength = 12\n\t// LegacyAPIKeyPrefixLength is the length of the prefix for legacy API keys.\n\tLegacyAPIKeyPrefixLength = 7\n)\n\n// APIKey describes the datamodel for API keys used to remotely authenticate with\n// headscale.\ntype APIKey struct {\n\tID     uint64 `gorm:\"primary_key\"`\n\tPrefix string `gorm:\"uniqueIndex\"`\n\tHash   []byte\n\n\tCreatedAt  *time.Time\n\tExpiration *time.Time\n\tLastSeen   *time.Time\n}\n\nfunc (key *APIKey) Proto() *v1.ApiKey {\n\tprotoKey := v1.ApiKey{\n\t\tId: key.ID,\n\t}\n\n\t// Show prefix format: distinguish between new (12-char) and legacy (7-char) keys\n\tif len(key.Prefix) == NewAPIKeyPrefixLength {\n\t\t// New format key (12-char prefix)\n\t\tprotoKey.Prefix = \"hskey-api-\" + key.Prefix + \"-***\"\n\t} else {\n\t\t// Legacy format key (7-char prefix) or fallback\n\t\tprotoKey.Prefix = key.Prefix + \"***\"\n\t}\n\n\tif key.Expiration != nil {\n\t\tprotoKey.Expiration = timestamppb.New(*key.Expiration)\n\t}\n\n\tif key.CreatedAt != nil {\n\t\tprotoKey.CreatedAt = timestamppb.New(*key.CreatedAt)\n\t}\n\n\tif key.LastSeen != nil {\n\t\tprotoKey.LastSeen = timestamppb.New(*key.LastSeen)\n\t}\n\n\treturn &protoKey\n}\n\n// maskedPrefix returns the API key prefix in masked format for safe logging.\n// SECURITY: Never log the full key or hash, only the masked prefix.\nfunc (k *APIKey) maskedPrefix() string {\n\tif len(k.Prefix) == NewAPIKeyPrefixLength {\n\t\treturn \"hskey-api-\" + k.Prefix + \"-***\"\n\t}\n\n\treturn k.Prefix + \"***\"\n}\n\n// MarshalZerologObject implements zerolog.LogObjectMarshaler for safe logging.\n// SECURITY: This method intentionally does NOT log the full key or hash.\n// Only the masked prefix is logged for identification purposes.\nfunc (k *APIKey) MarshalZerologObject(e *zerolog.Event) {\n\tif k == nil {\n\t\treturn\n\t}\n\n\te.Uint64(zf.APIKeyID, k.ID)\n\te.Str(zf.APIKeyPrefix, k.maskedPrefix())\n\n\tif k.Expiration != nil {\n\t\te.Time(zf.APIKeyExpiration, *k.Expiration)\n\t}\n\n\tif k.LastSeen != nil {\n\t\te.Time(zf.APIKeyLastSeen, *k.LastSeen)\n\t}\n}\n"
  },
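  {
    "path": "docs/examples/api_key_logging/main.go",
    "content": "// Illustrative sketch, not part of the headscale source tree: demonstrates\n// the masked-prefix behaviour documented in api_key.go. A 12-character\n// prefix is logged as \"hskey-api-<prefix>-***\" (new format); any other\n// length falls back to \"<prefix>***\" (legacy). The prefixes and logger\n// setup are hypothetical.\npackage main\n\nimport (\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/rs/zerolog\"\n)\n\nfunc main() {\n\tlogger := zerolog.New(os.Stdout)\n\texpiry := time.Now().Add(90 * 24 * time.Hour)\n\n\tnewKey := &types.APIKey{ID: 1, Prefix: \"abcdefgh1234\", Expiration: &expiry} // 12 chars: new format\n\tlegacyKey := &types.APIKey{ID: 2, Prefix: \"abcdefg\"}                        // 7 chars: legacy format\n\n\t// MarshalZerologObject emits only the masked prefix, never the hash.\n\tlogger.Info().Object(\"api_key\", newKey).Msg(\"new-format key\")\n\tlogger.Info().Object(\"api_key\", legacyKey).Msg(\"legacy key\")\n}\n"
  },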
  {
    "path": "hscontrol/types/change/change.go",
    "content": "package change\n\nimport (\n\t\"slices\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"tailscale.com/tailcfg\"\n)\n\n// Change declares what should be included in a MapResponse.\n// The mapper uses this to build the response without guessing.\ntype Change struct {\n\t// Reason is a human-readable description for logging/debugging.\n\tReason string\n\n\t// TargetNode, if set, means this response should only be sent to this node.\n\tTargetNode types.NodeID\n\n\t// OriginNode is the node that triggered this change.\n\t// Used for self-update detection and filtering.\n\tOriginNode types.NodeID\n\n\t// Content flags - what to include in the MapResponse.\n\tIncludeSelf    bool\n\tIncludeDERPMap bool\n\tIncludeDNS     bool\n\tIncludeDomain  bool\n\tIncludePolicy  bool // PacketFilters and SSHPolicy - always sent together\n\n\t// Peer changes.\n\tPeersChanged []types.NodeID\n\tPeersRemoved []types.NodeID\n\tPeerPatches  []*tailcfg.PeerChange\n\tSendAllPeers bool\n\n\t// RequiresRuntimePeerComputation indicates that peer visibility\n\t// must be computed at runtime per-node. Used for policy changes\n\t// where each node may have different peer visibility.\n\tRequiresRuntimePeerComputation bool\n}\n\n// boolFieldNames returns all boolean field names for exhaustive testing.\n// When adding a new boolean field to Change, add it here.\n// Tests use reflection to verify this matches the struct.\nfunc (r Change) boolFieldNames() []string {\n\treturn []string{\n\t\t\"IncludeSelf\",\n\t\t\"IncludeDERPMap\",\n\t\t\"IncludeDNS\",\n\t\t\"IncludeDomain\",\n\t\t\"IncludePolicy\",\n\t\t\"SendAllPeers\",\n\t\t\"RequiresRuntimePeerComputation\",\n\t}\n}\n\nfunc (r Change) Merge(other Change) Change {\n\tmerged := r\n\n\tmerged.IncludeSelf = r.IncludeSelf || other.IncludeSelf\n\tmerged.IncludeDERPMap = r.IncludeDERPMap || other.IncludeDERPMap\n\tmerged.IncludeDNS = r.IncludeDNS || other.IncludeDNS\n\tmerged.IncludeDomain = r.IncludeDomain || other.IncludeDomain\n\tmerged.IncludePolicy = r.IncludePolicy || other.IncludePolicy\n\tmerged.SendAllPeers = r.SendAllPeers || other.SendAllPeers\n\tmerged.RequiresRuntimePeerComputation = r.RequiresRuntimePeerComputation || other.RequiresRuntimePeerComputation\n\n\tmerged.PeersChanged = uniqueNodeIDs(append(r.PeersChanged, other.PeersChanged...))\n\tmerged.PeersRemoved = uniqueNodeIDs(append(r.PeersRemoved, other.PeersRemoved...))\n\tmerged.PeerPatches = append(r.PeerPatches, other.PeerPatches...)\n\n\t// Preserve OriginNode for self-update detection.\n\t// If either change has OriginNode set, keep it so the mapper\n\t// can detect self-updates and send the node its own changes.\n\tif merged.OriginNode == 0 {\n\t\tmerged.OriginNode = other.OriginNode\n\t}\n\n\t// Preserve TargetNode for targeted responses.\n\tif merged.TargetNode == 0 {\n\t\tmerged.TargetNode = other.TargetNode\n\t}\n\n\tif r.Reason != \"\" && other.Reason != \"\" && r.Reason != other.Reason {\n\t\tmerged.Reason = r.Reason + \"; \" + other.Reason\n\t} else if other.Reason != \"\" {\n\t\tmerged.Reason = other.Reason\n\t}\n\n\treturn merged\n}\n\nfunc (r Change) IsEmpty() bool {\n\tif r.IncludeSelf || r.IncludeDERPMap || r.IncludeDNS ||\n\t\tr.IncludeDomain || r.IncludePolicy || r.SendAllPeers {\n\t\treturn false\n\t}\n\n\tif r.RequiresRuntimePeerComputation {\n\t\treturn false\n\t}\n\n\treturn len(r.PeersChanged) == 0 &&\n\t\tlen(r.PeersRemoved) == 0 &&\n\t\tlen(r.PeerPatches) == 0\n}\n\nfunc (r Change) IsSelfOnly() bool {\n\tif r.TargetNode == 0 || !r.IncludeSelf 
{\n\t\treturn false\n\t}\n\n\tif r.SendAllPeers || len(r.PeersChanged) > 0 || len(r.PeersRemoved) > 0 || len(r.PeerPatches) > 0 {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\n// IsTargetedToNode returns true if this response should only be sent to TargetNode.\nfunc (r Change) IsTargetedToNode() bool {\n\treturn r.TargetNode != 0\n}\n\n// IsFull reports whether this is a full update response.\nfunc (r Change) IsFull() bool {\n\treturn r.SendAllPeers && r.IncludeSelf && r.IncludeDERPMap &&\n\t\tr.IncludeDNS && r.IncludeDomain && r.IncludePolicy\n}\n\n// Type returns a categorized type string for metrics.\n// This provides a bounded set of values suitable for Prometheus labels,\n// unlike Reason which is free-form text for logging.\nfunc (r Change) Type() string {\n\tif r.IsFull() {\n\t\treturn \"full\"\n\t}\n\n\tif r.IsSelfOnly() {\n\t\treturn \"self\"\n\t}\n\n\tif r.RequiresRuntimePeerComputation {\n\t\treturn \"policy\"\n\t}\n\n\tif len(r.PeerPatches) > 0 && len(r.PeersChanged) == 0 && len(r.PeersRemoved) == 0 && !r.SendAllPeers {\n\t\treturn \"patch\"\n\t}\n\n\tif len(r.PeersChanged) > 0 || len(r.PeersRemoved) > 0 || r.SendAllPeers {\n\t\treturn \"peers\"\n\t}\n\n\tif r.IncludeDERPMap || r.IncludeDNS || r.IncludeDomain || r.IncludePolicy {\n\t\treturn \"config\"\n\t}\n\n\treturn \"unknown\"\n}\n\n// ShouldSendToNode determines if this response should be sent to nodeID.\n// It handles self-only targeting and filtering out self-updates for non-origin nodes.\nfunc (r Change) ShouldSendToNode(nodeID types.NodeID) bool {\n\t// If targeted to a specific node, only send to that node\n\tif r.TargetNode != 0 {\n\t\treturn r.TargetNode == nodeID\n\t}\n\n\treturn true\n}\n\n// HasFull returns true if any response in the slice is a full update.\nfunc HasFull(rs []Change) bool {\n\tfor _, r := range rs {\n\t\tif r.IsFull() {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n// SplitTargetedAndBroadcast separates responses into targeted (to specific node) and broadcast.\nfunc SplitTargetedAndBroadcast(rs []Change) ([]Change, []Change) {\n\tvar broadcast, targeted []Change\n\n\tfor _, r := range rs {\n\t\tif r.IsTargetedToNode() {\n\t\t\ttargeted = append(targeted, r)\n\t\t} else {\n\t\t\tbroadcast = append(broadcast, r)\n\t\t}\n\t}\n\n\treturn broadcast, targeted\n}\n\n// FilterForNode returns responses that should be sent to the given node.\nfunc FilterForNode(nodeID types.NodeID, rs []Change) []Change {\n\tvar result []Change\n\n\tfor _, r := range rs {\n\t\tif r.ShouldSendToNode(nodeID) {\n\t\t\tresult = append(result, r)\n\t\t}\n\t}\n\n\treturn result\n}\n\nfunc uniqueNodeIDs(ids []types.NodeID) []types.NodeID {\n\tif len(ids) == 0 {\n\t\treturn nil\n\t}\n\n\tslices.Sort(ids)\n\n\treturn slices.Compact(ids)\n}\n\n// Constructor functions\n\nfunc FullUpdate() Change {\n\treturn Change{\n\t\tReason:         \"full update\",\n\t\tIncludeSelf:    true,\n\t\tIncludeDERPMap: true,\n\t\tIncludeDNS:     true,\n\t\tIncludeDomain:  true,\n\t\tIncludePolicy:  true,\n\t\tSendAllPeers:   true,\n\t}\n}\n\n// FullSelf returns a full update targeted at a specific node.\nfunc FullSelf(nodeID types.NodeID) Change {\n\treturn Change{\n\t\tReason:         \"full self update\",\n\t\tTargetNode:     nodeID,\n\t\tIncludeSelf:    true,\n\t\tIncludeDERPMap: true,\n\t\tIncludeDNS:     true,\n\t\tIncludeDomain:  true,\n\t\tIncludePolicy:  true,\n\t\tSendAllPeers:   true,\n\t}\n}\n\nfunc SelfUpdate(nodeID types.NodeID) Change {\n\treturn Change{\n\t\tReason:      \"self update\",\n\t\tTargetNode:  
nodeID,\n\t\tIncludeSelf: true,\n\t}\n}\n\nfunc PolicyOnly() Change {\n\treturn Change{\n\t\tReason:        \"policy update\",\n\t\tIncludePolicy: true,\n\t}\n}\n\nfunc PolicyAndPeers(changedPeers ...types.NodeID) Change {\n\treturn Change{\n\t\tReason:        \"policy and peers update\",\n\t\tIncludePolicy: true,\n\t\tPeersChanged:  changedPeers,\n\t}\n}\n\nfunc VisibilityChange(reason string, added, removed []types.NodeID) Change {\n\treturn Change{\n\t\tReason:        reason,\n\t\tIncludePolicy: true,\n\t\tPeersChanged:  added,\n\t\tPeersRemoved:  removed,\n\t}\n}\n\nfunc PeersChanged(reason string, peerIDs ...types.NodeID) Change {\n\treturn Change{\n\t\tReason:       reason,\n\t\tPeersChanged: peerIDs,\n\t}\n}\n\nfunc PeersRemoved(peerIDs ...types.NodeID) Change {\n\treturn Change{\n\t\tReason:       \"peers removed\",\n\t\tPeersRemoved: peerIDs,\n\t}\n}\n\nfunc PeerPatched(reason string, patches ...*tailcfg.PeerChange) Change {\n\treturn Change{\n\t\tReason:      reason,\n\t\tPeerPatches: patches,\n\t}\n}\n\nfunc DERPMap() Change {\n\treturn Change{\n\t\tReason:         \"DERP map update\",\n\t\tIncludeDERPMap: true,\n\t}\n}\n\n// PolicyChange creates a response for policy changes.\n// Policy changes require runtime peer visibility computation.\nfunc PolicyChange() Change {\n\treturn Change{\n\t\tReason:                         \"policy change\",\n\t\tIncludePolicy:                  true,\n\t\tRequiresRuntimePeerComputation: true,\n\t}\n}\n\n// DNSConfig creates a response for DNS configuration updates.\nfunc DNSConfig() Change {\n\treturn Change{\n\t\tReason:     \"DNS config update\",\n\t\tIncludeDNS: true,\n\t}\n}\n\n// NodeOnline creates a patch response for a node coming online.\nfunc NodeOnline(nodeID types.NodeID) Change {\n\treturn Change{\n\t\tReason: \"node online\",\n\t\tPeerPatches: []*tailcfg.PeerChange{\n\t\t\t{\n\t\t\t\tNodeID: nodeID.NodeID(),\n\t\t\t\tOnline: new(true),\n\t\t\t},\n\t\t},\n\t}\n}\n\n// NodeOffline creates a patch response for a node going offline.\nfunc NodeOffline(nodeID types.NodeID) Change {\n\treturn Change{\n\t\tReason: \"node offline\",\n\t\tPeerPatches: []*tailcfg.PeerChange{\n\t\t\t{\n\t\t\t\tNodeID: nodeID.NodeID(),\n\t\t\t\tOnline: new(false),\n\t\t\t},\n\t\t},\n\t}\n}\n\n// KeyExpiry creates a patch response for a node's key expiry change.\nfunc KeyExpiry(nodeID types.NodeID, expiry *time.Time) Change {\n\treturn Change{\n\t\tReason: \"key expiry\",\n\t\tPeerPatches: []*tailcfg.PeerChange{\n\t\t\t{\n\t\t\t\tNodeID:    nodeID.NodeID(),\n\t\t\t\tKeyExpiry: expiry,\n\t\t\t},\n\t\t},\n\t}\n}\n\n// High-level change constructors\n\n// NodeAdded returns a Change for when a node is added or updated.\n// The OriginNode field enables self-update detection by the mapper.\nfunc NodeAdded(id types.NodeID) Change {\n\tc := PeersChanged(\"node added\", id)\n\tc.OriginNode = id\n\n\treturn c\n}\n\n// NodeRemoved returns a Change for when a node is removed.\nfunc NodeRemoved(id types.NodeID) Change {\n\treturn PeersRemoved(id)\n}\n\n// NodeOnlineFor returns a Change for when a node comes online.\n// If the node is a subnet router, a full update is sent instead of a patch.\nfunc NodeOnlineFor(node types.NodeView) Change {\n\tif node.IsSubnetRouter() {\n\t\tc := FullUpdate()\n\t\tc.Reason = \"subnet router online\"\n\n\t\treturn c\n\t}\n\n\treturn NodeOnline(node.ID())\n}\n\n// NodeOfflineFor returns a Change for when a node goes offline.\n// If the node is a subnet router, a full update is sent instead of a patch.\nfunc NodeOfflineFor(node types.NodeView) 
Change {\n\tif node.IsSubnetRouter() {\n\t\tc := FullUpdate()\n\t\tc.Reason = \"subnet router offline\"\n\n\t\treturn c\n\t}\n\n\treturn NodeOffline(node.ID())\n}\n\n// KeyExpiryFor returns a Change for when a node's key expiry changes.\n// The OriginNode field enables self-update detection by the mapper.\nfunc KeyExpiryFor(id types.NodeID, expiry time.Time) Change {\n\tc := KeyExpiry(id, &expiry)\n\tc.OriginNode = id\n\n\treturn c\n}\n\n// EndpointOrDERPUpdate returns a Change for when a node's endpoints or DERP region changes.\n// The OriginNode field enables self-update detection by the mapper.\nfunc EndpointOrDERPUpdate(id types.NodeID, patch *tailcfg.PeerChange) Change {\n\tc := PeerPatched(\"endpoint/DERP update\", patch)\n\tc.OriginNode = id\n\n\treturn c\n}\n\n// UserAdded returns a Change for when a user is added or updated.\n// A full update is sent to refresh user profiles on all nodes.\nfunc UserAdded() Change {\n\tc := FullUpdate()\n\tc.Reason = \"user added\"\n\n\treturn c\n}\n\n// UserRemoved returns a Change for when a user is removed.\n// A full update is sent to refresh user profiles on all nodes.\nfunc UserRemoved() Change {\n\tc := FullUpdate()\n\tc.Reason = \"user removed\"\n\n\treturn c\n}\n\n// ExtraRecords returns a Change for when DNS extra records change.\nfunc ExtraRecords() Change {\n\tc := DNSConfig()\n\tc.Reason = \"extra records update\"\n\n\treturn c\n}\n"
  },
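  {
    "path": "docs/examples/change_merge/main.go",
    "content": "// Illustrative sketch, not part of the headscale source tree: shows how two\n// Changes merge and how Type() buckets the result for metrics, using only\n// the constructors defined in change.go. The node IDs are made up.\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/types/change\"\n)\n\nfunc main() {\n\tderp := change.DERPMap() // config-only change\n\tpeers := change.PeersChanged(\"node added\", types.NodeID(2), types.NodeID(2), types.NodeID(3))\n\n\tmerged := derp.Merge(peers)\n\n\t// Reasons are joined, boolean flags OR-ed and peer IDs deduplicated.\n\tfmt.Println(merged.Reason)       // DERP map update; node added\n\tfmt.Println(merged.PeersChanged) // [2 3]\n\tfmt.Println(merged.Type())       // peers (peer changes outrank config flags)\n\tfmt.Println(merged.IsEmpty())    // false\n}\n"
  },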
  {
    "path": "hscontrol/types/change/change_test.go",
    "content": "package change\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"tailscale.com/tailcfg\"\n)\n\nfunc TestChange_FieldSync(t *testing.T) {\n\tr := Change{}\n\tfieldNames := r.boolFieldNames()\n\n\ttyp := reflect.TypeFor[Change]()\n\tboolCount := 0\n\n\tfor field := range typ.Fields() {\n\t\tif field.Type.Kind() == reflect.Bool {\n\t\t\tboolCount++\n\t\t}\n\t}\n\n\tif len(fieldNames) != boolCount {\n\t\tt.Fatalf(\"boolFieldNames() returns %d fields but struct has %d bool fields; \"+\n\t\t\t\"update boolFieldNames() when adding new bool fields\", len(fieldNames), boolCount)\n\t}\n}\n\nfunc TestChange_IsEmpty(t *testing.T) {\n\ttests := []struct {\n\t\tname     string\n\t\tresponse Change\n\t\twant     bool\n\t}{\n\t\t{\n\t\t\tname:     \"zero value is empty\",\n\t\t\tresponse: Change{},\n\t\t\twant:     true,\n\t\t},\n\t\t{\n\t\t\tname:     \"only reason is still empty\",\n\t\t\tresponse: Change{Reason: \"test\"},\n\t\t\twant:     true,\n\t\t},\n\t\t{\n\t\t\tname:     \"IncludeSelf not empty\",\n\t\t\tresponse: Change{IncludeSelf: true},\n\t\t\twant:     false,\n\t\t},\n\t\t{\n\t\t\tname:     \"IncludeDERPMap not empty\",\n\t\t\tresponse: Change{IncludeDERPMap: true},\n\t\t\twant:     false,\n\t\t},\n\t\t{\n\t\t\tname:     \"IncludeDNS not empty\",\n\t\t\tresponse: Change{IncludeDNS: true},\n\t\t\twant:     false,\n\t\t},\n\t\t{\n\t\t\tname:     \"IncludeDomain not empty\",\n\t\t\tresponse: Change{IncludeDomain: true},\n\t\t\twant:     false,\n\t\t},\n\t\t{\n\t\t\tname:     \"IncludePolicy not empty\",\n\t\t\tresponse: Change{IncludePolicy: true},\n\t\t\twant:     false,\n\t\t},\n\t\t{\n\t\t\tname:     \"SendAllPeers not empty\",\n\t\t\tresponse: Change{SendAllPeers: true},\n\t\t\twant:     false,\n\t\t},\n\t\t{\n\t\t\tname:     \"PeersChanged not empty\",\n\t\t\tresponse: Change{PeersChanged: []types.NodeID{1}},\n\t\t\twant:     false,\n\t\t},\n\t\t{\n\t\t\tname:     \"PeersRemoved not empty\",\n\t\t\tresponse: Change{PeersRemoved: []types.NodeID{1}},\n\t\t\twant:     false,\n\t\t},\n\t\t{\n\t\t\tname:     \"PeerPatches not empty\",\n\t\t\tresponse: Change{PeerPatches: []*tailcfg.PeerChange{{}}},\n\t\t\twant:     false,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot := tt.response.IsEmpty()\n\t\t\tassert.Equal(t, tt.want, got)\n\t\t})\n\t}\n}\n\nfunc TestChange_IsSelfOnly(t *testing.T) {\n\ttests := []struct {\n\t\tname     string\n\t\tresponse Change\n\t\twant     bool\n\t}{\n\t\t{\n\t\t\tname:     \"empty is not self only\",\n\t\t\tresponse: Change{},\n\t\t\twant:     false,\n\t\t},\n\t\t{\n\t\t\tname:     \"IncludeSelf without TargetNode is not self only\",\n\t\t\tresponse: Change{IncludeSelf: true},\n\t\t\twant:     false,\n\t\t},\n\t\t{\n\t\t\tname:     \"TargetNode without IncludeSelf is not self only\",\n\t\t\tresponse: Change{TargetNode: 1},\n\t\t\twant:     false,\n\t\t},\n\t\t{\n\t\t\tname:     \"TargetNode with IncludeSelf is self only\",\n\t\t\tresponse: Change{TargetNode: 1, IncludeSelf: true},\n\t\t\twant:     true,\n\t\t},\n\t\t{\n\t\t\tname:     \"self only with SendAllPeers is not self only\",\n\t\t\tresponse: Change{TargetNode: 1, IncludeSelf: true, SendAllPeers: true},\n\t\t\twant:     false,\n\t\t},\n\t\t{\n\t\t\tname:     \"self only with PeersChanged is not self only\",\n\t\t\tresponse: Change{TargetNode: 1, IncludeSelf: true, PeersChanged: []types.NodeID{2}},\n\t\t\twant:     false,\n\t\t},\n\t\t{\n\t\t\tname:    
 \"self only with PeersRemoved is not self only\",\n\t\t\tresponse: Change{TargetNode: 1, IncludeSelf: true, PeersRemoved: []types.NodeID{2}},\n\t\t\twant:     false,\n\t\t},\n\t\t{\n\t\t\tname:     \"self only with PeerPatches is not self only\",\n\t\t\tresponse: Change{TargetNode: 1, IncludeSelf: true, PeerPatches: []*tailcfg.PeerChange{{}}},\n\t\t\twant:     false,\n\t\t},\n\t\t{\n\t\t\tname: \"self only with other include flags is still self only\",\n\t\t\tresponse: Change{\n\t\t\t\tTargetNode:    1,\n\t\t\t\tIncludeSelf:   true,\n\t\t\t\tIncludePolicy: true,\n\t\t\t\tIncludeDNS:    true,\n\t\t\t},\n\t\t\twant: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot := tt.response.IsSelfOnly()\n\t\t\tassert.Equal(t, tt.want, got)\n\t\t})\n\t}\n}\n\nfunc TestChange_Merge(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tr1   Change\n\t\tr2   Change\n\t\twant Change\n\t}{\n\t\t{\n\t\t\tname: \"empty merge\",\n\t\t\tr1:   Change{},\n\t\t\tr2:   Change{},\n\t\t\twant: Change{},\n\t\t},\n\t\t{\n\t\t\tname: \"bool fields OR together\",\n\t\t\tr1:   Change{IncludeSelf: true, IncludePolicy: true},\n\t\t\tr2:   Change{IncludeDERPMap: true, IncludePolicy: true},\n\t\t\twant: Change{IncludeSelf: true, IncludeDERPMap: true, IncludePolicy: true},\n\t\t},\n\t\t{\n\t\t\tname: \"all bool fields merge\",\n\t\t\tr1:   Change{IncludeSelf: true, IncludeDNS: true, IncludePolicy: true},\n\t\t\tr2:   Change{IncludeDERPMap: true, IncludeDomain: true, SendAllPeers: true},\n\t\t\twant: Change{\n\t\t\t\tIncludeSelf:    true,\n\t\t\t\tIncludeDERPMap: true,\n\t\t\t\tIncludeDNS:     true,\n\t\t\t\tIncludeDomain:  true,\n\t\t\t\tIncludePolicy:  true,\n\t\t\t\tSendAllPeers:   true,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"peers deduplicated and sorted\",\n\t\t\tr1:   Change{PeersChanged: []types.NodeID{3, 1}},\n\t\t\tr2:   Change{PeersChanged: []types.NodeID{2, 1}},\n\t\t\twant: Change{PeersChanged: []types.NodeID{1, 2, 3}},\n\t\t},\n\t\t{\n\t\t\tname: \"peers removed deduplicated\",\n\t\t\tr1:   Change{PeersRemoved: []types.NodeID{1, 2}},\n\t\t\tr2:   Change{PeersRemoved: []types.NodeID{2, 3}},\n\t\t\twant: Change{PeersRemoved: []types.NodeID{1, 2, 3}},\n\t\t},\n\t\t{\n\t\t\tname: \"peer patches concatenated\",\n\t\t\tr1:   Change{PeerPatches: []*tailcfg.PeerChange{{NodeID: 1}}},\n\t\t\tr2:   Change{PeerPatches: []*tailcfg.PeerChange{{NodeID: 2}}},\n\t\t\twant: Change{PeerPatches: []*tailcfg.PeerChange{{NodeID: 1}, {NodeID: 2}}},\n\t\t},\n\t\t{\n\t\t\tname: \"reasons combined when different\",\n\t\t\tr1:   Change{Reason: \"route change\"},\n\t\t\tr2:   Change{Reason: \"tag change\"},\n\t\t\twant: Change{Reason: \"route change; tag change\"},\n\t\t},\n\t\t{\n\t\t\tname: \"same reason not duplicated\",\n\t\t\tr1:   Change{Reason: \"policy\"},\n\t\t\tr2:   Change{Reason: \"policy\"},\n\t\t\twant: Change{Reason: \"policy\"},\n\t\t},\n\t\t{\n\t\t\tname: \"empty reason takes other\",\n\t\t\tr1:   Change{},\n\t\t\tr2:   Change{Reason: \"update\"},\n\t\t\twant: Change{Reason: \"update\"},\n\t\t},\n\t\t{\n\t\t\tname: \"OriginNode preserved from first\",\n\t\t\tr1:   Change{OriginNode: 42},\n\t\t\tr2:   Change{IncludePolicy: true},\n\t\t\twant: Change{OriginNode: 42, IncludePolicy: true},\n\t\t},\n\t\t{\n\t\t\tname: \"OriginNode preserved from second when first is zero\",\n\t\t\tr1:   Change{IncludePolicy: true},\n\t\t\tr2:   Change{OriginNode: 42},\n\t\t\twant: Change{OriginNode: 42, IncludePolicy: true},\n\t\t},\n\t\t{\n\t\t\tname: \"OriginNode first wins when both 
set\",\n\t\t\tr1:   Change{OriginNode: 1},\n\t\t\tr2:   Change{OriginNode: 2},\n\t\t\twant: Change{OriginNode: 1},\n\t\t},\n\t\t{\n\t\t\tname: \"TargetNode preserved from first\",\n\t\t\tr1:   Change{TargetNode: 42},\n\t\t\tr2:   Change{IncludeSelf: true},\n\t\t\twant: Change{TargetNode: 42, IncludeSelf: true},\n\t\t},\n\t\t{\n\t\t\tname: \"TargetNode preserved from second when first is zero\",\n\t\t\tr1:   Change{IncludeSelf: true},\n\t\t\tr2:   Change{TargetNode: 42},\n\t\t\twant: Change{TargetNode: 42, IncludeSelf: true},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot := tt.r1.Merge(tt.r2)\n\t\t\tassert.Equal(t, tt.want, got)\n\t\t})\n\t}\n}\n\nfunc TestChange_Constructors(t *testing.T) {\n\ttests := []struct {\n\t\tname        string\n\t\tconstructor func() Change\n\t\twantReason  string\n\t\twant        Change\n\t}{\n\t\t{\n\t\t\tname:        \"FullUpdateResponse\",\n\t\t\tconstructor: FullUpdate,\n\t\t\twantReason:  \"full update\",\n\t\t\twant: Change{\n\t\t\t\tReason:         \"full update\",\n\t\t\t\tIncludeSelf:    true,\n\t\t\t\tIncludeDERPMap: true,\n\t\t\t\tIncludeDNS:     true,\n\t\t\t\tIncludeDomain:  true,\n\t\t\t\tIncludePolicy:  true,\n\t\t\t\tSendAllPeers:   true,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:        \"PolicyOnlyResponse\",\n\t\t\tconstructor: PolicyOnly,\n\t\t\twantReason:  \"policy update\",\n\t\t\twant: Change{\n\t\t\t\tReason:        \"policy update\",\n\t\t\t\tIncludePolicy: true,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:        \"DERPMapResponse\",\n\t\t\tconstructor: DERPMap,\n\t\t\twantReason:  \"DERP map update\",\n\t\t\twant: Change{\n\t\t\t\tReason:         \"DERP map update\",\n\t\t\t\tIncludeDERPMap: true,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tr := tt.constructor()\n\t\t\tassert.Equal(t, tt.wantReason, r.Reason)\n\t\t\tassert.Equal(t, tt.want, r)\n\t\t})\n\t}\n}\n\nfunc TestSelfUpdate(t *testing.T) {\n\tr := SelfUpdate(42)\n\tassert.Equal(t, \"self update\", r.Reason)\n\tassert.Equal(t, types.NodeID(42), r.TargetNode)\n\tassert.True(t, r.IncludeSelf)\n\tassert.True(t, r.IsSelfOnly())\n}\n\nfunc TestPolicyAndPeers(t *testing.T) {\n\tr := PolicyAndPeers(1, 2, 3)\n\tassert.Equal(t, \"policy and peers update\", r.Reason)\n\tassert.True(t, r.IncludePolicy)\n\tassert.Equal(t, []types.NodeID{1, 2, 3}, r.PeersChanged)\n}\n\nfunc TestVisibilityChange(t *testing.T) {\n\tr := VisibilityChange(\"tag change\", []types.NodeID{1}, []types.NodeID{2, 3})\n\tassert.Equal(t, \"tag change\", r.Reason)\n\tassert.True(t, r.IncludePolicy)\n\tassert.Equal(t, []types.NodeID{1}, r.PeersChanged)\n\tassert.Equal(t, []types.NodeID{2, 3}, r.PeersRemoved)\n}\n\nfunc TestPeersChanged(t *testing.T) {\n\tr := PeersChanged(\"routes approved\", 1, 2)\n\tassert.Equal(t, \"routes approved\", r.Reason)\n\tassert.Equal(t, []types.NodeID{1, 2}, r.PeersChanged)\n\tassert.False(t, r.IncludePolicy)\n}\n\nfunc TestPeersRemoved(t *testing.T) {\n\tr := PeersRemoved(1, 2, 3)\n\tassert.Equal(t, \"peers removed\", r.Reason)\n\tassert.Equal(t, []types.NodeID{1, 2, 3}, r.PeersRemoved)\n}\n\nfunc TestPeerPatched(t *testing.T) {\n\tpatch := &tailcfg.PeerChange{NodeID: 1}\n\tr := PeerPatched(\"endpoint change\", patch)\n\tassert.Equal(t, \"endpoint change\", r.Reason)\n\tassert.Equal(t, []*tailcfg.PeerChange{patch}, r.PeerPatches)\n}\n\nfunc TestChange_Type(t *testing.T) {\n\ttests := []struct {\n\t\tname     string\n\t\tresponse Change\n\t\twant     string\n\t}{\n\t\t{\n\t\t\tname:     \"full 
update\",\n\t\t\tresponse: FullUpdate(),\n\t\t\twant:     \"full\",\n\t\t},\n\t\t{\n\t\t\tname:     \"self only\",\n\t\t\tresponse: SelfUpdate(1),\n\t\t\twant:     \"self\",\n\t\t},\n\t\t{\n\t\t\tname:     \"policy with runtime computation\",\n\t\t\tresponse: PolicyChange(),\n\t\t\twant:     \"policy\",\n\t\t},\n\t\t{\n\t\t\tname:     \"patch only\",\n\t\t\tresponse: PeerPatched(\"test\", &tailcfg.PeerChange{NodeID: 1}),\n\t\t\twant:     \"patch\",\n\t\t},\n\t\t{\n\t\t\tname:     \"peers changed\",\n\t\t\tresponse: PeersChanged(\"test\", 1, 2),\n\t\t\twant:     \"peers\",\n\t\t},\n\t\t{\n\t\t\tname:     \"peers removed\",\n\t\t\tresponse: PeersRemoved(1, 2),\n\t\t\twant:     \"peers\",\n\t\t},\n\t\t{\n\t\t\tname:     \"config - DERP map\",\n\t\t\tresponse: DERPMap(),\n\t\t\twant:     \"config\",\n\t\t},\n\t\t{\n\t\t\tname:     \"config - DNS\",\n\t\t\tresponse: DNSConfig(),\n\t\t\twant:     \"config\",\n\t\t},\n\t\t{\n\t\t\tname:     \"config - policy only (no runtime)\",\n\t\t\tresponse: PolicyOnly(),\n\t\t\twant:     \"config\",\n\t\t},\n\t\t{\n\t\t\tname:     \"empty is unknown\",\n\t\t\tresponse: Change{},\n\t\t\twant:     \"unknown\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot := tt.response.Type()\n\t\t\tassert.Equal(t, tt.want, got)\n\t\t})\n\t}\n}\n\nfunc TestUniqueNodeIDs(t *testing.T) {\n\ttests := []struct {\n\t\tname  string\n\t\tinput []types.NodeID\n\t\twant  []types.NodeID\n\t}{\n\t\t{\n\t\t\tname:  \"nil input\",\n\t\t\tinput: nil,\n\t\t\twant:  nil,\n\t\t},\n\t\t{\n\t\t\tname:  \"empty input\",\n\t\t\tinput: []types.NodeID{},\n\t\t\twant:  nil,\n\t\t},\n\t\t{\n\t\t\tname:  \"single element\",\n\t\t\tinput: []types.NodeID{1},\n\t\t\twant:  []types.NodeID{1},\n\t\t},\n\t\t{\n\t\t\tname:  \"no duplicates\",\n\t\t\tinput: []types.NodeID{1, 2, 3},\n\t\t\twant:  []types.NodeID{1, 2, 3},\n\t\t},\n\t\t{\n\t\t\tname:  \"with duplicates\",\n\t\t\tinput: []types.NodeID{3, 1, 2, 1, 3},\n\t\t\twant:  []types.NodeID{1, 2, 3},\n\t\t},\n\t\t{\n\t\t\tname:  \"all same\",\n\t\t\tinput: []types.NodeID{5, 5, 5, 5},\n\t\t\twant:  []types.NodeID{5},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot := uniqueNodeIDs(tt.input)\n\t\t\tassert.Equal(t, tt.want, got)\n\t\t})\n\t}\n}\n"
  },
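  {
    "path": "hscontrol/types/change/example_type_sketch_test.go",
    "content": "package change\n\nimport \"fmt\"\n\n// Editor's note: this file is an illustrative sketch, not part of the\n// headscale source tree; the file name and Example function are\n// hypothetical. It shows how Change.Type classifies updates, mirroring\n// the expectations in TestChange_Type: full, self-only, peer-list and\n// config-only changes each get a distinct label.\nfunc Example_typeSketch() {\n\tfmt.Println(FullUpdate().Type())\n\tfmt.Println(SelfUpdate(1).Type())\n\tfmt.Println(PeersRemoved(2).Type())\n\tfmt.Println(DERPMap().Type())\n\t// Output:\n\t// full\n\t// self\n\t// peers\n\t// config\n}\n"
  },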
  {
    "path": "hscontrol/types/common.go",
    "content": "//go:generate go tool viewer --type=User,Node,PreAuthKey\npackage types\n\n//go:generate go run tailscale.com/cmd/viewer --type=User,Node,PreAuthKey\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"tailscale.com/tailcfg\"\n)\n\nconst (\n\tSelfUpdateIdentifier = \"self-update\"\n\tDatabasePostgres     = \"postgres\"\n\tDatabaseSqlite       = \"sqlite3\"\n)\n\n// Common errors.\nvar (\n\tErrCannotParsePrefix   = errors.New(\"cannot parse prefix\")\n\tErrInvalidAuthIDLength = errors.New(\"auth ID has invalid length\")\n\tErrInvalidAuthIDPrefix = errors.New(\"auth ID has invalid prefix\")\n)\n\ntype StateUpdateType int\n\nfunc (su StateUpdateType) String() string {\n\tswitch su {\n\tcase StateFullUpdate:\n\t\treturn \"StateFullUpdate\"\n\tcase StatePeerChanged:\n\t\treturn \"StatePeerChanged\"\n\tcase StatePeerChangedPatch:\n\t\treturn \"StatePeerChangedPatch\"\n\tcase StatePeerRemoved:\n\t\treturn \"StatePeerRemoved\"\n\tcase StateSelfUpdate:\n\t\treturn \"StateSelfUpdate\"\n\tcase StateDERPUpdated:\n\t\treturn \"StateDERPUpdated\"\n\t}\n\n\treturn \"unknown state update type\"\n}\n\nconst (\n\tStateFullUpdate StateUpdateType = iota\n\t// StatePeerChanged is used for updates that needs\n\t// to be calculated with all peers and all policy rules.\n\t// This would typically be things that include tags, routes\n\t// and similar.\n\tStatePeerChanged\n\tStatePeerChangedPatch\n\tStatePeerRemoved\n\t// StateSelfUpdate is used to indicate that the node\n\t// has changed in control, and the client needs to be\n\t// informed.\n\t// The updated node is inside the ChangeNodes field\n\t// which should have a length of one.\n\tStateSelfUpdate\n\tStateDERPUpdated\n)\n\n// StateUpdate is an internal message containing information about\n// a state change that has happened to the network.\n// If type is StateFullUpdate, all fields are ignored.\ntype StateUpdate struct {\n\t// The type of update\n\tType StateUpdateType\n\n\t// ChangeNodes must be set when Type is StatePeerAdded\n\t// and StatePeerChanged and contains the full node\n\t// object for added nodes.\n\tChangeNodes []NodeID\n\n\t// ChangePatches must be set when Type is StatePeerChangedPatch\n\t// and contains a populated PeerChange object.\n\tChangePatches []*tailcfg.PeerChange\n\n\t// Removed must be set when Type is StatePeerRemoved and\n\t// contain a list of the nodes that has been removed from\n\t// the network.\n\tRemoved []NodeID\n\n\t// DERPMap must be set when Type is StateDERPUpdated and\n\t// contain the new DERP Map.\n\tDERPMap *tailcfg.DERPMap\n\n\t// Additional message for tracking origin or what being\n\t// updated, useful for ambiguous updates like StatePeerChanged.\n\tMessage string\n}\n\n// Empty reports if there are any updates in the StateUpdate.\nfunc (su *StateUpdate) Empty() bool {\n\tswitch su.Type {\n\tcase StatePeerChanged:\n\t\treturn len(su.ChangeNodes) == 0\n\tcase StatePeerChangedPatch:\n\t\treturn len(su.ChangePatches) == 0\n\tcase StatePeerRemoved:\n\t\treturn len(su.Removed) == 0\n\tcase StateFullUpdate, StateSelfUpdate, StateDERPUpdated:\n\t\t// These update types don't have associated data to check,\n\t\t// so they are never considered empty.\n\t\treturn false\n\t}\n\n\treturn false\n}\n\nfunc UpdateFull() StateUpdate {\n\treturn StateUpdate{\n\t\tType: StateFullUpdate,\n\t}\n}\n\nfunc UpdateSelf(nodeID NodeID) StateUpdate {\n\treturn StateUpdate{\n\t\tType:        
StateSelfUpdate,\n\t\tChangeNodes: []NodeID{nodeID},\n\t}\n}\n\nfunc UpdatePeerChanged(nodeIDs ...NodeID) StateUpdate {\n\treturn StateUpdate{\n\t\tType:        StatePeerChanged,\n\t\tChangeNodes: nodeIDs,\n\t}\n}\n\nfunc UpdatePeerPatch(changes ...*tailcfg.PeerChange) StateUpdate {\n\treturn StateUpdate{\n\t\tType:          StatePeerChangedPatch,\n\t\tChangePatches: changes,\n\t}\n}\n\nfunc UpdatePeerRemoved(nodeIDs ...NodeID) StateUpdate {\n\treturn StateUpdate{\n\t\tType:    StatePeerRemoved,\n\t\tRemoved: nodeIDs,\n\t}\n}\n\nfunc UpdateExpire(nodeID NodeID, expiry time.Time) StateUpdate {\n\treturn StateUpdate{\n\t\tType: StatePeerChangedPatch,\n\t\tChangePatches: []*tailcfg.PeerChange{\n\t\t\t{\n\t\t\t\tNodeID:    nodeID.NodeID(),\n\t\t\t\tKeyExpiry: &expiry,\n\t\t\t},\n\t\t},\n\t}\n}\n\nconst (\n\tauthIDPrefix       = \"hskey-authreq-\"\n\tauthIDRandomLength = 24\n\t// AuthIDLength is the total length of an AuthID: 14 (prefix) + 24 (random).\n\tAuthIDLength = 38\n)\n\ntype AuthID string\n\nfunc NewAuthID() (AuthID, error) {\n\trid, err := util.GenerateRandomStringURLSafe(authIDRandomLength)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn AuthID(authIDPrefix + rid), nil\n}\n\nfunc MustAuthID() AuthID {\n\trid, err := NewAuthID()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn rid\n}\n\nfunc AuthIDFromString(str string) (AuthID, error) {\n\tr := AuthID(str)\n\n\terr := r.Validate()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn r, nil\n}\n\nfunc (r AuthID) String() string {\n\treturn string(r)\n}\n\nfunc (r AuthID) Validate() error {\n\tif !strings.HasPrefix(string(r), authIDPrefix) {\n\t\treturn fmt.Errorf(\n\t\t\t\"%w: expected prefix %q\",\n\t\t\tErrInvalidAuthIDPrefix, authIDPrefix,\n\t\t)\n\t}\n\n\tif len(r) != AuthIDLength {\n\t\treturn fmt.Errorf(\n\t\t\t\"%w: expected %d, got %d\",\n\t\t\tErrInvalidAuthIDLength, AuthIDLength, len(r),\n\t\t)\n\t}\n\n\treturn nil\n}\n\n// AuthRequest represent a pending authentication request from a user or a node.\n// If it is a registration request, the node field will be populate with the node that is trying to register.\n// When the authentication process is finished, the node that has been authenticated will be sent through the Finished channel.\n// The closed field is used to ensure that the Finished channel is only closed once, and that no more nodes are sent through it after it has been closed.\ntype AuthRequest struct {\n\tnode     *Node\n\tfinished chan AuthVerdict\n\tclosed   *atomic.Bool\n}\n\nfunc NewAuthRequest() AuthRequest {\n\treturn AuthRequest{\n\t\tfinished: make(chan AuthVerdict, 1),\n\t\tclosed:   &atomic.Bool{},\n\t}\n}\n\nfunc NewRegisterAuthRequest(node Node) AuthRequest {\n\treturn AuthRequest{\n\t\tnode:     &node,\n\t\tfinished: make(chan AuthVerdict, 1),\n\t\tclosed:   &atomic.Bool{},\n\t}\n}\n\n// Node returns the node that is trying to register.\n// It will panic if the AuthRequest is not a registration request.\n// Can _only_ be used in the registration path.\nfunc (rn *AuthRequest) Node() NodeView {\n\tif rn.node == nil {\n\t\tpanic(\"Node can only be used in registration requests\")\n\t}\n\n\treturn rn.node.View()\n}\n\nfunc (rn *AuthRequest) FinishAuth(verdict AuthVerdict) {\n\tif rn.closed.Swap(true) {\n\t\treturn\n\t}\n\n\tselect {\n\tcase rn.finished <- verdict:\n\tdefault:\n\t}\n\n\tclose(rn.finished)\n}\n\nfunc (rn *AuthRequest) WaitForAuth() <-chan AuthVerdict {\n\treturn rn.finished\n}\n\ntype AuthVerdict struct {\n\t// Err is the error that occurred during the authentication 
process, if any.\n\t// If Err is nil, the authentication process has succeeded.\n\t// If Err is not nil, the authentication process has failed and the node should not be authenticated.\n\tErr error\n\n\t// Node is the node that has been authenticated.\n\t// Node is only valid if the auth request was a registration request\n\t// and the authentication process has succeeded.\n\tNode NodeView\n}\n\nfunc (v AuthVerdict) Accept() bool {\n\treturn v.Err == nil\n}\n\n// DefaultBatcherWorkers returns the default number of batcher workers.\n// Default to 3/4 of CPU cores, minimum 1, no maximum.\nfunc DefaultBatcherWorkers() int {\n\treturn DefaultBatcherWorkersFor(runtime.NumCPU())\n}\n\n// DefaultBatcherWorkersFor returns the default number of batcher workers for a given CPU count.\n// Default to 3/4 of CPU cores, minimum 1, no maximum.\nfunc DefaultBatcherWorkersFor(cpuCount int) int {\n\tconst (\n\t\tworkerNumerator   = 3\n\t\tworkerDenominator = 4\n\t)\n\n\tdefaultWorkers := max((cpuCount*workerNumerator)/workerDenominator, 1)\n\n\treturn defaultWorkers\n}\n"
  },
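  {
    "path": "hscontrol/types/example_authrequest_sketch_test.go",
    "content": "package types\n\nimport \"fmt\"\n\n// Editor's note: this file is an illustrative sketch, not part of the\n// headscale source tree; the file name and Example function are\n// hypothetical. It walks through the AuthRequest lifecycle from\n// common.go: FinishAuth delivers a verdict exactly once, and any later\n// call is a no-op because the closed flag has already been swapped.\nfunc Example_authRequestSketch() {\n\treq := NewAuthRequest()\n\n\t// The first verdict wins and closes the finished channel.\n\treq.FinishAuth(AuthVerdict{})\n\n\t// Ignored: the request has already been closed.\n\treq.FinishAuth(AuthVerdict{Err: fmt.Errorf(\"too late\")})\n\n\tverdict := <-req.WaitForAuth()\n\tfmt.Println(verdict.Accept())\n\t// Output:\n\t// true\n}\n"
  },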
  {
    "path": "hscontrol/types/common_test.go",
    "content": "package types\n\nimport (\n\t\"testing\"\n)\n\nfunc TestDefaultBatcherWorkersFor(t *testing.T) {\n\ttests := []struct {\n\t\tcpuCount int\n\t\texpected int\n\t}{\n\t\t{1, 1},   // (1*3)/4 = 0, should be minimum 1\n\t\t{2, 1},   // (2*3)/4 = 1\n\t\t{4, 3},   // (4*3)/4 = 3\n\t\t{8, 6},   // (8*3)/4 = 6\n\t\t{12, 9},  // (12*3)/4 = 9\n\t\t{16, 12}, // (16*3)/4 = 12\n\t\t{20, 15}, // (20*3)/4 = 15\n\t\t{24, 18}, // (24*3)/4 = 18\n\t}\n\n\tfor _, test := range tests {\n\t\tresult := DefaultBatcherWorkersFor(test.cpuCount)\n\t\tif result != test.expected {\n\t\t\tt.Errorf(\"DefaultBatcherWorkersFor(%d) = %d, expected %d\", test.cpuCount, result, test.expected)\n\t\t}\n\t}\n}\n\nfunc TestDefaultBatcherWorkers(t *testing.T) {\n\t// Just verify it returns a valid value (>= 1)\n\tresult := DefaultBatcherWorkers()\n\tif result < 1 {\n\t\tt.Errorf(\"DefaultBatcherWorkers() = %d, expected value >= 1\", result)\n\t}\n}\n"
  },
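  {
    "path": "hscontrol/types/example_safeurl_sketch_test.go",
    "content": "package types\n\nimport \"fmt\"\n\n// Editor's note: this file is an illustrative sketch, not part of the\n// headscale source tree; the file name and Example function are\n// hypothetical. It demonstrates the server_url/base_domain safety check\n// from config.go: a base_domain that is a suffix of the server hostname\n// is rejected, since MagicDNS would take over that domain and make the\n// headscale server and DERP unreachable.\nfunc Example_isSafeServerURLSketch() {\n\t// base_domain is a suffix of the server hostname: rejected.\n\tfmt.Println(isSafeServerURL(\"https://headscale.example.com\", \"example.com\") != nil)\n\n\t// Unrelated base_domain: accepted (nil error).\n\tfmt.Println(isSafeServerURL(\"https://headscale.example.com\", \"clients.example.net\") != nil)\n\t// Output:\n\t// true\n\t// false\n}\n"
  },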
  {
    "path": "hscontrol/types/config.go",
    "content": "package types\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io/fs\"\n\t\"net/netip\"\n\t\"net/url\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/coreos/go-oidc/v3/oidc\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/prometheus/common/model\"\n\t\"github.com/rs/zerolog\"\n\t\"github.com/rs/zerolog/log\"\n\t\"github.com/spf13/viper\"\n\t\"go4.org/netipx\"\n\t\"tailscale.com/net/tsaddr\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/dnstype\"\n\t\"tailscale.com/util/set\"\n)\n\nconst (\n\tdefaultOIDCExpiryTime               = 180 * 24 * time.Hour // 180 Days\n\tmaxDuration           time.Duration = 1<<63 - 1\n\tPKCEMethodPlain       string        = \"plain\"\n\tPKCEMethodS256        string        = \"S256\"\n\n\tdefaultNodeStoreBatchSize = 100\n)\n\nvar (\n\terrOidcMutuallyExclusive     = errors.New(\"oidc_client_secret and oidc_client_secret_path are mutually exclusive\")\n\terrServerURLSuffix           = errors.New(\"server_url cannot be part of base_domain in a way that could make the DERP and headscale server unreachable\")\n\terrServerURLSame             = errors.New(\"server_url cannot use the same domain as base_domain in a way that could make the DERP and headscale server unreachable\")\n\terrInvalidPKCEMethod         = errors.New(\"pkce.method must be either 'plain' or 'S256'\")\n\tErrNoPrefixConfigured        = errors.New(\"no IPv4 or IPv6 prefix configured, minimum one prefix is required\")\n\tErrInvalidAllocationStrategy = errors.New(\"invalid prefix allocation strategy\")\n)\n\ntype IPAllocationStrategy string\n\nconst (\n\tIPAllocationStrategySequential IPAllocationStrategy = \"sequential\"\n\tIPAllocationStrategyRandom     IPAllocationStrategy = \"random\"\n)\n\ntype PolicyMode string\n\nconst (\n\tPolicyModeDB   = \"database\"\n\tPolicyModeFile = \"file\"\n)\n\n// Config contains the initial Headscale configuration.\ntype Config struct {\n\tServerURL                      string\n\tAddr                           string\n\tMetricsAddr                    string\n\tGRPCAddr                       string\n\tGRPCAllowInsecure              bool\n\tEphemeralNodeInactivityTimeout time.Duration\n\tPrefixV4                       *netip.Prefix\n\tPrefixV6                       *netip.Prefix\n\tIPAllocation                   IPAllocationStrategy\n\tNoisePrivateKeyPath            string\n\tBaseDomain                     string\n\tLog                            LogConfig\n\tDisableUpdateCheck             bool\n\n\tDatabase DatabaseConfig\n\n\tDERP DERPConfig\n\n\tTLS TLSConfig\n\n\tACMEURL   string\n\tACMEEmail string\n\n\t// DNSConfig is the headscale representation of the DNS configuration.\n\t// It is kept in the config update for some settings that are\n\t// not directly converted into a tailcfg.DNSConfig.\n\tDNSConfig DNSConfig\n\n\t// TailcfgDNSConfig is the tailcfg representation of the DNS configuration,\n\t// it can be used directly when sending Netmaps to clients.\n\tTailcfgDNSConfig *tailcfg.DNSConfig\n\n\tUnixSocket           string\n\tUnixSocketPermission fs.FileMode\n\n\tOIDC OIDCConfig\n\n\tLogTail             LogTailConfig\n\tRandomizeClientPort bool\n\tTaildrop            TaildropConfig\n\n\tCLI CLIConfig\n\n\tPolicy PolicyConfig\n\n\tTuning Tuning\n}\n\ntype DNSConfig struct {\n\tMagicDNS         bool   `mapstructure:\"magic_dns\"`\n\tBaseDomain       string `mapstructure:\"base_domain\"`\n\tOverrideLocalDNS bool   `mapstructure:\"override_local_dns\"`\n\tNameservers      Nameservers\n\tSearchDomains    []string            
`mapstructure:\"search_domains\"`\n\tExtraRecords     []tailcfg.DNSRecord `mapstructure:\"extra_records\"`\n\tExtraRecordsPath string              `mapstructure:\"extra_records_path\"`\n}\n\ntype Nameservers struct {\n\tGlobal []string\n\tSplit  map[string][]string\n}\n\ntype SqliteConfig struct {\n\tPath              string\n\tWriteAheadLog     bool\n\tWALAutoCheckPoint int\n}\n\ntype PostgresConfig struct {\n\tHost                string\n\tPort                int\n\tName                string\n\tUser                string\n\tPass                string\n\tSsl                 string\n\tMaxOpenConnections  int\n\tMaxIdleConnections  int\n\tConnMaxIdleTimeSecs int\n}\n\ntype GormConfig struct {\n\tDebug                 bool\n\tSlowThreshold         time.Duration\n\tSkipErrRecordNotFound bool\n\tParameterizedQueries  bool\n\tPrepareStmt           bool\n}\n\ntype DatabaseConfig struct {\n\t// Type sets the database type, either \"sqlite3\" or \"postgres\"\n\tType  string\n\tDebug bool\n\n\t// Type sets the gorm configuration\n\tGorm GormConfig\n\n\tSqlite   SqliteConfig\n\tPostgres PostgresConfig\n}\n\ntype TLSConfig struct {\n\tCertPath string\n\tKeyPath  string\n\n\tLetsEncrypt LetsEncryptConfig\n}\n\ntype LetsEncryptConfig struct {\n\tListen        string\n\tHostname      string\n\tCacheDir      string\n\tChallengeType string\n}\n\ntype PKCEConfig struct {\n\tEnabled bool\n\tMethod  string\n}\n\ntype OIDCConfig struct {\n\tOnlyStartIfOIDCIsAvailable bool\n\tIssuer                     string\n\tClientID                   string\n\tClientSecret               string\n\tScope                      []string\n\tExtraParams                map[string]string\n\tAllowedDomains             []string\n\tAllowedUsers               []string\n\tAllowedGroups              []string\n\tEmailVerifiedRequired      bool\n\tExpiry                     time.Duration\n\tUseExpiryFromToken         bool\n\tPKCE                       PKCEConfig\n}\n\ntype DERPConfig struct {\n\tServerEnabled                      bool\n\tAutomaticallyAddEmbeddedDerpRegion bool\n\tServerRegionID                     int\n\tServerRegionCode                   string\n\tServerRegionName                   string\n\tServerPrivateKeyPath               string\n\tServerVerifyClients                bool\n\tSTUNAddr                           string\n\tURLs                               []url.URL\n\tPaths                              []string\n\tDERPMap                            *tailcfg.DERPMap\n\tAutoUpdate                         bool\n\tUpdateFrequency                    time.Duration\n\tIPv4                               string\n\tIPv6                               string\n}\n\ntype LogTailConfig struct {\n\tEnabled bool\n}\n\ntype TaildropConfig struct {\n\tEnabled bool\n}\n\ntype CLIConfig struct {\n\tAddress  string\n\tAPIKey   string\n\tTimeout  time.Duration\n\tInsecure bool\n}\n\ntype PolicyConfig struct {\n\tPath string\n\tMode PolicyMode\n}\n\nfunc (p *PolicyConfig) IsEmpty() bool {\n\treturn p.Mode == PolicyModeFile && p.Path == \"\"\n}\n\ntype LogConfig struct {\n\tFormat string\n\tLevel  zerolog.Level\n}\n\n// Tuning contains advanced performance tuning parameters for Headscale.\n// These settings control internal batching, timeouts, and resource allocation.\n// The defaults are carefully chosen for typical deployments and should rarely\n// need adjustment. 
Changes to these values can significantly impact performance\n// and resource usage.\ntype Tuning struct {\n\t// NotifierSendTimeout is the maximum time to wait when sending notifications\n\t// to connected clients about network changes.\n\tNotifierSendTimeout time.Duration\n\n\t// BatchChangeDelay controls how long to wait before sending batched updates\n\t// to clients when multiple changes occur in rapid succession.\n\tBatchChangeDelay time.Duration\n\n\t// NodeMapSessionBufferedChanSize sets the buffer size for the channel that\n\t// queues map updates to be sent to connected clients.\n\tNodeMapSessionBufferedChanSize int\n\n\t// BatcherWorkers controls the number of parallel workers processing map\n\t// updates for connected clients.\n\tBatcherWorkers int\n\n\t// RegisterCacheCleanup is the interval between cleanup operations for\n\t// expired registration cache entries.\n\tRegisterCacheCleanup time.Duration\n\n\t// RegisterCacheExpiration is how long registration cache entries remain\n\t// valid before being eligible for cleanup.\n\tRegisterCacheExpiration time.Duration\n\n\t// NodeStoreBatchSize controls how many write operations are accumulated\n\t// before rebuilding the in-memory node snapshot.\n\t//\n\t// The NodeStore batches write operations (add/update/delete nodes) before\n\t// rebuilding its in-memory data structures. Rebuilding involves recalculating\n\t// peer relationships between all nodes based on the current ACL policy, which\n\t// is computationally expensive and scales with the square of the number of nodes.\n\t//\n\t// By batching writes, Headscale can process N operations but only rebuild once,\n\t// rather than rebuilding N times. This significantly reduces CPU usage during\n\t// bulk operations like initial sync or policy updates.\n\t//\n\t// Trade-off: Higher values reduce CPU usage from rebuilds but increase latency\n\t// for individual operations waiting for their batch to complete.\n\tNodeStoreBatchSize int\n\n\t// NodeStoreBatchTimeout is the maximum time to wait before processing a\n\t// partial batch of node operations.\n\t//\n\t// When NodeStoreBatchSize operations haven't accumulated, this timeout ensures\n\t// writes don't wait indefinitely. The batch processes when either the size\n\t// threshold is reached OR this timeout expires, whichever comes first.\n\t//\n\t// Trade-off: Lower values provide faster response for individual operations\n\t// but trigger more frequent (expensive) peer map rebuilds. 
Higher values\n\t// optimize for bulk throughput at the cost of individual operation latency.\n\tNodeStoreBatchTimeout time.Duration\n}\n\nfunc validatePKCEMethod(method string) error {\n\tif method != PKCEMethodPlain && method != PKCEMethodS256 {\n\t\treturn errInvalidPKCEMethod\n\t}\n\n\treturn nil\n}\n\n// Domain returns the hostname/domain part of the ServerURL.\n// If the ServerURL is not a valid URL, it returns the BaseDomain.\nfunc (c *Config) Domain() string {\n\tu, err := url.Parse(c.ServerURL)\n\tif err != nil {\n\t\treturn c.BaseDomain\n\t}\n\n\treturn u.Hostname()\n}\n\n// LoadConfig prepares and loads the Headscale configuration into Viper.\n// This means it sets the default values, reads the configuration file and\n// environment variables, and handles deprecated configuration options.\n// It has to be called before LoadServerConfig and LoadCLIConfig.\n// The configuration is not validated and the caller should check for errors\n// using a validation function.\nfunc LoadConfig(path string, isFile bool) error {\n\tif isFile {\n\t\tviper.SetConfigFile(path)\n\t} else {\n\t\tviper.SetConfigName(\"config\")\n\n\t\tif path == \"\" {\n\t\t\tviper.AddConfigPath(\"/etc/headscale/\")\n\t\t\tviper.AddConfigPath(\"$HOME/.headscale\")\n\t\t\tviper.AddConfigPath(\".\")\n\t\t} else {\n\t\t\t// For testing\n\t\t\tviper.AddConfigPath(path)\n\t\t}\n\t}\n\n\tenvPrefix := \"headscale\"\n\tviper.SetEnvPrefix(envPrefix)\n\tviper.SetEnvKeyReplacer(strings.NewReplacer(\".\", \"_\"))\n\tviper.AutomaticEnv()\n\n\tviper.SetDefault(\"policy.mode\", \"file\")\n\n\tviper.SetDefault(\"tls_letsencrypt_cache_dir\", \"/var/www/.cache\")\n\tviper.SetDefault(\"tls_letsencrypt_challenge_type\", HTTP01ChallengeType)\n\n\tviper.SetDefault(\"log.level\", \"info\")\n\tviper.SetDefault(\"log.format\", TextLogFormat)\n\n\tviper.SetDefault(\"dns.magic_dns\", true)\n\tviper.SetDefault(\"dns.base_domain\", \"\")\n\tviper.SetDefault(\"dns.override_local_dns\", true)\n\tviper.SetDefault(\"dns.nameservers.global\", []string{})\n\tviper.SetDefault(\"dns.nameservers.split\", map[string]string{})\n\tviper.SetDefault(\"dns.search_domains\", []string{})\n\n\tviper.SetDefault(\"derp.server.enabled\", false)\n\tviper.SetDefault(\"derp.server.verify_clients\", true)\n\tviper.SetDefault(\"derp.server.stun.enabled\", true)\n\tviper.SetDefault(\"derp.server.automatically_add_embedded_derp_region\", true)\n\tviper.SetDefault(\"derp.update_frequency\", \"3h\")\n\n\tviper.SetDefault(\"unix_socket\", \"/var/run/headscale/headscale.sock\")\n\tviper.SetDefault(\"unix_socket_permission\", \"0o770\")\n\n\tviper.SetDefault(\"grpc_listen_addr\", \":50443\")\n\tviper.SetDefault(\"grpc_allow_insecure\", false)\n\n\tviper.SetDefault(\"cli.timeout\", \"5s\")\n\tviper.SetDefault(\"cli.insecure\", false)\n\n\tviper.SetDefault(\"database.postgres.ssl\", false)\n\tviper.SetDefault(\"database.postgres.max_open_conns\", 10)\n\tviper.SetDefault(\"database.postgres.max_idle_conns\", 10)\n\tviper.SetDefault(\"database.postgres.conn_max_idle_time_secs\", 3600)\n\n\tviper.SetDefault(\"database.sqlite.write_ahead_log\", true)\n\tviper.SetDefault(\"database.sqlite.wal_autocheckpoint\", 1000) // SQLite default\n\n\tviper.SetDefault(\"oidc.scope\", []string{oidc.ScopeOpenID, \"profile\", \"email\"})\n\tviper.SetDefault(\"oidc.only_start_if_oidc_is_available\", true)\n\tviper.SetDefault(\"oidc.expiry\", \"180d\")\n\tviper.SetDefault(\"oidc.use_expiry_from_token\", false)\n\tviper.SetDefault(\"oidc.pkce.enabled\", false)\n\tviper.SetDefault(\"oidc.pkce.method\", 
\"S256\")\n\tviper.SetDefault(\"oidc.email_verified_required\", true)\n\n\tviper.SetDefault(\"logtail.enabled\", false)\n\tviper.SetDefault(\"randomize_client_port\", false)\n\tviper.SetDefault(\"taildrop.enabled\", true)\n\n\tviper.SetDefault(\"ephemeral_node_inactivity_timeout\", \"120s\")\n\n\tviper.SetDefault(\"tuning.notifier_send_timeout\", \"800ms\")\n\tviper.SetDefault(\"tuning.batch_change_delay\", \"800ms\")\n\tviper.SetDefault(\"tuning.node_mapsession_buffered_chan_size\", 30)\n\tviper.SetDefault(\"tuning.node_store_batch_size\", defaultNodeStoreBatchSize)\n\tviper.SetDefault(\"tuning.node_store_batch_timeout\", \"500ms\")\n\n\tviper.SetDefault(\"prefixes.allocation\", string(IPAllocationStrategySequential))\n\n\terr := viper.ReadInConfig()\n\tif err != nil {\n\t\tif _, ok := errors.AsType[viper.ConfigFileNotFoundError](err); ok {\n\t\t\tlog.Warn().Msg(\"no config file found, using defaults\")\n\t\t\treturn nil\n\t\t}\n\n\t\treturn fmt.Errorf(\"fatal error reading config file: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc validateServerConfig() error {\n\tdepr := deprecator{\n\t\twarns:  make(set.Set[string]),\n\t\tfatals: make(set.Set[string]),\n\t}\n\n\t// Register aliases for backward compatibility\n\t// Has to be called _after_ viper.ReadInConfig()\n\t// https://github.com/spf13/viper/issues/560\n\n\t// Alias the old ACL Policy path with the new configuration option.\n\tdepr.fatalIfNewKeyIsNotUsed(\"policy.path\", \"acl_policy_path\")\n\n\t// Move dns_config -> dns\n\tdepr.fatalIfNewKeyIsNotUsed(\"dns.magic_dns\", \"dns_config.magic_dns\")\n\tdepr.fatalIfNewKeyIsNotUsed(\"dns.base_domain\", \"dns_config.base_domain\")\n\tdepr.fatalIfNewKeyIsNotUsed(\"dns.override_local_dns\", \"dns_config.override_local_dns\")\n\tdepr.fatalIfNewKeyIsNotUsed(\"dns.nameservers.global\", \"dns_config.nameservers\")\n\tdepr.fatalIfNewKeyIsNotUsed(\"dns.nameservers.split\", \"dns_config.restricted_nameservers\")\n\tdepr.fatalIfNewKeyIsNotUsed(\"dns.search_domains\", \"dns_config.domains\")\n\tdepr.fatalIfNewKeyIsNotUsed(\"dns.extra_records\", \"dns_config.extra_records\")\n\tdepr.fatal(\"dns.use_username_in_magic_dns\")\n\tdepr.fatal(\"dns_config.use_username_in_magic_dns\")\n\n\t// Removed since version v0.26.0\n\tdepr.fatal(\"oidc.strip_email_domain\")\n\tdepr.fatal(\"oidc.map_legacy_users\")\n\n\tif viper.GetBool(\"oidc.enabled\") {\n\t\terr := validatePKCEMethod(viper.GetString(\"oidc.pkce.method\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tdepr.Log()\n\n\tif viper.IsSet(\"dns.extra_records\") && viper.IsSet(\"dns.extra_records_path\") {\n\t\tlog.Fatal().Msg(\"fatal config error: dns.extra_records and dns.extra_records_path are mutually exclusive. 
Please remove one of them from your config file\")\n\t}\n\n\t// Collect any validation errors and return them all at once\n\tvar errorText string\n\tif (viper.GetString(\"tls_letsencrypt_hostname\") != \"\") &&\n\t\t((viper.GetString(\"tls_cert_path\") != \"\") || (viper.GetString(\"tls_key_path\") != \"\")) {\n\t\terrorText += \"Fatal config error: set either tls_letsencrypt_hostname or tls_cert_path/tls_key_path, not both\\n\"\n\t}\n\n\tif viper.GetString(\"noise.private_key_path\") == \"\" {\n\t\terrorText += \"Fatal config error: headscale now requires a new `noise.private_key_path` field in the config file for the Tailscale v2 protocol\\n\"\n\t}\n\n\tif (viper.GetString(\"tls_letsencrypt_hostname\") != \"\") &&\n\t\t(viper.GetString(\"tls_letsencrypt_challenge_type\") == TLSALPN01ChallengeType) &&\n\t\t(!strings.HasSuffix(viper.GetString(\"listen_addr\"), \":443\")) {\n\t\t// this is only a warning because there could be something sitting in front of headscale that redirects the traffic (e.g. an iptables rule)\n\t\tlog.Warn().\n\t\t\tMsg(\"Warning: when using tls_letsencrypt_hostname with TLS-ALPN-01 as challenge type, headscale must be reachable on port 443, i.e. listen_addr should probably end in :443\")\n\t}\n\n\tif (viper.GetString(\"tls_letsencrypt_challenge_type\") != HTTP01ChallengeType) &&\n\t\t(viper.GetString(\"tls_letsencrypt_challenge_type\") != TLSALPN01ChallengeType) {\n\t\terrorText += \"Fatal config error: the only supported values for tls_letsencrypt_challenge_type are HTTP-01 and TLS-ALPN-01\\n\"\n\t}\n\n\tif !strings.HasPrefix(viper.GetString(\"server_url\"), \"http://\") &&\n\t\t!strings.HasPrefix(viper.GetString(\"server_url\"), \"https://\") {\n\t\terrorText += \"Fatal config error: server_url must start with https:// or http://\\n\"\n\t}\n\n\t// The minimum inactivity timeout is the keepalive timeout (60s) plus a few\n\t// seconds to avoid races\n\tminInactivityTimeout, _ := time.ParseDuration(\"65s\")\n\tif viper.GetDuration(\"ephemeral_node_inactivity_timeout\") <= minInactivityTimeout {\n\t\terrorText += fmt.Sprintf(\n\t\t\t\"Fatal config error: ephemeral_node_inactivity_timeout (%s) is set too low, must be more than %s\\n\",\n\t\t\tviper.GetString(\"ephemeral_node_inactivity_timeout\"),\n\t\t\tminInactivityTimeout,\n\t\t)\n\t}\n\n\tif viper.GetBool(\"dns.override_local_dns\") {\n\t\tif global := viper.GetStringSlice(\"dns.nameservers.global\"); len(global) == 0 {\n\t\t\terrorText += \"Fatal config error: dns.nameservers.global must be set when dns.override_local_dns is true\\n\"\n\t\t}\n\t}\n\n\t// Validate tuning parameters\n\tif size := viper.GetInt(\"tuning.node_store_batch_size\"); size <= 0 {\n\t\terrorText += fmt.Sprintf(\n\t\t\t\"Fatal config error: tuning.node_store_batch_size must be positive, got %d\\n\",\n\t\t\tsize,\n\t\t)\n\t}\n\n\tif timeout := viper.GetDuration(\"tuning.node_store_batch_timeout\"); timeout <= 0 {\n\t\terrorText += fmt.Sprintf(\n\t\t\t\"Fatal config error: tuning.node_store_batch_timeout must be positive, got %s\\n\",\n\t\t\ttimeout,\n\t\t)\n\t}\n\n\tif errorText != \"\" {\n\t\t// nolint\n\t\treturn errors.New(strings.TrimSuffix(errorText, \"\\n\"))\n\t}\n\n\treturn nil\n}\n\nfunc tlsConfig() TLSConfig {\n\treturn TLSConfig{\n\t\tLetsEncrypt: LetsEncryptConfig{\n\t\t\tHostname: viper.GetString(\"tls_letsencrypt_hostname\"),\n\t\t\tListen:   viper.GetString(\"tls_letsencrypt_listen\"),\n\t\t\tCacheDir: util.AbsolutePathFromConfigPath(\n\t\t\t\tviper.GetString(\"tls_letsencrypt_cache_dir\"),\n\t\t\t),\n\t\t\tChallengeType: viper.GetString(\"tls_letsencrypt_challenge_type\"),\n\t\t},\n\t\tCertPath: util.AbsolutePathFromConfigPath(\n\t\t\tviper.GetString(\"tls_cert_path\"),\n\t\t),\n\t\tKeyPath: util.AbsolutePathFromConfigPath(\n\t\t\tviper.GetString(\"tls_key_path\"),\n\t\t),\n\t}\n}\n\nfunc derpConfig() DERPConfig {\n\tserverEnabled := viper.GetBool(\"derp.server.enabled\")\n\tserverRegionID := viper.GetInt(\"derp.server.region_id\")\n\tserverRegionCode := viper.GetString(\"derp.server.region_code\")\n\tserverRegionName := viper.GetString(\"derp.server.region_name\")\n\tserverVerifyClients := viper.GetBool(\"derp.server.verify_clients\")\n\tstunAddr := viper.GetString(\"derp.server.stun_listen_addr\")\n\tprivateKeyPath := util.AbsolutePathFromConfigPath(\n\t\tviper.GetString(\"derp.server.private_key_path\"),\n\t)\n\tipv4 := viper.GetString(\"derp.server.ipv4\")\n\tipv6 := viper.GetString(\"derp.server.ipv6\")\n\tautomaticallyAddEmbeddedDerpRegion := viper.GetBool(\n\t\t\"derp.server.automatically_add_embedded_derp_region\",\n\t)\n\n\tif serverEnabled && stunAddr == \"\" {\n\t\tlog.Fatal().\n\t\t\tMsg(\"derp.server.stun_listen_addr must be set if derp.server.enabled is true\")\n\t}\n\n\turlStrs := viper.GetStringSlice(\"derp.urls\")\n\n\t// Skip unparsable URLs instead of storing them; dereferencing the nil\n\t// result of a failed url.Parse would otherwise panic.\n\turls := make([]url.URL, 0, len(urlStrs))\n\tfor _, urlStr := range urlStrs {\n\t\turlAddr, err := url.Parse(urlStr)\n\t\tif err != nil {\n\t\t\tlog.Error().\n\t\t\t\tCaller().\n\t\t\t\tStr(\"url\", urlStr).\n\t\t\t\tErr(err).\n\t\t\t\tMsg(\"Failed to parse url, ignoring...\")\n\n\t\t\tcontinue\n\t\t}\n\n\t\turls = append(urls, *urlAddr)\n\t}\n\n\tpaths := viper.GetStringSlice(\"derp.paths\")\n\n\tif serverEnabled && !automaticallyAddEmbeddedDerpRegion && len(paths) == 0 {\n\t\tlog.Fatal().\n\t\t\tMsg(\"Disabling derp.server.automatically_add_embedded_derp_region requires the DERP server to be configured in derp.paths\")\n\t}\n\n\tautoUpdate := viper.GetBool(\"derp.auto_update_enabled\")\n\tupdateFrequency := viper.GetDuration(\"derp.update_frequency\")\n\n\treturn DERPConfig{\n\t\tServerEnabled:                      serverEnabled,\n\t\tServerRegionID:                     serverRegionID,\n\t\tServerRegionCode:                   serverRegionCode,\n\t\tServerRegionName:                   serverRegionName,\n\t\tServerVerifyClients:                serverVerifyClients,\n\t\tServerPrivateKeyPath:               privateKeyPath,\n\t\tSTUNAddr:                           stunAddr,\n\t\tURLs:                               urls,\n\t\tPaths:                              paths,\n\t\tAutoUpdate:                         autoUpdate,\n\t\tUpdateFrequency:                    updateFrequency,\n\t\tIPv4:                               ipv4,\n\t\tIPv6:                               ipv6,\n\t\tAutomaticallyAddEmbeddedDerpRegion: automaticallyAddEmbeddedDerpRegion,\n\t}\n}\n\nfunc logtailConfig() LogTailConfig {\n\tenabled := viper.GetBool(\"logtail.enabled\")\n\n\treturn LogTailConfig{\n\t\tEnabled: enabled,\n\t}\n}\n\nfunc policyConfig() PolicyConfig {\n\tpolicyPath := viper.GetString(\"policy.path\")\n\tpolicyMode := viper.GetString(\"policy.mode\")\n\n\treturn PolicyConfig{\n\t\tPath: policyPath,\n\t\tMode: PolicyMode(policyMode),\n\t}\n}\n\nfunc logConfig() LogConfig {\n\tlogLevelStr := viper.GetString(\"log.level\")\n\n\tlogLevel, err := zerolog.ParseLevel(logLevelStr)\n\tif err != nil {\n\t\tlogLevel = zerolog.DebugLevel\n\t}\n\n\tlogFormatOpt := viper.GetString(\"log.format\")\n\n\tvar logFormat string\n\n\tswitch logFormatOpt {\n\tcase JSONLogFormat:\n\t\tlogFormat = JSONLogFormat\n\tcase TextLogFormat:\n\t\tlogFormat = 
TextLogFormat\n\tcase \"\":\n\t\tlogFormat = TextLogFormat\n\tdefault:\n\t\tlog.Error().\n\t\t\tCaller().\n\t\t\tStr(\"func\", \"GetLogConfig\").\n\t\t\tMsgf(\"Could not parse log format: %s. Valid choices are 'json' or 'text'\", logFormatOpt)\n\t}\n\n\treturn LogConfig{\n\t\tFormat: logFormat,\n\t\tLevel:  logLevel,\n\t}\n}\n\nfunc databaseConfig() DatabaseConfig {\n\tdebug := viper.GetBool(\"database.debug\")\n\n\ttype_ := viper.GetString(\"database.type\")\n\n\tskipErrRecordNotFound := viper.GetBool(\"database.gorm.skip_err_record_not_found\")\n\tslowThreshold := time.Duration(viper.GetInt64(\"database.gorm.slow_threshold\")) * time.Millisecond\n\tparameterizedQueries := viper.GetBool(\"database.gorm.parameterized_queries\")\n\tprepareStmt := viper.GetBool(\"database.gorm.prepare_stmt\")\n\n\tswitch type_ {\n\tcase DatabaseSqlite, DatabasePostgres:\n\t\tbreak\n\tcase \"sqlite\":\n\t\ttype_ = \"sqlite3\"\n\tdefault:\n\t\tlog.Fatal().\n\t\t\tMsgf(\"invalid database type %q, must be sqlite, sqlite3 or postgres\", type_)\n\t}\n\n\treturn DatabaseConfig{\n\t\tType:  type_,\n\t\tDebug: debug,\n\t\tGorm: GormConfig{\n\t\t\tDebug:                 debug,\n\t\t\tSkipErrRecordNotFound: skipErrRecordNotFound,\n\t\t\tSlowThreshold:         slowThreshold,\n\t\t\tParameterizedQueries:  parameterizedQueries,\n\t\t\tPrepareStmt:           prepareStmt,\n\t\t},\n\t\tSqlite: SqliteConfig{\n\t\t\tPath: util.AbsolutePathFromConfigPath(\n\t\t\t\tviper.GetString(\"database.sqlite.path\"),\n\t\t\t),\n\t\t\tWriteAheadLog:     viper.GetBool(\"database.sqlite.write_ahead_log\"),\n\t\t\tWALAutoCheckPoint: viper.GetInt(\"database.sqlite.wal_autocheckpoint\"),\n\t\t},\n\t\tPostgres: PostgresConfig{\n\t\t\tHost:               viper.GetString(\"database.postgres.host\"),\n\t\t\tPort:               viper.GetInt(\"database.postgres.port\"),\n\t\t\tName:               viper.GetString(\"database.postgres.name\"),\n\t\t\tUser:               viper.GetString(\"database.postgres.user\"),\n\t\t\tPass:               viper.GetString(\"database.postgres.pass\"),\n\t\t\tSsl:                viper.GetString(\"database.postgres.ssl\"),\n\t\t\tMaxOpenConnections: viper.GetInt(\"database.postgres.max_open_conns\"),\n\t\t\tMaxIdleConnections: viper.GetInt(\"database.postgres.max_idle_conns\"),\n\t\t\tConnMaxIdleTimeSecs: viper.GetInt(\n\t\t\t\t\"database.postgres.conn_max_idle_time_secs\",\n\t\t\t),\n\t\t},\n\t}\n}\n\nfunc dns() (DNSConfig, error) {\n\tvar dns DNSConfig\n\n\t// TODO: Use this instead of manually getting settings when\n\t// UnmarshalKey is compatible with Environment Variables.\n\t// err := viper.UnmarshalKey(\"dns\", &dns)\n\t// if err != nil {\n\t// \treturn DNSConfig{}, fmt.Errorf(\"unmarshalling dns config: %w\", err)\n\t// }\n\n\tdns.MagicDNS = viper.GetBool(\"dns.magic_dns\")\n\tdns.BaseDomain = viper.GetString(\"dns.base_domain\")\n\tdns.OverrideLocalDNS = viper.GetBool(\"dns.override_local_dns\")\n\tdns.Nameservers.Global = viper.GetStringSlice(\"dns.nameservers.global\")\n\tdns.Nameservers.Split = viper.GetStringMapStringSlice(\"dns.nameservers.split\")\n\tdns.SearchDomains = viper.GetStringSlice(\"dns.search_domains\")\n\tdns.ExtraRecordsPath = viper.GetString(\"dns.extra_records_path\")\n\n\tif viper.IsSet(\"dns.extra_records\") {\n\t\tvar extraRecords []tailcfg.DNSRecord\n\n\t\terr := viper.UnmarshalKey(\"dns.extra_records\", &extraRecords)\n\t\tif err != nil {\n\t\t\treturn DNSConfig{}, fmt.Errorf(\"unmarshalling dns extra records: %w\", err)\n\t\t}\n\n\t\tdns.ExtraRecords = extraRecords\n\t}\n\n\treturn dns, 
nil\n}\n\n// globalResolvers returns the global DNS resolvers\n// defined in the config file.\n// If a nameserver is a valid IP, it will be used as a regular resolver.\n// If a nameserver is a valid URL, it will be used as a DoH resolver.\n// If a nameserver is neither a valid URL nor a valid IP, it will be ignored.\nfunc (d *DNSConfig) globalResolvers() []*dnstype.Resolver {\n\tvar resolvers []*dnstype.Resolver\n\n\tfor _, nsStr := range d.Nameservers.Global {\n\t\tif _, err := netip.ParseAddr(nsStr); err == nil { //nolint:noinlineerr\n\t\t\tresolvers = append(resolvers, &dnstype.Resolver{\n\t\t\t\tAddr: nsStr,\n\t\t\t})\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif _, err := url.Parse(nsStr); err == nil { //nolint:noinlineerr\n\t\t\tresolvers = append(resolvers, &dnstype.Resolver{\n\t\t\t\tAddr: nsStr,\n\t\t\t})\n\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Warn().Str(\"nameserver\", nsStr).Msg(\"invalid global nameserver, ignoring\")\n\t}\n\n\treturn resolvers\n}\n\n// splitResolvers returns a map of domain to DNS resolvers.\n// If a nameserver is a valid IP, it will be used as a regular resolver.\n// If a nameserver is a valid URL, it will be used as a DoH resolver.\n// If a nameserver is neither a valid URL nor a valid IP, it will be ignored.\nfunc (d *DNSConfig) splitResolvers() map[string][]*dnstype.Resolver {\n\troutes := make(map[string][]*dnstype.Resolver)\n\n\tfor domain, nameservers := range d.Nameservers.Split {\n\t\tvar resolvers []*dnstype.Resolver\n\n\t\tfor _, nsStr := range nameservers {\n\t\t\tif _, err := netip.ParseAddr(nsStr); err == nil { //nolint:noinlineerr\n\t\t\t\tresolvers = append(resolvers, &dnstype.Resolver{\n\t\t\t\t\tAddr: nsStr,\n\t\t\t\t})\n\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif _, err := url.Parse(nsStr); err == nil { //nolint:noinlineerr\n\t\t\t\tresolvers = append(resolvers, &dnstype.Resolver{\n\t\t\t\t\tAddr: nsStr,\n\t\t\t\t})\n\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlog.Warn().Str(\"nameserver\", nsStr).Str(\"domain\", domain).Msg(\"invalid split dns nameserver, ignoring\")\n\t\t}\n\n\t\troutes[domain] = resolvers\n\t}\n\n\treturn routes\n}\n\nfunc dnsToTailcfgDNS(dns DNSConfig) *tailcfg.DNSConfig {\n\tcfg := tailcfg.DNSConfig{}\n\n\tif dns.BaseDomain == \"\" && dns.MagicDNS {\n\t\tlog.Fatal().Msg(\"dns.base_domain must be set when using MagicDNS (dns.magic_dns)\")\n\t}\n\n\tcfg.Proxied = dns.MagicDNS\n\n\tcfg.ExtraRecords = dns.ExtraRecords\n\tif dns.OverrideLocalDNS {\n\t\tcfg.Resolvers = dns.globalResolvers()\n\t} else {\n\t\tcfg.FallbackResolvers = dns.globalResolvers()\n\t}\n\n\troutes := dns.splitResolvers()\n\n\tcfg.Routes = routes\n\tif dns.BaseDomain != \"\" {\n\t\tcfg.Domains = []string{dns.BaseDomain}\n\t}\n\n\tcfg.Domains = append(cfg.Domains, dns.SearchDomains...)\n\n\treturn &cfg\n}\n\n// warnBanner prints a highly visible warning banner to the log output.\n// It wraps the provided lines in an ASCII-art box with a \"Warning!\" header.\n// This is intended for critical configuration issues that users must not ignore.\nfunc warnBanner(lines []string) {\n\tvar b strings.Builder\n\n\tb.WriteString(\"\\n\")\n\tb.WriteString(\"################################################################\\n\")\n\tb.WriteString(\"###      __          __              _             _         ###\\n\")\n\tb.WriteString(\"###      \\\\ \\\\        / /             (_)           | |        ###\\n\")\n\tb.WriteString(\"###       \\\\ \\\\  /\\\\  / /_ _ _ __ _ __  _ _ __   __ _| |        ###\\n\")\n\tb.WriteString(\"###        \\\\ \\\\/  \\\\/ / _` | '__| '_ \\\\| | '_ \\\\ / _` | |   
     ###\\n\")\n\tb.WriteString(\"###         \\\\  /\\\\  / (_| | |  | | | | | | | | (_| |_|        ###\\n\")\n\tb.WriteString(\"###          \\\\/  \\\\/ \\\\__,_|_|  |_| |_|_|_| |_|\\\\__, (_)        ###\\n\")\n\tb.WriteString(\"###                                           __/ |          ###\\n\")\n\tb.WriteString(\"###                                          |___/           ###\\n\")\n\tb.WriteString(\"################################################################\\n\")\n\tb.WriteString(\"###                                                          ###\\n\")\n\n\tfor _, line := range lines {\n\t\tb.WriteString(fmt.Sprintf(\"###  %-54s  ###\\n\", line))\n\t}\n\n\tb.WriteString(\"###                                                          ###\\n\")\n\tb.WriteString(\"################################################################\")\n\n\tlog.Warn().Msg(b.String())\n}\n\nfunc prefixV4() (*netip.Prefix, bool, error) {\n\tprefixV4Str := viper.GetString(\"prefixes.v4\")\n\n\tif prefixV4Str == \"\" {\n\t\treturn nil, false, nil\n\t}\n\n\tprefixV4, err := netip.ParsePrefix(prefixV4Str)\n\tif err != nil {\n\t\treturn nil, false, fmt.Errorf(\"parsing IPv4 prefix from config: %w\", err)\n\t}\n\n\tbuilder := netipx.IPSetBuilder{}\n\tbuilder.AddPrefix(tsaddr.CGNATRange())\n\n\tipSet, _ := builder.IPSet()\n\n\treturn &prefixV4, !ipSet.ContainsPrefix(prefixV4), nil\n}\n\nfunc prefixV6() (*netip.Prefix, bool, error) {\n\tprefixV6Str := viper.GetString(\"prefixes.v6\")\n\n\tif prefixV6Str == \"\" {\n\t\treturn nil, false, nil\n\t}\n\n\tprefixV6, err := netip.ParsePrefix(prefixV6Str)\n\tif err != nil {\n\t\treturn nil, false, fmt.Errorf(\"parsing IPv6 prefix from config: %w\", err)\n\t}\n\n\tbuilder := netipx.IPSetBuilder{}\n\tbuilder.AddPrefix(tsaddr.TailscaleULARange())\n\tipSet, _ := builder.IPSet()\n\n\treturn &prefixV6, !ipSet.ContainsPrefix(prefixV6), nil\n}\n\n// LoadCLIConfig returns the needed configuration for the CLI client\n// of Headscale to connect to a Headscale server.\nfunc LoadCLIConfig() (*Config, error) {\n\tlogConfig := logConfig()\n\tzerolog.SetGlobalLevel(logConfig.Level)\n\n\treturn &Config{\n\t\tDisableUpdateCheck: viper.GetBool(\"disable_check_updates\"),\n\t\tUnixSocket:         viper.GetString(\"unix_socket\"),\n\t\tCLI: CLIConfig{\n\t\t\tAddress:  viper.GetString(\"cli.address\"),\n\t\t\tAPIKey:   viper.GetString(\"cli.api_key\"),\n\t\t\tTimeout:  viper.GetDuration(\"cli.timeout\"),\n\t\t\tInsecure: viper.GetBool(\"cli.insecure\"),\n\t\t},\n\t\tLog: logConfig,\n\t}, nil\n}\n\n// LoadServerConfig returns the full Headscale configuration to\n// host a Headscale server. This is called as part of `headscale serve`.\nfunc LoadServerConfig() (*Config, error) {\n\tif err := validateServerConfig(); err != nil { //nolint:noinlineerr\n\t\treturn nil, err\n\t}\n\n\tlogConfig := logConfig()\n\tzerolog.SetGlobalLevel(logConfig.Level)\n\n\tprefix4, v4NonStandard, err := prefixV4()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tprefix6, v6NonStandard, err := prefixV6()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif prefix4 == nil && prefix6 == nil {\n\t\treturn nil, ErrNoPrefixConfigured\n\t}\n\n\tif v4NonStandard || v6NonStandard {\n\t\twarnBanner([]string{\n\t\t\t\"You have overridden the default Headscale IP prefixes\",\n\t\t\t\"with a range outside of the standard CGNAT and/or ULA\",\n\t\t\t\"ranges. 
This is NOT a supported configuration.\",\n\t\t\t\"\",\n\t\t\t\"Using subsets of the default ranges (100.64.0.0/10 for\",\n\t\t\t\"IPv4, fd7a:115c:a1e0::/48 for IPv6) is fine. Using\",\n\t\t\t\"ranges outside of these will cause undefined behaviour\",\n\t\t\t\"as the Tailscale client is NOT designed to operate on\",\n\t\t\t\"any other ranges.\",\n\t\t\t\"\",\n\t\t\t\"Please revert your prefixes to subsets of the standard\",\n\t\t\t\"ranges as described in the example configuration.\",\n\t\t\t\"\",\n\t\t\t\"Any issue raised using a range outside of the\",\n\t\t\t\"supported range will be labelled as wontfix\",\n\t\t\t\"and closed.\",\n\t\t})\n\t}\n\n\tallocStr := viper.GetString(\"prefixes.allocation\")\n\n\tvar alloc IPAllocationStrategy\n\n\tswitch allocStr {\n\tcase string(IPAllocationStrategySequential):\n\t\talloc = IPAllocationStrategySequential\n\tcase string(IPAllocationStrategyRandom):\n\t\talloc = IPAllocationStrategyRandom\n\tdefault:\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"%w: %q, allowed options: %s, %s\",\n\t\t\tErrInvalidAllocationStrategy,\n\t\t\tallocStr,\n\t\t\tIPAllocationStrategySequential,\n\t\t\tIPAllocationStrategyRandom,\n\t\t)\n\t}\n\n\tdnsConfig, err := dns()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tderpConfig := derpConfig()\n\tlogTailConfig := logtailConfig()\n\trandomizeClientPort := viper.GetBool(\"randomize_client_port\")\n\n\toidcClientSecret := viper.GetString(\"oidc.client_secret\")\n\n\toidcClientSecretPath := viper.GetString(\"oidc.client_secret_path\")\n\tif oidcClientSecretPath != \"\" && oidcClientSecret != \"\" {\n\t\treturn nil, errOidcMutuallyExclusive\n\t}\n\n\tif oidcClientSecretPath != \"\" {\n\t\tsecretBytes, err := os.ReadFile(os.ExpandEnv(oidcClientSecretPath))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\toidcClientSecret = strings.TrimSpace(string(secretBytes))\n\t}\n\n\tserverURL := viper.GetString(\"server_url\")\n\n\t// BaseDomain cannot be the same as the server URL.\n\t// This is because Tailscale takes over the domain in BaseDomain,\n\t// causing the headscale server and DERP to be unreachable.\n\t// For Tailscale upstream, the following is true:\n\t// - DERP run on their own domains\n\t// - Control plane runs on login.tailscale.com/controlplane.tailscale.com\n\t// - MagicDNS (BaseDomain) for users is on a *.ts.net domain per tailnet (e.g. 
tail-scale.ts.net)\n\tif dnsConfig.BaseDomain != \"\" {\n\t\terr := isSafeServerURL(serverURL, dnsConfig.BaseDomain)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn &Config{\n\t\tServerURL:          serverURL,\n\t\tAddr:               viper.GetString(\"listen_addr\"),\n\t\tMetricsAddr:        viper.GetString(\"metrics_listen_addr\"),\n\t\tGRPCAddr:           viper.GetString(\"grpc_listen_addr\"),\n\t\tGRPCAllowInsecure:  viper.GetBool(\"grpc_allow_insecure\"),\n\t\tDisableUpdateCheck: false,\n\n\t\tPrefixV4:     prefix4,\n\t\tPrefixV6:     prefix6,\n\t\tIPAllocation: alloc,\n\n\t\tNoisePrivateKeyPath: util.AbsolutePathFromConfigPath(\n\t\t\tviper.GetString(\"noise.private_key_path\"),\n\t\t),\n\t\tBaseDomain: dnsConfig.BaseDomain,\n\n\t\tDERP: derpConfig,\n\n\t\tEphemeralNodeInactivityTimeout: viper.GetDuration(\n\t\t\t\"ephemeral_node_inactivity_timeout\",\n\t\t),\n\n\t\tDatabase: databaseConfig(),\n\n\t\tTLS: tlsConfig(),\n\n\t\tDNSConfig:        dnsConfig,\n\t\tTailcfgDNSConfig: dnsToTailcfgDNS(dnsConfig),\n\n\t\tACMEEmail: viper.GetString(\"acme_email\"),\n\t\tACMEURL:   viper.GetString(\"acme_url\"),\n\n\t\tUnixSocket:           viper.GetString(\"unix_socket\"),\n\t\tUnixSocketPermission: util.GetFileMode(\"unix_socket_permission\"),\n\n\t\tOIDC: OIDCConfig{\n\t\t\tOnlyStartIfOIDCIsAvailable: viper.GetBool(\n\t\t\t\t\"oidc.only_start_if_oidc_is_available\",\n\t\t\t),\n\t\t\tIssuer:                viper.GetString(\"oidc.issuer\"),\n\t\t\tClientID:              viper.GetString(\"oidc.client_id\"),\n\t\t\tClientSecret:          oidcClientSecret,\n\t\t\tScope:                 viper.GetStringSlice(\"oidc.scope\"),\n\t\t\tExtraParams:           viper.GetStringMapString(\"oidc.extra_params\"),\n\t\t\tAllowedDomains:        viper.GetStringSlice(\"oidc.allowed_domains\"),\n\t\t\tAllowedUsers:          viper.GetStringSlice(\"oidc.allowed_users\"),\n\t\t\tAllowedGroups:         viper.GetStringSlice(\"oidc.allowed_groups\"),\n\t\t\tEmailVerifiedRequired: viper.GetBool(\"oidc.email_verified_required\"),\n\t\t\tExpiry: func() time.Duration {\n\t\t\t\t// if set to 0, we assume no expiry\n\t\t\t\tif value := viper.GetString(\"oidc.expiry\"); value == \"0\" {\n\t\t\t\t\treturn maxDuration\n\t\t\t\t} else {\n\t\t\t\t\texpiry, err := model.ParseDuration(value)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Warn().Msg(\"failed to parse oidc.expiry, defaulting back to 180 days\")\n\n\t\t\t\t\t\treturn defaultOIDCExpiryTime\n\t\t\t\t\t}\n\n\t\t\t\t\treturn time.Duration(expiry)\n\t\t\t\t}\n\t\t\t}(),\n\t\t\tUseExpiryFromToken: viper.GetBool(\"oidc.use_expiry_from_token\"),\n\t\t\tPKCE: PKCEConfig{\n\t\t\t\tEnabled: viper.GetBool(\"oidc.pkce.enabled\"),\n\t\t\t\tMethod:  viper.GetString(\"oidc.pkce.method\"),\n\t\t\t},\n\t\t},\n\n\t\tLogTail:             logTailConfig,\n\t\tRandomizeClientPort: randomizeClientPort,\n\t\tTaildrop: TaildropConfig{\n\t\t\tEnabled: viper.GetBool(\"taildrop.enabled\"),\n\t\t},\n\n\t\tPolicy: policyConfig(),\n\n\t\tCLI: CLIConfig{\n\t\t\tAddress:  viper.GetString(\"cli.address\"),\n\t\t\tAPIKey:   viper.GetString(\"cli.api_key\"),\n\t\t\tTimeout:  viper.GetDuration(\"cli.timeout\"),\n\t\t\tInsecure: viper.GetBool(\"cli.insecure\"),\n\t\t},\n\n\t\tLog: logConfig,\n\n\t\tTuning: Tuning{\n\t\t\tNotifierSendTimeout: viper.GetDuration(\"tuning.notifier_send_timeout\"),\n\t\t\tBatchChangeDelay:    viper.GetDuration(\"tuning.batch_change_delay\"),\n\t\t\tNodeMapSessionBufferedChanSize: 
viper.GetInt(\n\t\t\t\t\"tuning.node_mapsession_buffered_chan_size\",\n\t\t\t),\n\t\t\tBatcherWorkers: func() int {\n\t\t\t\tif workers := viper.GetInt(\"tuning.batcher_workers\"); workers > 0 {\n\t\t\t\t\treturn workers\n\t\t\t\t}\n\n\t\t\t\treturn DefaultBatcherWorkers()\n\t\t\t}(),\n\t\t\tRegisterCacheCleanup:    viper.GetDuration(\"tuning.register_cache_cleanup\"),\n\t\t\tRegisterCacheExpiration: viper.GetDuration(\"tuning.register_cache_expiration\"),\n\t\t\tNodeStoreBatchSize:      viper.GetInt(\"tuning.node_store_batch_size\"),\n\t\t\tNodeStoreBatchTimeout:   viper.GetDuration(\"tuning.node_store_batch_timeout\"),\n\t\t},\n\t}, nil\n}\n\n// BaseDomain cannot be a suffix of the server URL.\n// This is because Tailscale takes over the domain in BaseDomain,\n// causing the headscale server and DERP to be unreachable.\n// For Tailscale upstream, the following is true:\n// - DERP run on their own domains.\n// - Control plane runs on login.tailscale.com/controlplane.tailscale.com.\n// - MagicDNS (BaseDomain) for users is on a *.ts.net domain per tailnet (e.g. tail-scale.ts.net).\nfunc isSafeServerURL(serverURL, baseDomain string) error {\n\tserver, err := url.Parse(serverURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif server.Hostname() == baseDomain {\n\t\treturn errServerURLSame\n\t}\n\n\tserverDomainParts := strings.Split(server.Host, \".\")\n\tbaseDomainParts := strings.Split(baseDomain, \".\")\n\n\tif len(serverDomainParts) <= len(baseDomainParts) {\n\t\treturn nil\n\t}\n\n\ts := len(serverDomainParts)\n\n\tb := len(baseDomainParts)\n\tfor i := range baseDomainParts {\n\t\tif serverDomainParts[s-i-1] != baseDomainParts[b-i-1] {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn errServerURLSuffix\n}\n\ntype deprecator struct {\n\twarns  set.Set[string]\n\tfatals set.Set[string]\n}\n\n// warnWithAlias will register an alias between the newKey and the oldKey,\n// and log a deprecation warning if the oldKey is set.\n//\n//nolint:unused\nfunc (d *deprecator) warnWithAlias(newKey, oldKey string) {\n\t// NOTE: RegisterAlias is called with NEW KEY -> OLD KEY\n\tviper.RegisterAlias(newKey, oldKey)\n\n\tif viper.IsSet(oldKey) {\n\t\td.warns.Add(\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"The %q configuration key is deprecated. Please use %q instead. %q will be removed in the future.\",\n\t\t\t\toldKey,\n\t\t\t\tnewKey,\n\t\t\t\toldKey,\n\t\t\t),\n\t\t)\n\t}\n}\n\n// fatal deprecates and adds an entry to the fatal list of options if the oldKey is set.\nfunc (d *deprecator) fatal(oldKey string) {\n\tif viper.IsSet(oldKey) {\n\t\td.fatals.Add(\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"The %q configuration key has been removed. Please see the changelog for more details.\",\n\t\t\t\toldKey,\n\t\t\t),\n\t\t)\n\t}\n}\n\n// fatalIfNewKeyIsNotUsed deprecates and adds an entry to the fatal list of options if the oldKey is set and the new key is _not_ set.\n// If the new key is set, a warning is emitted instead.\nfunc (d *deprecator) fatalIfNewKeyIsNotUsed(newKey, oldKey string) {\n\tif viper.IsSet(oldKey) && !viper.IsSet(newKey) {\n\t\td.fatals.Add(\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"The %q configuration key is deprecated. Please use %q instead. %q has been removed.\",\n\t\t\t\toldKey,\n\t\t\t\tnewKey,\n\t\t\t\toldKey,\n\t\t\t),\n\t\t)\n\t} else if viper.IsSet(oldKey) {\n\t\td.warns.Add(fmt.Sprintf(\"The %q configuration key is deprecated. Please use %q instead. 
%q has been removed.\", oldKey, newKey, oldKey))\n\t}\n}\n\n// warnNoAlias deprecates and adds an option to log a warning if the oldKey is set.\n//\n//nolint:unused\nfunc (d *deprecator) warnNoAlias(newKey, oldKey string) {\n\tif viper.IsSet(oldKey) {\n\t\td.warns.Add(\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"The %q configuration key is deprecated. Please use %q instead. %q has been removed.\",\n\t\t\t\toldKey,\n\t\t\t\tnewKey,\n\t\t\t\toldKey,\n\t\t\t),\n\t\t)\n\t}\n}\n\n// warn deprecates and adds an entry to the warn list of options if the oldKey is set.\n//\n//nolint:unused\nfunc (d *deprecator) warn(oldKey string) {\n\tif viper.IsSet(oldKey) {\n\t\td.warns.Add(\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"The %q configuration key is deprecated and has been removed. Please see the changelog for more details.\",\n\t\t\t\toldKey,\n\t\t\t),\n\t\t)\n\t}\n}\n\nfunc (d *deprecator) String() string {\n\tvar b strings.Builder\n\n\tfor _, w := range d.warns.Slice() {\n\t\tfmt.Fprintf(&b, \"WARN: %s\\n\", w)\n\t}\n\n\tfor _, f := range d.fatals.Slice() {\n\t\tfmt.Fprintf(&b, \"FATAL: %s\\n\", f)\n\t}\n\n\treturn b.String()\n}\n\nfunc (d *deprecator) Log() {\n\tif len(d.fatals) > 0 {\n\t\tlog.Fatal().Msg(\"\\n\" + d.String())\n\t} else if len(d.warns) > 0 {\n\t\tlog.Warn().Msg(\"\\n\" + d.String())\n\t}\n}\n"
  },
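  {
    "path": "hscontrol/types/config_example_test.go",
    "content": "package types\n\nimport (\n\t\"fmt\"\n\t\"net/netip\"\n\n\t\"go4.org/netipx\"\n\t\"tailscale.com/net/tsaddr\"\n)\n\n// Example_standardPrefixCheck is an illustrative sketch added for\n// documentation; it is not part of the upstream headscale sources.\n// It demonstrates the containment rule used by prefixV4 and prefixV6\n// in config.go: a configured prefix only counts as \"standard\" when it\n// is a subset of the CGNAT range (100.64.0.0/10) or, for IPv6, the\n// Tailscale ULA range (fd7a:115c:a1e0::/48); anything else triggers\n// the warnBanner notice in LoadServerConfig.\nfunc Example_standardPrefixCheck() {\n\t// Build the same IPSet that prefixV4 checks against.\n\tbuilder := netipx.IPSetBuilder{}\n\tbuilder.AddPrefix(tsaddr.CGNATRange())\n\tipSet, _ := builder.IPSet()\n\n\tfor _, s := range []string{\"100.64.0.0/10\", \"100.64.1.0/24\", \"10.0.0.0/8\"} {\n\t\tprefix := netip.MustParsePrefix(s)\n\t\tfmt.Printf(\"%s standard=%v\\n\", s, ipSet.ContainsPrefix(prefix))\n\t}\n\t// Output:\n\t// 100.64.0.0/10 standard=true\n\t// 100.64.1.0/24 standard=true\n\t// 10.0.0.0/8 standard=false\n}\n"
  },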
  {
    "path": "hscontrol/types/config_test.go",
    "content": "package types\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/google/go-cmp/cmp/cmpopts\"\n\t\"github.com/spf13/viper\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/dnstype\"\n)\n\nfunc TestReadConfig(t *testing.T) {\n\ttests := []struct {\n\t\tname       string\n\t\tconfigPath string\n\t\tsetup      func(*testing.T) (any, error)\n\t\twant       any\n\t\twantErr    string\n\t}{\n\t\t{\n\t\t\tname:       \"unmarshal-dns-full-config\",\n\t\t\tconfigPath: \"testdata/dns_full.yaml\",\n\t\t\tsetup: func(t *testing.T) (any, error) { //nolint:thelper\n\t\t\t\tdns, err := dns()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\treturn dns, nil\n\t\t\t},\n\t\t\twant: DNSConfig{\n\t\t\t\tMagicDNS:         true,\n\t\t\t\tBaseDomain:       \"example.com\",\n\t\t\t\tOverrideLocalDNS: false,\n\t\t\t\tNameservers: Nameservers{\n\t\t\t\t\tGlobal: []string{\n\t\t\t\t\t\t\"1.1.1.1\",\n\t\t\t\t\t\t\"1.0.0.1\",\n\t\t\t\t\t\t\"2606:4700:4700::1111\",\n\t\t\t\t\t\t\"2606:4700:4700::1001\",\n\t\t\t\t\t\t\"https://dns.nextdns.io/abc123\",\n\t\t\t\t\t},\n\t\t\t\t\tSplit: map[string][]string{\n\t\t\t\t\t\t\"darp.headscale.net\": {\"1.1.1.1\", \"8.8.8.8\"},\n\t\t\t\t\t\t\"foo.bar.com\":        {\"1.1.1.1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tExtraRecords: []tailcfg.DNSRecord{\n\t\t\t\t\t{Name: \"grafana.myvpn.example.com\", Type: \"A\", Value: \"100.64.0.3\"},\n\t\t\t\t\t{Name: \"prometheus.myvpn.example.com\", Type: \"A\", Value: \"100.64.0.4\"},\n\t\t\t\t},\n\t\t\t\tSearchDomains: []string{\"test.com\", \"bar.com\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:       \"dns-to-tailcfg.DNSConfig\",\n\t\t\tconfigPath: \"testdata/dns_full.yaml\",\n\t\t\tsetup: func(t *testing.T) (any, error) { //nolint:thelper\n\t\t\t\tdns, err := dns()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\treturn dnsToTailcfgDNS(dns), nil\n\t\t\t},\n\t\t\twant: &tailcfg.DNSConfig{\n\t\t\t\tProxied: true,\n\t\t\t\tDomains: []string{\"example.com\", \"test.com\", \"bar.com\"},\n\t\t\t\tFallbackResolvers: []*dnstype.Resolver{\n\t\t\t\t\t{Addr: \"1.1.1.1\"},\n\t\t\t\t\t{Addr: \"1.0.0.1\"},\n\t\t\t\t\t{Addr: \"2606:4700:4700::1111\"},\n\t\t\t\t\t{Addr: \"2606:4700:4700::1001\"},\n\t\t\t\t\t{Addr: \"https://dns.nextdns.io/abc123\"},\n\t\t\t\t},\n\t\t\t\tRoutes: map[string][]*dnstype.Resolver{\n\t\t\t\t\t\"darp.headscale.net\": {{Addr: \"1.1.1.1\"}, {Addr: \"8.8.8.8\"}},\n\t\t\t\t\t\"foo.bar.com\":        {{Addr: \"1.1.1.1\"}},\n\t\t\t\t},\n\t\t\t\tExtraRecords: []tailcfg.DNSRecord{\n\t\t\t\t\t{Name: \"grafana.myvpn.example.com\", Type: \"A\", Value: \"100.64.0.3\"},\n\t\t\t\t\t{Name: \"prometheus.myvpn.example.com\", Type: \"A\", Value: \"100.64.0.4\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:       \"unmarshal-dns-full-no-magic\",\n\t\t\tconfigPath: \"testdata/dns_full_no_magic.yaml\",\n\t\t\tsetup: func(t *testing.T) (any, error) { //nolint:thelper\n\t\t\t\tdns, err := dns()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\treturn dns, nil\n\t\t\t},\n\t\t\twant: DNSConfig{\n\t\t\t\tMagicDNS:         false,\n\t\t\t\tBaseDomain:       \"example.com\",\n\t\t\t\tOverrideLocalDNS: false,\n\t\t\t\tNameservers: Nameservers{\n\t\t\t\t\tGlobal: 
[]string{\n\t\t\t\t\t\t\"1.1.1.1\",\n\t\t\t\t\t\t\"1.0.0.1\",\n\t\t\t\t\t\t\"2606:4700:4700::1111\",\n\t\t\t\t\t\t\"2606:4700:4700::1001\",\n\t\t\t\t\t\t\"https://dns.nextdns.io/abc123\",\n\t\t\t\t\t},\n\t\t\t\t\tSplit: map[string][]string{\n\t\t\t\t\t\t\"darp.headscale.net\": {\"1.1.1.1\", \"8.8.8.8\"},\n\t\t\t\t\t\t\"foo.bar.com\":        {\"1.1.1.1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tExtraRecords: []tailcfg.DNSRecord{\n\t\t\t\t\t{Name: \"grafana.myvpn.example.com\", Type: \"A\", Value: \"100.64.0.3\"},\n\t\t\t\t\t{Name: \"prometheus.myvpn.example.com\", Type: \"A\", Value: \"100.64.0.4\"},\n\t\t\t\t},\n\t\t\t\tSearchDomains: []string{\"test.com\", \"bar.com\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:       \"dns-to-tailcfg.DNSConfig\",\n\t\t\tconfigPath: \"testdata/dns_full_no_magic.yaml\",\n\t\t\tsetup: func(t *testing.T) (any, error) { //nolint:thelper\n\t\t\t\tdns, err := dns()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\treturn dnsToTailcfgDNS(dns), nil\n\t\t\t},\n\t\t\twant: &tailcfg.DNSConfig{\n\t\t\t\tProxied: false,\n\t\t\t\tDomains: []string{\"example.com\", \"test.com\", \"bar.com\"},\n\t\t\t\tFallbackResolvers: []*dnstype.Resolver{\n\t\t\t\t\t{Addr: \"1.1.1.1\"},\n\t\t\t\t\t{Addr: \"1.0.0.1\"},\n\t\t\t\t\t{Addr: \"2606:4700:4700::1111\"},\n\t\t\t\t\t{Addr: \"2606:4700:4700::1001\"},\n\t\t\t\t\t{Addr: \"https://dns.nextdns.io/abc123\"},\n\t\t\t\t},\n\t\t\t\tRoutes: map[string][]*dnstype.Resolver{\n\t\t\t\t\t\"darp.headscale.net\": {{Addr: \"1.1.1.1\"}, {Addr: \"8.8.8.8\"}},\n\t\t\t\t\t\"foo.bar.com\":        {{Addr: \"1.1.1.1\"}},\n\t\t\t\t},\n\t\t\t\tExtraRecords: []tailcfg.DNSRecord{\n\t\t\t\t\t{Name: \"grafana.myvpn.example.com\", Type: \"A\", Value: \"100.64.0.3\"},\n\t\t\t\t\t{Name: \"prometheus.myvpn.example.com\", Type: \"A\", Value: \"100.64.0.4\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:       \"base-domain-in-server-url-err\",\n\t\t\tconfigPath: \"testdata/base-domain-in-server-url.yaml\",\n\t\t\tsetup: func(t *testing.T) (any, error) { //nolint:thelper\n\t\t\t\treturn LoadServerConfig()\n\t\t\t},\n\t\t\twant:    nil,\n\t\t\twantErr: errServerURLSuffix.Error(),\n\t\t},\n\t\t{\n\t\t\tname:       \"base-domain-not-in-server-url\",\n\t\t\tconfigPath: \"testdata/base-domain-not-in-server-url.yaml\",\n\t\t\tsetup: func(t *testing.T) (any, error) { //nolint:thelper\n\t\t\t\tcfg, err := LoadServerConfig()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\treturn map[string]string{\n\t\t\t\t\t\"server_url\":  cfg.ServerURL,\n\t\t\t\t\t\"base_domain\": cfg.BaseDomain,\n\t\t\t\t}, err\n\t\t\t},\n\t\t\twant: map[string]string{\n\t\t\t\t\"server_url\":  \"https://derp.no\",\n\t\t\t\t\"base_domain\": \"clients.derp.no\",\n\t\t\t},\n\t\t\twantErr: \"\",\n\t\t},\n\t\t{\n\t\t\tname:       \"dns-override-true-errors\",\n\t\t\tconfigPath: \"testdata/dns-override-true-error.yaml\",\n\t\t\tsetup: func(t *testing.T) (any, error) { //nolint:thelper\n\t\t\t\treturn LoadServerConfig()\n\t\t\t},\n\t\t\twantErr: \"Fatal config error: dns.nameservers.global must be set when dns.override_local_dns is true\",\n\t\t},\n\t\t{\n\t\t\tname:       \"dns-override-true\",\n\t\t\tconfigPath: \"testdata/dns-override-true.yaml\",\n\t\t\tsetup: func(t *testing.T) (any, error) { //nolint:thelper\n\t\t\t\t_, err := LoadServerConfig()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\tdns, err := dns()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\treturn dnsToTailcfgDNS(dns), 
nil\n\t\t\t},\n\t\t\twant: &tailcfg.DNSConfig{\n\t\t\t\tProxied: true,\n\t\t\t\tDomains: []string{\"derp2.no\"},\n\t\t\t\tRoutes:  map[string][]*dnstype.Resolver{},\n\t\t\t\tResolvers: []*dnstype.Resolver{\n\t\t\t\t\t{Addr: \"1.1.1.1\"},\n\t\t\t\t\t{Addr: \"1.0.0.1\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:       \"policy-path-is-loaded\",\n\t\t\tconfigPath: \"testdata/policy-path-is-loaded.yaml\",\n\t\t\tsetup: func(t *testing.T) (any, error) { //nolint:thelper // inline test closure\n\t\t\t\tcfg, err := LoadServerConfig()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\treturn map[string]string{\n\t\t\t\t\t\"policy.mode\": string(cfg.Policy.Mode),\n\t\t\t\t\t\"policy.path\": cfg.Policy.Path,\n\t\t\t\t}, err\n\t\t\t},\n\t\t\twant: map[string]string{\n\t\t\t\t\"policy.mode\": \"file\",\n\t\t\t\t\"policy.path\": \"/etc/policy.hujson\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tviper.Reset()\n\n\t\t\terr := LoadConfig(tt.configPath, true)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tconf, err := tt.setup(t)\n\n\t\t\tif tt.wantErr != \"\" {\n\t\t\t\tassert.Equal(t, tt.wantErr, err.Error())\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\n\t\t\tif diff := cmp.Diff(tt.want, conf); diff != \"\" {\n\t\t\t\tt.Errorf(\"ReadConfig() mismatch (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestReadConfigFromEnv(t *testing.T) {\n\ttests := []struct {\n\t\tname      string\n\t\tconfigEnv map[string]string\n\t\tsetup     func(*testing.T) (any, error)\n\t\twant      any\n\t}{\n\t\t{\n\t\t\tname: \"test-random-base-settings-with-env\",\n\t\t\tconfigEnv: map[string]string{\n\t\t\t\t\"HEADSCALE_LOG_LEVEL\":                       \"trace\",\n\t\t\t\t\"HEADSCALE_DATABASE_SQLITE_WRITE_AHEAD_LOG\": \"false\",\n\t\t\t\t\"HEADSCALE_PREFIXES_V4\":                     \"100.64.0.0/10\",\n\t\t\t},\n\t\t\tsetup: func(t *testing.T) (any, error) { //nolint:thelper // inline test closure\n\t\t\t\tt.Logf(\"all settings: %#v\", viper.AllSettings())\n\n\t\t\t\tassert.Equal(t, \"trace\", viper.GetString(\"log.level\"))\n\t\t\t\tassert.Equal(t, \"100.64.0.0/10\", viper.GetString(\"prefixes.v4\"))\n\t\t\t\tassert.False(t, viper.GetBool(\"database.sqlite.write_ahead_log\"))\n\n\t\t\t\treturn nil, nil //nolint:nilnil // test setup returns nil to indicate no expected value\n\t\t\t},\n\t\t\twant: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"unmarshal-dns-full-config\",\n\t\t\tconfigEnv: map[string]string{\n\t\t\t\t\"HEADSCALE_DNS_MAGIC_DNS\":          \"true\",\n\t\t\t\t\"HEADSCALE_DNS_BASE_DOMAIN\":        \"example.com\",\n\t\t\t\t\"HEADSCALE_DNS_OVERRIDE_LOCAL_DNS\": \"false\",\n\t\t\t\t\"HEADSCALE_DNS_NAMESERVERS_GLOBAL\": `1.1.1.1 8.8.8.8`,\n\t\t\t\t\"HEADSCALE_DNS_SEARCH_DOMAINS\":     \"test.com bar.com\",\n\n\t\t\t\t// TODO(kradalby): Figure out how to pass these as env vars\n\t\t\t\t// \"HEADSCALE_DNS_NAMESERVERS_SPLIT\":  `{foo.bar.com: [\"1.1.1.1\"]}`,\n\t\t\t\t// \"HEADSCALE_DNS_EXTRA_RECORDS\":      `[{ name: \"prometheus.myvpn.example.com\", type: \"A\", value: \"100.64.0.4\" }]`,\n\t\t\t},\n\t\t\tsetup: func(t *testing.T) (any, error) { //nolint:thelper // inline test closure\n\t\t\t\tt.Logf(\"all settings: %#v\", viper.AllSettings())\n\n\t\t\t\tdns, err := dns()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\treturn dns, nil\n\t\t\t},\n\t\t\twant: DNSConfig{\n\t\t\t\tMagicDNS:         true,\n\t\t\t\tBaseDomain:       \"example.com\",\n\t\t\t\tOverrideLocalDNS: 
false,\n\t\t\t\tNameservers: Nameservers{\n\t\t\t\t\tGlobal: []string{\"1.1.1.1\", \"8.8.8.8\"},\n\t\t\t\t\tSplit:  map[string][]string{\n\t\t\t\t\t\t// \"foo.bar.com\": {\"1.1.1.1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t// ExtraRecords: []tailcfg.DNSRecord{\n\t\t\t\t// \t{Name: \"prometheus.myvpn.example.com\", Type: \"A\", Value: \"100.64.0.4\"},\n\t\t\t\t// },\n\t\t\t\tSearchDomains: []string{\"test.com\", \"bar.com\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tfor k, v := range tt.configEnv {\n\t\t\t\tt.Setenv(k, v)\n\t\t\t}\n\n\t\t\tviper.Reset()\n\n\t\t\terr := LoadConfig(\"testdata/minimal.yaml\", true)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tconf, err := tt.setup(t)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tif diff := cmp.Diff(tt.want, conf, cmpopts.EquateEmpty()); diff != \"\" {\n\t\t\t\tt.Errorf(\"ReadConfig() mismatch (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestTLSConfigValidation(t *testing.T) {\n\ttmpDir := t.TempDir()\n\n\tvar err error\n\n\tconfigYaml := []byte(`---\ntls_letsencrypt_hostname: example.com\ntls_letsencrypt_challenge_type: \"\"\ntls_cert_path: abc.pem\nnoise:\n  private_key_path: noise_private.key`)\n\n\t// Populate a custom config file\n\tconfigFilePath := filepath.Join(tmpDir, \"config.yaml\")\n\n\terr = os.WriteFile(configFilePath, configYaml, 0o600)\n\tif err != nil {\n\t\tt.Fatalf(\"Couldn't write file %s\", configFilePath)\n\t}\n\n\t// Check configuration validation errors (1)\n\terr = LoadConfig(tmpDir, false)\n\trequire.NoError(t, err)\n\n\terr = validateServerConfig()\n\trequire.Error(t, err)\n\tassert.Contains(\n\t\tt,\n\t\terr.Error(),\n\t\t\"Fatal config error: set either tls_letsencrypt_hostname or tls_cert_path/tls_key_path, not both\",\n\t)\n\tassert.Contains(\n\t\tt,\n\t\terr.Error(),\n\t\t\"Fatal config error: the only supported values for tls_letsencrypt_challenge_type are\",\n\t)\n\tassert.Contains(\n\t\tt,\n\t\terr.Error(),\n\t\t\"Fatal config error: server_url must start with https:// or http://\",\n\t)\n\n\t// Check configuration validation errors (2)\n\tconfigYaml = []byte(`---\nnoise:\n  private_key_path: noise_private.key\nserver_url: http://127.0.0.1:8080\ntls_letsencrypt_hostname: example.com\ntls_letsencrypt_challenge_type: TLS-ALPN-01\n`)\n\n\terr = os.WriteFile(configFilePath, configYaml, 0o600)\n\tif err != nil {\n\t\tt.Fatalf(\"Couldn't write file %s\", configFilePath)\n\t}\n\n\terr = LoadConfig(tmpDir, false)\n\trequire.NoError(t, err)\n}\n\n// OK\n// server_url: headscale.com, base: clients.headscale.com\n// server_url: headscale.com, base: headscale.net\n//\n// NOT OK\n// server_url: server.headscale.com, base: headscale.com.\nfunc TestSafeServerURL(t *testing.T) {\n\ttests := []struct {\n\t\tserverURL, baseDomain,\n\t\twantErr string\n\t}{\n\t\t{\n\t\t\tserverURL:  \"https://example.com\",\n\t\t\tbaseDomain: \"example.org\",\n\t\t},\n\t\t{\n\t\t\tserverURL:  \"https://headscale.com\",\n\t\t\tbaseDomain: \"headscale.com\",\n\t\t\twantErr:    errServerURLSame.Error(),\n\t\t},\n\t\t{\n\t\t\tserverURL:  \"https://headscale.com\",\n\t\t\tbaseDomain: \"clients.headscale.com\",\n\t\t},\n\t\t{\n\t\t\tserverURL:  \"https://headscale.com\",\n\t\t\tbaseDomain: \"clients.subdomain.headscale.com\",\n\t\t},\n\t\t{\n\t\t\tserverURL:  \"https://headscale.kristoffer.com\",\n\t\t\tbaseDomain: \"mybase\",\n\t\t},\n\t\t{\n\t\t\tserverURL:  \"https://server.headscale.com\",\n\t\t\tbaseDomain: \"headscale.com\",\n\t\t\twantErr:    
errServerURLSuffix.Error(),\n\t\t},\n\t\t{\n\t\t\tserverURL:  \"https://server.subdomain.headscale.com\",\n\t\t\tbaseDomain: \"headscale.com\",\n\t\t\twantErr:    errServerURLSuffix.Error(),\n\t\t},\n\t\t{\n\t\t\tserverURL: \"http://foo\\x00\",\n\t\t\twantErr:   `parse \"http://foo\\x00\": net/url: invalid control character in URL`,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\ttestName := fmt.Sprintf(\"server=%s domain=%s\", tt.serverURL, tt.baseDomain)\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\terr := isSafeServerURL(tt.serverURL, tt.baseDomain)\n\t\t\tif tt.wantErr != \"\" {\n\t\t\t\tassert.EqualError(t, err, tt.wantErr)\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "hscontrol/types/const.go",
    "content": "package types\n\nimport \"time\"\n\nconst (\n\tHTTPTimeout            = 30 * time.Second\n\tHTTPShutdownTimeout    = 3 * time.Second\n\tTLSALPN01ChallengeType = \"TLS-ALPN-01\"\n\tHTTP01ChallengeType    = \"HTTP-01\"\n\n\tJSONLogFormat = \"json\"\n\tTextLogFormat = \"text\"\n\n\tKeepAliveInterval = 60 * time.Second\n\tMaxHostnameLength = 255\n)\n"
  },
  {
    "path": "hscontrol/types/main_test.go",
    "content": "package types\n\nimport (\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"testing\"\n)\n\n// TestMain ensures the working directory is set to the package source directory\n// so that relative testdata/ paths resolve correctly when the test binary is\n// executed from an arbitrary location (e.g., via \"go tool stress\").\nfunc TestMain(m *testing.M) {\n\t_, filename, _, ok := runtime.Caller(0)\n\tif !ok {\n\t\tpanic(\"could not determine test source directory\")\n\t}\n\n\terr := os.Chdir(filepath.Dir(filename))\n\tif err != nil {\n\t\tpanic(\"could not chdir to test source directory: \" + err.Error())\n\t}\n\n\tos.Exit(m.Run())\n}\n"
  },
  {
    "path": "hscontrol/types/node.go",
    "content": "package types\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net/netip\"\n\t\"regexp\"\n\t\"slices\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\t\"github.com/juanfont/headscale/hscontrol/policy/matcher\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/juanfont/headscale/hscontrol/util/zlog/zf\"\n\t\"github.com/rs/zerolog\"\n\t\"github.com/rs/zerolog/log\"\n\t\"go4.org/netipx\"\n\t\"google.golang.org/protobuf/types/known/timestamppb\"\n\t\"tailscale.com/net/tsaddr\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/key\"\n\t\"tailscale.com/types/views\"\n)\n\nvar (\n\tErrNodeAddressesInvalid = errors.New(\"parsing node addresses\")\n\tErrHostnameTooLong      = errors.New(\"hostname too long, cannot accept more than 255 ASCII chars\")\n\tErrNodeHasNoGivenName   = errors.New(\"node has no given name\")\n\tErrNodeUserHasNoName    = errors.New(\"node user has no name\")\n\tErrCannotRemoveAllTags  = errors.New(\"cannot remove all tags from node\")\n\tErrInvalidNodeView      = errors.New(\"cannot convert invalid NodeView to tailcfg.Node\")\n\n\tinvalidDNSRegex = regexp.MustCompile(\"[^a-z0-9-.]+\")\n)\n\n// RouteFunc is a function that takes a node ID and returns a list of\n// netip.Prefixes representing the primary routes for that node.\ntype RouteFunc func(id NodeID) []netip.Prefix\n\ntype (\n\tNodeID  uint64\n\tNodeIDs []NodeID\n)\n\nfunc (n NodeIDs) Len() int           { return len(n) }\nfunc (n NodeIDs) Less(i, j int) bool { return n[i] < n[j] }\nfunc (n NodeIDs) Swap(i, j int)      { n[i], n[j] = n[j], n[i] }\n\nfunc (id NodeID) StableID() tailcfg.StableNodeID {\n\treturn tailcfg.StableNodeID(strconv.FormatUint(uint64(id), util.Base10))\n}\n\nfunc (id NodeID) NodeID() tailcfg.NodeID {\n\treturn tailcfg.NodeID(id) //nolint:gosec // NodeID is bounded\n}\n\nfunc (id NodeID) Uint64() uint64 {\n\treturn uint64(id)\n}\n\nfunc (id NodeID) String() string {\n\treturn strconv.FormatUint(id.Uint64(), util.Base10)\n}\n\nfunc ParseNodeID(s string) (NodeID, error) {\n\tid, err := strconv.ParseUint(s, util.Base10, 64)\n\treturn NodeID(id), err\n}\n\nfunc MustParseNodeID(s string) NodeID {\n\tid, err := ParseNodeID(s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn id\n}\n\n// Node is a Headscale client.\ntype Node struct {\n\tID NodeID `gorm:\"primary_key\"`\n\n\tMachineKey key.MachinePublic `gorm:\"serializer:text\"`\n\tNodeKey    key.NodePublic    `gorm:\"serializer:text\"`\n\tDiscoKey   key.DiscoPublic   `gorm:\"serializer:text\"`\n\n\tEndpoints []netip.AddrPort `gorm:\"serializer:json\"`\n\n\tHostinfo *tailcfg.Hostinfo `gorm:\"column:host_info;serializer:json\"`\n\n\tIPv4 *netip.Addr `gorm:\"column:ipv4;serializer:text\"`\n\tIPv6 *netip.Addr `gorm:\"column:ipv6;serializer:text\"`\n\n\t// Hostname represents the name given by the Tailscale\n\t// client during registration\n\tHostname string\n\n\t// Givenname represents either:\n\t// a DNS normalized version of Hostname\n\t// a valid name set by the User\n\t//\n\t// GivenName is the name used in all DNS related\n\t// parts of headscale.\n\tGivenName string `gorm:\"type:varchar(63);unique_index\"`\n\n\t// UserID identifies the owning user for user-owned nodes.\n\t// Nil for tagged nodes, which are owned by their tags.\n\tUserID *uint\n\tUser   *User `gorm:\"constraint:OnDelete:CASCADE;\"`\n\n\tRegisterMethod string\n\n\t// Tags is the definitive owner for tagged nodes.\n\t// When non-empty, the node is \"tagged\" and tags define its identity.\n\t// 
Empty for user-owned nodes.\n\t// Tags cannot be removed once set (one-way transition).\n\tTags []string `gorm:\"column:tags;serializer:json\"`\n\n\t// When a node has been created with a PreAuthKey, we need to\n\t// prevent the preauthkey from being deleted before the node.\n\t// The preauthkey can define \"tags\" of the node so we need it\n\t// around.\n\tAuthKeyID *uint64 `sql:\"DEFAULT:NULL\"`\n\tAuthKey   *PreAuthKey\n\n\tExpiry *time.Time\n\n\t// LastSeen is when the node was last in contact with\n\t// headscale. It is best effort and not persisted.\n\tLastSeen *time.Time `gorm:\"column:last_seen\"`\n\n\t// ApprovedRoutes is a list of routes that the node is allowed to announce\n\t// as a subnet router. They are not necessarily the routes that the node\n\t// announces at the moment.\n\t// See [Node.Hostinfo]\n\tApprovedRoutes []netip.Prefix `gorm:\"column:approved_routes;serializer:json\"`\n\n\tCreatedAt time.Time\n\tUpdatedAt time.Time\n\tDeletedAt *time.Time\n\n\tIsOnline *bool `gorm:\"-\"`\n}\n\ntype Nodes []*Node\n\nfunc (ns Nodes) ViewSlice() views.Slice[NodeView] {\n\tvs := make([]NodeView, len(ns))\n\tfor i, n := range ns {\n\t\tvs[i] = n.View()\n\t}\n\n\treturn views.SliceOf(vs)\n}\n\n// GivenNameHasBeenChanged returns whether the `givenName` can be automatically changed based on the `Hostname` of the node.\nfunc (node *Node) GivenNameHasBeenChanged() bool {\n\t// Strip invalid DNS characters for givenName comparison\n\tnormalised := strings.ToLower(node.Hostname)\n\tnormalised = invalidDNSRegex.ReplaceAllString(normalised, \"\")\n\n\treturn node.GivenName == normalised\n}\n\n// IsExpired returns whether the node registration has expired.\nfunc (node *Node) IsExpired() bool {\n\t// If Expiry is not set, the client has not indicated that\n\t// it wants an expiry time, it is therefore considered\n\t// to mean \"not expired\"\n\tif node.Expiry == nil || node.Expiry.IsZero() {\n\t\treturn false\n\t}\n\n\treturn time.Since(*node.Expiry) > 0\n}\n\n// IsEphemeral returns if the node is registered as an Ephemeral node.\n// https://tailscale.com/kb/1111/ephemeral-nodes/\nfunc (node *Node) IsEphemeral() bool {\n\treturn node.AuthKey != nil && node.AuthKey.Ephemeral\n}\n\nfunc (node *Node) IPs() []netip.Addr {\n\tvar ret []netip.Addr\n\n\tif node.IPv4 != nil {\n\t\tret = append(ret, *node.IPv4)\n\t}\n\n\tif node.IPv6 != nil {\n\t\tret = append(ret, *node.IPv6)\n\t}\n\n\treturn ret\n}\n\n// HasIP reports if a node has a given IP address.\nfunc (node *Node) HasIP(i netip.Addr) bool {\n\tfor _, ip := range node.IPs() {\n\t\tif ip.Compare(i) == 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n// IsTagged reports if a device is tagged and therefore should not be treated\n// as a user-owned device.\n// When a node has tags, the tags define its identity (not the user).\nfunc (node *Node) IsTagged() bool {\n\treturn len(node.Tags) > 0\n}\n\n// IsUserOwned returns true if node is owned by a user (not tagged).\n// Tagged nodes may have a UserID for \"created by\" tracking, but the tag is the owner.\nfunc (node *Node) IsUserOwned() bool {\n\treturn !node.IsTagged()\n}\n\n// HasTag reports if a node has a given tag.\nfunc (node *Node) HasTag(tag string) bool {\n\treturn slices.Contains(node.Tags, tag)\n}\n\n// TypedUserID returns the UserID as a typed UserID type.\n// Returns 0 if UserID is nil.\nfunc (node *Node) TypedUserID() UserID {\n\tif node.UserID == nil {\n\t\treturn 0\n\t}\n\n\treturn UserID(*node.UserID)\n}\n\nfunc (node *Node) RequestTags() []string {\n\tif node.Hostinfo == nil 
{\n\t\treturn []string{}\n\t}\n\n\treturn node.Hostinfo.RequestTags\n}\n\nfunc (node *Node) Prefixes() []netip.Prefix {\n\tips := node.IPs()\n\tif len(ips) == 0 {\n\t\treturn nil\n\t}\n\n\taddrs := make([]netip.Prefix, 0, len(ips))\n\n\tfor _, nodeAddress := range ips {\n\t\tip := netip.PrefixFrom(nodeAddress, nodeAddress.BitLen())\n\t\taddrs = append(addrs, ip)\n\t}\n\n\treturn addrs\n}\n\n// ExitRoutes returns a list of both exit routes if the\n// node has any exit routes enabled.\n// If none are enabled, it will return nil.\nfunc (node *Node) ExitRoutes() []netip.Prefix {\n\tvar routes []netip.Prefix\n\n\tfor _, route := range node.AnnouncedRoutes() {\n\t\tif tsaddr.IsExitRoute(route) && slices.Contains(node.ApprovedRoutes, route) {\n\t\t\troutes = append(routes, route)\n\t\t}\n\t}\n\n\treturn routes\n}\n\nfunc (node *Node) IsExitNode() bool {\n\treturn len(node.ExitRoutes()) > 0\n}\n\nfunc (node *Node) IPsAsString() []string {\n\tips := node.IPs()\n\tif len(ips) == 0 {\n\t\treturn nil\n\t}\n\n\tret := make([]string, 0, len(ips))\n\n\tfor _, ip := range ips {\n\t\tret = append(ret, ip.String())\n\t}\n\n\treturn ret\n}\n\nfunc (node *Node) InIPSet(set *netipx.IPSet) bool {\n\treturn slices.ContainsFunc(node.IPs(), set.Contains)\n}\n\n// AppendToIPSet adds the individual ips in NodeAddresses to a\n// given netipx.IPSetBuilder.\nfunc (node *Node) AppendToIPSet(build *netipx.IPSetBuilder) {\n\tfor _, ip := range node.IPs() {\n\t\tbuild.Add(ip)\n\t}\n}\n\nfunc (node *Node) CanAccess(matchers []matcher.Match, node2 *Node) bool {\n\tsrc := node.IPs()\n\tallowedIPs := node2.IPs()\n\n\tfor _, matcher := range matchers {\n\t\tif !matcher.SrcsContainsIPs(src...) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif matcher.DestsContainsIP(allowedIPs...) {\n\t\t\treturn true\n\t\t}\n\n\t\t// Check if the node has access to routes that might be part of a\n\t\t// smaller subnet that is served from node2 as a subnet router.\n\t\tif matcher.DestsOverlapsPrefixes(node2.SubnetRoutes()...) {\n\t\t\treturn true\n\t\t}\n\n\t\t// If the dst is \"the internet\" and node2 is an exit node, allow access.\n\t\tif matcher.DestsIsTheInternet() && node2.IsExitNode() {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc (node *Node) CanAccessRoute(matchers []matcher.Match, route netip.Prefix) bool {\n\tsrc := node.IPs()\n\n\tfor _, matcher := range matchers {\n\t\tif matcher.SrcsContainsIPs(src...) && matcher.DestsOverlapsPrefixes(route) {\n\t\t\treturn true\n\t\t}\n\n\t\tif matcher.SrcsOverlapsPrefixes(route) && matcher.DestsContainsIP(src...) 
{\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc (nodes Nodes) FilterByIP(ip netip.Addr) Nodes {\n\tvar found Nodes\n\n\tfor _, node := range nodes {\n\t\tif node.IPv4 != nil && ip == *node.IPv4 {\n\t\t\tfound = append(found, node)\n\t\t\tcontinue\n\t\t}\n\n\t\tif node.IPv6 != nil && ip == *node.IPv6 {\n\t\t\tfound = append(found, node)\n\t\t}\n\t}\n\n\treturn found\n}\n\nfunc (nodes Nodes) ContainsNodeKey(nodeKey key.NodePublic) bool {\n\tfor _, node := range nodes {\n\t\tif node.NodeKey == nodeKey {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc (node *Node) Proto() *v1.Node {\n\tnodeProto := &v1.Node{\n\t\tId:         uint64(node.ID),\n\t\tMachineKey: node.MachineKey.String(),\n\n\t\tNodeKey:  node.NodeKey.String(),\n\t\tDiscoKey: node.DiscoKey.String(),\n\n\t\t// TODO(kradalby): replace list with v4, v6 field?\n\t\tIpAddresses: node.IPsAsString(),\n\t\tName:        node.Hostname,\n\t\tGivenName:   node.GivenName,\n\t\tUser:        nil, // Will be set below based on node type\n\t\tTags:        node.Tags,\n\t\tOnline:      node.IsOnline != nil && *node.IsOnline,\n\n\t\t// Only ApprovedRoutes and AvailableRoutes is set here. SubnetRoutes has\n\t\t// to be populated manually with PrimaryRoute, to ensure it includes the\n\t\t// routes that are actively served from the node.\n\t\tApprovedRoutes:  util.PrefixesToString(node.ApprovedRoutes),\n\t\tAvailableRoutes: util.PrefixesToString(node.AnnouncedRoutes()),\n\n\t\tRegisterMethod: node.RegisterMethodToV1Enum(),\n\n\t\tCreatedAt: timestamppb.New(node.CreatedAt),\n\t}\n\n\t// Set User field based on node ownership\n\t// Note: User will be set to TaggedDevices in the gRPC layer (grpcv1.go)\n\t// for proper MapResponse formatting\n\tif node.User != nil {\n\t\tnodeProto.User = node.User.Proto()\n\t}\n\n\tif node.AuthKey != nil {\n\t\tnodeProto.PreAuthKey = node.AuthKey.Proto()\n\t}\n\n\tif node.LastSeen != nil {\n\t\tnodeProto.LastSeen = timestamppb.New(*node.LastSeen)\n\t}\n\n\tif node.Expiry != nil {\n\t\tnodeProto.Expiry = timestamppb.New(*node.Expiry)\n\t}\n\n\treturn nodeProto\n}\n\nfunc (node *Node) GetFQDN(baseDomain string) (string, error) {\n\tif node.GivenName == \"\" {\n\t\treturn \"\", fmt.Errorf(\"creating valid FQDN: %w\", ErrNodeHasNoGivenName)\n\t}\n\n\thostname := node.GivenName\n\n\tif baseDomain != \"\" {\n\t\thostname = fmt.Sprintf(\n\t\t\t\"%s.%s.\",\n\t\t\tnode.GivenName,\n\t\t\tbaseDomain,\n\t\t)\n\t}\n\n\tif len(hostname) > MaxHostnameLength {\n\t\treturn \"\", fmt.Errorf(\n\t\t\t\"creating valid FQDN (%s): %w\",\n\t\t\thostname,\n\t\t\tErrHostnameTooLong,\n\t\t)\n\t}\n\n\treturn hostname, nil\n}\n\n// AnnouncedRoutes returns the list of routes that the node announces.\n// It should be used instead of checking Hostinfo.RoutableIPs directly.\nfunc (node *Node) AnnouncedRoutes() []netip.Prefix {\n\tif node.Hostinfo == nil {\n\t\treturn nil\n\t}\n\n\treturn node.Hostinfo.RoutableIPs\n}\n\n// SubnetRoutes returns the list of routes (excluding exit routes) that the node\n// announces and are approved.\n//\n// IMPORTANT: This method is used for internal data structures and should NOT be\n// used for the gRPC Proto conversion. For Proto, SubnetRoutes must be populated\n// manually with PrimaryRoutes to ensure it includes only routes actively served\n// by the node. 
See the comment in Proto() method and the implementation in\n// grpcv1.go/nodesToProto.\nfunc (node *Node) SubnetRoutes() []netip.Prefix {\n\tvar routes []netip.Prefix\n\n\tfor _, route := range node.AnnouncedRoutes() {\n\t\tif tsaddr.IsExitRoute(route) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif slices.Contains(node.ApprovedRoutes, route) {\n\t\t\troutes = append(routes, route)\n\t\t}\n\t}\n\n\treturn routes\n}\n\n// IsSubnetRouter reports if the node has any subnet routes.\nfunc (node *Node) IsSubnetRouter() bool {\n\treturn len(node.SubnetRoutes()) > 0\n}\n\n// AllApprovedRoutes returns the combination of SubnetRoutes and ExitRoutes.\nfunc (node *Node) AllApprovedRoutes() []netip.Prefix {\n\treturn append(node.SubnetRoutes(), node.ExitRoutes()...)\n}\n\nfunc (node *Node) String() string {\n\treturn node.Hostname\n}\n\n// MarshalZerologObject implements zerolog.LogObjectMarshaler for safe logging.\n// This method is used with zerolog's EmbedObject() for flat field embedding\n// or Object() for nested logging when multiple nodes are logged.\nfunc (node *Node) MarshalZerologObject(e *zerolog.Event) {\n\tif node == nil {\n\t\treturn\n\t}\n\n\te.Uint64(zf.NodeID, node.ID.Uint64())\n\te.Str(zf.NodeName, node.Hostname)\n\te.Str(zf.MachineKey, node.MachineKey.ShortString())\n\te.Str(zf.NodeKey, node.NodeKey.ShortString())\n\te.Bool(zf.NodeIsTagged, node.IsTagged())\n\te.Bool(zf.NodeExpired, node.IsExpired())\n\n\tif node.IsOnline != nil {\n\t\te.Bool(zf.NodeOnline, *node.IsOnline)\n\t}\n\n\tif len(node.Tags) > 0 {\n\t\te.Strs(zf.NodeTags, node.Tags)\n\t}\n\n\tif node.User != nil {\n\t\te.Str(zf.UserName, node.User.Username())\n\t} else if node.UserID != nil {\n\t\te.Uint(zf.UserID, *node.UserID)\n\t}\n}\n\n// PeerChangeFromMapRequest takes a MapRequest and compares it to the node\n// to produce a PeerChange struct that can be used to updated the node and\n// inform peers about smaller changes to the node.\n// When a field is added to this function, remember to also add it to:\n// - node.ApplyPeerChange\n// - logTracePeerChange in poll.go.\nfunc (node *Node) PeerChangeFromMapRequest(req tailcfg.MapRequest) tailcfg.PeerChange {\n\tret := tailcfg.PeerChange{\n\t\tNodeID: tailcfg.NodeID(node.ID), //nolint:gosec // NodeID is bounded\n\t}\n\n\tif node.NodeKey.String() != req.NodeKey.String() {\n\t\tret.Key = &req.NodeKey\n\t}\n\n\tif node.DiscoKey.String() != req.DiscoKey.String() {\n\t\tret.DiscoKey = &req.DiscoKey\n\t}\n\n\tif node.Hostinfo != nil &&\n\t\tnode.Hostinfo.NetInfo != nil &&\n\t\treq.Hostinfo != nil &&\n\t\treq.Hostinfo.NetInfo != nil &&\n\t\tnode.Hostinfo.NetInfo.PreferredDERP != req.Hostinfo.NetInfo.PreferredDERP {\n\t\tret.DERPRegion = req.Hostinfo.NetInfo.PreferredDERP\n\t}\n\n\tif req.Hostinfo != nil && req.Hostinfo.NetInfo != nil {\n\t\t// If there is no stored Hostinfo or NetInfo, use\n\t\t// the new PreferredDERP.\n\t\tif node.Hostinfo == nil {\n\t\t\tret.DERPRegion = req.Hostinfo.NetInfo.PreferredDERP\n\t\t} else if node.Hostinfo.NetInfo == nil {\n\t\t\tret.DERPRegion = req.Hostinfo.NetInfo.PreferredDERP\n\t\t} else if node.Hostinfo.NetInfo.PreferredDERP != req.Hostinfo.NetInfo.PreferredDERP {\n\t\t\t// If there is a PreferredDERP check if it has changed.\n\t\t\tret.DERPRegion = req.Hostinfo.NetInfo.PreferredDERP\n\t\t}\n\t}\n\n\t// Compare endpoints using order-independent comparison\n\tif EndpointsChanged(node.Endpoints, req.Endpoints) {\n\t\tret.Endpoints = req.Endpoints\n\t}\n\n\tnow := time.Now()\n\tret.LastSeen = &now\n\n\treturn ret\n}\n\n// EndpointsChanged compares two endpoint 
slices and returns true if they differ.\n// The comparison is order-independent - endpoints are sorted before comparison.\nfunc EndpointsChanged(oldEndpoints, newEndpoints []netip.AddrPort) bool {\n\tif len(oldEndpoints) != len(newEndpoints) {\n\t\treturn true\n\t}\n\n\tif len(oldEndpoints) == 0 {\n\t\treturn false\n\t}\n\n\t// Make copies to avoid modifying the original slices\n\toldCopy := slices.Clone(oldEndpoints)\n\tnewCopy := slices.Clone(newEndpoints)\n\n\t// Sort both slices to enable order-independent comparison\n\tslices.SortFunc(oldCopy, func(a, b netip.AddrPort) int {\n\t\treturn a.Compare(b)\n\t})\n\tslices.SortFunc(newCopy, func(a, b netip.AddrPort) int {\n\t\treturn a.Compare(b)\n\t})\n\n\treturn !slices.Equal(oldCopy, newCopy)\n}\n\nfunc (node *Node) RegisterMethodToV1Enum() v1.RegisterMethod {\n\tswitch node.RegisterMethod {\n\tcase \"authkey\":\n\t\treturn v1.RegisterMethod_REGISTER_METHOD_AUTH_KEY\n\tcase \"oidc\":\n\t\treturn v1.RegisterMethod_REGISTER_METHOD_OIDC\n\tcase \"cli\":\n\t\treturn v1.RegisterMethod_REGISTER_METHOD_CLI\n\tdefault:\n\t\treturn v1.RegisterMethod_REGISTER_METHOD_UNSPECIFIED\n\t}\n}\n\n// ApplyHostnameFromHostInfo takes a Hostinfo struct and updates the node.\nfunc (node *Node) ApplyHostnameFromHostInfo(hostInfo *tailcfg.Hostinfo) {\n\tif hostInfo == nil {\n\t\treturn\n\t}\n\n\tnewHostname := strings.ToLower(hostInfo.Hostname)\n\n\terr := util.ValidateHostname(newHostname)\n\tif err != nil {\n\t\tlog.Warn().\n\t\t\tStr(\"node.id\", node.ID.String()).\n\t\t\tStr(\"current_hostname\", node.Hostname).\n\t\t\tStr(\"rejected_hostname\", hostInfo.Hostname).\n\t\t\tErr(err).\n\t\t\tMsg(\"Rejecting invalid hostname update from hostinfo\")\n\n\t\treturn\n\t}\n\n\tif node.Hostname != newHostname {\n\t\tlog.Trace().\n\t\t\tStr(\"node.id\", node.ID.String()).\n\t\t\tStr(\"old_hostname\", node.Hostname).\n\t\t\tStr(\"new_hostname\", newHostname).\n\t\t\tStr(\"old_given_name\", node.GivenName).\n\t\t\tBool(\"given_name_changed\", node.GivenNameHasBeenChanged()).\n\t\t\tMsg(\"Updating hostname from hostinfo\")\n\n\t\tif node.GivenNameHasBeenChanged() {\n\t\t\t// Strip invalid DNS characters for givenName display\n\t\t\tgivenName := strings.ToLower(newHostname)\n\t\t\tgivenName = invalidDNSRegex.ReplaceAllString(givenName, \"\")\n\t\t\tnode.GivenName = givenName\n\t\t}\n\n\t\tnode.Hostname = newHostname\n\n\t\tlog.Trace().\n\t\t\tStr(\"node.id\", node.ID.String()).\n\t\t\tStr(\"new_hostname\", node.Hostname).\n\t\t\tStr(\"new_given_name\", node.GivenName).\n\t\t\tMsg(\"Hostname updated\")\n\t}\n}\n\n// ApplyPeerChange takes a PeerChange struct and updates the node.\nfunc (node *Node) ApplyPeerChange(change *tailcfg.PeerChange) {\n\tif change.Key != nil {\n\t\tnode.NodeKey = *change.Key\n\t}\n\n\tif change.DiscoKey != nil {\n\t\tnode.DiscoKey = *change.DiscoKey\n\t}\n\n\tif change.Online != nil {\n\t\tnode.IsOnline = change.Online\n\t}\n\n\tif change.Endpoints != nil {\n\t\tnode.Endpoints = change.Endpoints\n\t}\n\n\t// This might technically not be useful as we replace\n\t// the whole hostinfo blob when it has changed.\n\tif change.DERPRegion != 0 {\n\t\tif node.Hostinfo == nil {\n\t\t\tnode.Hostinfo = &tailcfg.Hostinfo{\n\t\t\t\tNetInfo: &tailcfg.NetInfo{\n\t\t\t\t\tPreferredDERP: change.DERPRegion,\n\t\t\t\t},\n\t\t\t}\n\t\t} else if node.Hostinfo.NetInfo == nil {\n\t\t\tnode.Hostinfo.NetInfo = &tailcfg.NetInfo{\n\t\t\t\tPreferredDERP: change.DERPRegion,\n\t\t\t}\n\t\t} else {\n\t\t\tnode.Hostinfo.NetInfo.PreferredDERP = 
change.DERPRegion\n\t\t}\n\t}\n\n\tnode.LastSeen = change.LastSeen\n}\n\nfunc (nodes Nodes) String() string {\n\ttemp := make([]string, len(nodes))\n\n\tfor index, node := range nodes {\n\t\ttemp[index] = node.Hostname\n\t}\n\n\treturn fmt.Sprintf(\"[ %s ](%d)\", strings.Join(temp, \", \"), len(temp))\n}\n\nfunc (nodes Nodes) IDMap() map[NodeID]*Node {\n\tret := map[NodeID]*Node{}\n\n\tfor _, node := range nodes {\n\t\tret[node.ID] = node\n\t}\n\n\treturn ret\n}\n\nfunc (nodes Nodes) DebugString() string {\n\tvar sb strings.Builder\n\tsb.WriteString(\"Nodes:\\n\")\n\n\tfor _, node := range nodes {\n\t\tsb.WriteString(node.DebugString())\n\t\tsb.WriteString(\"\\n\")\n\t}\n\n\treturn sb.String()\n}\n\nfunc (node *Node) DebugString() string {\n\tvar sb strings.Builder\n\tfmt.Fprintf(&sb, \"%s(%s):\\n\", node.Hostname, node.ID)\n\n\t// Show ownership status\n\tif node.IsTagged() {\n\t\tfmt.Fprintf(&sb, \"\\tTagged: %v\\n\", node.Tags)\n\n\t\tif node.User != nil {\n\t\t\tfmt.Fprintf(&sb, \"\\tCreated by: %s (%d, %q)\\n\", node.User.Display(), node.User.ID, node.User.Username())\n\t\t}\n\t} else if node.User != nil {\n\t\tfmt.Fprintf(&sb, \"\\tUser-owned: %s (%d, %q)\\n\", node.User.Display(), node.User.ID, node.User.Username())\n\t} else {\n\t\tfmt.Fprintf(&sb, \"\\tOrphaned: no user or tags\\n\")\n\t}\n\n\tfmt.Fprintf(&sb, \"\\tIPs: %v\\n\", node.IPs())\n\tfmt.Fprintf(&sb, \"\\tApprovedRoutes: %v\\n\", node.ApprovedRoutes)\n\tfmt.Fprintf(&sb, \"\\tAnnouncedRoutes: %v\\n\", node.AnnouncedRoutes())\n\tfmt.Fprintf(&sb, \"\\tSubnetRoutes: %v\\n\", node.SubnetRoutes())\n\tfmt.Fprintf(&sb, \"\\tExitRoutes: %v\\n\", node.ExitRoutes())\n\tsb.WriteString(\"\\n\")\n\n\treturn sb.String()\n}\n\n// MarshalZerologObject implements zerolog.LogObjectMarshaler for NodeView.\n// This delegates to the underlying Node's implementation.\nfunc (nv NodeView) MarshalZerologObject(e *zerolog.Event) {\n\tif !nv.Valid() {\n\t\treturn\n\t}\n\n\tnv.ж.MarshalZerologObject(e)\n}\n\n// Owner returns the owner for display purposes.\n// For tagged nodes, returns TaggedDevices. 
For user-owned nodes, returns the user.\nfunc (nv NodeView) Owner() UserView {\n\tif nv.IsTagged() {\n\t\treturn TaggedDevices.View()\n\t}\n\n\treturn nv.User()\n}\n\nfunc (nv NodeView) IPs() []netip.Addr {\n\tif !nv.Valid() {\n\t\treturn nil\n\t}\n\n\treturn nv.ж.IPs()\n}\n\nfunc (nv NodeView) InIPSet(set *netipx.IPSet) bool {\n\tif !nv.Valid() {\n\t\treturn false\n\t}\n\n\treturn nv.ж.InIPSet(set)\n}\n\nfunc (nv NodeView) CanAccess(matchers []matcher.Match, node2 NodeView) bool {\n\tif !nv.Valid() || !node2.Valid() {\n\t\treturn false\n\t}\n\n\treturn nv.ж.CanAccess(matchers, node2.ж)\n}\n\nfunc (nv NodeView) CanAccessRoute(matchers []matcher.Match, route netip.Prefix) bool {\n\tif !nv.Valid() {\n\t\treturn false\n\t}\n\n\treturn nv.ж.CanAccessRoute(matchers, route)\n}\n\nfunc (nv NodeView) AnnouncedRoutes() []netip.Prefix {\n\tif !nv.Valid() {\n\t\treturn nil\n\t}\n\n\treturn nv.ж.AnnouncedRoutes()\n}\n\nfunc (nv NodeView) SubnetRoutes() []netip.Prefix {\n\tif !nv.Valid() {\n\t\treturn nil\n\t}\n\n\treturn nv.ж.SubnetRoutes()\n}\n\nfunc (nv NodeView) IsSubnetRouter() bool {\n\tif !nv.Valid() {\n\t\treturn false\n\t}\n\n\treturn nv.ж.IsSubnetRouter()\n}\n\nfunc (nv NodeView) AllApprovedRoutes() []netip.Prefix {\n\tif !nv.Valid() {\n\t\treturn nil\n\t}\n\n\treturn nv.ж.AllApprovedRoutes()\n}\n\nfunc (nv NodeView) AppendToIPSet(build *netipx.IPSetBuilder) {\n\tif !nv.Valid() {\n\t\treturn\n\t}\n\n\tnv.ж.AppendToIPSet(build)\n}\n\nfunc (nv NodeView) RequestTagsSlice() views.Slice[string] {\n\tif !nv.Valid() || !nv.Hostinfo().Valid() {\n\t\treturn views.Slice[string]{}\n\t}\n\n\treturn nv.Hostinfo().RequestTags()\n}\n\n// IsTagged reports if a device is tagged\n// and therefore should not be treated as a\n// user owned device.\n// Currently, this function only handles tags set\n// via CLI (\"forced tags\" and preauthkeys).\nfunc (nv NodeView) IsTagged() bool {\n\tif !nv.Valid() {\n\t\treturn false\n\t}\n\n\treturn nv.ж.IsTagged()\n}\n\n// IsExpired returns whether the node registration has expired.\nfunc (nv NodeView) IsExpired() bool {\n\tif !nv.Valid() {\n\t\treturn true\n\t}\n\n\treturn nv.ж.IsExpired()\n}\n\n// IsEphemeral returns if the node is registered as an Ephemeral node.\n// https://tailscale.com/kb/1111/ephemeral-nodes/\nfunc (nv NodeView) IsEphemeral() bool {\n\tif !nv.Valid() {\n\t\treturn false\n\t}\n\n\treturn nv.ж.IsEphemeral()\n}\n\n// PeerChangeFromMapRequest takes a MapRequest and compares it to the node\n// to produce a PeerChange struct that can be used to updated the node and\n// inform peers about smaller changes to the node.\nfunc (nv NodeView) PeerChangeFromMapRequest(req tailcfg.MapRequest) tailcfg.PeerChange {\n\tif !nv.Valid() {\n\t\treturn tailcfg.PeerChange{}\n\t}\n\n\treturn nv.ж.PeerChangeFromMapRequest(req)\n}\n\n// GetFQDN returns the fully qualified domain name for the node.\nfunc (nv NodeView) GetFQDN(baseDomain string) (string, error) {\n\tif !nv.Valid() {\n\t\treturn \"\", fmt.Errorf(\"creating valid FQDN: %w\", ErrInvalidNodeView)\n\t}\n\n\treturn nv.ж.GetFQDN(baseDomain)\n}\n\n// ExitRoutes returns a list of both exit routes if the\n// node has any exit routes enabled.\n// If none are enabled, it will return nil.\nfunc (nv NodeView) ExitRoutes() []netip.Prefix {\n\tif !nv.Valid() {\n\t\treturn nil\n\t}\n\n\treturn nv.ж.ExitRoutes()\n}\n\nfunc (nv NodeView) IsExitNode() bool {\n\tif !nv.Valid() {\n\t\treturn false\n\t}\n\n\treturn nv.ж.IsExitNode()\n}\n\n// RequestTags returns the ACL tags that the node is requesting.\nfunc (nv NodeView) RequestTags() 
[]string {\n\tif !nv.Valid() || !nv.Hostinfo().Valid() {\n\t\treturn []string{}\n\t}\n\n\treturn nv.Hostinfo().RequestTags().AsSlice()\n}\n\n// Proto converts the NodeView to a protobuf representation.\nfunc (nv NodeView) Proto() *v1.Node {\n\tif !nv.Valid() {\n\t\treturn nil\n\t}\n\n\treturn nv.ж.Proto()\n}\n\n// HasIP reports if a node has a given IP address.\nfunc (nv NodeView) HasIP(i netip.Addr) bool {\n\tif !nv.Valid() {\n\t\treturn false\n\t}\n\n\treturn nv.ж.HasIP(i)\n}\n\n// HasTag reports if a node has a given tag.\nfunc (nv NodeView) HasTag(tag string) bool {\n\tif !nv.Valid() {\n\t\treturn false\n\t}\n\n\treturn nv.ж.HasTag(tag)\n}\n\n// TypedUserID returns the UserID as a typed UserID type.\n// Returns 0 if UserID is nil or node is invalid.\nfunc (nv NodeView) TypedUserID() UserID {\n\tif !nv.Valid() {\n\t\treturn 0\n\t}\n\n\treturn nv.ж.TypedUserID()\n}\n\n// TailscaleUserID returns the user ID to use in Tailscale protocol.\n// Tagged nodes always return TaggedDevices.ID, user-owned nodes return their actual UserID.\nfunc (nv NodeView) TailscaleUserID() tailcfg.UserID {\n\tif !nv.Valid() {\n\t\treturn 0\n\t}\n\n\tif nv.IsTagged() {\n\t\t//nolint:gosec // G115: TaggedDevices.ID is a constant that fits in int64\n\t\treturn tailcfg.UserID(int64(TaggedDevices.ID))\n\t}\n\n\t//nolint:gosec // G115: UserID values are within int64 range\n\treturn tailcfg.UserID(int64(nv.UserID().Get()))\n}\n\n// Prefixes returns the node IPs as netip.Prefix.\nfunc (nv NodeView) Prefixes() []netip.Prefix {\n\tif !nv.Valid() {\n\t\treturn nil\n\t}\n\n\treturn nv.ж.Prefixes()\n}\n\n// IPsAsString returns the node IPs as strings.\nfunc (nv NodeView) IPsAsString() []string {\n\tif !nv.Valid() {\n\t\treturn nil\n\t}\n\n\treturn nv.ж.IPsAsString()\n}\n\n// HasNetworkChanges checks if the node has network-related changes.\n// Returns true if IPs, announced routes, or approved routes changed.\n// This is primarily used for policy cache invalidation.\nfunc (nv NodeView) HasNetworkChanges(other NodeView) bool {\n\tif !slices.Equal(nv.IPs(), other.IPs()) {\n\t\treturn true\n\t}\n\n\tif !slices.Equal(nv.AnnouncedRoutes(), other.AnnouncedRoutes()) {\n\t\treturn true\n\t}\n\n\tif !slices.Equal(nv.SubnetRoutes(), other.SubnetRoutes()) {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n// HasPolicyChange reports whether the node has changes that affect policy evaluation.\nfunc (nv NodeView) HasPolicyChange(other NodeView) bool {\n\tif nv.UserID() != other.UserID() {\n\t\treturn true\n\t}\n\n\tif !views.SliceEqual(nv.Tags(), other.Tags()) {\n\t\treturn true\n\t}\n\n\tif !slices.Equal(nv.IPs(), other.IPs()) {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n// TailNodes converts a slice of NodeViews into Tailscale tailcfg.Nodes.\nfunc TailNodes(\n\tnodes views.Slice[NodeView],\n\tcapVer tailcfg.CapabilityVersion,\n\tprimaryRouteFunc RouteFunc,\n\tcfg *Config,\n) ([]*tailcfg.Node, error) {\n\ttNodes := make([]*tailcfg.Node, 0, nodes.Len())\n\n\tfor _, node := range nodes.All() {\n\t\ttNode, err := node.TailNode(capVer, primaryRouteFunc, cfg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\ttNodes = append(tNodes, tNode)\n\t}\n\n\treturn tNodes, nil\n}\n\n// TailNode converts a NodeView into a Tailscale tailcfg.Node.\nfunc (nv NodeView) TailNode(\n\tcapVer tailcfg.CapabilityVersion,\n\tprimaryRouteFunc RouteFunc,\n\tcfg *Config,\n) (*tailcfg.Node, error) {\n\tif !nv.Valid() {\n\t\treturn nil, ErrInvalidNodeView\n\t}\n\n\thostname, err := nv.GetFQDN(cfg.BaseDomain)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar 
derp int\n\t// TODO(kradalby): legacyDERP was removed in tailscale/tailscale@2fc4455e6dd9ab7f879d4e2f7cffc2be81f14077\n\t// and should be removed after 111 is the minimum capver.\n\tlegacyDERP := \"127.3.3.40:0\" // Zero means disconnected or unknown.\n\tif nv.Hostinfo().Valid() && nv.Hostinfo().NetInfo().Valid() {\n\t\tlegacyDERP = fmt.Sprintf(\"127.3.3.40:%d\", nv.Hostinfo().NetInfo().PreferredDERP())\n\t\tderp = nv.Hostinfo().NetInfo().PreferredDERP()\n\t}\n\n\tvar keyExpiry time.Time\n\tif nv.Expiry().Valid() {\n\t\tkeyExpiry = nv.Expiry().Get()\n\t}\n\n\tprimaryRoutes := primaryRouteFunc(nv.ID())\n\tallowedIPs := slices.Concat(nv.Prefixes(), primaryRoutes, nv.ExitRoutes())\n\tslices.SortFunc(allowedIPs, netip.Prefix.Compare)\n\n\tcapMap := tailcfg.NodeCapMap{\n\t\ttailcfg.CapabilityAdmin: []tailcfg.RawMessage{},\n\t\ttailcfg.CapabilitySSH:   []tailcfg.RawMessage{},\n\t}\n\tif cfg.RandomizeClientPort {\n\t\tcapMap[tailcfg.NodeAttrRandomizeClientPort] = []tailcfg.RawMessage{}\n\t}\n\n\tif cfg.Taildrop.Enabled {\n\t\tcapMap[tailcfg.CapabilityFileSharing] = []tailcfg.RawMessage{}\n\t}\n\n\ttNode := tailcfg.Node{\n\t\t//nolint:gosec // G115: NodeID values are within int64 range\n\t\tID:       tailcfg.NodeID(nv.ID()),\n\t\tStableID: nv.ID().StableID(),\n\t\tName:     hostname,\n\t\tCap:      capVer,\n\t\tCapMap:   capMap,\n\n\t\tUser: nv.TailscaleUserID(),\n\n\t\tKey:       nv.NodeKey(),\n\t\tKeyExpiry: keyExpiry.UTC(),\n\n\t\tMachine:          nv.MachineKey(),\n\t\tDiscoKey:         nv.DiscoKey(),\n\t\tAddresses:        nv.Prefixes(),\n\t\tPrimaryRoutes:    primaryRoutes,\n\t\tAllowedIPs:       allowedIPs,\n\t\tEndpoints:        nv.Endpoints().AsSlice(),\n\t\tHomeDERP:         derp,\n\t\tLegacyDERPString: legacyDERP,\n\t\tHostinfo:         nv.Hostinfo(),\n\t\tCreated:          nv.CreatedAt().UTC(),\n\n\t\tOnline: nv.IsOnline().Clone(),\n\n\t\tTags: nv.Tags().AsSlice(),\n\n\t\tMachineAuthorized: !nv.IsExpired(),\n\t\tExpired:           nv.IsExpired(),\n\t}\n\n\t// Set LastSeen only for offline nodes to avoid confusing Tailscale clients\n\t// during rapid reconnection cycles. Online nodes should not have LastSeen set\n\t// as this can make clients interpret them as \"not online\" despite Online=true.\n\tif nv.LastSeen().Valid() && nv.IsOnline().Valid() && !nv.IsOnline().Get() {\n\t\tlastSeen := nv.LastSeen().Get()\n\t\ttNode.LastSeen = &lastSeen\n\t}\n\n\treturn &tNode, nil\n}\n"
  },
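  {
    "path": "hscontrol/types/node_example_test.go",
    "content": "package types\n\nimport (\n\t\"fmt\"\n\t\"net/netip\"\n)\n\n// Example_endpointsChanged is an illustrative sketch added for\n// documentation; it is not part of the upstream headscale sources.\n// It shows that EndpointsChanged in node.go treats endpoint slices as\n// unordered sets: reordering alone is not a change, while adding,\n// removing, or altering an endpoint is.\nfunc Example_endpointsChanged() {\n\told := []netip.AddrPort{\n\t\tnetip.MustParseAddrPort(\"192.0.2.1:41641\"),\n\t\tnetip.MustParseAddrPort(\"198.51.100.2:41641\"),\n\t}\n\n\t// Same endpoints, different order: not a change.\n\treordered := []netip.AddrPort{old[1], old[0]}\n\n\t// One endpoint replaced: a change.\n\taltered := []netip.AddrPort{\n\t\told[0],\n\t\tnetip.MustParseAddrPort(\"198.51.100.2:3478\"),\n\t}\n\n\tfmt.Println(EndpointsChanged(old, reordered))\n\tfmt.Println(EndpointsChanged(old, altered))\n\t// Output:\n\t// false\n\t// true\n}\n"
  },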
  {
    "path": "hscontrol/types/node_benchmark_test.go",
    "content": "package types\n\nimport (\n\t\"fmt\"\n\t\"net/netip\"\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/policy/matcher\"\n\t\"tailscale.com/tailcfg\"\n)\n\nfunc BenchmarkNodeViewCanAccess(b *testing.B) {\n\taddr := func(ip string) *netip.Addr {\n\t\tparsed := netip.MustParseAddr(ip)\n\t\treturn &parsed\n\t}\n\n\trules := []tailcfg.FilterRule{\n\t\t{\n\t\t\tSrcIPs: []string{\"100.64.0.1/32\"},\n\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t{\n\t\t\t\t\tIP:    \"100.64.0.2/32\",\n\t\t\t\t\tPorts: tailcfg.PortRangeAny,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tmatchers := matcher.MatchesFromFilterRules(rules)\n\n\tderpLatency := make(map[string]float64, 256)\n\tfor i := range 128 {\n\t\tderpLatency[fmt.Sprintf(\"%d-v4\", i)] = float64(i) / 10\n\t\tderpLatency[fmt.Sprintf(\"%d-v6\", i)] = float64(i) / 10\n\t}\n\n\tsrc := Node{\n\t\tIPv4: addr(\"100.64.0.1\"),\n\t}\n\tdst := Node{\n\t\tIPv4: addr(\"100.64.0.2\"),\n\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\tNetInfo: &tailcfg.NetInfo{\n\t\t\t\tDERPLatency: derpLatency,\n\t\t\t},\n\t\t},\n\t}\n\n\tsrcView := src.View()\n\tdstView := dst.View()\n\n\tif !srcView.CanAccess(matchers, dstView) {\n\t\tb.Fatal(\"benchmark setup error: expected source to access destination\")\n\t}\n\n\tb.Run(\"pointer\", func(b *testing.B) {\n\t\tb.ReportAllocs()\n\t\tb.ResetTimer()\n\n\t\tfor b.Loop() {\n\t\t\tsrcView.CanAccess(matchers, dstView)\n\t\t}\n\t})\n\n\tb.Run(\"struct clone\", func(b *testing.B) {\n\t\tb.ReportAllocs()\n\t\tb.ResetTimer()\n\n\t\tfor b.Loop() {\n\t\t\tsrc.CanAccess(matchers, dstView.AsStruct())\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "hscontrol/types/node_tags_test.go",
    "content": "package types\n\nimport (\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"gorm.io/gorm\"\n)\n\n// TestNodeIsTagged tests the IsTagged() method for determining if a node is tagged.\nfunc TestNodeIsTagged(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tnode Node\n\t\twant bool\n\t}{\n\t\t{\n\t\t\tname: \"node with tags - is tagged\",\n\t\t\tnode: Node{\n\t\t\t\tTags: []string{\"tag:server\", \"tag:prod\"},\n\t\t\t},\n\t\t\twant: true,\n\t\t},\n\t\t{\n\t\t\tname: \"node with single tag - is tagged\",\n\t\t\tnode: Node{\n\t\t\t\tTags: []string{\"tag:web\"},\n\t\t\t},\n\t\t\twant: true,\n\t\t},\n\t\t{\n\t\t\tname: \"node with no tags - not tagged\",\n\t\t\tnode: Node{\n\t\t\t\tTags: []string{},\n\t\t\t},\n\t\t\twant: false,\n\t\t},\n\t\t{\n\t\t\tname: \"node with nil tags - not tagged\",\n\t\t\tnode: Node{\n\t\t\t\tTags: nil,\n\t\t\t},\n\t\t\twant: false,\n\t\t},\n\t\t{\n\t\t\t// Tags should be copied from AuthKey during registration, so a node\n\t\t\t// with only AuthKey.Tags and no Tags would be invalid in practice.\n\t\t\t// IsTagged() only checks node.Tags, not AuthKey.Tags.\n\t\t\tname: \"node registered with tagged authkey only - not tagged (tags should be copied)\",\n\t\t\tnode: Node{\n\t\t\t\tAuthKey: &PreAuthKey{\n\t\t\t\t\tTags: []string{\"tag:database\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: false,\n\t\t},\n\t\t{\n\t\t\tname: \"node with both tags and authkey tags - is tagged\",\n\t\t\tnode: Node{\n\t\t\t\tTags: []string{\"tag:server\"},\n\t\t\t\tAuthKey: &PreAuthKey{\n\t\t\t\t\tTags: []string{\"tag:database\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: true,\n\t\t},\n\t\t{\n\t\t\tname: \"node with user and no tags - not tagged\",\n\t\t\tnode: Node{\n\t\t\t\tUserID: new(uint(42)),\n\t\t\t\tTags:   []string{},\n\t\t\t},\n\t\t\twant: false,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot := tt.node.IsTagged()\n\t\t\tassert.Equal(t, tt.want, got, \"IsTagged() returned unexpected value\")\n\t\t})\n\t}\n}\n\n// TestNodeViewIsTagged tests the IsTagged() method on NodeView.\nfunc TestNodeViewIsTagged(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tnode Node\n\t\twant bool\n\t}{\n\t\t{\n\t\t\tname: \"tagged node via Tags field\",\n\t\t\tnode: Node{\n\t\t\t\tTags: []string{\"tag:server\"},\n\t\t\t},\n\t\t\twant: true,\n\t\t},\n\t\t{\n\t\t\t// Tags should be copied from AuthKey during registration, so a node\n\t\t\t// with only AuthKey.Tags and no Tags would be invalid in practice.\n\t\t\tname: \"node with only AuthKey tags - not tagged (tags should be copied)\",\n\t\t\tnode: Node{\n\t\t\t\tAuthKey: &PreAuthKey{\n\t\t\t\t\tTags: []string{\"tag:web\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: false, // IsTagged() only checks node.Tags\n\t\t},\n\t\t{\n\t\t\tname: \"user-owned node\",\n\t\t\tnode: Node{\n\t\t\t\tUserID: new(uint(1)),\n\t\t\t},\n\t\t\twant: false,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tview := tt.node.View()\n\t\t\tgot := view.IsTagged()\n\t\t\tassert.Equal(t, tt.want, got, \"NodeView.IsTagged() returned unexpected value\")\n\t\t})\n\t}\n}\n\n// TestNodeHasTag tests the HasTag() method for checking specific tag membership.\nfunc TestNodeHasTag(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tnode Node\n\t\ttag  string\n\t\twant bool\n\t}{\n\t\t{\n\t\t\tname: \"node has the tag\",\n\t\t\tnode: Node{\n\t\t\t\tTags: []string{\"tag:server\", \"tag:prod\"},\n\t\t\t},\n\t\t\ttag:  
\"tag:server\",\n\t\t\twant: true,\n\t\t},\n\t\t{\n\t\t\tname: \"node does not have the tag\",\n\t\t\tnode: Node{\n\t\t\t\tTags: []string{\"tag:server\", \"tag:prod\"},\n\t\t\t},\n\t\t\ttag:  \"tag:web\",\n\t\t\twant: false,\n\t\t},\n\t\t{\n\t\t\t// Tags should be copied from AuthKey during registration\n\t\t\t// HasTag() only checks node.Tags, not AuthKey.Tags\n\t\t\tname: \"node has tag only in authkey - returns false\",\n\t\t\tnode: Node{\n\t\t\t\tAuthKey: &PreAuthKey{\n\t\t\t\t\tTags: []string{\"tag:database\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\ttag:  \"tag:database\",\n\t\t\twant: false,\n\t\t},\n\t\t{\n\t\t\t// node.Tags is what matters, not AuthKey.Tags\n\t\t\tname: \"node has tag in Tags but not in AuthKey\",\n\t\t\tnode: Node{\n\t\t\t\tTags: []string{\"tag:server\"},\n\t\t\t\tAuthKey: &PreAuthKey{\n\t\t\t\t\tTags: []string{\"tag:database\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\ttag:  \"tag:server\",\n\t\t\twant: true,\n\t\t},\n\t\t{\n\t\t\tname: \"invalid tag format still returns false\",\n\t\t\tnode: Node{\n\t\t\t\tTags: []string{\"tag:server\"},\n\t\t\t},\n\t\t\ttag:  \"invalid-tag\",\n\t\t\twant: false,\n\t\t},\n\t\t{\n\t\t\tname: \"empty tag returns false\",\n\t\t\tnode: Node{\n\t\t\t\tTags: []string{\"tag:server\"},\n\t\t\t},\n\t\t\ttag:  \"\",\n\t\t\twant: false,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot := tt.node.HasTag(tt.tag)\n\t\t\tassert.Equal(t, tt.want, got, \"HasTag() returned unexpected value\")\n\t\t})\n\t}\n}\n\n// TestNodeTagsImmutableAfterRegistration tests that tags can only be set during registration.\nfunc TestNodeTagsImmutableAfterRegistration(t *testing.T) {\n\t// Test that a node registered with tags keeps them\n\ttaggedNode := Node{\n\t\tID:   1,\n\t\tTags: []string{\"tag:server\"},\n\t\tAuthKey: &PreAuthKey{\n\t\t\tTags: []string{\"tag:server\"},\n\t\t},\n\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t}\n\n\t// Node should be tagged\n\tassert.True(t, taggedNode.IsTagged(), \"Node registered with tags should be tagged\")\n\n\t// Node should have the tag\n\thas := taggedNode.HasTag(\"tag:server\")\n\tassert.True(t, has, \"Node should have the tag it was registered with\")\n\n\t// Test that a user-owned node is not tagged\n\tuserNode := Node{\n\t\tID:             2,\n\t\tUserID:         new(uint(42)),\n\t\tTags:           []string{},\n\t\tRegisterMethod: util.RegisterMethodOIDC,\n\t}\n\n\tassert.False(t, userNode.IsTagged(), \"User-owned node should not be tagged\")\n}\n\n// TestNodeOwnershipModel tests the tags-as-identity model.\nfunc TestNodeOwnershipModel(t *testing.T) {\n\ttests := []struct {\n\t\tname         string\n\t\tnode         Node\n\t\twantIsTagged bool\n\t\tdescription  string\n\t}{\n\t\t{\n\t\t\tname: \"tagged node has tags, UserID is informational\",\n\t\t\tnode: Node{\n\t\t\t\tID:     1,\n\t\t\t\tUserID: new(uint(5)), // \"created by\" user 5\n\t\t\t\tTags:   []string{\"tag:server\"},\n\t\t\t},\n\t\t\twantIsTagged: true,\n\t\t\tdescription:  \"Tagged nodes may have UserID set for tracking, but ownership is defined by tags\",\n\t\t},\n\t\t{\n\t\t\tname: \"user-owned node has no tags\",\n\t\t\tnode: Node{\n\t\t\t\tID:     2,\n\t\t\t\tUserID: new(uint(5)),\n\t\t\t\tTags:   []string{},\n\t\t\t},\n\t\t\twantIsTagged: false,\n\t\t\tdescription:  \"User-owned nodes are owned by the user, not by tags\",\n\t\t},\n\t\t{\n\t\t\t// Tags should be copied from AuthKey to Node during registration\n\t\t\t// IsTagged() only checks node.Tags, not AuthKey.Tags\n\t\t\tname: \"node with only authkey tags - not 
tagged (tags should be copied)\",\n\t\t\tnode: Node{\n\t\t\t\tID:     3,\n\t\t\t\tUserID: new(uint(5)), // \"created by\" user 5\n\t\t\t\tAuthKey: &PreAuthKey{\n\t\t\t\t\tTags: []string{\"tag:database\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantIsTagged: false,\n\t\t\tdescription:  \"IsTagged() only checks node.Tags; AuthKey.Tags should be copied during registration\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot := tt.node.IsTagged()\n\t\t\tassert.Equal(t, tt.wantIsTagged, got, tt.description)\n\t\t})\n\t}\n}\n\n// TestUserTypedID tests the TypedID() helper method.\nfunc TestUserTypedID(t *testing.T) {\n\tuser := User{\n\t\tModel: gorm.Model{ID: 42},\n\t}\n\n\ttypedID := user.TypedID()\n\tassert.NotNil(t, typedID, \"TypedID() should return non-nil pointer\")\n\tassert.Equal(t, UserID(42), *typedID, \"TypedID() should return correct UserID value\")\n}\n"
  },
  {
    "path": "hscontrol/types/node_test.go",
    "content": "package types\n\nimport (\n\t\"fmt\"\n\t\"net/netip\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/google/go-cmp/cmp/cmpopts\"\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\t\"github.com/juanfont/headscale/hscontrol/policy/matcher\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/key\"\n)\n\nfunc Test_NodeCanAccess(t *testing.T) {\n\tiap := func(ipStr string) *netip.Addr {\n\t\tip := netip.MustParseAddr(ipStr)\n\t\treturn &ip\n\t}\n\ttests := []struct {\n\t\tname  string\n\t\tnode1 Node\n\t\tnode2 Node\n\t\trules []tailcfg.FilterRule\n\t\twant  bool\n\t}{\n\t\t{\n\t\t\tname: \"no-rules\",\n\t\t\tnode1: Node{\n\t\t\t\tIPv4: iap(\"10.0.0.1\"),\n\t\t\t},\n\t\t\tnode2: Node{\n\t\t\t\tIPv4: iap(\"10.0.0.2\"),\n\t\t\t},\n\t\t\trules: []tailcfg.FilterRule{},\n\t\t\twant:  false,\n\t\t},\n\t\t{\n\t\t\tname: \"wildcard\",\n\t\t\tnode1: Node{\n\t\t\t\tIPv4: iap(\"10.0.0.1\"),\n\t\t\t},\n\t\t\tnode2: Node{\n\t\t\t\tIPv4: iap(\"10.0.0.2\"),\n\t\t\t},\n\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"*\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP:    \"*\",\n\t\t\t\t\t\t\tPorts: tailcfg.PortRangeAny,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: true,\n\t\t},\n\t\t{\n\t\t\tname: \"other-cant-access-src\",\n\t\t\tnode1: Node{\n\t\t\t\tIPv4: iap(\"100.64.0.1\"),\n\t\t\t},\n\t\t\tnode2: Node{\n\t\t\t\tIPv4: iap(\"100.64.0.3\"),\n\t\t\t},\n\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.2/32\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.64.0.3/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: false,\n\t\t},\n\t\t{\n\t\t\tname: \"dest-cant-access-src\",\n\t\t\tnode1: Node{\n\t\t\t\tIPv4: iap(\"100.64.0.3\"),\n\t\t\t},\n\t\t\tnode2: Node{\n\t\t\t\tIPv4: iap(\"100.64.0.2\"),\n\t\t\t},\n\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.2/32\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.64.0.3/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: false,\n\t\t},\n\t\t{\n\t\t\tname: \"src-can-access-dest\",\n\t\t\tnode1: Node{\n\t\t\t\tIPv4: iap(\"100.64.0.2\"),\n\t\t\t},\n\t\t\tnode2: Node{\n\t\t\t\tIPv4: iap(\"100.64.0.3\"),\n\t\t\t},\n\t\t\trules: []tailcfg.FilterRule{\n\t\t\t\t{\n\t\t\t\t\tSrcIPs: []string{\"100.64.0.2/32\"},\n\t\t\t\t\tDstPorts: []tailcfg.NetPortRange{\n\t\t\t\t\t\t{IP: \"100.64.0.3/32\", Ports: tailcfg.PortRangeAny},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tmatchers := matcher.MatchesFromFilterRules(tt.rules)\n\t\t\tgot := tt.node1.CanAccess(matchers, &tt.node2)\n\n\t\t\tif got != tt.want {\n\t\t\t\tt.Errorf(\"canAccess() failed: want (%t), got (%t)\", tt.want, got)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestNodeFQDN(t *testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\tnode    Node\n\t\tdomain  string\n\t\twant    string\n\t\twantErr string\n\t}{\n\t\t{\n\t\t\tname: \"no-dnsconfig-with-username\",\n\t\t\tnode: Node{\n\t\t\t\tGivenName: \"test\",\n\t\t\t\tUser: &User{\n\t\t\t\t\tName: \"user\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tdomain: \"example.com\",\n\t\t\twant:   \"test.example.com.\",\n\t\t},\n\t\t{\n\t\t\tname: \"all-set\",\n\t\t\tnode: Node{\n\t\t\t\tGivenName: 
\"test\",\n\t\t\t\tUser: &User{\n\t\t\t\t\tName: \"user\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tdomain: \"example.com\",\n\t\t\twant:   \"test.example.com.\",\n\t\t},\n\t\t{\n\t\t\tname: \"no-given-name\",\n\t\t\tnode: Node{\n\t\t\t\tUser: &User{\n\t\t\t\t\tName: \"user\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tdomain:  \"example.com\",\n\t\t\twantErr: \"creating valid FQDN: node has no given name\",\n\t\t},\n\t\t{\n\t\t\tname: \"too-long-username\",\n\t\t\tnode: Node{\n\t\t\t\tGivenName: strings.Repeat(\"a\", 256),\n\t\t\t},\n\t\t\tdomain:  \"example.com\",\n\t\t\twantErr: fmt.Sprintf(\"creating valid FQDN (%s.example.com.): hostname too long, cannot accept more than 255 ASCII chars\", strings.Repeat(\"a\", 256)),\n\t\t},\n\t\t{\n\t\t\tname: \"no-dnsconfig\",\n\t\t\tnode: Node{\n\t\t\t\tGivenName: \"test\",\n\t\t\t\tUser: &User{\n\t\t\t\t\tName: \"user\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tdomain: \"example.com\",\n\t\t\twant:   \"test.example.com.\",\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tgot, err := tc.node.GetFQDN(tc.domain)\n\n\t\t\tt.Logf(\"GOT: %q, %q\", got, tc.domain)\n\n\t\t\tif (err != nil) && (err.Error() != tc.wantErr) {\n\t\t\t\tt.Errorf(\"GetFQDN() error = %s, wantErr %s\", err, tc.wantErr)\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif diff := cmp.Diff(tc.want, got); diff != \"\" {\n\t\t\t\tt.Errorf(\"GetFQDN unexpected result (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestPeerChangeFromMapRequest(t *testing.T) {\n\tnKeys := []key.NodePublic{\n\t\tkey.NewNode().Public(),\n\t\tkey.NewNode().Public(),\n\t\tkey.NewNode().Public(),\n\t}\n\n\tdKeys := []key.DiscoPublic{\n\t\tkey.NewDisco().Public(),\n\t\tkey.NewDisco().Public(),\n\t\tkey.NewDisco().Public(),\n\t}\n\n\ttests := []struct {\n\t\tname   string\n\t\tnode   Node\n\t\tmapReq tailcfg.MapRequest\n\t\twant   tailcfg.PeerChange\n\t}{\n\t\t{\n\t\t\tname: \"preferred-derp-changed\",\n\t\t\tnode: Node{\n\t\t\t\tID:        1,\n\t\t\t\tNodeKey:   nKeys[0],\n\t\t\t\tDiscoKey:  dKeys[0],\n\t\t\t\tEndpoints: []netip.AddrPort{},\n\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\tNetInfo: &tailcfg.NetInfo{\n\t\t\t\t\t\tPreferredDERP: 998,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tmapReq: tailcfg.MapRequest{\n\t\t\t\tNodeKey:  nKeys[0],\n\t\t\t\tDiscoKey: dKeys[0],\n\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\tNetInfo: &tailcfg.NetInfo{\n\t\t\t\t\t\tPreferredDERP: 999,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: tailcfg.PeerChange{\n\t\t\t\tNodeID:     1,\n\t\t\t\tDERPRegion: 999,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"preferred-derp-no-changed\",\n\t\t\tnode: Node{\n\t\t\t\tID:        1,\n\t\t\t\tNodeKey:   nKeys[0],\n\t\t\t\tDiscoKey:  dKeys[0],\n\t\t\t\tEndpoints: []netip.AddrPort{},\n\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\tNetInfo: &tailcfg.NetInfo{\n\t\t\t\t\t\tPreferredDERP: 100,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tmapReq: tailcfg.MapRequest{\n\t\t\t\tNodeKey:  nKeys[0],\n\t\t\t\tDiscoKey: dKeys[0],\n\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\tNetInfo: &tailcfg.NetInfo{\n\t\t\t\t\t\tPreferredDERP: 100,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: tailcfg.PeerChange{\n\t\t\t\tNodeID:     1,\n\t\t\t\tDERPRegion: 0,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"preferred-derp-no-mapreq-netinfo\",\n\t\t\tnode: Node{\n\t\t\t\tID:        1,\n\t\t\t\tNodeKey:   nKeys[0],\n\t\t\t\tDiscoKey:  dKeys[0],\n\t\t\t\tEndpoints: []netip.AddrPort{},\n\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\tNetInfo: &tailcfg.NetInfo{\n\t\t\t\t\t\tPreferredDERP: 
200,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tmapReq: tailcfg.MapRequest{\n\t\t\t\tNodeKey:  nKeys[0],\n\t\t\t\tDiscoKey: dKeys[0],\n\t\t\t\tHostinfo: &tailcfg.Hostinfo{},\n\t\t\t},\n\t\t\twant: tailcfg.PeerChange{\n\t\t\t\tNodeID:     1,\n\t\t\t\tDERPRegion: 0,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"preferred-derp-no-node-netinfo\",\n\t\t\tnode: Node{\n\t\t\t\tID:        1,\n\t\t\t\tNodeKey:   nKeys[0],\n\t\t\t\tDiscoKey:  dKeys[0],\n\t\t\t\tEndpoints: []netip.AddrPort{},\n\t\t\t\tHostinfo:  &tailcfg.Hostinfo{},\n\t\t\t},\n\t\t\tmapReq: tailcfg.MapRequest{\n\t\t\t\tNodeKey:  nKeys[0],\n\t\t\t\tDiscoKey: dKeys[0],\n\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\tNetInfo: &tailcfg.NetInfo{\n\t\t\t\t\t\tPreferredDERP: 200,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: tailcfg.PeerChange{\n\t\t\t\tNodeID:     1,\n\t\t\t\tDERPRegion: 200,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tgot := tc.node.PeerChangeFromMapRequest(tc.mapReq)\n\n\t\t\tif diff := cmp.Diff(tc.want, got, cmpopts.IgnoreFields(tailcfg.PeerChange{}, \"LastSeen\")); diff != \"\" {\n\t\t\t\tt.Errorf(\"Patch unexpected result (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestApplyHostnameFromHostInfo(t *testing.T) {\n\ttests := []struct {\n\t\tname       string\n\t\tnodeBefore Node\n\t\tchange     *tailcfg.Hostinfo\n\t\twant       Node\n\t}{\n\t\t{\n\t\t\tname: \"hostinfo-not-exists\",\n\t\t\tnodeBefore: Node{\n\t\t\t\tGivenName: \"manual-test.local\",\n\t\t\t\tHostname:  \"TestHost.Local\",\n\t\t\t},\n\t\t\tchange: nil,\n\t\t\twant: Node{\n\t\t\t\tGivenName: \"manual-test.local\",\n\t\t\t\tHostname:  \"TestHost.Local\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"hostinfo-exists-no-automatic-givenName\",\n\t\t\tnodeBefore: Node{\n\t\t\t\tGivenName: \"manual-test.local\",\n\t\t\t\tHostname:  \"TestHost.Local\",\n\t\t\t},\n\t\t\tchange: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"NewHostName.Local\",\n\t\t\t},\n\t\t\twant: Node{\n\t\t\t\tGivenName: \"manual-test.local\",\n\t\t\t\tHostname:  \"newhostname.local\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"hostinfo-exists-automatic-givenName\",\n\t\t\tnodeBefore: Node{\n\t\t\t\tGivenName: \"automaticname.test\",\n\t\t\t\tHostname:  \"AutomaticName.Test\",\n\t\t\t},\n\t\t\tchange: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"NewHostName.Local\",\n\t\t\t},\n\t\t\twant: Node{\n\t\t\t\tGivenName: \"newhostname.local\",\n\t\t\t\tHostname:  \"newhostname.local\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"invalid-hostname-with-emoji-rejected\",\n\t\t\tnodeBefore: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t\tchange: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"hostname-with-💩\",\n\t\t\t},\n\t\t\twant: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\", // Should reject and keep old hostname\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"invalid-hostname-with-unicode-rejected\",\n\t\t\tnodeBefore: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t\tchange: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"我的电脑\", //nolint:gosmopolitan // intentional i18n test data\n\t\t\t},\n\t\t\twant: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\", // Should keep old hostname\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"invalid-hostname-with-special-chars-rejected\",\n\t\t\tnodeBefore: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  
\"valid-hostname\",\n\t\t\t},\n\t\t\tchange: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"node-with-special!@#$%\",\n\t\t\t},\n\t\t\twant: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\", // Should reject and keep old hostname\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"invalid-hostname-too-short-rejected\",\n\t\t\tnodeBefore: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t\tchange: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"a\",\n\t\t\t},\n\t\t\twant: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\", // Should keep old hostname\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"invalid-hostname-uppercase-accepted-lowercased\",\n\t\t\tnodeBefore: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t\tchange: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"ValidHostName\",\n\t\t\t},\n\t\t\twant: Node{\n\t\t\t\tGivenName: \"validhostname\", // GivenName follows hostname when it changes\n\t\t\t\tHostname:  \"validhostname\", // Uppercase is lowercased, not rejected\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"uppercase_to_lowercase_accepted\",\n\t\t\tnodeBefore: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t\tchange: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"User2-Host\",\n\t\t\t},\n\t\t\twant: Node{\n\t\t\t\tGivenName: \"user2-host\",\n\t\t\t\tHostname:  \"user2-host\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"at_sign_rejected\",\n\t\t\tnodeBefore: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t\tchange: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"Test@Host\",\n\t\t\t},\n\t\t\twant: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"chinese_chars_with_dash_rejected\",\n\t\t\tnodeBefore: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t\tchange: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"server-北京-01\", //nolint:gosmopolitan // intentional i18n test data\n\t\t\t},\n\t\t\twant: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"chinese_only_rejected\",\n\t\t\tnodeBefore: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t\tchange: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"我的电脑\", //nolint:gosmopolitan // intentional i18n test data\n\t\t\t},\n\t\t\twant: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"emoji_with_text_rejected\",\n\t\t\tnodeBefore: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t\tchange: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"laptop-🚀\",\n\t\t\t},\n\t\t\twant: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"mixed_chinese_emoji_rejected\",\n\t\t\tnodeBefore: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t\tchange: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"测试💻机器\", //nolint:gosmopolitan // intentional i18n test data\n\t\t\t},\n\t\t\twant: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"only_emojis_rejected\",\n\t\t\tnodeBefore: Node{\n\t\t\t\tGivenName: 
\"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t\tchange: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"🎉🎊\",\n\t\t\t},\n\t\t\twant: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"only_at_signs_rejected\",\n\t\t\tnodeBefore: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t\tchange: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"@@@\",\n\t\t\t},\n\t\t\twant: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"starts_with_dash_rejected\",\n\t\t\tnodeBefore: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t\tchange: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"-test\",\n\t\t\t},\n\t\t\twant: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ends_with_dash_rejected\",\n\t\t\tnodeBefore: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t\tchange: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"test-\",\n\t\t\t},\n\t\t\twant: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"too_long_hostname_rejected\",\n\t\t\tnodeBefore: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t\tchange: &tailcfg.Hostinfo{\n\t\t\t\tHostname: strings.Repeat(\"t\", 65),\n\t\t\t},\n\t\t\twant: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"underscore_rejected\",\n\t\t\tnodeBefore: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t\tchange: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"test_node\",\n\t\t\t},\n\t\t\twant: Node{\n\t\t\t\tGivenName: \"valid-hostname\",\n\t\t\t\tHostname:  \"valid-hostname\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\ttt.nodeBefore.ApplyHostnameFromHostInfo(tt.change)\n\n\t\t\tif diff := cmp.Diff(tt.want, tt.nodeBefore, util.Comparers...); diff != \"\" {\n\t\t\t\tt.Errorf(\"Patch unexpected result (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestApplyPeerChange(t *testing.T) {\n\ttests := []struct {\n\t\tname       string\n\t\tnodeBefore Node\n\t\tchange     *tailcfg.PeerChange\n\t\twant       Node\n\t}{\n\t\t{\n\t\t\tname:       \"hostinfo-and-netinfo-not-exists\",\n\t\t\tnodeBefore: Node{},\n\t\t\tchange: &tailcfg.PeerChange{\n\t\t\t\tDERPRegion: 1,\n\t\t\t},\n\t\t\twant: Node{\n\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\tNetInfo: &tailcfg.NetInfo{\n\t\t\t\t\t\tPreferredDERP: 1,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"hostinfo-netinfo-not-exists\",\n\t\t\tnodeBefore: Node{\n\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\tHostname: \"test\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tchange: &tailcfg.PeerChange{\n\t\t\t\tDERPRegion: 3,\n\t\t\t},\n\t\t\twant: Node{\n\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\tHostname: \"test\",\n\t\t\t\t\tNetInfo: &tailcfg.NetInfo{\n\t\t\t\t\t\tPreferredDERP: 3,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"hostinfo-netinfo-exists-derp-set\",\n\t\t\tnodeBefore: Node{\n\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\tHostname: \"test\",\n\t\t\t\t\tNetInfo: &tailcfg.NetInfo{\n\t\t\t\t\t\tPreferredDERP: 
999,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tchange: &tailcfg.PeerChange{\n\t\t\t\tDERPRegion: 2,\n\t\t\t},\n\t\t\twant: Node{\n\t\t\t\tHostinfo: &tailcfg.Hostinfo{\n\t\t\t\t\tHostname: \"test\",\n\t\t\t\t\tNetInfo: &tailcfg.NetInfo{\n\t\t\t\t\t\tPreferredDERP: 2,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:       \"endpoints-not-set\",\n\t\t\tnodeBefore: Node{},\n\t\t\tchange: &tailcfg.PeerChange{\n\t\t\t\tEndpoints: []netip.AddrPort{\n\t\t\t\t\tnetip.MustParseAddrPort(\"8.8.8.8:88\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: Node{\n\t\t\t\tEndpoints: []netip.AddrPort{\n\t\t\t\t\tnetip.MustParseAddrPort(\"8.8.8.8:88\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"endpoints-set\",\n\t\t\tnodeBefore: Node{\n\t\t\t\tEndpoints: []netip.AddrPort{\n\t\t\t\t\tnetip.MustParseAddrPort(\"6.6.6.6:66\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tchange: &tailcfg.PeerChange{\n\t\t\t\tEndpoints: []netip.AddrPort{\n\t\t\t\t\tnetip.MustParseAddrPort(\"8.8.8.8:88\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: Node{\n\t\t\t\tEndpoints: []netip.AddrPort{\n\t\t\t\t\tnetip.MustParseAddrPort(\"8.8.8.8:88\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\ttt.nodeBefore.ApplyPeerChange(tt.change)\n\n\t\t\tif diff := cmp.Diff(tt.want, tt.nodeBefore, util.Comparers...); diff != \"\" {\n\t\t\t\tt.Errorf(\"Patch unexpected result (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestNodeRegisterMethodToV1Enum(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tnode Node\n\t\twant v1.RegisterMethod\n\t}{\n\t\t{\n\t\t\tname: \"authkey\",\n\t\t\tnode: Node{\n\t\t\t\tID:             1,\n\t\t\t\tRegisterMethod: util.RegisterMethodAuthKey,\n\t\t\t},\n\t\t\twant: v1.RegisterMethod_REGISTER_METHOD_AUTH_KEY,\n\t\t},\n\t\t{\n\t\t\tname: \"oidc\",\n\t\t\tnode: Node{\n\t\t\t\tID:             1,\n\t\t\t\tRegisterMethod: util.RegisterMethodOIDC,\n\t\t\t},\n\t\t\twant: v1.RegisterMethod_REGISTER_METHOD_OIDC,\n\t\t},\n\t\t{\n\t\t\tname: \"cli\",\n\t\t\tnode: Node{\n\t\t\t\tID:             1,\n\t\t\t\tRegisterMethod: util.RegisterMethodCLI,\n\t\t\t},\n\t\t\twant: v1.RegisterMethod_REGISTER_METHOD_CLI,\n\t\t},\n\t\t{\n\t\t\tname: \"unknown\",\n\t\t\tnode: Node{\n\t\t\t\tID: 0,\n\t\t\t},\n\t\t\twant: v1.RegisterMethod_REGISTER_METHOD_UNSPECIFIED,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot := tt.node.RegisterMethodToV1Enum()\n\n\t\t\tif diff := cmp.Diff(tt.want, got); diff != \"\" {\n\t\t\t\tt.Errorf(\"RegisterMethodToV1Enum() unexpected result (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestHasNetworkChanges tests the NodeView method for detecting\n// when a node's network properties have changed.\nfunc TestHasNetworkChanges(t *testing.T) {\n\tmustIPPtr := func(s string) *netip.Addr {\n\t\tip := netip.MustParseAddr(s)\n\t\treturn &ip\n\t}\n\n\ttests := []struct {\n\t\tname    string\n\t\told     *Node\n\t\tnew     *Node\n\t\tchanged bool\n\t}{\n\t\t{\n\t\t\tname: \"no changes\",\n\t\t\told: &Node{\n\t\t\t\tID:             1,\n\t\t\t\tIPv4:           mustIPPtr(\"100.64.0.1\"),\n\t\t\t\tIPv6:           mustIPPtr(\"fd7a:115c:a1e0::1\"),\n\t\t\t\tHostinfo:       &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix(\"10.0.0.0/24\")}},\n\t\t\t\tApprovedRoutes: []netip.Prefix{netip.MustParsePrefix(\"192.168.0.0/24\")},\n\t\t\t},\n\t\t\tnew: &Node{\n\t\t\t\tID:             1,\n\t\t\t\tIPv4:           mustIPPtr(\"100.64.0.1\"),\n\t\t\t\tIPv6:           
mustIPPtr(\"fd7a:115c:a1e0::1\"),\n\t\t\t\tHostinfo:       &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix(\"10.0.0.0/24\")}},\n\t\t\t\tApprovedRoutes: []netip.Prefix{netip.MustParsePrefix(\"192.168.0.0/24\")},\n\t\t\t},\n\t\t\tchanged: false,\n\t\t},\n\t\t{\n\t\t\tname: \"IPv4 changed\",\n\t\t\told: &Node{\n\t\t\t\tID:   1,\n\t\t\t\tIPv4: mustIPPtr(\"100.64.0.1\"),\n\t\t\t\tIPv6: mustIPPtr(\"fd7a:115c:a1e0::1\"),\n\t\t\t},\n\t\t\tnew: &Node{\n\t\t\t\tID:   1,\n\t\t\t\tIPv4: mustIPPtr(\"100.64.0.2\"),\n\t\t\t\tIPv6: mustIPPtr(\"fd7a:115c:a1e0::1\"),\n\t\t\t},\n\t\t\tchanged: true,\n\t\t},\n\t\t{\n\t\t\tname: \"IPv6 changed\",\n\t\t\told: &Node{\n\t\t\t\tID:   1,\n\t\t\t\tIPv4: mustIPPtr(\"100.64.0.1\"),\n\t\t\t\tIPv6: mustIPPtr(\"fd7a:115c:a1e0::1\"),\n\t\t\t},\n\t\t\tnew: &Node{\n\t\t\t\tID:   1,\n\t\t\t\tIPv4: mustIPPtr(\"100.64.0.1\"),\n\t\t\t\tIPv6: mustIPPtr(\"fd7a:115c:a1e0::2\"),\n\t\t\t},\n\t\t\tchanged: true,\n\t\t},\n\t\t{\n\t\t\tname: \"RoutableIPs added\",\n\t\t\told: &Node{\n\t\t\t\tID:       1,\n\t\t\t\tIPv4:     mustIPPtr(\"100.64.0.1\"),\n\t\t\t\tHostinfo: &tailcfg.Hostinfo{},\n\t\t\t},\n\t\t\tnew: &Node{\n\t\t\t\tID:       1,\n\t\t\t\tIPv4:     mustIPPtr(\"100.64.0.1\"),\n\t\t\t\tHostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix(\"10.0.0.0/24\")}},\n\t\t\t},\n\t\t\tchanged: true,\n\t\t},\n\t\t{\n\t\t\tname: \"RoutableIPs removed\",\n\t\t\told: &Node{\n\t\t\t\tID:       1,\n\t\t\t\tIPv4:     mustIPPtr(\"100.64.0.1\"),\n\t\t\t\tHostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix(\"10.0.0.0/24\")}},\n\t\t\t},\n\t\t\tnew: &Node{\n\t\t\t\tID:       1,\n\t\t\t\tIPv4:     mustIPPtr(\"100.64.0.1\"),\n\t\t\t\tHostinfo: &tailcfg.Hostinfo{},\n\t\t\t},\n\t\t\tchanged: true,\n\t\t},\n\t\t{\n\t\t\tname: \"RoutableIPs changed\",\n\t\t\told: &Node{\n\t\t\t\tID:       1,\n\t\t\t\tIPv4:     mustIPPtr(\"100.64.0.1\"),\n\t\t\t\tHostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix(\"10.0.0.0/24\")}},\n\t\t\t},\n\t\t\tnew: &Node{\n\t\t\t\tID:       1,\n\t\t\t\tIPv4:     mustIPPtr(\"100.64.0.1\"),\n\t\t\t\tHostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix(\"192.168.0.0/24\")}},\n\t\t\t},\n\t\t\tchanged: true,\n\t\t},\n\t\t{\n\t\t\tname: \"SubnetRoutes added\",\n\t\t\told: &Node{\n\t\t\t\tID:             1,\n\t\t\t\tIPv4:           mustIPPtr(\"100.64.0.1\"),\n\t\t\t\tHostinfo:       &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix(\"192.168.0.0/24\")}},\n\t\t\t\tApprovedRoutes: []netip.Prefix{},\n\t\t\t},\n\t\t\tnew: &Node{\n\t\t\t\tID:             1,\n\t\t\t\tIPv4:           mustIPPtr(\"100.64.0.1\"),\n\t\t\t\tHostinfo:       &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix(\"192.168.0.0/24\")}},\n\t\t\t\tApprovedRoutes: []netip.Prefix{netip.MustParsePrefix(\"192.168.0.0/24\")},\n\t\t\t},\n\t\t\tchanged: true,\n\t\t},\n\t\t{\n\t\t\tname: \"SubnetRoutes removed\",\n\t\t\told: &Node{\n\t\t\t\tID:             1,\n\t\t\t\tIPv4:           mustIPPtr(\"100.64.0.1\"),\n\t\t\t\tHostinfo:       &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix(\"192.168.0.0/24\")}},\n\t\t\t\tApprovedRoutes: []netip.Prefix{netip.MustParsePrefix(\"192.168.0.0/24\")},\n\t\t\t},\n\t\t\tnew: &Node{\n\t\t\t\tID:             1,\n\t\t\t\tIPv4:           mustIPPtr(\"100.64.0.1\"),\n\t\t\t\tHostinfo:       &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix(\"192.168.0.0/24\")}},\n\t\t\t\tApprovedRoutes: 
[]netip.Prefix{},\n\t\t\t},\n\t\t\tchanged: true,\n\t\t},\n\t\t{\n\t\t\tname: \"SubnetRoutes changed\",\n\t\t\told: &Node{\n\t\t\t\tID:             1,\n\t\t\t\tIPv4:           mustIPPtr(\"100.64.0.1\"),\n\t\t\t\tHostinfo:       &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix(\"10.0.0.0/24\"), netip.MustParsePrefix(\"192.168.0.0/24\")}},\n\t\t\t\tApprovedRoutes: []netip.Prefix{netip.MustParsePrefix(\"10.0.0.0/24\")},\n\t\t\t},\n\t\t\tnew: &Node{\n\t\t\t\tID:             1,\n\t\t\t\tIPv4:           mustIPPtr(\"100.64.0.1\"),\n\t\t\t\tHostinfo:       &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix(\"10.0.0.0/24\"), netip.MustParsePrefix(\"192.168.0.0/24\")}},\n\t\t\t\tApprovedRoutes: []netip.Prefix{netip.MustParsePrefix(\"192.168.0.0/24\")},\n\t\t\t},\n\t\t\tchanged: true,\n\t\t},\n\t\t{\n\t\t\tname: \"irrelevant property changed (Hostname)\",\n\t\t\told: &Node{\n\t\t\t\tID:       1,\n\t\t\t\tIPv4:     mustIPPtr(\"100.64.0.1\"),\n\t\t\t\tHostname: \"old-name\",\n\t\t\t},\n\t\t\tnew: &Node{\n\t\t\t\tID:       1,\n\t\t\t\tIPv4:     mustIPPtr(\"100.64.0.1\"),\n\t\t\t\tHostname: \"new-name\",\n\t\t\t},\n\t\t\tchanged: false,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot := tt.new.View().HasNetworkChanges(tt.old.View())\n\t\t\tif got != tt.changed {\n\t\t\t\tt.Errorf(\"HasNetworkChanges() = %v, want %v\", got, tt.changed)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "hscontrol/types/policy.go",
    "content": "package types\n\nimport (\n\t\"errors\"\n\n\t\"gorm.io/gorm\"\n)\n\nvar (\n\tErrPolicyNotFound         = errors.New(\"acl policy not found\")\n\tErrPolicyUpdateIsDisabled = errors.New(\"update is disabled for modes other than 'database'\")\n)\n\n// Policy represents a policy in the database.\ntype Policy struct {\n\tgorm.Model\n\n\t// Data contains the policy in HuJSON format.\n\tData string\n}\n"
  },
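  {
    "path": "hscontrol/types/policy_example_test.go",
    "content": "package types\n\n// NOTE: hypothetical example file added for illustration; it is not part of\n// headscale. It shows what the Policy.Data field holds: the ACL policy as a\n// HuJSON string (JSON with comments and trailing commas), stored verbatim\n// when the policy is kept in the database.\n\nimport (\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestPolicyDataSketch(t *testing.T) {\n\tp := Policy{\n\t\tData: `{\n\t\t\t// HuJSON permits comments and trailing commas.\n\t\t\t\"acls\": [\n\t\t\t\t{\"action\": \"accept\", \"src\": [\"*\"], \"dst\": [\"*:*\"]},\n\t\t\t],\n\t\t}`,\n\t}\n\n\t// The struct does not parse or validate the policy; Data is opaque here.\n\tif !strings.Contains(p.Data, \"acls\") {\n\t\tt.Fatal(\"expected policy data to contain the acls section\")\n\t}\n}\n"
  },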
  {
    "path": "hscontrol/types/preauth_key.go",
    "content": "package types\n\nimport (\n\t\"time\"\n\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\t\"github.com/juanfont/headscale/hscontrol/util/zlog/zf\"\n\t\"github.com/rs/zerolog\"\n\t\"github.com/rs/zerolog/log\"\n\t\"google.golang.org/protobuf/types/known/timestamppb\"\n)\n\ntype PAKError string\n\nfunc (e PAKError) Error() string { return string(e) }\n\n// PreAuthKey describes a pre-authorization key usable in a particular user.\ntype PreAuthKey struct {\n\tID uint64 `gorm:\"primary_key\"`\n\n\t// Legacy plaintext key (for backwards compatibility)\n\tKey string\n\n\t// New bcrypt-based authentication\n\tPrefix string\n\tHash   []byte // bcrypt\n\n\t// For tagged keys: UserID tracks who created the key (informational)\n\t// For user-owned keys: UserID tracks the node owner\n\t// Can be nil for system-created tagged keys\n\tUserID *uint\n\tUser   *User `gorm:\"constraint:OnDelete:SET NULL;\"`\n\n\tReusable  bool\n\tEphemeral bool `gorm:\"default:false\"`\n\tUsed      bool `gorm:\"default:false\"`\n\n\t// Tags to assign to nodes registered with this key.\n\t// Tags are copied to the node during registration.\n\t// If non-empty, this creates tagged nodes (not user-owned).\n\tTags []string `gorm:\"serializer:json\"`\n\n\tCreatedAt  *time.Time\n\tExpiration *time.Time\n}\n\n// PreAuthKeyNew is returned once when the key is created.\ntype PreAuthKeyNew struct {\n\tID         uint64 `gorm:\"primary_key\"`\n\tKey        string\n\tReusable   bool\n\tEphemeral  bool\n\tTags       []string\n\tExpiration *time.Time\n\tCreatedAt  *time.Time\n\tUser       *User // Can be nil for system-created tagged keys\n}\n\nfunc (key *PreAuthKeyNew) Proto() *v1.PreAuthKey {\n\tprotoKey := v1.PreAuthKey{\n\t\tId:        key.ID,\n\t\tKey:       key.Key,\n\t\tUser:      nil, // Will be set below if not nil\n\t\tReusable:  key.Reusable,\n\t\tEphemeral: key.Ephemeral,\n\t\tAclTags:   key.Tags,\n\t}\n\n\tif key.User != nil {\n\t\tprotoKey.User = key.User.Proto()\n\t}\n\n\tif key.Expiration != nil {\n\t\tprotoKey.Expiration = timestamppb.New(*key.Expiration)\n\t}\n\n\tif key.CreatedAt != nil {\n\t\tprotoKey.CreatedAt = timestamppb.New(*key.CreatedAt)\n\t}\n\n\treturn &protoKey\n}\n\nfunc (key *PreAuthKey) Proto() *v1.PreAuthKey {\n\tprotoKey := v1.PreAuthKey{\n\t\tUser:      nil, // Will be set below if not nil\n\t\tId:        key.ID,\n\t\tEphemeral: key.Ephemeral,\n\t\tReusable:  key.Reusable,\n\t\tUsed:      key.Used,\n\t\tAclTags:   key.Tags,\n\t}\n\n\tif key.User != nil {\n\t\tprotoKey.User = key.User.Proto()\n\t}\n\n\t// For new keys (with prefix/hash), show the prefix so users can identify the key\n\t// For legacy keys (with plaintext key), show the full key for backwards compatibility\n\tif key.Prefix != \"\" {\n\t\tprotoKey.Key = \"hskey-auth-\" + key.Prefix + \"-***\"\n\t} else if key.Key != \"\" {\n\t\t// Legacy key - show full key for backwards compatibility\n\t\t// TODO: Consider hiding this in a future major version\n\t\tprotoKey.Key = key.Key\n\t}\n\n\tif key.Expiration != nil {\n\t\tprotoKey.Expiration = timestamppb.New(*key.Expiration)\n\t}\n\n\tif key.CreatedAt != nil {\n\t\tprotoKey.CreatedAt = timestamppb.New(*key.CreatedAt)\n\t}\n\n\treturn &protoKey\n}\n\n// Validate checks if a pre auth key can be used.\nfunc (pak *PreAuthKey) Validate() error {\n\tif pak == nil {\n\t\treturn PAKError(\"invalid authkey\")\n\t}\n\n\t// Use EmbedObject for safe logging - never log full key\n\tlog.Debug().\n\t\tCaller().\n\t\tEmbedObject(pak).\n\t\tMsg(\"PreAuthKey.Validate: checking 
key\")\n\n\tif pak.Expiration != nil && pak.Expiration.Before(time.Now()) {\n\t\treturn PAKError(\"authkey expired\")\n\t}\n\n\t// we don't need to check if has been used before\n\tif pak.Reusable {\n\t\treturn nil\n\t}\n\n\tif pak.Used {\n\t\treturn PAKError(\"authkey already used\")\n\t}\n\n\treturn nil\n}\n\n// IsTagged returns true if this PreAuthKey creates tagged nodes.\n// When a PreAuthKey has tags, nodes registered with it will be tagged nodes.\nfunc (pak *PreAuthKey) IsTagged() bool {\n\treturn len(pak.Tags) > 0\n}\n\n// maskedPrefix returns the key prefix in masked format for safe logging.\n// SECURITY: Never log the full key or hash, only the masked prefix.\nfunc (pak *PreAuthKey) maskedPrefix() string {\n\tif pak.Prefix != \"\" {\n\t\treturn \"hskey-auth-\" + pak.Prefix + \"-***\"\n\t}\n\n\treturn \"\"\n}\n\n// MarshalZerologObject implements zerolog.LogObjectMarshaler for safe logging.\n// SECURITY: This method intentionally does NOT log the full key or hash.\n// Only the masked prefix is logged for identification purposes.\nfunc (pak *PreAuthKey) MarshalZerologObject(e *zerolog.Event) {\n\tif pak == nil {\n\t\treturn\n\t}\n\n\te.Uint64(zf.PAKID, pak.ID)\n\te.Bool(zf.PAKReusable, pak.Reusable)\n\te.Bool(zf.PAKEphemeral, pak.Ephemeral)\n\te.Bool(zf.PAKUsed, pak.Used)\n\te.Bool(zf.PAKIsTagged, pak.IsTagged())\n\n\t// SECURITY: Only log masked prefix, never full key or hash\n\tif masked := pak.maskedPrefix(); masked != \"\" {\n\t\te.Str(zf.PAKPrefix, masked)\n\t}\n\n\tif len(pak.Tags) > 0 {\n\t\te.Strs(zf.PAKTags, pak.Tags)\n\t}\n\n\tif pak.User != nil {\n\t\te.Str(zf.UserName, pak.User.Username())\n\t}\n\n\tif pak.Expiration != nil {\n\t\te.Time(zf.PAKExpiration, *pak.Expiration)\n\t}\n}\n"
  },
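  {
    "path": "hscontrol/types/preauth_key_example_test.go",
    "content": "package types\n\n// NOTE: hypothetical example file added for illustration; it is not part of\n// headscale. It exercises two behaviours documented in preauth_key.go:\n// Proto() masks prefix-based keys as \"hskey-auth-<prefix>-***\" so the full\n// secret is never exposed, and Validate() rejects expired keys.\n\nimport (\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestPreAuthKeyMaskingAndExpirySketch(t *testing.T) {\n\tpak := PreAuthKey{ID: 1, Prefix: \"abc123\"}\n\tif got := pak.Proto().GetKey(); got != \"hskey-auth-abc123-***\" {\n\t\tt.Fatalf(\"expected masked key, got %q\", got)\n\t}\n\n\tpast := time.Now().Add(-time.Hour)\n\texpired := PreAuthKey{Expiration: &past}\n\tif err := expired.Validate(); err == nil {\n\t\tt.Fatal(\"expected expired key to fail validation\")\n\t}\n}\n"
  },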
  {
    "path": "hscontrol/types/preauth_key_test.go",
    "content": "package types\n\nimport (\n\t\"errors\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/google/go-cmp/cmp\"\n)\n\nfunc TestCanUsePreAuthKey(t *testing.T) {\n\tnow := time.Now()\n\tpast := now.Add(-time.Hour)\n\tfuture := now.Add(time.Hour)\n\n\ttests := []struct {\n\t\tname    string\n\t\tpak     *PreAuthKey\n\t\twantErr bool\n\t\terr     PAKError\n\t}{\n\t\t{\n\t\t\tname: \"valid reusable key\",\n\t\t\tpak: &PreAuthKey{\n\t\t\t\tReusable:   true,\n\t\t\t\tUsed:       false,\n\t\t\t\tExpiration: &future,\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"valid non-reusable key\",\n\t\t\tpak: &PreAuthKey{\n\t\t\t\tReusable:   false,\n\t\t\t\tUsed:       false,\n\t\t\t\tExpiration: &future,\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"expired key\",\n\t\t\tpak: &PreAuthKey{\n\t\t\t\tReusable:   false,\n\t\t\t\tUsed:       false,\n\t\t\t\tExpiration: &past,\n\t\t\t},\n\t\t\twantErr: true,\n\t\t\terr:     PAKError(\"authkey expired\"),\n\t\t},\n\t\t{\n\t\t\tname: \"used non-reusable key\",\n\t\t\tpak: &PreAuthKey{\n\t\t\t\tReusable:   false,\n\t\t\t\tUsed:       true,\n\t\t\t\tExpiration: &future,\n\t\t\t},\n\t\t\twantErr: true,\n\t\t\terr:     PAKError(\"authkey already used\"),\n\t\t},\n\t\t{\n\t\t\tname: \"used reusable key\",\n\t\t\tpak: &PreAuthKey{\n\t\t\t\tReusable:   true,\n\t\t\t\tUsed:       true,\n\t\t\t\tExpiration: &future,\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"no expiration date\",\n\t\t\tpak: &PreAuthKey{\n\t\t\t\tReusable:   false,\n\t\t\t\tUsed:       false,\n\t\t\t\tExpiration: nil,\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname:    \"nil preauth key\",\n\t\t\tpak:     nil,\n\t\t\twantErr: true,\n\t\t\terr:     PAKError(\"invalid authkey\"),\n\t\t},\n\t\t{\n\t\t\tname: \"expired and used key\",\n\t\t\tpak: &PreAuthKey{\n\t\t\t\tReusable:   false,\n\t\t\t\tUsed:       true,\n\t\t\t\tExpiration: &past,\n\t\t\t},\n\t\t\twantErr: true,\n\t\t\terr:     PAKError(\"authkey expired\"),\n\t\t},\n\t\t{\n\t\t\tname: \"no expiration and used key\",\n\t\t\tpak: &PreAuthKey{\n\t\t\t\tReusable:   false,\n\t\t\t\tUsed:       true,\n\t\t\t\tExpiration: nil,\n\t\t\t},\n\t\t\twantErr: true,\n\t\t\terr:     PAKError(\"authkey already used\"),\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\terr := tt.pak.Validate()\n\t\t\tif tt.wantErr {\n\t\t\t\tif err == nil {\n\t\t\t\t\tt.Errorf(\"expected error but got none\")\n\t\t\t\t} else {\n\t\t\t\t\thttpErr, ok := errors.AsType[PAKError](err)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tt.Errorf(\"expected HTTPError but got %T\", err)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tif diff := cmp.Diff(tt.err, httpErr); diff != \"\" {\n\t\t\t\t\t\t\tt.Errorf(\"unexpected error (-want +got):\\n%s\", diff)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"expected no error but got %v\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "hscontrol/types/routes.go",
    "content": "package types\n\nimport (\n\t\"net/netip\"\n\n\t\"gorm.io/gorm\"\n)\n\n// Deprecated: Approval of routes is denormalised onto the relevant node.\n// Struct is kept for GORM migrations only.\ntype Route struct {\n\tgorm.Model\n\n\tNodeID uint64 `gorm:\"not null\"`\n\tNode   *Node\n\n\tPrefix netip.Prefix `gorm:\"serializer:text\"`\n\n\t// Advertised is now only stored as part of [Node.Hostinfo].\n\tAdvertised bool\n\n\t// Enabled is stored directly on the node as ApprovedRoutes.\n\tEnabled bool\n\n\t// IsPrimary is only determined in memory as it is only relevant\n\t// when the server is up.\n\tIsPrimary bool\n}\n\n// Deprecated: Approval of routes is denormalised onto the relevant node.\ntype Routes []Route\n"
  },
  {
    "path": "hscontrol/types/testdata/base-domain-in-server-url.yaml",
    "content": "noise:\n  private_key_path: \"private_key.pem\"\n\nprefixes:\n  v6: fd7a:115c:a1e0::/48\n  v4: 100.64.0.0/10\n\ndatabase:\n  type: sqlite3\n\nserver_url: \"https://server.derp.no\"\n\ndns:\n  magic_dns: true\n  base_domain: derp.no\n  override_local_dns: false\n"
  },
  {
    "path": "hscontrol/types/testdata/base-domain-not-in-server-url.yaml",
    "content": "noise:\n  private_key_path: \"private_key.pem\"\n\nprefixes:\n  v6: fd7a:115c:a1e0::/48\n  v4: 100.64.0.0/10\n\ndatabase:\n  type: sqlite3\n\nserver_url: \"https://derp.no\"\n\ndns:\n  magic_dns: true\n  base_domain: clients.derp.no\n  override_local_dns: false\n"
  },
  {
    "path": "hscontrol/types/testdata/dns-override-true-error.yaml",
    "content": "noise:\n  private_key_path: \"private_key.pem\"\n\nprefixes:\n  v6: fd7a:115c:a1e0::/48\n  v4: 100.64.0.0/10\n\ndatabase:\n  type: sqlite3\n\nserver_url: \"https://server.derp.no\"\n\ndns:\n  magic_dns: true\n  base_domain: derp.no\n  override_local_dns: true\n"
  },
  {
    "path": "hscontrol/types/testdata/dns-override-true.yaml",
    "content": "noise:\n  private_key_path: \"private_key.pem\"\n\nprefixes:\n  v6: fd7a:115c:a1e0::/48\n  v4: 100.64.0.0/10\n\ndatabase:\n  type: sqlite3\n\nserver_url: \"https://server.derp.no\"\n\ndns:\n  magic_dns: true\n  base_domain: derp2.no\n  override_local_dns: true\n  nameservers:\n    global:\n      - 1.1.1.1\n      - 1.0.0.1\n"
  },
  {
    "path": "hscontrol/types/testdata/dns_full.yaml",
    "content": "# minimum to not fatal\nnoise:\n  private_key_path: \"private_key.pem\"\nserver_url: \"https://derp.no\"\n\ndns:\n  magic_dns: true\n  base_domain: example.com\n\n  override_local_dns: false\n  nameservers:\n    global:\n      - 1.1.1.1\n      - 1.0.0.1\n      - 2606:4700:4700::1111\n      - 2606:4700:4700::1001\n      - https://dns.nextdns.io/abc123\n\n    split:\n      foo.bar.com:\n        - 1.1.1.1\n      darp.headscale.net:\n        - 1.1.1.1\n        - 8.8.8.8\n\n  search_domains:\n    - test.com\n    - bar.com\n\n  extra_records:\n    - name: \"grafana.myvpn.example.com\"\n      type: \"A\"\n      value: \"100.64.0.3\"\n\n    # you can also put it in one line\n    - { name: \"prometheus.myvpn.example.com\", type: \"A\", value: \"100.64.0.4\" }\n"
  },
  {
    "path": "hscontrol/types/testdata/dns_full_no_magic.yaml",
    "content": "# minimum to not fatal\nnoise:\n  private_key_path: \"private_key.pem\"\nserver_url: \"https://derp.no\"\n\ndns:\n  magic_dns: false\n  base_domain: example.com\n\n  override_local_dns: false\n  nameservers:\n    global:\n      - 1.1.1.1\n      - 1.0.0.1\n      - 2606:4700:4700::1111\n      - 2606:4700:4700::1001\n      - https://dns.nextdns.io/abc123\n\n    split:\n      foo.bar.com:\n        - 1.1.1.1\n      darp.headscale.net:\n        - 1.1.1.1\n        - 8.8.8.8\n\n  search_domains:\n    - test.com\n    - bar.com\n\n  extra_records:\n    - name: \"grafana.myvpn.example.com\"\n      type: \"A\"\n      value: \"100.64.0.3\"\n\n    # you can also put it in one line\n    - { name: \"prometheus.myvpn.example.com\", type: \"A\", value: \"100.64.0.4\" }\n"
  },
  {
    "path": "hscontrol/types/testdata/minimal.yaml",
    "content": "noise:\n  private_key_path: \"private_key.pem\"\nserver_url: \"https://derp.no\"\n"
  },
  {
    "path": "hscontrol/types/testdata/policy-path-is-loaded.yaml",
    "content": "noise:\n  private_key_path: \"private_key.pem\"\n\nprefixes:\n  v6: fd7a:115c:a1e0::/48\n  v4: 100.64.0.0/10\n\ndatabase:\n  type: sqlite3\n\nserver_url: \"https://derp.no\"\n\nacl_policy_path: \"/etc/acl_policy.yaml\"\npolicy:\n  type: file\n  path: \"/etc/policy.hujson\"\n\ndns:\n  magic_dns: false\n  override_local_dns: false\n"
  },
  {
    "path": "hscontrol/types/types_clone.go",
    "content": "// Copyright (c) Tailscale Inc & AUTHORS\n// SPDX-License-Identifier: BSD-3-Clause\n\n// Code generated by tailscale.com/cmd/cloner; DO NOT EDIT.\n\npackage types\n\nimport (\n\t\"database/sql\"\n\t\"net/netip\"\n\t\"time\"\n\n\t\"gorm.io/gorm\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/key\"\n\t\"tailscale.com/types/ptr\"\n)\n\n// Clone makes a deep copy of User.\n// The result aliases no memory with the original.\nfunc (src *User) Clone() *User {\n\tif src == nil {\n\t\treturn nil\n\t}\n\tdst := new(User)\n\t*dst = *src\n\treturn dst\n}\n\n// A compilation failure here means this code must be regenerated, with the command at the top of this file.\nvar _UserCloneNeedsRegeneration = User(struct {\n\tgorm.Model\n\tName               string\n\tDisplayName        string\n\tEmail              string\n\tProviderIdentifier sql.NullString\n\tProvider           string\n\tProfilePicURL      string\n}{})\n\n// Clone makes a deep copy of Node.\n// The result aliases no memory with the original.\nfunc (src *Node) Clone() *Node {\n\tif src == nil {\n\t\treturn nil\n\t}\n\tdst := new(Node)\n\t*dst = *src\n\tdst.Endpoints = append(src.Endpoints[:0:0], src.Endpoints...)\n\tdst.Hostinfo = src.Hostinfo.Clone()\n\tif dst.IPv4 != nil {\n\t\tdst.IPv4 = ptr.To(*src.IPv4)\n\t}\n\tif dst.IPv6 != nil {\n\t\tdst.IPv6 = ptr.To(*src.IPv6)\n\t}\n\tif dst.UserID != nil {\n\t\tdst.UserID = ptr.To(*src.UserID)\n\t}\n\tif dst.User != nil {\n\t\tdst.User = ptr.To(*src.User)\n\t}\n\tdst.Tags = append(src.Tags[:0:0], src.Tags...)\n\tif dst.AuthKeyID != nil {\n\t\tdst.AuthKeyID = ptr.To(*src.AuthKeyID)\n\t}\n\tdst.AuthKey = src.AuthKey.Clone()\n\tif dst.Expiry != nil {\n\t\tdst.Expiry = ptr.To(*src.Expiry)\n\t}\n\tif dst.LastSeen != nil {\n\t\tdst.LastSeen = ptr.To(*src.LastSeen)\n\t}\n\tdst.ApprovedRoutes = append(src.ApprovedRoutes[:0:0], src.ApprovedRoutes...)\n\tif dst.DeletedAt != nil {\n\t\tdst.DeletedAt = ptr.To(*src.DeletedAt)\n\t}\n\tif dst.IsOnline != nil {\n\t\tdst.IsOnline = ptr.To(*src.IsOnline)\n\t}\n\treturn dst\n}\n\n// A compilation failure here means this code must be regenerated, with the command at the top of this file.\nvar _NodeCloneNeedsRegeneration = Node(struct {\n\tID             NodeID\n\tMachineKey     key.MachinePublic\n\tNodeKey        key.NodePublic\n\tDiscoKey       key.DiscoPublic\n\tEndpoints      []netip.AddrPort\n\tHostinfo       *tailcfg.Hostinfo\n\tIPv4           *netip.Addr\n\tIPv6           *netip.Addr\n\tHostname       string\n\tGivenName      string\n\tUserID         *uint\n\tUser           *User\n\tRegisterMethod string\n\tTags           []string\n\tAuthKeyID      *uint64\n\tAuthKey        *PreAuthKey\n\tExpiry         *time.Time\n\tLastSeen       *time.Time\n\tApprovedRoutes []netip.Prefix\n\tCreatedAt      time.Time\n\tUpdatedAt      time.Time\n\tDeletedAt      *time.Time\n\tIsOnline       *bool\n}{})\n\n// Clone makes a deep copy of PreAuthKey.\n// The result aliases no memory with the original.\nfunc (src *PreAuthKey) Clone() *PreAuthKey {\n\tif src == nil {\n\t\treturn nil\n\t}\n\tdst := new(PreAuthKey)\n\t*dst = *src\n\tdst.Hash = append(src.Hash[:0:0], src.Hash...)\n\tif dst.UserID != nil {\n\t\tdst.UserID = ptr.To(*src.UserID)\n\t}\n\tif dst.User != nil {\n\t\tdst.User = ptr.To(*src.User)\n\t}\n\tdst.Tags = append(src.Tags[:0:0], src.Tags...)\n\tif dst.CreatedAt != nil {\n\t\tdst.CreatedAt = ptr.To(*src.CreatedAt)\n\t}\n\tif dst.Expiration != nil {\n\t\tdst.Expiration = ptr.To(*src.Expiration)\n\t}\n\treturn dst\n}\n\n// A compilation failure here 
means this code must be regenerated, with the command at the top of this file.\nvar _PreAuthKeyCloneNeedsRegeneration = PreAuthKey(struct {\n\tID         uint64\n\tKey        string\n\tPrefix     string\n\tHash       []byte\n\tUserID     *uint\n\tUser       *User\n\tReusable   bool\n\tEphemeral  bool\n\tUsed       bool\n\tTags       []string\n\tCreatedAt  *time.Time\n\tExpiration *time.Time\n}{})\n"
  },
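  {
    "path": "hscontrol/types/types_clone_example_test.go",
    "content": "package types\n\n// NOTE: hypothetical example file added for illustration; it is not part of\n// headscale. It demonstrates the documented guarantees of the generated code\n// in this package: Clone() aliases no memory with the original, and a\n// NodeView only hands out independent copies via AsStruct().\n\nimport \"testing\"\n\nfunc TestNodeCloneAndViewAliasingSketch(t *testing.T) {\n\torig := &Node{Tags: []string{\"tag:server\"}}\n\n\t// Mutating the clone's Tags slice must not affect the original.\n\tcp := orig.Clone()\n\tcp.Tags[0] = \"tag:mutated\"\n\tif orig.Tags[0] != \"tag:server\" {\n\t\tt.Fatal(\"Clone() must not alias the original's Tags slice\")\n\t}\n\n\t// AsStruct() returns another independent clone behind the view.\n\tfromView := orig.View().AsStruct()\n\tfromView.Tags[0] = \"tag:mutated-again\"\n\tif orig.Tags[0] != \"tag:server\" {\n\t\tt.Fatal(\"AsStruct() must not alias the original's Tags slice\")\n\t}\n}\n"
  },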
  {
    "path": "hscontrol/types/types_view.go",
    "content": "// Copyright (c) Tailscale Inc & AUTHORS\n// SPDX-License-Identifier: BSD-3-Clause\n\n// Code generated by tailscale/cmd/viewer; DO NOT EDIT.\n\npackage types\n\nimport (\n\t\"database/sql\"\n\tjsonv1 \"encoding/json\"\n\t\"errors\"\n\t\"net/netip\"\n\t\"time\"\n\n\tjsonv2 \"github.com/go-json-experiment/json\"\n\t\"github.com/go-json-experiment/json/jsontext\"\n\t\"gorm.io/gorm\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/key\"\n\t\"tailscale.com/types/views\"\n)\n\n//go:generate go run tailscale.com/cmd/cloner  -clonefunc=false -type=User,Node,PreAuthKey\n\n// View returns a read-only view of User.\nfunc (p *User) View() UserView {\n\treturn UserView{ж: p}\n}\n\n// UserView provides a read-only view over User.\n//\n// Its methods should only be called if `Valid()` returns true.\ntype UserView struct {\n\t// ж is the underlying mutable value, named with a hard-to-type\n\t// character that looks pointy like a pointer.\n\t// It is named distinctively to make you think of how dangerous it is to escape\n\t// to callers. You must not let callers be able to mutate it.\n\tж *User\n}\n\n// Valid reports whether v's underlying value is non-nil.\nfunc (v UserView) Valid() bool { return v.ж != nil }\n\n// AsStruct returns a clone of the underlying value which aliases no memory with\n// the original.\nfunc (v UserView) AsStruct() *User {\n\tif v.ж == nil {\n\t\treturn nil\n\t}\n\treturn v.ж.Clone()\n}\n\n// MarshalJSON implements [jsonv1.Marshaler].\nfunc (v UserView) MarshalJSON() ([]byte, error) {\n\treturn jsonv1.Marshal(v.ж)\n}\n\n// MarshalJSONTo implements [jsonv2.MarshalerTo].\nfunc (v UserView) MarshalJSONTo(enc *jsontext.Encoder) error {\n\treturn jsonv2.MarshalEncode(enc, v.ж)\n}\n\n// UnmarshalJSON implements [jsonv1.Unmarshaler].\nfunc (v *UserView) UnmarshalJSON(b []byte) error {\n\tif v.ж != nil {\n\t\treturn errors.New(\"already initialized\")\n\t}\n\tif len(b) == 0 {\n\t\treturn nil\n\t}\n\tvar x User\n\tif err := jsonv1.Unmarshal(b, &x); err != nil {\n\t\treturn err\n\t}\n\tv.ж = &x\n\treturn nil\n}\n\n// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom].\nfunc (v *UserView) UnmarshalJSONFrom(dec *jsontext.Decoder) error {\n\tif v.ж != nil {\n\t\treturn errors.New(\"already initialized\")\n\t}\n\tvar x User\n\tif err := jsonv2.UnmarshalDecode(dec, &x); err != nil {\n\t\treturn err\n\t}\n\tv.ж = &x\n\treturn nil\n}\n\nfunc (v UserView) Model() gorm.Model { return v.ж.Model }\n\n// Name (username) for the user, is used if email is empty\n// Should not be used, please use Username().\n// It is unique if ProviderIdentifier is not set.\nfunc (v UserView) Name() string { return v.ж.Name }\n\n// Typically the full name of the user\nfunc (v UserView) DisplayName() string { return v.ж.DisplayName }\n\n// Email of the user\n// Should not be used, please use Username().\nfunc (v UserView) Email() string { return v.ж.Email }\n\n// ProviderIdentifier is a unique or not set identifier of the\n// user from OIDC. 
It is the combination of `iss`\n// and `sub` claim in the OIDC token.\n// It is unique if set.\n// It is unique together with Name.\nfunc (v UserView) ProviderIdentifier() sql.NullString { return v.ж.ProviderIdentifier }\n\n// Provider is the origin of the user account,\n// same as RegistrationMethod, without authkey.\nfunc (v UserView) Provider() string      { return v.ж.Provider }\nfunc (v UserView) ProfilePicURL() string { return v.ж.ProfilePicURL }\n\n// A compilation failure here means this code must be regenerated, with the command at the top of this file.\nvar _UserViewNeedsRegeneration = User(struct {\n\tgorm.Model\n\tName               string\n\tDisplayName        string\n\tEmail              string\n\tProviderIdentifier sql.NullString\n\tProvider           string\n\tProfilePicURL      string\n}{})\n\n// View returns a read-only view of Node.\nfunc (p *Node) View() NodeView {\n\treturn NodeView{ж: p}\n}\n\n// NodeView provides a read-only view over Node.\n//\n// Its methods should only be called if `Valid()` returns true.\ntype NodeView struct {\n\t// ж is the underlying mutable value, named with a hard-to-type\n\t// character that looks pointy like a pointer.\n\t// It is named distinctively to make you think of how dangerous it is to escape\n\t// to callers. You must not let callers be able to mutate it.\n\tж *Node\n}\n\n// Valid reports whether v's underlying value is non-nil.\nfunc (v NodeView) Valid() bool { return v.ж != nil }\n\n// AsStruct returns a clone of the underlying value which aliases no memory with\n// the original.\nfunc (v NodeView) AsStruct() *Node {\n\tif v.ж == nil {\n\t\treturn nil\n\t}\n\treturn v.ж.Clone()\n}\n\n// MarshalJSON implements [jsonv1.Marshaler].\nfunc (v NodeView) MarshalJSON() ([]byte, error) {\n\treturn jsonv1.Marshal(v.ж)\n}\n\n// MarshalJSONTo implements [jsonv2.MarshalerTo].\nfunc (v NodeView) MarshalJSONTo(enc *jsontext.Encoder) error {\n\treturn jsonv2.MarshalEncode(enc, v.ж)\n}\n\n// UnmarshalJSON implements [jsonv1.Unmarshaler].\nfunc (v *NodeView) UnmarshalJSON(b []byte) error {\n\tif v.ж != nil {\n\t\treturn errors.New(\"already initialized\")\n\t}\n\tif len(b) == 0 {\n\t\treturn nil\n\t}\n\tvar x Node\n\tif err := jsonv1.Unmarshal(b, &x); err != nil {\n\t\treturn err\n\t}\n\tv.ж = &x\n\treturn nil\n}\n\n// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom].\nfunc (v *NodeView) UnmarshalJSONFrom(dec *jsontext.Decoder) error {\n\tif v.ж != nil {\n\t\treturn errors.New(\"already initialized\")\n\t}\n\tvar x Node\n\tif err := jsonv2.UnmarshalDecode(dec, &x); err != nil {\n\t\treturn err\n\t}\n\tv.ж = &x\n\treturn nil\n}\n\nfunc (v NodeView) ID() NodeID                             { return v.ж.ID }\nfunc (v NodeView) MachineKey() key.MachinePublic          { return v.ж.MachineKey }\nfunc (v NodeView) NodeKey() key.NodePublic                { return v.ж.NodeKey }\nfunc (v NodeView) DiscoKey() key.DiscoPublic              { return v.ж.DiscoKey }\nfunc (v NodeView) Endpoints() views.Slice[netip.AddrPort] { return views.SliceOf(v.ж.Endpoints) }\nfunc (v NodeView) Hostinfo() tailcfg.HostinfoView         { return v.ж.Hostinfo.View() }\nfunc (v NodeView) IPv4() views.ValuePointer[netip.Addr]   { return views.ValuePointerOf(v.ж.IPv4) }\n\nfunc (v NodeView) IPv6() views.ValuePointer[netip.Addr] { return views.ValuePointerOf(v.ж.IPv6) }\n\n// Hostname represents the name given by the Tailscale\n// client during registration\nfunc (v NodeView) Hostname() string { return v.ж.Hostname }\n\n// Givenname represents either:\n// a DNS normalized 
version of Hostname\n// a valid name set by the User\n//\n// GivenName is the name used in all DNS related\n// parts of headscale.\nfunc (v NodeView) GivenName() string { return v.ж.GivenName }\n\n// UserID identifies the owning user for user-owned nodes.\n// Nil for tagged nodes, which are owned by their tags.\nfunc (v NodeView) UserID() views.ValuePointer[uint] { return views.ValuePointerOf(v.ж.UserID) }\n\nfunc (v NodeView) User() UserView         { return v.ж.User.View() }\nfunc (v NodeView) RegisterMethod() string { return v.ж.RegisterMethod }\n\n// Tags is the definitive owner for tagged nodes.\n// When non-empty, the node is \"tagged\" and tags define its identity.\n// Empty for user-owned nodes.\n// Tags cannot be removed once set (one-way transition).\nfunc (v NodeView) Tags() views.Slice[string] { return views.SliceOf(v.ж.Tags) }\n\n// When a node has been created with a PreAuthKey, we need to\n// prevent the preauthkey from being deleted before the node.\n// The preauthkey can define \"tags\" of the node so we need it\n// around.\nfunc (v NodeView) AuthKeyID() views.ValuePointer[uint64] { return views.ValuePointerOf(v.ж.AuthKeyID) }\n\nfunc (v NodeView) AuthKey() PreAuthKeyView               { return v.ж.AuthKey.View() }\nfunc (v NodeView) Expiry() views.ValuePointer[time.Time] { return views.ValuePointerOf(v.ж.Expiry) }\n\n// LastSeen is when the node was last in contact with\n// headscale. It is best effort and not persisted.\nfunc (v NodeView) LastSeen() views.ValuePointer[time.Time] {\n\treturn views.ValuePointerOf(v.ж.LastSeen)\n}\n\n// ApprovedRoutes is a list of routes that the node is allowed to announce\n// as a subnet router. They are not necessarily the routes that the node\n// announces at the moment.\n// See [Node.Hostinfo]\nfunc (v NodeView) ApprovedRoutes() views.Slice[netip.Prefix] {\n\treturn views.SliceOf(v.ж.ApprovedRoutes)\n}\nfunc (v NodeView) CreatedAt() time.Time { return v.ж.CreatedAt }\nfunc (v NodeView) UpdatedAt() time.Time { return v.ж.UpdatedAt }\nfunc (v NodeView) DeletedAt() views.ValuePointer[time.Time] {\n\treturn views.ValuePointerOf(v.ж.DeletedAt)\n}\n\nfunc (v NodeView) IsOnline() views.ValuePointer[bool] { return views.ValuePointerOf(v.ж.IsOnline) }\n\nfunc (v NodeView) String() string { return v.ж.String() }\n\n// A compilation failure here means this code must be regenerated, with the command at the top of this file.\nvar _NodeViewNeedsRegeneration = Node(struct {\n\tID             NodeID\n\tMachineKey     key.MachinePublic\n\tNodeKey        key.NodePublic\n\tDiscoKey       key.DiscoPublic\n\tEndpoints      []netip.AddrPort\n\tHostinfo       *tailcfg.Hostinfo\n\tIPv4           *netip.Addr\n\tIPv6           *netip.Addr\n\tHostname       string\n\tGivenName      string\n\tUserID         *uint\n\tUser           *User\n\tRegisterMethod string\n\tTags           []string\n\tAuthKeyID      *uint64\n\tAuthKey        *PreAuthKey\n\tExpiry         *time.Time\n\tLastSeen       *time.Time\n\tApprovedRoutes []netip.Prefix\n\tCreatedAt      time.Time\n\tUpdatedAt      time.Time\n\tDeletedAt      *time.Time\n\tIsOnline       *bool\n}{})\n\n// View returns a read-only view of PreAuthKey.\nfunc (p *PreAuthKey) View() PreAuthKeyView {\n\treturn PreAuthKeyView{ж: p}\n}\n\n// PreAuthKeyView provides a read-only view over PreAuthKey.\n//\n// Its methods should only be called if `Valid()` returns true.\ntype PreAuthKeyView struct {\n\t// ж is the underlying mutable value, named with a hard-to-type\n\t// character that looks pointy like a pointer.\n\t// It is 
named distinctively to make you think of how dangerous it is to escape\n\t// to callers. You must not let callers be able to mutate it.\n\tж *PreAuthKey\n}\n\n// Valid reports whether v's underlying value is non-nil.\nfunc (v PreAuthKeyView) Valid() bool { return v.ж != nil }\n\n// AsStruct returns a clone of the underlying value which aliases no memory with\n// the original.\nfunc (v PreAuthKeyView) AsStruct() *PreAuthKey {\n\tif v.ж == nil {\n\t\treturn nil\n\t}\n\treturn v.ж.Clone()\n}\n\n// MarshalJSON implements [jsonv1.Marshaler].\nfunc (v PreAuthKeyView) MarshalJSON() ([]byte, error) {\n\treturn jsonv1.Marshal(v.ж)\n}\n\n// MarshalJSONTo implements [jsonv2.MarshalerTo].\nfunc (v PreAuthKeyView) MarshalJSONTo(enc *jsontext.Encoder) error {\n\treturn jsonv2.MarshalEncode(enc, v.ж)\n}\n\n// UnmarshalJSON implements [jsonv1.Unmarshaler].\nfunc (v *PreAuthKeyView) UnmarshalJSON(b []byte) error {\n\tif v.ж != nil {\n\t\treturn errors.New(\"already initialized\")\n\t}\n\tif len(b) == 0 {\n\t\treturn nil\n\t}\n\tvar x PreAuthKey\n\tif err := jsonv1.Unmarshal(b, &x); err != nil {\n\t\treturn err\n\t}\n\tv.ж = &x\n\treturn nil\n}\n\n// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom].\nfunc (v *PreAuthKeyView) UnmarshalJSONFrom(dec *jsontext.Decoder) error {\n\tif v.ж != nil {\n\t\treturn errors.New(\"already initialized\")\n\t}\n\tvar x PreAuthKey\n\tif err := jsonv2.UnmarshalDecode(dec, &x); err != nil {\n\t\treturn err\n\t}\n\tv.ж = &x\n\treturn nil\n}\n\nfunc (v PreAuthKeyView) ID() uint64 { return v.ж.ID }\n\n// Legacy plaintext key (for backwards compatibility)\nfunc (v PreAuthKeyView) Key() string { return v.ж.Key }\n\n// New bcrypt-based authentication\nfunc (v PreAuthKeyView) Prefix() string { return v.ж.Prefix }\n\n// bcrypt\nfunc (v PreAuthKeyView) Hash() views.ByteSlice[[]byte] { return views.ByteSliceOf(v.ж.Hash) }\n\n// For tagged keys: UserID tracks who created the key (informational)\n// For user-owned keys: UserID tracks the node owner\n// Can be nil for system-created tagged keys\nfunc (v PreAuthKeyView) UserID() views.ValuePointer[uint] { return views.ValuePointerOf(v.ж.UserID) }\n\nfunc (v PreAuthKeyView) User() UserView  { return v.ж.User.View() }\nfunc (v PreAuthKeyView) Reusable() bool  { return v.ж.Reusable }\nfunc (v PreAuthKeyView) Ephemeral() bool { return v.ж.Ephemeral }\nfunc (v PreAuthKeyView) Used() bool      { return v.ж.Used }\n\n// Tags to assign to nodes registered with this key.\n// Tags are copied to the node during registration.\n// If non-empty, this creates tagged nodes (not user-owned).\nfunc (v PreAuthKeyView) Tags() views.Slice[string] { return views.SliceOf(v.ж.Tags) }\nfunc (v PreAuthKeyView) CreatedAt() views.ValuePointer[time.Time] {\n\treturn views.ValuePointerOf(v.ж.CreatedAt)\n}\n\nfunc (v PreAuthKeyView) Expiration() views.ValuePointer[time.Time] {\n\treturn views.ValuePointerOf(v.ж.Expiration)\n}\n\n// A compilation failure here means this code must be regenerated, with the command at the top of this file.\nvar _PreAuthKeyViewNeedsRegeneration = PreAuthKey(struct {\n\tID         uint64\n\tKey        string\n\tPrefix     string\n\tHash       []byte\n\tUserID     *uint\n\tUser       *User\n\tReusable   bool\n\tEphemeral  bool\n\tUsed       bool\n\tTags       []string\n\tCreatedAt  *time.Time\n\tExpiration *time.Time\n}{})\n"
  },
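  {
    "path": "hscontrol/types/views_example_test.go",
    "content": "package types\n\nimport \"testing\"\n\n// TestUserViewUsageSketch is an illustrative sketch added for this\n// document; it is not part of the upstream headscale source. It shows\n// the intended use of the generated read-only View types above:\n// construct a value, take a View, read through its accessors, and use\n// AsStruct to obtain a clone that aliases no memory with the original.\nfunc TestUserViewUsageSketch(t *testing.T) {\n\tu := &User{Name: \"alice\", DisplayName: \"Alice\"}\n\n\tv := u.View()\n\tif !v.Valid() {\n\t\tt.Fatal(\"a view over a non-nil value must be valid\")\n\t}\n\n\t// Accessors read through to the underlying value without exposing\n\t// it for mutation.\n\tif v.Name() != \"alice\" {\n\t\tt.Errorf(\"Name() = %q, want %q\", v.Name(), \"alice\")\n\t}\n\n\t// AsStruct returns a clone, so mutating the clone never leaks back\n\t// into the value behind the view.\n\tclone := v.AsStruct()\n\tclone.Name = \"bob\"\n\tif v.Name() != \"alice\" {\n\t\tt.Error(\"mutating the clone must not affect the view\")\n\t}\n}\n"
  },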
  {
    "path": "hscontrol/types/users.go",
    "content": "package types\n\nimport (\n\t\"cmp\"\n\t\"database/sql\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/mail\"\n\t\"net/url\"\n\t\"strconv\"\n\t\"strings\"\n\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/juanfont/headscale/hscontrol/util/zlog/zf\"\n\t\"github.com/rs/zerolog\"\n\t\"github.com/rs/zerolog/log\"\n\t\"google.golang.org/protobuf/types/known/timestamppb\"\n\t\"gorm.io/gorm\"\n\t\"tailscale.com/tailcfg\"\n)\n\n// ErrCannotParseBoolean is returned when a value cannot be parsed as boolean.\nvar ErrCannotParseBoolean = errors.New(\"cannot parse value as boolean\")\n\ntype UserID uint64\n\ntype Users []User\n\nconst (\n\t// TaggedDevicesUserID is the special user ID for tagged devices.\n\t// This ID is used when rendering tagged nodes in the Tailscale protocol.\n\tTaggedDevicesUserID = 2147455555\n)\n\n// TaggedDevices is a special user used in MapResponse for tagged nodes.\n// Tagged nodes don't belong to a real user - the tag is their identity.\n// This special user ID is used when rendering tagged nodes in the Tailscale protocol.\nvar TaggedDevices = User{\n\tModel:       gorm.Model{ID: TaggedDevicesUserID},\n\tName:        \"tagged-devices\",\n\tDisplayName: \"Tagged Devices\",\n}\n\nfunc (u Users) String() string {\n\tvar sb strings.Builder\n\tsb.WriteString(\"[ \")\n\n\tfor _, user := range u {\n\t\tfmt.Fprintf(&sb, \"%d: %s, \", user.ID, user.Name)\n\t}\n\n\tsb.WriteString(\" ]\")\n\n\treturn sb.String()\n}\n\n// User is the way Headscale implements the concept of users in Tailscale\n//\n// At the end of the day, users in Tailscale are some kind of 'bubbles' or users\n// that contain our machines.\ntype User struct {\n\tgorm.Model //nolint:embeddedstructfieldcheck\n\n\t// The index `idx_name_provider_identifier` is to enforce uniqueness\n\t// between Name and ProviderIdentifier. This ensures that\n\t// you can have multiple users with the same name in OIDC,\n\t// but not if you only run with CLI users.\n\n\t// Name (username) for the user, is used if email is empty\n\t// Should not be used, please use Username().\n\t// It is unique if ProviderIdentifier is not set.\n\tName string\n\n\t// Typically the full name of the user\n\tDisplayName string\n\n\t// Email of the user\n\t// Should not be used, please use Username().\n\tEmail string\n\n\t// ProviderIdentifier is a unique or not set identifier of the\n\t// user from OIDC. 
It is the combination of `iss`\n\t// and `sub` claim in the OIDC token.\n\t// It is unique if set.\n\t// It is unique together with Name.\n\tProviderIdentifier sql.NullString\n\n\t// Provider is the origin of the user account,\n\t// same as RegistrationMethod, without authkey.\n\tProvider string\n\n\tProfilePicURL string\n}\n\nfunc (u *User) StringID() string {\n\tif u == nil {\n\t\treturn \"\"\n\t}\n\n\treturn strconv.FormatUint(uint64(u.ID), 10)\n}\n\n// TypedID returns a pointer to the user's ID as a UserID type.\n// This is a convenience method to avoid ugly casting like ptr.To(types.UserID(user.ID)).\nfunc (u *User) TypedID() *UserID {\n\tuid := UserID(u.ID)\n\treturn &uid\n}\n\n// Username is the main way to get the username of a user,\n// it will return the email if it exists, the name if it exists,\n// the OIDCIdentifier if it exists, and the ID if nothing else exists.\n// Email and OIDCIdentifier will be set when the user has headscale\n// enabled with OIDC, which means that there is a domain involved which\n// should be used throughout headscale, in information returned to the\n// user and the Policy engine.\nfunc (u *User) Username() string {\n\treturn cmp.Or(\n\t\tu.Email,\n\t\tu.Name,\n\t\tu.ProviderIdentifier.String,\n\t\tu.StringID(),\n\t)\n}\n\n// Display returns the DisplayName if it exists, otherwise\n// it will return the Username.\nfunc (u *User) Display() string {\n\treturn cmp.Or(u.DisplayName, u.Username())\n}\n\n// TODO(kradalby): See if we can fill in Gravatar here.\nfunc (u *User) profilePicURL() string {\n\treturn u.ProfilePicURL\n}\n\nfunc (u *User) TailscaleUser() tailcfg.User {\n\treturn tailcfg.User{\n\t\tID:            tailcfg.UserID(u.ID), //nolint:gosec // UserID is bounded\n\t\tDisplayName:   u.Display(),\n\t\tProfilePicURL: u.profilePicURL(),\n\t\tCreated:       u.CreatedAt,\n\t}\n}\n\nfunc (u UserView) TailscaleUser() tailcfg.User {\n\treturn u.ж.TailscaleUser()\n}\n\n// ID returns the user's ID.\n// This is a custom accessor because gorm.Model.ID is embedded\n// and the viewer generator doesn't always produce it.\nfunc (u UserView) ID() uint {\n\treturn u.ж.ID\n}\n\nfunc (u *User) TailscaleLogin() tailcfg.Login {\n\treturn tailcfg.Login{\n\t\tID:            tailcfg.LoginID(u.ID), //nolint:gosec // safe conversion for user ID\n\t\tProvider:      u.Provider,\n\t\tLoginName:     u.Username(),\n\t\tDisplayName:   u.Display(),\n\t\tProfilePicURL: u.profilePicURL(),\n\t}\n}\n\nfunc (u UserView) TailscaleLogin() tailcfg.Login {\n\treturn u.ж.TailscaleLogin()\n}\n\nfunc (u *User) TailscaleUserProfile() tailcfg.UserProfile {\n\treturn tailcfg.UserProfile{\n\t\tID:            tailcfg.UserID(u.ID), //nolint:gosec // UserID is bounded\n\t\tLoginName:     u.Username(),\n\t\tDisplayName:   u.Display(),\n\t\tProfilePicURL: u.profilePicURL(),\n\t}\n}\n\nfunc (u UserView) TailscaleUserProfile() tailcfg.UserProfile {\n\treturn u.ж.TailscaleUserProfile()\n}\n\nfunc (u *User) Proto() *v1.User {\n\t// Use Name if set, otherwise fall back to Username() which provides\n\t// a display-friendly identifier (Email > ProviderIdentifier > ID).\n\t// This ensures OIDC users (who typically have empty Name) display\n\t// their email, while CLI users retain their original Name.\n\tname := u.Name\n\tif name == \"\" {\n\t\tname = u.Username()\n\t}\n\n\treturn &v1.User{\n\t\tId:            uint64(u.ID),\n\t\tName:          name,\n\t\tCreatedAt:     timestamppb.New(u.CreatedAt),\n\t\tDisplayName:   u.DisplayName,\n\t\tEmail:         u.Email,\n\t\tProviderId:    
u.ProviderIdentifier.String,\n\t\tProvider:      u.Provider,\n\t\tProfilePicUrl: u.ProfilePicURL,\n\t}\n}\n\n// MarshalZerologObject implements zerolog.LogObjectMarshaler for safe logging.\nfunc (u *User) MarshalZerologObject(e *zerolog.Event) {\n\tif u == nil {\n\t\treturn\n\t}\n\n\te.Uint(zf.UserID, u.ID)\n\te.Str(zf.UserName, u.Username())\n\te.Str(zf.UserDisplay, u.Display())\n\n\tif u.Provider != \"\" {\n\t\te.Str(zf.UserProvider, u.Provider)\n\t}\n}\n\n// MarshalZerologObject implements zerolog.LogObjectMarshaler for UserView.\nfunc (u UserView) MarshalZerologObject(e *zerolog.Event) {\n\tif !u.Valid() {\n\t\treturn\n\t}\n\n\tu.ж.MarshalZerologObject(e)\n}\n\n// FlexibleBoolean handles JumpCloud's JSON where email_verified is returned as a\n// string \"true\" or \"false\" instead of a boolean.\n// This maps bool to a specific type with a custom unmarshaler to\n// ensure we can decode it from a string.\n// https://github.com/juanfont/headscale/issues/2293\ntype FlexibleBoolean bool\n\nfunc (bit *FlexibleBoolean) UnmarshalJSON(data []byte) error {\n\tvar val any\n\n\terr := json.Unmarshal(data, &val)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling data: %w\", err)\n\t}\n\n\tswitch v := val.(type) {\n\tcase bool:\n\t\t*bit = FlexibleBoolean(v)\n\tcase string:\n\t\tpv, err := strconv.ParseBool(v)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"parsing %s as boolean: %w\", v, err)\n\t\t}\n\n\t\t*bit = FlexibleBoolean(pv)\n\n\tdefault:\n\t\treturn fmt.Errorf(\"%w: %v\", ErrCannotParseBoolean, v)\n\t}\n\n\treturn nil\n}\n\ntype OIDCClaims struct {\n\t// Sub is the user's unique identifier at the provider.\n\tSub string `json:\"sub\"`\n\tIss string `json:\"iss\"`\n\n\t// Name is the user's full name.\n\tName              string          `json:\"name,omitempty\"`\n\tGroups            []string        `json:\"groups,omitempty\"`\n\tEmail             string          `json:\"email,omitempty\"`\n\tEmailVerified     FlexibleBoolean `json:\"email_verified,omitempty\"`\n\tProfilePictureURL string          `json:\"picture,omitempty\"`\n\tUsername          string          `json:\"preferred_username,omitempty\"`\n}\n\n// Identifier returns a unique identifier string combining the Iss and Sub claims.\n// The format depends on whether Iss is a URL or not:\n// - For URLs: Joins the URL and sub path (e.g., \"https://example.com/sub\")\n// - For non-URLs: Joins with a slash (e.g., \"oidc/sub\")\n// - For empty Iss: Returns just \"sub\"\n// - For empty Sub: Returns just the Issuer\n// - For both empty: Returns empty string\n//\n// The result is cleaned using CleanIdentifier() to ensure consistent formatting.\nfunc (c *OIDCClaims) Identifier() string {\n\t// Handle empty components special cases\n\tif c.Iss == \"\" && c.Sub == \"\" {\n\t\treturn \"\"\n\t}\n\n\tif c.Iss == \"\" {\n\t\treturn CleanIdentifier(c.Sub)\n\t}\n\n\tif c.Sub == \"\" {\n\t\treturn CleanIdentifier(c.Iss)\n\t}\n\n\t// We'll use the raw values and let CleanIdentifier handle all the whitespace\n\tissuer := c.Iss\n\tsubject := c.Sub\n\n\tvar result string\n\t// Try to parse as URL to handle URL joining correctly\n\tif u, err := url.Parse(issuer); err == nil && u.Scheme != \"\" { //nolint:noinlineerr\n\t\t// For URLs, use proper URL path joining\n\t\tif joined, err := url.JoinPath(issuer, subject); err == nil { //nolint:noinlineerr\n\t\t\tresult = joined\n\t\t}\n\t}\n\n\t// If URL joining failed or issuer wasn't a URL, do simple string join\n\tif result == \"\" {\n\t\t// Default case: simple string joining with slash\n\t\tissuer = 
strings.TrimSuffix(issuer, \"/\")\n\t\tsubject = strings.TrimPrefix(subject, \"/\")\n\t\tresult = issuer + \"/\" + subject\n\t}\n\n\t// Clean the result and return it\n\treturn CleanIdentifier(result)\n}\n\n// CleanIdentifier cleans a potentially malformed identifier by removing double slashes\n// while preserving protocol specifications like http://. This function will:\n// - Trim all whitespace from the beginning and end of the identifier\n// - Remove whitespace within path segments\n// - Preserve the scheme (http://, https://, etc.) for URLs\n// - Remove any duplicate slashes in the path\n// - Remove empty path segments\n// - For non-URL identifiers, it joins non-empty segments with a single slash\n// - Returns empty string for identifiers with only slashes\n// - Normalize URL schemes to lowercase.\nfunc CleanIdentifier(identifier string) string {\n\tif identifier == \"\" {\n\t\treturn identifier\n\t}\n\n\t// Trim leading/trailing whitespace\n\tidentifier = strings.TrimSpace(identifier)\n\n\t// Handle URLs with schemes\n\tu, err := url.Parse(identifier)\n\tif err == nil && u.Scheme != \"\" {\n\t\t// Clean path by removing empty segments and whitespace within segments\n\t\tparts := strings.FieldsFunc(u.Path, func(c rune) bool { return c == '/' })\n\t\tfor i, part := range parts {\n\t\t\tparts[i] = strings.TrimSpace(part)\n\t\t}\n\t\t// Remove empty parts after trimming\n\t\tcleanParts := make([]string, 0, len(parts))\n\t\tfor _, part := range parts {\n\t\t\tif part != \"\" {\n\t\t\t\tcleanParts = append(cleanParts, part)\n\t\t\t}\n\t\t}\n\n\t\tif len(cleanParts) == 0 {\n\t\t\tu.Path = \"\"\n\t\t} else {\n\t\t\tu.Path = \"/\" + strings.Join(cleanParts, \"/\")\n\t\t}\n\t\t// Ensure scheme is lowercase\n\t\tu.Scheme = strings.ToLower(u.Scheme)\n\n\t\treturn u.String()\n\t}\n\n\t// Handle non-URL identifiers\n\tparts := strings.FieldsFunc(identifier, func(c rune) bool { return c == '/' })\n\t// Clean whitespace from each part\n\tcleanParts := make([]string, 0, len(parts))\n\tfor _, part := range parts {\n\t\ttrimmed := strings.TrimSpace(part)\n\t\tif trimmed != \"\" {\n\t\t\tcleanParts = append(cleanParts, trimmed)\n\t\t}\n\t}\n\n\tif len(cleanParts) == 0 {\n\t\treturn \"\"\n\t}\n\n\treturn strings.Join(cleanParts, \"/\")\n}\n\ntype OIDCUserInfo struct {\n\tSub               string          `json:\"sub\"`\n\tName              string          `json:\"name\"`\n\tGivenName         string          `json:\"given_name\"`\n\tFamilyName        string          `json:\"family_name\"`\n\tPreferredUsername string          `json:\"preferred_username\"`\n\tEmail             string          `json:\"email\"`\n\tEmailVerified     FlexibleBoolean `json:\"email_verified,omitempty\"`\n\tGroups            []string        `json:\"groups\"`\n\tPicture           string          `json:\"picture\"`\n}\n\n// FromClaim overrides a User from OIDC claims.\n// All fields will be updated, except for the ID.\nfunc (u *User) FromClaim(claims *OIDCClaims, emailVerifiedRequired bool) {\n\terr := util.ValidateUsername(claims.Username)\n\tif err == nil {\n\t\tu.Name = claims.Username\n\t} else {\n\t\tlog.Debug().Caller().Err(err).Msgf(\"username %s is not valid\", claims.Username)\n\t}\n\n\tif claims.EmailVerified || !FlexibleBoolean(emailVerifiedRequired) {\n\t\t_, err = mail.ParseAddress(claims.Email)\n\t\tif err == nil {\n\t\t\tu.Email = claims.Email\n\t\t}\n\t}\n\n\t// Get provider identifier\n\tidentifier := claims.Identifier()\n\t// Ensure provider identifier always has a leading slash for backward compatibility\n\tif 
claims.Iss == \"\" && !strings.HasPrefix(identifier, \"/\") {\n\t\tidentifier = \"/\" + identifier\n\t}\n\n\tu.ProviderIdentifier = sql.NullString{String: identifier, Valid: true}\n\tu.DisplayName = claims.Name\n\tu.ProfilePicURL = claims.ProfilePictureURL\n\tu.Provider = util.RegisterMethodOIDC\n}\n"
  },
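  {
    "path": "hscontrol/types/users_example_test.go",
    "content": "package types\n\nimport (\n\t\"database/sql\"\n\t\"testing\"\n\n\t\"gorm.io/gorm\"\n)\n\n// TestUsernameFallbackSketch is an illustrative sketch added for this\n// document; it is not part of the upstream headscale source. It walks\n// through the fallback order documented on Username() (Email, then\n// Name, then ProviderIdentifier, then the numeric ID) and shows that\n// Display() falls back to Username() when DisplayName is empty.\nfunc TestUsernameFallbackSketch(t *testing.T) {\n\tu := &User{\n\t\tModel:              gorm.Model{ID: 7},\n\t\tName:               \"alice\",\n\t\tEmail:              \"alice@example.com\",\n\t\tProviderIdentifier: sql.NullString{String: \"https://idp.example.com/sub\", Valid: true},\n\t}\n\n\t// Email wins while it is set.\n\tif got := u.Username(); got != \"alice@example.com\" {\n\t\tt.Errorf(\"Username() = %q, want the email\", got)\n\t}\n\n\t// Without an email, Name is used.\n\tu.Email = \"\"\n\tif got := u.Username(); got != \"alice\" {\n\t\tt.Errorf(\"Username() = %q, want the name\", got)\n\t}\n\n\t// Without a name either, the provider identifier is used; if that\n\t// were also unset, the numeric ID (\"7\") would be the last resort.\n\tu.Name = \"\"\n\tif got := u.Username(); got != \"https://idp.example.com/sub\" {\n\t\tt.Errorf(\"Username() = %q, want the provider identifier\", got)\n\t}\n\n\t// Display() prefers DisplayName and otherwise mirrors Username().\n\tif u.Display() != u.Username() {\n\t\tt.Error(\"Display() should fall back to Username()\")\n\t}\n}\n"
  },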
  {
    "path": "hscontrol/types/users_test.go",
    "content": "package types\n\nimport (\n\t\"database/sql\"\n\t\"encoding/json\"\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestUnmarshallOIDCClaims(t *testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\tjsonstr string\n\t\twant    OIDCClaims\n\t}{\n\t\t{\n\t\t\tname: \"normal-bool\",\n\t\t\tjsonstr: `\n{\n  \"sub\": \"test\",\n  \"email\": \"test@test.no\",\n  \"email_verified\": true\n}\n\t\t\t`,\n\t\t\twant: OIDCClaims{\n\t\t\t\tSub:           \"test\",\n\t\t\t\tEmail:         \"test@test.no\",\n\t\t\t\tEmailVerified: true,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"string-bool-true\",\n\t\t\tjsonstr: `\n{\n  \"sub\": \"test2\",\n  \"email\": \"test2@test.no\",\n  \"email_verified\": \"true\"\n}\n\t\t\t`,\n\t\t\twant: OIDCClaims{\n\t\t\t\tSub:           \"test2\",\n\t\t\t\tEmail:         \"test2@test.no\",\n\t\t\t\tEmailVerified: true,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"string-bool-false\",\n\t\t\tjsonstr: `\n{\n  \"sub\": \"test3\",\n  \"email\": \"test3@test.no\",\n  \"email_verified\": \"false\"\n}\n\t\t\t`,\n\t\t\twant: OIDCClaims{\n\t\t\t\tSub:           \"test3\",\n\t\t\t\tEmail:         \"test3@test.no\",\n\t\t\t\tEmailVerified: false,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tvar got OIDCClaims\n\n\t\t\terr := json.Unmarshal([]byte(tt.jsonstr), &got)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"UnmarshallOIDCClaims() error = %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif diff := cmp.Diff(got, tt.want); diff != \"\" {\n\t\t\t\tt.Errorf(\"UnmarshallOIDCClaims() mismatch (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestOIDCClaimsIdentifier(t *testing.T) {\n\ttests := []struct {\n\t\tname     string\n\t\tiss      string\n\t\tsub      string\n\t\texpected string\n\t}{\n\t\t{\n\t\t\tname:     \"standard URL with trailing slash\",\n\t\t\tiss:      \"https://oidc.example.com/\",\n\t\t\tsub:      \"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\",\n\t\t\texpected: \"https://oidc.example.com/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\",\n\t\t},\n\t\t{\n\t\t\tname:     \"standard URL without trailing slash\",\n\t\t\tiss:      \"https://oidc.example.com\",\n\t\t\tsub:      \"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\",\n\t\t\texpected: \"https://oidc.example.com/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\",\n\t\t},\n\t\t{\n\t\t\tname:     \"standard URL with uppercase protocol\",\n\t\t\tiss:      \"HTTPS://oidc.example.com/\",\n\t\t\tsub:      \"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\",\n\t\t\texpected: \"https://oidc.example.com/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\",\n\t\t},\n\t\t{\n\t\t\tname:     \"standard URL with path and trailing slash\",\n\t\t\tiss:      \"https://login.microsoftonline.com/v2.0/\",\n\t\t\tsub:      \"I-70OQnj3TogrNSfkZQqB3f7dGwyBWSm1dolHNKrMzQ\",\n\t\t\texpected: \"https://login.microsoftonline.com/v2.0/I-70OQnj3TogrNSfkZQqB3f7dGwyBWSm1dolHNKrMzQ\",\n\t\t},\n\t\t{\n\t\t\tname:     \"standard URL with path without trailing slash\",\n\t\t\tiss:      \"https://login.microsoftonline.com/v2.0\",\n\t\t\tsub:      \"I-70OQnj3TogrNSfkZQqB3f7dGwyBWSm1dolHNKrMzQ\",\n\t\t\texpected: \"https://login.microsoftonline.com/v2.0/I-70OQnj3TogrNSfkZQqB3f7dGwyBWSm1dolHNKrMzQ\",\n\t\t},\n\t\t{\n\t\t\tname:     \"non-URL identifier with slash\",\n\t\t\tiss:      \"oidc\",\n\t\t\tsub:      \"sub\",\n\t\t\texpected: \"oidc/sub\",\n\t\t},\n\t\t{\n\t\t\tname:     \"non-URL identifier with trailing slash\",\n\t\t\tiss:  
    \"oidc/\",\n\t\t\tsub:      \"sub\",\n\t\t\texpected: \"oidc/sub\",\n\t\t},\n\t\t{\n\t\t\tname:     \"subject with slash\",\n\t\t\tiss:      \"oidc/\",\n\t\t\tsub:      \"sub/\",\n\t\t\texpected: \"oidc/sub\",\n\t\t},\n\t\t{\n\t\t\tname:     \"whitespace\",\n\t\t\tiss:      \"   oidc/   \",\n\t\t\tsub:      \"   sub   \",\n\t\t\texpected: \"oidc/sub\",\n\t\t},\n\t\t{\n\t\t\tname:     \"newline\",\n\t\t\tiss:      \"\\noidc/\\n\",\n\t\t\tsub:      \"\\nsub\\n\",\n\t\t\texpected: \"oidc/sub\",\n\t\t},\n\t\t{\n\t\t\tname:     \"tab\",\n\t\t\tiss:      \"\\toidc/\\t\",\n\t\t\tsub:      \"\\tsub\\t\",\n\t\t\texpected: \"oidc/sub\",\n\t\t},\n\t\t{\n\t\t\tname:     \"empty issuer\",\n\t\t\tiss:      \"\",\n\t\t\tsub:      \"sub\",\n\t\t\texpected: \"sub\",\n\t\t},\n\t\t{\n\t\t\tname:     \"empty subject\",\n\t\t\tiss:      \"https://oidc.example.com\",\n\t\t\tsub:      \"\",\n\t\t\texpected: \"https://oidc.example.com\",\n\t\t},\n\t\t{\n\t\t\tname:     \"both empty\",\n\t\t\tiss:      \"\",\n\t\t\tsub:      \"\",\n\t\t\texpected: \"\",\n\t\t},\n\t\t{\n\t\t\tname:     \"URL with double slash\",\n\t\t\tiss:      \"https://login.microsoftonline.com//v2.0\",\n\t\t\tsub:      \"I-70OQnj3TogrNSfkZQqB3f7dGwyBWSm1dolHNKrMzQ\",\n\t\t\texpected: \"https://login.microsoftonline.com/v2.0/I-70OQnj3TogrNSfkZQqB3f7dGwyBWSm1dolHNKrMzQ\",\n\t\t},\n\t\t{\n\t\t\tname:     \"FTP URL protocol\",\n\t\t\tiss:      \"ftp://example.com/directory\",\n\t\t\tsub:      \"resource\",\n\t\t\texpected: \"ftp://example.com/directory/resource\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tclaims := OIDCClaims{\n\t\t\t\tIss: tt.iss,\n\t\t\t\tSub: tt.sub,\n\t\t\t}\n\t\t\tresult := claims.Identifier()\n\t\t\tassert.Equal(t, tt.expected, result)\n\n\t\t\tif diff := cmp.Diff(tt.expected, result); diff != \"\" {\n\t\t\t\tt.Errorf(\"Identifier() mismatch (-want +got):\\n%s\", diff)\n\t\t\t}\n\n\t\t\t// Now clean the identifier and verify it's still the same\n\t\t\tcleaned := CleanIdentifier(result)\n\n\t\t\t// Double-check with cmp.Diff for better error messages\n\t\t\tif diff := cmp.Diff(tt.expected, cleaned); diff != \"\" {\n\t\t\t\tt.Errorf(\"CleanIdentifier(Identifier()) mismatch (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestCleanIdentifier(t *testing.T) {\n\ttests := []struct {\n\t\tname       string\n\t\tidentifier string\n\t\texpected   string\n\t}{\n\t\t{\n\t\t\tname:       \"empty identifier\",\n\t\t\tidentifier: \"\",\n\t\t\texpected:   \"\",\n\t\t},\n\t\t{\n\t\t\tname:       \"simple identifier\",\n\t\t\tidentifier: \"oidc/sub\",\n\t\t\texpected:   \"oidc/sub\",\n\t\t},\n\t\t{\n\t\t\tname:       \"double slashes in the middle\",\n\t\t\tidentifier: \"oidc//sub\",\n\t\t\texpected:   \"oidc/sub\",\n\t\t},\n\t\t{\n\t\t\tname:       \"trailing slash\",\n\t\t\tidentifier: \"oidc/sub/\",\n\t\t\texpected:   \"oidc/sub\",\n\t\t},\n\t\t{\n\t\t\tname:       \"multiple double slashes\",\n\t\t\tidentifier: \"oidc//sub///id//\",\n\t\t\texpected:   \"oidc/sub/id\",\n\t\t},\n\t\t{\n\t\t\tname:       \"HTTP URL with proper scheme\",\n\t\t\tidentifier: \"http://example.com/path\",\n\t\t\texpected:   \"http://example.com/path\",\n\t\t},\n\t\t{\n\t\t\tname:       \"HTTP URL with double slashes in path\",\n\t\t\tidentifier: \"http://example.com//path///resource\",\n\t\t\texpected:   \"http://example.com/path/resource\",\n\t\t},\n\t\t{\n\t\t\tname:       \"HTTPS URL with empty segments\",\n\t\t\tidentifier: \"https://example.com///path//\",\n\t\t\texpected:   
\"https://example.com/path\",\n\t\t},\n\t\t{\n\t\t\tname:       \"URL with double slashes in domain\",\n\t\t\tidentifier: \"https://login.microsoftonline.com//v2.0/I-70OQnj3TogrNSfkZQqB3f7dGwyBWSm1dolHNKrMzQ\",\n\t\t\texpected:   \"https://login.microsoftonline.com/v2.0/I-70OQnj3TogrNSfkZQqB3f7dGwyBWSm1dolHNKrMzQ\",\n\t\t},\n\t\t{\n\t\t\tname:       \"FTP URL with double slashes\",\n\t\t\tidentifier: \"ftp://example.com//resource//\",\n\t\t\texpected:   \"ftp://example.com/resource\",\n\t\t},\n\t\t{\n\t\t\tname:       \"Just slashes\",\n\t\t\tidentifier: \"///\",\n\t\t\texpected:   \"\",\n\t\t},\n\t\t{\n\t\t\tname:       \"Leading slash without URL\",\n\t\t\tidentifier: \"/path//to///resource\",\n\t\t\texpected:   \"path/to/resource\",\n\t\t},\n\t\t{\n\t\t\tname:       \"Non-standard protocol\",\n\t\t\tidentifier: \"ldap://example.org//path//to//resource\",\n\t\t\texpected:   \"ldap://example.org/path/to/resource\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tresult := CleanIdentifier(tt.identifier)\n\t\t\tassert.Equal(t, tt.expected, result)\n\n\t\t\tif diff := cmp.Diff(tt.expected, result); diff != \"\" {\n\t\t\t\tt.Errorf(\"CleanIdentifier() mismatch (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestOIDCClaimsJSONToUser(t *testing.T) {\n\ttests := []struct {\n\t\tname                  string\n\t\tjsonstr               string\n\t\temailVerifiedRequired bool\n\t\twant                  User\n\t}{\n\t\t{\n\t\t\tname:                  \"normal-bool\",\n\t\t\temailVerifiedRequired: true,\n\t\t\tjsonstr: `\n{\n  \"sub\": \"test\",\n  \"email\": \"test@test.no\",\n  \"email_verified\": true\n}\n\t\t\t`,\n\t\t\twant: User{\n\t\t\t\tProvider: util.RegisterMethodOIDC,\n\t\t\t\tEmail:    \"test@test.no\",\n\t\t\t\tProviderIdentifier: sql.NullString{\n\t\t\t\t\tString: \"/test\",\n\t\t\t\t\tValid:  true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:                  \"string-bool-true\",\n\t\t\temailVerifiedRequired: true,\n\t\t\tjsonstr: `\n{\n  \"sub\": \"test2\",\n  \"email\": \"test2@test.no\",\n  \"email_verified\": \"true\"\n}\n\t\t\t`,\n\t\t\twant: User{\n\t\t\t\tProvider: util.RegisterMethodOIDC,\n\t\t\t\tEmail:    \"test2@test.no\",\n\t\t\t\tProviderIdentifier: sql.NullString{\n\t\t\t\t\tString: \"/test2\",\n\t\t\t\t\tValid:  true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:                  \"string-bool-false\",\n\t\t\temailVerifiedRequired: true,\n\t\t\tjsonstr: `\n{\n  \"sub\": \"test3\",\n  \"email\": \"test3@test.no\",\n  \"email_verified\": \"false\"\n}\n\t\t\t`,\n\t\t\twant: User{\n\t\t\t\tProvider: util.RegisterMethodOIDC,\n\t\t\t\tProviderIdentifier: sql.NullString{\n\t\t\t\t\tString: \"/test3\",\n\t\t\t\t\tValid:  true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:                  \"allow-unverified-email\",\n\t\t\temailVerifiedRequired: false,\n\t\t\tjsonstr: `\n{\n  \"sub\": \"test4\",\n  \"email\": \"test4@test.no\",\n  \"email_verified\": \"false\"\n}\n\t\t\t`,\n\t\t\twant: User{\n\t\t\t\tProvider: util.RegisterMethodOIDC,\n\t\t\t\tEmail:    \"test4@test.no\",\n\t\t\t\tProviderIdentifier: sql.NullString{\n\t\t\t\t\tString: \"/test4\",\n\t\t\t\t\tValid:  true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t// From https://github.com/juanfont/headscale/issues/2333\n\t\t\tname:                  \"okta-oidc-claim-20250121\",\n\t\t\temailVerifiedRequired: true,\n\t\t\tjsonstr: `\n{\n  \"sub\": \"00u7dr4qp7XXXXXXXXXX\",\n  \"name\": \"Tim Horton\",\n  \"email\": \"tim.horton@company.com\",\n  \"ver\": 1,\n  
\"iss\": \"https://sso.company.com/oauth2/default\",\n  \"aud\": \"0oa8neto4tXXXXXXXXXX\",\n  \"iat\": 1737455152,\n  \"exp\": 1737458752,\n  \"jti\": \"ID.zzJz93koTunMKv5Bq-XXXXXXXXXXXXXXXXXXXXXXXXX\",\n  \"amr\": [\n    \"pwd\"\n  ],\n  \"idp\": \"00o42r3s2cXXXXXXXX\",\n  \"nonce\": \"nonce\",\n  \"preferred_username\": \"tim.horton@company.com\",\n  \"auth_time\": 1000,\n  \"at_hash\": \"preview_at_hash\"\n}\n\t\t\t`,\n\t\t\twant: User{\n\t\t\t\tProvider:    util.RegisterMethodOIDC,\n\t\t\t\tDisplayName: \"Tim Horton\",\n\t\t\t\tEmail:       \"\",\n\t\t\t\tName:        \"tim.horton@company.com\",\n\t\t\t\tProviderIdentifier: sql.NullString{\n\t\t\t\t\tString: \"https://sso.company.com/oauth2/default/00u7dr4qp7XXXXXXXXXX\",\n\t\t\t\t\tValid:  true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t// From https://github.com/juanfont/headscale/issues/2333\n\t\t\tname:                  \"okta-oidc-claim-20250121\",\n\t\t\temailVerifiedRequired: true,\n\t\t\tjsonstr: `\n{\n  \"aud\": \"79xxxxxx-xxxx-xxxx-xxxx-892146xxxxxx\",\n  \"iss\": \"https://login.microsoftonline.com//v2.0\",\n  \"iat\": 1737346441,\n  \"nbf\": 1737346441,\n  \"exp\": 1737350341,\n  \"aio\": \"AWQAm/8ZAAAABKne9EWr6ygVO2DbcRmoPIpRM819qqlP/mmK41AAWv/C2tVkld4+znbG8DaXFdLQa9jRUzokvsT7rt9nAT6Fg7QC+/ecDWsF5U+QX11f9Ox7ZkK4UAIWFcIXpuZZvRS7\",\n  \"email\": \"user@domain.com\",\n  \"name\": \"XXXXXX XXXX\",\n  \"oid\": \"54c2323d-5052-4130-9588-ad751909003f\",\n  \"preferred_username\": \"user@domain.com\",\n  \"rh\": \"1.AXUAXdg0Rfc11UifLDJv67ChfSluoXmD9z1EmK-JIUYuSK9cAQl1AA.\",\n  \"sid\": \"5250a0a2-0b4e-4e68-8652-b4e97866411d\",\n  \"sub\": \"I-70OQnj3TogrNSfkZQqB3f7dGwyBWSm1dolHNKrMzQ\",\n  \"tid\": \"<redacted>\",\n  \"uti\": \"zAuXeEtMM0GwcTAcOsBZAA\",\n  \"ver\": \"2.0\"\n}\n\t\t\t`,\n\t\t\twant: User{\n\t\t\t\tProvider:    util.RegisterMethodOIDC,\n\t\t\t\tDisplayName: \"XXXXXX XXXX\",\n\t\t\t\tName:        \"user@domain.com\",\n\t\t\t\tEmail:       \"\",\n\t\t\t\tProviderIdentifier: sql.NullString{\n\t\t\t\t\tString: \"https://login.microsoftonline.com/v2.0/I-70OQnj3TogrNSfkZQqB3f7dGwyBWSm1dolHNKrMzQ\",\n\t\t\t\t\tValid:  true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t// From https://github.com/juanfont/headscale/issues/2333\n\t\t\tname:                  \"casby-oidc-claim-20250513\",\n\t\t\temailVerifiedRequired: true,\n\t\t\tjsonstr: `\n\t\t\t{\n  \"sub\": \"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\",\n  \"iss\": \"https://oidc.example.com/\",\n  \"aud\": \"xxxxxxxxxxxx\",\n  \"preferred_username\": \"user001\",\n  \"name\": \"User001\",\n  \"email\": \"user001@example.com\",\n  \"email_verified\": true,\n  \"picture\": \"https://cdn.casbin.org/img/casbin.svg\",\n  \"groups\": [\n    \"org1/department1\",\n    \"org1/department2\"\n  ]\n}\n\t\t\t`,\n\t\t\twant: User{\n\t\t\t\tProvider:    util.RegisterMethodOIDC,\n\t\t\t\tName:        \"user001\",\n\t\t\t\tDisplayName: \"User001\",\n\t\t\t\tEmail:       \"user001@example.com\",\n\t\t\t\tProviderIdentifier: sql.NullString{\n\t\t\t\t\tString: \"https://oidc.example.com/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\",\n\t\t\t\t\tValid:  true,\n\t\t\t\t},\n\t\t\t\tProfilePicURL: \"https://cdn.casbin.org/img/casbin.svg\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tvar got OIDCClaims\n\n\t\t\terr := json.Unmarshal([]byte(tt.jsonstr), &got)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"TestOIDCClaimsJSONToUser() error = %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tvar user User\n\n\t\t\tuser.FromClaim(&got, 
tt.emailVerifiedRequired)\n\n\t\t\tif diff := cmp.Diff(user, tt.want); diff != \"\" {\n\t\t\t\tt.Errorf(\"TestOIDCClaimsJSONToUser() mismatch (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "hscontrol/types/version.go",
    "content": "package types\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"runtime/debug\"\n\t\"strings\"\n\t\"sync\"\n)\n\ntype GoInfo struct {\n\tVersion string `json:\"version\"`\n\tOS      string `json:\"os\"`\n\tArch    string `json:\"arch\"`\n}\n\ntype VersionInfo struct {\n\tVersion   string `json:\"version\"`\n\tCommit    string `json:\"commit\"`\n\tBuildTime string `json:\"buildTime\"`\n\tGo        GoInfo `json:\"go\"`\n\tDirty     bool   `json:\"dirty\"`\n}\n\nfunc (v *VersionInfo) String() string {\n\tvar sb strings.Builder\n\n\tversion := v.Version\n\tif v.Dirty && !strings.Contains(version, \"dirty\") {\n\t\tversion += \"-dirty\"\n\t}\n\n\tsb.WriteString(fmt.Sprintf(\"headscale version %s\\n\", version))\n\tsb.WriteString(fmt.Sprintf(\"commit: %s\\n\", v.Commit))\n\tsb.WriteString(fmt.Sprintf(\"build time: %s\\n\", v.BuildTime))\n\tsb.WriteString(fmt.Sprintf(\"built with: %s %s/%s\\n\", v.Go.Version, v.Go.OS, v.Go.Arch))\n\n\treturn sb.String()\n}\n\nvar buildInfo = sync.OnceValues(debug.ReadBuildInfo)\n\nvar GetVersionInfo = sync.OnceValue(func() *VersionInfo {\n\tinfo := &VersionInfo{\n\t\tVersion:   \"dev\",\n\t\tCommit:    \"unknown\",\n\t\tBuildTime: \"unknown\",\n\t\tGo: GoInfo{\n\t\t\tVersion: runtime.Version(),\n\t\t\tOS:      runtime.GOOS,\n\t\t\tArch:    runtime.GOARCH,\n\t\t},\n\t\tDirty: false,\n\t}\n\n\tbuildInfo, ok := buildInfo()\n\tif !ok {\n\t\treturn info\n\t}\n\n\t// Extract version from module path or main version\n\tif buildInfo.Main.Version != \"\" && buildInfo.Main.Version != \"(devel)\" {\n\t\tinfo.Version = buildInfo.Main.Version\n\t}\n\n\t// Extract build settings\n\tfor _, setting := range buildInfo.Settings {\n\t\tswitch setting.Key {\n\t\tcase \"vcs.revision\":\n\t\t\tinfo.Commit = setting.Value\n\t\tcase \"vcs.modified\":\n\t\t\tinfo.Dirty = setting.Value == \"true\"\n\t\tcase \"vcs.time\":\n\t\t\tinfo.BuildTime = setting.Value\n\t\t}\n\t}\n\n\treturn info\n})\n"
  },
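  {
    "path": "hscontrol/types/version_example_test.go",
    "content": "package types\n\nimport \"testing\"\n\n// TestVersionInfoSketch is an illustrative sketch added for this\n// document; it is not part of the upstream headscale source. Because\n// GetVersionInfo is wrapped in sync.OnceValue, every call returns the\n// same memoised *VersionInfo, and String() renders the human-readable\n// version banner.\nfunc TestVersionInfoSketch(t *testing.T) {\n\ta := GetVersionInfo()\n\tb := GetVersionInfo()\n\n\tif a != b {\n\t\tt.Error(\"GetVersionInfo should return the same memoised pointer\")\n\t}\n\n\tt.Logf(\"\\n%s\", a.String())\n}\n"
  },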
  {
    "path": "hscontrol/util/addr.go",
    "content": "package util\n\nimport (\n\t\"fmt\"\n\t\"iter\"\n\t\"net/netip\"\n\t\"strings\"\n\n\t\"go4.org/netipx\"\n)\n\n// This is borrowed from, and updated to use IPSet\n// https://github.com/tailscale/tailscale/blob/71029cea2ddf82007b80f465b256d027eab0f02d/wgengine/filter/tailcfg.go#L97-L162\n// TODO(kradalby): contribute upstream and make public.\nvar (\n\tzeroIP4 = netip.AddrFrom4([4]byte{})\n\tzeroIP6 = netip.AddrFrom16([16]byte{})\n)\n\n// parseIPSet parses arg as one:\n//\n//   - an IP address (IPv4 or IPv6)\n//   - the string \"*\" to match everything (both IPv4 & IPv6)\n//   - a CIDR (e.g. \"192.168.0.0/16\")\n//   - a range of two IPs, inclusive, separated by hyphen (\"2eff::1-2eff::0800\")\n//\n// bits, if non-nil, is the legacy SrcBits CIDR length to make a IP\n// address (without a slash) treated as a CIDR of *bits length.\n// nolint\nfunc ParseIPSet(arg string, bits *int) (*netipx.IPSet, error) {\n\tvar ipSet netipx.IPSetBuilder\n\tif arg == \"*\" {\n\t\tipSet.AddPrefix(netip.PrefixFrom(zeroIP4, 0))\n\t\tipSet.AddPrefix(netip.PrefixFrom(zeroIP6, 0))\n\n\t\treturn ipSet.IPSet()\n\t}\n\tif strings.Contains(arg, \"/\") {\n\t\tpfx, err := netip.ParsePrefix(arg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif pfx != pfx.Masked() {\n\t\t\treturn nil, fmt.Errorf(\"%v contains non-network bits set\", pfx)\n\t\t}\n\n\t\tipSet.AddPrefix(pfx)\n\n\t\treturn ipSet.IPSet()\n\t}\n\tif strings.Count(arg, \"-\") == 1 {\n\t\tip1s, ip2s, _ := strings.Cut(arg, \"-\")\n\n\t\tip1, err := netip.ParseAddr(ip1s)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tip2, err := netip.ParseAddr(ip2s)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tr := netipx.IPRangeFrom(ip1, ip2)\n\t\tif !r.IsValid() {\n\t\t\treturn nil, fmt.Errorf(\"invalid IP range %q\", arg)\n\t\t}\n\n\t\tfor _, prefix := range r.Prefixes() {\n\t\t\tipSet.AddPrefix(prefix)\n\t\t}\n\n\t\treturn ipSet.IPSet()\n\t}\n\tip, err := netip.ParseAddr(arg)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid IP address %q\", arg)\n\t}\n\tbits8 := uint8(ip.BitLen())\n\tif bits != nil {\n\t\tif *bits < 0 || *bits > int(bits8) {\n\t\t\treturn nil, fmt.Errorf(\"invalid CIDR size %d for IP %q\", *bits, arg)\n\t\t}\n\t\tbits8 = uint8(*bits)\n\t}\n\n\tipSet.AddPrefix(netip.PrefixFrom(ip, int(bits8)))\n\n\treturn ipSet.IPSet()\n}\n\nfunc GetIPPrefixEndpoints(na netip.Prefix) (netip.Addr, netip.Addr) {\n\tvar network, broadcast netip.Addr\n\n\tipRange := netipx.RangeOfPrefix(na)\n\tnetwork = ipRange.From()\n\tbroadcast = ipRange.To()\n\n\treturn network, broadcast\n}\n\nfunc StringToIPPrefix(prefixes []string) ([]netip.Prefix, error) {\n\tresult := make([]netip.Prefix, len(prefixes))\n\n\tfor index, prefixStr := range prefixes {\n\t\tprefix, err := netip.ParsePrefix(prefixStr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresult[index] = prefix\n\t}\n\n\treturn result, nil\n}\n\n// IPSetAddrIter returns a function that iterates over all the IPs in the IPSet.\nfunc IPSetAddrIter(ipSet *netipx.IPSet) iter.Seq[netip.Addr] {\n\treturn func(yield func(netip.Addr) bool) {\n\t\tfor _, rng := range ipSet.Ranges() {\n\t\t\tfor ip := rng.From(); ip.Compare(rng.To()) <= 0; ip = ip.Next() {\n\t\t\t\tif !yield(ip) {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
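  {
    "path": "hscontrol/util/addr_example_test.go",
    "content": "package util\n\nimport (\n\t\"net/netip\"\n\t\"testing\"\n)\n\n// TestIPSetAddrIterSketch is an illustrative sketch added for this\n// document; it is not part of the upstream headscale source. ParseIPSet\n// accepts \"*\", CIDRs, hyphenated ranges and single IPs; IPSetAddrIter\n// then exposes every address in the resulting set as a Go 1.23\n// range-over-func iterator.\nfunc TestIPSetAddrIterSketch(t *testing.T) {\n\tipSet, err := ParseIPSet(\"10.0.0.0-10.0.0.3\", nil)\n\tif err != nil {\n\t\tt.Fatalf(\"ParseIPSet: %v\", err)\n\t}\n\n\tvar got []netip.Addr\n\tfor ip := range IPSetAddrIter(ipSet) {\n\t\tgot = append(got, ip)\n\t}\n\n\tif len(got) != 4 {\n\t\tt.Fatalf(\"iterated %d addresses, want 4\", len(got))\n\t}\n\n\tif got[0] != netip.MustParseAddr(\"10.0.0.0\") || got[3] != netip.MustParseAddr(\"10.0.0.3\") {\n\t\tt.Errorf(\"unexpected bounds: %v\", got)\n\t}\n}\n"
  },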
  {
    "path": "hscontrol/util/addr_test.go",
    "content": "package util\n\nimport (\n\t\"net/netip\"\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"go4.org/netipx\"\n)\n\nfunc Test_parseIPSet(t *testing.T) {\n\tset := func(ips []string, prefixes []string) *netipx.IPSet {\n\t\tvar builder netipx.IPSetBuilder\n\n\t\tfor _, ip := range ips {\n\t\t\tbuilder.Add(netip.MustParseAddr(ip))\n\t\t}\n\n\t\tfor _, pre := range prefixes {\n\t\t\tbuilder.AddPrefix(netip.MustParsePrefix(pre))\n\t\t}\n\n\t\ts, _ := builder.IPSet()\n\n\t\treturn s\n\t}\n\n\ttype args struct {\n\t\targ  string\n\t\tbits *int\n\t}\n\n\ttests := []struct {\n\t\tname    string\n\t\targs    args\n\t\twant    *netipx.IPSet\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"simple ip4\",\n\t\t\targs: args{\n\t\t\t\targ:  \"10.0.0.1\",\n\t\t\t\tbits: nil,\n\t\t\t},\n\t\t\twant: set([]string{\n\t\t\t\t\"10.0.0.1\",\n\t\t\t}, []string{}),\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"simple ip6\",\n\t\t\targs: args{\n\t\t\t\targ:  \"2001:db8:abcd:1234::2\",\n\t\t\t\tbits: nil,\n\t\t\t},\n\t\t\twant: set([]string{\n\t\t\t\t\"2001:db8:abcd:1234::2\",\n\t\t\t}, []string{}),\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"wildcard\",\n\t\t\targs: args{\n\t\t\t\targ:  \"*\",\n\t\t\t\tbits: nil,\n\t\t\t},\n\t\t\twant: set([]string{}, []string{\n\t\t\t\t\"0.0.0.0/0\",\n\t\t\t\t\"::/0\",\n\t\t\t}),\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"prefix4\",\n\t\t\targs: args{\n\t\t\t\targ:  \"192.168.0.0/16\",\n\t\t\t\tbits: nil,\n\t\t\t},\n\t\t\twant: set([]string{}, []string{\n\t\t\t\t\"192.168.0.0/16\",\n\t\t\t}),\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"prefix6\",\n\t\t\targs: args{\n\t\t\t\targ:  \"2001:db8:abcd:1234::/64\",\n\t\t\t\tbits: nil,\n\t\t\t},\n\t\t\twant: set([]string{}, []string{\n\t\t\t\t\"2001:db8:abcd:1234::/64\",\n\t\t\t}),\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"range4\",\n\t\t\targs: args{\n\t\t\t\targ:  \"192.168.0.0-192.168.255.255\",\n\t\t\t\tbits: nil,\n\t\t\t},\n\t\t\twant: set([]string{}, []string{\n\t\t\t\t\"192.168.0.0/16\",\n\t\t\t}),\n\t\t\twantErr: false,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot, err := ParseIPSet(tt.args.arg, tt.args.bits)\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"parseIPSet() error = %v, wantErr %v\", err, tt.wantErr)\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif diff := cmp.Diff(tt.want, got); diff != \"\" {\n\t\t\t\tt.Errorf(\"parseIPSet() = (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "hscontrol/util/const.go",
    "content": "package util\n\nconst (\n\tRegisterMethodAuthKey = \"authkey\"\n\tRegisterMethodOIDC    = \"oidc\"\n\tRegisterMethodCLI     = \"cli\"\n)\n"
  },
  {
    "path": "hscontrol/util/dns.go",
    "content": "package util\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net/netip\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unicode\"\n\n\t\"go4.org/netipx\"\n\t\"tailscale.com/util/dnsname\"\n)\n\nconst (\n\tByteSize          = 8\n\tipv4AddressLength = 32\n\tipv6AddressLength = 128\n\n\t// LabelHostnameLength is the maximum length for a DNS label,\n\t// value related to RFC 1123 and 952.\n\tLabelHostnameLength = 63\n)\n\nvar invalidDNSRegex = regexp.MustCompile(\"[^a-z0-9-.]+\")\n\n// DNS validation errors.\nvar (\n\tErrInvalidHostName         = errors.New(\"invalid hostname\")\n\tErrUsernameTooShort        = errors.New(\"username must be at least 2 characters long\")\n\tErrUsernameMustStartLetter = errors.New(\"username must start with a letter\")\n\tErrUsernameTooManyAt       = errors.New(\"username cannot contain more than one '@'\")\n\tErrUsernameInvalidChar     = errors.New(\"username contains invalid character\")\n\tErrHostnameTooShort        = errors.New(\"hostname is too short, must be at least 2 characters\")\n\tErrHostnameTooLong         = errors.New(\"hostname is too long, must not exceed 63 characters\")\n\tErrHostnameMustBeLowercase = errors.New(\"hostname must be lowercase\")\n\tErrHostnameHyphenBoundary  = errors.New(\"hostname cannot start or end with a hyphen\")\n\tErrHostnameDotBoundary     = errors.New(\"hostname cannot start or end with a dot\")\n\tErrHostnameInvalidChars    = errors.New(\"hostname contains invalid characters\")\n)\n\n// ValidateUsername checks if a username is valid.\n// It must be at least 2 characters long, start with a letter, and contain\n// only letters, numbers, hyphens, dots, and underscores.\n// It cannot contain more than one '@'.\n// It cannot contain invalid characters.\nfunc ValidateUsername(username string) error {\n\t// Ensure the username meets the minimum length requirement\n\tif len(username) < 2 {\n\t\treturn ErrUsernameTooShort\n\t}\n\n\t// Ensure the username starts with a letter\n\tif !unicode.IsLetter(rune(username[0])) {\n\t\treturn ErrUsernameMustStartLetter\n\t}\n\n\tatCount := 0\n\n\tfor _, char := range username {\n\t\tswitch {\n\t\tcase unicode.IsLetter(char),\n\t\t\tunicode.IsDigit(char),\n\t\t\tchar == '-',\n\t\t\tchar == '.',\n\t\t\tchar == '_':\n\t\t\t// Valid characters\n\t\tcase char == '@':\n\t\t\tatCount++\n\t\t\tif atCount > 1 {\n\t\t\t\treturn ErrUsernameTooManyAt\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"%w: '%c'\", ErrUsernameInvalidChar, char)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// ValidateHostname checks if a hostname meets DNS requirements.\n// This function does NOT modify the input - it only validates.\n// The hostname must already be lowercase and contain only valid characters.\nfunc ValidateHostname(name string) error {\n\tif len(name) < 2 {\n\t\treturn fmt.Errorf(\"%w: %q\", ErrHostnameTooShort, name)\n\t}\n\n\tif len(name) > LabelHostnameLength {\n\t\treturn fmt.Errorf(\"%w: %q\", ErrHostnameTooLong, name)\n\t}\n\n\tif strings.ToLower(name) != name {\n\t\treturn fmt.Errorf(\"%w: %q (try %q)\", ErrHostnameMustBeLowercase, name, strings.ToLower(name))\n\t}\n\n\tif strings.HasPrefix(name, \"-\") || strings.HasSuffix(name, \"-\") {\n\t\treturn fmt.Errorf(\"%w: %q\", ErrHostnameHyphenBoundary, name)\n\t}\n\n\tif strings.HasPrefix(name, \".\") || strings.HasSuffix(name, \".\") {\n\t\treturn fmt.Errorf(\"%w: %q\", ErrHostnameDotBoundary, name)\n\t}\n\n\tif invalidDNSRegex.MatchString(name) {\n\t\treturn fmt.Errorf(\"%w: %q\", ErrHostnameInvalidChars, name)\n\t}\n\n\treturn nil\n}\n\n// 
NormaliseHostname transforms a string into a valid DNS hostname.\n// Returns error if the transformation results in an invalid hostname.\n//\n// Transformations applied:\n// - Converts to lowercase\n// - Removes invalid DNS characters\n// - Truncates to 63 characters if needed\n//\n// After transformation, validates the result.\nfunc NormaliseHostname(name string) (string, error) {\n\t// Early return if already valid\n\terr := ValidateHostname(name)\n\tif err == nil {\n\t\treturn name, nil\n\t}\n\n\t// Transform to lowercase\n\tname = strings.ToLower(name)\n\n\t// Strip invalid DNS characters\n\tname = invalidDNSRegex.ReplaceAllString(name, \"\")\n\n\t// Truncate to DNS label limit\n\tif len(name) > LabelHostnameLength {\n\t\tname = name[:LabelHostnameLength]\n\t}\n\n\t// Validate result after transformation\n\terr = ValidateHostname(name)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\n\t\t\t\"hostname invalid after normalisation: %w\",\n\t\t\terr,\n\t\t)\n\t}\n\n\treturn name, nil\n}\n\n// generateMagicDNSRootDomains generates a list of DNS entries to be included in `Routes` in `MapResponse`.\n// This list of reverse DNS entries instructs the OS on what subnets and domains the Tailscale embedded DNS\n// server (listening in 100.100.100.100 udp/53) should be used for.\n//\n// Tailscale.com includes in the list:\n// - the `BaseDomain` of the user\n// - the reverse DNS entry for IPv6 (0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa., see below more on IPv6)\n// - the reverse DNS entries for the IPv4 subnets covered by the user's `IPPrefix`.\n//   In the public SaaS this is [64-127].100.in-addr.arpa.\n//\n// The main purpose of this function is then generating the list of IPv4 entries. For the 100.64.0.0/10, this\n// is clear, and could be hardcoded. But we are allowing any range as `IPPrefix`, so we need to find out the\n// subnets when we have 172.16.0.0/16 (i.e., [0-255].16.172.in-addr.arpa.), or any other subnet.\n//\n// How IN-ADDR.ARPA domains work is defined in RFC1035 (section 3.5). 
Tailscale.com seems to adhere to this,\n// and do not make use of RFC2317 (\"Classless IN-ADDR.ARPA delegation\") - hence generating the entries for the next\n// class block only.\n\n// GenerateIPv4DNSRootDomain generates the IPv4 reverse DNS root domains.\n// From the netmask we can find out the wildcard bits (the bits that are not set in the netmask).\n// This allows us to then calculate the subnets included in the subsequent class block and generate the entries.\nfunc GenerateIPv4DNSRootDomain(ipPrefix netip.Prefix) []dnsname.FQDN {\n\t// Conversion to the std lib net.IPnet, a bit easier to operate\n\tnetRange := netipx.PrefixIPNet(ipPrefix)\n\tmaskBits, _ := netRange.Mask.Size()\n\n\t// lastOctet is the last IP byte covered by the mask\n\tlastOctet := maskBits / ByteSize\n\n\t// wildcardBits is the number of bits not under the mask in the lastOctet\n\twildcardBits := ByteSize - maskBits%ByteSize\n\n\t// minVal is the value in the lastOctet byte of the IP\n\t// maxVal is basically 2^wildcardBits - i.e., the value when all the wildcardBits are set to 1\n\tminVal := uint(netRange.IP[lastOctet])\n\tmaxVal := (minVal + 1<<uint(wildcardBits)) - 1 //nolint:gosec // wildcardBits is always < 8, no overflow\n\n\t// here we generate the base domain (e.g., 100.in-addr.arpa., 16.172.in-addr.arpa., etc.)\n\trdnsSlice := []string{}\n\tfor i := lastOctet - 1; i >= 0; i-- {\n\t\trdnsSlice = append(rdnsSlice, strconv.FormatUint(uint64(netRange.IP[i]), 10))\n\t}\n\n\trdnsSlice = append(rdnsSlice, \"in-addr.arpa.\")\n\trdnsBase := strings.Join(rdnsSlice, \".\")\n\n\tfqdns := make([]dnsname.FQDN, 0, maxVal-minVal+1)\n\tfor i := minVal; i <= maxVal; i++ {\n\t\tfqdn, err := dnsname.ToFQDN(fmt.Sprintf(\"%d.%s\", i, rdnsBase))\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tfqdns = append(fqdns, fqdn)\n\t}\n\n\treturn fqdns\n}\n\n// GenerateIPv6DNSRootDomain generates the IPv6 reverse DNS root domains.\n// From the netmask we can find out the wildcard bits (the bits that are not set in the netmask).\n// This allows us to then calculate the subnets included in the subsequent class block and generate the entries.\nfunc GenerateIPv6DNSRootDomain(ipPrefix netip.Prefix) []dnsname.FQDN {\n\tconst nibbleLen = 4\n\n\tmaskBits, _ := netipx.PrefixIPNet(ipPrefix).Mask.Size()\n\texpanded := ipPrefix.Addr().StringExpanded()\n\tnibbleStr := strings.Map(func(r rune) rune {\n\t\tif r == ':' {\n\t\t\treturn -1\n\t\t}\n\n\t\treturn r\n\t}, expanded)\n\n\t// TODO?: that does not look the most efficient implementation,\n\t// but the inputs are not so long as to cause problems,\n\t// and from what I can see, the generateMagicDNSRootDomains\n\t// function is called only once over the lifetime of a server process.\n\tprefixConstantParts := []string{}\n\tfor i := range maskBits / nibbleLen {\n\t\tprefixConstantParts = append(\n\t\t\t[]string{string(nibbleStr[i])},\n\t\t\tprefixConstantParts...)\n\t}\n\n\tmakeDomain := func(variablePrefix ...string) (dnsname.FQDN, error) {\n\t\tprefix := strings.Join(append(variablePrefix, prefixConstantParts...), \".\")\n\n\t\treturn dnsname.ToFQDN(prefix + \".ip6.arpa\")\n\t}\n\n\tvar fqdns []dnsname.FQDN\n\n\tif maskBits%4 == 0 {\n\t\tdom, _ := makeDomain()\n\t\tfqdns = append(fqdns, dom)\n\t} else {\n\t\tdomCount := 1 << (maskBits % nibbleLen)\n\n\t\tfqdns = make([]dnsname.FQDN, 0, domCount)\n\t\tfor i := range domCount {\n\t\t\tvarNibble := fmt.Sprintf(\"%x\", i)\n\n\t\t\tdom, err := makeDomain(varNibble)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfqdns = append(fqdns, dom)\n\t\t}\n\t}\n\n\treturn fqdns\n}\n"
  },
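  {
    "path": "hscontrol/util/dns_example_test.go",
    "content": "package util\n\nimport \"testing\"\n\n// TestValidateUsernameSketch is an illustrative sketch added for this\n// document; it is not part of the upstream headscale source. It spells\n// out the rules ValidateUsername enforces: at least two characters, a\n// leading letter, at most one '@', and otherwise only letters, digits,\n// '-', '.' and '_'.\nfunc TestValidateUsernameSketch(t *testing.T) {\n\tfor _, valid := range []string{\"alice\", \"alice.bob\", \"a-1_b\", \"alice@example.com\"} {\n\t\tif err := ValidateUsername(valid); err != nil {\n\t\t\tt.Errorf(\"ValidateUsername(%q) = %v, want nil\", valid, err)\n\t\t}\n\t}\n\n\tfor _, invalid := range []string{\"a\", \"1alice\", \"a@b@c\", \"ali ce\"} {\n\t\tif err := ValidateUsername(invalid); err == nil {\n\t\t\tt.Errorf(\"ValidateUsername(%q) = nil, want an error\", invalid)\n\t\t}\n\t}\n}\n"
  },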
  {
    "path": "hscontrol/util/dns_test.go",
    "content": "package util\n\nimport (\n\t\"net/netip\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"tailscale.com/util/dnsname\"\n\t\"tailscale.com/util/must\"\n)\n\nfunc TestNormaliseHostname(t *testing.T) {\n\ttype args struct {\n\t\tname string\n\t}\n\n\ttests := []struct {\n\t\tname    string\n\t\targs    args\n\t\twant    string\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname:    \"valid: lowercase user\",\n\t\t\targs:    args{name: \"valid-user\"},\n\t\t\twant:    \"valid-user\",\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname:    \"normalise: capitalized user\",\n\t\t\targs:    args{name: \"Invalid-CapItaLIzed-user\"},\n\t\t\twant:    \"invalid-capitalized-user\",\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname:    \"normalise: email as user\",\n\t\t\targs:    args{name: \"foo.bar@example.com\"},\n\t\t\twant:    \"foo.barexample.com\",\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname:    \"normalise: chars in user name\",\n\t\t\targs:    args{name: \"super-user+name\"},\n\t\t\twant:    \"super-username\",\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"invalid: too long name truncated leaves trailing hyphen\",\n\t\t\targs: args{\n\t\t\t\tname: \"super-long-useruseruser-name-that-should-be-a-little-more-than-63-chars\",\n\t\t\t},\n\t\t\twant:    \"\",\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"invalid: emoji stripped leaves trailing hyphen\",\n\t\t\targs:    args{name: \"hostname-with-💩\"},\n\t\t\twant:    \"\",\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"normalise: multiple emojis stripped\",\n\t\t\targs:    args{name: \"node-🎉-🚀-test\"},\n\t\t\twant:    \"node---test\",\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname:    \"invalid: only emoji becomes empty\",\n\t\t\targs:    args{name: \"💩\"},\n\t\t\twant:    \"\",\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"invalid: emoji at start leaves leading hyphen\",\n\t\t\targs:    args{name: \"🚀-rocket-node\"},\n\t\t\twant:    \"\",\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"invalid: emoji at end leaves trailing hyphen\",\n\t\t\targs:    args{name: \"node-test-🎉\"},\n\t\t\twant:    \"\",\n\t\t\twantErr: true,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot, err := NormaliseHostname(tt.args.name)\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"NormaliseHostname() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif !tt.wantErr && got != tt.want {\n\t\t\t\tt.Errorf(\"NormaliseHostname() = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestValidateHostname(t *testing.T) {\n\ttests := []struct {\n\t\tname          string\n\t\thostname      string\n\t\twantErr       bool\n\t\terrorContains string\n\t}{\n\t\t{\n\t\t\tname:     \"valid lowercase\",\n\t\t\thostname: \"valid-hostname\",\n\t\t\twantErr:  false,\n\t\t},\n\t\t{\n\t\t\tname:          \"uppercase rejected\",\n\t\t\thostname:      \"MyHostname\",\n\t\t\twantErr:       true,\n\t\t\terrorContains: \"must be lowercase\",\n\t\t},\n\t\t{\n\t\t\tname:          \"too short\",\n\t\t\thostname:      \"a\",\n\t\t\twantErr:       true,\n\t\t\terrorContains: \"too short\",\n\t\t},\n\t\t{\n\t\t\tname:          \"too long\",\n\t\t\thostname:      \"a\" + strings.Repeat(\"b\", 63),\n\t\t\twantErr:       true,\n\t\t\terrorContains: \"too long\",\n\t\t},\n\t\t{\n\t\t\tname:          \"emoji rejected\",\n\t\t\thostname:      \"hostname-💩\",\n\t\t\twantErr:       true,\n\t\t\terrorContains: \"invalid 
characters\",\n\t\t},\n\t\t{\n\t\t\tname:          \"starts with hyphen\",\n\t\t\thostname:      \"-hostname\",\n\t\t\twantErr:       true,\n\t\t\terrorContains: \"cannot start or end with a hyphen\",\n\t\t},\n\t\t{\n\t\t\tname:          \"ends with hyphen\",\n\t\t\thostname:      \"hostname-\",\n\t\t\twantErr:       true,\n\t\t\terrorContains: \"cannot start or end with a hyphen\",\n\t\t},\n\t\t{\n\t\t\tname:          \"starts with dot\",\n\t\t\thostname:      \".hostname\",\n\t\t\twantErr:       true,\n\t\t\terrorContains: \"cannot start or end with a dot\",\n\t\t},\n\t\t{\n\t\t\tname:          \"ends with dot\",\n\t\t\thostname:      \"hostname.\",\n\t\t\twantErr:       true,\n\t\t\terrorContains: \"cannot start or end with a dot\",\n\t\t},\n\t\t{\n\t\t\tname:          \"special characters\",\n\t\t\thostname:      \"host!@#$name\",\n\t\t\twantErr:       true,\n\t\t\terrorContains: \"invalid characters\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\terr := ValidateHostname(tt.hostname)\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"ValidateHostname() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif tt.wantErr && tt.errorContains != \"\" {\n\t\t\t\tif err == nil || !strings.Contains(err.Error(), tt.errorContains) {\n\t\t\t\t\tt.Errorf(\"ValidateHostname() error = %v, should contain %q\", err, tt.errorContains)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestMagicDNSRootDomains100(t *testing.T) {\n\tdomains := GenerateIPv4DNSRootDomain(netip.MustParsePrefix(\"100.64.0.0/10\"))\n\n\tassert.Contains(t, domains, must.Get(dnsname.ToFQDN(\"64.100.in-addr.arpa.\")))\n\tassert.Contains(t, domains, must.Get(dnsname.ToFQDN(\"100.100.in-addr.arpa.\")))\n\tassert.Contains(t, domains, must.Get(dnsname.ToFQDN(\"127.100.in-addr.arpa.\")))\n}\n\nfunc TestMagicDNSRootDomains172(t *testing.T) {\n\tdomains := GenerateIPv4DNSRootDomain(netip.MustParsePrefix(\"172.16.0.0/16\"))\n\n\tassert.Contains(t, domains, must.Get(dnsname.ToFQDN(\"0.16.172.in-addr.arpa.\")))\n\tassert.Contains(t, domains, must.Get(dnsname.ToFQDN(\"255.16.172.in-addr.arpa.\")))\n}\n\n// Happens when netmask is a multiple of 4 bits (sounds likely).\nfunc TestMagicDNSRootDomainsIPv6Single(t *testing.T) {\n\tdomains := GenerateIPv6DNSRootDomain(netip.MustParsePrefix(\"fd7a:115c:a1e0::/48\"))\n\n\tassert.Len(t, domains, 1)\n\tassert.Equal(t, \"0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa.\", domains[0].WithTrailingDot())\n}\n\nfunc TestMagicDNSRootDomainsIPv6SingleMultiple(t *testing.T) {\n\tdomains := GenerateIPv6DNSRootDomain(netip.MustParsePrefix(\"fd7a:115c:a1e0::/50\"))\n\n\tyieldsRoot := func(dom string) bool {\n\t\tfor _, candidate := range domains {\n\t\t\tif candidate.WithTrailingDot() == dom {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\treturn false\n\t}\n\n\tassert.Len(t, domains, 4)\n\tassert.True(t, yieldsRoot(\"0.0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa.\"))\n\tassert.True(t, yieldsRoot(\"1.0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa.\"))\n\tassert.True(t, yieldsRoot(\"2.0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa.\"))\n\tassert.True(t, yieldsRoot(\"3.0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa.\"))\n}\n"
  },
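The MagicDNS root-domain tests above hinge on how a CIDR prefix maps onto `in-addr.arpa.` labels. Below is a standalone sketch (not part of the repository; `GenerateIPv4DNSRootDomain` itself lives elsewhere in the util package) deriving the expected domains for the 100.64.0.0/10 case, showing why they run from 64.100 to 127.100:

```go
// Illustrative only: derive the in-addr.arpa root domains that
// TestMagicDNSRootDomains100 expects for a /10 prefix.
package main

import (
	"fmt"
	"net/netip"
)

func main() {
	prefix := netip.MustParsePrefix("100.64.0.0/10")
	octets := prefix.Addr().As4()

	// A /10 fixes the first octet and the top two bits of the second,
	// leaving 2^(16-10) = 64 possible values for the second octet.
	count := 1 << (16 - prefix.Bits())
	for i := range count {
		// Reverse DNS swaps octet order: second.first.in-addr.arpa.
		fmt.Printf("%d.%d.in-addr.arpa.\n", int(octets[1])+i, octets[0])
	}
	// Output runs 64.100.in-addr.arpa. through 127.100.in-addr.arpa.
}
```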
  {
    "path": "hscontrol/util/file.go",
    "content": "package util\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io/fs\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/spf13/viper\"\n)\n\nconst (\n\tBase8              = 8\n\tBase10             = 10\n\tBitSize16          = 16\n\tBitSize32          = 32\n\tBitSize64          = 64\n\tPermissionFallback = 0o700\n)\n\n// ErrDirectoryPermission is returned when creating a directory fails due to permission issues.\nvar ErrDirectoryPermission = errors.New(\"creating directory failed with permission error\")\n\nfunc AbsolutePathFromConfigPath(path string) string {\n\t// If a relative path is provided, prefix it with the directory where\n\t// the config file was found.\n\tif (path != \"\") && !strings.HasPrefix(path, string(os.PathSeparator)) {\n\t\tdir, _ := filepath.Split(viper.ConfigFileUsed())\n\t\tif dir != \"\" {\n\t\t\tpath = filepath.Join(dir, path)\n\t\t}\n\t}\n\n\treturn path\n}\n\nfunc GetFileMode(key string) fs.FileMode {\n\tmodeStr := viper.GetString(key)\n\n\tmode, err := strconv.ParseUint(modeStr, Base8, BitSize64)\n\tif err != nil {\n\t\treturn PermissionFallback\n\t}\n\n\treturn fs.FileMode(mode) //nolint:gosec // file mode is bounded by ParseUint\n}\n\nfunc EnsureDir(dir string) error {\n\tif _, err := os.Stat(dir); os.IsNotExist(err) { //nolint:noinlineerr\n\t\terr := os.MkdirAll(dir, PermissionFallback)\n\t\tif err != nil {\n\t\t\tif errors.Is(err, os.ErrPermission) {\n\t\t\t\treturn fmt.Errorf(\"%w: %s\", ErrDirectoryPermission, dir)\n\t\t\t}\n\n\t\t\treturn fmt.Errorf(\"creating directory %s: %w\", dir, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
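`GetFileMode` above leans on base-8 parsing of the raw config string. A minimal standalone sketch of that behaviour (the `parseMode` helper is hypothetical, standing in for the viper lookup):

```go
package main

import (
	"fmt"
	"io/fs"
	"strconv"
)

// parseMode mirrors GetFileMode's core: parse an octal mode string,
// falling back to 0o700 when the value is unparsable.
func parseMode(s string) fs.FileMode {
	mode, err := strconv.ParseUint(s, 8, 64)
	if err != nil {
		return 0o700
	}

	return fs.FileMode(mode)
}

func main() {
	fmt.Println(parseMode("0770"))  // -rwxrwx---
	fmt.Println(parseMode("bogus")) // -rwx------ (fallback)
}
```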
  {
    "path": "hscontrol/util/key.go",
    "content": "package util\n\nimport (\n\t\"errors\"\n)\n\nvar (\n\tErrCannotDecryptResponse = errors.New(\"decrypting response\")\n\tZstdCompression          = \"zstd\"\n)\n"
  },
  {
    "path": "hscontrol/util/log.go",
    "content": "package util\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"time\"\n\n\t\"github.com/rs/zerolog\"\n\t\"github.com/rs/zerolog/log\"\n\t\"gorm.io/gorm\"\n\tgormLogger \"gorm.io/gorm/logger\"\n\t\"tailscale.com/types/logger\"\n)\n\nfunc LogErr(err error, msg string) {\n\tlog.Error().Caller().Err(err).Msg(msg)\n}\n\nfunc TSLogfWrapper() logger.Logf {\n\treturn func(format string, args ...any) {\n\t\tlog.Debug().Caller().Msgf(format, args...)\n\t}\n}\n\ntype DBLogWrapper struct {\n\tLogger                *zerolog.Logger\n\tLevel                 zerolog.Level\n\tEvent                 *zerolog.Event\n\tSlowThreshold         time.Duration\n\tSkipErrRecordNotFound bool\n\tParameterizedQueries  bool\n}\n\nfunc NewDBLogWrapper(origin *zerolog.Logger, slowThreshold time.Duration, skipErrRecordNotFound bool, parameterizedQueries bool) *DBLogWrapper {\n\tl := &DBLogWrapper{\n\t\tLogger:                origin,\n\t\tLevel:                 origin.GetLevel(),\n\t\tSlowThreshold:         slowThreshold,\n\t\tSkipErrRecordNotFound: skipErrRecordNotFound,\n\t\tParameterizedQueries:  parameterizedQueries,\n\t}\n\n\treturn l\n}\n\ntype DBLogWrapperOption func(*DBLogWrapper)\n\nfunc (l *DBLogWrapper) LogMode(gormLogger.LogLevel) gormLogger.Interface {\n\treturn l\n}\n\nfunc (l *DBLogWrapper) Info(ctx context.Context, msg string, data ...any) {\n\tl.Logger.Info().Msgf(msg, data...)\n}\n\nfunc (l *DBLogWrapper) Warn(ctx context.Context, msg string, data ...any) {\n\tl.Logger.Warn().Msgf(msg, data...)\n}\n\nfunc (l *DBLogWrapper) Error(ctx context.Context, msg string, data ...any) {\n\tl.Logger.Error().Msgf(msg, data...)\n}\n\nfunc (l *DBLogWrapper) Trace(ctx context.Context, begin time.Time, fc func() (sql string, rowsAffected int64), err error) {\n\telapsed := time.Since(begin)\n\tsql, rowsAffected := fc()\n\tfields := map[string]any{\n\t\t\"duration\":     elapsed,\n\t\t\"sql\":          sql,\n\t\t\"rowsAffected\": rowsAffected,\n\t}\n\n\tif err != nil && (!errors.Is(err, gorm.ErrRecordNotFound) || !l.SkipErrRecordNotFound) {\n\t\tl.Logger.Error().Err(err).Fields(fields).Msgf(\"\")\n\t\treturn\n\t}\n\n\tif l.SlowThreshold != 0 && elapsed > l.SlowThreshold {\n\t\tl.Logger.Warn().Fields(fields).Msgf(\"\")\n\t\treturn\n\t}\n\n\tl.Logger.Debug().Fields(fields).Msgf(\"\")\n}\n\nfunc (l *DBLogWrapper) ParamsFilter(ctx context.Context, sql string, params ...any) (string, []any) {\n\tif l.ParameterizedQueries {\n\t\treturn sql, nil\n\t}\n\n\treturn sql, params\n}\n"
  },
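`DBLogWrapper` satisfies GORM's `logger.Interface`, so it can be handed straight to `gorm.Config`. A hedged wiring sketch follows; the glebarez/sqlite driver and the in-memory DSN are assumptions for illustration, and headscale's real database setup lives elsewhere in the tree:

```go
package main

import (
	"time"

	"github.com/glebarez/sqlite"
	"github.com/juanfont/headscale/hscontrol/util"
	"github.com/rs/zerolog/log"
	"gorm.io/gorm"
)

func main() {
	zlog := log.With().Str("component", "gorm").Logger()

	db, err := gorm.Open(sqlite.Open("file::memory:?cache=shared"), &gorm.Config{
		// Queries slower than 100ms log at warn level, ErrRecordNotFound is
		// suppressed, and SQL parameters are redacted from log output.
		Logger: util.NewDBLogWrapper(&zlog, 100*time.Millisecond, true, true),
	})
	if err != nil {
		log.Fatal().Err(err).Msg("opening database")
	}

	var one int
	db.Raw("SELECT 1").Scan(&one) // emitted via DBLogWrapper.Trace at debug level
}
```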
  {
    "path": "hscontrol/util/net.go",
    "content": "package util\n\nimport (\n\t\"context\"\n\t\"net\"\n\t\"net/netip\"\n\t\"sync\"\n\n\t\"go4.org/netipx\"\n\t\"tailscale.com/net/tsaddr\"\n)\n\nfunc GrpcSocketDialer(ctx context.Context, addr string) (net.Conn, error) {\n\tvar d net.Dialer\n\n\treturn d.DialContext(ctx, \"unix\", addr)\n}\n\nfunc PrefixesToString(prefixes []netip.Prefix) []string {\n\tret := make([]string, 0, len(prefixes))\n\tfor _, prefix := range prefixes {\n\t\tret = append(ret, prefix.String())\n\t}\n\n\treturn ret\n}\n\nfunc MustStringsToPrefixes(strings []string) []netip.Prefix {\n\tret := make([]netip.Prefix, 0, len(strings))\n\tfor _, str := range strings {\n\t\tprefix := netip.MustParsePrefix(str)\n\t\tret = append(ret, prefix)\n\t}\n\n\treturn ret\n}\n\n// TheInternet returns the IPSet for the Internet.\n// https://www.youtube.com/watch?v=iDbyYGrswtg\nvar TheInternet = sync.OnceValue(func() *netipx.IPSet {\n\tvar internetBuilder netipx.IPSetBuilder\n\tinternetBuilder.AddPrefix(netip.MustParsePrefix(\"2000::/3\"))\n\tinternetBuilder.AddPrefix(tsaddr.AllIPv4())\n\n\t// Delete Private network addresses\n\t// https://datatracker.ietf.org/doc/html/rfc1918\n\tinternetBuilder.RemovePrefix(netip.MustParsePrefix(\"fc00::/7\"))\n\tinternetBuilder.RemovePrefix(netip.MustParsePrefix(\"10.0.0.0/8\"))\n\tinternetBuilder.RemovePrefix(netip.MustParsePrefix(\"172.16.0.0/12\"))\n\tinternetBuilder.RemovePrefix(netip.MustParsePrefix(\"192.168.0.0/16\"))\n\n\t// Delete Tailscale networks\n\tinternetBuilder.RemovePrefix(tsaddr.TailscaleULARange())\n\tinternetBuilder.RemovePrefix(tsaddr.CGNATRange())\n\n\t// Delete \"can't find DHCP networks\"\n\tinternetBuilder.RemovePrefix(netip.MustParsePrefix(\"fe80::/10\")) // link-local\n\tinternetBuilder.RemovePrefix(netip.MustParsePrefix(\"169.254.0.0/16\"))\n\n\ttheInternetSet, _ := internetBuilder.IPSet()\n\n\treturn theInternetSet\n})\n"
  },
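Since `TheInternet()` memoises a `netipx.IPSet`, callers probe it with `Contains`. A quick standalone sketch (illustrative only) of which addresses survive the carve-outs above:

```go
package main

import (
	"fmt"
	"net/netip"

	"github.com/juanfont/headscale/hscontrol/util"
)

func main() {
	internet := util.TheInternet()

	for _, s := range []string{
		"8.8.8.8",    // public IPv4: true
		"2600::1",    // public IPv6 within 2000::/3: true
		"10.1.2.3",   // RFC 1918: false
		"100.64.0.1", // CGNAT/Tailscale range: false
		"fe80::1",    // link-local: false
	} {
		fmt.Printf("%-10s in TheInternet: %v\n", s, internet.Contains(netip.MustParseAddr(s)))
	}
}
```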
  {
    "path": "hscontrol/util/norace.go",
    "content": "//go:build !race\n\npackage util\n\n// RaceEnabled is true when the race detector is active.\nconst RaceEnabled = false\n"
  },
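norace.go and its race.go counterpart further down form the standard Go build-tag pair for detecting `-race` at compile time. A hypothetical test (not in the repository) showing how such a constant is typically consumed:

```go
package util

import "testing"

// TestStressSketch is an illustrative sketch: heavy loops shrink when
// the binary was built with the race detector enabled.
func TestStressSketch(t *testing.T) {
	iterations := 100_000
	if RaceEnabled {
		// The race detector slows execution substantially, so keep
		// -race CI runs fast by scaling the workload down.
		iterations = 1_000
	}

	for range iterations {
		_ = MustGenerateRandomStringDNSSafe(8)
	}
}
```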
  {
    "path": "hscontrol/util/prompt.go",
    "content": "package util\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n)\n\n// YesNo takes a question and prompts the user to answer the\n// question with a yes or no. It appends a [y/n] to the message.\n// The question is written to stderr so that content can be redirected\n// without interfering with the prompt.\nfunc YesNo(msg string) bool {\n\tfmt.Fprint(os.Stderr, msg+\" [y/n] \")\n\n\tvar resp string\n\n\t_, _ = fmt.Scanln(&resp)\n\n\tresp = strings.ToLower(resp)\n\tswitch resp {\n\tcase \"y\", \"yes\", \"sure\":\n\t\treturn true\n\t}\n\n\treturn false\n}\n"
  },
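Because the prompt goes to stderr, stdout stays clean for piping. A hypothetical CLI snippet (not from the repository) showing the intended flow:

```go
package main

import (
	"fmt"
	"os"

	"github.com/juanfont/headscale/hscontrol/util"
)

func main() {
	// The "[y/n] " suffix is appended by YesNo itself and written to stderr.
	if !util.YesNo("Delete node 'test-node'? This cannot be undone.") {
		fmt.Fprintln(os.Stderr, "aborted")
		os.Exit(1)
	}

	// Machine-readable output stays on stdout, e.g.:
	//   mycli delete | jq .deleted
	fmt.Println(`{"deleted": "test-node"}`)
}
```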
  {
    "path": "hscontrol/util/prompt_test.go",
    "content": "package util\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestYesNo(t *testing.T) {\n\ttests := []struct {\n\t\tname     string\n\t\tinput    string\n\t\texpected bool\n\t}{\n\t\t{\n\t\t\tname:     \"y answer\",\n\t\t\tinput:    \"y\\n\",\n\t\t\texpected: true,\n\t\t},\n\t\t{\n\t\t\tname:     \"Y answer\",\n\t\t\tinput:    \"Y\\n\",\n\t\t\texpected: true,\n\t\t},\n\t\t{\n\t\t\tname:     \"yes answer\",\n\t\t\tinput:    \"yes\\n\",\n\t\t\texpected: true,\n\t\t},\n\t\t{\n\t\t\tname:     \"YES answer\",\n\t\t\tinput:    \"YES\\n\",\n\t\t\texpected: true,\n\t\t},\n\t\t{\n\t\t\tname:     \"sure answer\",\n\t\t\tinput:    \"sure\\n\",\n\t\t\texpected: true,\n\t\t},\n\t\t{\n\t\t\tname:     \"SURE answer\",\n\t\t\tinput:    \"SURE\\n\",\n\t\t\texpected: true,\n\t\t},\n\t\t{\n\t\t\tname:     \"n answer\",\n\t\t\tinput:    \"n\\n\",\n\t\t\texpected: false,\n\t\t},\n\t\t{\n\t\t\tname:     \"no answer\",\n\t\t\tinput:    \"no\\n\",\n\t\t\texpected: false,\n\t\t},\n\t\t{\n\t\t\tname:     \"empty answer\",\n\t\t\tinput:    \"\\n\",\n\t\t\texpected: false,\n\t\t},\n\t\t{\n\t\t\tname:     \"invalid answer\",\n\t\t\tinput:    \"maybe\\n\",\n\t\t\texpected: false,\n\t\t},\n\t\t{\n\t\t\tname:     \"random text\",\n\t\t\tinput:    \"foobar\\n\",\n\t\t\texpected: false,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\t// Capture stdin\n\t\t\toldStdin := os.Stdin\n\t\t\tr, w, _ := os.Pipe()\n\t\t\tos.Stdin = r\n\n\t\t\t// Capture stderr\n\t\t\toldStderr := os.Stderr\n\t\t\tstderrR, stderrW, _ := os.Pipe()\n\t\t\tos.Stderr = stderrW\n\n\t\t\t// Write test input\n\t\t\tgo func() {\n\t\t\t\tdefer w.Close()\n\n\t\t\t\t_, _ = w.WriteString(tt.input)\n\t\t\t}()\n\n\t\t\t// Call the function\n\t\t\tresult := YesNo(\"Test question\")\n\n\t\t\t// Restore stdin and stderr\n\t\t\tos.Stdin = oldStdin\n\t\t\tos.Stderr = oldStderr\n\n\t\t\tstderrW.Close()\n\n\t\t\t// Check the result\n\t\t\tif result != tt.expected {\n\t\t\t\tt.Errorf(\"YesNo() = %v, want %v\", result, tt.expected)\n\t\t\t}\n\n\t\t\t// Check that the prompt was written to stderr\n\t\t\tvar stderrBuf bytes.Buffer\n\n\t\t\t_, _ = io.Copy(&stderrBuf, stderrR)\n\t\t\tstderrR.Close()\n\n\t\t\texpectedPrompt := \"Test question [y/n] \"\n\n\t\t\tactualPrompt := stderrBuf.String()\n\t\t\tif actualPrompt != expectedPrompt {\n\t\t\t\tt.Errorf(\"Expected prompt %q, got %q\", expectedPrompt, actualPrompt)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestYesNoPromptMessage(t *testing.T) {\n\t// Capture stdin\n\toldStdin := os.Stdin\n\tr, w, _ := os.Pipe()\n\tos.Stdin = r\n\n\t// Capture stderr\n\toldStderr := os.Stderr\n\tstderrR, stderrW, _ := os.Pipe()\n\tos.Stderr = stderrW\n\n\t// Write test input\n\tgo func() {\n\t\tdefer w.Close()\n\n\t\t_, _ = w.WriteString(\"n\\n\")\n\t}()\n\n\t// Call the function with a custom message\n\tcustomMessage := \"Do you want to continue with this dangerous operation?\"\n\tYesNo(customMessage)\n\n\t// Restore stdin and stderr\n\tos.Stdin = oldStdin\n\tos.Stderr = oldStderr\n\n\tstderrW.Close()\n\n\t// Check that the custom message was included in the prompt\n\tvar stderrBuf bytes.Buffer\n\n\t_, _ = io.Copy(&stderrBuf, stderrR)\n\tstderrR.Close()\n\n\texpectedPrompt := customMessage + \" [y/n] \"\n\n\tactualPrompt := stderrBuf.String()\n\tif actualPrompt != expectedPrompt {\n\t\tt.Errorf(\"Expected prompt %q, got %q\", expectedPrompt, actualPrompt)\n\t}\n}\n\nfunc TestYesNoCaseInsensitive(t *testing.T) {\n\ttestCases := []struct {\n\t\tinput    
string\n\t\texpected bool\n\t}{\n\t\t{\"y\\n\", true},\n\t\t{\"Y\\n\", true},\n\t\t{\"yes\\n\", true},\n\t\t{\"Yes\\n\", true},\n\t\t{\"YES\\n\", true},\n\t\t{\"yEs\\n\", true},\n\t\t{\"sure\\n\", true},\n\t\t{\"Sure\\n\", true},\n\t\t{\"SURE\\n\", true},\n\t\t{\"SuRe\\n\", true},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(\"input_\"+strings.TrimSpace(tc.input), func(t *testing.T) {\n\t\t\t// Capture stdin\n\t\t\toldStdin := os.Stdin\n\t\t\tr, w, _ := os.Pipe()\n\t\t\tos.Stdin = r\n\n\t\t\t// Capture stderr to avoid output during tests\n\t\t\toldStderr := os.Stderr\n\t\t\tstderrR, stderrW, _ := os.Pipe()\n\t\t\tos.Stderr = stderrW\n\n\t\t\t// Write test input\n\t\t\tgo func() {\n\t\t\t\tdefer w.Close()\n\n\t\t\t\t_, _ = w.WriteString(tc.input)\n\t\t\t}()\n\n\t\t\t// Call the function\n\t\t\tresult := YesNo(\"Test\")\n\n\t\t\t// Restore stdin and stderr\n\t\t\tos.Stdin = oldStdin\n\t\t\tos.Stderr = oldStderr\n\n\t\t\tstderrW.Close()\n\n\t\t\t// Drain stderr\n\t\t\t_, _ = io.Copy(io.Discard, stderrR)\n\t\t\tstderrR.Close()\n\n\t\t\tif result != tc.expected {\n\t\t\t\tt.Errorf(\"Input %q: expected %v, got %v\", strings.TrimSpace(tc.input), tc.expected, result)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "hscontrol/util/race.go",
    "content": "//go:build race\n\npackage util\n\n// RaceEnabled is true when the race detector is active.\nconst RaceEnabled = true\n"
  },
  {
    "path": "hscontrol/util/string.go",
    "content": "package util\n\nimport (\n\t\"crypto/rand\"\n\t\"encoding/base64\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"tailscale.com/tailcfg\"\n)\n\n// GenerateRandomBytes returns securely generated random bytes.\n// It will return an error if the system's secure random\n// number generator fails to function correctly, in which\n// case the caller should not continue.\nfunc GenerateRandomBytes(n int) ([]byte, error) {\n\tbytes := make([]byte, n)\n\n\t// Note that err == nil only if we read len(b) bytes.\n\tif _, err := rand.Read(bytes); err != nil { //nolint:noinlineerr\n\t\treturn nil, err\n\t}\n\n\treturn bytes, nil\n}\n\n// GenerateRandomStringURLSafe returns a URL-safe, base64 encoded\n// securely generated random string.\n// It will return an error if the system's secure random\n// number generator fails to function correctly, in which\n// case the caller should not continue.\nfunc GenerateRandomStringURLSafe(n int) (string, error) {\n\tb, err := GenerateRandomBytes(n)\n\n\tuenc := base64.RawURLEncoding.EncodeToString(b)\n\n\treturn uenc[:n], err\n}\n\n// GenerateRandomStringDNSSafe returns a DNS-safe\n// securely generated random string.\n// It will return an error if the system's secure random\n// number generator fails to function correctly, in which\n// case the caller should not continue.\nfunc GenerateRandomStringDNSSafe(size int) (string, error) {\n\tvar (\n\t\tstr string\n\t\terr error\n\t)\n\n\tfor len(str) < size {\n\t\tstr, err = GenerateRandomStringURLSafe(size)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tstr = strings.ToLower(\n\t\t\tstrings.ReplaceAll(strings.ReplaceAll(str, \"_\", \"\"), \"-\", \"\"),\n\t\t)\n\t}\n\n\treturn str[:size], nil\n}\n\nfunc MustGenerateRandomStringDNSSafe(size int) string {\n\thash, err := GenerateRandomStringDNSSafe(size)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn hash\n}\n\nfunc InvalidString() string {\n\thash, _ := GenerateRandomStringDNSSafe(8)\n\treturn \"invalid-\" + hash\n}\n\nfunc TailNodesToString(nodes []*tailcfg.Node) string {\n\ttemp := make([]string, len(nodes))\n\n\tfor index, node := range nodes {\n\t\ttemp[index] = node.Name\n\t}\n\n\treturn fmt.Sprintf(\"[ %s ](%d)\", strings.Join(temp, \", \"), len(temp))\n}\n\nfunc TailMapResponseToString(resp tailcfg.MapResponse) string {\n\treturn fmt.Sprintf(\n\t\t\"{ Node: %s, Peers: %s }\",\n\t\tresp.Node.Name,\n\t\tTailNodesToString(resp.Peers),\n\t)\n}\n\nfunc TailcfgFilterRulesToString(rules []tailcfg.FilterRule) string {\n\tvar sb strings.Builder\n\n\tfor index, rule := range rules {\n\t\tsb.WriteString(fmt.Sprintf(`\n{\n  SrcIPs: %v\n  DstIPs: %v\n}\n`, rule.SrcIPs, rule.DstPorts))\n\n\t\tif index < len(rules)-1 {\n\t\t\tsb.WriteString(\", \")\n\t\t}\n\t}\n\n\treturn fmt.Sprintf(\"[ %s ](%d)\", sb.String(), len(rules))\n}\n"
  },
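The retry loop in `GenerateRandomStringDNSSafe` exists because `RawURLEncoding` output may contain `-` and `_`, and stripping them can leave fewer than the requested characters. A standalone sketch of that shrinkage (the candidate string is a contrived example):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Eight URL-safe base64 characters that happen to include '-' and '_'.
	candidate := "Ab-c_dEf"

	cleaned := strings.ToLower(
		strings.ReplaceAll(strings.ReplaceAll(candidate, "_", ""), "-", ""),
	)

	// Only six characters survive, so a caller asking for eight
	// must draw a fresh random string and try again.
	fmt.Println(cleaned, len(cleaned)) // abcdef 6
}
```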
  {
    "path": "hscontrol/util/string_test.go",
    "content": "package util\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestGenerateRandomStringDNSSafe(t *testing.T) {\n\tfor range 100000 {\n\t\tstr, err := GenerateRandomStringDNSSafe(8)\n\t\trequire.NoError(t, err)\n\t\tassert.Len(t, str, 8)\n\t}\n}\n"
  },
  {
    "path": "hscontrol/util/test.go",
    "content": "package util\n\nimport (\n\t\"net/netip\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"tailscale.com/types/ipproto\"\n\t\"tailscale.com/types/key\"\n\t\"tailscale.com/types/views\"\n)\n\nvar PrefixComparer = cmp.Comparer(func(x, y netip.Prefix) bool {\n\treturn x.Compare(y) == 0\n})\n\nvar IPComparer = cmp.Comparer(func(x, y netip.Addr) bool {\n\treturn x.Compare(y) == 0\n})\n\nvar AddrPortComparer = cmp.Comparer(func(x, y netip.AddrPort) bool {\n\treturn x == y\n})\n\nvar MkeyComparer = cmp.Comparer(func(x, y key.MachinePublic) bool {\n\treturn x.String() == y.String()\n})\n\nvar NkeyComparer = cmp.Comparer(func(x, y key.NodePublic) bool {\n\treturn x.String() == y.String()\n})\n\nvar DkeyComparer = cmp.Comparer(func(x, y key.DiscoPublic) bool {\n\treturn x.String() == y.String()\n})\n\nvar ViewSliceIPProtoComparer = cmp.Comparer(views.SliceEqual[ipproto.Proto])\n\nvar Comparers []cmp.Option = []cmp.Option{\n\tIPComparer, PrefixComparer, AddrPortComparer, MkeyComparer, NkeyComparer, DkeyComparer, ViewSliceIPProtoComparer,\n}\n"
  },
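netip types carry unexported fields, so reflection-based diffing needs the comparers above. A sketch with a hypothetical `endpoint` struct showing how the bundled `Comparers` slice plugs into `cmp.Diff`:

```go
package main

import (
	"fmt"
	"net/netip"

	"github.com/google/go-cmp/cmp"
	"github.com/juanfont/headscale/hscontrol/util"
)

type endpoint struct {
	Addr   netip.Addr
	Routes []netip.Prefix
}

func main() {
	a := endpoint{
		Addr:   netip.MustParseAddr("100.64.0.1"),
		Routes: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/8")},
	}
	b := a
	b.Addr = netip.MustParseAddr("100.64.0.2")

	// Without util.Comparers, cmp.Diff would panic on netip's
	// unexported fields instead of reporting the difference.
	if diff := cmp.Diff(a, b, util.Comparers...); diff != "" {
		fmt.Printf("mismatch (-a +b):\n%s", diff)
	}
}
```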
  {
    "path": "hscontrol/util/util.go",
    "content": "package util\n\nimport (\n\t\"cmp\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/netip\"\n\t\"net/url\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/util/cmpver\"\n)\n\n// URL parsing errors.\nvar (\n\tErrMultipleURLsFound     = errors.New(\"multiple URLs found\")\n\tErrNoURLFound            = errors.New(\"no URL found\")\n\tErrEmptyTracerouteOutput = errors.New(\"empty traceroute output\")\n\tErrTracerouteHeaderParse = errors.New(\"parsing traceroute header\")\n\tErrTracerouteDidNotReach = errors.New(\"traceroute did not reach target\")\n)\n\nfunc TailscaleVersionNewerOrEqual(minimum, toCheck string) bool {\n\tif cmpver.Compare(minimum, toCheck) <= 0 ||\n\t\ttoCheck == \"unstable\" ||\n\t\ttoCheck == \"head\" {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n// ParseLoginURLFromCLILogin parses the output of the tailscale up command to extract the login URL.\n// It returns an error if not exactly one URL is found.\nfunc ParseLoginURLFromCLILogin(output string) (*url.URL, error) {\n\tlines := strings.Split(output, \"\\n\")\n\n\tvar urlStr string\n\n\tfor _, line := range lines {\n\t\tline = strings.TrimSpace(line)\n\t\tif strings.HasPrefix(line, \"http://\") || strings.HasPrefix(line, \"https://\") {\n\t\t\tif urlStr != \"\" {\n\t\t\t\treturn nil, fmt.Errorf(\"%w: %s and %s\", ErrMultipleURLsFound, urlStr, line)\n\t\t\t}\n\n\t\t\turlStr = line\n\t\t}\n\t}\n\n\tif urlStr == \"\" {\n\t\treturn nil, ErrNoURLFound\n\t}\n\n\tloginURL, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"parsing URL: %w\", err)\n\t}\n\n\treturn loginURL, nil\n}\n\ntype TraceroutePath struct {\n\t// Hop is the current jump in the total traceroute.\n\tHop int\n\n\t// Hostname is the resolved hostname or IP address identifying the jump\n\tHostname string\n\n\t// IP is the IP address of the jump\n\tIP netip.Addr\n\n\t// Latencies is a list of the latencies for this jump\n\tLatencies []time.Duration\n}\n\ntype Traceroute struct {\n\t// Hostname is the resolved hostname or IP address identifying the target\n\tHostname string\n\n\t// IP is the IP address of the target\n\tIP netip.Addr\n\n\t// Route is the path taken to reach the target if successful. 
The list is ordered by the path taken.\n\tRoute []TraceroutePath\n\n\t// Success indicates if the traceroute was successful.\n\tSuccess bool\n\n\t// Err contains an error if  the traceroute was not successful.\n\tErr error\n}\n\n// ParseTraceroute parses the output of the traceroute command and returns a Traceroute struct.\nfunc ParseTraceroute(output string) (Traceroute, error) {\n\tlines := strings.Split(strings.TrimSpace(output), \"\\n\")\n\tif len(lines) < 1 {\n\t\treturn Traceroute{}, ErrEmptyTracerouteOutput\n\t}\n\n\t// Parse the header line - handle both 'traceroute' and 'tracert' (Windows)\n\theaderRegex := regexp.MustCompile(`(?i)(?:traceroute|tracing route) to ([^ ]+) (?:\\[([^\\]]+)\\]|\\(([^)]+)\\))`)\n\n\theaderMatches := headerRegex.FindStringSubmatch(lines[0])\n\tif len(headerMatches) < 2 {\n\t\treturn Traceroute{}, fmt.Errorf(\"%w: %s\", ErrTracerouteHeaderParse, lines[0])\n\t}\n\n\thostname := headerMatches[1]\n\t// IP can be in either capture group 2 or 3 depending on format\n\tipStr := headerMatches[2]\n\tif ipStr == \"\" {\n\t\tipStr = headerMatches[3]\n\t}\n\n\tip, err := netip.ParseAddr(ipStr)\n\tif err != nil {\n\t\treturn Traceroute{}, fmt.Errorf(\"parsing IP address %s: %w\", ipStr, err)\n\t}\n\n\tresult := Traceroute{\n\t\tHostname: hostname,\n\t\tIP:       ip,\n\t\tRoute:    []TraceroutePath{},\n\t\tSuccess:  false,\n\t}\n\n\t// More flexible regex that handles various traceroute output formats\n\t// Main pattern handles: \"hostname (IP)\", \"hostname [IP]\", \"IP only\", \"* * *\"\n\thopRegex := regexp.MustCompile(`^\\s*(\\d+)\\s+(.*)$`)\n\t// Patterns for parsing the hop details\n\thostIPRegex := regexp.MustCompile(`^([^ ]+) \\(([^)]+)\\)`)\n\thostIPBracketRegex := regexp.MustCompile(`^([^ ]+) \\[([^\\]]+)\\]`)\n\t// Pattern for latencies with flexible spacing and optional '<'\n\tlatencyRegex := regexp.MustCompile(`(<?\\d+(?:\\.\\d+)?)\\s*ms\\b`)\n\n\tfor i := 1; i < len(lines); i++ {\n\t\tline := strings.TrimSpace(lines[i])\n\t\tif line == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tmatches := hopRegex.FindStringSubmatch(line)\n\t\tif len(matches) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\thop, err := strconv.Atoi(matches[1])\n\t\tif err != nil {\n\t\t\t// Skip lines that don't start with a hop number\n\t\t\tcontinue\n\t\t}\n\n\t\tremainder := strings.TrimSpace(matches[2])\n\n\t\tvar (\n\t\t\thopHostname string\n\t\t\thopIP       netip.Addr\n\t\t\tlatencies   []time.Duration\n\t\t)\n\n\t\t// Check for Windows tracert format which has latencies before hostname\n\t\t// Format: \"  1    <1 ms    <1 ms    <1 ms  router.local [192.168.1.1]\"\n\t\tlatencyFirst := false\n\n\t\tif strings.Contains(remainder, \" ms \") && !strings.HasPrefix(remainder, \"*\") {\n\t\t\t// Check if latencies appear before any hostname/IP\n\t\t\tfirstSpace := strings.Index(remainder, \" \")\n\t\t\tif firstSpace > 0 {\n\t\t\t\tfirstPart := remainder[:firstSpace]\n\t\t\t\tif _, err := strconv.ParseFloat(strings.TrimPrefix(firstPart, \"<\"), 64); err == nil { //nolint:noinlineerr\n\t\t\t\t\tlatencyFirst = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif latencyFirst {\n\t\t\t// Windows format: extract latencies first\n\t\t\tfor {\n\t\t\t\tlatMatch := latencyRegex.FindStringSubmatchIndex(remainder)\n\t\t\t\tif latMatch == nil || latMatch[0] > 0 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\t// Extract and remove the latency from the beginning\n\t\t\t\tlatStr := strings.TrimPrefix(remainder[latMatch[2]:latMatch[3]], \"<\")\n\n\t\t\t\tms, err := strconv.ParseFloat(latStr, 64)\n\t\t\t\tif err == nil {\n\t\t\t\t\t// Round 
to nearest microsecond to avoid floating point precision issues\n\t\t\t\t\tduration := time.Duration(ms * float64(time.Millisecond))\n\t\t\t\t\tlatencies = append(latencies, duration.Round(time.Microsecond))\n\t\t\t\t}\n\n\t\t\t\tremainder = strings.TrimSpace(remainder[latMatch[1]:])\n\t\t\t}\n\t\t}\n\n\t\t// Now parse hostname/IP from remainder\n\t\tif strings.HasPrefix(remainder, \"*\") {\n\t\t\t// Timeout hop\n\t\t\thopHostname = \"*\"\n\t\t\t// Skip any remaining asterisks\n\t\t\tremainder = strings.TrimLeft(remainder, \"* \")\n\t\t} else if hostMatch := hostIPRegex.FindStringSubmatch(remainder); len(hostMatch) >= 3 {\n\t\t\t// Format: hostname (IP)\n\t\t\thopHostname = hostMatch[1]\n\t\t\thopIP, _ = netip.ParseAddr(hostMatch[2])\n\t\t\tremainder = strings.TrimSpace(remainder[len(hostMatch[0]):])\n\t\t} else if hostMatch := hostIPBracketRegex.FindStringSubmatch(remainder); len(hostMatch) >= 3 {\n\t\t\t// Format: hostname [IP] (Windows)\n\t\t\thopHostname = hostMatch[1]\n\t\t\thopIP, _ = netip.ParseAddr(hostMatch[2])\n\t\t\tremainder = strings.TrimSpace(remainder[len(hostMatch[0]):])\n\t\t} else {\n\t\t\t// Try to parse as IP only or hostname only\n\t\t\tparts := strings.Fields(remainder)\n\t\t\tif len(parts) > 0 {\n\t\t\t\thopHostname = parts[0]\n\t\t\t\tif ip, err := netip.ParseAddr(parts[0]); err == nil { //nolint:noinlineerr\n\t\t\t\t\thopIP = ip\n\t\t\t\t}\n\n\t\t\t\tremainder = strings.TrimSpace(strings.Join(parts[1:], \" \"))\n\t\t\t}\n\t\t}\n\n\t\t// Extract latencies from the remaining part (if not already done)\n\t\tif !latencyFirst {\n\t\t\tlatencyMatches := latencyRegex.FindAllStringSubmatch(remainder, -1)\n\t\t\tfor _, match := range latencyMatches {\n\t\t\t\tif len(match) > 1 {\n\t\t\t\t\t// Remove '<' prefix if present (e.g., \"<1 ms\")\n\t\t\t\t\tlatStr := strings.TrimPrefix(match[1], \"<\")\n\n\t\t\t\t\tms, err := strconv.ParseFloat(latStr, 64)\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\t// Round to nearest microsecond to avoid floating point precision issues\n\t\t\t\t\t\tduration := time.Duration(ms * float64(time.Millisecond))\n\t\t\t\t\t\tlatencies = append(latencies, duration.Round(time.Microsecond))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tpath := TraceroutePath{\n\t\t\tHop:       hop,\n\t\t\tHostname:  hopHostname,\n\t\t\tIP:        hopIP,\n\t\t\tLatencies: latencies,\n\t\t}\n\n\t\tresult.Route = append(result.Route, path)\n\n\t\t// Check if we've reached the target\n\t\tif hopIP == ip {\n\t\t\tresult.Success = true\n\t\t}\n\t}\n\n\t// If we didn't reach the target, it's unsuccessful\n\tif !result.Success {\n\t\tresult.Err = ErrTracerouteDidNotReach\n\t}\n\n\treturn result, nil\n}\n\nfunc IsCI() bool {\n\tif _, ok := os.LookupEnv(\"CI\"); ok {\n\t\treturn true\n\t}\n\n\tif _, ok := os.LookupEnv(\"GITHUB_RUN_ID\"); ok {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n// EnsureHostname guarantees a valid hostname for node registration.\n// It extracts a hostname from Hostinfo, providing sensible defaults\n// if Hostinfo is nil or Hostname is empty. This prevents nil pointer dereferences\n// and ensures nodes always have a valid hostname.\n// Hostnames must fit the 63-character DNS label limit (RFC 1123); names that\n// fail validation are replaced with a generated one rather than truncated.\n// This function never fails - it always returns a valid hostname.\n//\n// Strategy:\n// 1. If hostinfo is nil/empty → generate default from keys\n// 2. If hostname is provided → lowercase and validate it\n// 3. If validation fails → generate invalid-<random> replacement\n//\n// Returns the guaranteed-valid hostname to use.\nfunc EnsureHostname(hostinfo tailcfg.HostinfoView, machineKey, nodeKey string) string {\n\tif !hostinfo.Valid() || hostinfo.Hostname() == \"\" {\n\t\tkey := cmp.Or(machineKey, nodeKey)\n\t\tif key == \"\" {\n\t\t\treturn \"unknown-node\"\n\t\t}\n\n\t\tkeyPrefix := key\n\t\tif len(key) > 8 {\n\t\t\tkeyPrefix = key[:8]\n\t\t}\n\n\t\treturn \"node-\" + keyPrefix\n\t}\n\n\tlowercased := strings.ToLower(hostinfo.Hostname())\n\n\terr := ValidateHostname(lowercased)\n\tif err == nil {\n\t\treturn lowercased\n\t}\n\n\treturn InvalidString()\n}\n\n// GenerateRegistrationKey generates a vanity key for tracking web authentication\n// registration flows in logs. This key is NOT stored in the database and does NOT use bcrypt -\n// it's purely for observability and correlating log entries during the registration process.\nfunc GenerateRegistrationKey() (string, error) {\n\tconst (\n\t\tregisterKeyPrefix = \"hskey-reg-\" //nolint:gosec // This is a vanity key for logging, not a credential\n\t\tregisterKeyLength = 64\n\t)\n\n\trandomPart, err := GenerateRandomStringURLSafe(registerKeyLength)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"generating registration key: %w\", err)\n\t}\n\n\treturn registerKeyPrefix + randomPart, nil\n}\n"
  },
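A quick standalone sketch of `ParseTraceroute` on canned output (the sample matches the first case in the test file that follows): the parser extracts the target from the header line, builds one `TraceroutePath` per hop, and marks `Success` once a hop's IP equals the target.

```go
package main

import (
	"fmt"

	"github.com/juanfont/headscale/hscontrol/util"
)

func main() {
	out := `traceroute to 172.24.0.3 (172.24.0.3), 30 hops max, 46 byte packets
 1  ts-head-hk0urr.headscale.net (100.64.0.1)  1.135 ms  0.922 ms  0.619 ms
 2  172.24.0.3 (172.24.0.3)  0.593 ms  0.549 ms  0.522 ms`

	tr, err := util.ParseTraceroute(out)
	if err != nil {
		panic(err)
	}

	fmt.Println(tr.Success) // true: hop 2 matches the target IP
	for _, hop := range tr.Route {
		fmt.Printf("%d: %s (%s) %v\n", hop.Hop, hop.Hostname, hop.IP, hop.Latencies)
	}
}
```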
  {
    "path": "hscontrol/util/util_test.go",
    "content": "package util\n\nimport (\n\t\"net/netip\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"tailscale.com/tailcfg\"\n)\n\nconst testUnknownNode = \"unknown-node\"\n\nfunc TestTailscaleVersionNewerOrEqual(t *testing.T) {\n\ttype args struct {\n\t\tminimum string\n\t\ttoCheck string\n\t}\n\n\ttests := []struct {\n\t\tname string\n\t\targs args\n\t\twant bool\n\t}{\n\t\t{\n\t\t\tname: \"is-equal\",\n\t\t\targs: args{\n\t\t\t\tminimum: \"1.56\",\n\t\t\t\ttoCheck: \"1.56\",\n\t\t\t},\n\t\t\twant: true,\n\t\t},\n\t\t{\n\t\t\tname: \"is-newer-head\",\n\t\t\targs: args{\n\t\t\t\tminimum: \"1.56\",\n\t\t\t\ttoCheck: \"head\",\n\t\t\t},\n\t\t\twant: true,\n\t\t},\n\t\t{\n\t\t\tname: \"is-newer-unstable\",\n\t\t\targs: args{\n\t\t\t\tminimum: \"1.56\",\n\t\t\t\ttoCheck: \"unstable\",\n\t\t\t},\n\t\t\twant: true,\n\t\t},\n\t\t{\n\t\t\tname: \"is-newer-patch\",\n\t\t\targs: args{\n\t\t\t\tminimum: \"1.56.1\",\n\t\t\t\ttoCheck: \"1.56.1\",\n\t\t\t},\n\t\t\twant: true,\n\t\t},\n\t\t{\n\t\t\tname: \"is-older-patch-same-minor\",\n\t\t\targs: args{\n\t\t\t\tminimum: \"1.56.1\",\n\t\t\t\ttoCheck: \"1.56.0\",\n\t\t\t},\n\t\t\twant: false,\n\t\t},\n\t\t{\n\t\t\tname: \"is-older-unstable\",\n\t\t\targs: args{\n\t\t\t\tminimum: \"1.56\",\n\t\t\t\ttoCheck: \"1.55\",\n\t\t\t},\n\t\t\twant: false,\n\t\t},\n\t\t{\n\t\t\tname: \"is-older-one-stable\",\n\t\t\targs: args{\n\t\t\t\tminimum: \"1.56\",\n\t\t\t\ttoCheck: \"1.54\",\n\t\t\t},\n\t\t\twant: false,\n\t\t},\n\t\t{\n\t\t\tname: \"is-older-five-stable\",\n\t\t\targs: args{\n\t\t\t\tminimum: \"1.56\",\n\t\t\t\ttoCheck: \"1.46\",\n\t\t\t},\n\t\t\twant: false,\n\t\t},\n\t\t{\n\t\t\tname: \"is-older-patch\",\n\t\t\targs: args{\n\t\t\t\tminimum: \"1.56\",\n\t\t\t\ttoCheck: \"1.48.1\",\n\t\t\t},\n\t\t\twant: false,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tif got := TailscaleVersionNewerOrEqual(tt.args.minimum, tt.args.toCheck); got != tt.want {\n\t\t\t\tt.Errorf(\"TailscaleVersionNewerThan() = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestParseLoginURLFromCLILogin(t *testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\toutput  string\n\t\twantURL string\n\t\twantErr string\n\t}{\n\t\t{\n\t\t\tname: \"valid https URL\",\n\t\t\toutput: `\nTo authenticate, visit:\n\n        https://headscale.example.com/register/3oYCOZYA2zZmGB4PQ7aHBaMi\n\nSuccess.`,\n\t\t\twantURL: \"https://headscale.example.com/register/3oYCOZYA2zZmGB4PQ7aHBaMi\",\n\t\t\twantErr: \"\",\n\t\t},\n\t\t{\n\t\t\tname: \"valid http URL\",\n\t\t\toutput: `\nTo authenticate, visit:\n\n        http://headscale.example.com/register/3oYCOZYA2zZmGB4PQ7aHBaMi\n\nSuccess.`,\n\t\t\twantURL: \"http://headscale.example.com/register/3oYCOZYA2zZmGB4PQ7aHBaMi\",\n\t\t\twantErr: \"\",\n\t\t},\n\t\t{\n\t\t\tname: \"no URL\",\n\t\t\toutput: `\nTo authenticate, visit:\n\nSuccess.`,\n\t\t\twantURL: \"\",\n\t\t\twantErr: \"no URL found\",\n\t\t},\n\t\t{\n\t\t\tname: \"multiple URLs\",\n\t\t\toutput: `\nTo authenticate, visit:\n\n        https://headscale.example.com/register/3oYCOZYA2zZmGB4PQ7aHBaMi\n\nTo authenticate, visit:\n\n        http://headscale.example.com/register/dv1l2k5FackOYl-7-V3mSd_E\n\nSuccess.`,\n\t\t\twantURL: \"\",\n\t\t\twantErr: \"multiple URLs found: https://headscale.example.com/register/3oYCOZYA2zZmGB4PQ7aHBaMi and http://headscale.example.com/register/dv1l2k5FackOYl-7-V3mSd_E\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid URL\",\n\t\t\toutput: `\nTo authenticate, visit:\n\n        
invalid-url\n\nSuccess.`,\n\t\t\twantURL: \"\",\n\t\t\twantErr: \"no URL found\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgotURL, err := ParseLoginURLFromCLILogin(tt.output)\n\t\t\tif tt.wantErr != \"\" {\n\t\t\t\tif err == nil || err.Error() != tt.wantErr {\n\t\t\t\t\tt.Errorf(\"ParseLoginURLFromCLILogin() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"ParseLoginURLFromCLILogin() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\t}\n\n\t\t\t\tif gotURL.String() != tt.wantURL {\n\t\t\t\t\tt.Errorf(\"ParseLoginURLFromCLILogin() = %v, want %v\", gotURL, tt.wantURL)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestParseTraceroute(t *testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\tinput   string\n\t\twant    Traceroute\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"simple successful traceroute\",\n\t\t\tinput: `traceroute to 172.24.0.3 (172.24.0.3), 30 hops max, 46 byte packets\n 1  ts-head-hk0urr.headscale.net (100.64.0.1)  1.135 ms  0.922 ms  0.619 ms\n 2  172.24.0.3 (172.24.0.3)  0.593 ms  0.549 ms  0.522 ms`,\n\t\t\twant: Traceroute{\n\t\t\t\tHostname: \"172.24.0.3\",\n\t\t\t\tIP:       netip.MustParseAddr(\"172.24.0.3\"),\n\t\t\t\tRoute: []TraceroutePath{\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      1,\n\t\t\t\t\t\tHostname: \"ts-head-hk0urr.headscale.net\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"100.64.0.1\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t1135 * time.Microsecond,\n\t\t\t\t\t\t\t922 * time.Microsecond,\n\t\t\t\t\t\t\t619 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      2,\n\t\t\t\t\t\tHostname: \"172.24.0.3\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"172.24.0.3\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t593 * time.Microsecond,\n\t\t\t\t\t\t\t549 * time.Microsecond,\n\t\t\t\t\t\t\t522 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSuccess: true,\n\t\t\t\tErr:     nil,\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"traceroute with timeouts\",\n\t\t\tinput: `traceroute to 8.8.8.8 (8.8.8.8), 30 hops max, 60 byte packets\n 1  router.local (192.168.1.1)  1.234 ms  1.123 ms  1.121 ms\n 2  * * *\n 3  isp-gateway.net (10.0.0.1)  15.678 ms  14.789 ms  15.432 ms\n 4  8.8.8.8 (8.8.8.8)  20.123 ms  19.876 ms  20.345 ms`,\n\t\t\twant: Traceroute{\n\t\t\t\tHostname: \"8.8.8.8\",\n\t\t\t\tIP:       netip.MustParseAddr(\"8.8.8.8\"),\n\t\t\t\tRoute: []TraceroutePath{\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      1,\n\t\t\t\t\t\tHostname: \"router.local\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"192.168.1.1\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t1234 * time.Microsecond,\n\t\t\t\t\t\t\t1123 * time.Microsecond,\n\t\t\t\t\t\t\t1121 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      2,\n\t\t\t\t\t\tHostname: \"*\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      3,\n\t\t\t\t\t\tHostname: \"isp-gateway.net\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"10.0.0.1\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t15678 * time.Microsecond,\n\t\t\t\t\t\t\t14789 * time.Microsecond,\n\t\t\t\t\t\t\t15432 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      4,\n\t\t\t\t\t\tHostname: \"8.8.8.8\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"8.8.8.8\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t20123 * time.Microsecond,\n\t\t\t\t\t\t\t19876 * 
time.Microsecond,\n\t\t\t\t\t\t\t20345 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSuccess: true,\n\t\t\t\tErr:     nil,\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"unsuccessful traceroute\",\n\t\t\tinput: `traceroute to 10.0.0.99 (10.0.0.99), 5 hops max, 60 byte packets\n 1  router.local (192.168.1.1)  1.234 ms  1.123 ms  1.121 ms\n 2  * * *\n 3  * * *\n 4  * * *\n 5  * * *`,\n\t\t\twant: Traceroute{\n\t\t\t\tHostname: \"10.0.0.99\",\n\t\t\t\tIP:       netip.MustParseAddr(\"10.0.0.99\"),\n\t\t\t\tRoute: []TraceroutePath{\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      1,\n\t\t\t\t\t\tHostname: \"router.local\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"192.168.1.1\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t1234 * time.Microsecond,\n\t\t\t\t\t\t\t1123 * time.Microsecond,\n\t\t\t\t\t\t\t1121 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      2,\n\t\t\t\t\t\tHostname: \"*\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      3,\n\t\t\t\t\t\tHostname: \"*\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      4,\n\t\t\t\t\t\tHostname: \"*\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      5,\n\t\t\t\t\t\tHostname: \"*\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSuccess: false,\n\t\t\t\tErr:     ErrTracerouteDidNotReach,\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname:    \"empty input\",\n\t\t\tinput:   \"\",\n\t\t\twant:    Traceroute{},\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"invalid header\",\n\t\t\tinput:   \"not a valid traceroute output\",\n\t\t\twant:    Traceroute{},\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"windows tracert format\",\n\t\t\tinput: `Tracing route to google.com [8.8.8.8]\nover a maximum of 30 hops:\n\n  1    <1 ms    <1 ms    <1 ms  router.local [192.168.1.1]\n  2     5 ms     4 ms     5 ms  10.0.0.1\n  3     *        *        *     Request timed out.\n  4    20 ms    19 ms    21 ms  8.8.8.8`,\n\t\t\twant: Traceroute{\n\t\t\t\tHostname: \"google.com\",\n\t\t\t\tIP:       netip.MustParseAddr(\"8.8.8.8\"),\n\t\t\t\tRoute: []TraceroutePath{\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      1,\n\t\t\t\t\t\tHostname: \"router.local\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"192.168.1.1\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t1 * time.Millisecond,\n\t\t\t\t\t\t\t1 * time.Millisecond,\n\t\t\t\t\t\t\t1 * time.Millisecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      2,\n\t\t\t\t\t\tHostname: \"10.0.0.1\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"10.0.0.1\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t5 * time.Millisecond,\n\t\t\t\t\t\t\t4 * time.Millisecond,\n\t\t\t\t\t\t\t5 * time.Millisecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      3,\n\t\t\t\t\t\tHostname: \"*\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      4,\n\t\t\t\t\t\tHostname: \"8.8.8.8\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"8.8.8.8\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t20 * time.Millisecond,\n\t\t\t\t\t\t\t19 * time.Millisecond,\n\t\t\t\t\t\t\t21 * time.Millisecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSuccess: true,\n\t\t\t\tErr:     nil,\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"mixed latency formats\",\n\t\t\tinput: `traceroute to 192.168.1.1 (192.168.1.1), 30 hops max, 60 byte packets\n 1  gateway (192.168.1.1)  0.5 ms  *  0.4 ms`,\n\t\t\twant: Traceroute{\n\t\t\t\tHostname: \"192.168.1.1\",\n\t\t\t\tIP:       
netip.MustParseAddr(\"192.168.1.1\"),\n\t\t\t\tRoute: []TraceroutePath{\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      1,\n\t\t\t\t\t\tHostname: \"gateway\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"192.168.1.1\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t500 * time.Microsecond,\n\t\t\t\t\t\t\t400 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSuccess: true,\n\t\t\t\tErr:     nil,\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"only one latency value\",\n\t\t\tinput: `traceroute to 10.0.0.1 (10.0.0.1), 30 hops max, 60 byte packets\n 1  10.0.0.1 (10.0.0.1)  1.5 ms`,\n\t\t\twant: Traceroute{\n\t\t\t\tHostname: \"10.0.0.1\",\n\t\t\t\tIP:       netip.MustParseAddr(\"10.0.0.1\"),\n\t\t\t\tRoute: []TraceroutePath{\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      1,\n\t\t\t\t\t\tHostname: \"10.0.0.1\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"10.0.0.1\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t1500 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSuccess: true,\n\t\t\t\tErr:     nil,\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"backward compatibility - original format with 3 latencies\",\n\t\t\tinput: `traceroute to 172.24.0.3 (172.24.0.3), 30 hops max, 46 byte packets\n 1  ts-head-hk0urr.headscale.net (100.64.0.1)  1.135 ms  0.922 ms  0.619 ms\n 2  172.24.0.3 (172.24.0.3)  0.593 ms  0.549 ms  0.522 ms`,\n\t\t\twant: Traceroute{\n\t\t\t\tHostname: \"172.24.0.3\",\n\t\t\t\tIP:       netip.MustParseAddr(\"172.24.0.3\"),\n\t\t\t\tRoute: []TraceroutePath{\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      1,\n\t\t\t\t\t\tHostname: \"ts-head-hk0urr.headscale.net\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"100.64.0.1\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t1135 * time.Microsecond,\n\t\t\t\t\t\t\t922 * time.Microsecond,\n\t\t\t\t\t\t\t619 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      2,\n\t\t\t\t\t\tHostname: \"172.24.0.3\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"172.24.0.3\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t593 * time.Microsecond,\n\t\t\t\t\t\t\t549 * time.Microsecond,\n\t\t\t\t\t\t\t522 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSuccess: true,\n\t\t\t\tErr:     nil,\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"two latencies only - common on packet loss\",\n\t\t\tinput: `traceroute to 8.8.8.8 (8.8.8.8), 30 hops max, 60 byte packets\n 1  gateway (192.168.1.1)  1.2 ms  1.1 ms`,\n\t\t\twant: Traceroute{\n\t\t\t\tHostname: \"8.8.8.8\",\n\t\t\t\tIP:       netip.MustParseAddr(\"8.8.8.8\"),\n\t\t\t\tRoute: []TraceroutePath{\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      1,\n\t\t\t\t\t\tHostname: \"gateway\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"192.168.1.1\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t1200 * time.Microsecond,\n\t\t\t\t\t\t\t1100 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSuccess: false,\n\t\t\t\tErr:     ErrTracerouteDidNotReach,\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"hostname without parentheses - some traceroute versions\",\n\t\t\tinput: `traceroute to 8.8.8.8 (8.8.8.8), 30 hops max, 60 byte packets\n 1  192.168.1.1  1.2 ms  1.1 ms  1.0 ms\n 2  8.8.8.8  20.1 ms  19.9 ms  20.2 ms`,\n\t\t\twant: Traceroute{\n\t\t\t\tHostname: \"8.8.8.8\",\n\t\t\t\tIP:       netip.MustParseAddr(\"8.8.8.8\"),\n\t\t\t\tRoute: []TraceroutePath{\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      1,\n\t\t\t\t\t\tHostname: 
\"192.168.1.1\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"192.168.1.1\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t1200 * time.Microsecond,\n\t\t\t\t\t\t\t1100 * time.Microsecond,\n\t\t\t\t\t\t\t1000 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      2,\n\t\t\t\t\t\tHostname: \"8.8.8.8\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"8.8.8.8\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t20100 * time.Microsecond,\n\t\t\t\t\t\t\t19900 * time.Microsecond,\n\t\t\t\t\t\t\t20200 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSuccess: true,\n\t\t\t\tErr:     nil,\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"ipv6 traceroute\",\n\t\t\tinput: `traceroute to 2001:4860:4860::8888 (2001:4860:4860::8888), 30 hops max, 80 byte packets\n 1  2001:db8::1 (2001:db8::1)  1.123 ms  1.045 ms  0.987 ms\n 2  2001:4860:4860::8888 (2001:4860:4860::8888)  15.234 ms  14.876 ms  15.123 ms`,\n\t\t\twant: Traceroute{\n\t\t\t\tHostname: \"2001:4860:4860::8888\",\n\t\t\t\tIP:       netip.MustParseAddr(\"2001:4860:4860::8888\"),\n\t\t\t\tRoute: []TraceroutePath{\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      1,\n\t\t\t\t\t\tHostname: \"2001:db8::1\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"2001:db8::1\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t1123 * time.Microsecond,\n\t\t\t\t\t\t\t1045 * time.Microsecond,\n\t\t\t\t\t\t\t987 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      2,\n\t\t\t\t\t\tHostname: \"2001:4860:4860::8888\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"2001:4860:4860::8888\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t15234 * time.Microsecond,\n\t\t\t\t\t\t\t14876 * time.Microsecond,\n\t\t\t\t\t\t\t15123 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSuccess: true,\n\t\t\t\tErr:     nil,\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"macos traceroute with extra spacing\",\n\t\t\tinput: `traceroute to google.com (8.8.8.8), 64 hops max, 52 byte packets\n 1  router.home (192.168.1.1)   2.345 ms   1.234 ms   1.567 ms\n 2  * * *\n 3  isp-gw.net (10.1.1.1)   15.234 ms   14.567 ms   15.890 ms\n 4  google.com (8.8.8.8)   20.123 ms   19.456 ms   20.789 ms`,\n\t\t\twant: Traceroute{\n\t\t\t\tHostname: \"google.com\",\n\t\t\t\tIP:       netip.MustParseAddr(\"8.8.8.8\"),\n\t\t\t\tRoute: []TraceroutePath{\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      1,\n\t\t\t\t\t\tHostname: \"router.home\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"192.168.1.1\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t2345 * time.Microsecond,\n\t\t\t\t\t\t\t1234 * time.Microsecond,\n\t\t\t\t\t\t\t1567 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      2,\n\t\t\t\t\t\tHostname: \"*\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      3,\n\t\t\t\t\t\tHostname: \"isp-gw.net\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"10.1.1.1\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t15234 * time.Microsecond,\n\t\t\t\t\t\t\t14567 * time.Microsecond,\n\t\t\t\t\t\t\t15890 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      4,\n\t\t\t\t\t\tHostname: \"google.com\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"8.8.8.8\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t20123 * time.Microsecond,\n\t\t\t\t\t\t\t19456 * time.Microsecond,\n\t\t\t\t\t\t\t20789 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSuccess: true,\n\t\t\t\tErr:     
nil,\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"busybox traceroute minimal format\",\n\t\t\tinput: `traceroute to 10.0.0.1 (10.0.0.1), 30 hops max, 38 byte packets\n 1  10.0.0.1 (10.0.0.1)  1.234 ms  1.123 ms  1.456 ms`,\n\t\t\twant: Traceroute{\n\t\t\t\tHostname: \"10.0.0.1\",\n\t\t\t\tIP:       netip.MustParseAddr(\"10.0.0.1\"),\n\t\t\t\tRoute: []TraceroutePath{\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      1,\n\t\t\t\t\t\tHostname: \"10.0.0.1\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"10.0.0.1\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t1234 * time.Microsecond,\n\t\t\t\t\t\t\t1123 * time.Microsecond,\n\t\t\t\t\t\t\t1456 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSuccess: true,\n\t\t\t\tErr:     nil,\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"linux traceroute with dns failure fallback to IP\",\n\t\t\tinput: `traceroute to example.com (93.184.216.34), 30 hops max, 60 byte packets\n 1  192.168.1.1 (192.168.1.1)  1.234 ms  1.123 ms  1.098 ms\n 2  10.0.0.1 (10.0.0.1)  5.678 ms  5.432 ms  5.321 ms\n 3  93.184.216.34 (93.184.216.34)  20.123 ms  19.876 ms  20.234 ms`,\n\t\t\twant: Traceroute{\n\t\t\t\tHostname: \"example.com\",\n\t\t\t\tIP:       netip.MustParseAddr(\"93.184.216.34\"),\n\t\t\t\tRoute: []TraceroutePath{\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      1,\n\t\t\t\t\t\tHostname: \"192.168.1.1\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"192.168.1.1\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t1234 * time.Microsecond,\n\t\t\t\t\t\t\t1123 * time.Microsecond,\n\t\t\t\t\t\t\t1098 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      2,\n\t\t\t\t\t\tHostname: \"10.0.0.1\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"10.0.0.1\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t5678 * time.Microsecond,\n\t\t\t\t\t\t\t5432 * time.Microsecond,\n\t\t\t\t\t\t\t5321 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      3,\n\t\t\t\t\t\tHostname: \"93.184.216.34\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"93.184.216.34\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t20123 * time.Microsecond,\n\t\t\t\t\t\t\t19876 * time.Microsecond,\n\t\t\t\t\t\t\t20234 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSuccess: true,\n\t\t\t\tErr:     nil,\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"alpine linux traceroute with ms variations\",\n\t\t\tinput: `traceroute to 1.1.1.1 (1.1.1.1), 30 hops max, 46 byte packets\n 1  gateway (192.168.0.1)  0.456ms  0.389ms  0.412ms\n 2  1.1.1.1 (1.1.1.1)  8.234ms  7.987ms  8.123ms`,\n\t\t\twant: Traceroute{\n\t\t\t\tHostname: \"1.1.1.1\",\n\t\t\t\tIP:       netip.MustParseAddr(\"1.1.1.1\"),\n\t\t\t\tRoute: []TraceroutePath{\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      1,\n\t\t\t\t\t\tHostname: \"gateway\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"192.168.0.1\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t456 * time.Microsecond,\n\t\t\t\t\t\t\t389 * time.Microsecond,\n\t\t\t\t\t\t\t412 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      2,\n\t\t\t\t\t\tHostname: \"1.1.1.1\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"1.1.1.1\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t8234 * time.Microsecond,\n\t\t\t\t\t\t\t7987 * time.Microsecond,\n\t\t\t\t\t\t\t8123 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSuccess: true,\n\t\t\t\tErr:     nil,\n\t\t\t},\n\t\t\twantErr: 
false,\n\t\t},\n\t\t{\n\t\t\tname: \"mixed asterisk and latency values\",\n\t\t\tinput: `traceroute to 8.8.8.8 (8.8.8.8), 30 hops max, 60 byte packets\n 1  gateway (192.168.1.1)  *  1.234 ms  1.123 ms\n 2  10.0.0.1 (10.0.0.1)  5.678 ms  *  5.432 ms\n 3  8.8.8.8 (8.8.8.8)  20.123 ms  19.876 ms  *`,\n\t\t\twant: Traceroute{\n\t\t\t\tHostname: \"8.8.8.8\",\n\t\t\t\tIP:       netip.MustParseAddr(\"8.8.8.8\"),\n\t\t\t\tRoute: []TraceroutePath{\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      1,\n\t\t\t\t\t\tHostname: \"gateway\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"192.168.1.1\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t1234 * time.Microsecond,\n\t\t\t\t\t\t\t1123 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      2,\n\t\t\t\t\t\tHostname: \"10.0.0.1\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"10.0.0.1\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t5678 * time.Microsecond,\n\t\t\t\t\t\t\t5432 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tHop:      3,\n\t\t\t\t\t\tHostname: \"8.8.8.8\",\n\t\t\t\t\t\tIP:       netip.MustParseAddr(\"8.8.8.8\"),\n\t\t\t\t\t\tLatencies: []time.Duration{\n\t\t\t\t\t\t\t20123 * time.Microsecond,\n\t\t\t\t\t\t\t19876 * time.Microsecond,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSuccess: true,\n\t\t\t\tErr:     nil,\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot, err := ParseTraceroute(tt.input)\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"ParseTraceroute() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif tt.wantErr {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Special handling for error field since it can't be directly compared with cmp.Diff\n\t\t\tgotErr := got.Err\n\t\t\twantErr := tt.want.Err\n\t\t\tgot.Err = nil\n\t\t\ttt.want.Err = nil\n\n\t\t\tif diff := cmp.Diff(tt.want, got, IPComparer); diff != \"\" {\n\t\t\t\tt.Errorf(\"ParseTraceroute() mismatch (-want +got):\\n%s\", diff)\n\t\t\t}\n\n\t\t\t// Now check error field separately\n\t\t\tif (gotErr == nil) != (wantErr == nil) {\n\t\t\t\tt.Errorf(\"Error field: got %v, want %v\", gotErr, wantErr)\n\t\t\t} else if gotErr != nil && wantErr != nil && gotErr.Error() != wantErr.Error() {\n\t\t\t\tt.Errorf(\"Error message: got %q, want %q\", gotErr.Error(), wantErr.Error())\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestEnsureHostname(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname       string\n\t\thostinfo   *tailcfg.Hostinfo\n\t\tmachineKey string\n\t\tnodeKey    string\n\t\twant       string\n\t}{\n\t\t{\n\t\t\tname: \"valid_hostname\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"test-node\",\n\t\t\t},\n\t\t\tmachineKey: \"mkey12345678\",\n\t\t\tnodeKey:    \"nkey12345678\",\n\t\t\twant:       \"test-node\",\n\t\t},\n\t\t{\n\t\t\tname:       \"nil_hostinfo_with_machine_key\",\n\t\t\thostinfo:   nil,\n\t\t\tmachineKey: \"mkey12345678\",\n\t\t\tnodeKey:    \"nkey12345678\",\n\t\t\twant:       \"node-mkey1234\",\n\t\t},\n\t\t{\n\t\t\tname:       \"nil_hostinfo_with_node_key_only\",\n\t\t\thostinfo:   nil,\n\t\t\tmachineKey: \"\",\n\t\t\tnodeKey:    \"nkey12345678\",\n\t\t\twant:       \"node-nkey1234\",\n\t\t},\n\t\t{\n\t\t\tname:       \"nil_hostinfo_no_keys\",\n\t\t\thostinfo:   nil,\n\t\t\tmachineKey: \"\",\n\t\t\tnodeKey:    \"\",\n\t\t\twant:       testUnknownNode,\n\t\t},\n\t\t{\n\t\t\tname: \"empty_hostname_with_machine_key\",\n\t\t\thostinfo: 
&tailcfg.Hostinfo{\n\t\t\t\tHostname: \"\",\n\t\t\t},\n\t\t\tmachineKey: \"mkey12345678\",\n\t\t\tnodeKey:    \"nkey12345678\",\n\t\t\twant:       \"node-mkey1234\",\n\t\t},\n\t\t{\n\t\t\tname: \"empty_hostname_with_node_key_only\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"\",\n\t\t\t},\n\t\t\tmachineKey: \"\",\n\t\t\tnodeKey:    \"nkey12345678\",\n\t\t\twant:       \"node-nkey1234\",\n\t\t},\n\t\t{\n\t\t\tname: \"empty_hostname_no_keys\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"\",\n\t\t\t},\n\t\t\tmachineKey: \"\",\n\t\t\tnodeKey:    \"\",\n\t\t\twant:       testUnknownNode,\n\t\t},\n\t\t{\n\t\t\tname: \"hostname_exactly_63_chars\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"123456789012345678901234567890123456789012345678901234567890123\",\n\t\t\t},\n\t\t\tmachineKey: \"mkey12345678\",\n\t\t\tnodeKey:    \"nkey12345678\",\n\t\t\twant:       \"123456789012345678901234567890123456789012345678901234567890123\",\n\t\t},\n\t\t{\n\t\t\tname: \"hostname_64_chars_truncated\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"1234567890123456789012345678901234567890123456789012345678901234\",\n\t\t\t},\n\t\t\tmachineKey: \"mkey12345678\",\n\t\t\tnodeKey:    \"nkey12345678\",\n\t\t\twant:       \"invalid-\",\n\t\t},\n\t\t{\n\t\t\tname: \"hostname_very_long_truncated\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"test-node-with-very-long-hostname-that-exceeds-dns-label-limits-of-63-characters-and-should-be-truncated\",\n\t\t\t},\n\t\t\tmachineKey: \"mkey12345678\",\n\t\t\tnodeKey:    \"nkey12345678\",\n\t\t\twant:       \"invalid-\",\n\t\t},\n\t\t{\n\t\t\tname: \"hostname_with_special_chars\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"node-with-special!@#$%\",\n\t\t\t},\n\t\t\tmachineKey: \"mkey12345678\",\n\t\t\tnodeKey:    \"nkey12345678\",\n\t\t\twant:       \"invalid-\",\n\t\t},\n\t\t{\n\t\t\tname: \"hostname_with_unicode\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"node-ñoño-测试\", //nolint:gosmopolitan\n\t\t\t},\n\t\t\tmachineKey: \"mkey12345678\",\n\t\t\tnodeKey:    \"nkey12345678\",\n\t\t\twant:       \"invalid-\",\n\t\t},\n\t\t{\n\t\t\tname: \"short_machine_key\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"\",\n\t\t\t},\n\t\t\tmachineKey: \"short\",\n\t\t\tnodeKey:    \"nkey12345678\",\n\t\t\twant:       \"node-short\",\n\t\t},\n\t\t{\n\t\t\tname: \"short_node_key\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"\",\n\t\t\t},\n\t\t\tmachineKey: \"\",\n\t\t\tnodeKey:    \"short\",\n\t\t\twant:       \"node-short\",\n\t\t},\n\t\t{\n\t\t\tname: \"hostname_with_emoji_replaced\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"hostname-with-💩\",\n\t\t\t},\n\t\t\tmachineKey: \"mkey12345678\",\n\t\t\tnodeKey:    \"nkey12345678\",\n\t\t\twant:       \"invalid-\",\n\t\t},\n\t\t{\n\t\t\tname: \"hostname_only_emoji_replaced\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"🚀\",\n\t\t\t},\n\t\t\tmachineKey: \"mkey12345678\",\n\t\t\tnodeKey:    \"nkey12345678\",\n\t\t\twant:       \"invalid-\",\n\t\t},\n\t\t{\n\t\t\tname: \"hostname_with_multiple_emojis_replaced\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"node-🎉-🚀-test\",\n\t\t\t},\n\t\t\tmachineKey: \"mkey12345678\",\n\t\t\tnodeKey:    \"nkey12345678\",\n\t\t\twant:       \"invalid-\",\n\t\t},\n\t\t{\n\t\t\tname: \"uppercase_to_lowercase\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"User2-Host\",\n\t\t\t},\n\t\t\tmachineKey: \"mkey12345678\",\n\t\t\tnodeKey:    
\"nkey12345678\",\n\t\t\twant:       \"user2-host\",\n\t\t},\n\t\t{\n\t\t\tname: \"underscore_removed\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"test_node\",\n\t\t\t},\n\t\t\tmachineKey: \"mkey12345678\",\n\t\t\tnodeKey:    \"nkey12345678\",\n\t\t\twant:       \"invalid-\",\n\t\t},\n\t\t{\n\t\t\tname: \"at_sign_invalid\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"Test@Host\",\n\t\t\t},\n\t\t\tmachineKey: \"mkey12345678\",\n\t\t\tnodeKey:    \"nkey12345678\",\n\t\t\twant:       \"invalid-\",\n\t\t},\n\t\t{\n\t\t\tname: \"chinese_chars_with_dash_invalid\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"server-北京-01\", //nolint:gosmopolitan\n\t\t\t},\n\t\t\tmachineKey: \"mkey12345678\",\n\t\t\tnodeKey:    \"nkey12345678\",\n\t\t\twant:       \"invalid-\",\n\t\t},\n\t\t{\n\t\t\tname: \"chinese_only_invalid\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"我的电脑\", //nolint:gosmopolitan\n\t\t\t},\n\t\t\tmachineKey: \"mkey12345678\",\n\t\t\tnodeKey:    \"nkey12345678\",\n\t\t\twant:       \"invalid-\",\n\t\t},\n\t\t{\n\t\t\tname: \"emoji_with_text_invalid\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"laptop-🚀\",\n\t\t\t},\n\t\t\tmachineKey: \"mkey12345678\",\n\t\t\tnodeKey:    \"nkey12345678\",\n\t\t\twant:       \"invalid-\",\n\t\t},\n\t\t{\n\t\t\tname: \"mixed_chinese_emoji_invalid\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"测试💻机器\", //nolint:gosmopolitan // intentional i18n test data\n\t\t\t},\n\t\t\tmachineKey: \"mkey12345678\",\n\t\t\tnodeKey:    \"nkey12345678\",\n\t\t\twant:       \"invalid-\",\n\t\t},\n\t\t{\n\t\t\tname: \"only_emojis_invalid\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"🎉🎊\",\n\t\t\t},\n\t\t\tmachineKey: \"mkey12345678\",\n\t\t\tnodeKey:    \"nkey12345678\",\n\t\t\twant:       \"invalid-\",\n\t\t},\n\t\t{\n\t\t\tname: \"only_at_signs_invalid\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"@@@\",\n\t\t\t},\n\t\t\tmachineKey: \"mkey12345678\",\n\t\t\tnodeKey:    \"nkey12345678\",\n\t\t\twant:       \"invalid-\",\n\t\t},\n\t\t{\n\t\t\tname: \"starts_with_dash_invalid\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"-test\",\n\t\t\t},\n\t\t\tmachineKey: \"mkey12345678\",\n\t\t\tnodeKey:    \"nkey12345678\",\n\t\t\twant:       \"invalid-\",\n\t\t},\n\t\t{\n\t\t\tname: \"ends_with_dash_invalid\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"test-\",\n\t\t\t},\n\t\t\tmachineKey: \"mkey12345678\",\n\t\t\tnodeKey:    \"nkey12345678\",\n\t\t\twant:       \"invalid-\",\n\t\t},\n\t\t{\n\t\t\tname: \"very_long_hostname_truncated\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: strings.Repeat(\"t\", 70),\n\t\t\t},\n\t\t\tmachineKey: \"mkey12345678\",\n\t\t\tnodeKey:    \"nkey12345678\",\n\t\t\twant:       \"invalid-\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tgot := EnsureHostname(tt.hostinfo.View(), tt.machineKey, tt.nodeKey)\n\t\t\t// For invalid hostnames, we just check the prefix since the random part varies\n\t\t\tif strings.HasPrefix(tt.want, \"invalid-\") {\n\t\t\t\tif !strings.HasPrefix(got, \"invalid-\") {\n\t\t\t\t\tt.Errorf(\"EnsureHostname() = %v, want prefix %v\", got, tt.want)\n\t\t\t\t}\n\t\t\t} else if got != tt.want {\n\t\t\t\tt.Errorf(\"EnsureHostname() = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestEnsureHostnameWithHostinfo(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname          string\n\t\thostinfo      
*tailcfg.Hostinfo\n\t\tmachineKey    string\n\t\tnodeKey       string\n\t\twantHostname  string\n\t\tcheckHostinfo func(*testing.T, *tailcfg.Hostinfo)\n\t}{\n\t\t{\n\t\t\tname: \"valid_hostinfo_unchanged\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"test-node\",\n\t\t\t\tOS:       \"linux\",\n\t\t\t},\n\t\t\tmachineKey:   \"mkey12345678\",\n\t\t\tnodeKey:      \"nkey12345678\",\n\t\t\twantHostname: \"test-node\",\n\t\t\tcheckHostinfo: func(t *testing.T, hi *tailcfg.Hostinfo) { //nolint:thelper\n\t\t\t\tif hi == nil {\n\t\t\t\t\tt.Fatal(\"hostinfo should not be nil\")\n\t\t\t\t}\n\n\t\t\t\tif hi.Hostname != \"test-node\" {\n\t\t\t\t\tt.Errorf(\"hostname = %v, want test-node\", hi.Hostname)\n\t\t\t\t}\n\n\t\t\t\tif hi.OS != \"linux\" {\n\t\t\t\t\tt.Errorf(\"OS = %v, want linux\", hi.OS)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:         \"nil_hostinfo_creates_default\",\n\t\t\thostinfo:     nil,\n\t\t\tmachineKey:   \"mkey12345678\",\n\t\t\tnodeKey:      \"nkey12345678\",\n\t\t\twantHostname: \"node-mkey1234\",\n\t\t},\n\t\t{\n\t\t\tname: \"empty_hostname_updated\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"\",\n\t\t\t\tOS:       \"darwin\",\n\t\t\t},\n\t\t\tmachineKey:   \"mkey12345678\",\n\t\t\tnodeKey:      \"nkey12345678\",\n\t\t\twantHostname: \"node-mkey1234\",\n\t\t},\n\t\t{\n\t\t\tname: \"long_hostname_rejected\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"test-node-with-very-long-hostname-that-exceeds-dns-label-limits-of-63-characters\",\n\t\t\t},\n\t\t\tmachineKey:   \"mkey12345678\",\n\t\t\tnodeKey:      \"nkey12345678\",\n\t\t\twantHostname: \"invalid-\",\n\t\t},\n\t\t{\n\t\t\tname:         \"nil_hostinfo_node_key_only\",\n\t\t\thostinfo:     nil,\n\t\t\tmachineKey:   \"\",\n\t\t\tnodeKey:      \"nkey12345678\",\n\t\t\twantHostname: \"node-nkey1234\",\n\t\t\tcheckHostinfo: func(t *testing.T, hi *tailcfg.Hostinfo) { //nolint:thelper\n\t\t\t\tif hi == nil {\n\t\t\t\t\tt.Fatal(\"hostinfo should not be nil\")\n\t\t\t\t}\n\n\t\t\t\tif hi.Hostname != \"node-nkey1234\" {\n\t\t\t\t\tt.Errorf(\"hostname = %v, want node-nkey1234\", hi.Hostname)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:         \"nil_hostinfo_no_keys\",\n\t\t\thostinfo:     nil,\n\t\t\tmachineKey:   \"\",\n\t\t\tnodeKey:      \"\",\n\t\t\twantHostname: testUnknownNode,\n\t\t\tcheckHostinfo: func(t *testing.T, hi *tailcfg.Hostinfo) { //nolint:thelper\n\t\t\t\tif hi == nil {\n\t\t\t\t\tt.Fatal(\"hostinfo should not be nil\")\n\t\t\t\t}\n\n\t\t\t\tif hi.Hostname != testUnknownNode {\n\t\t\t\t\tt.Errorf(\"hostname = %v, want unknown-node\", hi.Hostname)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"empty_hostname_no_keys\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"\",\n\t\t\t},\n\t\t\tmachineKey:   \"\",\n\t\t\tnodeKey:      \"\",\n\t\t\twantHostname: testUnknownNode,\n\t\t\tcheckHostinfo: func(t *testing.T, hi *tailcfg.Hostinfo) { //nolint:thelper\n\t\t\t\tif hi == nil {\n\t\t\t\t\tt.Fatal(\"hostinfo should not be nil\")\n\t\t\t\t}\n\n\t\t\t\tif hi.Hostname != testUnknownNode {\n\t\t\t\t\tt.Errorf(\"hostname = %v, want unknown-node\", hi.Hostname)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"preserves_other_fields\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname:     \"test\",\n\t\t\t\tOS:           \"windows\",\n\t\t\t\tOSVersion:    \"10.0.19044\",\n\t\t\t\tDeviceModel:  \"test-device\",\n\t\t\t\tBackendLogID: \"log123\",\n\t\t\t},\n\t\t\tmachineKey:   \"mkey12345678\",\n\t\t\tnodeKey:      \"nkey12345678\",\n\t\t\twantHostname: 
\"test\",\n\t\t\tcheckHostinfo: func(t *testing.T, hi *tailcfg.Hostinfo) { //nolint:thelper\n\t\t\t\tif hi == nil {\n\t\t\t\t\tt.Fatal(\"hostinfo should not be nil\")\n\t\t\t\t}\n\n\t\t\t\tif hi.Hostname != \"test\" {\n\t\t\t\t\tt.Errorf(\"hostname = %v, want test\", hi.Hostname)\n\t\t\t\t}\n\n\t\t\t\tif hi.OS != \"windows\" {\n\t\t\t\t\tt.Errorf(\"OS = %v, want windows\", hi.OS)\n\t\t\t\t}\n\n\t\t\t\tif hi.OSVersion != \"10.0.19044\" {\n\t\t\t\t\tt.Errorf(\"OSVersion = %v, want 10.0.19044\", hi.OSVersion)\n\t\t\t\t}\n\n\t\t\t\tif hi.DeviceModel != \"test-device\" {\n\t\t\t\t\tt.Errorf(\"DeviceModel = %v, want test-device\", hi.DeviceModel)\n\t\t\t\t}\n\n\t\t\t\tif hi.BackendLogID != \"log123\" {\n\t\t\t\t\tt.Errorf(\"BackendLogID = %v, want log123\", hi.BackendLogID)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"exactly_63_chars_unchanged\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"123456789012345678901234567890123456789012345678901234567890123\",\n\t\t\t},\n\t\t\tmachineKey:   \"mkey12345678\",\n\t\t\tnodeKey:      \"nkey12345678\",\n\t\t\twantHostname: \"123456789012345678901234567890123456789012345678901234567890123\",\n\t\t\tcheckHostinfo: func(t *testing.T, hi *tailcfg.Hostinfo) { //nolint:thelper\n\t\t\t\tif hi == nil {\n\t\t\t\t\tt.Fatal(\"hostinfo should not be nil\")\n\t\t\t\t}\n\n\t\t\t\tif len(hi.Hostname) != 63 {\n\t\t\t\t\tt.Errorf(\"hostname length = %v, want 63\", len(hi.Hostname))\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tgotHostname := EnsureHostname(tt.hostinfo.View(), tt.machineKey, tt.nodeKey)\n\t\t\t// For invalid hostnames, we just check the prefix since the random part varies\n\t\t\tif strings.HasPrefix(tt.wantHostname, \"invalid-\") {\n\t\t\t\tif !strings.HasPrefix(gotHostname, \"invalid-\") {\n\t\t\t\t\tt.Errorf(\"EnsureHostname() = %v, want prefix %v\", gotHostname, tt.wantHostname)\n\t\t\t\t}\n\t\t\t} else if gotHostname != tt.wantHostname {\n\t\t\t\tt.Errorf(\"EnsureHostname() hostname = %v, want %v\", gotHostname, tt.wantHostname)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestEnsureHostname_DNSLabelLimit(t *testing.T) {\n\tt.Parallel()\n\n\ttestCases := 
[]string{\n\t\t\"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\",\n\t\t\"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\",\n\t\t\"cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\",\n\t\t\"dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd\",\n\t}\n\n\tfor i, hostname := range testCases {\n\t\tt.Run(cmp.Diff(\"\", \"\"), func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\thostinfo := &tailcfg.Hostinfo{Hostname: hostname}\n\n\t\t\tresult := EnsureHostname(hostinfo.View(), \"mkey\", \"nkey\")\n\t\t\tif len(result) > 63 {\n\t\t\t\tt.Errorf(\"test case %d: hostname length = %d, want <= 63\", i, len(result))\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestEnsureHostname_Idempotent(t *testing.T) {\n\tt.Parallel()\n\n\toriginalHostinfo := &tailcfg.Hostinfo{\n\t\tHostname: \"test-node\",\n\t\tOS:       \"linux\",\n\t}\n\n\thostname1 := EnsureHostname(originalHostinfo.View(), \"mkey\", \"nkey\")\n\thostname2 := EnsureHostname(originalHostinfo.View(), \"mkey\", \"nkey\")\n\n\tif hostname1 != hostname2 {\n\t\tt.Errorf(\"hostnames not equal: %v != %v\", hostname1, hostname2)\n\t}\n}\n\nfunc TestGenerateRegistrationKey(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname string\n\t\ttest func(*testing.T)\n\t}{\n\t\t{\n\t\t\tname: \"generates_key_with_correct_prefix\",\n\t\t\ttest: func(t *testing.T) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tkey, err := GenerateRegistrationKey()\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"GenerateRegistrationKey() error = %v\", err)\n\t\t\t\t}\n\n\t\t\t\tif !strings.HasPrefix(key, \"hskey-reg-\") {\n\t\t\t\t\tt.Errorf(\"key does not have expected prefix: %s\", key)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"generates_key_with_correct_length\",\n\t\t\ttest: func(t *testing.T) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tkey, err := GenerateRegistrationKey()\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"GenerateRegistrationKey() error = %v\", err)\n\t\t\t\t}\n\n\t\t\t\t// Expected format: hskey-reg-{64-char-random}\n\t\t\t\t// Total length: 10 (prefix) + 64 (random) = 74\n\t\t\t\tif 
len(key) != 74 {\n\t\t\t\t\tt.Errorf(\"key length = %d, want 74\", len(key))\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"generates_unique_keys\",\n\t\t\ttest: func(t *testing.T) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tkey1, err := GenerateRegistrationKey()\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"GenerateRegistrationKey() error = %v\", err)\n\t\t\t\t}\n\n\t\t\t\tkey2, err := GenerateRegistrationKey()\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"GenerateRegistrationKey() error = %v\", err)\n\t\t\t\t}\n\n\t\t\t\tif key1 == key2 {\n\t\t\t\t\tt.Error(\"generated keys should be unique\")\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"key_contains_only_valid_chars\",\n\t\t\ttest: func(t *testing.T) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tkey, err := GenerateRegistrationKey()\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"GenerateRegistrationKey() error = %v\", err)\n\t\t\t\t}\n\n\t\t\t\t// Remove prefix\n\t\t\t\t_, randomPart, found := strings.Cut(key, \"hskey-reg-\")\n\t\t\t\tif !found {\n\t\t\t\t\tt.Error(\"key does not contain expected prefix\")\n\t\t\t\t}\n\n\t\t\t\t// Verify base64 URL-safe characters (A-Za-z0-9_-)\n\t\t\t\tfor _, ch := range randomPart {\n\t\t\t\t\tif (ch < 'A' || ch > 'Z') &&\n\t\t\t\t\t\t(ch < 'a' || ch > 'z') &&\n\t\t\t\t\t\t(ch < '0' || ch > '9') &&\n\t\t\t\t\t\tch != '_' && ch != '-' {\n\t\t\t\t\t\tt.Errorf(\"key contains invalid character: %c\", ch)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\ttt.test(t)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "hscontrol/util/zlog/fields.go",
    "content": "// Package zlog provides zerolog utilities for safe and consistent logging.\n//\n// This package contains:\n//   - Safe wrapper types for external types (tailcfg.Hostinfo, tailcfg.MapRequest)\n//     that implement LogObjectMarshaler with security-conscious field redaction\n//\n// For field name constants, use the zf subpackage:\n//\n//\timport \"github.com/juanfont/headscale/hscontrol/util/zlog/zf\"\n//\n// # Usage Pattern: Sub-Loggers\n//\n// The recommended pattern is to create sub-loggers at function entry points:\n//\n//\tfunc (m *mapSession) serve() {\n//\t    log := log.With().\n//\t        EmbedObject(m.node).\n//\t        EmbedObject(zlog.MapRequest(&m.req)).\n//\t        Logger()\n//\n//\t    log.Info().Msg(\"Map session started\")\n//\t    log.Debug().Caller().Msg(\"Processing request\")\n//\t}\n//\n// # Security Considerations\n//\n// The wrapper types in this package intentionally redact sensitive information:\n//   - Device fingerprinting data (OS version, device model, etc.)\n//   - Client endpoints and IP addresses\n//   - Full authentication keys (only prefixes are logged)\npackage zlog\n"
  },
  {
    "path": "hscontrol/util/zlog/hostinfo.go",
    "content": "package zlog\n\nimport (\n\t\"github.com/juanfont/headscale/hscontrol/util/zlog/zf\"\n\t\"github.com/rs/zerolog\"\n\t\"tailscale.com/tailcfg\"\n)\n\n// SafeHostinfo wraps tailcfg.Hostinfo for safe logging.\n//\n// SECURITY: This wrapper intentionally redacts device fingerprinting data\n// that could be used to identify or track specific devices:\n//   - OSVersion, DeviceModel, DistroName, DistroVersion (device fingerprinting)\n//   - IPNVersion (client version fingerprinting)\n//   - Machine, FrontendLogID (device identifiers)\n//\n// Only safe fields are logged:\n//   - hostname: The device hostname\n//   - os: The OS family (e.g., \"linux\", \"windows\") without version\n//   - routable_ips_count: Number of advertised routes (not the actual routes)\n//   - request_tags: Tags requested by the client\n//   - derp: Preferred DERP region ID\ntype SafeHostinfo struct {\n\thi *tailcfg.Hostinfo\n}\n\n// Hostinfo creates a SafeHostinfo wrapper for safe logging.\nfunc Hostinfo(hi *tailcfg.Hostinfo) SafeHostinfo {\n\treturn SafeHostinfo{hi: hi}\n}\n\n// MarshalZerologObject implements zerolog.LogObjectMarshaler.\nfunc (s SafeHostinfo) MarshalZerologObject(e *zerolog.Event) {\n\tif s.hi == nil {\n\t\treturn\n\t}\n\n\t// Safe fields only - no device fingerprinting data.\n\te.Str(zf.Hostname, s.hi.Hostname)\n\te.Str(zf.OS, s.hi.OS) // OS family only, NOT version\n\n\tif len(s.hi.RoutableIPs) > 0 {\n\t\te.Int(zf.RoutableIPCount, len(s.hi.RoutableIPs))\n\t}\n\n\tif len(s.hi.RequestTags) > 0 {\n\t\te.Strs(zf.RequestTags, s.hi.RequestTags)\n\t}\n\n\tif s.hi.NetInfo != nil && s.hi.NetInfo.PreferredDERP != 0 {\n\t\te.Int(zf.DERP, s.hi.NetInfo.PreferredDERP)\n\t}\n\n\t// SECURITY: The following fields are intentionally NOT logged:\n\t// - OSVersion, DistroName, DistroVersion, DistroCodeName: device fingerprinting\n\t// - DeviceModel: device fingerprinting\n\t// - IPNVersion: client version fingerprinting\n\t// - Machine, FrontendLogID: device identifiers\n\t// - GoArch, GoArchVar, GoVersion: build environment fingerprinting\n\t// - Userspace, UserspaceRouter: network configuration details\n}\n"
  },
  {
    "path": "hscontrol/util/zlog/maprequest.go",
    "content": "package zlog\n\nimport (\n\t\"github.com/juanfont/headscale/hscontrol/util/zlog/zf\"\n\t\"github.com/rs/zerolog\"\n\t\"tailscale.com/tailcfg\"\n)\n\n// SafeMapRequest wraps tailcfg.MapRequest for safe logging.\n//\n// SECURITY: This wrapper does not log sensitive information:\n//   - Endpoints: Client IP addresses and ports\n//   - Hostinfo: Device fingerprinting data (handled by SafeHostinfo)\n//   - DERPForceWebsockets: Network configuration details\n//\n// Only safe fields are logged:\n//   - stream: Whether this is a streaming request\n//   - omit_peers: Whether peers should be omitted\n//   - version: Client capability version\n//   - node.key: Short form of the node key\n//   - endpoints_count: Number of endpoints (not the actual endpoints)\ntype SafeMapRequest struct {\n\treq *tailcfg.MapRequest\n}\n\n// MapRequest creates a SafeMapRequest wrapper for safe logging.\nfunc MapRequest(req *tailcfg.MapRequest) SafeMapRequest {\n\treturn SafeMapRequest{req: req}\n}\n\n// MarshalZerologObject implements zerolog.LogObjectMarshaler.\nfunc (s SafeMapRequest) MarshalZerologObject(e *zerolog.Event) {\n\tif s.req == nil {\n\t\treturn\n\t}\n\n\te.Bool(zf.Stream, s.req.Stream)\n\te.Bool(zf.OmitPeers, s.req.OmitPeers)\n\te.Int(zf.Version, int(s.req.Version))\n\te.Str(zf.NodeKey, s.req.NodeKey.ShortString())\n\n\t// Log counts only, NOT actual endpoints/IPs.\n\tif len(s.req.Endpoints) > 0 {\n\t\te.Int(zf.EndpointsCount, len(s.req.Endpoints))\n\t}\n\n\t// SECURITY: The following fields are intentionally NOT logged:\n\t// - Endpoints: Client IP addresses and ports\n\t// - Hostinfo: Device fingerprinting data (use SafeHostinfo separately if needed)\n\t// - DERPForceWebsockets: Network configuration details\n}\n"
  },
  {
    "path": "hscontrol/util/zlog/zf/fields.go",
    "content": "// Package zf provides zerolog field name constants for consistent logging.\n//\n// Using constants ensures typos are caught at compile time and enables\n// easy refactoring. Import as:\n//\n//\timport \"github.com/juanfont/headscale/hscontrol/util/zlog/zf\"\n//\n// Usage:\n//\n//\tlog.Info().Uint64(zf.NodeID, id).Str(zf.NodeName, name).Msg(\"...\")\npackage zf\n\n// Node fields.\nconst (\n\tNodeID             = \"node.id\"\n\tNodeName           = \"node.name\"\n\tNodeKey            = \"node.key\"\n\tNodeKeyExisting    = \"node.key.existing\"\n\tNodeKeyRequest     = \"node.key.request\"\n\tNodeTags           = \"node.tags\"\n\tNodeIsTagged       = \"node.is_tagged\"\n\tNodeOnline         = \"node.online\"\n\tNodeExpired        = \"node.expired\"\n\tNodeHostname       = \"node.hostname\"\n\tExistingNodeName   = \"existing.node.name\"\n\tExistingNodeID     = \"existing.node.id\"\n\tCurrentHostname    = \"current_hostname\"\n\tRejectedHostname   = \"rejected_hostname\"\n\tOldHostname        = \"old_hostname\"\n\tNewHostnameField   = \"new_hostname\"\n\tOldGivenName       = \"old_given_name\"\n\tNewGivenName       = \"new_given_name\"\n\tNewName            = \"new_name\"\n\tGeneratedHostname  = \"generated.hostname\"\n\tRegistrationKey    = \"registration_key\" //nolint:gosec // G101: not a credential\n\tRegistrationMethod = \"registrationMethod\"\n\tExpiresAt          = \"expiresAt\"\n)\n\n// Tag fields for reauth and tag operations.\nconst (\n\tCurrentTags      = \"current.tags\"\n\tRemovedTags      = \"removed.tags\"\n\tRejectedTags     = \"rejected.tags\"\n\tNewTags          = \"new.tags\"\n\tOldTags          = \"old.tags\"\n\tIsTagged         = \"is.tagged\"\n\tWasAuthKeyTagged = \"was.authkey.tagged\"\n)\n\n// Machine fields.\nconst (\n\tMachineKey = \"machine.key\"\n)\n\n// User fields.\nconst (\n\tUserID       = \"user.id\"\n\tUserName     = \"user.name\"\n\tUserDisplay  = \"user.display\"\n\tUserProvider = \"user.provider\"\n\tUserCount    = \"user.count\"\n\tOldUser      = \"old.user\"\n\tNewUser      = \"new.user\"\n)\n\n// PreAuthKey fields.\nconst (\n\tPAKID           = \"pak.id\"\n\tPAKPrefix       = \"pak.prefix\"\n\tPAKTags         = \"pak.tags\"\n\tPAKReusable     = \"pak.reusable\"\n\tPAKEphemeral    = \"pak.ephemeral\"\n\tPAKUsed         = \"pak.used\"\n\tPAKIsTagged     = \"pak.is_tagged\"\n\tPAKExpiration   = \"pak.expiration\"\n\tAuthKeyID       = \"authkey.id\"\n\tAuthKeyUsed     = \"authkey.used\"\n\tAuthKeyExpired  = \"authkey.expired\"\n\tAuthKeyReusable = \"authkey.reusable\"\n\tNodeKeyRotation = \"nodekey.rotation\"\n)\n\n// APIKey fields.\nconst (\n\tAPIKeyID         = \"api_key.id\"\n\tAPIKeyPrefix     = \"api_key.prefix\"     //nolint:gosec // G101: not a credential\n\tAPIKeyExpiration = \"api_key.expiration\" //nolint:gosec // G101: not a credential\n\tAPIKeyLastSeen   = \"api_key.last_seen\"  //nolint:gosec // G101: not a credential\n)\n\n// Route fields.\nconst (\n\tRoutesAnnounced    = \"routes.announced\"\n\tRoutesApproved     = \"routes.approved\"\n\tRoutesApprovedOld  = \"routes.approved.old\"\n\tRoutesApprovedNew  = \"routes.approved.new\"\n\tOldAnnouncedRoutes = \"oldAnnouncedRoutes\"\n\tNewAnnouncedRoutes = \"newAnnouncedRoutes\"\n\tApprovedRoutes     = \"approvedRoutes\"\n\tOldApprovedRoutes  = \"oldApprovedRoutes\"\n\tNewApprovedRoutes  = \"newApprovedRoutes\"\n\tAutoApprovedRoutes = \"autoApprovedRoutes\"\n\tAllApprovedRoutes  = \"allApprovedRoutes\"\n\tRouteChanged       = \"routeChanged\"\n\tPrefix             = 
\"prefix\"\n\tFinalState         = \"finalState\"\n\tNewState           = \"newState\"\n)\n\n// Request/Response fields.\nconst (\n\tOmitPeers      = \"omit_peers\"\n\tStream         = \"stream\"\n\tVersion        = \"version\"\n\tStatusCode     = \"status_code\"\n\tRegistrationID = \"registration_id\"\n)\n\n// Network fields.\nconst (\n\tEndpointsCount  = \"endpoints_count\"\n\tDERP            = \"derp\"\n\tHostname        = \"hostname\"\n\tOS              = \"os\"\n\tRoutableIPCount = \"routable_ips_count\"\n\tRequestTags     = \"request_tags\"\n\tInvalidHostname = \"invalid_hostname\"\n\tNewHostname     = \"new_hostname\"\n\tURL             = \"url\"\n\tPath            = \"path\"\n\tClientAddress   = \"client_address\"\n\tClientVersion   = \"client_version\"\n\tMinimumVersion  = \"minimum_version\"\n)\n\n// Policy fields.\nconst (\n\tPolicyChanged      = \"policy.changed\"\n\tFilterHashOld      = \"filter.hash.old\"\n\tFilterHashNew      = \"filter.hash.new\"\n\tTagOwnerHashOld    = \"tagOwner.hash.old\"\n\tTagOwnerHashNew    = \"tagOwner.hash.new\"\n\tAutoApproveHashOld = \"autoApprove.hash.old\"\n\tAutoApproveHashNew = \"autoApprove.hash.new\"\n\tExitSetHashOld     = \"exitSet.hash.old\"\n\tExitSetHashNew     = \"exitSet.hash.new\"\n)\n\n// Connection/Channel fields.\nconst (\n\tChan            = \"chan\"\n\tConnID          = \"conn.id\"\n\tConnectionIndex = \"connection_index\"\n\tAddress         = \"address\"\n)\n\n// gRPC fields.\nconst (\n\tClient  = \"client\"\n\tRequest = \"request\"\n\tUsers   = \"users\"\n)\n\n// Worker/Processing fields.\nconst (\n\tWorkerID     = \"worker.id\"\n\tReason       = \"reason\"\n\tOp           = \"op\"\n\tOK           = \"ok\"\n\tChanges      = \"changes\"\n\tWatching     = \"watching\"\n\tCleanedNodes = \"cleaned_nodes\"\n\tMethod       = \"method\"\n\tSignal       = \"signal\"\n\tFunc         = \"func\"\n)\n\n// Duration fields.\nconst (\n\tTotalDuration   = \"total.duration\"\n\tTimeoutDuration = \"timeout.duration\"\n)\n\n// Database fields.\nconst (\n\tTable       = \"table\"\n\tMigrationID = \"migration_id\"\n\tCommit      = \"commit\"\n\tRecords     = \"records\"\n\tCode        = \"code\"\n\tGot         = \"got\"\n\tDatabase    = \"database\"\n\tIndex       = \"index\"\n\tParent      = \"parent\"\n\tType        = \"type\"\n)\n\n// Component field for sub-loggers.\nconst (\n\tComponent = \"component\"\n)\n\n// Debug environment variable fields.\nconst (\n\tDebugDeadlock              = \"HEADSCALE_DEBUG_DEADLOCK\"\n\tDebugDERPUseIP             = \"HEADSCALE_DEBUG_DERP_USE_IP\"\n\tDebugDumpConfig            = \"HEADSCALE_DEBUG_DUMP_CONFIG\"\n\tDebugHighCardinalityMetric = \"HEADSCALE_DEBUG_HIGH_CARDINALITY_METRICS\"\n\tDebugProfilingEnabled      = \"HEADSCALE_DEBUG_PROFILING_ENABLED\"\n\tDebugTailSQLEnabled        = \"HEADSCALE_DEBUG_TAILSQL_ENABLED\"\n)\n"
  },
  {
    "path": "hscontrol/util/zlog/zlog_test.go",
    "content": "package zlog\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"net/netip\"\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/util/zlog/zf\"\n\t\"github.com/rs/zerolog\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/key\"\n)\n\nfunc TestSafeHostinfo_MarshalZerologObject(t *testing.T) {\n\ttests := []struct {\n\t\tname       string\n\t\thostinfo   *tailcfg.Hostinfo\n\t\twantFields map[string]any\n\t\twantAbsent []string // Fields that should NOT be present\n\t}{\n\t\t{\n\t\t\tname:       \"nil hostinfo\",\n\t\t\thostinfo:   nil,\n\t\t\twantFields: map[string]any{},\n\t\t},\n\t\t{\n\t\t\tname: \"basic hostinfo\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"myhost\",\n\t\t\t\tOS:       \"linux\",\n\t\t\t},\n\t\t\twantFields: map[string]any{\n\t\t\t\tzf.Hostname: \"myhost\",\n\t\t\t\tzf.OS:       \"linux\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"hostinfo with routes and tags\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname:    \"router\",\n\t\t\t\tOS:          \"linux\",\n\t\t\t\tRoutableIPs: []netip.Prefix{netip.MustParsePrefix(\"10.0.0.0/24\")},\n\t\t\t\tRequestTags: []string{\"tag:server\"},\n\t\t\t},\n\t\t\twantFields: map[string]any{\n\t\t\t\tzf.Hostname:        \"router\",\n\t\t\t\tzf.OS:              \"linux\",\n\t\t\t\tzf.RoutableIPCount: float64(1),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"hostinfo with netinfo\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname: \"myhost\",\n\t\t\t\tOS:       \"windows\",\n\t\t\t\tNetInfo: &tailcfg.NetInfo{\n\t\t\t\t\tPreferredDERP: 1,\n\t\t\t\t},\n\t\t\t},\n\t\t\twantFields: map[string]any{\n\t\t\t\tzf.Hostname: \"myhost\",\n\t\t\t\tzf.OS:       \"windows\",\n\t\t\t\tzf.DERP:     float64(1),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"sensitive fields are NOT logged\",\n\t\t\thostinfo: &tailcfg.Hostinfo{\n\t\t\t\tHostname:    \"myhost\",\n\t\t\t\tOS:          \"linux\",\n\t\t\t\tOSVersion:   \"5.15.0-generic\", // Should NOT be logged\n\t\t\t\tDeviceModel: \"ThinkPad X1\",    // Should NOT be logged\n\t\t\t\tIPNVersion:  \"1.50.0\",         // Should NOT be logged\n\t\t\t},\n\t\t\twantFields: map[string]any{\n\t\t\t\tzf.Hostname: \"myhost\",\n\t\t\t\tzf.OS:       \"linux\",\n\t\t\t},\n\t\t\twantAbsent: []string{\"os_version\", \"device_model\", \"ipn_version\", \"OSVersion\", \"DeviceModel\", \"IPNVersion\"},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tvar buf bytes.Buffer\n\n\t\t\tlog := zerolog.New(&buf)\n\n\t\t\tlog.Info().EmbedObject(Hostinfo(tt.hostinfo)).Msg(\"test\")\n\n\t\t\tvar result map[string]any\n\n\t\t\terr := json.Unmarshal(buf.Bytes(), &result)\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Check expected fields are present\n\t\t\tfor key, wantVal := range tt.wantFields {\n\t\t\t\tassert.Equal(t, wantVal, result[key], \"field %s\", key)\n\t\t\t}\n\n\t\t\t// Check sensitive fields are absent\n\t\t\tfor _, key := range tt.wantAbsent {\n\t\t\t\t_, exists := result[key]\n\t\t\t\tassert.False(t, exists, \"sensitive field %s should not be logged\", key)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestSafeMapRequest_MarshalZerologObject(t *testing.T) {\n\tnodeKey := key.NewNode().Public()\n\n\ttests := []struct {\n\t\tname       string\n\t\treq        *tailcfg.MapRequest\n\t\twantFields map[string]any\n\t\twantAbsent []string\n\t}{\n\t\t{\n\t\t\tname:       \"nil request\",\n\t\t\treq:        nil,\n\t\t\twantFields: map[string]any{},\n\t\t},\n\t\t{\n\t\t\tname: 
\"basic request\",\n\t\t\treq: &tailcfg.MapRequest{\n\t\t\t\tStream:    true,\n\t\t\t\tOmitPeers: false,\n\t\t\t\tVersion:   100,\n\t\t\t\tNodeKey:   nodeKey,\n\t\t\t},\n\t\t\twantFields: map[string]any{\n\t\t\t\tzf.Stream:    true,\n\t\t\t\tzf.OmitPeers: false,\n\t\t\t\tzf.Version:   float64(100),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"request with endpoints - only count logged\",\n\t\t\treq: &tailcfg.MapRequest{\n\t\t\t\tStream:    false,\n\t\t\t\tOmitPeers: true,\n\t\t\t\tVersion:   100,\n\t\t\t\tNodeKey:   nodeKey,\n\t\t\t\tEndpoints: []netip.AddrPort{\n\t\t\t\t\tnetip.MustParseAddrPort(\"192.168.1.100:41641\"),\n\t\t\t\t\tnetip.MustParseAddrPort(\"10.0.0.50:41641\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\twantFields: map[string]any{\n\t\t\t\tzf.Stream:         false,\n\t\t\t\tzf.OmitPeers:      true,\n\t\t\t\tzf.EndpointsCount: float64(2),\n\t\t\t},\n\t\t\twantAbsent: []string{\"endpoints\", \"Endpoints\"},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tvar buf bytes.Buffer\n\n\t\t\tlog := zerolog.New(&buf)\n\n\t\t\tlog.Info().EmbedObject(MapRequest(tt.req)).Msg(\"test\")\n\n\t\t\tvar result map[string]any\n\n\t\t\terr := json.Unmarshal(buf.Bytes(), &result)\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Check expected fields are present\n\t\t\tfor key, wantVal := range tt.wantFields {\n\t\t\t\tassert.Equal(t, wantVal, result[key], \"field %s\", key)\n\t\t\t}\n\n\t\t\t// Check node.key is a short string (not full key)\n\t\t\tif tt.req != nil {\n\t\t\t\tnodeKeyStr, ok := result[zf.NodeKey].(string)\n\t\t\t\tif ok {\n\t\t\t\t\t// Short keys are truncated, full keys are 64+ chars\n\t\t\t\t\tassert.Less(t, len(nodeKeyStr), 20, \"node key should be short form\")\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Check sensitive fields are absent\n\t\t\tfor _, key := range tt.wantAbsent {\n\t\t\t\t_, exists := result[key]\n\t\t\t\tassert.False(t, exists, \"sensitive field %s should not be logged\", key)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestFieldConstants(t *testing.T) {\n\t// Verify field constants follow the expected naming pattern\n\tfieldTests := []struct {\n\t\tconstant string\n\t\texpected string\n\t}{\n\t\t{zf.NodeID, \"node.id\"},\n\t\t{zf.NodeName, \"node.name\"},\n\t\t{zf.NodeKey, \"node.key\"},\n\t\t{zf.MachineKey, \"machine.key\"},\n\t\t{zf.NodeTags, \"node.tags\"},\n\t\t{zf.NodeIsTagged, \"node.is_tagged\"},\n\t\t{zf.NodeOnline, \"node.online\"},\n\t\t{zf.NodeExpired, \"node.expired\"},\n\t\t{zf.UserID, \"user.id\"},\n\t\t{zf.UserName, \"user.name\"},\n\t\t{zf.PAKID, \"pak.id\"},\n\t\t{zf.PAKPrefix, \"pak.prefix\"},\n\t\t{zf.APIKeyID, \"api_key.id\"},\n\t\t{zf.APIKeyPrefix, \"api_key.prefix\"},\n\t\t{zf.OmitPeers, \"omit_peers\"},\n\t\t{zf.Stream, \"stream\"},\n\t}\n\n\tfor _, tt := range fieldTests {\n\t\tt.Run(tt.expected, func(t *testing.T) {\n\t\t\tassert.Equal(t, tt.expected, tt.constant)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "integration/README.md",
    "content": "# Integration testing\n\nHeadscale relies on integration testing to ensure we remain compatible with Tailscale.\n\nThis is typically performed by starting a Headscale server and running a test \"scenario\"\nwith an array of Tailscale clients and versions.\n\nHeadscale's test framework and the current set of scenarios are defined in this directory.\n\nTests are located in files ending with `_test.go` and the framework are located in the rest.\n\n## Running integration tests locally\n\nThe easiest way to run tests locally is to use [act](https://github.com/nektos/act), a local GitHub Actions runner:\n\n```\nact pull_request -W .github/workflows/test-integration.yaml\n```\n\nAlternatively, the `docker run` command in each GitHub workflow file can be used.\n\n## Running integration tests on GitHub Actions\n\nEach test currently runs as a separate workflows in GitHub actions, to add new test, run\n`go generate` inside `../cmd/gh-action-integration-generator/` and commit the result.\n"
  },
  {
    "path": "integration/acl_test.go",
    "content": "package integration\n\nimport (\n\t\"fmt\"\n\t\"net/netip\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/google/go-cmp/cmp/cmpopts\"\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\tpolicyv2 \"github.com/juanfont/headscale/hscontrol/policy/v2\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/integration/hsic\"\n\t\"github.com/juanfont/headscale/integration/integrationutil\"\n\t\"github.com/juanfont/headscale/integration/tsic\"\n\t\"github.com/ory/dockertest/v3\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/tailcfg\"\n)\n\nvar veryLargeDestination = []policyv2.AliasWithPorts{\n\taliasWithPorts(prefixp(\"0.0.0.0/5\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"8.0.0.0/7\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"11.0.0.0/8\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"12.0.0.0/6\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"16.0.0.0/4\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"32.0.0.0/3\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"64.0.0.0/2\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"128.0.0.0/3\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"160.0.0.0/5\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"168.0.0.0/6\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"172.0.0.0/12\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"172.32.0.0/11\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"172.64.0.0/10\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"172.128.0.0/9\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"173.0.0.0/8\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"174.0.0.0/7\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"176.0.0.0/4\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"192.0.0.0/9\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"192.128.0.0/11\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"192.160.0.0/13\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"192.169.0.0/16\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"192.170.0.0/15\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"192.172.0.0/14\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"192.176.0.0/12\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"192.192.0.0/10\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"193.0.0.0/8\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"194.0.0.0/7\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"196.0.0.0/6\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"200.0.0.0/5\"), tailcfg.PortRangeAny),\n\taliasWithPorts(prefixp(\"208.0.0.0/4\"), tailcfg.PortRangeAny),\n}\n\nfunc aclScenario(\n\tt *testing.T,\n\tpolicy *policyv2.Policy,\n\ttestName string,\n\tclientsPerUser int,\n) *Scenario {\n\tt.Helper()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: clientsPerUser,\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\trequire.NoError(t, err)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{\n\t\t\t// Alpine containers dont have ip6tables set up, which causes\n\t\t\t// tailscaled to stop configuring the wgengine, causing it\n\t\t\t// to not configure 
DNS.\n\t\t\ttsic.WithNetfilter(\"off\"),\n\t\t\ttsic.WithPackages(\"curl\"),\n\t\t\ttsic.WithWebserver(80),\n\t\t\ttsic.WithDockerWorkdir(\"/\"),\n\t\t},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(testName),\n\t)\n\trequire.NoError(t, err)\n\n\t_, err = scenario.ListTailscaleClientsFQDNs()\n\trequire.NoError(t, err)\n\n\treturn scenario\n}\n\n// This tests a different ACL mechanism: if a host _cannot_ connect\n// to another node at all based on ACL, it should just not be part\n// of the NetMap sent to the host. This is slightly different from\n// the other tests as we can just check whether the hosts are present\n// or not.\nfunc TestACLHostsInNetMapTable(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 2,\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t}\n\n\t// NOTE: All want cases currently check the\n\t// total count of expected peers; this would\n\t// typically be the client count of the users\n\t// they can access, minus one (themselves).
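\n\t// For example: with 2 users and 2 nodes per user, a policy that lets\n\t// everyone reach everyone gives every client 4 - 1 = 3 peers.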
\n\ttests := map[string]struct {\n\t\tusers  ScenarioSpec\n\t\tpolicy policyv2.Policy\n\t\twant   map[string]int\n\t}{\n\t\t// Test that when we have no ACL, each client's netmap has\n\t\t// one peer for every other client.\n\t\t\"base-acls\": {\n\t\t\tusers: spec,\n\t\t\tpolicy: policyv2.Policy{\n\t\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{wildcard()},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}, want: map[string]int{\n\t\t\t\t\"user1@test.no\": 3, // ns1 + ns2\n\t\t\t\t\"user2@test.no\": 3, // ns2 + ns1\n\t\t\t},\n\t\t},\n\t\t// Test that when we have two users who cannot see\n\t\t// each other, each node only has the peers from\n\t\t// its own user.\n\t\t\"two-isolated-users\": {\n\t\t\tusers: spec,\n\t\t\tpolicy: policyv2.Policy{\n\t\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{usernamep(\"user1@\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(usernamep(\"user1@\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{usernamep(\"user2@\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(usernamep(\"user2@\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}, want: map[string]int{\n\t\t\t\t\"user1@test.no\": 1,\n\t\t\t\t\"user2@test.no\": 1,\n\t\t\t},\n\t\t},\n\t\t// Test that when we have two users, with ACLs and they\n\t\t// are restricted to a single port, nodes are still present\n\t\t// in the netmap.\n\t\t\"two-restricted-present-in-netmap\": {\n\t\t\tusers: spec,\n\t\t\tpolicy: policyv2.Policy{\n\t\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{usernamep(\"user1@\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(usernamep(\"user1@\"), tailcfg.PortRange{First: 22, Last: 22}),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{usernamep(\"user2@\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(usernamep(\"user2@\"), tailcfg.PortRange{First: 22, Last: 22}),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{usernamep(\"user1@\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(usernamep(\"user2@\"), tailcfg.PortRange{First: 22, Last: 22}),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{usernamep(\"user2@\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(usernamep(\"user1@\"), tailcfg.PortRange{First: 22, Last: 22}),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}, want: map[string]int{\n\t\t\t\t\"user1@test.no\": 3,\n\t\t\t\t\"user2@test.no\": 3,\n\t\t\t},\n\t\t},\n\t\t// Test that when we have two users that are isolated,\n\t\t// but where one can see the other, we have the appropriate number\n\t\t// of peers. This will still result in all the peers as we\n\t\t// need them present on the other side for the \"return path\".\n\t\t\"two-ns-one-isolated\": {\n\t\t\tusers: spec,\n\t\t\tpolicy: policyv2.Policy{\n\t\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{usernamep(\"user1@\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(usernamep(\"user1@\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{usernamep(\"user2@\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(usernamep(\"user2@\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{usernamep(\"user1@\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(usernamep(\"user2@\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}, want: map[string]int{\n\t\t\t\t\"user1@test.no\": 3, // ns1 + ns2\n\t\t\t\t\"user2@test.no\": 3, // ns1 + ns2 (return path)\n\t\t\t},\n\t\t},\n\t\t\"very-large-destination-prefix-1372\": {\n\t\t\tusers: spec,\n\t\t\tpolicy: policyv2.Policy{\n\t\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{usernamep(\"user1@\")},\n\t\t\t\t\t\tDestinations: append(\n\t\t\t\t\t\t\t[]policyv2.AliasWithPorts{\n\t\t\t\t\t\t\t\taliasWithPorts(usernamep(\"user1@\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tveryLargeDestination...,\n\t\t\t\t\t\t),\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{usernamep(\"user2@\")},\n\t\t\t\t\t\tDestinations: append(\n\t\t\t\t\t\t\t[]policyv2.AliasWithPorts{\n\t\t\t\t\t\t\t\taliasWithPorts(usernamep(\"user2@\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tveryLargeDestination...,\n\t\t\t\t\t\t),\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{usernamep(\"user1@\")},\n\t\t\t\t\t\tDestinations: append(\n\t\t\t\t\t\t\t[]policyv2.AliasWithPorts{\n\t\t\t\t\t\t\t\taliasWithPorts(usernamep(\"user2@\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tveryLargeDestination...,\n\t\t\t\t\t\t),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}, want: map[string]int{\n\t\t\t\t\"user1@test.no\": 3, // ns1 + ns2\n\t\t\t\t\"user2@test.no\": 3, // ns1 + ns2 (return path)\n\t\t\t},\n\t\t},\n\t\t\"ipv6-acls-1470\": {\n\t\t\tusers: spec,\n\t\t\tpolicy: policyv2.Policy{\n\t\t\t\tACLs: 
[]policyv2.ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{wildcard()},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(prefixp(\"0.0.0.0/0\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t\taliasWithPorts(prefixp(\"::/0\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}, want: map[string]int{\n\t\t\t\t\"user1@test.no\": 3, // ns1 + ns2\n\t\t\t\t\"user2@test.no\": 3, // ns2 + ns1\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, testCase := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tcaseSpec := testCase.users\n\t\t\tscenario, err := NewScenario(caseSpec)\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = scenario.CreateHeadscaleEnv(\n\t\t\t\t[]tsic.Option{},\n\t\t\t\thsic.WithTestName(\"aclnetmap\"),\n\t\t\t\thsic.WithACLPolicy(&testCase.policy),\n\t\t\t)\n\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\t\t\tallClients, err := scenario.ListTailscaleClients()\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = scenario.WaitForTailscaleSyncWithPeerCount(testCase.want[\"user1@test.no\"], integrationutil.PeerSyncTimeout(), integrationutil.PeerSyncRetryInterval())\n\t\t\trequire.NoError(t, err)\n\n\t\t\tfor _, client := range allClients {\n\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\tstatus, err := client.Status()\n\t\t\t\t\tassert.NoError(c, err)\n\n\t\t\t\t\tuser := status.User[status.Self.UserID].LoginName\n\n\t\t\t\t\tassert.Len(c, status.Peer, (testCase.want[user]))\n\t\t\t\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for expected peer visibility\")\n\t\t\t}\n\t\t})\n\t}\n}\n\n// Test to confirm that we can use user:80 from one user\n// This should make the node appear in the peer list, but\n// disallow ping.\n// This ACL will not allow user1 access its own machines.\n// Reported: https://github.com/juanfont/headscale/issues/699\nfunc TestACLAllowUser80Dst(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tscenario := aclScenario(t,\n\t\t&policyv2.Policy{\n\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t{\n\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\tSources: []policyv2.Alias{usernamep(\"user1@\")},\n\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\taliasWithPorts(usernamep(\"user2@\"), tailcfg.PortRange{First: 80, Last: 80}),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"acl-allowuser80\",\n\t\t1,\n\t)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\tuser1Clients, err := scenario.ListTailscaleClients(\"user1\")\n\trequire.NoError(t, err)\n\n\tuser2Clients, err := scenario.ListTailscaleClients(\"user2\")\n\trequire.NoError(t, err)\n\n\t// Test that user1 can visit all user2\n\tfor _, client := range user1Clients {\n\t\tfor _, peer := range user2Clients {\n\t\t\tfqdn, err := peer.FQDN()\n\t\t\trequire.NoError(t, err)\n\n\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\t\tt.Logf(\"url from %s to %s\", client.Hostname(), url)\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\tassert.NoError(c, err)\n\t\t\t\tassert.Len(c, result, 13)\n\t\t\t}, 20*time.Second, 500*time.Millisecond, \"Verifying user1 can reach user2\")\n\t\t}\n\t}\n\n\t// Test that user2 _cannot_ visit user1\n\tfor _, client := range user2Clients {\n\t\tfor _, peer := range user1Clients {\n\t\t\tfqdn, err := peer.FQDN()\n\t\t\trequire.NoError(t, err)\n\n\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\t\tt.Logf(\"url from %s to %s\", client.Hostname(), 
url)\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\tassert.Error(c, err)\n\t\t\t\tassert.Empty(c, result)\n\t\t\t}, 20*time.Second, 500*time.Millisecond, \"Verifying user2 cannot reach user1\")\n\t\t}\n\t}\n}\n\nfunc TestACLDenyAllPort80(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tscenario := aclScenario(t,\n\t\t&policyv2.Policy{\n\t\t\tGroups: policyv2.Groups{\n\t\t\t\tpolicyv2.Group(\"group:integration-acl-test\"): []policyv2.Username{policyv2.Username(\"user1@\"), policyv2.Username(\"user2@\")},\n\t\t\t},\n\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t{\n\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\tSources: []policyv2.Alias{groupp(\"group:integration-acl-test\")},\n\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRange{First: 22, Last: 22}),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"acl-denyport80\",\n\t\t4,\n\t)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequire.NoError(t, err)\n\n\tallHostnames, err := scenario.ListTailscaleClientsFQDNs()\n\trequire.NoError(t, err)\n\n\tfor _, client := range allClients {\n\t\tfor _, hostname := range allHostnames {\n\t\t\t// We will always be allowed to check _self_ so shortcircuit\n\t\t\t// the test here.\n\t\t\tif strings.Contains(hostname, client.Hostname()) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", hostname)\n\t\t\tt.Logf(\"url from %s to %s\", client.Hostname(), url)\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\tassert.Error(c, err)\n\t\t\t\tassert.Empty(c, result)\n\t\t\t}, 20*time.Second, 500*time.Millisecond, \"Verifying all traffic is denied\")\n\t\t}\n\t}\n}\n\n// Test to confirm that we can use user:* from one user.\n// This ACL will not allow user1 access its own machines.\n// Reported: https://github.com/juanfont/headscale/issues/699\nfunc TestACLAllowUserDst(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tscenario := aclScenario(t,\n\t\t&policyv2.Policy{\n\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t{\n\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\tSources: []policyv2.Alias{usernamep(\"user1@\")},\n\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\taliasWithPorts(usernamep(\"user2@\"), tailcfg.PortRangeAny),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"acl-allowuserdst\",\n\t\t2,\n\t)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\tuser1Clients, err := scenario.ListTailscaleClients(\"user1\")\n\trequire.NoError(t, err)\n\n\tuser2Clients, err := scenario.ListTailscaleClients(\"user2\")\n\trequire.NoError(t, err)\n\n\t// Test that user1 can visit all user2\n\tfor _, client := range user1Clients {\n\t\tfor _, peer := range user2Clients {\n\t\t\tfqdn, err := peer.FQDN()\n\t\t\trequire.NoError(t, err)\n\n\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\t\tt.Logf(\"url from %s to %s\", client.Hostname(), url)\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\tassert.NoError(c, err)\n\t\t\t\tassert.Len(c, result, 13)\n\t\t\t}, 20*time.Second, 500*time.Millisecond, \"Verifying user1 can reach user2\")\n\t\t}\n\t}\n\n\t// Test that user2 _cannot_ visit user1\n\tfor _, client := range user2Clients {\n\t\tfor _, peer := range user1Clients {\n\t\t\tfqdn, err := peer.FQDN()\n\t\t\trequire.NoError(t, err)\n\n\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\t\tt.Logf(\"url from %s to 
%s\", client.Hostname(), url)\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\tassert.Error(c, err)\n\t\t\t\tassert.Empty(c, result)\n\t\t\t}, 20*time.Second, 500*time.Millisecond, \"Verifying user2 cannot reach user1\")\n\t\t}\n\t}\n}\n\n// Test to confirm that we can use *:* from one user\n// Reported: https://github.com/juanfont/headscale/issues/699\nfunc TestACLAllowStarDst(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tscenario := aclScenario(t,\n\t\t&policyv2.Policy{\n\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t{\n\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\tSources: []policyv2.Alias{usernamep(\"user1@\")},\n\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRangeAny),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"acl-allowstar\",\n\t\t2,\n\t)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\tuser1Clients, err := scenario.ListTailscaleClients(\"user1\")\n\trequire.NoError(t, err)\n\n\tuser2Clients, err := scenario.ListTailscaleClients(\"user2\")\n\trequire.NoError(t, err)\n\n\t// Test that user1 can visit all user2\n\tfor _, client := range user1Clients {\n\t\tfor _, peer := range user2Clients {\n\t\t\tfqdn, err := peer.FQDN()\n\t\t\trequire.NoError(t, err)\n\n\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\t\tt.Logf(\"url from %s to %s\", client.Hostname(), url)\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\tassert.NoError(c, err)\n\t\t\t\tassert.Len(c, result, 13)\n\t\t\t}, 20*time.Second, 500*time.Millisecond, \"Verifying user1 can reach user2\")\n\t\t}\n\t}\n\n\t// Test that user2 _cannot_ visit user1\n\tfor _, client := range user2Clients {\n\t\tfor _, peer := range user1Clients {\n\t\t\tfqdn, err := peer.FQDN()\n\t\t\trequire.NoError(t, err)\n\n\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\t\tt.Logf(\"url from %s to %s\", client.Hostname(), url)\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\tassert.Error(c, err)\n\t\t\t\tassert.Empty(c, result)\n\t\t\t}, 20*time.Second, 500*time.Millisecond, \"Verifying user2 cannot reach user1\")\n\t\t}\n\t}\n}\n\n// TestACLNamedHostsCanReachBySubnet is the same as\n// TestACLNamedHostsCanReach, but it tests if we expand a\n// full CIDR correctly. 
All routes should work.\nfunc TestACLNamedHostsCanReachBySubnet(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tscenario := aclScenario(t,\n\t\t&policyv2.Policy{\n\t\t\tHosts: policyv2.Hosts{\n\t\t\t\t\"all\": policyv2.Prefix(netip.MustParsePrefix(\"100.64.0.0/24\")),\n\t\t\t},\n\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t// Everyone can curl the whole \"all\" subnet\n\t\t\t\t{\n\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\tSources: []policyv2.Alias{wildcard()},\n\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\taliasWithPorts(hostp(\"all\"), tailcfg.PortRangeAny),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"acl-namedsubnet\",\n\t\t3,\n\t)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\tuser1Clients, err := scenario.ListTailscaleClients(\"user1\")\n\trequire.NoError(t, err)\n\n\tuser2Clients, err := scenario.ListTailscaleClients(\"user2\")\n\trequire.NoError(t, err)\n\n\t// Test that user1 can visit all user2\n\tfor _, client := range user1Clients {\n\t\tfor _, peer := range user2Clients {\n\t\t\tfqdn, err := peer.FQDN()\n\t\t\trequire.NoError(t, err)\n\n\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\t\tt.Logf(\"url from %s to %s\", client.Hostname(), url)\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\tassert.NoError(c, err)\n\t\t\t\tassert.Len(c, result, 13)\n\t\t\t}, 20*time.Second, 500*time.Millisecond, \"Verifying user1 can reach user2\")\n\t\t}\n\t}\n\n\t// Test that user2 can visit all user1, note that this\n\t// is _not_ symmetric.\n\tfor _, client := range user2Clients {\n\t\tfor _, peer := range user1Clients {\n\t\t\tfqdn, err := peer.FQDN()\n\t\t\trequire.NoError(t, err)\n\n\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\t\tt.Logf(\"url from %s to %s\", client.Hostname(), url)\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\tassert.NoError(c, err)\n\t\t\t\tassert.Len(c, result, 13)\n\t\t\t}, 20*time.Second, 500*time.Millisecond, \"Verifying user2 can reach user1\")\n\t\t}\n\t}\n}\n\n// This test aims to cover cases where individual hosts are allowed and denied\n// access based on the IPs assigned to them via named host aliases\n// https://github.com/juanfont/headscale/issues/941\n//\n//\tACL = [{\n//\t\t\t\"DstPorts\": [{\n//\t\t\t\t\"Bits\": null,\n//\t\t\t\t\"IP\": \"100.64.0.3/32\",\n//\t\t\t\t\"Ports\": {\n//\t\t\t\t\t\"First\": 0,\n//\t\t\t\t\t\"Last\": 65535\n//\t\t\t\t}\n//\t\t\t}],\n//\t\t\t\"SrcIPs\": [\"*\"]\n//\t\t}, {\n//\n//\t\t\t\"DstPorts\": [{\n//\t\t\t\t\"Bits\": null,\n//\t\t\t\t\"IP\": \"100.64.0.2/32\",\n//\t\t\t\t\"Ports\": {\n//\t\t\t\t\t\"First\": 0,\n//\t\t\t\t\t\"Last\": 65535\n//\t\t\t\t}\n//\t\t\t}],\n//\t\t\t\"SrcIPs\": [\"100.64.0.1/32\"]\n//\t\t}]\n//\n//\tACL Cache Map= {\n//\t\t\"*\": {\n//\t\t\t\"100.64.0.3/32\": {}\n//\t\t},\n//\t\t\"100.64.0.1/32\": {\n//\t\t\t\"100.64.0.2/32\": {}\n//\t\t}\n//\t}\n//\n// Additionally verify ipv6 behaviour, part of\n// https://github.com/juanfont/headscale/issues/809\nfunc TestACLNamedHostsCanReach(t *testing.T) {\n\tIntegrationSkip(t)\n\n\ttests := map[string]struct {\n\t\tpolicy policyv2.Policy\n\t}{\n\t\t\"ipv4\": {\n\t\t\tpolicy: policyv2.Policy{\n\t\t\t\tHosts: policyv2.Hosts{\n\t\t\t\t\t\"test1\": policyv2.Prefix(netip.MustParsePrefix(\"100.64.0.1/32\")),\n\t\t\t\t\t\"test2\": policyv2.Prefix(netip.MustParsePrefix(\"100.64.0.2/32\")),\n\t\t\t\t\t\"test3\": 
policyv2.Prefix(netip.MustParsePrefix(\"100.64.0.3/32\")),\n\t\t\t\t},\n\t\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t\t// Everyone can curl test3\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{wildcard()},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(hostp(\"test3\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t// test1 can curl test2\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{hostp(\"test1\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(hostp(\"test2\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"ipv6\": {\n\t\t\tpolicy: policyv2.Policy{\n\t\t\t\tHosts: policyv2.Hosts{\n\t\t\t\t\t\"test1\": policyv2.Prefix(netip.MustParsePrefix(\"fd7a:115c:a1e0::1/128\")),\n\t\t\t\t\t\"test2\": policyv2.Prefix(netip.MustParsePrefix(\"fd7a:115c:a1e0::2/128\")),\n\t\t\t\t\t\"test3\": policyv2.Prefix(netip.MustParsePrefix(\"fd7a:115c:a1e0::3/128\")),\n\t\t\t\t},\n\t\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t\t// Everyone can curl test3\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{wildcard()},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(hostp(\"test3\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t// test1 can curl test2\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{hostp(\"test1\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(hostp(\"test2\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, testCase := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tscenario := aclScenario(t,\n\t\t\t\t&testCase.policy,\n\t\t\t\t\"acl-namedreach\",\n\t\t\t\t2,\n\t\t\t)\n\t\t\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\t\t\t// Since the users don't matter here, we basically expect that some clients\n\t\t\t// will be assigned these IPs and that we can pick them up for our own use.\n\t\t\ttest1ip4 := netip.MustParseAddr(\"100.64.0.1\")\n\t\t\ttest1ip6 := netip.MustParseAddr(\"fd7a:115c:a1e0::1\")\n\t\t\ttest1, err := scenario.FindTailscaleClientByIP(test1ip6)\n\t\t\trequire.NoError(t, err)\n\n\t\t\ttest1fqdn, err := test1.FQDN()\n\t\t\trequire.NoError(t, err)\n
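\n\t\t\t// Each node is exercised via its IPv4, IPv6 and FQDN URL so both address\n\t\t\t// families are covered (see the issue #809 note above).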
test3ip4.String())\n\t\t\ttest3ip6URL := fmt.Sprintf(\"http://[%s]/etc/hostname\", test3ip6.String())\n\t\t\ttest3fqdnURL := fmt.Sprintf(\"http://%s/etc/hostname\", test3fqdn)\n\n\t\t\t// test1 can query test3\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := test1.Curl(test3ip4URL)\n\t\t\t\tassert.NoError(c, err)\n\t\t\t\tassert.Lenf(\n\t\t\t\t\tc,\n\t\t\t\t\tresult,\n\t\t\t\t\t13,\n\t\t\t\t\t\"failed to connect from test1 to test3 with URL %s, expected hostname of 13 chars, got %s\",\n\t\t\t\t\ttest3ip4URL,\n\t\t\t\t\tresult,\n\t\t\t\t)\n\t\t\t}, 10*time.Second, 200*time.Millisecond, \"test1 should reach test3 via IPv4\")\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := test1.Curl(test3ip6URL)\n\t\t\t\tassert.NoError(c, err)\n\t\t\t\tassert.Lenf(\n\t\t\t\t\tc,\n\t\t\t\t\tresult,\n\t\t\t\t\t13,\n\t\t\t\t\t\"failed to connect from test1 to test3 with URL %s, expected hostname of 13 chars, got %s\",\n\t\t\t\t\ttest3ip6URL,\n\t\t\t\t\tresult,\n\t\t\t\t)\n\t\t\t}, 10*time.Second, 200*time.Millisecond, \"test1 should reach test3 via IPv6\")\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := test1.Curl(test3fqdnURL)\n\t\t\t\tassert.NoError(c, err)\n\t\t\t\tassert.Lenf(\n\t\t\t\t\tc,\n\t\t\t\t\tresult,\n\t\t\t\t\t13,\n\t\t\t\t\t\"failed to connect from test1 to test3 with URL %s, expected hostname of 13 chars, got %s\",\n\t\t\t\t\ttest3fqdnURL,\n\t\t\t\t\tresult,\n\t\t\t\t)\n\t\t\t}, 10*time.Second, 200*time.Millisecond, \"test1 should reach test3 via FQDN\")\n\n\t\t\t// test2 can query test3\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := test2.Curl(test3ip4URL)\n\t\t\t\tassert.NoError(c, err)\n\t\t\t\tassert.Lenf(\n\t\t\t\t\tc,\n\t\t\t\t\tresult,\n\t\t\t\t\t13,\n\t\t\t\t\t\"failed to connect from test2 to test3 with URL %s, expected hostname of 13 chars, got %s\",\n\t\t\t\t\ttest3ip4URL,\n\t\t\t\t\tresult,\n\t\t\t\t)\n\t\t\t}, 10*time.Second, 200*time.Millisecond, \"test2 should reach test3 via IPv4\")\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := test2.Curl(test3ip6URL)\n\t\t\t\tassert.NoError(c, err)\n\t\t\t\tassert.Lenf(\n\t\t\t\t\tc,\n\t\t\t\t\tresult,\n\t\t\t\t\t13,\n\t\t\t\t\t\"failed to connect from test2 to test3 with URL %s, expected hostname of 13 chars, got %s\",\n\t\t\t\t\ttest3ip6URL,\n\t\t\t\t\tresult,\n\t\t\t\t)\n\t\t\t}, 10*time.Second, 200*time.Millisecond, \"test2 should reach test3 via IPv6\")\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := test2.Curl(test3fqdnURL)\n\t\t\t\tassert.NoError(c, err)\n\t\t\t\tassert.Lenf(\n\t\t\t\t\tc,\n\t\t\t\t\tresult,\n\t\t\t\t\t13,\n\t\t\t\t\t\"failed to connect from test2 to test3 with URL %s, expected hostname of 13 chars, got %s\",\n\t\t\t\t\ttest3fqdnURL,\n\t\t\t\t\tresult,\n\t\t\t\t)\n\t\t\t}, 10*time.Second, 200*time.Millisecond, \"test2 should reach test3 via FQDN\")\n\n
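\t\t\t// Denied paths are checked once, without polling: by the time\n\t\t\t// the allowed paths above have converged, the policy has\n\t\t\t// propagated, so a single failing curl is sufficient.\n\t\t\t// test3 cannot query test1\n\t\t\tresult, err := test3.Curl(test1ip4URL)\n\t\t\tassert.Empty(t, result)\n\t\t\trequire.Error(t, err)\n\n\t\t\tresult, err = test3.Curl(test1ip6URL)\n\t\t\tassert.Empty(t, result)\n\t\t\trequire.Error(t, err)\n\n\t\t\tresult, err = test3.Curl(test1fqdnURL)\n\t\t\tassert.Empty(t, result)\n\t\t\trequire.Error(t, err)\n\n\t\t\t// test3 cannot query test2\n\t\t\tresult, err = test3.Curl(test2ip4URL)\n\t\t\tassert.Empty(t, result)\n\t\t\trequire.Error(t, err)\n\n\t\t\tresult, err = 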
test3.Curl(test2ip6URL)\n\t\t\tassert.Empty(t, result)\n\t\t\trequire.Error(t, err)\n\n\t\t\tresult, err = test3.Curl(test2fqdnURL)\n\t\t\tassert.Empty(t, result)\n\t\t\trequire.Error(t, err)\n\n\t\t\t// test1 can query test2\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := test1.Curl(test2ip4URL)\n\t\t\t\tassert.NoError(c, err)\n\t\t\t\tassert.Lenf(\n\t\t\t\t\tc,\n\t\t\t\t\tresult,\n\t\t\t\t\t13,\n\t\t\t\t\t\"failed to connect from test1 to test2 with URL %s, expected hostname of 13 chars, got %s\",\n\t\t\t\t\ttest2ip4URL,\n\t\t\t\t\tresult,\n\t\t\t\t)\n\t\t\t}, 10*time.Second, 200*time.Millisecond, \"test1 should reach test2 via IPv4\")\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := test1.Curl(test2ip6URL)\n\t\t\t\tassert.NoError(c, err)\n\t\t\t\tassert.Lenf(\n\t\t\t\t\tc,\n\t\t\t\t\tresult,\n\t\t\t\t\t13,\n\t\t\t\t\t\"failed to connect from test1 to test2 with URL %s, expected hostname of 13 chars, got %s\",\n\t\t\t\t\ttest2ip6URL,\n\t\t\t\t\tresult,\n\t\t\t\t)\n\t\t\t}, 10*time.Second, 200*time.Millisecond, \"test1 should reach test2 via IPv6\")\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := test1.Curl(test2fqdnURL)\n\t\t\t\tassert.NoError(c, err)\n\t\t\t\tassert.Lenf(\n\t\t\t\t\tc,\n\t\t\t\t\tresult,\n\t\t\t\t\t13,\n\t\t\t\t\t\"failed to connect from test1 to test2 with URL %s, expected hostname of 13 chars, got %s\",\n\t\t\t\t\ttest2fqdnURL,\n\t\t\t\t\tresult,\n\t\t\t\t)\n\t\t\t}, 10*time.Second, 200*time.Millisecond, \"test1 should reach test2 via FQDN\")\n\n\t\t\t// test2 cannot query test1\n\t\t\tresult, err = test2.Curl(test1ip4URL)\n\t\t\tassert.Empty(t, result)\n\t\t\trequire.Error(t, err)\n\n\t\t\tresult, err = test2.Curl(test1ip6URL)\n\t\t\tassert.Empty(t, result)\n\t\t\trequire.Error(t, err)\n\n\t\t\tresult, err = test2.Curl(test1fqdnURL)\n\t\t\tassert.Empty(t, result)\n\t\t\trequire.Error(t, err)\n\t\t})\n\t}\n}\n\n// TestACLDevice1CanAccessDevice2 is a table driven test that aims to test\n// the various ways to achieve a connection between device1 and device2 where\n// device1 can access device2, but not the other way around. 
This can be\n// viewed as one of the most important tests here as it covers most of the\n// syntax that can be used.\n//\n// Before adding new test cases, consider if it can be reduced to a case\n// in this function.\nfunc TestACLDevice1CanAccessDevice2(t *testing.T) {\n\tIntegrationSkip(t)\n\n\ttests := map[string]struct {\n\t\tpolicy policyv2.Policy\n\t}{\n\t\t\"ipv4\": {\n\t\t\tpolicy: policyv2.Policy{\n\t\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{prefixp(\"100.64.0.1/32\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(prefixp(\"100.64.0.2/32\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"ipv6\": {\n\t\t\tpolicy: policyv2.Policy{\n\t\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{prefixp(\"fd7a:115c:a1e0::1/128\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(prefixp(\"fd7a:115c:a1e0::2/128\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"hostv4cidr\": {\n\t\t\tpolicy: policyv2.Policy{\n\t\t\t\tHosts: policyv2.Hosts{\n\t\t\t\t\t\"test1\": policyv2.Prefix(netip.MustParsePrefix(\"100.64.0.1/32\")),\n\t\t\t\t\t\"test2\": policyv2.Prefix(netip.MustParsePrefix(\"100.64.0.2/32\")),\n\t\t\t\t},\n\t\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{hostp(\"test1\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(hostp(\"test2\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"hostv6cidr\": {\n\t\t\tpolicy: policyv2.Policy{\n\t\t\t\tHosts: policyv2.Hosts{\n\t\t\t\t\t\"test1\": policyv2.Prefix(netip.MustParsePrefix(\"fd7a:115c:a1e0::1/128\")),\n\t\t\t\t\t\"test2\": policyv2.Prefix(netip.MustParsePrefix(\"fd7a:115c:a1e0::2/128\")),\n\t\t\t\t},\n\t\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{hostp(\"test1\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(hostp(\"test2\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"group\": {\n\t\t\tpolicy: policyv2.Policy{\n\t\t\t\tGroups: policyv2.Groups{\n\t\t\t\t\tpolicyv2.Group(\"group:one\"): []policyv2.Username{policyv2.Username(\"user1@\")},\n\t\t\t\t\tpolicyv2.Group(\"group:two\"): []policyv2.Username{policyv2.Username(\"user2@\")},\n\t\t\t\t},\n\t\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{groupp(\"group:one\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(groupp(\"group:two\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO(kradalby): Add similar tests for Tags, might need support\n\t\t// in the scenario function when we create or join the clients.\n\t}\n\n\tfor name, testCase := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tscenario := aclScenario(t, &testCase.policy, \"acl-dev1dev2\", 1)\n\t\t\tdefer scenario.ShutdownAssertNoPanics(t)\n\n
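\t\t\t// As in TestACLNamedHostsCanReach, we rely on the scenario\n\t\t\t// assigning 100.64.0.1 and 100.64.0.2 (and their IPv6\n\t\t\t// counterparts) to the two nodes, and resolve those addresses\n\t\t\t// back to concrete clients.\n\t\t\ttest1ip := netip.MustParseAddr(\"100.64.0.1\")\n\t\t\ttest1ip6 := netip.MustParseAddr(\"fd7a:115c:a1e0::1\")\n\t\t\ttest1, err := scenario.FindTailscaleClientByIP(test1ip)\n\t\t\tassert.NotNil(t, test1)\n\t\t\trequire.NoError(t, 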
err)\n\n\t\t\ttest1fqdn, err := test1.FQDN()\n\t\t\trequire.NoError(t, err)\n\n\t\t\ttest1ipURL := fmt.Sprintf(\"http://%s/etc/hostname\", test1ip.String())\n\t\t\ttest1ip6URL := fmt.Sprintf(\"http://[%s]/etc/hostname\", test1ip6.String())\n\t\t\ttest1fqdnURL := fmt.Sprintf(\"http://%s/etc/hostname\", test1fqdn)\n\n\t\t\ttest2ip := netip.MustParseAddr(\"100.64.0.2\")\n\t\t\ttest2ip6 := netip.MustParseAddr(\"fd7a:115c:a1e0::2\")\n\t\t\ttest2, err := scenario.FindTailscaleClientByIP(test2ip)\n\t\t\tassert.NotNil(t, test2)\n\t\t\trequire.NoError(t, err)\n\n\t\t\ttest2fqdn, err := test2.FQDN()\n\t\t\trequire.NoError(t, err)\n\n\t\t\ttest2ipURL := fmt.Sprintf(\"http://%s/etc/hostname\", test2ip.String())\n\t\t\ttest2ip6URL := fmt.Sprintf(\"http://[%s]/etc/hostname\", test2ip6.String())\n\t\t\ttest2fqdnURL := fmt.Sprintf(\"http://%s/etc/hostname\", test2fqdn)\n\n\t\t\t// test1 can query test2\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := test1.Curl(test2ipURL)\n\t\t\t\tassert.NoError(c, err)\n\t\t\t\tassert.Lenf(\n\t\t\t\t\tc,\n\t\t\t\t\tresult,\n\t\t\t\t\t13,\n\t\t\t\t\t\"failed to connect from test1 to test2 with URL %s, expected hostname of 13 chars, got %s\",\n\t\t\t\t\ttest2ipURL,\n\t\t\t\t\tresult,\n\t\t\t\t)\n\t\t\t}, 10*time.Second, 200*time.Millisecond, \"test1 should reach test2 via IPv4\")\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := test1.Curl(test2ip6URL)\n\t\t\t\tassert.NoError(c, err)\n\t\t\t\tassert.Lenf(\n\t\t\t\t\tc,\n\t\t\t\t\tresult,\n\t\t\t\t\t13,\n\t\t\t\t\t\"failed to connect from test1 to test2 with URL %s, expected hostname of 13 chars, got %s\",\n\t\t\t\t\ttest2ip6URL,\n\t\t\t\t\tresult,\n\t\t\t\t)\n\t\t\t}, 10*time.Second, 200*time.Millisecond, \"test1 should reach test2 via IPv6\")\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := test1.Curl(test2fqdnURL)\n\t\t\t\tassert.NoError(c, err)\n\t\t\t\tassert.Lenf(\n\t\t\t\t\tc,\n\t\t\t\t\tresult,\n\t\t\t\t\t13,\n\t\t\t\t\t\"failed to connect from test1 to test2 with URL %s, expected hostname of 13 chars, got %s\",\n\t\t\t\t\ttest2fqdnURL,\n\t\t\t\t\tresult,\n\t\t\t\t)\n\t\t\t}, 10*time.Second, 200*time.Millisecond, \"test1 should reach test2 via FQDN\")\n\n\t\t\t// test2 cannot query test1 (negative test case)\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := test2.Curl(test1ipURL)\n\t\t\t\tassert.Error(c, err)\n\t\t\t\tassert.Empty(c, result)\n\t\t\t}, 10*time.Second, 200*time.Millisecond, \"test2 should NOT reach test1 via IPv4\")\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := test2.Curl(test1ip6URL)\n\t\t\t\tassert.Error(c, err)\n\t\t\t\tassert.Empty(c, result)\n\t\t\t}, 10*time.Second, 200*time.Millisecond, \"test2 should NOT reach test1 via IPv6\")\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := test2.Curl(test1fqdnURL)\n\t\t\t\tassert.Error(c, err)\n\t\t\t\tassert.Empty(c, result)\n\t\t\t}, 10*time.Second, 200*time.Millisecond, \"test2 should NOT reach test1 via FQDN\")\n\t\t})\n\t}\n}\n\nfunc TestPolicyUpdateWhileRunningWithCLIInDatabase(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 1,\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{\n
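\t\t\t// curl is installed and a webserver on port 80 serves\n\t\t\t// /etc/hostname so that nodes can probe each other over HTTP.\n\t\t\t// Alpine containers don't have ip6tables set 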
up, which causes\n\t\t\t// tailscaled to stop configuring the wgengine, causing it\n\t\t\t// to not configure DNS.\n\t\t\ttsic.WithNetfilter(\"off\"),\n\t\t\ttsic.WithPackages(\"curl\"),\n\t\t\ttsic.WithWebserver(80),\n\t\t\ttsic.WithDockerWorkdir(\"/\"),\n\t\t},\n\t\thsic.WithTestName(\"policyreload\"),\n\t\thsic.WithPolicyMode(types.PolicyModeDB), // test updates policy at runtime via CLI\n\t)\n\trequire.NoError(t, err)\n\n\t_, err = scenario.ListTailscaleClientsFQDNs()\n\trequire.NoError(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequire.NoError(t, err)\n\n\tuser1Clients, err := scenario.ListTailscaleClients(\"user1\")\n\trequire.NoError(t, err)\n\n\tuser2Clients, err := scenario.ListTailscaleClients(\"user2\")\n\trequire.NoError(t, err)\n\n\tall := append(user1Clients, user2Clients...)\n\n\t// Initially all nodes can reach each other\n\tfor _, client := range all {\n\t\tfor _, peer := range all {\n\t\t\tif client.ContainerID() == peer.ContainerID() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfqdn, err := peer.FQDN()\n\t\t\trequire.NoError(t, err)\n\n\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\t\tt.Logf(\"url from %s to %s\", client.Hostname(), url)\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\tassert.NoError(c, err)\n\t\t\t\tassert.Len(c, result, 13)\n\t\t\t}, 20*time.Second, 500*time.Millisecond, \"Verifying all clients can reach each other\")\n\t\t}\n\t}\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\tp := policyv2.Policy{\n\t\tACLs: []policyv2.ACL{\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []policyv2.Alias{usernamep(\"user1@\")},\n\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\taliasWithPorts(usernamep(\"user2@\"), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tHosts: policyv2.Hosts{},\n\t}\n\n\terr = headscale.SetPolicy(&p)\n\trequire.NoError(t, err)\n\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t// Get the current policy and check\n\t\t// if it is the same as the one we set.\n\t\tvar output *policyv2.Policy\n\n\t\terr = executeAndUnmarshal(\n\t\t\theadscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"policy\",\n\t\t\t\t\"get\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&output,\n\t\t)\n\t\tassert.NoError(ct, err)\n\n\t\tassert.Len(ct, output.ACLs, 1)\n\n\t\tif diff := cmp.Diff(p, *output, cmpopts.IgnoreUnexported(policyv2.Policy{}), cmpopts.EquateEmpty()); diff != \"\" {\n\t\t\tct.Errorf(\"unexpected policy(-want +got):\\n%s\", diff)\n\t\t}\n\t}, 30*time.Second, 1*time.Second, \"verifying that the new policy took effect\")\n\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t// Test that user1 can visit all user2\n\t\tfor _, client := range user1Clients {\n\t\t\tfor _, peer := range user2Clients {\n\t\t\t\tfqdn, err := peer.FQDN()\n\t\t\t\tassert.NoError(ct, err)\n\n\t\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\t\t\tt.Logf(\"url from %s to %s\", client.Hostname(), url)\n\n\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\tassert.Len(ct, result, 13)\n\t\t\t\tassert.NoError(ct, err)\n\t\t\t}\n\t\t}\n\n\t\t// Test that user2 _cannot_ visit user1\n\t\tfor _, client := range user2Clients {\n\t\t\tfor _, peer := range user1Clients {\n\t\t\t\tfqdn, err := peer.FQDN()\n\t\t\t\tassert.NoError(ct, err)\n\n\t\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\t\t\tt.Logf(\"url from %s to %s\", client.Hostname(), url)\n\n
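\t\t\t\t// The denial must first propagate to the node; the\n\t\t\t\t// surrounding EventuallyWithT retries until it has.\n\t\t\t\tresult, err := 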
client.Curl(url)\n\t\t\t\tassert.Empty(ct, result)\n\t\t\t\tassert.Error(ct, err)\n\t\t\t}\n\t\t}\n\t}, 30*time.Second, 1*time.Second, \"new policy did not get propagated to nodes\")\n}\n\nfunc TestACLAutogroupMember(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tscenario := aclScenario(t,\n\t\t&policyv2.Policy{\n\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t{\n\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\tSources: []policyv2.Alias{new(policyv2.AutoGroupMember)},\n\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\taliasWithPorts(new(policyv2.AutoGroupMember), tailcfg.PortRangeAny),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"acl-agmember\",\n\t\t2,\n\t)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequire.NoError(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequire.NoError(t, err)\n\n\t// Test that untagged nodes can access each other\n\tfor _, client := range allClients {\n\t\tvar clientIsUntagged bool\n\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tstatus, err := client.Status()\n\t\t\tassert.NoError(c, err)\n\n\t\t\tclientIsUntagged = status.Self.Tags == nil || status.Self.Tags.Len() == 0\n\t\t\tassert.True(c, clientIsUntagged, \"Expected client %s to be untagged for autogroup:member test\", client.Hostname())\n\t\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for client %s to be untagged\", client.Hostname())\n\n\t\tif !clientIsUntagged {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, peer := range allClients {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar peerIsUntagged bool\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tstatus, err := peer.Status()\n\t\t\t\tassert.NoError(c, err)\n\n\t\t\t\tpeerIsUntagged = status.Self.Tags == nil || status.Self.Tags.Len() == 0\n\t\t\t\tassert.True(c, peerIsUntagged, \"Expected peer %s to be untagged for autogroup:member test\", peer.Hostname())\n\t\t\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for peer %s to be untagged\", peer.Hostname())\n\n\t\t\tif !peerIsUntagged {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfqdn, err := peer.FQDN()\n\t\t\trequire.NoError(t, err)\n\n\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\t\tt.Logf(\"url from %s to %s\", client.Hostname(), url)\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\tassert.NoError(c, err)\n\t\t\t\tassert.Len(c, result, 13)\n\t\t\t}, 20*time.Second, 500*time.Millisecond, \"Verifying autogroup:member connectivity\")\n\t\t}\n\t}\n}\n\nfunc TestACLAutogroupTagged(t *testing.T) {\n\tIntegrationSkip(t)\n\n\t// Create a custom scenario for testing autogroup:tagged\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 2, // 2 nodes per user - one tagged, one untagged\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\tpolicy := &policyv2.Policy{\n\t\tTagOwners: policyv2.TagOwners{\n\t\t\t\"tag:test\": policyv2.Owners{usernameOwner(\"user1@\"), usernameOwner(\"user2@\")},\n\t\t},\n\t\tACLs: []policyv2.ACL{\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []policyv2.Alias{new(policyv2.AutoGroupTagged)},\n\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\taliasWithPorts(new(policyv2.AutoGroupTagged), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// Create only the headscale server (not the full environment with users/nodes)\n\theadscale, 
err := scenario.Headscale(\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"acl-autogroup-tagged\"),\n\t)\n\trequire.NoError(t, err)\n\n\t// Create users and nodes manually with specific tags\n\t// Tags are now set via PreAuthKey (tags-as-identity model), not via --advertise-tags\n\tfor _, userStr := range spec.Users {\n\t\tuser, err := scenario.CreateUser(userStr)\n\t\trequire.NoError(t, err)\n\n\t\t// Create two pre-auth keys per user: one tagged, one untagged\n\t\ttaggedAuthKey, err := scenario.CreatePreAuthKeyWithTags(user.GetId(), true, false, []string{\"tag:test\"})\n\t\trequire.NoError(t, err)\n\n\t\tuntaggedAuthKey, err := scenario.CreatePreAuthKey(user.GetId(), true, false)\n\t\trequire.NoError(t, err)\n\n\t\t// Create nodes with proper naming\n\t\tfor i := range spec.NodesPerUser {\n\t\t\tvar (\n\t\t\t\tauthKey string\n\t\t\t\tversion string\n\t\t\t)\n\n\t\t\tif i == 0 {\n\t\t\t\t// First node is tagged - use tagged PreAuthKey\n\t\t\t\tauthKey = taggedAuthKey.GetKey()\n\t\t\t\tversion = \"head\"\n\n\t\t\t\tt.Logf(\"Creating tagged node for %s\", userStr)\n\t\t\t} else {\n\t\t\t\t// Second node is untagged - use untagged PreAuthKey\n\t\t\t\tauthKey = untaggedAuthKey.GetKey()\n\t\t\t\tversion = \"unstable\"\n\n\t\t\t\tt.Logf(\"Creating untagged node for %s\", userStr)\n\t\t\t}\n\n\t\t\t// Get the network for this scenario\n\t\t\tnetworks := scenario.Networks()\n\n\t\t\tvar network *dockertest.Network\n\t\t\tif len(networks) > 0 {\n\t\t\t\tnetwork = networks[0]\n\t\t\t}\n\n\t\t\t// Create the tailscale node with appropriate options.\n\t\t\t// CACert and HeadscaleName are passed explicitly because\n\t\t\t// nodes created via CreateTailscaleNode are not part of\n\t\t\t// the standard CreateHeadscaleEnv flow.\n\t\t\topts := []tsic.Option{\n\t\t\t\ttsic.WithCACert(headscale.GetCert()),\n\t\t\t\ttsic.WithHeadscaleName(headscale.GetHostname()),\n\t\t\t\ttsic.WithNetwork(network),\n\t\t\t\ttsic.WithNetfilter(\"off\"),\n\t\t\t\ttsic.WithPackages(\"curl\"),\n\t\t\t\ttsic.WithWebserver(80),\n\t\t\t\ttsic.WithDockerWorkdir(\"/\"),\n\t\t\t}\n\n\t\t\ttsClient, err := tsic.New(\n\t\t\t\tscenario.Pool(),\n\t\t\t\tversion,\n\t\t\t\topts...,\n\t\t\t)\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = tsClient.WaitForNeedsLogin(integrationutil.PeerSyncTimeout())\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Login with the appropriate auth key (tags come from the PreAuthKey)\n\t\t\terr = tsClient.Login(headscale.GetEndpoint(), authKey)\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = tsClient.WaitForRunning(integrationutil.PeerSyncTimeout())\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Add client to user\n\t\t\tuserObj := scenario.GetOrCreateUser(userStr)\n\t\t\tuserObj.Clients[tsClient.Hostname()] = tsClient\n\t\t}\n\t}\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequire.NoError(t, err)\n\trequire.Len(t, allClients, 4) // 2 users * 2 nodes each\n\n\t// Wait for nodes to see only their allowed peers\n\t// Tagged nodes should see each other (2 tagged nodes total)\n\t// Untagged nodes should see no one\n\tvar (\n\t\ttaggedClients   []TailscaleClient\n\t\tuntaggedClients []TailscaleClient\n\t)\n\n\t// First, categorize nodes by checking their tags\n\n\tfor _, client := range allClients {\n\t\thostname := client.Hostname()\n\n\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t\tstatus, err := client.Status()\n\t\t\tassert.NoError(ct, err)\n\n\t\t\tif status.Self.Tags != nil && status.Self.Tags.Len() > 0 {\n\t\t\t\t// This is a tagged node\n\t\t\t\tassert.Len(ct, status.Peers(), 1, 
\"tagged node %s should see exactly 1 peer\", hostname)\n\n\t\t\t\t// Add to tagged list only once we've verified it\n\t\t\t\tfound := false\n\n\t\t\t\tfor _, tc := range taggedClients {\n\t\t\t\t\tif tc.Hostname() == hostname {\n\t\t\t\t\t\tfound = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif !found {\n\t\t\t\t\ttaggedClients = append(taggedClients, client)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// This is an untagged node\n\t\t\t\tassert.Empty(ct, status.Peers(), \"untagged node %s should see 0 peers\", hostname)\n\n\t\t\t\t// Add to untagged list only once we've verified it\n\t\t\t\tfound := false\n\n\t\t\t\tfor _, uc := range untaggedClients {\n\t\t\t\t\tif uc.Hostname() == hostname {\n\t\t\t\t\t\tfound = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif !found {\n\t\t\t\t\tuntaggedClients = append(untaggedClients, client)\n\t\t\t\t}\n\t\t\t}\n\t\t}, 30*time.Second, 1*time.Second, \"verifying peer visibility for node %s\", hostname)\n\t}\n\n\t// Verify we have the expected number of tagged and untagged nodes\n\trequire.Len(t, taggedClients, 2, \"should have exactly 2 tagged nodes\")\n\trequire.Len(t, untaggedClients, 2, \"should have exactly 2 untagged nodes\")\n\n\t// Explicitly verify tags on tagged nodes\n\tfor _, client := range taggedClients {\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tstatus, err := client.Status()\n\t\t\tassert.NoError(c, err)\n\t\t\tassert.NotNil(c, status.Self.Tags, \"tagged node %s should have tags\", client.Hostname())\n\t\t\tassert.Positive(c, status.Self.Tags.Len(), \"tagged node %s should have at least one tag\", client.Hostname())\n\t\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for tags to be applied to tagged nodes\")\n\t}\n\n\t// Verify untagged nodes have no tags\n\tfor _, client := range untaggedClients {\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tstatus, err := client.Status()\n\t\t\tassert.NoError(c, err)\n\n\t\t\tif status.Self.Tags != nil {\n\t\t\t\tassert.Equal(c, 0, status.Self.Tags.Len(), \"untagged node %s should have no tags\", client.Hostname())\n\t\t\t}\n\t\t}, 10*time.Second, 200*time.Millisecond, \"Waiting to verify untagged nodes have no tags\")\n\t}\n\n\t// Test that tagged nodes can communicate with each other\n\tfor _, client := range taggedClients {\n\t\tfor _, peer := range taggedClients {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfqdn, err := peer.FQDN()\n\t\t\trequire.NoError(t, err)\n\n\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\n\t\t\tt.Logf(\"Testing connection from tagged node %s to tagged node %s\", client.Hostname(), peer.Hostname())\n\n\t\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\tassert.NoError(ct, err)\n\t\t\t\tassert.Len(ct, result, 13)\n\t\t\t}, 20*time.Second, 500*time.Millisecond, \"tagged nodes should be able to communicate\")\n\t\t}\n\t}\n\n\t// Test that untagged nodes cannot communicate with anyone\n\tfor _, client := range untaggedClients {\n\t\t// Try to reach tagged nodes (should fail)\n\t\tfor _, peer := range taggedClients {\n\t\t\tfqdn, err := peer.FQDN()\n\t\t\trequire.NoError(t, err)\n\n\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\n\t\t\tt.Logf(\"Testing connection from untagged node %s to tagged node %s (should fail)\", client.Hostname(), peer.Hostname())\n\n\t\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t\t\tresult, err := client.CurlFailFast(url)\n\t\t\t\tassert.Empty(ct, 
result)\n\t\t\t\tassert.Error(ct, err)\n\t\t\t}, 5*time.Second, 200*time.Millisecond, \"untagged nodes should not be able to reach tagged nodes\")\n\t\t}\n\n\t\t// Try to reach other untagged nodes (should also fail)\n\t\tfor _, peer := range untaggedClients {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfqdn, err := peer.FQDN()\n\t\t\trequire.NoError(t, err)\n\n\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\n\t\t\tt.Logf(\"Testing connection from untagged node %s to untagged node %s (should fail)\", client.Hostname(), peer.Hostname())\n\n\t\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t\t\tresult, err := client.CurlFailFast(url)\n\t\t\t\tassert.Empty(ct, result)\n\t\t\t\tassert.Error(ct, err)\n\t\t\t}, 5*time.Second, 200*time.Millisecond, \"untagged nodes should not be able to reach other untagged nodes\")\n\t\t}\n\t}\n\n\t// Test that tagged nodes cannot reach untagged nodes\n\tfor _, client := range taggedClients {\n\t\tfor _, peer := range untaggedClients {\n\t\t\tfqdn, err := peer.FQDN()\n\t\t\trequire.NoError(t, err)\n\n\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\n\t\t\tt.Logf(\"Testing connection from tagged node %s to untagged node %s (should fail)\", client.Hostname(), peer.Hostname())\n\n\t\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t\t\tresult, err := client.CurlFailFast(url)\n\t\t\t\tassert.Empty(ct, result)\n\t\t\t\tassert.Error(ct, err)\n\t\t\t}, 5*time.Second, 200*time.Millisecond, \"tagged nodes should not be able to reach untagged nodes\")\n\t\t}\n\t}\n}\n\n// Test that only devices owned by the same user can access each other and cannot access devices of other users\n// Test structure:\n// - user1: 2 regular nodes (tests autogroup:self for same-user access)\n// - user2: 2 regular nodes (tests autogroup:self for same-user access and cross-user isolation)\n// - user-router: 1 node with tag:router-node (tests that autogroup:self doesn't interfere with other rules).\nfunc TestACLAutogroupSelf(t *testing.T) {\n\tIntegrationSkip(t)\n\n\t// Policy with TWO separate ACL rules:\n\t// 1. autogroup:member -> autogroup:self (same-user access)\n\t// 2. 
group:home <-> tag:router-node (router access, expressed as\n\t//    two directional rules so the router can also respond)\n\t// This tests that autogroup:self doesn't prevent other rules from working.\n\tpolicy := &policyv2.Policy{\n\t\tGroups: policyv2.Groups{\n\t\t\tpolicyv2.Group(\"group:home\"): []policyv2.Username{\n\t\t\t\tpolicyv2.Username(\"user1@\"),\n\t\t\t\tpolicyv2.Username(\"user2@\"),\n\t\t\t},\n\t\t},\n\t\tTagOwners: policyv2.TagOwners{\n\t\t\tpolicyv2.Tag(\"tag:router-node\"): policyv2.Owners{\n\t\t\t\tusernameOwner(\"user-router@\"),\n\t\t\t},\n\t\t},\n\t\tACLs: []policyv2.ACL{\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []policyv2.Alias{new(policyv2.AutoGroupMember)},\n\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\taliasWithPorts(new(policyv2.AutoGroupSelf), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []policyv2.Alias{groupp(\"group:home\")},\n\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\taliasWithPorts(tagp(\"tag:router-node\"), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []policyv2.Alias{tagp(\"tag:router-node\")},\n\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\taliasWithPorts(groupp(\"group:home\"), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// Create custom scenario: user1 and user2 with regular nodes, plus user-router with tagged node\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 2,\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{\n\t\t\ttsic.WithNetfilter(\"off\"),\n\t\t\ttsic.WithPackages(\"curl\"),\n\t\t\ttsic.WithWebserver(80),\n\t\t\ttsic.WithDockerWorkdir(\"/\"),\n\t\t},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"acl-autogroup-self\"),\n\t)\n\trequire.NoError(t, err)\n\n\t// Add router node for user-router (single shared router node)\n\tnetworks := scenario.Networks()\n\n\tvar network *dockertest.Network\n\tif len(networks) > 0 {\n\t\tnetwork = networks[0]\n\t}\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\trouterUser, err := scenario.CreateUser(\"user-router\")\n\trequire.NoError(t, err)\n\n\t// Create a tagged PreAuthKey for the router node (tags-as-identity model)\n\tauthKey, err := scenario.CreatePreAuthKeyWithTags(routerUser.GetId(), true, false, []string{\"tag:router-node\"})\n\trequire.NoError(t, err)\n\n\t// Create router node (tags come from the PreAuthKey).\n\t// CACert and HeadscaleName are passed explicitly because\n\t// nodes created via tsic.New are not part of the standard\n\t// CreateHeadscaleEnv flow.\n\trouterClient, err := tsic.New(\n\t\tscenario.Pool(),\n\t\t\"unstable\",\n\t\ttsic.WithCACert(headscale.GetCert()),\n\t\ttsic.WithHeadscaleName(headscale.GetHostname()),\n\t\ttsic.WithNetwork(network),\n\t\ttsic.WithNetfilter(\"off\"),\n\t\ttsic.WithPackages(\"curl\"),\n\t\ttsic.WithWebserver(80),\n\t\ttsic.WithDockerWorkdir(\"/\"),\n\t)\n\trequire.NoError(t, err)\n\n\terr = routerClient.WaitForNeedsLogin(integrationutil.PeerSyncTimeout())\n\trequire.NoError(t, err)\n\n\terr = routerClient.Login(headscale.GetEndpoint(), authKey.GetKey())\n\trequire.NoError(t, err)\n\n\terr = routerClient.WaitForRunning(integrationutil.PeerSyncTimeout())\n\trequire.NoError(t, err)\n\n\tuserRouterObj := scenario.GetOrCreateUser(\"user-router\")\n\tuserRouterObj.Clients[routerClient.Hostname()] = routerClient\n\n
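\t// Partition the clients: user1 and user2 own regular (untagged)\n\t// devices, while the tagged router node registered above is tracked\n\t// separately via routerClient.\n\tuser1Clients, err := 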
scenario.GetClients(\"user1\")\n\trequire.NoError(t, err)\n\tuser2Clients, err := scenario.GetClients(\"user2\")\n\trequire.NoError(t, err)\n\n\tvar user1Regular, user2Regular []TailscaleClient\n\n\tfor _, client := range user1Clients {\n\t\tstatus, err := client.Status()\n\t\trequire.NoError(t, err)\n\n\t\tif status.Self != nil && (status.Self.Tags == nil || status.Self.Tags.Len() == 0) {\n\t\t\tuser1Regular = append(user1Regular, client)\n\t\t}\n\t}\n\n\tfor _, client := range user2Clients {\n\t\tstatus, err := client.Status()\n\t\trequire.NoError(t, err)\n\n\t\tif status.Self != nil && (status.Self.Tags == nil || status.Self.Tags.Len() == 0) {\n\t\t\tuser2Regular = append(user2Regular, client)\n\t\t}\n\t}\n\n\trequire.NotEmpty(t, user1Regular, \"user1 should have regular (untagged) devices\")\n\trequire.NotEmpty(t, user2Regular, \"user2 should have regular (untagged) devices\")\n\trequire.NotNil(t, routerClient, \"router node should exist\")\n\n\t// Wait for all nodes to sync with their expected peer counts\n\t// With our ACL policy:\n\t// - Regular nodes (user1/user2): 1 same-user regular peer + 1 router-node = 2 peers\n\t// - Router node: 2 user1 regular + 2 user2 regular = 4 peers\n\tfor _, client := range user1Regular {\n\t\terr := client.WaitForPeers(2, integrationutil.PeerSyncTimeout(), integrationutil.PeerSyncRetryInterval())\n\t\trequire.NoError(t, err, \"user1 regular device %s should see 2 peers (1 same-user peer + 1 router)\", client.Hostname())\n\t}\n\n\tfor _, client := range user2Regular {\n\t\terr := client.WaitForPeers(2, integrationutil.PeerSyncTimeout(), integrationutil.PeerSyncRetryInterval())\n\t\trequire.NoError(t, err, \"user2 regular device %s should see 2 peers (1 same-user peer + 1 router)\", client.Hostname())\n\t}\n\n\terr = routerClient.WaitForPeers(4, integrationutil.PeerSyncTimeout(), integrationutil.PeerSyncRetryInterval())\n\trequire.NoError(t, err, \"router should see 4 peers (all group:home regular nodes)\")\n\n\t// Test that user1's regular devices can access each other\n\tfor _, client := range user1Regular {\n\t\tfor _, peer := range user1Regular {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfqdn, err := peer.FQDN()\n\t\t\trequire.NoError(t, err)\n\n\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\t\tt.Logf(\"url from %s (user1) to %s (user1)\", client.Hostname(), fqdn)\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\tassert.NoError(c, err)\n\t\t\t\tassert.Len(c, result, 13)\n\t\t\t}, 10*time.Second, 200*time.Millisecond, \"user1 device should reach other user1 device via autogroup:self\")\n\t\t}\n\t}\n\n\t// Test that user2's regular devices can access each other\n\tfor _, client := range user2Regular {\n\t\tfor _, peer := range user2Regular {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfqdn, err := peer.FQDN()\n\t\t\trequire.NoError(t, err)\n\n\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\t\tt.Logf(\"url from %s (user2) to %s (user2)\", client.Hostname(), fqdn)\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\tassert.NoError(c, err)\n\t\t\t\tassert.Len(c, result, 13)\n\t\t\t}, 10*time.Second, 200*time.Millisecond, \"user2 device should reach other user2 device via autogroup:self\")\n\t\t}\n\t}\n\n\t// Test that user1's regular devices can access router-node\n\tfor _, client := range user1Regular {\n\t\tfqdn, err := 
routerClient.FQDN()\n\t\trequire.NoError(t, err)\n\n\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\tt.Logf(\"url from %s (user1) to %s (router-node) - should SUCCEED\", client.Hostname(), fqdn)\n\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tresult, err := client.Curl(url)\n\t\t\tassert.NoError(c, err)\n\t\t\tassert.NotEmpty(c, result, \"user1 should be able to access router-node via group:home -> tag:router-node rule\")\n\t\t}, 10*time.Second, 200*time.Millisecond, \"user1 device should reach router-node (proves autogroup:self doesn't interfere)\")\n\t}\n\n\t// Test that user2's regular devices can access router-node\n\tfor _, client := range user2Regular {\n\t\tfqdn, err := routerClient.FQDN()\n\t\trequire.NoError(t, err)\n\n\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\tt.Logf(\"url from %s (user2) to %s (router-node) - should SUCCEED\", client.Hostname(), fqdn)\n\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tresult, err := client.Curl(url)\n\t\t\tassert.NoError(c, err)\n\t\t\tassert.NotEmpty(c, result, \"user2 should be able to access router-node via group:home -> tag:router-node rule\")\n\t\t}, 10*time.Second, 200*time.Millisecond, \"user2 device should reach router-node (proves autogroup:self doesn't interfere)\")\n\t}\n\n\t// Test that devices from different users cannot access each other's regular devices\n\tfor _, client := range user1Regular {\n\t\tfor _, peer := range user2Regular {\n\t\t\tfqdn, err := peer.FQDN()\n\t\t\trequire.NoError(t, err)\n\n\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\t\tt.Logf(\"url from %s (user1) to %s (user2 regular) - should FAIL\", client.Hostname(), fqdn)\n\n\t\t\tresult, err := client.Curl(url)\n\t\t\tassert.Empty(t, result, \"user1 should not be able to access user2's regular devices (autogroup:self isolation)\")\n\t\t\trequire.Error(t, err, \"connection from user1 to user2 regular device should fail\")\n\t\t}\n\t}\n\n\tfor _, client := range user2Regular {\n\t\tfor _, peer := range user1Regular {\n\t\t\tfqdn, err := peer.FQDN()\n\t\t\trequire.NoError(t, err)\n\n\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\t\tt.Logf(\"url from %s (user2) to %s (user1 regular) - should FAIL\", client.Hostname(), fqdn)\n\n\t\t\tresult, err := client.Curl(url)\n\t\t\tassert.Empty(t, result, \"user2 should not be able to access user1's regular devices (autogroup:self isolation)\")\n\t\t\tassert.Error(t, err, \"connection from user2 to user1 regular device should fail\")\n\t\t}\n\t}\n}\n\n//nolint:gocyclo // complex integration test scenario\nfunc TestACLPolicyPropagationOverTime(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 2,\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{\n\t\t\ttsic.WithNetfilter(\"off\"),\n\t\t\ttsic.WithPackages(\"curl\"),\n\t\t\ttsic.WithWebserver(80),\n\t\t\ttsic.WithDockerWorkdir(\"/\"),\n\t\t},\n\t\thsic.WithTestName(\"aclpropagation\"),\n\t\thsic.WithPolicyMode(types.PolicyModeDB), // test updates policy at runtime via CLI\n\t)\n\trequire.NoError(t, err)\n\n\t_, err = scenario.ListTailscaleClientsFQDNs()\n\trequire.NoError(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequire.NoError(t, err)\n\n\tuser1Clients, err := scenario.ListTailscaleClients(\"user1\")\n\trequire.NoError(t, err)\n\n\tuser2Clients, err := 
scenario.ListTailscaleClients(\"user2\")\n\trequire.NoError(t, err)\n\n\tallClients := append(user1Clients, user2Clients...)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\t// Define the three policies we'll cycle through\n\tallowAllPolicy := &policyv2.Policy{\n\t\tACLs: []policyv2.ACL{\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []policyv2.Alias{wildcard()},\n\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tautogroupSelfPolicy := &policyv2.Policy{\n\t\tACLs: []policyv2.ACL{\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []policyv2.Alias{new(policyv2.AutoGroupMember)},\n\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\taliasWithPorts(new(policyv2.AutoGroupSelf), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tuser1ToUser2Policy := &policyv2.Policy{\n\t\tACLs: []policyv2.ACL{\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []policyv2.Alias{usernamep(\"user1@\")},\n\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\taliasWithPorts(usernamep(\"user2@\"), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// Run through the policy cycle 5 times\n\tfor i := range 5 {\n\t\titeration := i + 1 // range 5 gives 0-4, we want 1-5 for logging\n\t\tt.Logf(\"=== Iteration %d/5 ===\", iteration)\n\n\t\t// Phase 1: Allow all policy\n\t\tt.Logf(\"Iteration %d: Setting allow-all policy\", iteration)\n\n\t\terr = headscale.SetPolicy(allowAllPolicy)\n\t\trequire.NoError(t, err)\n\n\t\t// Wait for peer lists to sync with allow-all policy\n\t\tt.Logf(\"Iteration %d: Phase 1 - Waiting for peer lists to sync with allow-all policy\", iteration)\n\n\t\terr = scenario.WaitForTailscaleSync()\n\t\trequire.NoError(t, err, \"iteration %d: Phase 1 - failed to sync after allow-all policy\", iteration)\n\n\t\t// Test all-to-all connectivity after state is settled\n\t\tt.Logf(\"Iteration %d: Phase 1 - Testing all-to-all connectivity\", iteration)\n\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t\tfor _, client := range allClients {\n\t\t\t\tfor _, peer := range allClients {\n\t\t\t\t\tif client.ContainerID() == peer.ContainerID() {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tfqdn, err := peer.FQDN()\n\t\t\t\t\tif !assert.NoError(ct, err, \"iteration %d: failed to get FQDN for %s\", iteration, peer.Hostname()) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\t\tassert.NoError(ct, err, \"iteration %d: %s should reach %s with allow-all policy\", iteration, client.Hostname(), fqdn)\n\t\t\t\t\tassert.Len(ct, result, 13, \"iteration %d: response from %s to %s should be valid\", iteration, client.Hostname(), fqdn)\n\t\t\t\t}\n\t\t\t}\n\t\t}, 90*time.Second, 500*time.Millisecond, \"iteration %d: Phase 1 - all connectivity tests with allow-all policy\", iteration)\n\n\t\t// Phase 2: Autogroup:self policy (only same user can access)\n\t\tt.Logf(\"Iteration %d: Phase 2 - Setting autogroup:self policy\", iteration)\n\n\t\terr = headscale.SetPolicy(autogroupSelfPolicy)\n\t\trequire.NoError(t, err)\n\n\t\t// Wait for peer lists to sync with autogroup:self - ensures cross-user peers are removed\n\t\tt.Logf(\"Iteration %d: Phase 2 - Waiting for peer lists to sync with autogroup:self\", iteration)\n\n\t\terr = scenario.WaitForTailscaleSyncPerUser(60*time.Second, 500*time.Millisecond)\n\t\trequire.NoError(t, err, 
\"iteration %d: Phase 2 - failed to sync after autogroup:self policy\", iteration)\n\n\t\t// Test ALL connectivity (positive and negative) in one block after state is settled\n\t\tt.Logf(\"Iteration %d: Phase 2 - Testing all connectivity with autogroup:self\", iteration)\n\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t\t// Positive: user1 can access user1's nodes\n\t\t\tfor _, client := range user1Clients {\n\t\t\t\tfor _, peer := range user1Clients {\n\t\t\t\t\tif client.ContainerID() == peer.ContainerID() {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tfqdn, err := peer.FQDN()\n\t\t\t\t\tif !assert.NoError(ct, err, \"iteration %d: failed to get FQDN for user1 peer %s\", iteration, peer.Hostname()) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\t\tassert.NoError(ct, err, \"iteration %d: user1 node %s should reach user1 node %s\", iteration, client.Hostname(), peer.Hostname())\n\t\t\t\t\tassert.Len(ct, result, 13, \"iteration %d: response from %s to %s should be valid\", iteration, client.Hostname(), peer.Hostname())\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Positive: user2 can access user2's nodes\n\t\t\tfor _, client := range user2Clients {\n\t\t\t\tfor _, peer := range user2Clients {\n\t\t\t\t\tif client.ContainerID() == peer.ContainerID() {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tfqdn, err := peer.FQDN()\n\t\t\t\t\tif !assert.NoError(ct, err, \"iteration %d: failed to get FQDN for user2 peer %s\", iteration, peer.Hostname()) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\t\tassert.NoError(ct, err, \"iteration %d: user2 %s should reach user2's node %s\", iteration, client.Hostname(), fqdn)\n\t\t\t\t\tassert.Len(ct, result, 13, \"iteration %d: response from %s to %s should be valid\", iteration, client.Hostname(), fqdn)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Negative: user1 cannot access user2's nodes\n\t\t\tfor _, client := range user1Clients {\n\t\t\t\tfor _, peer := range user2Clients {\n\t\t\t\t\tfqdn, err := peer.FQDN()\n\t\t\t\t\tif !assert.NoError(ct, err, \"iteration %d: failed to get FQDN for user2 peer %s\", iteration, peer.Hostname()) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\t\tassert.Error(ct, err, \"iteration %d: user1 %s should NOT reach user2's node %s with autogroup:self\", iteration, client.Hostname(), fqdn)\n\t\t\t\t\tassert.Empty(ct, result, \"iteration %d: user1 %s->user2 %s should fail\", iteration, client.Hostname(), fqdn)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Negative: user2 cannot access user1's nodes\n\t\t\tfor _, client := range user2Clients {\n\t\t\t\tfor _, peer := range user1Clients {\n\t\t\t\t\tfqdn, err := peer.FQDN()\n\t\t\t\t\tif !assert.NoError(ct, err, \"iteration %d: failed to get FQDN for user1 peer %s\", iteration, peer.Hostname()) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\t\tassert.Error(ct, err, \"iteration %d: user2 node %s should NOT reach user1 node %s\", iteration, client.Hostname(), peer.Hostname())\n\t\t\t\t\tassert.Empty(ct, result, \"iteration %d: user2->user1 connection from %s to %s should fail\", iteration, client.Hostname(), peer.Hostname())\n\t\t\t\t}\n\t\t\t}\n\t\t}, 90*time.Second, 
500*time.Millisecond, \"iteration %d: Phase 2 - all connectivity tests with autogroup:self\", iteration)\n\n\t\t// Phase 2b: Add a new node to user1 and validate policy propagation\n\t\tt.Logf(\"Iteration %d: Phase 2b - Adding new node to user1 during autogroup:self policy\", iteration)\n\n\t\t// Add a new node with the same options as the initial setup\n\t\t// Get the network to use (scenario uses first network in list)\n\t\tnetworks := scenario.Networks()\n\t\trequire.NotEmpty(t, networks, \"scenario should have at least one network\")\n\n\t\tnewClient := scenario.MustAddAndLoginClient(t, \"user1\", \"all\", headscale,\n\t\t\ttsic.WithNetfilter(\"off\"),\n\t\t\ttsic.WithPackages(\"curl\"),\n\t\t\ttsic.WithWebserver(80),\n\t\t\ttsic.WithDockerWorkdir(\"/\"),\n\t\t\ttsic.WithNetwork(networks[0]),\n\t\t)\n\t\tt.Logf(\"Iteration %d: Phase 2b - Added and logged in new node %s\", iteration, newClient.Hostname())\n\n\t\t// Wait for peer lists to sync after new node addition (now 3 user1 nodes, still autogroup:self)\n\t\tt.Logf(\"Iteration %d: Phase 2b - Waiting for peer lists to sync after new node addition\", iteration)\n\n\t\terr = scenario.WaitForTailscaleSyncPerUser(60*time.Second, 500*time.Millisecond)\n\t\trequire.NoError(t, err, \"iteration %d: Phase 2b - failed to sync after new node addition\", iteration)\n\n\t\t// Test ALL connectivity (positive and negative) in one block after state is settled\n\t\tt.Logf(\"Iteration %d: Phase 2b - Testing all connectivity after new node addition\", iteration)\n\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t\t// Re-fetch client list to ensure latest state\n\t\t\tuser1ClientsWithNew, err := scenario.ListTailscaleClients(\"user1\")\n\t\t\tassert.NoError(ct, err, \"iteration %d: failed to list user1 clients\", iteration)\n\t\t\tassert.Len(ct, user1ClientsWithNew, 3, \"iteration %d: user1 should have 3 nodes\", iteration)\n\n\t\t\t// Positive: all user1 nodes can access each other\n\t\t\tfor _, client := range user1ClientsWithNew {\n\t\t\t\tfor _, peer := range user1ClientsWithNew {\n\t\t\t\t\tif client.ContainerID() == peer.ContainerID() {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tfqdn, err := peer.FQDN()\n\t\t\t\t\tif !assert.NoError(ct, err, \"iteration %d: failed to get FQDN for peer %s\", iteration, peer.Hostname()) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\t\tassert.NoError(ct, err, \"iteration %d: user1 node %s should reach user1 node %s\", iteration, client.Hostname(), peer.Hostname())\n\t\t\t\t\tassert.Len(ct, result, 13, \"iteration %d: response from %s to %s should be valid\", iteration, client.Hostname(), peer.Hostname())\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Negative: user1 nodes cannot access user2's nodes\n\t\t\tfor _, client := range user1ClientsWithNew {\n\t\t\t\tfor _, peer := range user2Clients {\n\t\t\t\t\tfqdn, err := peer.FQDN()\n\t\t\t\t\tif !assert.NoError(ct, err, \"iteration %d: failed to get FQDN for user2 peer %s\", iteration, peer.Hostname()) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\t\tassert.Error(ct, err, \"iteration %d: user1 node %s should NOT reach user2 node %s\", iteration, client.Hostname(), peer.Hostname())\n\t\t\t\t\tassert.Empty(ct, result, \"iteration %d: user1->user2 connection from %s to %s should fail\", iteration, client.Hostname(), 
peer.Hostname())\n\t\t\t\t}\n\t\t\t}\n\t\t}, 90*time.Second, 500*time.Millisecond, \"iteration %d: Phase 2b - all connectivity tests after new node addition\", iteration)\n\n\t\t// Delete the newly added node before Phase 3\n\t\tt.Logf(\"Iteration %d: Phase 2b - Deleting the newly added node from user1\", iteration)\n\n\t\t// Get the node list and find the newest node (highest ID)\n\t\tvar (\n\t\t\tnodeList       []*v1.Node\n\t\t\tnodeToDeleteID uint64\n\t\t)\n\n\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t\tnodeList, err = headscale.ListNodes(\"user1\")\n\t\t\tassert.NoError(ct, err)\n\t\t\tassert.Len(ct, nodeList, 3, \"should have 3 user1 nodes before deletion\")\n\n\t\t\t// Find the node with the highest ID (the newest one)\n\t\t\tfor _, node := range nodeList {\n\t\t\t\tif node.GetId() > nodeToDeleteID {\n\t\t\t\t\tnodeToDeleteID = node.GetId()\n\t\t\t\t}\n\t\t\t}\n\t\t}, 10*time.Second, 500*time.Millisecond, \"iteration %d: Phase 2b - listing nodes before deletion\", iteration)\n\n\t\t// Delete the node via headscale helper\n\t\tt.Logf(\"Iteration %d: Phase 2b - Deleting node ID %d from headscale\", iteration, nodeToDeleteID)\n\t\terr = headscale.DeleteNode(nodeToDeleteID)\n\t\trequire.NoError(t, err, \"iteration %d: failed to delete node %d\", iteration, nodeToDeleteID)\n\n\t\t// Remove the deleted client from the scenario's user.Clients map\n\t\t// This is necessary for WaitForTailscaleSyncPerUser to calculate correct peer counts\n\t\tt.Logf(\"Iteration %d: Phase 2b - Removing deleted client from scenario\", iteration)\n\n\t\tfor clientName, client := range scenario.users[\"user1\"].Clients {\n\t\t\tstatus := client.MustStatus()\n\n\t\t\tnodeID, err := strconv.ParseUint(string(status.Self.ID), 10, 64)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif nodeID == nodeToDeleteID {\n\t\t\t\tdelete(scenario.users[\"user1\"].Clients, clientName)\n\t\t\t\tt.Logf(\"Iteration %d: Phase 2b - Removed client %s (node ID %d) from scenario\", iteration, clientName, nodeToDeleteID)\n\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// Verify the node has been deleted\n\t\tt.Logf(\"Iteration %d: Phase 2b - Verifying node deletion (expecting 2 user1 nodes)\", iteration)\n\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t\tnodeListAfter, err := headscale.ListNodes(\"user1\")\n\t\t\tassert.NoError(ct, err, \"failed to list nodes after deletion\")\n\t\t\tassert.Len(ct, nodeListAfter, 2, \"iteration %d: should have 2 user1 nodes after deletion, got %d\", iteration, len(nodeListAfter))\n\t\t}, 10*time.Second, 500*time.Millisecond, \"iteration %d: Phase 2b - node should be deleted\", iteration)\n\n\t\t// Wait for sync after deletion to ensure peer counts are correct\n\t\t// Use WaitForTailscaleSyncPerUser because autogroup:self is still active,\n\t\t// so nodes only see same-user peers, not all nodes\n\t\tt.Logf(\"Iteration %d: Phase 2b - Waiting for sync after node deletion (with autogroup:self)\", iteration)\n\n\t\terr = scenario.WaitForTailscaleSyncPerUser(60*time.Second, 500*time.Millisecond)\n\t\trequire.NoError(t, err, \"iteration %d: failed to sync after node deletion\", iteration)\n\n\t\t// Refresh client lists after deletion to ensure we don't reference the deleted node\n\t\tuser1Clients, err = scenario.ListTailscaleClients(\"user1\")\n\t\trequire.NoError(t, err, \"iteration %d: failed to refresh user1 client list after deletion\", iteration)\n\t\tuser2Clients, err = scenario.ListTailscaleClients(\"user2\")\n\t\trequire.NoError(t, err, \"iteration %d: failed to 
refresh user2 client list after deletion\", iteration)\n\t\t// Create NEW slice instead of appending to old allClients which still has deleted client\n\t\tallClients = make([]TailscaleClient, 0, len(user1Clients)+len(user2Clients))\n\t\tallClients = append(allClients, user1Clients...)\n\t\tallClients = append(allClients, user2Clients...)\n\n\t\tt.Logf(\"Iteration %d: Phase 2b completed - New node added, validated, and removed successfully\", iteration)\n\n\t\t// Phase 3: User1 can access user2 but not the reverse\n\t\tt.Logf(\"Iteration %d: Phase 3 - Setting user1->user2 directional policy\", iteration)\n\n\t\terr = headscale.SetPolicy(user1ToUser2Policy)\n\t\trequire.NoError(t, err)\n\n\t\t// Note: Cannot use WaitForTailscaleSync() here because directional policy means\n\t\t// user2 nodes don't see user1 nodes in their peer list (asymmetric visibility).\n\t\t// The EventuallyWithT block below will handle waiting for policy propagation.\n\n\t\t// Test ALL connectivity (positive and negative) in one block after policy settles\n\t\tt.Logf(\"Iteration %d: Phase 3 - Testing all connectivity with directional policy\", iteration)\n\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t\t// Positive: user1 can access user2's nodes\n\t\t\tfor _, client := range user1Clients {\n\t\t\t\tfor _, peer := range user2Clients {\n\t\t\t\t\tfqdn, err := peer.FQDN()\n\t\t\t\t\tif !assert.NoError(ct, err, \"iteration %d: failed to get FQDN for user2 peer %s\", iteration, peer.Hostname()) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\t\tassert.NoError(ct, err, \"iteration %d: user1 node %s should reach user2 node %s\", iteration, client.Hostname(), peer.Hostname())\n\t\t\t\t\tassert.Len(ct, result, 13, \"iteration %d: response from %s to %s should be valid\", iteration, client.Hostname(), peer.Hostname())\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Negative: user2 cannot access user1's nodes\n\t\t\tfor _, client := range user2Clients {\n\t\t\t\tfor _, peer := range user1Clients {\n\t\t\t\t\tfqdn, err := peer.FQDN()\n\t\t\t\t\tif !assert.NoError(ct, err, \"iteration %d: failed to get FQDN for user1 peer %s\", iteration, peer.Hostname()) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", fqdn)\n\t\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\t\tassert.Error(ct, err, \"iteration %d: user2 node %s should NOT reach user1 node %s\", iteration, client.Hostname(), peer.Hostname())\n\t\t\t\t\tassert.Empty(ct, result, \"iteration %d: user2->user1 from %s to %s should fail\", iteration, client.Hostname(), peer.Hostname())\n\t\t\t\t}\n\t\t\t}\n\t\t}, 90*time.Second, 500*time.Millisecond, \"iteration %d: Phase 3 - all connectivity tests with directional policy\", iteration)\n\n\t\tt.Logf(\"=== Iteration %d/5 completed successfully - All 3 phases passed ===\", iteration)\n\t}\n\n\tt.Log(\"All 5 iterations completed successfully - ACL propagation is working correctly\")\n}\n\n// TestACLTagPropagation validates that tag changes propagate immediately\n// to ACLs without requiring a Headscale restart.\n// This is the primary test for GitHub issue #2389.\nfunc TestACLTagPropagation(t *testing.T) {\n\tIntegrationSkip(t)\n\n\ttests := []struct {\n\t\tname   string\n\t\tpolicy *policyv2.Policy\n\t\tspec   ScenarioSpec\n\t\t// setup returns clients and any initial state needed\n\t\tsetup func(t *testing.T, scenario *Scenario, headscale ControlServer) (\n\t\t\tsourceClient 
TailscaleClient,\n\t\t\ttargetClient TailscaleClient,\n\t\t\ttargetNodeID uint64,\n\t\t)\n\t\t// initialAccess: should source be able to reach target before tag change?\n\t\tinitialAccess bool\n\t\t// tagChange: what tags to set on target node (nil = test uses custom logic)\n\t\ttagChange []string\n\t\t// finalAccess: should source be able to reach target after tag change?\n\t\tfinalAccess bool\n\t}{\n\t\t{\n\t\t\tname: \"add-tag-grants-access\",\n\t\t\tpolicy: &policyv2.Policy{\n\t\t\t\tTagOwners: policyv2.TagOwners{\n\t\t\t\t\t\"tag:shared\": policyv2.Owners{usernameOwner(\"user1@\")},\n\t\t\t\t},\n\t\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t\t// user1 self-access\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{usernamep(\"user1@\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(usernamep(\"user1@\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t// user2 self-access\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{usernamep(\"user2@\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(usernamep(\"user2@\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t// user2 can access tag:shared\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{usernamep(\"user2@\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(tagp(\"tag:shared\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t// tag:shared can respond to user2 (return path)\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{tagp(\"tag:shared\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(usernamep(\"user2@\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tspec: ScenarioSpec{\n\t\t\t\tNodesPerUser: 1,\n\t\t\t\tUsers:        []string{\"user1\", \"user2\"},\n\t\t\t},\n\t\t\tsetup: func(t *testing.T, scenario *Scenario, headscale ControlServer) (TailscaleClient, TailscaleClient, uint64) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tuser1Clients, err := scenario.ListTailscaleClients(\"user1\")\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tuser2Clients, err := scenario.ListTailscaleClients(\"user2\")\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tnodes, err := headscale.ListNodes(\"user1\")\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\treturn user2Clients[0], user1Clients[0], nodes[0].GetId()\n\t\t\t},\n\t\t\tinitialAccess: false,                  // user2 cannot access user1 (no tag)\n\t\t\ttagChange:     []string{\"tag:shared\"}, // add tag:shared\n\t\t\tfinalAccess:   true,                   // user2 can now access user1\n\t\t},\n\t\t{\n\t\t\tname: \"remove-tag-revokes-access\",\n\t\t\tpolicy: &policyv2.Policy{\n\t\t\t\tTagOwners: policyv2.TagOwners{\n\t\t\t\t\t\"tag:shared\": policyv2.Owners{usernameOwner(\"user1@\")},\n\t\t\t\t\t\"tag:other\":  policyv2.Owners{usernameOwner(\"user1@\")},\n\t\t\t\t},\n\t\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t\t// user2 self-access\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{usernamep(\"user2@\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(usernamep(\"user2@\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t// user2 can access tag:shared only\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: 
[]policyv2.Alias{usernamep(\"user2@\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(tagp(\"tag:shared\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{tagp(\"tag:shared\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(usernamep(\"user2@\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tspec: ScenarioSpec{\n\t\t\t\tNodesPerUser: 0, // manual creation for tagged node\n\t\t\t\tUsers:        []string{\"user1\", \"user2\"},\n\t\t\t},\n\t\t\tsetup: func(t *testing.T, scenario *Scenario, headscale ControlServer) (TailscaleClient, TailscaleClient, uint64) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tuserMap, err := headscale.MapUsers()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// Create user1's node WITH tag:shared via PreAuthKey\n\t\t\t\ttaggedKey, err := scenario.CreatePreAuthKeyWithTags(\n\t\t\t\t\tuserMap[\"user1\"].GetId(), false, false, []string{\"tag:shared\"},\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tuser1Node, err := scenario.CreateTailscaleNode(\n\t\t\t\t\t\"head\",\n\t\t\t\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\t\t\t\ttsic.WithPackages(\"curl\"),\n\t\t\t\t\ttsic.WithWebserver(80),\n\t\t\t\t\ttsic.WithDockerWorkdir(\"/\"),\n\t\t\t\t\ttsic.WithNetfilter(\"off\"),\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\terr = user1Node.Login(headscale.GetEndpoint(), taggedKey.GetKey())\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// Create user2's node (untagged)\n\t\t\t\tuntaggedKey, err := scenario.CreatePreAuthKey(userMap[\"user2\"].GetId(), false, false)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tuser2Node, err := scenario.CreateTailscaleNode(\n\t\t\t\t\t\"head\",\n\t\t\t\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\t\t\t\ttsic.WithPackages(\"curl\"),\n\t\t\t\t\ttsic.WithWebserver(80),\n\t\t\t\t\ttsic.WithDockerWorkdir(\"/\"),\n\t\t\t\t\ttsic.WithNetfilter(\"off\"),\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\terr = user2Node.Login(headscale.GetEndpoint(), untaggedKey.GetKey())\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\terr = scenario.WaitForTailscaleSync()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// Tagged nodes have no user_id, so list all and find by tag.\n\t\t\t\tallNodes, err := headscale.ListNodes()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\ttagged := findNode(allNodes, func(n *v1.Node) bool { return len(n.GetTags()) > 0 })\n\t\t\t\trequire.NotNil(t, tagged, \"expected a tagged node\")\n\n\t\t\t\treturn user2Node, user1Node, tagged.GetId()\n\t\t\t},\n\t\t\tinitialAccess: true,                  // user2 can access user1 (has tag:shared)\n\t\t\ttagChange:     []string{\"tag:other\"}, // replace with tag:other\n\t\t\tfinalAccess:   false,                 // user2 cannot access (no ACL for tag:other)\n\t\t},\n\t\t{\n\t\t\tname: \"change-tag-changes-access\",\n\t\t\tpolicy: &policyv2.Policy{\n\t\t\t\tTagOwners: policyv2.TagOwners{\n\t\t\t\t\t\"tag:team-a\": policyv2.Owners{usernameOwner(\"user1@\")},\n\t\t\t\t\t\"tag:team-b\": policyv2.Owners{usernameOwner(\"user1@\")},\n\t\t\t\t},\n\t\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t\t// user2 self-access\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{usernamep(\"user2@\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(usernamep(\"user2@\"), 
tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t// user2 can access tag:team-b only (NOT tag:team-a)\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{usernamep(\"user2@\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(tagp(\"tag:team-b\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{tagp(\"tag:team-b\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(usernamep(\"user2@\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tspec: ScenarioSpec{\n\t\t\t\tNodesPerUser: 0,\n\t\t\t\tUsers:        []string{\"user1\", \"user2\"},\n\t\t\t},\n\t\t\tsetup: func(t *testing.T, scenario *Scenario, headscale ControlServer) (TailscaleClient, TailscaleClient, uint64) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tuserMap, err := headscale.MapUsers()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// Create user1's node with tag:team-a (user2 has NO ACL for this)\n\t\t\t\ttaggedKey, err := scenario.CreatePreAuthKeyWithTags(\n\t\t\t\t\tuserMap[\"user1\"].GetId(), false, false, []string{\"tag:team-a\"},\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tuser1Node, err := scenario.CreateTailscaleNode(\n\t\t\t\t\t\"head\",\n\t\t\t\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\t\t\t\ttsic.WithPackages(\"curl\"),\n\t\t\t\t\ttsic.WithWebserver(80),\n\t\t\t\t\ttsic.WithDockerWorkdir(\"/\"),\n\t\t\t\t\ttsic.WithNetfilter(\"off\"),\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\terr = user1Node.Login(headscale.GetEndpoint(), taggedKey.GetKey())\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// Create user2's node\n\t\t\t\tuntaggedKey, err := scenario.CreatePreAuthKey(userMap[\"user2\"].GetId(), false, false)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tuser2Node, err := scenario.CreateTailscaleNode(\n\t\t\t\t\t\"head\",\n\t\t\t\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\t\t\t\ttsic.WithPackages(\"curl\"),\n\t\t\t\t\ttsic.WithWebserver(80),\n\t\t\t\t\ttsic.WithDockerWorkdir(\"/\"),\n\t\t\t\t\ttsic.WithNetfilter(\"off\"),\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\terr = user2Node.Login(headscale.GetEndpoint(), untaggedKey.GetKey())\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\terr = scenario.WaitForTailscaleSync()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// Tagged nodes have no user_id, so list all and find by tag.\n\t\t\t\tallNodes, err := headscale.ListNodes()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\ttagged := findNode(allNodes, func(n *v1.Node) bool { return len(n.GetTags()) > 0 })\n\t\t\t\trequire.NotNil(t, tagged, \"expected a tagged node\")\n\n\t\t\t\treturn user2Node, user1Node, tagged.GetId()\n\t\t\t},\n\t\t\tinitialAccess: false,                  // user2 cannot access (tag:team-a not in ACL)\n\t\t\ttagChange:     []string{\"tag:team-b\"}, // change to tag:team-b\n\t\t\tfinalAccess:   true,                   // user2 can now access (tag:team-b in ACL)\n\t\t},\n\t\t{\n\t\t\tname: \"multiple-tags-partial-removal\",\n\t\t\tpolicy: &policyv2.Policy{\n\t\t\t\tTagOwners: policyv2.TagOwners{\n\t\t\t\t\t\"tag:web\":      policyv2.Owners{usernameOwner(\"user1@\")},\n\t\t\t\t\t\"tag:internal\": policyv2.Owners{usernameOwner(\"user1@\")},\n\t\t\t\t},\n\t\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t\t// user2 self-access\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: 
[]policyv2.Alias{usernamep(\"user2@\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(usernamep(\"user2@\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t// user2 can access tag:web\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{usernamep(\"user2@\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(tagp(\"tag:web\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{tagp(\"tag:web\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(usernamep(\"user2@\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tspec: ScenarioSpec{\n\t\t\t\tNodesPerUser: 0,\n\t\t\t\tUsers:        []string{\"user1\", \"user2\"},\n\t\t\t},\n\t\t\tsetup: func(t *testing.T, scenario *Scenario, headscale ControlServer) (TailscaleClient, TailscaleClient, uint64) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tuserMap, err := headscale.MapUsers()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// Create user1's node with BOTH tags\n\t\t\t\ttaggedKey, err := scenario.CreatePreAuthKeyWithTags(\n\t\t\t\t\tuserMap[\"user1\"].GetId(), false, false, []string{\"tag:web\", \"tag:internal\"},\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tuser1Node, err := scenario.CreateTailscaleNode(\n\t\t\t\t\t\"head\",\n\t\t\t\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\t\t\t\ttsic.WithPackages(\"curl\"),\n\t\t\t\t\ttsic.WithWebserver(80),\n\t\t\t\t\ttsic.WithDockerWorkdir(\"/\"),\n\t\t\t\t\ttsic.WithNetfilter(\"off\"),\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\terr = user1Node.Login(headscale.GetEndpoint(), taggedKey.GetKey())\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// Create user2's node\n\t\t\t\tuntaggedKey, err := scenario.CreatePreAuthKey(userMap[\"user2\"].GetId(), false, false)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tuser2Node, err := scenario.CreateTailscaleNode(\n\t\t\t\t\t\"head\",\n\t\t\t\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\t\t\t\ttsic.WithPackages(\"curl\"),\n\t\t\t\t\ttsic.WithWebserver(80),\n\t\t\t\t\ttsic.WithDockerWorkdir(\"/\"),\n\t\t\t\t\ttsic.WithNetfilter(\"off\"),\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\terr = user2Node.Login(headscale.GetEndpoint(), untaggedKey.GetKey())\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\terr = scenario.WaitForTailscaleSync()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// Tagged nodes have no user_id, so list all and find by tag.\n\t\t\t\tallNodes, err := headscale.ListNodes()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\ttagged := findNode(allNodes, func(n *v1.Node) bool { return len(n.GetTags()) > 0 })\n\t\t\t\trequire.NotNil(t, tagged, \"expected a tagged node\")\n\n\t\t\t\treturn user2Node, user1Node, tagged.GetId()\n\t\t\t},\n\t\t\tinitialAccess: true,                     // user2 can access (has tag:web)\n\t\t\ttagChange:     []string{\"tag:internal\"}, // remove tag:web, keep tag:internal\n\t\t\tfinalAccess:   false,                    // user2 cannot access (no ACL for tag:internal)\n\t\t},\n\t\t{\n\t\t\tname: \"tag-change-updates-peer-identity\",\n\t\t\tpolicy: &policyv2.Policy{\n\t\t\t\tTagOwners: policyv2.TagOwners{\n\t\t\t\t\t\"tag:server\": policyv2.Owners{usernameOwner(\"user1@\")},\n\t\t\t\t},\n\t\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: 
[]policyv2.Alias{usernamep(\"user2@\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(usernamep(\"user2@\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{usernamep(\"user2@\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(tagp(\"tag:server\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{tagp(\"tag:server\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(usernamep(\"user2@\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tspec: ScenarioSpec{\n\t\t\t\tNodesPerUser: 1,\n\t\t\t\tUsers:        []string{\"user1\", \"user2\"},\n\t\t\t},\n\t\t\tsetup: func(t *testing.T, scenario *Scenario, headscale ControlServer) (TailscaleClient, TailscaleClient, uint64) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tuser1Clients, err := scenario.ListTailscaleClients(\"user1\")\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tuser2Clients, err := scenario.ListTailscaleClients(\"user2\")\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tnodes, err := headscale.ListNodes(\"user1\")\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\treturn user2Clients[0], user1Clients[0], nodes[0].GetId()\n\t\t\t},\n\t\t\tinitialAccess: false,                  // user2 cannot access user1 (no tag yet)\n\t\t\ttagChange:     []string{\"tag:server\"}, // assign tag:server\n\t\t\tfinalAccess:   true,                   // user2 can now access via tag:server\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tscenario, err := NewScenario(tt.spec)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\t\t\terr = scenario.CreateHeadscaleEnv(\n\t\t\t\t[]tsic.Option{\n\t\t\t\t\ttsic.WithNetfilter(\"off\"),\n\t\t\t\t\ttsic.WithPackages(\"curl\"),\n\t\t\t\t\ttsic.WithWebserver(80),\n\t\t\t\t\ttsic.WithDockerWorkdir(\"/\"),\n\t\t\t\t},\n\t\t\t\thsic.WithACLPolicy(tt.policy),\n\t\t\t\thsic.WithTestName(\"acl-tag-\"+tt.name),\n\t\t\t)\n\t\t\trequire.NoError(t, err)\n\n\t\t\theadscale, err := scenario.Headscale()\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Run test-specific setup\n\t\t\tsourceClient, targetClient, targetNodeID := tt.setup(t, scenario, headscale)\n\n\t\t\ttargetFQDN, err := targetClient.FQDN()\n\t\t\trequire.NoError(t, err)\n\n\t\t\ttargetURL := fmt.Sprintf(\"http://%s/etc/hostname\", targetFQDN)\n\n\t\t\t// Step 1: Verify initial access state\n\t\t\tt.Logf(\"Step 1: Verifying initial access (expect success=%v)\", tt.initialAccess)\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := sourceClient.Curl(targetURL)\n\t\t\t\tif tt.initialAccess {\n\t\t\t\t\tassert.NoError(c, err, \"Initial access should succeed\")\n\t\t\t\t\tassert.NotEmpty(c, result, \"Initial access should return content\")\n\t\t\t\t} else {\n\t\t\t\t\tassert.Error(c, err, \"Initial access should fail\")\n\t\t\t\t}\n\t\t\t}, 30*time.Second, 500*time.Millisecond, \"verifying initial access state\")\n\n\t\t\t// Step 1b: Verify initial NetMap visibility\n\t\t\tt.Logf(\"Step 1b: Verifying initial NetMap visibility (expect visible=%v)\", tt.initialAccess)\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tstatus, err := sourceClient.Status()\n\t\t\t\tassert.NoError(c, err)\n\n\t\t\t\ttargetHostname := targetClient.Hostname()\n\t\t\t\tfound := 
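/* whether the target appears in the source client's peer list */ 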
false\n\n\t\t\t\tfor _, peer := range status.Peer {\n\t\t\t\t\tif strings.Contains(peer.HostName, targetHostname) {\n\t\t\t\t\t\tfound = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif tt.initialAccess {\n\t\t\t\t\tassert.True(c, found, \"Target should be visible in NetMap initially\")\n\t\t\t\t} else {\n\t\t\t\t\tassert.False(c, found, \"Target should NOT be visible in NetMap initially\")\n\t\t\t\t}\n\t\t\t}, 30*time.Second, 500*time.Millisecond, \"verifying initial NetMap visibility\")\n\n\t\t\t// Step 2: Apply tag change\n\t\t\tt.Logf(\"Step 2: Setting tags on node %d to %v\", targetNodeID, tt.tagChange)\n\t\t\terr = headscale.SetNodeTags(targetNodeID, tt.tagChange)\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Verify tag was applied\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tallNodes, err := headscale.ListNodes()\n\t\t\t\tassert.NoError(c, err)\n\n\t\t\t\tnode := findNode(allNodes, func(n *v1.Node) bool { return n.GetId() == targetNodeID })\n\t\t\t\tassert.NotNil(c, node, \"Node should still exist\")\n\n\t\t\t\tif node != nil {\n\t\t\t\t\tassert.ElementsMatch(c, tt.tagChange, node.GetTags(), \"Tags should be updated\")\n\t\t\t\t}\n\t\t\t}, 10*time.Second, 500*time.Millisecond, \"verifying tag change applied\")\n\n\t\t\t// Step 3: Verify final access state (this is the key test for #2389)\n\t\t\tt.Logf(\"Step 3: Verifying final access after tag change (expect success=%v)\", tt.finalAccess)\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tresult, err := sourceClient.Curl(targetURL)\n\t\t\t\tif tt.finalAccess {\n\t\t\t\t\tassert.NoError(c, err, \"Final access should succeed after tag change\")\n\t\t\t\t\tassert.NotEmpty(c, result, \"Final access should return content\")\n\t\t\t\t} else {\n\t\t\t\t\tassert.Error(c, err, \"Final access should fail after tag change\")\n\t\t\t\t}\n\t\t\t}, 30*time.Second, 500*time.Millisecond, \"verifying access propagated after tag change\")\n\n\t\t\t// Step 3b: Verify final NetMap visibility\n\t\t\tt.Logf(\"Step 3b: Verifying final NetMap visibility (expect visible=%v)\", tt.finalAccess)\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tstatus, err := sourceClient.Status()\n\t\t\t\tassert.NoError(c, err)\n\n\t\t\t\ttargetHostname := targetClient.Hostname()\n\t\t\t\tfound := false\n\n\t\t\t\tfor _, peer := range status.Peer {\n\t\t\t\t\tif strings.Contains(peer.HostName, targetHostname) {\n\t\t\t\t\t\tfound = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif tt.finalAccess {\n\t\t\t\t\tassert.True(c, found, \"Target should be visible in NetMap after tag change\")\n\t\t\t\t} else {\n\t\t\t\t\tassert.False(c, found, \"Target should NOT be visible in NetMap after tag change\")\n\t\t\t\t}\n\t\t\t}, 60*time.Second, 500*time.Millisecond, \"verifying NetMap visibility propagated after tag change\")\n\n\t\t\tt.Logf(\"Test %s PASSED: Tag change propagated correctly\", tt.name)\n\t\t})\n\t}\n}\n\n// TestACLTagPropagationPortSpecific validates that tag changes correctly update\n// port-specific ACLs. 
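(Peers with port-restricted access stay in the netmap; the per-port limits are enforced by the packet filter rather than by removing the peer.) 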
When a tag change restricts access to specific ports,\n// the peer should remain visible but only the allowed ports should be accessible.\nfunc TestACLTagPropagationPortSpecific(t *testing.T) {\n\tIntegrationSkip(t)\n\n\t// Policy: tag:webserver allows port 80, tag:sshonly allows port 22\n\t// When we change from tag:webserver to tag:sshonly, HTTP should fail but ping should still work\n\tpolicy := &policyv2.Policy{\n\t\tTagOwners: policyv2.TagOwners{\n\t\t\t\"tag:webserver\": policyv2.Owners{usernameOwner(\"user1@\")},\n\t\t\t\"tag:sshonly\":   policyv2.Owners{usernameOwner(\"user1@\")},\n\t\t},\n\t\tACLs: []policyv2.ACL{\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []policyv2.Alias{usernamep(\"user2@\")},\n\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\taliasWithPorts(usernamep(\"user2@\"), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t\t// user2 can access tag:webserver on port 80 only\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []policyv2.Alias{usernamep(\"user2@\")},\n\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\taliasWithPorts(tagp(\"tag:webserver\"), tailcfg.PortRange{First: 80, Last: 80}),\n\t\t\t\t},\n\t\t\t},\n\t\t\t// user2 can access tag:sshonly on port 22 only\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []policyv2.Alias{usernamep(\"user2@\")},\n\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\taliasWithPorts(tagp(\"tag:sshonly\"), tailcfg.PortRange{First: 22, Last: 22}),\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Allow ICMP for ping tests\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []policyv2.Alias{usernamep(\"user2@\")},\n\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\taliasWithPorts(tagp(\"tag:webserver\"), tailcfg.PortRangeAny),\n\t\t\t\t\taliasWithPorts(tagp(\"tag:sshonly\"), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t\tProtocol: \"icmp\",\n\t\t\t},\n\t\t\t// Return path\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []policyv2.Alias{tagp(\"tag:webserver\"), tagp(\"tag:sshonly\")},\n\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\taliasWithPorts(usernamep(\"user2@\"), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\trequire.NoError(t, err)\n\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{\n\t\t\ttsic.WithNetfilter(\"off\"),\n\t\t\ttsic.WithPackages(\"curl\"),\n\t\t\ttsic.WithWebserver(80),\n\t\t\ttsic.WithDockerWorkdir(\"/\"),\n\t\t},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"acl-tag-port-specific\"),\n\t)\n\trequire.NoError(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\tuserMap, err := headscale.MapUsers()\n\trequire.NoError(t, err)\n\n\t// Create user1's node WITH tag:webserver\n\ttaggedKey, err := scenario.CreatePreAuthKeyWithTags(\n\t\tuserMap[\"user1\"].GetId(), false, false, []string{\"tag:webserver\"},\n\t)\n\trequire.NoError(t, err)\n\n\tuser1Node, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\ttsic.WithPackages(\"curl\"),\n\t\ttsic.WithWebserver(80),\n\t\ttsic.WithDockerWorkdir(\"/\"),\n\t\ttsic.WithNetfilter(\"off\"),\n\t)\n\trequire.NoError(t, err)\n\n\terr = user1Node.Login(headscale.GetEndpoint(), taggedKey.GetKey())\n\trequire.NoError(t, err)\n\n\t// Create user2's node\n\tuntaggedKey, err := 
scenario.CreatePreAuthKey(userMap[\"user2\"].GetId(), false, false)\n\trequire.NoError(t, err)\n\n\tuser2Node, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\ttsic.WithPackages(\"curl\"),\n\t\ttsic.WithDockerWorkdir(\"/\"),\n\t\ttsic.WithNetfilter(\"off\"),\n\t)\n\trequire.NoError(t, err)\n\n\terr = user2Node.Login(headscale.GetEndpoint(), untaggedKey.GetKey())\n\trequire.NoError(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequire.NoError(t, err)\n\n\t// Tagged nodes have no user_id, so list all and find by tag.\n\tallNodes, err := headscale.ListNodes()\n\trequire.NoError(t, err)\n\n\ttagged := findNode(allNodes, func(n *v1.Node) bool { return len(n.GetTags()) > 0 })\n\trequire.NotNil(t, tagged, \"expected a tagged node\")\n\n\ttargetNodeID := tagged.GetId()\n\n\ttargetFQDN, err := user1Node.FQDN()\n\trequire.NoError(t, err)\n\n\ttargetURL := fmt.Sprintf(\"http://%s/etc/hostname\", targetFQDN)\n\n\t// Step 1: Verify initial state - HTTP on port 80 should work with tag:webserver\n\tt.Log(\"Step 1: Verifying HTTP access with tag:webserver (should succeed)\")\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tresult, err := user2Node.Curl(targetURL)\n\t\tassert.NoError(c, err, \"HTTP should work with tag:webserver\")\n\t\tassert.NotEmpty(c, result)\n\t}, 30*time.Second, 500*time.Millisecond, \"initial HTTP access with tag:webserver\")\n\n\t// Step 2: Change tag from webserver to sshonly\n\tt.Logf(\"Step 2: Changing tag from webserver to sshonly on node %d\", targetNodeID)\n\terr = headscale.SetNodeTags(targetNodeID, []string{\"tag:sshonly\"})\n\trequire.NoError(t, err)\n\n\t// Step 3: Verify peer is still visible in NetMap (partial access, not full removal)\n\tt.Log(\"Step 3: Verifying peer remains visible in NetMap after tag change\")\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tstatus, err := user2Node.Status()\n\t\tassert.NoError(c, err)\n\n\t\ttargetHostname := user1Node.Hostname()\n\t\tfound := false\n\n\t\tfor _, peer := range status.Peer {\n\t\t\tif strings.Contains(peer.HostName, targetHostname) {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tassert.True(c, found, \"Peer should still be visible with tag:sshonly (port 22 access)\")\n\t}, 60*time.Second, 500*time.Millisecond, \"peer visibility after tag change\")\n\n\t// Step 4: Verify HTTP on port 80 now fails (tag:sshonly only allows port 22)\n\tt.Log(\"Step 4: Verifying HTTP access is now blocked (tag:sshonly only allows port 22)\")\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t_, err := user2Node.Curl(targetURL)\n\t\tassert.Error(c, err, \"HTTP should fail with tag:sshonly (only port 22 allowed)\")\n\t}, 60*time.Second, 500*time.Millisecond, \"HTTP blocked after tag change to sshonly\")\n\n\tt.Log(\"Test PASSED: Port-specific ACL changes propagated correctly\")\n}\n\n// TestACLGroupWithUnknownUser tests issue #2967 where a group containing\n// a reference to a non-existent user should not break connectivity for\n// valid users in the same group. 
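(Before the fix, resolving a group containing an unknown member could fail outright, cutting off the valid members as well.) 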
The expected behavior is that unknown\n// users are silently ignored during group resolution.\nfunc TestACLGroupWithUnknownUser(t *testing.T) {\n\tIntegrationSkip(t)\n\n\t// This test verifies that when a group contains a reference to a\n\t// non-existent user (e.g., \"nonexistent@\"), the valid users in\n\t// the group should still be able to connect to each other.\n\t//\n\t// Issue: https://github.com/juanfont/headscale/issues/2967\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 1,\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\t// Create a policy with a group that includes a non-existent user\n\t// alongside valid users. The group should still work for valid users.\n\tpolicy := &policyv2.Policy{\n\t\tGroups: policyv2.Groups{\n\t\t\t// This group contains a reference to \"nonexistent@\" which does not exist\n\t\t\tpolicyv2.Group(\"group:test\"): []policyv2.Username{\n\t\t\t\tpolicyv2.Username(\"user1@\"),\n\t\t\t\tpolicyv2.Username(\"user2@\"),\n\t\t\t\tpolicyv2.Username(\"nonexistent@\"), // This user does not exist\n\t\t\t},\n\t\t},\n\t\tACLs: []policyv2.ACL{\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []policyv2.Alias{groupp(\"group:test\")},\n\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\taliasWithPorts(groupp(\"group:test\"), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{\n\t\t\ttsic.WithNetfilter(\"off\"),\n\t\t\ttsic.WithPackages(\"curl\"),\n\t\t\ttsic.WithWebserver(80),\n\t\t\ttsic.WithDockerWorkdir(\"/\"),\n\t\t},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"acl-unknown-user\"),\n\t)\n\trequire.NoError(t, err)\n\n\t_, err = scenario.ListTailscaleClientsFQDNs()\n\trequire.NoError(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequire.NoError(t, err)\n\n\tuser1Clients, err := scenario.ListTailscaleClients(\"user1\")\n\trequire.NoError(t, err)\n\trequire.Len(t, user1Clients, 1)\n\n\tuser2Clients, err := scenario.ListTailscaleClients(\"user2\")\n\trequire.NoError(t, err)\n\trequire.Len(t, user2Clients, 1)\n\n\tuser1 := user1Clients[0]\n\tuser2 := user2Clients[0]\n\n\t// Get FQDNs for connectivity test\n\tuser1FQDN, err := user1.FQDN()\n\trequire.NoError(t, err)\n\tuser2FQDN, err := user2.FQDN()\n\trequire.NoError(t, err)\n\n\t// Test that user1 can reach user2 (valid users should be able to communicate)\n\t// This is the key assertion for issue #2967: valid users should work\n\t// even if the group contains references to non-existent users.\n\tt.Log(\"Testing connectivity: user1 -> user2 (should succeed despite unknown user in group)\")\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", user2FQDN)\n\t\tresult, err := user1.Curl(url)\n\t\tassert.NoError(c, err, \"user1 should be able to reach user2\")\n\t\tassert.Len(c, result, 13, \"expected hostname response\")\n\t}, 30*time.Second, 500*time.Millisecond, \"user1 should reach user2\")\n\n\t// Test that user2 can reach user1 (bidirectional)\n\tt.Log(\"Testing connectivity: user2 -> user1 (should succeed despite unknown user in group)\")\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", user1FQDN)\n\t\tresult, err := user2.Curl(url)\n\t\tassert.NoError(c, err, \"user2 should be able to reach user1\")\n\t\tassert.Len(c, result, 13, \"expected hostname response\")\n\t}, 30*time.Second, 
500*time.Millisecond, \"user2 should reach user1\")\n\n\tt.Log(\"Test PASSED: Valid users can communicate despite unknown user reference in group\")\n}\n\n// TestACLGroupAfterUserDeletion tests issue #2967 scenario where a user\n// is deleted but their reference remains in an ACL group. The remaining\n// valid users should still be able to communicate.\nfunc TestACLGroupAfterUserDeletion(t *testing.T) {\n\tIntegrationSkip(t)\n\n\t// This test verifies that when a user is deleted from headscale but\n\t// their reference remains in an ACL group, the remaining valid users\n\t// in the group should still be able to connect to each other.\n\t//\n\t// Issue: https://github.com/juanfont/headscale/issues/2967\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 1,\n\t\tUsers:        []string{\"user1\", \"user2\", \"user3\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\t// Create a policy with a group containing all three users\n\tpolicy := &policyv2.Policy{\n\t\tGroups: policyv2.Groups{\n\t\t\tpolicyv2.Group(\"group:all\"): []policyv2.Username{\n\t\t\t\tpolicyv2.Username(\"user1@\"),\n\t\t\t\tpolicyv2.Username(\"user2@\"),\n\t\t\t\tpolicyv2.Username(\"user3@\"),\n\t\t\t},\n\t\t},\n\t\tACLs: []policyv2.ACL{\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []policyv2.Alias{groupp(\"group:all\")},\n\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\taliasWithPorts(groupp(\"group:all\"), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{\n\t\t\ttsic.WithNetfilter(\"off\"),\n\t\t\ttsic.WithPackages(\"curl\"),\n\t\t\ttsic.WithWebserver(80),\n\t\t\ttsic.WithDockerWorkdir(\"/\"),\n\t\t},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"acl-deleted-user\"),\n\t\thsic.WithPolicyMode(types.PolicyModeDB), // Use DB mode so policy persists after user deletion\n\t)\n\trequire.NoError(t, err)\n\n\t_, err = scenario.ListTailscaleClientsFQDNs()\n\trequire.NoError(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequire.NoError(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\tuser1Clients, err := scenario.ListTailscaleClients(\"user1\")\n\trequire.NoError(t, err)\n\trequire.Len(t, user1Clients, 1)\n\n\tuser2Clients, err := scenario.ListTailscaleClients(\"user2\")\n\trequire.NoError(t, err)\n\trequire.Len(t, user2Clients, 1)\n\n\tuser3Clients, err := scenario.ListTailscaleClients(\"user3\")\n\trequire.NoError(t, err)\n\trequire.Len(t, user3Clients, 1)\n\n\tuser1 := user1Clients[0]\n\tuser2 := user2Clients[0]\n\n\t// Get FQDNs for connectivity test\n\tuser1FQDN, err := user1.FQDN()\n\trequire.NoError(t, err)\n\tuser2FQDN, err := user2.FQDN()\n\trequire.NoError(t, err)\n\n\t// Step 1: Verify initial connectivity - all users can reach each other\n\tt.Log(\"Step 1: Verifying initial connectivity between all users\")\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", user2FQDN)\n\t\tresult, err := user1.Curl(url)\n\t\tassert.NoError(c, err, \"user1 should be able to reach user2 initially\")\n\t\tassert.Len(c, result, 13, \"expected hostname response\")\n\t}, 30*time.Second, 500*time.Millisecond, \"initial user1 -> user2 connectivity\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", user1FQDN)\n\t\tresult, err := user2.Curl(url)\n\t\tassert.NoError(c, err, \"user2 should be able to reach user1 
initially\")\n\t\tassert.Len(c, result, 13, \"expected hostname response\")\n\t}, 30*time.Second, 500*time.Millisecond, \"initial user2 -> user1 connectivity\")\n\n\t// Step 2: Get user3's node and user, then delete them\n\tt.Log(\"Step 2: Deleting user3's node and user from headscale\")\n\n\t// First, get user3's node ID\n\tnodes, err := headscale.ListNodes(\"user3\")\n\trequire.NoError(t, err)\n\trequire.Len(t, nodes, 1, \"user3 should have exactly one node\")\n\tuser3NodeID := nodes[0].GetId()\n\n\t// Delete user3's node first (required before deleting the user)\n\terr = headscale.DeleteNode(user3NodeID)\n\trequire.NoError(t, err, \"failed to delete user3's node\")\n\n\t// Now get user3's user ID and delete the user\n\tuser3, err := GetUserByName(headscale, \"user3\")\n\trequire.NoError(t, err, \"user3 should exist\")\n\n\t// Now delete user3 (after their nodes are deleted)\n\terr = headscale.DeleteUser(user3.GetId())\n\trequire.NoError(t, err)\n\n\t// Verify user3 is deleted\n\t_, err = GetUserByName(headscale, \"user3\")\n\trequire.Error(t, err, \"user3 should be deleted\")\n\n\t// Step 3: Verify that user1 and user2 can still communicate (before triggering policy refresh)\n\t// The policy still references \"user3@\" in the group, but since user3 is deleted,\n\t// connectivity may still work due to cached/stale policy state.\n\tt.Log(\"Step 3: Verifying connectivity still works immediately after user3 deletion (stale cache)\")\n\n\t// Test that user1 can still reach user2\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", user2FQDN)\n\t\tresult, err := user1.Curl(url)\n\t\tassert.NoError(c, err, \"user1 should still be able to reach user2 after user3 deletion (stale cache)\")\n\t\tassert.Len(c, result, 13, \"expected hostname response\")\n\t}, 60*time.Second, 500*time.Millisecond, \"user1 -> user2 after user3 deletion\")\n\n\t// Step 4: Create a NEW user - this triggers updatePolicyManagerUsers() which\n\t// re-evaluates the policy. According to issue #2967, this is when the bug manifests:\n\t// the deleted user3@ in the group causes the entire group to fail resolution.\n\tt.Log(\"Step 4: Creating a new user (user4) to trigger policy re-evaluation\")\n\n\t_, err = headscale.CreateUser(\"user4\")\n\trequire.NoError(t, err, \"failed to create user4\")\n\n\t// Verify user4 was created\n\t_, err = GetUserByName(headscale, \"user4\")\n\trequire.NoError(t, err, \"user4 should exist after creation\")\n\n\t// Step 5: THIS IS THE CRITICAL TEST - verify connectivity STILL works after\n\t// creating a new user. 
Without the fix, the group containing the deleted user3@\n\t// would fail to resolve, breaking connectivity for user1 and user2.\n\tt.Log(\"Step 5: Verifying connectivity AFTER creating new user (this triggers the bug)\")\n\n\t// Test that user1 can still reach user2 AFTER the policy refresh triggered by user creation\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", user2FQDN)\n\t\tresult, err := user1.Curl(url)\n\t\tassert.NoError(c, err, \"user1 should still reach user2 after policy refresh (BUG if this fails)\")\n\t\tassert.Len(c, result, 13, \"expected hostname response\")\n\t}, 60*time.Second, 500*time.Millisecond, \"user1 -> user2 after policy refresh (issue #2967)\")\n\n\t// Test that user2 can still reach user1\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", user1FQDN)\n\t\tresult, err := user2.Curl(url)\n\t\tassert.NoError(c, err, \"user2 should still reach user1 after policy refresh (BUG if this fails)\")\n\t\tassert.Len(c, result, 13, \"expected hostname response\")\n\t}, 60*time.Second, 500*time.Millisecond, \"user2 -> user1 after policy refresh (issue #2967)\")\n\n\tt.Log(\"Test PASSED: Remaining users can communicate after deleted user and policy refresh\")\n}\n\n// TestACLGroupDeletionExactReproduction reproduces issue #2967 exactly as reported:\n// The reporter had ACTIVE pinging between nodes while making changes.\n// The bug is that deleting a user and then creating a new user causes\n// connectivity to break for remaining users in the group.\n//\n// Key difference from other tests: We keep multiple nodes ACTIVE and pinging\n// each other throughout the test, just like the reporter's scenario.\n//\n// Reporter's steps (v0.28.0-beta.1):\n// 1. Start pinging between nodes\n// 2. Create policy with group:admin = [user1@]\n// 3. Create users \"deleteable\" and \"existinguser\"\n// 4. Add deleteable@ to ACL: Pinging continues\n// 5. Delete deleteable: Pinging continues\n// 6. Add existinguser@ to ACL: Pinging continues\n// 7. Create new user \"anotheruser\": Pinging continues\n// 8. 
Add anotherinvaliduser@ to ACL: Pinging stops.\nfunc TestACLGroupDeletionExactReproduction(t *testing.T) {\n\tIntegrationSkip(t)\n\n\t// Issue: https://github.com/juanfont/headscale/issues/2967\n\n\tconst userToDelete = \"user2\"\n\n\t// We need 3 users with active nodes to properly test this:\n\t// - user1: will remain throughout (like \"ritty\" in the issue)\n\t// - user2: will be deleted (like \"deleteable\" in the issue)\n\t// - user3: will remain and should still be able to ping user1 after user2 deletion\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 1,\n\t\tUsers:        []string{\"user1\", userToDelete, \"user3\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\trequire.NoError(t, err)\n\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\t// Initial policy: all three users in group, can communicate with each other\n\tinitialPolicy := &policyv2.Policy{\n\t\tGroups: policyv2.Groups{\n\t\t\tpolicyv2.Group(\"group:admin\"): []policyv2.Username{\n\t\t\t\tpolicyv2.Username(\"user1@\"),\n\t\t\t\tpolicyv2.Username(userToDelete + \"@\"),\n\t\t\t\tpolicyv2.Username(\"user3@\"),\n\t\t\t},\n\t\t},\n\t\tACLs: []policyv2.ACL{\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []policyv2.Alias{groupp(\"group:admin\")},\n\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t// Use *:* like the reporter's ACL\n\t\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{\n\t\t\ttsic.WithNetfilter(\"off\"),\n\t\t\ttsic.WithPackages(\"curl\"),\n\t\t\ttsic.WithWebserver(80),\n\t\t\ttsic.WithDockerWorkdir(\"/\"),\n\t\t},\n\t\thsic.WithACLPolicy(initialPolicy),\n\t\thsic.WithTestName(\"acl-exact-repro\"),\n\t\thsic.WithPolicyMode(types.PolicyModeDB), // test updates policy at runtime via CLI\n\t)\n\trequire.NoError(t, err)\n\n\t_, err = scenario.ListTailscaleClientsFQDNs()\n\trequire.NoError(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequire.NoError(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\t// Get all clients\n\tuser1Clients, err := scenario.ListTailscaleClients(\"user1\")\n\trequire.NoError(t, err)\n\trequire.Len(t, user1Clients, 1)\n\tuser1 := user1Clients[0]\n\n\tuser3Clients, err := scenario.ListTailscaleClients(\"user3\")\n\trequire.NoError(t, err)\n\trequire.Len(t, user3Clients, 1)\n\tuser3 := user3Clients[0]\n\n\tuser1FQDN, err := user1.FQDN()\n\trequire.NoError(t, err)\n\tuser3FQDN, err := user3.FQDN()\n\trequire.NoError(t, err)\n\n\t// Step 1: Verify initial connectivity - user1 and user3 can ping each other\n\tt.Log(\"Step 1: Verifying initial connectivity (user1 <-> user3)\")\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", user3FQDN)\n\t\tresult, err := user1.Curl(url)\n\t\tassert.NoError(c, err, \"user1 should reach user3\")\n\t\tassert.Len(c, result, 13, \"expected hostname response\")\n\t}, 60*time.Second, 500*time.Millisecond, \"user1 -> user3\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", user1FQDN)\n\t\tresult, err := user3.Curl(url)\n\t\tassert.NoError(c, err, \"user3 should reach user1\")\n\t\tassert.Len(c, result, 13, \"expected hostname response\")\n\t}, 60*time.Second, 500*time.Millisecond, \"user3 -> user1\")\n\n\tt.Log(\"Step 1: PASSED - initial connectivity works\")\n\n\t// Step 2: Delete user2's node and user (like reporter deleting \"deleteable\")\n\t// The ACL still references user2@ but user2 no longer 
exists\n\tt.Log(\"Step 2: Deleting user2 (node + user) from database - ACL still references user2@\")\n\n\tnodes, err := headscale.ListNodes(userToDelete)\n\trequire.NoError(t, err)\n\trequire.Len(t, nodes, 1)\n\terr = headscale.DeleteNode(nodes[0].GetId())\n\trequire.NoError(t, err)\n\n\tuserToDeleteObj, err := GetUserByName(headscale, userToDelete)\n\trequire.NoError(t, err, \"user to delete should exist\")\n\n\terr = headscale.DeleteUser(userToDeleteObj.GetId())\n\trequire.NoError(t, err)\n\n\tt.Log(\"Step 2: DONE - user2 deleted, ACL still has user2@ reference\")\n\n\t// Step 3: Verify connectivity still works after user2 deletion\n\t// This tests the immediate effect of the fix - policy should be updated\n\tt.Log(\"Step 3: Verifying connectivity STILL works after user2 deletion\")\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", user3FQDN)\n\t\tresult, err := user1.Curl(url)\n\t\tassert.NoError(c, err, \"user1 should still reach user3 after user2 deletion\")\n\t\tassert.Len(c, result, 13, \"expected hostname response\")\n\t}, 60*time.Second, 500*time.Millisecond, \"user1 -> user3 after user2 deletion\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", user1FQDN)\n\t\tresult, err := user3.Curl(url)\n\t\tassert.NoError(c, err, \"user3 should still reach user1 after user2 deletion\")\n\t\tassert.Len(c, result, 13, \"expected hostname response\")\n\t}, 60*time.Second, 500*time.Millisecond, \"user3 -> user1 after user2 deletion\")\n\n\tt.Log(\"Step 3: PASSED - connectivity works after user2 deletion\")\n\n\t// Step 4: Create a NEW user - this triggers updatePolicyManagerUsers()\n\t// According to the reporter, this is when the bug manifests\n\tt.Log(\"Step 4: Creating new user (user4) - this triggers policy re-evaluation\")\n\n\t_, err = headscale.CreateUser(\"user4\")\n\trequire.NoError(t, err)\n\n\t// Step 5: THE CRITICAL TEST - verify connectivity STILL works\n\t// Without the fix: DeleteUser didn't update policy, so when CreateUser\n\t// triggers updatePolicyManagerUsers(), the stale user2@ is now unknown,\n\t// potentially breaking the group.\n\tt.Log(\"Step 5: Verifying connectivity AFTER creating new user (BUG trigger point)\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", user3FQDN)\n\t\tresult, err := user1.Curl(url)\n\t\tassert.NoError(c, err, \"BUG #2967: user1 should still reach user3 after user4 creation\")\n\t\tassert.Len(c, result, 13, \"expected hostname response\")\n\t}, 60*time.Second, 500*time.Millisecond, \"user1 -> user3 after user4 creation (issue #2967)\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", user1FQDN)\n\t\tresult, err := user3.Curl(url)\n\t\tassert.NoError(c, err, \"BUG #2967: user3 should still reach user1 after user4 creation\")\n\t\tassert.Len(c, result, 13, \"expected hostname response\")\n\t}, 60*time.Second, 500*time.Millisecond, \"user3 -> user1 after user4 creation (issue #2967)\")\n\n\t// Additional verification: check filter rules are not empty\n\tfilter, err := headscale.DebugFilter()\n\trequire.NoError(t, err)\n\tt.Logf(\"Filter rules: %d\", len(filter))\n\trequire.NotEmpty(t, filter, \"Filter rules should not be empty\")\n\n\tt.Log(\"Test PASSED: Connectivity maintained throughout user deletion and creation\")\n\tt.Log(\"Issue #2967 would cause 'pinging to stop' at Step 5\")\n}\n\n// 
TestACLDynamicUnknownUserAddition tests the v0.28.0-beta.1 scenario from issue #2967:\n// \"Pinging still stops when a non-registered user is added to a group\"\n//\n// This test verifies that when a policy is DYNAMICALLY updated (via SetPolicy)\n// to include a non-existent user in a group, connectivity for valid users\n// is maintained. The v2 policy engine should gracefully handle unknown users.\n//\n// Steps:\n// 1. Start with a valid policy (only existing users in group)\n// 2. Verify connectivity works\n// 3. Update policy to add unknown user to the group\n// 4. Verify connectivity STILL works for valid users.\nfunc TestACLDynamicUnknownUserAddition(t *testing.T) {\n\tIntegrationSkip(t)\n\n\t// Issue: https://github.com/juanfont/headscale/issues/2967\n\t// Comment: \"Pinging still stops when a non-registered user is added to a group\"\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 1,\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\trequire.NoError(t, err)\n\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\t// Start with a VALID policy - only existing users in the group\n\tvalidPolicy := &policyv2.Policy{\n\t\tGroups: policyv2.Groups{\n\t\t\tpolicyv2.Group(\"group:test\"): []policyv2.Username{\n\t\t\t\tpolicyv2.Username(\"user1@\"),\n\t\t\t\tpolicyv2.Username(\"user2@\"),\n\t\t\t},\n\t\t},\n\t\tACLs: []policyv2.ACL{\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []policyv2.Alias{groupp(\"group:test\")},\n\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{\n\t\t\ttsic.WithNetfilter(\"off\"),\n\t\t\ttsic.WithPackages(\"curl\"),\n\t\t\ttsic.WithWebserver(80),\n\t\t\ttsic.WithDockerWorkdir(\"/\"),\n\t\t},\n\t\thsic.WithACLPolicy(validPolicy),\n\t\thsic.WithTestName(\"acl-dynamic-unknown\"),\n\t\thsic.WithPolicyMode(types.PolicyModeDB), // test updates policy at runtime via CLI\n\t)\n\trequire.NoError(t, err)\n\n\t_, err = scenario.ListTailscaleClientsFQDNs()\n\trequire.NoError(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequire.NoError(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\tuser1Clients, err := scenario.ListTailscaleClients(\"user1\")\n\trequire.NoError(t, err)\n\trequire.Len(t, user1Clients, 1)\n\tuser1 := user1Clients[0]\n\n\tuser2Clients, err := scenario.ListTailscaleClients(\"user2\")\n\trequire.NoError(t, err)\n\trequire.Len(t, user2Clients, 1)\n\tuser2 := user2Clients[0]\n\n\tuser1FQDN, err := user1.FQDN()\n\trequire.NoError(t, err)\n\tuser2FQDN, err := user2.FQDN()\n\trequire.NoError(t, err)\n\n\t// Step 1: Verify initial connectivity with VALID policy\n\tt.Log(\"Step 1: Verifying initial connectivity with valid policy (no unknown users)\")\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", user2FQDN)\n\t\tresult, err := user1.Curl(url)\n\t\tassert.NoError(c, err, \"user1 should reach user2\")\n\t\tassert.Len(c, result, 13, \"expected hostname response\")\n\t}, 60*time.Second, 500*time.Millisecond, \"initial user1 -> user2\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", user1FQDN)\n\t\tresult, err := user2.Curl(url)\n\t\tassert.NoError(c, err, \"user2 should reach user1\")\n\t\tassert.Len(c, result, 13, \"expected hostname response\")\n\t}, 60*time.Second, 500*time.Millisecond, \"initial user2 -> 
user1\")\n\n\tt.Log(\"Step 1: PASSED - connectivity works with valid policy\")\n\n\t// Step 2: DYNAMICALLY update policy to add unknown user\n\t// This mimics the v0.28.0-beta.1 scenario where a non-existent user is added\n\tt.Log(\"Step 2: Updating policy to add unknown user (nonexistent@) to the group\")\n\n\tpolicyWithUnknown := &policyv2.Policy{\n\t\tGroups: policyv2.Groups{\n\t\t\tpolicyv2.Group(\"group:test\"): []policyv2.Username{\n\t\t\t\tpolicyv2.Username(\"user1@\"),\n\t\t\t\tpolicyv2.Username(\"user2@\"),\n\t\t\t\tpolicyv2.Username(\"nonexistent@\"), // Added unknown user\n\t\t\t},\n\t\t},\n\t\tACLs: []policyv2.ACL{\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []policyv2.Alias{groupp(\"group:test\")},\n\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\terr = headscale.SetPolicy(policyWithUnknown)\n\trequire.NoError(t, err)\n\n\t// Wait for policy to propagate\n\terr = scenario.WaitForTailscaleSync()\n\trequire.NoError(t, err)\n\n\t// Step 3: THE CRITICAL TEST - verify connectivity STILL works\n\t// v0.28.0-beta.1 issue: \"Pinging still stops when a non-registered user is added to a group\"\n\t// With v2 policy graceful error handling, this should pass\n\tt.Log(\"Step 3: Verifying connectivity AFTER adding unknown user to policy\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", user2FQDN)\n\t\tresult, err := user1.Curl(url)\n\t\tassert.NoError(c, err, \"user1 should STILL reach user2 after adding unknown user\")\n\t\tassert.Len(c, result, 13, \"expected hostname response\")\n\t}, 60*time.Second, 500*time.Millisecond, \"user1 -> user2 after unknown user added\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", user1FQDN)\n\t\tresult, err := user2.Curl(url)\n\t\tassert.NoError(c, err, \"user2 should STILL reach user1 after adding unknown user\")\n\t\tassert.Len(c, result, 13, \"expected hostname response\")\n\t}, 60*time.Second, 500*time.Millisecond, \"user2 -> user1 after unknown user added\")\n\n\tt.Log(\"Step 3: PASSED - connectivity maintained after adding unknown user\")\n\tt.Log(\"Test PASSED: v0.28.0-beta.1 scenario - unknown user added dynamically, valid users still work\")\n}\n\n// TestACLDynamicUnknownUserRemoval tests the scenario from issue #2967 comments:\n// \"Removing all invalid users from ACL restores connectivity\"\n//\n// This test verifies that:\n// 1. Start with a policy containing unknown user\n// 2. Connectivity still works (v2 graceful handling)\n// 3. Update policy to remove unknown user\n// 4. 
Connectivity remains working\n//\n// This ensures the fix handles both:\n// - Adding unknown users (tested above)\n// - Removing unknown users from policy.\nfunc TestACLDynamicUnknownUserRemoval(t *testing.T) {\n\tIntegrationSkip(t)\n\n\t// Issue: https://github.com/juanfont/headscale/issues/2967\n\t// Comment: \"Removing all invalid users from ACL restores connectivity\"\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 1,\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\t// Start with a policy that INCLUDES an unknown user\n\tpolicyWithUnknown := &policyv2.Policy{\n\t\tGroups: policyv2.Groups{\n\t\t\tpolicyv2.Group(\"group:test\"): []policyv2.Username{\n\t\t\t\tpolicyv2.Username(\"user1@\"),\n\t\t\t\tpolicyv2.Username(\"user2@\"),\n\t\t\t\tpolicyv2.Username(\"invaliduser@\"), // Unknown user from the start\n\t\t\t},\n\t\t},\n\t\tACLs: []policyv2.ACL{\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []policyv2.Alias{groupp(\"group:test\")},\n\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{\n\t\t\ttsic.WithNetfilter(\"off\"),\n\t\t\ttsic.WithPackages(\"curl\"),\n\t\t\ttsic.WithWebserver(80),\n\t\t\ttsic.WithDockerWorkdir(\"/\"),\n\t\t},\n\t\thsic.WithACLPolicy(policyWithUnknown),\n\t\thsic.WithTestName(\"acl-unknown-removal\"),\n\t\thsic.WithPolicyMode(types.PolicyModeDB), // test updates policy at runtime via CLI\n\t)\n\trequire.NoError(t, err)\n\n\t_, err = scenario.ListTailscaleClientsFQDNs()\n\trequire.NoError(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequire.NoError(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\tuser1Clients, err := scenario.ListTailscaleClients(\"user1\")\n\trequire.NoError(t, err)\n\trequire.Len(t, user1Clients, 1)\n\tuser1 := user1Clients[0]\n\n\tuser2Clients, err := scenario.ListTailscaleClients(\"user2\")\n\trequire.NoError(t, err)\n\trequire.Len(t, user2Clients, 1)\n\tuser2 := user2Clients[0]\n\n\tuser1FQDN, err := user1.FQDN()\n\trequire.NoError(t, err)\n\tuser2FQDN, err := user2.FQDN()\n\trequire.NoError(t, err)\n\n\t// Step 1: Verify initial connectivity WITH unknown user in policy\n\t// With v2 graceful handling, this should work\n\tt.Log(\"Step 1: Verifying connectivity with unknown user in policy (v2 graceful handling)\")\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", user2FQDN)\n\t\tresult, err := user1.Curl(url)\n\t\tassert.NoError(c, err, \"user1 should reach user2 even with unknown user in policy\")\n\t\tassert.Len(c, result, 13, \"expected hostname response\")\n\t}, 60*time.Second, 500*time.Millisecond, \"initial user1 -> user2 with unknown\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", user1FQDN)\n\t\tresult, err := user2.Curl(url)\n\t\tassert.NoError(c, err, \"user2 should reach user1 even with unknown user in policy\")\n\t\tassert.Len(c, result, 13, \"expected hostname response\")\n\t}, 60*time.Second, 500*time.Millisecond, \"initial user2 -> user1 with unknown\")\n\n\tt.Log(\"Step 1: PASSED - connectivity works even with unknown user (v2 graceful handling)\")\n\n\t// Step 2: Update policy to REMOVE the unknown user\n\tt.Log(\"Step 2: Updating policy to remove unknown user\")\n\n\tcleanPolicy := 
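/* same group and ACL as before, with the unknown member dropped */ 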
&policyv2.Policy{\n\t\tGroups: policyv2.Groups{\n\t\t\tpolicyv2.Group(\"group:test\"): []policyv2.Username{\n\t\t\t\tpolicyv2.Username(\"user1@\"),\n\t\t\t\tpolicyv2.Username(\"user2@\"),\n\t\t\t\t// invaliduser@ removed\n\t\t\t},\n\t\t},\n\t\tACLs: []policyv2.ACL{\n\t\t\t{\n\t\t\t\tAction:  \"accept\",\n\t\t\t\tSources: []policyv2.Alias{groupp(\"group:test\")},\n\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\terr = headscale.SetPolicy(cleanPolicy)\n\trequire.NoError(t, err)\n\n\t// Wait for policy to propagate\n\terr = scenario.WaitForTailscaleSync()\n\trequire.NoError(t, err)\n\n\t// Step 3: Verify connectivity after removing unknown user\n\t// Issue comment: \"Removing all invalid users from ACL restores connectivity\"\n\tt.Log(\"Step 3: Verifying connectivity AFTER removing unknown user\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", user2FQDN)\n\t\tresult, err := user1.Curl(url)\n\t\tassert.NoError(c, err, \"user1 should reach user2 after removing unknown user\")\n\t\tassert.Len(c, result, 13, \"expected hostname response\")\n\t}, 60*time.Second, 500*time.Millisecond, \"user1 -> user2 after unknown removed\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", user1FQDN)\n\t\tresult, err := user2.Curl(url)\n\t\tassert.NoError(c, err, \"user2 should reach user1 after removing unknown user\")\n\t\tassert.Len(c, result, 13, \"expected hostname response\")\n\t}, 60*time.Second, 500*time.Millisecond, \"user2 -> user1 after unknown removed\")\n\n\tt.Log(\"Step 3: PASSED - connectivity maintained after removing unknown user\")\n\tt.Log(\"Test PASSED: Removing unknown users from policy works correctly\")\n}\n"
  },
  {
    "path": "integration/api_auth_test.go",
    "content": "package integration\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\t\"github.com/juanfont/headscale/integration/hsic\"\n\t\"github.com/juanfont/headscale/integration/tsic\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"google.golang.org/protobuf/encoding/protojson\"\n)\n\n// TestAPIAuthenticationBypass tests that the API authentication middleware\n// properly blocks unauthorized requests and does not leak sensitive data.\n// This test reproduces the security issue described in:\n// - https://github.com/juanfont/headscale/issues/2809\n// - https://github.com/juanfont/headscale/pull/2810\n//\n// The bug: When authentication fails, the middleware writes \"Unauthorized\"\n// but doesn't return early, allowing the handler to execute and append\n// sensitive data to the response.\nfunc TestAPIAuthenticationBypass(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tUsers: []string{\"user1\", \"user2\", \"user3\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName(\"apiauthbypass\"))\n\trequire.NoError(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\t// Create an API key using the CLI\n\tvar validAPIKey string\n\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tapiKeyOutput, err := headscale.Execute(\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"apikeys\",\n\t\t\t\t\"create\",\n\t\t\t\t\"--expiration\",\n\t\t\t\t\"24h\",\n\t\t\t},\n\t\t)\n\t\tassert.NoError(ct, err)\n\t\tassert.NotEmpty(ct, apiKeyOutput)\n\t\tvalidAPIKey = strings.TrimSpace(apiKeyOutput)\n\t}, 20*time.Second, 1*time.Second)\n\n\t// Get the API endpoint\n\tendpoint := headscale.GetEndpoint()\n\tapiURL := endpoint + \"/api/v1/user\"\n\n\t// Create HTTP client\n\tclient := &http.Client{\n\t\tTimeout: 10 * time.Second,\n\t\tTransport: &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec\n\t\t},\n\t}\n\n\tt.Run(\"HTTP_NoAuthHeader\", func(t *testing.T) {\n\t\t// Test 1: Request without any Authorization header\n\t\t// Expected: Should return 401 with ONLY \"Unauthorized\" text, no user data\n\t\treq, err := http.NewRequestWithContext(context.Background(), http.MethodGet, apiURL, nil)\n\t\trequire.NoError(t, err)\n\n\t\tresp, err := client.Do(req)\n\t\trequire.NoError(t, err)\n\n\t\tdefer resp.Body.Close()\n\n\t\tbody, err := io.ReadAll(resp.Body)\n\t\trequire.NoError(t, err)\n\n\t\t// Should return 401 Unauthorized\n\t\tassert.Equal(t, http.StatusUnauthorized, resp.StatusCode,\n\t\t\t\"Expected 401 status code for request without auth header\")\n\n\t\tbodyStr := string(body)\n\n\t\t// Should contain \"Unauthorized\" message\n\t\tassert.Contains(t, bodyStr, \"Unauthorized\",\n\t\t\t\"Response should contain 'Unauthorized' message\")\n\n\t\t// Should NOT contain user data after \"Unauthorized\"\n\t\t// This is the security bypass - if users array is present, auth was bypassed\n\t\tvar jsonCheck map[string]any\n\n\t\tjsonErr := json.Unmarshal(body, &jsonCheck)\n\n\t\t// If we can unmarshal JSON and it contains \"users\", that's the bypass\n\t\tif jsonErr == nil {\n\t\t\tassert.NotContains(t, jsonCheck, \"users\",\n\t\t\t\t\"SECURITY ISSUE: Response should NOT contain 
'users' data when unauthorized\")\n\t\t\tassert.NotContains(t, jsonCheck, \"user\",\n\t\t\t\t\"SECURITY ISSUE: Response should NOT contain 'user' data when unauthorized\")\n\t\t}\n\n\t\t// Additional check: response should not contain \"user1\", \"user2\", \"user3\"\n\t\tassert.NotContains(t, bodyStr, \"user1\",\n\t\t\t\"SECURITY ISSUE: Response should NOT leak user 'user1' data\")\n\t\tassert.NotContains(t, bodyStr, \"user2\",\n\t\t\t\"SECURITY ISSUE: Response should NOT leak user 'user2' data\")\n\t\tassert.NotContains(t, bodyStr, \"user3\",\n\t\t\t\"SECURITY ISSUE: Response should NOT leak user 'user3' data\")\n\n\t\t// Response should be minimal, just \"Unauthorized\"\n\t\t// Allow some variation in response format but body should be small\n\t\tassert.Less(t, len(bodyStr), 100,\n\t\t\t\"SECURITY ISSUE: Unauthorized response body should be minimal, got: %s\", bodyStr)\n\t})\n\n\tt.Run(\"HTTP_InvalidAuthHeader\", func(t *testing.T) {\n\t\t// Test 2: Request with invalid Authorization header (missing \"Bearer \" prefix)\n\t\t// Expected: Should return 401 with ONLY \"Unauthorized\" text, no user data\n\t\treq, err := http.NewRequestWithContext(context.Background(), http.MethodGet, apiURL, nil)\n\t\trequire.NoError(t, err)\n\t\treq.Header.Set(\"Authorization\", \"InvalidToken\")\n\n\t\tresp, err := client.Do(req)\n\t\trequire.NoError(t, err)\n\n\t\tdefer resp.Body.Close()\n\n\t\tbody, err := io.ReadAll(resp.Body)\n\t\trequire.NoError(t, err)\n\n\t\tassert.Equal(t, http.StatusUnauthorized, resp.StatusCode,\n\t\t\t\"Expected 401 status code for invalid auth header format\")\n\n\t\tbodyStr := string(body)\n\t\tassert.Contains(t, bodyStr, \"Unauthorized\")\n\n\t\t// Should not leak user data\n\t\tassert.NotContains(t, bodyStr, \"user1\",\n\t\t\t\"SECURITY ISSUE: Response should NOT leak user data\")\n\t\tassert.NotContains(t, bodyStr, \"user2\",\n\t\t\t\"SECURITY ISSUE: Response should NOT leak user data\")\n\t\tassert.NotContains(t, bodyStr, \"user3\",\n\t\t\t\"SECURITY ISSUE: Response should NOT leak user data\")\n\n\t\tassert.Less(t, len(bodyStr), 100,\n\t\t\t\"SECURITY ISSUE: Unauthorized response should be minimal\")\n\t})\n\n\tt.Run(\"HTTP_InvalidBearerToken\", func(t *testing.T) {\n\t\t// Test 3: Request with Bearer prefix but invalid token\n\t\t// Expected: Should return 401 with ONLY \"Unauthorized\" text, no user data\n\t\t// Note: Both malformed and properly formatted invalid tokens should return 401\n\t\treq, err := http.NewRequestWithContext(context.Background(), http.MethodGet, apiURL, nil)\n\t\trequire.NoError(t, err)\n\t\treq.Header.Set(\"Authorization\", \"Bearer invalid-token-12345\")\n\n\t\tresp, err := client.Do(req)\n\t\trequire.NoError(t, err)\n\n\t\tdefer resp.Body.Close()\n\n\t\tbody, err := io.ReadAll(resp.Body)\n\t\trequire.NoError(t, err)\n\n\t\tassert.Equal(t, http.StatusUnauthorized, resp.StatusCode,\n\t\t\t\"Expected 401 status code for invalid bearer token\")\n\n\t\tbodyStr := string(body)\n\t\tassert.Contains(t, bodyStr, \"Unauthorized\")\n\n\t\t// Should not leak user data\n\t\tassert.NotContains(t, bodyStr, \"user1\",\n\t\t\t\"SECURITY ISSUE: Response should NOT leak user data\")\n\t\tassert.NotContains(t, bodyStr, \"user2\",\n\t\t\t\"SECURITY ISSUE: Response should NOT leak user data\")\n\t\tassert.NotContains(t, bodyStr, \"user3\",\n\t\t\t\"SECURITY ISSUE: Response should NOT leak user data\")\n\n\t\tassert.Less(t, len(bodyStr), 100,\n\t\t\t\"SECURITY ISSUE: Unauthorized response should be minimal\")\n\t})\n\n\tt.Run(\"HTTP_ValidAPIKey\", func(t *testing.T) 
{\n\t\t// Test 4: Request with valid API key\n\t\t// Expected: Should return 200 with user data (this is the authorized case)\n\t\treq, err := http.NewRequestWithContext(context.Background(), http.MethodGet, apiURL, nil)\n\t\trequire.NoError(t, err)\n\t\treq.Header.Set(\"Authorization\", \"Bearer \"+validAPIKey)\n\n\t\tresp, err := client.Do(req)\n\t\trequire.NoError(t, err)\n\n\t\tdefer resp.Body.Close()\n\n\t\tbody, err := io.ReadAll(resp.Body)\n\t\trequire.NoError(t, err)\n\n\t\t// Should succeed with valid auth\n\t\tassert.Equal(t, http.StatusOK, resp.StatusCode,\n\t\t\t\"Expected 200 status code with valid API key\")\n\n\t\t// Should be able to parse as protobuf JSON\n\t\tvar response v1.ListUsersResponse\n\n\t\terr = protojson.Unmarshal(body, &response)\n\t\trequire.NoError(t, err, \"Response should be valid protobuf JSON with valid API key\")\n\n\t\t// Should contain our test users\n\t\tusers := response.GetUsers()\n\t\tassert.Len(t, users, 3, \"Should have 3 users\")\n\n\t\tuserNames := make([]string, len(users))\n\t\tfor i, u := range users {\n\t\t\tuserNames[i] = u.GetName()\n\t\t}\n\n\t\tassert.Contains(t, userNames, \"user1\")\n\t\tassert.Contains(t, userNames, \"user2\")\n\t\tassert.Contains(t, userNames, \"user3\")\n\t})\n}\n\n// TestAPIAuthenticationBypassCurl tests the same security issue using curl\n// from inside a container, which is closer to how the issue was discovered.\nfunc TestAPIAuthenticationBypassCurl(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tUsers: []string{\"testuser1\", \"testuser2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName(\"apiauthcurl\"))\n\trequire.NoError(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\t// Create a valid API key\n\tapiKeyOutput, err := headscale.Execute(\n\t\t[]string{\n\t\t\t\"headscale\",\n\t\t\t\"apikeys\",\n\t\t\t\"create\",\n\t\t\t\"--expiration\",\n\t\t\t\"24h\",\n\t\t},\n\t)\n\trequire.NoError(t, err)\n\n\tvalidAPIKey := strings.TrimSpace(apiKeyOutput)\n\n\tendpoint := headscale.GetEndpoint()\n\tapiURL := endpoint + \"/api/v1/user\"\n\n\tt.Run(\"Curl_NoAuth\", func(t *testing.T) {\n\t\t// Execute curl from inside the headscale container without auth\n\t\tcurlOutput, err := headscale.Execute(\n\t\t\t[]string{\n\t\t\t\t\"curl\",\n\t\t\t\t\"-s\",\n\t\t\t\t\"-w\",\n\t\t\t\t\"\\nHTTP_CODE:%{http_code}\",\n\t\t\t\tapiURL,\n\t\t\t},\n\t\t)\n\t\trequire.NoError(t, err)\n\n\t\t// Parse the curl output: the trailing line carries the HTTP status\n\t\t// code, everything else is response body.\n\t\tlines := strings.Split(curlOutput, \"\\n\")\n\n\t\tvar (\n\t\t\thttpCode    string\n\t\t\tbodyBuilder strings.Builder\n\t\t)\n\n\t\tfor _, line := range lines {\n\t\t\tif after, ok := strings.CutPrefix(line, \"HTTP_CODE:\"); ok {\n\t\t\t\thttpCode = after\n\t\t\t} else {\n\t\t\t\tbodyBuilder.WriteString(line)\n\t\t\t}\n\t\t}\n\n\t\tresponseBody := bodyBuilder.String()\n\n\t\t// Should return 401\n\t\tassert.Equal(t, \"401\", httpCode,\n\t\t\t\"Curl without auth should return 401\")\n\n\t\t// Should contain Unauthorized\n\t\tassert.Contains(t, responseBody, \"Unauthorized\",\n\t\t\t\"Response should contain 'Unauthorized'\")\n\n\t\t// Should NOT leak user data\n\t\tassert.NotContains(t, responseBody, \"testuser1\",\n\t\t\t\"SECURITY ISSUE: Should not leak user data\")\n\t\tassert.NotContains(t, responseBody, \"testuser2\",\n\t\t\t\"SECURITY ISSUE: Should not leak user 
data\")\n\n\t\t// Response should be small (just \"Unauthorized\")\n\t\tassert.Less(t, len(responseBody), 100,\n\t\t\t\"SECURITY ISSUE: Unauthorized response should be minimal, got: %s\", responseBody)\n\t})\n\n\tt.Run(\"Curl_InvalidAuth\", func(t *testing.T) {\n\t\t// Execute curl with invalid auth header\n\t\tcurlOutput, err := headscale.Execute(\n\t\t\t[]string{\n\t\t\t\t\"curl\",\n\t\t\t\t\"-s\",\n\t\t\t\t\"-H\",\n\t\t\t\t\"Authorization: InvalidToken\",\n\t\t\t\t\"-w\",\n\t\t\t\t\"\\nHTTP_CODE:%{http_code}\",\n\t\t\t\tapiURL,\n\t\t\t},\n\t\t)\n\t\trequire.NoError(t, err)\n\n\t\tlines := strings.Split(curlOutput, \"\\n\")\n\n\t\tvar (\n\t\t\thttpCode    string\n\t\t\tbodyBuilder strings.Builder\n\t\t)\n\n\t\tfor _, line := range lines {\n\t\t\tif after, ok := strings.CutPrefix(line, \"HTTP_CODE:\"); ok {\n\t\t\t\thttpCode = after\n\t\t\t} else {\n\t\t\t\tbodyBuilder.WriteString(line)\n\t\t\t}\n\t\t}\n\n\t\tresponseBody := bodyBuilder.String()\n\n\t\tassert.Equal(t, \"401\", httpCode)\n\t\tassert.Contains(t, responseBody, \"Unauthorized\")\n\t\tassert.NotContains(t, responseBody, \"testuser1\",\n\t\t\t\"SECURITY ISSUE: Should not leak user data\")\n\t\tassert.NotContains(t, responseBody, \"testuser2\",\n\t\t\t\"SECURITY ISSUE: Should not leak user data\")\n\t})\n\n\tt.Run(\"Curl_ValidAuth\", func(t *testing.T) {\n\t\t// Execute curl with valid API key\n\t\tcurlOutput, err := headscale.Execute(\n\t\t\t[]string{\n\t\t\t\t\"curl\",\n\t\t\t\t\"-s\",\n\t\t\t\t\"-H\",\n\t\t\t\t\"Authorization: Bearer \" + validAPIKey,\n\t\t\t\t\"-w\",\n\t\t\t\t\"\\nHTTP_CODE:%{http_code}\",\n\t\t\t\tapiURL,\n\t\t\t},\n\t\t)\n\t\trequire.NoError(t, err)\n\n\t\tlines := strings.Split(curlOutput, \"\\n\")\n\n\t\tvar (\n\t\t\thttpCode    string\n\t\t\tbodyBuilder strings.Builder\n\t\t)\n\n\t\tfor _, line := range lines {\n\t\t\tif after, ok := strings.CutPrefix(line, \"HTTP_CODE:\"); ok {\n\t\t\t\thttpCode = after\n\t\t\t} else {\n\t\t\t\tbodyBuilder.WriteString(line)\n\t\t\t}\n\t\t}\n\n\t\tresponseBody := bodyBuilder.String()\n\n\t\t// Should succeed\n\t\tassert.Equal(t, \"200\", httpCode,\n\t\t\t\"Curl with valid API key should return 200\")\n\n\t\t// Should contain user data\n\t\tvar response v1.ListUsersResponse\n\n\t\terr = protojson.Unmarshal([]byte(responseBody), &response)\n\t\trequire.NoError(t, err, \"Response should be valid protobuf JSON\")\n\n\t\tusers := response.GetUsers()\n\t\tassert.Len(t, users, 2, \"Should have 2 users\")\n\t})\n}\n\n// TestGRPCAuthenticationBypass tests that the gRPC authentication interceptor\n// properly blocks unauthorized requests.\n// This test verifies that the gRPC API does not have the same bypass issue\n// as the HTTP API middleware.\nfunc TestGRPCAuthenticationBypass(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tUsers: []string{\"grpcuser1\", \"grpcuser2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\t// We need TLS for remote gRPC connections\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithTestName(\"grpcauthtest\"),\n\t\thsic.WithConfigEnv(map[string]string{\n\t\t\t// Enable gRPC on the standard port\n\t\t\t\"HEADSCALE_GRPC_LISTEN_ADDR\": \"0.0.0.0:50443\",\n\t\t}),\n\t)\n\trequire.NoError(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\t// Create a valid API key\n\tapiKeyOutput, err := 
headscale.Execute(\n\t\t[]string{\n\t\t\t\"headscale\",\n\t\t\t\"apikeys\",\n\t\t\t\"create\",\n\t\t\t\"--expiration\",\n\t\t\t\"24h\",\n\t\t},\n\t)\n\trequire.NoError(t, err)\n\n\tvalidAPIKey := strings.TrimSpace(apiKeyOutput)\n\n\t// Get the gRPC endpoint\n\t// For gRPC, we need to use the hostname and port 50443\n\tgrpcAddress := headscale.GetHostname() + \":50443\"\n\n\tt.Run(\"gRPC_NoAPIKey\", func(t *testing.T) {\n\t\t// Test 1: Try to use CLI without API key (should fail)\n\t\t// When HEADSCALE_CLI_ADDRESS is set but HEADSCALE_CLI_API_KEY is not set,\n\t\t// the CLI should fail immediately\n\t\t_, err := headscale.Execute(\n\t\t\t[]string{\n\t\t\t\t\"sh\", \"-c\",\n\t\t\t\tfmt.Sprintf(\"HEADSCALE_CLI_ADDRESS=%s HEADSCALE_CLI_INSECURE=true headscale users list --output json 2>&1\", grpcAddress),\n\t\t\t},\n\t\t)\n\n\t\t// Should fail - CLI exits when API key is missing\n\t\tassert.Error(t, err,\n\t\t\t\"gRPC connection without API key should fail\")\n\t})\n\n\tt.Run(\"gRPC_InvalidAPIKey\", func(t *testing.T) {\n\t\t// Test 2: Try to use CLI with invalid API key (should fail with auth error)\n\t\toutput, err := headscale.Execute(\n\t\t\t[]string{\n\t\t\t\t\"sh\", \"-c\",\n\t\t\t\tfmt.Sprintf(\"HEADSCALE_CLI_ADDRESS=%s HEADSCALE_CLI_API_KEY=invalid-key-12345 HEADSCALE_CLI_INSECURE=true headscale users list --output json 2>&1\", grpcAddress),\n\t\t\t},\n\t\t)\n\n\t\t// Should fail with authentication error\n\t\trequire.Error(t, err,\n\t\t\t\"gRPC connection with invalid API key should fail\")\n\n\t\t// Should contain authentication error message\n\t\toutputStr := strings.ToLower(output)\n\t\tassert.True(t,\n\t\t\tstrings.Contains(outputStr, \"unauthenticated\") ||\n\t\t\t\tstrings.Contains(outputStr, \"invalid token\") ||\n\t\t\t\tstrings.Contains(outputStr, \"validating token\") ||\n\t\t\t\tstrings.Contains(outputStr, \"authentication\"),\n\t\t\t\"Error should indicate authentication failure, got: %s\", output)\n\n\t\t// Should NOT leak user data\n\t\tassert.NotContains(t, output, \"grpcuser1\",\n\t\t\t\"SECURITY ISSUE: gRPC should not leak user data with invalid auth\")\n\t\tassert.NotContains(t, output, \"grpcuser2\",\n\t\t\t\"SECURITY ISSUE: gRPC should not leak user data with invalid auth\")\n\t})\n\n\tt.Run(\"gRPC_ValidAPIKey\", func(t *testing.T) {\n\t\t// Test 3: Use CLI with valid API key (should succeed)\n\t\toutput, err := headscale.Execute(\n\t\t\t[]string{\n\t\t\t\t\"sh\", \"-c\",\n\t\t\t\tfmt.Sprintf(\"HEADSCALE_CLI_ADDRESS=%s HEADSCALE_CLI_API_KEY=%s HEADSCALE_CLI_INSECURE=true headscale users list --output json\", grpcAddress, validAPIKey),\n\t\t\t},\n\t\t)\n\n\t\t// Should succeed\n\t\trequire.NoError(t, err,\n\t\t\t\"gRPC connection with valid API key should succeed, output: %s\", output)\n\n\t\t// CLI outputs the users array directly, not wrapped in ListUsersResponse\n\t\t// Parse as JSON array (CLI uses json.Marshal, not protojson)\n\t\tvar users []*v1.User\n\n\t\terr = json.Unmarshal([]byte(output), &users)\n\t\trequire.NoError(t, err, \"Response should be valid JSON array\")\n\t\tassert.Len(t, users, 2, \"Should have 2 users\")\n\n\t\tuserNames := make([]string, len(users))\n\t\tfor i, u := range users {\n\t\t\tuserNames[i] = u.GetName()\n\t\t}\n\n\t\tassert.Contains(t, userNames, \"grpcuser1\")\n\t\tassert.Contains(t, userNames, \"grpcuser2\")\n\t})\n}\n\n// TestCLIWithConfigAuthenticationBypass tests that the headscale CLI\n// with --config flag does not have authentication bypass issues when\n// connecting to a remote server.\n// Note: When using --config with 
local unix socket, no auth is needed.\n// This test focuses on remote gRPC connections which require API keys.\nfunc TestCLIWithConfigAuthenticationBypass(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tUsers: []string{\"cliuser1\", \"cliuser2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithTestName(\"cliconfigauth\"),\n\t\thsic.WithConfigEnv(map[string]string{\n\t\t\t\"HEADSCALE_GRPC_LISTEN_ADDR\": \"0.0.0.0:50443\",\n\t\t}),\n\t)\n\trequire.NoError(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\t// Create a valid API key\n\tapiKeyOutput, err := headscale.Execute(\n\t\t[]string{\n\t\t\t\"headscale\",\n\t\t\t\"apikeys\",\n\t\t\t\"create\",\n\t\t\t\"--expiration\",\n\t\t\t\"24h\",\n\t\t},\n\t)\n\trequire.NoError(t, err)\n\n\tvalidAPIKey := strings.TrimSpace(apiKeyOutput)\n\n\tgrpcAddress := headscale.GetHostname() + \":50443\"\n\n\t// Create a config file for testing\n\tconfigWithoutKey := fmt.Sprintf(`\ncli:\n  address: %s\n  timeout: 5s\n  insecure: true\n`, grpcAddress)\n\n\tconfigWithInvalidKey := fmt.Sprintf(`\ncli:\n  address: %s\n  api_key: invalid-key-12345\n  timeout: 5s\n  insecure: true\n`, grpcAddress)\n\n\tconfigWithValidKey := fmt.Sprintf(`\ncli:\n  address: %s\n  api_key: %s\n  timeout: 5s\n  insecure: true\n`, grpcAddress, validAPIKey)\n\n\tt.Run(\"CLI_Config_NoAPIKey\", func(t *testing.T) {\n\t\t// Create config file without API key\n\t\terr := headscale.WriteFile(\"/tmp/config_no_key.yaml\", []byte(configWithoutKey))\n\t\trequire.NoError(t, err)\n\n\t\t// Try to use CLI with config that has no API key\n\t\t_, err = headscale.Execute(\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"--config\", \"/tmp/config_no_key.yaml\",\n\t\t\t\t\"users\", \"list\",\n\t\t\t\t\"--output\", \"json\",\n\t\t\t},\n\t\t)\n\n\t\t// Should fail\n\t\tassert.Error(t, err,\n\t\t\t\"CLI with config missing API key should fail\")\n\t})\n\n\tt.Run(\"CLI_Config_InvalidAPIKey\", func(t *testing.T) {\n\t\t// Create config file with invalid API key\n\t\terr := headscale.WriteFile(\"/tmp/config_invalid_key.yaml\", []byte(configWithInvalidKey))\n\t\trequire.NoError(t, err)\n\n\t\t// Try to use CLI with invalid API key\n\t\toutput, err := headscale.Execute(\n\t\t\t[]string{\n\t\t\t\t\"sh\", \"-c\",\n\t\t\t\t\"headscale --config /tmp/config_invalid_key.yaml users list --output json 2>&1\",\n\t\t\t},\n\t\t)\n\n\t\t// Should fail\n\t\trequire.Error(t, err,\n\t\t\t\"CLI with invalid API key should fail\")\n\n\t\t// Should indicate authentication failure\n\t\toutputStr := strings.ToLower(output)\n\t\tassert.True(t,\n\t\t\tstrings.Contains(outputStr, \"unauthenticated\") ||\n\t\t\t\tstrings.Contains(outputStr, \"invalid token\") ||\n\t\t\t\tstrings.Contains(outputStr, \"validating token\") ||\n\t\t\t\tstrings.Contains(outputStr, \"authentication\"),\n\t\t\t\"Error should indicate authentication failure, got: %s\", output)\n\n\t\t// Should NOT leak user data\n\t\tassert.NotContains(t, output, \"cliuser1\",\n\t\t\t\"SECURITY ISSUE: CLI should not leak user data with invalid auth\")\n\t\tassert.NotContains(t, output, \"cliuser2\",\n\t\t\t\"SECURITY ISSUE: CLI should not leak user data with invalid auth\")\n\t})\n\n\tt.Run(\"CLI_Config_ValidAPIKey\", func(t *testing.T) {\n\t\t// Create config file with valid API key\n\t\terr := headscale.WriteFile(\"/tmp/config_valid_key.yaml\", 
[]byte(configWithValidKey))\n\t\trequire.NoError(t, err)\n\n\t\t// Use CLI with valid API key\n\t\toutput, err := headscale.Execute(\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"--config\", \"/tmp/config_valid_key.yaml\",\n\t\t\t\t\"users\", \"list\",\n\t\t\t\t\"--output\", \"json\",\n\t\t\t},\n\t\t)\n\n\t\t// Should succeed\n\t\trequire.NoError(t, err,\n\t\t\t\"CLI with valid API key should succeed\")\n\n\t\t// CLI outputs the users array directly, not wrapped in ListUsersResponse\n\t\t// Parse as JSON array (CLI uses json.Marshal, not protojson)\n\t\tvar users []*v1.User\n\n\t\terr = json.Unmarshal([]byte(output), &users)\n\t\trequire.NoError(t, err, \"Response should be valid JSON array\")\n\t\tassert.Len(t, users, 2, \"Should have 2 users\")\n\n\t\tuserNames := make([]string, len(users))\n\t\tfor i, u := range users {\n\t\t\tuserNames[i] = u.GetName()\n\t\t}\n\n\t\tassert.Contains(t, userNames, \"cliuser1\")\n\t\tassert.Contains(t, userNames, \"cliuser2\")\n\t})\n}\n"
  },
  {
    "path": "integration/auth_key_test.go",
    "content": "package integration\n\nimport (\n\t\"fmt\"\n\t\"net/netip\"\n\t\"slices\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\tpolicyv2 \"github.com/juanfont/headscale/hscontrol/policy/v2\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/integration/hsic\"\n\t\"github.com/juanfont/headscale/integration/tsic\"\n\t\"github.com/samber/lo\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/tailcfg\"\n)\n\nfunc TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tfor _, https := range []bool{true, false} {\n\t\tt.Run(fmt.Sprintf(\"with-https-%t\", https), func(t *testing.T) {\n\t\t\tspec := ScenarioSpec{\n\t\t\t\tNodesPerUser: len(MustTestVersions),\n\t\t\t\tUsers:        []string{\"user1\", \"user2\"},\n\t\t\t}\n\n\t\t\tscenario, err := NewScenario(spec)\n\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\t\t\topts := []hsic.Option{\n\t\t\t\thsic.WithTestName(\"authkey-relogsame\"),\n\t\t\t}\n\n\t\t\terr = scenario.CreateHeadscaleEnv([]tsic.Option{}, opts...)\n\t\t\trequireNoErrHeadscaleEnv(t, err)\n\n\t\t\tallClients, err := scenario.ListTailscaleClients()\n\t\t\trequireNoErrListClients(t, err)\n\n\t\t\tallIps, err := scenario.ListTailscaleClientsIPs()\n\t\t\trequireNoErrListClientIPs(t, err)\n\n\t\t\terr = scenario.WaitForTailscaleSync()\n\t\t\trequireNoErrSync(t, err)\n\n\t\t\theadscale, err := scenario.Headscale()\n\t\t\trequireNoErrGetHeadscale(t, err)\n\n\t\t\texpectedNodes := collectExpectedNodeIDs(t, allClients)\n\t\t\trequireAllClientsOnline(t, headscale, expectedNodes, true, \"all clients should be connected\", 120*time.Second)\n\n\t\t\t// Validate that all nodes have NetInfo and DERP servers before logout\n\t\t\trequireAllClientsNetInfoAndDERP(t, headscale, expectedNodes, \"all clients should have NetInfo and DERP before logout\", 3*time.Minute)\n\n\t\t\t// assertClientsState(t, allClients)\n\n\t\t\tclientIPs := make(map[TailscaleClient][]netip.Addr)\n\n\t\t\tfor _, client := range allClients {\n\t\t\t\tips, err := client.IPs()\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"failed to get IPs for client %s: %s\", client.Hostname(), err)\n\t\t\t\t}\n\n\t\t\t\tclientIPs[client] = ips\n\t\t\t}\n\n\t\t\tvar (\n\t\t\t\tlistNodes             []*v1.Node\n\t\t\t\tnodeCountBeforeLogout int\n\t\t\t)\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tvar err error\n\n\t\t\t\tlistNodes, err = headscale.ListNodes()\n\t\t\t\tassert.NoError(c, err)\n\t\t\t\tassert.Len(c, listNodes, len(allClients))\n\n\t\t\t\tfor _, node := range listNodes {\n\t\t\t\t\tassertLastSeenSetWithCollect(c, node)\n\t\t\t\t}\n\t\t\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for expected node list before logout\")\n\n\t\t\tnodeCountBeforeLogout = len(listNodes)\n\t\t\tt.Logf(\"node count before logout: %d\", nodeCountBeforeLogout)\n\n\t\t\tfor _, client := range allClients {\n\t\t\t\terr := client.Logout()\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"failed to logout client %s: %s\", client.Hostname(), err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\terr = scenario.WaitForTailscaleLogout()\n\t\t\trequireNoErrLogout(t, err)\n\n\t\t\t// After taking down all nodes, verify all systems show nodes offline\n\t\t\trequireAllClientsOnline(t, headscale, expectedNodes, false, \"all nodes should have logged out\", 120*time.Second)\n\n\t\t\tt.Logf(\"all clients logged 
out\")\n\n\t\t\tt.Logf(\"Validating node persistence after logout at %s\", time.Now().Format(TimestampFormat))\n\t\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t\t\tvar err error\n\n\t\t\t\tlistNodes, err = headscale.ListNodes()\n\t\t\t\tassert.NoError(ct, err, \"Failed to list nodes after logout\")\n\t\t\t\tassert.Len(ct, listNodes, nodeCountBeforeLogout, \"Node count should match before logout count - expected %d nodes, got %d\", nodeCountBeforeLogout, len(listNodes))\n\t\t\t}, 30*time.Second, 2*time.Second, \"validating node persistence after logout (nodes should remain in database)\")\n\n\t\t\tfor _, node := range listNodes {\n\t\t\t\tassertLastSeenSet(t, node)\n\t\t\t}\n\n\t\t\t// if the server is not running with HTTPS, we have to wait a bit before\n\t\t\t// reconnecting, as recent Tailscale clients will only reconnect over\n\t\t\t// HTTPS once they have previously seen a noise connection.\n\t\t\t// https://github.com/tailscale/tailscale/commit/1eaad7d3deb0815e8932e913ca1a862afa34db38\n\t\t\t// https://github.com/juanfont/headscale/issues/2164\n\t\t\tif !https {\n\t\t\t\t//nolint:forbidigo // Intentional delay: Tailscale client requires 5 min wait before reconnecting over non-HTTPS\n\t\t\t\ttime.Sleep(5 * time.Minute)\n\t\t\t}\n\n\t\t\tuserMap, err := headscale.MapUsers()\n\t\t\trequire.NoError(t, err)\n\n\t\t\tfor _, userName := range spec.Users {\n\t\t\t\tkey, err := scenario.CreatePreAuthKey(userMap[userName].GetId(), true, false)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"failed to create pre-auth key for user %s: %s\", userName, err)\n\t\t\t\t}\n\n\t\t\t\terr = scenario.RunTailscaleUp(userName, headscale.GetEndpoint(), key.GetKey())\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"failed to run tailscale up for user %s: %s\", userName, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tt.Logf(\"Validating node persistence after relogin at %s\", time.Now().Format(TimestampFormat))\n\t\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t\t\tvar err error\n\n\t\t\t\tlistNodes, err = headscale.ListNodes()\n\t\t\t\tassert.NoError(ct, err, \"Failed to list nodes after relogin\")\n\t\t\t\tassert.Len(ct, listNodes, nodeCountBeforeLogout, \"Node count should remain unchanged after relogin - expected %d nodes, got %d\", nodeCountBeforeLogout, len(listNodes))\n\t\t\t}, 60*time.Second, 2*time.Second, \"validating node count stability after same-user auth key relogin\")\n\n\t\t\tfor _, node := range listNodes {\n\t\t\t\tassertLastSeenSet(t, node)\n\t\t\t}\n\n\t\t\trequireAllClientsOnline(t, headscale, expectedNodes, true, \"all clients should be connected to batcher\", 120*time.Second)\n\n\t\t\t// Wait for Tailscale sync before validating NetInfo to ensure proper state propagation\n\t\t\terr = scenario.WaitForTailscaleSync()\n\t\t\trequireNoErrSync(t, err)\n\n\t\t\t// Validate that all nodes have NetInfo and DERP servers after reconnection\n\t\t\trequireAllClientsNetInfoAndDERP(t, headscale, expectedNodes, \"all clients should have NetInfo and DERP after reconnection\", 3*time.Minute)\n\n\t\t\terr = scenario.WaitForTailscaleSync()\n\t\t\trequireNoErrSync(t, err)\n\n\t\t\tallAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {\n\t\t\t\treturn x.String()\n\t\t\t})\n\n\t\t\tsuccess := pingAllHelper(t, allClients, allAddrs)\n\t\t\tt.Logf(\"%d successful pings out of %d\", success, len(allClients)*len(allIps))\n\n\t\t\tfor _, client := range allClients {\n\t\t\t\tips, err := client.IPs()\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"failed to get IPs for client 
%s: %s\", client.Hostname(), err)\n\t\t\t\t}\n\n\t\t\t\t// let's check if the IPs are the same\n\t\t\t\tif len(ips) != len(clientIPs[client]) {\n\t\t\t\t\tt.Fatalf(\"IPs changed for client %s\", client.Hostname())\n\t\t\t\t}\n\n\t\t\t\tfor _, ip := range ips {\n\t\t\t\t\tif !slices.Contains(clientIPs[client], ip) {\n\t\t\t\t\t\tt.Fatalf(\n\t\t\t\t\t\t\t\"IPs changed for client %s. Used to be %v now %v\",\n\t\t\t\t\t\t\tclient.Hostname(),\n\t\t\t\t\t\t\tclientIPs[client],\n\t\t\t\t\t\t\tips,\n\t\t\t\t\t\t)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tvar err error\n\n\t\t\t\tlistNodes, err = headscale.ListNodes()\n\t\t\t\tassert.NoError(c, err)\n\t\t\t\tassert.Len(c, listNodes, nodeCountBeforeLogout)\n\n\t\t\t\tfor _, node := range listNodes {\n\t\t\t\t\tassertLastSeenSetWithCollect(c, node)\n\t\t\t\t}\n\t\t\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for node list after relogin\")\n\t\t})\n\t}\n}\n\n// This test will first log in two sets of nodes to two sets of users, then\n// it will log out all nodes and log them in as user1 using a pre-auth key.\n// This should create new nodes for user1 while preserving the original nodes for user2.\n// Pre-auth key re-authentication with a different user creates new nodes, not transfers.\nfunc TestAuthKeyLogoutAndReloginNewUser(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: len(MustTestVersions),\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\trequire.NoError(t, err)\n\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{},\n\t\thsic.WithTestName(\"keyrelognewuser\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t// assertClientsState(t, allClients)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\t// Collect expected node IDs for validation\n\texpectedNodes := collectExpectedNodeIDs(t, allClients)\n\n\t// Validate initial connection state\n\trequireAllClientsOnline(t, headscale, expectedNodes, true, \"all clients should be connected after initial login\", 120*time.Second)\n\trequireAllClientsNetInfoAndDERP(t, headscale, expectedNodes, \"all clients should have NetInfo and DERP after initial login\", 3*time.Minute)\n\n\tvar (\n\t\tlistNodes             []*v1.Node\n\t\tnodeCountBeforeLogout int\n\t)\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tvar err error\n\n\t\tlistNodes, err = headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, listNodes, len(allClients))\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for expected node list before logout\")\n\n\tnodeCountBeforeLogout = len(listNodes)\n\tt.Logf(\"node count before logout: %d\", nodeCountBeforeLogout)\n\n\tfor _, client := range allClients {\n\t\terr := client.Logout()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to logout client %s: %s\", client.Hostname(), err)\n\t\t}\n\t}\n\n\terr = scenario.WaitForTailscaleLogout()\n\trequireNoErrLogout(t, err)\n\n\t// Validate that all nodes are offline after logout\n\trequireAllClientsOnline(t, headscale, expectedNodes, false, \"all nodes should be offline after logout\", 120*time.Second)\n\n\tt.Logf(\"all clients logged out\")\n\n\tuserMap, err := headscale.MapUsers()\n\trequire.NoError(t, err)\n\n\t// Create a new authkey for 
user1, to be used for all clients\n\tkey, err := scenario.CreatePreAuthKey(userMap[\"user1\"].GetId(), true, false)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create pre-auth key for user1: %s\", err)\n\t}\n\n\t// Log in all clients as user1. RunTailscaleUp selects clients by the\n\t// username they were created under, so iterate over every user in the\n\t// spec to bring all clients up with user1's key.\n\tfor _, userName := range spec.Users {\n\t\terr = scenario.RunTailscaleUp(userName, headscale.GetEndpoint(), key.GetKey())\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to run tailscale up for user %s: %s\", userName, err)\n\t\t}\n\t}\n\n\tvar user1Nodes []*v1.Node\n\n\tt.Logf(\"Validating user1 node count after relogin at %s\", time.Now().Format(TimestampFormat))\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tvar err error\n\n\t\tuser1Nodes, err = headscale.ListNodes(\"user1\")\n\t\tassert.NoError(ct, err, \"Failed to list nodes for user1 after relogin\")\n\t\tassert.Len(ct, user1Nodes, len(allClients), \"User1 should have all %d clients after relogin, got %d nodes\", len(allClients), len(user1Nodes))\n\t}, 60*time.Second, 2*time.Second, \"validating user1 has all client nodes after auth key relogin\")\n\n\t// Collect expected node IDs for user1 after relogin\n\texpectedUser1Nodes := make([]types.NodeID, 0, len(user1Nodes))\n\tfor _, node := range user1Nodes {\n\t\texpectedUser1Nodes = append(expectedUser1Nodes, types.NodeID(node.GetId()))\n\t}\n\n\t// Validate connection state after relogin as user1\n\trequireAllClientsOnline(t, headscale, expectedUser1Nodes, true, \"all user1 nodes should be connected after relogin\", 120*time.Second)\n\trequireAllClientsNetInfoAndDERP(t, headscale, expectedUser1Nodes, \"all user1 nodes should have NetInfo and DERP after relogin\", 3*time.Minute)\n\n\t// Validate that user2 still has their original nodes after user1's re-authentication\n\t// When nodes re-authenticate with a different user's pre-auth key, NEW nodes are created\n\t// for the new user. 
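(For example: with NodesPerUser set to len(MustTestVersions) and two users,\n\t// user1 ends up owning all len(allClients) node records after the relogin,\n\t// while user2 keeps its original len(allClients)/2 records; the assertions\n\t// below check exactly these counts.)\n\t// 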
The original nodes remain with the original user.\n\tvar user2Nodes []*v1.Node\n\n\tt.Logf(\"Validating user2 node persistence after user1 relogin at %s\", time.Now().Format(TimestampFormat))\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tvar err error\n\n\t\tuser2Nodes, err = headscale.ListNodes(\"user2\")\n\t\tassert.NoError(ct, err, \"Failed to list nodes for user2 after user1 relogin\")\n\t\tassert.Len(ct, user2Nodes, len(allClients)/2, \"User2 should still have %d clients after user1 relogin, got %d nodes\", len(allClients)/2, len(user2Nodes))\n\t}, 30*time.Second, 2*time.Second, \"validating user2 nodes persist after user1 relogin (should not be affected)\")\n\n\tt.Logf(\"Validating client login states after user switch at %s\", time.Now().Format(TimestampFormat))\n\n\tfor _, client := range allClients {\n\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t\tstatus, err := client.Status()\n\t\t\tassert.NoError(ct, err, \"Failed to get status for client %s\", client.Hostname())\n\t\t\tassert.Equal(ct, \"user1@test.no\", status.User[status.Self.UserID].LoginName, \"Client %s should be logged in as user1 after user switch, got %s\", client.Hostname(), status.User[status.Self.UserID].LoginName)\n\t\t}, 30*time.Second, 2*time.Second, \"validating %s is logged in as user1 after auth key user switch\", client.Hostname())\n\t}\n}\n\nfunc TestAuthKeyLogoutAndReloginSameUserExpiredKey(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tfor _, https := range []bool{true, false} {\n\t\tt.Run(fmt.Sprintf(\"with-https-%t\", https), func(t *testing.T) {\n\t\t\tspec := ScenarioSpec{\n\t\t\t\tNodesPerUser: len(MustTestVersions),\n\t\t\t\tUsers:        []string{\"user1\", \"user2\"},\n\t\t\t}\n\n\t\t\tscenario, err := NewScenario(spec)\n\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\t\t\topts := []hsic.Option{\n\t\t\t\thsic.WithTestName(\"authkey-rlogexpired\"),\n\t\t\t}\n\n\t\t\terr = scenario.CreateHeadscaleEnv([]tsic.Option{}, opts...)\n\t\t\trequireNoErrHeadscaleEnv(t, err)\n\n\t\t\tallClients, err := scenario.ListTailscaleClients()\n\t\t\trequireNoErrListClients(t, err)\n\n\t\t\terr = scenario.WaitForTailscaleSync()\n\t\t\trequireNoErrSync(t, err)\n\n\t\t\t// assertClientsState(t, allClients)\n\n\t\t\tclientIPs := make(map[TailscaleClient][]netip.Addr)\n\n\t\t\tfor _, client := range allClients {\n\t\t\t\tips, err := client.IPs()\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"failed to get IPs for client %s: %s\", client.Hostname(), err)\n\t\t\t\t}\n\n\t\t\t\tclientIPs[client] = ips\n\t\t\t}\n\n\t\t\theadscale, err := scenario.Headscale()\n\t\t\trequireNoErrGetHeadscale(t, err)\n\n\t\t\t// Collect expected node IDs for validation\n\t\t\texpectedNodes := collectExpectedNodeIDs(t, allClients)\n\n\t\t\t// Validate initial connection state\n\t\t\trequireAllClientsOnline(t, headscale, expectedNodes, true, \"all clients should be connected after initial login\", 120*time.Second)\n\t\t\trequireAllClientsNetInfoAndDERP(t, headscale, expectedNodes, \"all clients should have NetInfo and DERP after initial login\", 3*time.Minute)\n\n\t\t\tvar (\n\t\t\t\tlistNodes             []*v1.Node\n\t\t\t\tnodeCountBeforeLogout int\n\t\t\t)\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tvar err error\n\n\t\t\t\tlistNodes, err = headscale.ListNodes()\n\t\t\t\tassert.NoError(c, err)\n\t\t\t\tassert.Len(c, listNodes, len(allClients))\n\t\t\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for expected node list before 
logout\")\n\n\t\t\tnodeCountBeforeLogout = len(listNodes)\n\t\t\tt.Logf(\"node count before logout: %d\", nodeCountBeforeLogout)\n\n\t\t\tfor _, client := range allClients {\n\t\t\t\terr := client.Logout()\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"failed to logout client %s: %s\", client.Hostname(), err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\terr = scenario.WaitForTailscaleLogout()\n\t\t\trequireNoErrLogout(t, err)\n\n\t\t\t// Validate that all nodes are offline after logout\n\t\t\trequireAllClientsOnline(t, headscale, expectedNodes, false, \"all nodes should be offline after logout\", 120*time.Second)\n\n\t\t\tt.Logf(\"all clients logged out\")\n\n\t\t\t// if the server is not running with HTTPS, we have to wait a bit before\n\t\t\t// reconnecting, as recent Tailscale clients will only reconnect over\n\t\t\t// HTTPS once they have previously seen a noise connection.\n\t\t\t// https://github.com/tailscale/tailscale/commit/1eaad7d3deb0815e8932e913ca1a862afa34db38\n\t\t\t// https://github.com/juanfont/headscale/issues/2164\n\t\t\tif !https {\n\t\t\t\t//nolint:forbidigo // Intentional delay: Tailscale client requires 5 min wait before reconnecting over non-HTTPS\n\t\t\t\ttime.Sleep(5 * time.Minute)\n\t\t\t}\n\n\t\t\tuserMap, err := headscale.MapUsers()\n\t\t\trequire.NoError(t, err)\n\n\t\t\tfor _, userName := range spec.Users {\n\t\t\t\tkey, err := scenario.CreatePreAuthKey(userMap[userName].GetId(), true, false)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"failed to create pre-auth key for user %s: %s\", userName, err)\n\t\t\t\t}\n\n\t\t\t\t// Expire the key so it can't be used\n\t\t\t\t_, err = headscale.Execute(\n\t\t\t\t\t[]string{\n\t\t\t\t\t\t\"headscale\",\n\t\t\t\t\t\t\"preauthkeys\",\n\t\t\t\t\t\t\"expire\",\n\t\t\t\t\t\t\"--id\",\n\t\t\t\t\t\tstrconv.FormatUint(key.GetId(), 10),\n\t\t\t\t\t})\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\terr = scenario.RunTailscaleUp(userName, headscale.GetEndpoint(), key.GetKey())\n\t\t\t\tassert.ErrorContains(t, err, \"authkey expired\")\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestAuthKeyDeleteKey tests Issue #2830: node with deleted auth key should still reconnect.\n// Scenario from user report: \"create node, delete the auth key, restart to validate it can connect\"\n// Steps:\n// 1. Create node with auth key\n// 2. DELETE the auth key from database (completely remove it)\n// 3. 
Restart node - should successfully reconnect using MachineKey identity.\nfunc TestAuthKeyDeleteKey(t *testing.T) {\n\tIntegrationSkip(t)\n\n\t// Create scenario with NO nodes - we'll create the node manually so we can capture the auth key\n\tscenario, err := NewScenario(ScenarioSpec{\n\t\tNodesPerUser: 0, // No nodes created automatically\n\t\tUsers:        []string{\"user1\"},\n\t})\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName(\"delkey\"))\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\t// Get the user\n\tuserMap, err := headscale.MapUsers()\n\trequire.NoError(t, err)\n\n\tuserID := userMap[\"user1\"].GetId()\n\n\t// Create a pre-auth key - we keep the full key string before it gets redacted\n\tauthKey, err := scenario.CreatePreAuthKey(userID, false, false)\n\trequire.NoError(t, err)\n\n\tauthKeyString := authKey.GetKey()\n\tauthKeyID := authKey.GetId()\n\tt.Logf(\"Created pre-auth key ID %d: %s\", authKeyID, authKeyString)\n\n\t// Create a tailscale client and log it in with the auth key\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t)\n\trequire.NoError(t, err)\n\n\terr = client.Login(headscale.GetEndpoint(), authKeyString)\n\trequire.NoError(t, err)\n\n\t// Wait for the node to be registered\n\tvar user1Nodes []*v1.Node\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tvar err error\n\n\t\tuser1Nodes, err = headscale.ListNodes(\"user1\")\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, user1Nodes, 1)\n\t}, 30*time.Second, 500*time.Millisecond, \"waiting for node to be registered\")\n\n\tnodeID := user1Nodes[0].GetId()\n\tnodeName := user1Nodes[0].GetName()\n\tt.Logf(\"Node %d (%s) created successfully with auth_key_id=%d\", nodeID, nodeName, authKeyID)\n\n\t// Verify node is online\n\trequireAllClientsOnline(t, headscale, []types.NodeID{types.NodeID(nodeID)}, true, \"node should be online initially\", 120*time.Second)\n\n\t// DELETE the pre-auth key using the API\n\tt.Logf(\"Deleting pre-auth key ID %d using API\", authKeyID)\n\n\terr = headscale.DeleteAuthKey(authKeyID)\n\trequire.NoError(t, err)\n\tt.Logf(\"Successfully deleted auth key\")\n\n\t// Simulate node restart (down + up)\n\tt.Logf(\"Restarting node after deleting its auth key\")\n\n\terr = client.Down()\n\trequire.NoError(t, err)\n\n\t// Wait for client to fully stop before bringing it back up\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tstatus, err := client.Status()\n\t\tassert.NoError(c, err)\n\t\tassert.Equal(c, \"Stopped\", status.BackendState)\n\t}, 10*time.Second, 200*time.Millisecond, \"client should be stopped\")\n\n\terr = client.Up()\n\trequire.NoError(t, err)\n\n\t// Verify node comes back online\n\t// This will FAIL without the fix because auth key validation will reject deleted key\n\t// With the fix, MachineKey identity allows reconnection even with deleted key\n\trequireAllClientsOnline(t, headscale, []types.NodeID{types.NodeID(nodeID)}, true, \"node should reconnect after restart despite deleted key\", 120*time.Second)\n\n\tt.Logf(\"✓ Node successfully reconnected after its auth key was deleted\")\n}\n\n// TestAuthKeyLogoutAndReloginRoutesPreserved tests that routes remain serving\n// after a node logs out and re-authenticates with the same user.\n//\n// This test validates the fix for issue #2896:\n// 
https://github.com/juanfont/headscale/issues/2896\n//\n// Bug: When a node with already-approved routes restarts/re-authenticates,\n// the routes show as \"Approved\" and \"Available\" but NOT \"Serving\" (Primary).\n// A headscale restart would fix it, indicating a state management issue.\n//\n// The test scenario:\n// 1. Node registers with auth key and advertises routes\n// 2. Routes are auto-approved and verified as serving\n// 3. Node logs out\n// 4. Node re-authenticates with same auth key\n// 5. Routes should STILL be serving (this is where the bug manifests).\nfunc TestAuthKeyLogoutAndReloginRoutesPreserved(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tuser := \"routeuser\"\n\tadvertiseRoute := \"10.55.0.0/24\"\n\n\t// The auto-approver policy below references the approving user through a\n\t// pointer, so declare the username once and take its address in the policy.\n\trouteApprover := policyv2.Username(user + \"@test.no\")\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 1,\n\t\tUsers:        []string{user},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{\n\t\t\ttsic.WithAcceptRoutes(),\n\t\t\t// Advertise route on initial login\n\t\t\ttsic.WithExtraLoginArgs([]string{\"--advertise-routes=\" + advertiseRoute}),\n\t\t},\n\t\thsic.WithTestName(\"routelogout\"),\n\t\thsic.WithACLPolicy(\n\t\t\t&policyv2.Policy{\n\t\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\t\tSources:      []policyv2.Alias{policyv2.Wildcard},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{{Alias: policyv2.Wildcard, Ports: []tailcfg.PortRange{tailcfg.PortRangeAny}}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tAutoApprovers: policyv2.AutoApproverPolicy{\n\t\t\t\t\tRoutes: map[netip.Prefix]policyv2.AutoApprovers{\n\t\t\t\t\t\tnetip.MustParsePrefix(advertiseRoute): {&routeApprover},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\trequire.Len(t, allClients, 1)\n\n\tclient := allClients[0]\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\t// Step 1: Verify initial route is advertised, approved, and SERVING\n\tt.Logf(\"Step 1: Verifying initial route is advertised, approved, and SERVING at %s\", time.Now().Format(TimestampFormat))\n\n\tvar initialNode *v1.Node\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1, \"Should have exactly 1 node\")\n\n\t\tif len(nodes) == 1 {\n\t\t\tinitialNode = nodes[0]\n\t\t\t// Check: 1 announced, 1 approved, 1 serving (subnet route)\n\t\t\tassert.Lenf(c, initialNode.GetAvailableRoutes(), 1,\n\t\t\t\t\"Node should have 1 available route, got %v\", initialNode.GetAvailableRoutes())\n\t\t\tassert.Lenf(c, initialNode.GetApprovedRoutes(), 1,\n\t\t\t\t\"Node should have 1 approved route, got %v\", initialNode.GetApprovedRoutes())\n\t\t\tassert.Lenf(c, initialNode.GetSubnetRoutes(), 1,\n\t\t\t\t\"Node should have 1 serving (subnet) route, got %v - THIS IS THE BUG if empty\", initialNode.GetSubnetRoutes())\n\t\t\tassert.Contains(c, initialNode.GetSubnetRoutes(), advertiseRoute,\n\t\t\t\t\"Subnet routes should contain %s\", advertiseRoute)\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond, \"initial route should be serving\")\n\n\trequire.NotNil(t, initialNode, \"Initial node should be found\")\n\tinitialNodeID := initialNode.GetId()\n\tt.Logf(\"Initial node ID: %d, Available: %v, 
Approved: %v, Serving: %v\",\n\t\tinitialNodeID, initialNode.GetAvailableRoutes(), initialNode.GetApprovedRoutes(), initialNode.GetSubnetRoutes())\n\n\t// Step 2: Logout\n\tt.Logf(\"Step 2: Logging out at %s\", time.Now().Format(TimestampFormat))\n\n\terr = client.Logout()\n\trequire.NoError(t, err)\n\n\t// Wait for logout to complete\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tstatus, err := client.Status()\n\t\tassert.NoError(ct, err)\n\t\tassert.Equal(ct, \"NeedsLogin\", status.BackendState, \"Expected NeedsLogin state after logout\")\n\t}, 30*time.Second, 1*time.Second, \"waiting for logout to complete\")\n\n\tt.Logf(\"Logout completed, node should still exist in database\")\n\n\t// Verify node still exists (routes should still be in DB)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1, \"Node should persist in database after logout\")\n\t}, 10*time.Second, 500*time.Millisecond, \"node should persist after logout\")\n\n\t// Step 3: Re-authenticate with the SAME user (using auth key)\n\tt.Logf(\"Step 3: Re-authenticating with same user at %s\", time.Now().Format(TimestampFormat))\n\n\tuserMap, err := headscale.MapUsers()\n\trequire.NoError(t, err)\n\n\tkey, err := scenario.CreatePreAuthKey(userMap[user].GetId(), true, false)\n\trequire.NoError(t, err)\n\n\t// Re-login - the container already has extraLoginArgs with --advertise-routes\n\t// from the initial setup, so routes will be advertised on re-login\n\terr = scenario.RunTailscaleUp(user, headscale.GetEndpoint(), key.GetKey())\n\trequire.NoError(t, err)\n\n\t// Wait for client to be running\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tstatus, err := client.Status()\n\t\tassert.NoError(ct, err)\n\t\tassert.Equal(ct, \"Running\", status.BackendState, \"Expected Running state after relogin\")\n\t}, 30*time.Second, 1*time.Second, \"waiting for relogin to complete\")\n\n\tt.Logf(\"Re-authentication completed at %s\", time.Now().Format(TimestampFormat))\n\n\t// Step 4: THE CRITICAL TEST - Verify routes are STILL SERVING after re-authentication\n\tt.Logf(\"Step 4: Verifying routes are STILL SERVING after re-authentication at %s\", time.Now().Format(TimestampFormat))\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1, \"Should still have exactly 1 node after relogin\")\n\n\t\tif len(nodes) == 1 {\n\t\t\tnode := nodes[0]\n\t\t\tt.Logf(\"After relogin - Available: %v, Approved: %v, Serving: %v\",\n\t\t\t\tnode.GetAvailableRoutes(), node.GetApprovedRoutes(), node.GetSubnetRoutes())\n\n\t\t\t// This is where issue #2896 manifests:\n\t\t\t// - Available shows the route (from Hostinfo.RoutableIPs)\n\t\t\t// - Approved shows the route (from ApprovedRoutes)\n\t\t\t// - BUT Serving (SubnetRoutes/PrimaryRoutes) is EMPTY!\n\t\t\tassert.Lenf(c, node.GetAvailableRoutes(), 1,\n\t\t\t\t\"Node should have 1 available route after relogin, got %v\", node.GetAvailableRoutes())\n\t\t\tassert.Lenf(c, node.GetApprovedRoutes(), 1,\n\t\t\t\t\"Node should have 1 approved route after relogin, got %v\", node.GetApprovedRoutes())\n\t\t\tassert.Lenf(c, node.GetSubnetRoutes(), 1,\n\t\t\t\t\"BUG #2896: Node should have 1 SERVING route after relogin, got %v\", node.GetSubnetRoutes())\n\t\t\tassert.Contains(c, node.GetSubnetRoutes(), advertiseRoute,\n\t\t\t\t\"BUG #2896: Subnet routes should contain %s after relogin\", 
advertiseRoute)\n\n\t\t\t// Also verify node ID was preserved (same node, not new registration)\n\t\t\tassert.Equal(c, initialNodeID, node.GetId(),\n\t\t\t\t\"Node ID should be preserved after same-user relogin\")\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond,\n\t\t\"BUG #2896: routes should remain SERVING after logout/relogin with same user\")\n\n\tt.Logf(\"Test completed - verifying issue #2896 fix\")\n}\n"
  },
  {
    "path": "integration/auth_oidc_test.go",
    "content": "package integration\n\nimport (\n\t\"maps\"\n\t\"net/netip\"\n\t\"net/url\"\n\t\"sort\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/google/go-cmp/cmp/cmpopts\"\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\tpolicyv2 \"github.com/juanfont/headscale/hscontrol/policy/v2\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/integration/hsic\"\n\t\"github.com/juanfont/headscale/integration/tsic\"\n\t\"github.com/oauth2-proxy/mockoidc\"\n\t\"github.com/samber/lo\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/ipn/ipnstate\"\n\t\"tailscale.com/tailcfg\"\n)\n\nfunc TestOIDCAuthenticationPingAll(t *testing.T) {\n\tIntegrationSkip(t)\n\n\t// Logins to MockOIDC are served by a queue with a strict order; if we\n\t// use more than one node per user, the order of the logins will not be\n\t// deterministic and the test will fail.\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 1,\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t\tOIDCUsers: []mockoidc.MockUser{\n\t\t\toidcMockUser(\"user1\", true),\n\t\t\toidcMockUser(\"user2\", false),\n\t\t},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\trequire.NoError(t, err)\n\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\toidcMap := map[string]string{\n\t\t\"HEADSCALE_OIDC_ISSUER\":             scenario.mockOIDC.Issuer(),\n\t\t\"HEADSCALE_OIDC_CLIENT_ID\":          scenario.mockOIDC.ClientID(),\n\t\t\"CREDENTIALS_DIRECTORY_TEST\":        \"/tmp\",\n\t\t\"HEADSCALE_OIDC_CLIENT_SECRET_PATH\": \"${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret\",\n\t}\n\n\t// OIDC tests configure the mock OIDC provider via environment\n\t// variables and inject the client secret as a file. The\n\t// ${CREDENTIALS_DIRECTORY_TEST} placeholder in the client secret path is\n\t// expanded from the environment, so headscale reads the secret injected\n\t// at /tmp/hs_client_oidc_secret below. 
This\n\t// pattern is shared by all OIDC integration tests.\n\terr = scenario.CreateHeadscaleEnvWithLoginURL(\n\t\tnil,\n\t\thsic.WithTestName(\"oidcauthping\"),\n\t\thsic.WithConfigEnv(oidcMap),\n\t\thsic.WithFileInContainer(\"/tmp/hs_client_oidc_secret\", []byte(scenario.mockOIDC.ClientSecret())),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\tallIps, err := scenario.ListTailscaleClientsIPs()\n\trequireNoErrListClientIPs(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t// assertClientsState(t, allClients)\n\n\tallAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {\n\t\treturn x.String()\n\t})\n\n\tsuccess := pingAllHelper(t, allClients, allAddrs)\n\tt.Logf(\"%d successful pings out of %d\", success, len(allClients)*len(allIps))\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\tlistUsers, err := headscale.ListUsers()\n\trequire.NoError(t, err)\n\n\twant := []*v1.User{\n\t\t{\n\t\t\tId:    1,\n\t\t\tName:  \"user1\",\n\t\t\tEmail: \"user1@test.no\",\n\t\t},\n\t\t{\n\t\t\tId:         2,\n\t\t\tName:       \"user1\",\n\t\t\tEmail:      \"user1@headscale.net\",\n\t\t\tProvider:   \"oidc\",\n\t\t\tProviderId: scenario.mockOIDC.Issuer() + \"/user1\",\n\t\t},\n\t\t{\n\t\t\tId:    3,\n\t\t\tName:  \"user2\",\n\t\t\tEmail: \"user2@test.no\",\n\t\t},\n\t\t{\n\t\t\tId:         4,\n\t\t\tName:       \"user2\",\n\t\t\tEmail:      \"\", // Unverified\n\t\t\tProvider:   \"oidc\",\n\t\t\tProviderId: scenario.mockOIDC.Issuer() + \"/user2\",\n\t\t},\n\t}\n\n\tsort.Slice(listUsers, func(i, j int) bool {\n\t\treturn listUsers[i].GetId() < listUsers[j].GetId()\n\t})\n\n\tif diff := cmp.Diff(want, listUsers, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, \"CreatedAt\")); diff != \"\" {\n\t\tt.Fatalf(\"unexpected users: %s\", diff)\n\t}\n}\n\n// TestOIDCExpireNodesBasedOnTokenExpiry validates that nodes correctly transition to NeedsLogin\n// state when their OIDC tokens expire. 
This test uses a short token TTL to validate the\n// expiration behavior without waiting for production-length timeouts.\n//\n// The test verifies:\n// - Nodes can successfully authenticate via OIDC and establish connectivity\n// - When OIDC tokens expire, nodes transition to NeedsLogin state\n// - The expiration is based on individual token issue times, not a global timer\n//\n// Known timing considerations:\n// - Nodes may expire at different times due to sequential login processing\n// - The test must account for login time spread between first and last node.\nfunc TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tshortAccessTTL := 5 * time.Minute\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 1,\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t\tOIDCUsers: []mockoidc.MockUser{\n\t\t\toidcMockUser(\"user1\", true),\n\t\t\toidcMockUser(\"user2\", false),\n\t\t},\n\t\tOIDCAccessTTL: shortAccessTTL,\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\toidcMap := map[string]string{\n\t\t\"HEADSCALE_OIDC_ISSUER\":                scenario.mockOIDC.Issuer(),\n\t\t\"HEADSCALE_OIDC_CLIENT_ID\":             scenario.mockOIDC.ClientID(),\n\t\t\"HEADSCALE_OIDC_CLIENT_SECRET\":         scenario.mockOIDC.ClientSecret(),\n\t\t\"HEADSCALE_OIDC_USE_EXPIRY_FROM_TOKEN\": \"1\",\n\t}\n\n\terr = scenario.CreateHeadscaleEnvWithLoginURL(\n\t\tnil,\n\t\thsic.WithTestName(\"oidcexpirenodes\"),\n\t\thsic.WithConfigEnv(oidcMap),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\tallIps, err := scenario.ListTailscaleClientsIPs()\n\trequireNoErrListClientIPs(t, err)\n\n\t// Record when we start waiting for sync so we can measure how long the\n\t// logins take; this feeds the token expiry timing estimate below.\n\tsyncStartTime := time.Now()\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\tloginDuration := time.Since(syncStartTime)\n\tt.Logf(\"Login and sync completed in %v\", loginDuration)\n\n\t// assertClientsState(t, allClients)\n\n\tallAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {\n\t\treturn x.String()\n\t})\n\n\tsuccess := pingAllHelper(t, allClients, allAddrs)\n\tt.Logf(\"%d successful pings out of %d (before expiry)\", success, len(allClients)*len(allIps))\n\n\t// Wait for OIDC token expiry and verify all nodes transition to NeedsLogin.\n\t// We add extra time to account for:\n\t// - Sequential login processing causing different token issue times\n\t// - Network and processing delays\n\t// - Safety margin for test reliability\n\tloginTimeSpread := 1 * time.Minute // Account for sequential login delays\n\tsafetyBuffer := 30 * time.Second   // Additional safety margin\n\ttotalWaitTime := shortAccessTTL + loginTimeSpread + safetyBuffer\n\n\tt.Logf(\"Waiting %v for OIDC tokens to expire (TTL: %v, spread: %v, buffer: %v)\",\n\t\ttotalWaitTime, shortAccessTTL, loginTimeSpread, safetyBuffer)\n\n\t// EventuallyWithT retries the test function until it passes or times out.\n\t// IMPORTANT: Use 'ct' (CollectT) for all assertions inside the function, not 't'.\n\t// Using 't' would cause immediate test failure without retries, defeating the purpose\n\t// of EventuallyWithT which is designed to handle timing-dependent conditions.\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t// Check each client's status individually to provide better diagnostics\n\t\texpiredCount := 0\n\n\t\tfor _, client := range allClients {\n\t\t\tstatus, err := 
client.Status()\n\t\t\tif assert.NoError(ct, err, \"failed to get status for client %s\", client.Hostname()) {\n\t\t\t\tif status.BackendState == \"NeedsLogin\" {\n\t\t\t\t\texpiredCount++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Log progress for debugging\n\t\tif expiredCount < len(allClients) {\n\t\t\tt.Logf(\"Token expiry progress: %d/%d clients in NeedsLogin state\", expiredCount, len(allClients))\n\t\t}\n\n\t\t// All clients must be in NeedsLogin state\n\t\tassert.Equal(ct, len(allClients), expiredCount,\n\t\t\t\"expected all %d clients to be in NeedsLogin state, but only %d are\",\n\t\t\tlen(allClients), expiredCount)\n\n\t\t// Only check detailed logout state if all clients are expired\n\t\tif expiredCount == len(allClients) {\n\t\t\tassertTailscaleNodesLogout(ct, allClients)\n\t\t}\n\t}, totalWaitTime, 5*time.Second)\n}\n\nfunc TestOIDC024UserCreation(t *testing.T) {\n\tIntegrationSkip(t)\n\n\ttests := []struct {\n\t\tname          string\n\t\tconfig        map[string]string\n\t\temailVerified bool\n\t\tcliUsers      []string\n\t\toidcUsers     []string\n\t\twant          func(iss string) []*v1.User\n\t}{\n\t\t{\n\t\t\tname:          \"no-migration-verified-email\",\n\t\t\temailVerified: true,\n\t\t\tcliUsers:      []string{\"user1\", \"user2\"},\n\t\t\toidcUsers:     []string{\"user1\", \"user2\"},\n\t\t\twant: func(iss string) []*v1.User {\n\t\t\t\treturn []*v1.User{\n\t\t\t\t\t{\n\t\t\t\t\t\tId:    1,\n\t\t\t\t\t\tName:  \"user1\",\n\t\t\t\t\t\tEmail: \"user1@test.no\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tId:         2,\n\t\t\t\t\t\tName:       \"user1\",\n\t\t\t\t\t\tEmail:      \"user1@headscale.net\",\n\t\t\t\t\t\tProvider:   \"oidc\",\n\t\t\t\t\t\tProviderId: iss + \"/user1\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tId:    3,\n\t\t\t\t\t\tName:  \"user2\",\n\t\t\t\t\t\tEmail: \"user2@test.no\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tId:         4,\n\t\t\t\t\t\tName:       \"user2\",\n\t\t\t\t\t\tEmail:      \"user2@headscale.net\",\n\t\t\t\t\t\tProvider:   \"oidc\",\n\t\t\t\t\t\tProviderId: iss + \"/user2\",\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:          \"no-migration-not-verified-email\",\n\t\t\temailVerified: false,\n\t\t\tcliUsers:      []string{\"user1\", \"user2\"},\n\t\t\toidcUsers:     []string{\"user1\", \"user2\"},\n\t\t\twant: func(iss string) []*v1.User {\n\t\t\t\treturn []*v1.User{\n\t\t\t\t\t{\n\t\t\t\t\t\tId:    1,\n\t\t\t\t\t\tName:  \"user1\",\n\t\t\t\t\t\tEmail: \"user1@test.no\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tId:         2,\n\t\t\t\t\t\tName:       \"user1\",\n\t\t\t\t\t\tProvider:   \"oidc\",\n\t\t\t\t\t\tProviderId: iss + \"/user1\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tId:    3,\n\t\t\t\t\t\tName:  \"user2\",\n\t\t\t\t\t\tEmail: \"user2@test.no\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tId:         4,\n\t\t\t\t\t\tName:       \"user2\",\n\t\t\t\t\t\tProvider:   \"oidc\",\n\t\t\t\t\t\tProviderId: iss + \"/user2\",\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:          \"migration-no-strip-domains-not-verified-email\",\n\t\t\temailVerified: false,\n\t\t\tcliUsers:      []string{\"user1.headscale.net\", \"user2.headscale.net\"},\n\t\t\toidcUsers:     []string{\"user1\", \"user2\"},\n\t\t\twant: func(iss string) []*v1.User {\n\t\t\t\treturn []*v1.User{\n\t\t\t\t\t{\n\t\t\t\t\t\tId:    1,\n\t\t\t\t\t\tName:  \"user1.headscale.net\",\n\t\t\t\t\t\tEmail: \"user1.headscale.net@test.no\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tId:         2,\n\t\t\t\t\t\tName:       \"user1\",\n\t\t\t\t\t\tProvider:   
\"oidc\",\n\t\t\t\t\t\tProviderId: iss + \"/user1\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tId:    3,\n\t\t\t\t\t\tName:  \"user2.headscale.net\",\n\t\t\t\t\t\tEmail: \"user2.headscale.net@test.no\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tId:         4,\n\t\t\t\t\t\tName:       \"user2\",\n\t\t\t\t\t\tProvider:   \"oidc\",\n\t\t\t\t\t\tProviderId: iss + \"/user2\",\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tspec := ScenarioSpec{\n\t\t\t\tNodesPerUser: 1,\n\t\t\t}\n\t\t\tspec.Users = append(spec.Users, tt.cliUsers...)\n\n\t\t\tfor _, user := range tt.oidcUsers {\n\t\t\t\tspec.OIDCUsers = append(spec.OIDCUsers, oidcMockUser(user, tt.emailVerified))\n\t\t\t}\n\n\t\t\tscenario, err := NewScenario(spec)\n\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\t\t\toidcMap := map[string]string{\n\t\t\t\t\"HEADSCALE_OIDC_ISSUER\":             scenario.mockOIDC.Issuer(),\n\t\t\t\t\"HEADSCALE_OIDC_CLIENT_ID\":          scenario.mockOIDC.ClientID(),\n\t\t\t\t\"CREDENTIALS_DIRECTORY_TEST\":        \"/tmp\",\n\t\t\t\t\"HEADSCALE_OIDC_CLIENT_SECRET_PATH\": \"${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret\",\n\t\t\t}\n\t\t\tmaps.Copy(oidcMap, tt.config)\n\n\t\t\terr = scenario.CreateHeadscaleEnvWithLoginURL(\n\t\t\t\tnil,\n\t\t\t\thsic.WithTestName(\"oidcmigration\"),\n\t\t\t\thsic.WithConfigEnv(oidcMap),\n\t\t\t\thsic.WithFileInContainer(\"/tmp/hs_client_oidc_secret\", []byte(scenario.mockOIDC.ClientSecret())),\n\t\t\t)\n\t\t\trequireNoErrHeadscaleEnv(t, err)\n\n\t\t\t// Ensure that the nodes have logged in, this is what\n\t\t\t// triggers user creation via OIDC.\n\t\t\terr = scenario.WaitForTailscaleSync()\n\t\t\trequireNoErrSync(t, err)\n\n\t\t\theadscale, err := scenario.Headscale()\n\t\t\trequire.NoError(t, err)\n\n\t\t\twant := tt.want(scenario.mockOIDC.Issuer())\n\n\t\t\tlistUsers, err := headscale.ListUsers()\n\t\t\trequire.NoError(t, err)\n\n\t\t\tsort.Slice(listUsers, func(i, j int) bool {\n\t\t\t\treturn listUsers[i].GetId() < listUsers[j].GetId()\n\t\t\t})\n\n\t\t\tif diff := cmp.Diff(want, listUsers, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, \"CreatedAt\")); diff != \"\" {\n\t\t\t\tt.Errorf(\"unexpected users: %s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestOIDCAuthenticationWithPKCE(t *testing.T) {\n\tIntegrationSkip(t)\n\n\t// Single user with one node for testing PKCE flow\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 1,\n\t\tUsers:        []string{\"user1\"},\n\t\tOIDCUsers: []mockoidc.MockUser{\n\t\t\toidcMockUser(\"user1\", true),\n\t\t},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\toidcMap := map[string]string{\n\t\t\"HEADSCALE_OIDC_ISSUER\":             scenario.mockOIDC.Issuer(),\n\t\t\"HEADSCALE_OIDC_CLIENT_ID\":          scenario.mockOIDC.ClientID(),\n\t\t\"HEADSCALE_OIDC_CLIENT_SECRET_PATH\": \"${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret\",\n\t\t\"CREDENTIALS_DIRECTORY_TEST\":        \"/tmp\",\n\t\t\"HEADSCALE_OIDC_PKCE_ENABLED\":       \"1\", // Enable PKCE\n\t}\n\n\terr = scenario.CreateHeadscaleEnvWithLoginURL(\n\t\tnil,\n\t\thsic.WithTestName(\"oidcauthpkce\"),\n\t\thsic.WithConfigEnv(oidcMap),\n\t\thsic.WithFileInContainer(\"/tmp/hs_client_oidc_secret\", []byte(scenario.mockOIDC.ClientSecret())),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\t// Get all clients and verify they can connect\n\tallClients, err := 
scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\tallIps, err := scenario.ListTailscaleClientsIPs()\n\trequireNoErrListClientIPs(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\tallAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {\n\t\treturn x.String()\n\t})\n\n\tsuccess := pingAllHelper(t, allClients, allAddrs)\n\tt.Logf(\"%d successful pings out of %d\", success, len(allClients)*len(allIps))\n}\n\n
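// Summary of the key identity model that the relogin tests below assert\n// (a restatement of the assertions in this file, not an authoritative\n// statement of the protocol):\n//\n//\tevent                         machine key  node key  node ID\n//\tsame device, same user login  preserved    rotated   preserved\n//\tsame device, new user login   preserved    rotated   new node created\n//\n// The machine key identifies the physical device, the node key identifies a\n// session, and the node ID identifies a (user, machine) pairing in the\n// database.\n\n// TestOIDCReloginSameNodeNewUser tests the scenario where:\n// 1. A Tailscale client logs in with user1 (creates node1 for user1)\n// 2. The same client logs out and logs in with user2 (creates node2 for user2)\n// 3. The same client logs out and logs in with user1 again (reuses node1, node2 remains)\n// This validates that OIDC relogin properly handles node reuse and cleanup.\nfunc TestOIDCReloginSameNodeNewUser(t *testing.T) {\n\tIntegrationSkip(t)\n\n\t// Create no nodes and no users\n\tscenario, err := NewScenario(ScenarioSpec{\n\t\t// First login creates the first OIDC user\n\t\t// Second login logs in the same node, which creates a new node\n\t\t// Third login logs in the same node back into the original user\n\t\tOIDCUsers: []mockoidc.MockUser{\n\t\t\toidcMockUser(\"user1\", true),\n\t\t\toidcMockUser(\"user2\", true),\n\t\t\toidcMockUser(\"user1\", true),\n\t\t},\n\t})\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\toidcMap := map[string]string{\n\t\t\"HEADSCALE_OIDC_ISSUER\":             scenario.mockOIDC.Issuer(),\n\t\t\"HEADSCALE_OIDC_CLIENT_ID\":          scenario.mockOIDC.ClientID(),\n\t\t\"CREDENTIALS_DIRECTORY_TEST\":        \"/tmp\",\n\t\t\"HEADSCALE_OIDC_CLIENT_SECRET_PATH\": \"${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret\",\n\t}\n\n\terr = scenario.CreateHeadscaleEnvWithLoginURL(\n\t\tnil,\n\t\thsic.WithTestName(\"oidc-authrelog\"),\n\t\thsic.WithConfigEnv(oidcMap),\n\t\thsic.WithFileInContainer(\"/tmp/hs_client_oidc_secret\", []byte(scenario.mockOIDC.ClientSecret())),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\tts, err := scenario.CreateTailscaleNode(\"unstable\", tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]))\n\trequire.NoError(t, err)\n\n\tu, err := ts.LoginWithURL(headscale.GetEndpoint())\n\trequire.NoError(t, err)\n\n\t_, err = doLoginURL(ts.Hostname(), u)\n\trequire.NoError(t, err)\n\n\tt.Logf(\"Validating initial user creation at %s\", time.Now().Format(TimestampFormat))\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tlistUsers, err := headscale.ListUsers()\n\t\tassert.NoError(ct, err, \"Failed to list users during initial validation\")\n\t\tassert.Len(ct, listUsers, 1, \"Expected exactly 1 user after first login, got %d\", len(listUsers))\n\n\t\twantUsers := []*v1.User{\n\t\t\t{\n\t\t\t\tId:         1,\n\t\t\t\tName:       \"user1\",\n\t\t\t\tEmail:      \"user1@headscale.net\",\n\t\t\t\tProvider:   \"oidc\",\n\t\t\t\tProviderId: scenario.mockOIDC.Issuer() + \"/user1\",\n\t\t\t},\n\t\t}\n\n\t\tsort.Slice(listUsers, func(i, j int) bool {\n\t\t\treturn listUsers[i].GetId() < listUsers[j].GetId()\n\t\t})\n\n\t\tif diff := cmp.Diff(wantUsers, listUsers, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, \"CreatedAt\")); diff != \"\" {\n\t\t\tct.Errorf(\"User validation failed after first login - unexpected users: %s\", diff)\n\t\t}\n\t}, 30*time.Second, 1*time.Second, \"validating user1 creation after initial OIDC 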
login\")\n\n\tt.Logf(\"Validating initial node creation at %s\", time.Now().Format(TimestampFormat))\n\n\tvar listNodes []*v1.Node\n\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tvar err error\n\n\t\tlistNodes, err = headscale.ListNodes()\n\t\tassert.NoError(ct, err, \"Failed to list nodes during initial validation\")\n\t\tassert.Len(ct, listNodes, 1, \"Expected exactly 1 node after first login, got %d\", len(listNodes))\n\t}, 30*time.Second, 1*time.Second, \"validating initial node creation for user1 after OIDC login\")\n\n\t// Collect expected node IDs for validation after user1 initial login\n\texpectedNodes := make([]types.NodeID, 0, 1)\n\n\tvar nodeID uint64\n\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tstatus := ts.MustStatus()\n\t\tassert.NotEmpty(ct, status.Self.ID, \"Node ID should be populated in status\")\n\n\t\tvar err error\n\n\t\tnodeID, err = strconv.ParseUint(string(status.Self.ID), 10, 64)\n\t\tassert.NoError(ct, err, \"Failed to parse node ID from status\")\n\t}, 30*time.Second, 1*time.Second, \"waiting for node ID to be populated in status after initial login\")\n\n\texpectedNodes = append(expectedNodes, types.NodeID(nodeID))\n\n\t// Validate initial connection state for user1\n\tvalidateInitialConnection(t, headscale, expectedNodes)\n\n\t// Log out user1 and log in user2, this should create a new node\n\t// for user2, the node should have the same machine key and\n\t// a new node key.\n\terr = ts.Logout()\n\trequire.NoError(t, err)\n\n\t// TODO(kradalby): Not sure why we need to logout twice, but it fails and\n\t// logs in immediately after the first logout and I cannot reproduce it\n\t// manually.\n\terr = ts.Logout()\n\trequire.NoError(t, err)\n\n\t// Wait for logout to complete and then do second logout\n\tt.Logf(\"Waiting for user1 logout completion at %s\", time.Now().Format(TimestampFormat))\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t// Check that the first logout completed\n\t\tstatus, err := ts.Status()\n\t\tassert.NoError(ct, err, \"Failed to get client status during logout validation\")\n\t\tassert.Equal(ct, \"NeedsLogin\", status.BackendState, \"Expected NeedsLogin state after logout, got %s\", status.BackendState)\n\t}, 30*time.Second, 1*time.Second, \"waiting for user1 logout to complete before user2 login\")\n\n\tu, err = ts.LoginWithURL(headscale.GetEndpoint())\n\trequire.NoError(t, err)\n\n\t_, err = doLoginURL(ts.Hostname(), u)\n\trequire.NoError(t, err)\n\n\tt.Logf(\"Validating user2 creation at %s\", time.Now().Format(TimestampFormat))\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tlistUsers, err := headscale.ListUsers()\n\t\tassert.NoError(ct, err, \"Failed to list users after user2 login\")\n\t\tassert.Len(ct, listUsers, 2, \"Expected exactly 2 users after user2 login, got %d users\", len(listUsers))\n\n\t\twantUsers := []*v1.User{\n\t\t\t{\n\t\t\t\tId:         1,\n\t\t\t\tName:       \"user1\",\n\t\t\t\tEmail:      \"user1@headscale.net\",\n\t\t\t\tProvider:   \"oidc\",\n\t\t\t\tProviderId: scenario.mockOIDC.Issuer() + \"/user1\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tId:         2,\n\t\t\t\tName:       \"user2\",\n\t\t\t\tEmail:      \"user2@headscale.net\",\n\t\t\t\tProvider:   \"oidc\",\n\t\t\t\tProviderId: scenario.mockOIDC.Issuer() + \"/user2\",\n\t\t\t},\n\t\t}\n\n\t\tsort.Slice(listUsers, func(i, j int) bool {\n\t\t\treturn listUsers[i].GetId() < listUsers[j].GetId()\n\t\t})\n\n\t\tif diff := cmp.Diff(wantUsers, listUsers, cmpopts.IgnoreUnexported(v1.User{}), 
cmpopts.IgnoreFields(v1.User{}, \"CreatedAt\")); diff != \"\" {\n\t\t\tct.Errorf(\"User validation failed after user2 login - expected both user1 and user2: %s\", diff)\n\t\t}\n\t}, 30*time.Second, 1*time.Second, \"validating both user1 and user2 exist after second OIDC login\")\n\n\tvar listNodesAfterNewUserLogin []*v1.Node\n\t// First, wait for the new node to be created\n\tt.Logf(\"Waiting for user2 node creation at %s\", time.Now().Format(TimestampFormat))\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tlistNodesAfterNewUserLogin, err = headscale.ListNodes()\n\t\tassert.NoError(ct, err, \"Failed to list nodes after user2 login\")\n\t\t// We might temporarily have more than 2 nodes during cleanup, so check for at least 2\n\t\tassert.GreaterOrEqual(ct, len(listNodesAfterNewUserLogin), 2, \"Should have at least 2 nodes after user2 login, got %d (may include temporary nodes during cleanup)\", len(listNodesAfterNewUserLogin))\n\t}, 30*time.Second, 1*time.Second, \"waiting for user2 node creation (allowing temporary extra nodes during cleanup)\")\n\n\t// Then wait for cleanup to stabilize at exactly 2 nodes\n\tt.Logf(\"Waiting for node cleanup stabilization at %s\", time.Now().Format(TimestampFormat))\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tlistNodesAfterNewUserLogin, err = headscale.ListNodes()\n\t\tassert.NoError(ct, err, \"Failed to list nodes during cleanup validation\")\n\t\tassert.Len(ct, listNodesAfterNewUserLogin, 2, \"Should have exactly 2 nodes after cleanup (1 for user1, 1 for user2), got %d nodes\", len(listNodesAfterNewUserLogin))\n\n\t\t// Validate that both nodes have the same machine key but different node keys\n\t\tif len(listNodesAfterNewUserLogin) >= 2 {\n\t\t\t// Machine key is the same as the \"machine\" has not changed,\n\t\t\t// but Node key is not as it is a new node\n\t\t\tassert.Equal(ct, listNodes[0].GetMachineKey(), listNodesAfterNewUserLogin[0].GetMachineKey(), \"Machine key should be preserved from original node\")\n\t\t\tassert.Equal(ct, listNodesAfterNewUserLogin[0].GetMachineKey(), listNodesAfterNewUserLogin[1].GetMachineKey(), \"Both nodes should share the same machine key\")\n\t\t\tassert.NotEqual(ct, listNodesAfterNewUserLogin[0].GetNodeKey(), listNodesAfterNewUserLogin[1].GetNodeKey(), \"Node keys should be different between user1 and user2 nodes\")\n\t\t}\n\t}, 90*time.Second, 2*time.Second, \"waiting for node count stabilization at exactly 2 nodes after user2 login\")\n\n\t// Security validation: Only user2's node should be active after user switch\n\tvar activeUser2NodeID types.NodeID\n\n\tfor _, node := range listNodesAfterNewUserLogin {\n\t\tif node.GetUser().GetId() == 2 { // user2\n\t\t\tactiveUser2NodeID = types.NodeID(node.GetId())\n\t\t\tt.Logf(\"Active user2 node: %d (User: %s)\", node.GetId(), node.GetUser().GetName())\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Validate only user2's node is online (security requirement)\n\tt.Logf(\"Validating only user2 node is online at %s\", time.Now().Format(TimestampFormat))\n\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodeStore, err := headscale.DebugNodeStore()\n\t\tassert.NoError(c, err, \"Failed to get nodestore debug info\")\n\n\t\t// Check user2 node is online\n\t\tif node, exists := nodeStore[activeUser2NodeID]; exists {\n\t\t\tassert.NotNil(c, node.IsOnline, \"User2 node should have online status\")\n\n\t\t\tif node.IsOnline != nil {\n\t\t\t\tassert.True(c, *node.IsOnline, \"User2 node should be online after login\")\n\t\t\t}\n\t\t} else 
{\n\t\t\tassert.Fail(c, \"User2 node not found in nodestore\")\n\t\t}\n\t}, 60*time.Second, 2*time.Second, \"validating only user2 node is online after user switch\")\n\n\t// Before logging out user2, validate we have exactly 2 nodes and both are stable\n\tt.Logf(\"Pre-logout validation: checking node stability at %s\", time.Now().Format(TimestampFormat))\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tcurrentNodes, err := headscale.ListNodes()\n\t\tassert.NoError(ct, err, \"Failed to list nodes before user2 logout\")\n\t\tassert.Len(ct, currentNodes, 2, \"Should have exactly 2 stable nodes before user2 logout, got %d\", len(currentNodes))\n\n\t\t// Validate node stability - ensure no phantom nodes\n\t\tfor i, node := range currentNodes {\n\t\t\tassert.NotNil(ct, node.GetUser(), \"Node %d should have a valid user before logout\", i)\n\t\t\tassert.NotEmpty(ct, node.GetMachineKey(), \"Node %d should have a valid machine key before logout\", i)\n\t\t\tt.Logf(\"Pre-logout node %d: User=%s, MachineKey=%s\", i, node.GetUser().GetName(), node.GetMachineKey()[:16]+\"...\")\n\t\t}\n\t}, 60*time.Second, 2*time.Second, \"validating stable node count and integrity before user2 logout\")\n\n\t// Log out user2 and log back in as user1. No new node should be created;\n\t// the node should now \"become\" node1 again.\n\terr = ts.Logout()\n\trequire.NoError(t, err)\n\n\tt.Logf(\"Logged out take one\")\n\tt.Log(\"timestamp: \" + time.Now().Format(TimestampFormat) + \"\\n\")\n\n\t// TODO(kradalby): Not sure why we need to logout twice, but it fails and\n\t// logs in immediately after the first logout and I cannot reproduce it\n\t// manually.\n\terr = ts.Logout()\n\trequire.NoError(t, err)\n\n\tt.Logf(\"Logged out take two\")\n\tt.Log(\"timestamp: \" + time.Now().Format(TimestampFormat) + \"\\n\")\n\n\t// Wait for the logout to complete before logging user1 back in\n\tt.Logf(\"Waiting for user2 logout completion at %s\", time.Now().Format(TimestampFormat))\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t// Check that the logout completed\n\t\tstatus, err := ts.Status()\n\t\tassert.NoError(ct, err, \"Failed to get client status during user2 logout validation\")\n\t\tassert.Equal(ct, \"NeedsLogin\", status.BackendState, \"Expected NeedsLogin state after user2 logout, got %s\", status.BackendState)\n\t}, 30*time.Second, 1*time.Second, \"waiting for user2 logout to complete before user1 relogin\")\n\n\t// Before logging back in, ensure we still have exactly 2 nodes\n\t// Note: We skip validateLogoutComplete here since it expects all nodes to be offline,\n\t// but in the OIDC scenario we keep both nodes in the DB with only the active user online\n\n\t// Additional validation that nodes are properly maintained during logout\n\tt.Logf(\"Post-logout validation: checking node persistence at %s\", time.Now().Format(TimestampFormat))\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tcurrentNodes, err := headscale.ListNodes()\n\t\tassert.NoError(ct, err, \"Failed to list nodes after user2 logout\")\n\t\tassert.Len(ct, currentNodes, 2, \"Should still have exactly 2 nodes after user2 logout (nodes should persist), got %d\", len(currentNodes))\n\n\t\t// Ensure both nodes are still valid (not cleaned up incorrectly)\n\t\tfor i, node := range currentNodes {\n\t\t\tassert.NotNil(ct, node.GetUser(), \"Node %d should still have a valid user after user2 logout\", i)\n\t\t\tassert.NotEmpty(ct, node.GetMachineKey(), \"Node %d should still have a valid machine key after user2 logout\", 
i)\n\t\t\tt.Logf(\"Post-logout node %d: User=%s, MachineKey=%s\", i, node.GetUser().GetName(), node.GetMachineKey()[:16]+\"...\")\n\t\t}\n\t}, 60*time.Second, 2*time.Second, \"validating node persistence and integrity after user2 logout\")\n\n\t// We do not actually \"change\" the user here, it is done by logging in again\n\t// as the OIDC mock server is kind of like a stack, and the next user is\n\t// prepared and ready to go.\n\tu, err = ts.LoginWithURL(headscale.GetEndpoint())\n\trequire.NoError(t, err)\n\n\t_, err = doLoginURL(ts.Hostname(), u)\n\trequire.NoError(t, err)\n\n\tt.Logf(\"Waiting for user1 relogin completion at %s\", time.Now().Format(TimestampFormat))\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tstatus, err := ts.Status()\n\t\tassert.NoError(ct, err, \"Failed to get client status during user1 relogin validation\")\n\t\tassert.Equal(ct, \"Running\", status.BackendState, \"Expected Running state after user1 relogin, got %s\", status.BackendState)\n\t}, 30*time.Second, 1*time.Second, \"waiting for user1 relogin to complete (final login)\")\n\n\tt.Logf(\"Logged back in\")\n\tt.Log(\"timestamp: \" + time.Now().Format(TimestampFormat) + \"\\n\")\n\n\tt.Logf(\"Final validation: checking user persistence at %s\", time.Now().Format(TimestampFormat))\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tlistUsers, err := headscale.ListUsers()\n\t\tassert.NoError(ct, err, \"Failed to list users during final validation\")\n\t\tassert.Len(ct, listUsers, 2, \"Should still have exactly 2 users after user1 relogin, got %d\", len(listUsers))\n\n\t\twantUsers := []*v1.User{\n\t\t\t{\n\t\t\t\tId:         1,\n\t\t\t\tName:       \"user1\",\n\t\t\t\tEmail:      \"user1@headscale.net\",\n\t\t\t\tProvider:   \"oidc\",\n\t\t\t\tProviderId: scenario.mockOIDC.Issuer() + \"/user1\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tId:         2,\n\t\t\t\tName:       \"user2\",\n\t\t\t\tEmail:      \"user2@headscale.net\",\n\t\t\t\tProvider:   \"oidc\",\n\t\t\t\tProviderId: scenario.mockOIDC.Issuer() + \"/user2\",\n\t\t\t},\n\t\t}\n\n\t\tsort.Slice(listUsers, func(i, j int) bool {\n\t\t\treturn listUsers[i].GetId() < listUsers[j].GetId()\n\t\t})\n\n\t\tif diff := cmp.Diff(wantUsers, listUsers, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, \"CreatedAt\")); diff != \"\" {\n\t\t\tct.Errorf(\"Final user validation failed - both users should persist after relogin cycle: %s\", diff)\n\t\t}\n\t}, 30*time.Second, 1*time.Second, \"validating user persistence after complete relogin cycle (user1->user2->user1)\")\n\n\tvar listNodesAfterLoggingBackIn []*v1.Node\n\t// Wait for login to complete and nodes to stabilize\n\tt.Logf(\"Final node validation: checking node stability after user1 relogin at %s\", time.Now().Format(TimestampFormat))\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tlistNodesAfterLoggingBackIn, err = headscale.ListNodes()\n\t\tassert.NoError(ct, err, \"Failed to list nodes during final validation\")\n\n\t\t// Allow for temporary instability during login process\n\t\tif len(listNodesAfterLoggingBackIn) < 2 {\n\t\t\tct.Errorf(\"Not enough nodes yet during final validation, got %d, want at least 2\", len(listNodesAfterLoggingBackIn))\n\t\t\treturn\n\t\t}\n\n\t\t// Final check should have exactly 2 nodes\n\t\tassert.Len(ct, listNodesAfterLoggingBackIn, 2, \"Should have exactly 2 nodes after complete relogin cycle, got %d\", len(listNodesAfterLoggingBackIn))\n\n\t\t// Validate that the machine we had when we logged in the first time, has the same\n\t\t// 
machine key, but a different ID than the newly logged in version of the same\n\t\t// machine.\n\t\tassert.Equal(ct, listNodes[0].GetMachineKey(), listNodesAfterNewUserLogin[0].GetMachineKey(), \"Original user1 machine key should match user1 node after user switch\")\n\t\tassert.Equal(ct, listNodes[0].GetNodeKey(), listNodesAfterNewUserLogin[0].GetNodeKey(), \"Original user1 node key should match user1 node after user switch\")\n\t\tassert.Equal(ct, listNodes[0].GetId(), listNodesAfterNewUserLogin[0].GetId(), \"Original user1 node ID should match user1 node after user switch\")\n\t\tassert.Equal(ct, listNodes[0].GetMachineKey(), listNodesAfterNewUserLogin[1].GetMachineKey(), \"User1 and user2 nodes should share the same machine key\")\n\t\tassert.NotEqual(ct, listNodes[0].GetId(), listNodesAfterNewUserLogin[1].GetId(), \"User1 and user2 nodes should have different node IDs\")\n\t\tassert.NotEqual(ct, listNodes[0].GetUser().GetId(), listNodesAfterNewUserLogin[1].GetUser().GetId(), \"User1 and user2 nodes should belong to different users\")\n\n\t\t// Even though we are logging in again with the same user, the previous node key has\n\t\t// expired and a new one has been generated. The node entry in the database should be\n\t\t// reused, since the user and machine key still match.\n\t\tassert.Equal(ct, listNodes[0].GetMachineKey(), listNodesAfterLoggingBackIn[0].GetMachineKey(), \"Machine key should remain consistent after user1 relogin\")\n\t\tassert.NotEqual(ct, listNodes[0].GetNodeKey(), listNodesAfterLoggingBackIn[0].GetNodeKey(), \"Node key should be regenerated after user1 relogin\")\n\t\tassert.Equal(ct, listNodes[0].GetId(), listNodesAfterLoggingBackIn[0].GetId(), \"Node ID should be preserved for user1 after relogin\")\n\n\t\t// The \"logged back in\" machine should have the same machine key but a different node key\n\t\t// than the version logged in with a different user.\n\t\tassert.Equal(ct, listNodesAfterLoggingBackIn[0].GetMachineKey(), listNodesAfterLoggingBackIn[1].GetMachineKey(), \"Both final nodes should share the same machine key\")\n\t\tassert.NotEqual(ct, listNodesAfterLoggingBackIn[0].GetNodeKey(), listNodesAfterLoggingBackIn[1].GetNodeKey(), \"Final nodes should have different node keys for different users\")\n\n\t\tt.Logf(\"Final validation complete - node counts and key relationships verified at %s\", time.Now().Format(TimestampFormat))\n\t}, 60*time.Second, 2*time.Second, \"validating final node state after complete user1->user2->user1 relogin cycle with detailed key validation\")\n\n\t// Security validation: Only user1's node should be active after relogin\n\tvar activeUser1NodeID types.NodeID\n\n\tfor _, node := range listNodesAfterLoggingBackIn {\n\t\tif node.GetUser().GetId() == 1 { // user1\n\t\t\tactiveUser1NodeID = types.NodeID(node.GetId())\n\t\t\tt.Logf(\"Active user1 node after relogin: %d (User: %s)\", node.GetId(), node.GetUser().GetName())\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Validate only user1's node is online (security requirement)\n\tt.Logf(\"Validating only user1 node is online after relogin at %s\", time.Now().Format(TimestampFormat))\n\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodeStore, err := headscale.DebugNodeStore()\n\t\tassert.NoError(c, err, \"Failed to get nodestore debug info\")\n\n\t\t// Check user1 node is online\n\t\tif node, exists := nodeStore[activeUser1NodeID]; exists {\n\t\t\tassert.NotNil(c, node.IsOnline, \"User1 node should have online status after relogin\")\n\n\t\t\tif node.IsOnline != nil {\n\t\t\t\tassert.True(c, 
*node.IsOnline, \"User1 node should be online after relogin\")\n\t\t\t}\n\t\t} else {\n\t\t\tassert.Fail(c, \"User1 node not found in nodestore after relogin\")\n\t\t}\n\t}, 60*time.Second, 2*time.Second, \"validating only user1 node is online after final relogin\")\n}\n\n// TestOIDCFollowUpUrl validates the follow-up login flow.\n// Prerequisites:\n// - short TTL for the registration cache via HEADSCALE_TUNING_REGISTER_CACHE_EXPIRATION\n// Scenario:\n// - client starts a login process and gets initial AuthURL\n// - time.Sleep(HEADSCALE_TUNING_REGISTER_CACHE_EXPIRATION + 30 secs) waits for the cache to expire\n// - client checks its status to verify that the AuthURL has changed (replaced by a follow-up URL)\n// - client uses the new AuthURL to log in. It should complete successfully.\nfunc TestOIDCFollowUpUrl(t *testing.T) {\n\tIntegrationSkip(t)\n\n\t// Create no nodes and no users\n\tscenario, err := NewScenario(\n\t\tScenarioSpec{\n\t\t\tOIDCUsers: []mockoidc.MockUser{\n\t\t\t\toidcMockUser(\"user1\", true),\n\t\t\t},\n\t\t},\n\t)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\toidcMap := map[string]string{\n\t\t\"HEADSCALE_OIDC_ISSUER\":             scenario.mockOIDC.Issuer(),\n\t\t\"HEADSCALE_OIDC_CLIENT_ID\":          scenario.mockOIDC.ClientID(),\n\t\t\"CREDENTIALS_DIRECTORY_TEST\":        \"/tmp\",\n\t\t\"HEADSCALE_OIDC_CLIENT_SECRET_PATH\": \"${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret\",\n\t\t// smaller cache expiration time to quickly expire AuthURL\n\t\t\"HEADSCALE_TUNING_REGISTER_CACHE_CLEANUP\":    \"10s\",\n\t\t\"HEADSCALE_TUNING_REGISTER_CACHE_EXPIRATION\": \"1m30s\",\n\t}\n\n\terr = scenario.CreateHeadscaleEnvWithLoginURL(\n\t\tnil,\n\t\thsic.WithTestName(\"oidc-followup\"),\n\t\thsic.WithConfigEnv(oidcMap),\n\t\thsic.WithFileInContainer(\"/tmp/hs_client_oidc_secret\", []byte(scenario.mockOIDC.ClientSecret())),\n\t)\n\trequire.NoError(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\tlistUsers, err := headscale.ListUsers()\n\trequire.NoError(t, err)\n\tassert.Empty(t, listUsers)\n\n\tts, err := scenario.CreateTailscaleNode(\n\t\t\"unstable\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t)\n\trequire.NoError(t, err)\n\n\tu, err := ts.LoginWithURL(headscale.GetEndpoint())\n\trequire.NoError(t, err)\n\n\t// wait for the registration cache to expire\n\t// a little bit more than HEADSCALE_TUNING_REGISTER_CACHE_EXPIRATION (1m30s)\n\t//nolint:forbidigo // Intentional delay: must wait for real-time cache expiration (HEADSCALE_TUNING_REGISTER_CACHE_EXPIRATION=1m30s)\n\ttime.Sleep(2 * time.Minute)\n\n\tvar newUrl *url.URL\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tst, err := ts.Status()\n\t\tassert.NoError(c, err)\n\t\tassert.Equal(c, \"NeedsLogin\", st.BackendState)\n\n\t\t// get new AuthURL from daemon\n\t\tnewUrl, err = url.Parse(st.AuthURL)\n\t\tassert.NoError(c, err)\n\n\t\tassert.NotEqual(c, u.String(), st.AuthURL, \"AuthURL should change\")\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for registration cache to expire and status to reflect NeedsLogin\")\n\n\t_, err = doLoginURL(ts.Hostname(), newUrl)\n\trequire.NoError(t, err)\n\n\tlistUsers, err = headscale.ListUsers()\n\trequire.NoError(t, err)\n\tassert.Len(t, listUsers, 1)\n\n\twantUsers := []*v1.User{\n\t\t{\n\t\t\tId:         1,\n\t\t\tName:       \"user1\",\n\t\t\tEmail:      \"user1@headscale.net\",\n\t\t\tProvider:   \"oidc\",\n\t\t\tProviderId: scenario.mockOIDC.Issuer() + 
\"/user1\",\n\t\t},\n\t}\n\n\tsort.Slice(\n\t\tlistUsers, func(i, j int) bool {\n\t\t\treturn listUsers[i].GetId() < listUsers[j].GetId()\n\t\t},\n\t)\n\n\tif diff := cmp.Diff(\n\t\twantUsers,\n\t\tlistUsers,\n\t\tcmpopts.IgnoreUnexported(v1.User{}),\n\t\tcmpopts.IgnoreFields(v1.User{}, \"CreatedAt\"),\n\t); diff != \"\" {\n\t\tt.Fatalf(\"unexpected users: %s\", diff)\n\t}\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tlistNodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, listNodes, 1)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for expected node list after OIDC login\")\n}\n\n// TestOIDCMultipleOpenedLoginUrls tests the scenario:\n// - client (mostly Windows) opens multiple browser tabs with different login URLs\n// - client performs auth on the first opened browser tab\n//\n// This test makes sure that cookies are still valid for the first browser tab.\nfunc TestOIDCMultipleOpenedLoginUrls(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tscenario, err := NewScenario(\n\t\tScenarioSpec{\n\t\t\tOIDCUsers: []mockoidc.MockUser{\n\t\t\t\toidcMockUser(\"user1\", true),\n\t\t\t},\n\t\t},\n\t)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\toidcMap := map[string]string{\n\t\t\"HEADSCALE_OIDC_ISSUER\":             scenario.mockOIDC.Issuer(),\n\t\t\"HEADSCALE_OIDC_CLIENT_ID\":          scenario.mockOIDC.ClientID(),\n\t\t\"CREDENTIALS_DIRECTORY_TEST\":        \"/tmp\",\n\t\t\"HEADSCALE_OIDC_CLIENT_SECRET_PATH\": \"${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret\",\n\t}\n\n\terr = scenario.CreateHeadscaleEnvWithLoginURL(\n\t\tnil,\n\t\thsic.WithTestName(\"oidcauthrelog\"),\n\t\thsic.WithConfigEnv(oidcMap),\n\t\thsic.WithFileInContainer(\"/tmp/hs_client_oidc_secret\", []byte(scenario.mockOIDC.ClientSecret())),\n\t)\n\trequire.NoError(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\tlistUsers, err := headscale.ListUsers()\n\trequire.NoError(t, err)\n\tassert.Empty(t, listUsers)\n\n\tts, err := scenario.CreateTailscaleNode(\n\t\t\"unstable\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t)\n\trequire.NoError(t, err)\n\n\tu1, err := ts.LoginWithURL(headscale.GetEndpoint())\n\trequire.NoError(t, err)\n\n\tu2, err := ts.LoginWithURL(headscale.GetEndpoint())\n\trequire.NoError(t, err)\n\n\t// make sure login URLs are different\n\trequire.NotEqual(t, u1.String(), u2.String())\n\n\tloginClient, err := newLoginHTTPClient(ts.Hostname())\n\trequire.NoError(t, err)\n\n\t// open the first login URL \"in browser\"\n\t_, redirect1, err := doLoginURLWithClient(ts.Hostname(), u1, loginClient, false)\n\trequire.NoError(t, err)\n\t// open the second login URL \"in browser\"\n\t_, redirect2, err := doLoginURLWithClient(ts.Hostname(), u2, loginClient, false)\n\trequire.NoError(t, err)\n\n\t// two valid redirects with different state/nonce params\n\trequire.NotEqual(t, redirect1.String(), redirect2.String())\n\n\t// complete auth with the first opened \"browser tab\"\n\t_, _, err = doLoginURLWithClient(ts.Hostname(), redirect1, loginClient, true)\n\trequire.NoError(t, err)\n\n\tlistUsers, err = headscale.ListUsers()\n\trequire.NoError(t, err)\n\tassert.Len(t, listUsers, 1)\n\n\twantUsers := []*v1.User{\n\t\t{\n\t\t\tId:         1,\n\t\t\tName:       \"user1\",\n\t\t\tEmail:      \"user1@headscale.net\",\n\t\t\tProvider:   \"oidc\",\n\t\t\tProviderId: scenario.mockOIDC.Issuer() + \"/user1\",\n\t\t},\n\t}\n\n\tsort.Slice(\n\t\tlistUsers, func(i, j int) bool {\n\t\t\treturn 
listUsers[i].GetId() < listUsers[j].GetId()\n\t\t},\n\t)\n\n\tif diff := cmp.Diff(\n\t\twantUsers,\n\t\tlistUsers,\n\t\tcmpopts.IgnoreUnexported(v1.User{}),\n\t\tcmpopts.IgnoreFields(v1.User{}, \"CreatedAt\"),\n\t); diff != \"\" {\n\t\tt.Fatalf(\"unexpected users: %s\", diff)\n\t}\n\n\tassert.EventuallyWithT(\n\t\tt, func(c *assert.CollectT) {\n\t\t\tlistNodes, err := headscale.ListNodes()\n\t\t\tassert.NoError(c, err)\n\t\t\tassert.Len(c, listNodes, 1)\n\t\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for expected node list after OIDC login\",\n\t)\n}\n\n// TestOIDCReloginSameNodeSameUser tests the scenario where a single Tailscale client\n// authenticates using OIDC (OpenID Connect), logs out, and then logs back in as the same user.\n//\n// OIDC is an authentication layer built on top of OAuth 2.0 that allows users to authenticate\n// using external identity providers (like Google, Microsoft, etc.) rather than managing\n// credentials directly in headscale.\n//\n// This test validates the \"same user relogin\" behavior in headscale's OIDC authentication flow:\n// - A single client authenticates via OIDC as user1\n// - The client logs out, ending the session\n// - The same client logs back in via OIDC as the same user (user1)\n// - The test verifies that the user account persists correctly\n// - The test verifies that the machine key is preserved (since it's the same physical device)\n// - The test verifies that the node ID is preserved (since it's the same user on the same device)\n// - The test verifies that the node key is regenerated (since it's a new session)\n// - The test verifies that the client comes back online properly\n//\n// This scenario is important for normal user workflows where someone might need to restart\n// their Tailscale client, reboot their computer, or temporarily disconnect and reconnect.\n// It ensures that headscale properly handles session management while preserving device\n// identity and user associations.\n//\n// The test uses a single node scenario (unlike multi-node tests) to focus specifically on\n// the authentication and session management aspects rather than network topology changes.\n// The \"same node\" in the name refers to the same physical device/client, while \"same user\"\n// refers to authenticating with the same OIDC identity.\nfunc TestOIDCReloginSameNodeSameUser(t *testing.T) {\n\tIntegrationSkip(t)\n\n\t// Create scenario with same user for both login attempts\n\tscenario, err := NewScenario(ScenarioSpec{\n\t\tOIDCUsers: []mockoidc.MockUser{\n\t\t\toidcMockUser(\"user1\", true), // Initial login\n\t\t\toidcMockUser(\"user1\", true), // Relogin with same user\n\t\t},\n\t})\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\toidcMap := map[string]string{\n\t\t\"HEADSCALE_OIDC_ISSUER\":             scenario.mockOIDC.Issuer(),\n\t\t\"HEADSCALE_OIDC_CLIENT_ID\":          scenario.mockOIDC.ClientID(),\n\t\t\"CREDENTIALS_DIRECTORY_TEST\":        \"/tmp\",\n\t\t\"HEADSCALE_OIDC_CLIENT_SECRET_PATH\": \"${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret\",\n\t}\n\n\terr = scenario.CreateHeadscaleEnvWithLoginURL(\n\t\tnil,\n\t\thsic.WithTestName(\"oidcsameuser\"),\n\t\thsic.WithConfigEnv(oidcMap),\n\t\thsic.WithFileInContainer(\"/tmp/hs_client_oidc_secret\", []byte(scenario.mockOIDC.ClientSecret())),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\tts, err := scenario.CreateTailscaleNode(\"unstable\", 
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]))\n\trequire.NoError(t, err)\n\n\t// Initial login as user1\n\tu, err := ts.LoginWithURL(headscale.GetEndpoint())\n\trequire.NoError(t, err)\n\n\t_, err = doLoginURL(ts.Hostname(), u)\n\trequire.NoError(t, err)\n\n\tt.Logf(\"Validating initial user1 creation at %s\", time.Now().Format(TimestampFormat))\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tlistUsers, err := headscale.ListUsers()\n\t\tassert.NoError(ct, err, \"Failed to list users during initial validation\")\n\t\tassert.Len(ct, listUsers, 1, \"Expected exactly 1 user after first login, got %d\", len(listUsers))\n\n\t\twantUsers := []*v1.User{\n\t\t\t{\n\t\t\t\tId:         1,\n\t\t\t\tName:       \"user1\",\n\t\t\t\tEmail:      \"user1@headscale.net\",\n\t\t\t\tProvider:   \"oidc\",\n\t\t\t\tProviderId: scenario.mockOIDC.Issuer() + \"/user1\",\n\t\t\t},\n\t\t}\n\n\t\tsort.Slice(listUsers, func(i, j int) bool {\n\t\t\treturn listUsers[i].GetId() < listUsers[j].GetId()\n\t\t})\n\n\t\tif diff := cmp.Diff(wantUsers, listUsers, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, \"CreatedAt\")); diff != \"\" {\n\t\t\tct.Errorf(\"User validation failed after first login - unexpected users: %s\", diff)\n\t\t}\n\t}, 30*time.Second, 1*time.Second, \"validating user1 creation after initial OIDC login\")\n\n\tt.Logf(\"Validating initial node creation at %s\", time.Now().Format(TimestampFormat))\n\n\tvar initialNodes []*v1.Node\n\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tvar err error\n\n\t\tinitialNodes, err = headscale.ListNodes()\n\t\tassert.NoError(ct, err, \"Failed to list nodes during initial validation\")\n\t\tassert.Len(ct, initialNodes, 1, \"Expected exactly 1 node after first login, got %d\", len(initialNodes))\n\t}, 30*time.Second, 1*time.Second, \"validating initial node creation for user1 after OIDC login\")\n\n\t// Collect expected node IDs for validation after user1 initial login\n\texpectedNodes := make([]types.NodeID, 0, 1)\n\n\tvar nodeID uint64\n\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tstatus := ts.MustStatus()\n\t\tassert.NotEmpty(ct, status.Self.ID, \"Node ID should be populated in status\")\n\n\t\tvar err error\n\n\t\tnodeID, err = strconv.ParseUint(string(status.Self.ID), 10, 64)\n\t\tassert.NoError(ct, err, \"Failed to parse node ID from status\")\n\t}, 30*time.Second, 1*time.Second, \"waiting for node ID to be populated in status after initial login\")\n\n\texpectedNodes = append(expectedNodes, types.NodeID(nodeID))\n\n\t// Validate initial connection state for user1\n\tvalidateInitialConnection(t, headscale, expectedNodes)\n\n\t// Store initial node keys for comparison\n\tinitialMachineKey := initialNodes[0].GetMachineKey()\n\tinitialNodeKey := initialNodes[0].GetNodeKey()\n\tinitialNodeID := initialNodes[0].GetId()\n\n\t// Logout user1\n\terr = ts.Logout()\n\trequire.NoError(t, err)\n\n\t// TODO(kradalby): Not sure why we need to logout twice, but it fails and\n\t// logs in immediately after the first logout and I cannot reproduce it\n\t// manually.\n\terr = ts.Logout()\n\trequire.NoError(t, err)\n\n\t// Wait for logout to complete\n\tt.Logf(\"Waiting for user1 logout completion at %s\", time.Now().Format(TimestampFormat))\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t// Check that the logout completed\n\t\tstatus, err := ts.Status()\n\t\tassert.NoError(ct, err, \"Failed to get client status during logout validation\")\n\t\tassert.Equal(ct, \"NeedsLogin\", 
status.BackendState, \"Expected NeedsLogin state after logout, got %s\", status.BackendState)\n\t}, 30*time.Second, 1*time.Second, \"waiting for user1 logout to complete before same-user relogin\")\n\n\t// Validate node persistence during logout (node should remain in DB)\n\tt.Logf(\"Validating node persistence during logout at %s\", time.Now().Format(TimestampFormat))\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tlistNodes, err := headscale.ListNodes()\n\t\tassert.NoError(ct, err, \"Failed to list nodes during logout validation\")\n\t\tassert.Len(ct, listNodes, 1, \"Should still have exactly 1 node during logout (node should persist in DB), got %d\", len(listNodes))\n\t}, 30*time.Second, 1*time.Second, \"validating node persistence in database during same-user logout\")\n\n\t// Login again as the same user (user1)\n\tu, err = ts.LoginWithURL(headscale.GetEndpoint())\n\trequire.NoError(t, err)\n\n\t_, err = doLoginURL(ts.Hostname(), u)\n\trequire.NoError(t, err)\n\n\tt.Logf(\"Waiting for user1 relogin completion at %s\", time.Now().Format(TimestampFormat))\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tstatus, err := ts.Status()\n\t\tassert.NoError(ct, err, \"Failed to get client status during relogin validation\")\n\t\tassert.Equal(ct, \"Running\", status.BackendState, \"Expected Running state after user1 relogin, got %s\", status.BackendState)\n\t}, 30*time.Second, 1*time.Second, \"waiting for user1 relogin to complete (same user)\")\n\n\tt.Logf(\"Final validation: checking user persistence after same-user relogin at %s\", time.Now().Format(TimestampFormat))\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tlistUsers, err := headscale.ListUsers()\n\t\tassert.NoError(ct, err, \"Failed to list users during final validation\")\n\t\tassert.Len(ct, listUsers, 1, \"Should still have exactly 1 user after same-user relogin, got %d\", len(listUsers))\n\n\t\twantUsers := []*v1.User{\n\t\t\t{\n\t\t\t\tId:         1,\n\t\t\t\tName:       \"user1\",\n\t\t\t\tEmail:      \"user1@headscale.net\",\n\t\t\t\tProvider:   \"oidc\",\n\t\t\t\tProviderId: scenario.mockOIDC.Issuer() + \"/user1\",\n\t\t\t},\n\t\t}\n\n\t\tsort.Slice(listUsers, func(i, j int) bool {\n\t\t\treturn listUsers[i].GetId() < listUsers[j].GetId()\n\t\t})\n\n\t\tif diff := cmp.Diff(wantUsers, listUsers, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, \"CreatedAt\")); diff != \"\" {\n\t\t\tct.Errorf(\"Final user validation failed - user1 should persist after same-user relogin: %s\", diff)\n\t\t}\n\t}, 30*time.Second, 1*time.Second, \"validating user1 persistence after same-user OIDC relogin cycle\")\n\n\tvar finalNodes []*v1.Node\n\n\tt.Logf(\"Final node validation: checking node stability after same-user relogin at %s\", time.Now().Format(TimestampFormat))\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tfinalNodes, err = headscale.ListNodes()\n\t\tassert.NoError(ct, err, \"Failed to list nodes during final validation\")\n\t\tassert.Len(ct, finalNodes, 1, \"Should have exactly 1 node after same-user relogin, got %d\", len(finalNodes))\n\n\t\t// Validate node key behavior for same user relogin\n\t\tfinalNode := finalNodes[0]\n\n\t\t// Machine key should be preserved (same physical machine)\n\t\tassert.Equal(ct, initialMachineKey, finalNode.GetMachineKey(), \"Machine key should be preserved for same user same node relogin\")\n\n\t\t// Node ID should be preserved (same user, same machine)\n\t\tassert.Equal(ct, initialNodeID, finalNode.GetId(), \"Node ID should be 
preserved for same user same node relogin\")\n\n\t\t// Node key should be regenerated (new session after logout)\n\t\tassert.NotEqual(ct, initialNodeKey, finalNode.GetNodeKey(), \"Node key should be regenerated after logout/relogin even for same user\")\n\n\t\tt.Logf(\"Final validation complete - same user relogin key relationships verified at %s\", time.Now().Format(TimestampFormat))\n\t}, 60*time.Second, 2*time.Second, \"validating final node state after same-user OIDC relogin cycle with key preservation validation\")\n\n\t// Security validation: user1's node should be active after relogin\n\tactiveUser1NodeID := types.NodeID(finalNodes[0].GetId())\n\n\tt.Logf(\"Validating user1 node is online after same-user relogin at %s\", time.Now().Format(TimestampFormat))\n\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodeStore, err := headscale.DebugNodeStore()\n\t\tassert.NoError(c, err, \"Failed to get nodestore debug info\")\n\n\t\t// Check user1 node is online\n\t\tif node, exists := nodeStore[activeUser1NodeID]; exists {\n\t\t\tassert.NotNil(c, node.IsOnline, \"User1 node should have online status after same-user relogin\")\n\n\t\t\tif node.IsOnline != nil {\n\t\t\t\tassert.True(c, *node.IsOnline, \"User1 node should be online after same-user relogin\")\n\t\t\t}\n\t\t} else {\n\t\t\tassert.Fail(c, \"User1 node not found in nodestore after same-user relogin\")\n\t\t}\n\t}, 60*time.Second, 2*time.Second, \"validating user1 node is online after same-user OIDC relogin\")\n}\n\n
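// assertNodeOnline is an illustrative sketch of the nodestore online-check\n// pattern repeated in the tests above; it is not wired into them. It assumes,\n// as those checks do, that DebugNodeStore returns a map keyed by types.NodeID\n// whose values expose an IsOnline *bool.\nfunc assertNodeOnline(c *assert.CollectT, headscale ControlServer, id types.NodeID) {\n\tnodeStore, err := headscale.DebugNodeStore()\n\tif !assert.NoError(c, err, \"Failed to get nodestore debug info\") {\n\t\treturn\n\t}\n\n\tnode, exists := nodeStore[id]\n\tif !assert.True(c, exists, \"node %d not found in nodestore\", id) {\n\t\treturn\n\t}\n\n\tif assert.NotNil(c, node.IsOnline, \"node %d should have online status\", id) {\n\t\tassert.True(c, *node.IsOnline, \"node %d should be online\", id)\n\t}\n}\n\n// TestOIDCExpiryAfterRestart validates that node expiry is preserved\n// when a tailscaled client restarts and reconnects to headscale.\n//\n// This test reproduces the bug reported in https://github.com/juanfont/headscale/issues/2862\n// where OIDC expiry was reset to 0001-01-01 00:00:00 after tailscaled restart.\n//\n// Test flow:\n// 1. Node logs in with OIDC (gets 72h expiry)\n// 2. Verify expiry is set correctly in headscale\n// 3. Restart tailscaled container (simulates daemon restart)\n// 4. Wait for reconnection\n// 5. 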
Verify expiry is still set correctly (not zero).\nfunc TestOIDCExpiryAfterRestart(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tscenario, err := NewScenario(ScenarioSpec{\n\t\tOIDCUsers: []mockoidc.MockUser{\n\t\t\toidcMockUser(\"user1\", true),\n\t\t},\n\t})\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\toidcMap := map[string]string{\n\t\t\"HEADSCALE_OIDC_ISSUER\":             scenario.mockOIDC.Issuer(),\n\t\t\"HEADSCALE_OIDC_CLIENT_ID\":          scenario.mockOIDC.ClientID(),\n\t\t\"CREDENTIALS_DIRECTORY_TEST\":        \"/tmp\",\n\t\t\"HEADSCALE_OIDC_CLIENT_SECRET_PATH\": \"${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret\",\n\t\t\"HEADSCALE_OIDC_EXPIRY\":             \"72h\",\n\t}\n\n\terr = scenario.CreateHeadscaleEnvWithLoginURL(\n\t\tnil,\n\t\thsic.WithTestName(\"oidcexpiry\"),\n\t\thsic.WithConfigEnv(oidcMap),\n\t\thsic.WithFileInContainer(\"/tmp/hs_client_oidc_secret\", []byte(scenario.mockOIDC.ClientSecret())),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\t// Create and login tailscale client\n\tts, err := scenario.CreateTailscaleNode(\"unstable\", tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]))\n\trequire.NoError(t, err)\n\n\tu, err := ts.LoginWithURL(headscale.GetEndpoint())\n\trequire.NoError(t, err)\n\n\t_, err = doLoginURL(ts.Hostname(), u)\n\trequire.NoError(t, err)\n\n\tt.Logf(\"Validating initial login and expiry at %s\", time.Now().Format(TimestampFormat))\n\n\t// Verify initial expiry is set\n\tvar initialExpiry time.Time\n\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(ct, err)\n\t\tassert.Len(ct, nodes, 1)\n\n\t\tnode := nodes[0]\n\t\tassert.NotNil(ct, node.GetExpiry(), \"Expiry should be set after OIDC login\")\n\n\t\tif node.GetExpiry() != nil {\n\t\t\texpiryTime := node.GetExpiry().AsTime()\n\t\t\tassert.False(ct, expiryTime.IsZero(), \"Expiry should not be zero time\")\n\n\t\t\tinitialExpiry = expiryTime\n\t\t\tt.Logf(\"Initial expiry set to: %v (expires in %v)\", expiryTime, time.Until(expiryTime))\n\t\t}\n\t}, 30*time.Second, 1*time.Second, \"validating initial expiry after OIDC login\")\n\n\t// Now restart the tailscaled container\n\tt.Logf(\"Restarting tailscaled container at %s\", time.Now().Format(TimestampFormat))\n\n\terr = ts.Restart()\n\trequire.NoError(t, err, \"Failed to restart tailscaled container\")\n\n\tt.Logf(\"Tailscaled restarted, waiting for reconnection at %s\", time.Now().Format(TimestampFormat))\n\n\t// Wait for the node to come back online\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tstatus, err := ts.Status()\n\t\tif !assert.NoError(ct, err) {\n\t\t\treturn\n\t\t}\n\n\t\tif !assert.NotNil(ct, status) {\n\t\t\treturn\n\t\t}\n\n\t\tassert.Equal(ct, \"Running\", status.BackendState)\n\t}, 60*time.Second, 2*time.Second, \"waiting for tailscale to reconnect after restart\")\n\n\t// THE CRITICAL TEST: Verify expiry is still set correctly after restart\n\tt.Logf(\"Validating expiry preservation after restart at %s\", time.Now().Format(TimestampFormat))\n\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(ct, err)\n\t\tassert.Len(ct, nodes, 1, \"Should still have exactly 1 node after restart\")\n\n\t\tnode := nodes[0]\n\t\tassert.NotNil(ct, node.GetExpiry(), \"Expiry should NOT be nil after restart\")\n\n\t\tif node.GetExpiry() != nil {\n\t\t\texpiryTime := 
node.GetExpiry().AsTime()\n\n\t\t\t// This is the bug check - expiry should NOT be zero time\n\t\t\tassert.False(ct, expiryTime.IsZero(),\n\t\t\t\t\"BUG: Expiry was reset to zero time after tailscaled restart! This is issue #2862\")\n\n\t\t\t// Expiry should be exactly the same as before restart\n\t\t\tassert.Equal(ct, initialExpiry, expiryTime,\n\t\t\t\t\"Expiry should be exactly the same after restart, got %v, expected %v\",\n\t\t\t\texpiryTime, initialExpiry)\n\n\t\t\tt.Logf(\"SUCCESS: Expiry preserved after restart: %v (expires in %v)\",\n\t\t\t\texpiryTime, time.Until(expiryTime))\n\t\t}\n\t}, 30*time.Second, 1*time.Second, \"validating expiry preservation after restart\")\n}\n\n// TestOIDCACLPolicyOnJoin validates that ACL policies are correctly applied\n// to newly joined OIDC nodes without requiring a client restart.\n//\n// This test validates the fix for issue #2888:\n// https://github.com/juanfont/headscale/issues/2888\n//\n// Bug: Nodes joining via OIDC authentication did not get the appropriate ACL\n// policy applied until they restarted their client. This was a regression\n// introduced in v0.27.0.\n//\n// The test scenario:\n// 1. Creates a CLI user (gateway) with a node advertising a route\n// 2. Sets up ACL policy allowing all nodes to access advertised routes\n// 3. OIDC user authenticates and joins with a new node\n// 4. Verifies that the OIDC user's node IMMEDIATELY sees the advertised route\n//\n// Expected behavior:\n// - Without fix: OIDC node cannot see the route (PrimaryRoutes is nil/empty)\n// - With fix: OIDC node immediately sees the route in PrimaryRoutes\n//\n// Root cause: The buggy code called a.h.Change(c) immediately after user\n// creation but BEFORE node registration completed, creating a race condition\n// where policy change notifications were sent asynchronously before the node\n// was fully registered.\nfunc TestOIDCACLPolicyOnJoin(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tgatewayUser := \"gateway\"\n\toidcUser := \"oidcuser\"\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 1,\n\t\tUsers:        []string{gatewayUser},\n\t\tOIDCUsers: []mockoidc.MockUser{\n\t\t\toidcMockUser(oidcUser, true),\n\t\t},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\toidcMap := map[string]string{\n\t\t\"HEADSCALE_OIDC_ISSUER\":             scenario.mockOIDC.Issuer(),\n\t\t\"HEADSCALE_OIDC_CLIENT_ID\":          scenario.mockOIDC.ClientID(),\n\t\t\"CREDENTIALS_DIRECTORY_TEST\":        \"/tmp\",\n\t\t\"HEADSCALE_OIDC_CLIENT_SECRET_PATH\": \"${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret\",\n\t}\n\n\t// Create headscale environment with ACL policy that allows OIDC user\n\t// to access routes advertised by gateway user\n\terr = scenario.CreateHeadscaleEnvWithLoginURL(\n\t\t[]tsic.Option{\n\t\t\ttsic.WithAcceptRoutes(),\n\t\t},\n\t\thsic.WithTestName(\"oidcaclpolicy\"),\n\t\thsic.WithConfigEnv(oidcMap),\n\t\thsic.WithFileInContainer(\"/tmp/hs_client_oidc_secret\", []byte(scenario.mockOIDC.ClientSecret())),\n\t\thsic.WithACLPolicy(\n\t\t\t&policyv2.Policy{\n\t\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{prefixp(\"100.64.0.0/10\")},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(prefixp(\"100.64.0.0/10\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t\taliasWithPorts(prefixp(\"10.33.0.0/24\"), tailcfg.PortRangeAny),\n\t\t\t\t\t\t\taliasWithPorts(prefixp(\"10.44.0.0/24\"), 
tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tAutoApprovers: policyv2.AutoApproverPolicy{\n\t\t\t\t\tRoutes: map[netip.Prefix]policyv2.AutoApprovers{\n\t\t\t\t\t\tnetip.MustParsePrefix(\"10.33.0.0/24\"): {usernameApprover(\"gateway@test.no\"), usernameApprover(\"oidcuser@headscale.net\"), usernameApprover(\"jane.doe@example.com\")},\n\t\t\t\t\t\tnetip.MustParsePrefix(\"10.44.0.0/24\"): {usernameApprover(\"gateway@test.no\"), usernameApprover(\"oidcuser@headscale.net\"), usernameApprover(\"jane.doe@example.com\")},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\t// Get the gateway client (CLI user) - only one client at first\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\trequire.Len(t, allClients, 1, \"Should have exactly 1 client (gateway) before OIDC login\")\n\n\tgatewayClient := allClients[0]\n\n\t// Wait for initial sync (gateway logs in)\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t// Gateway advertises route 10.33.0.0/24\n\tadvertiseRoute := \"10.33.0.0/24\"\n\tcommand := []string{\n\t\t\"tailscale\",\n\t\t\"set\",\n\t\t\"--advertise-routes=\" + advertiseRoute,\n\t}\n\t_, _, err = gatewayClient.Execute(command)\n\trequire.NoErrorf(t, err, \"failed to advertise route: %s\", err)\n\n\t// Wait for route advertisement to propagate\n\tvar gatewayNodeID uint64\n\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(ct, err)\n\t\tassert.Len(ct, nodes, 1)\n\n\t\tgatewayNode := nodes[0]\n\t\tgatewayNodeID = gatewayNode.GetId()\n\t\tassert.Len(ct, gatewayNode.GetAvailableRoutes(), 1)\n\t\tassert.Contains(ct, gatewayNode.GetAvailableRoutes(), advertiseRoute)\n\t}, 10*time.Second, 500*time.Millisecond, \"route advertisement should propagate to headscale\")\n\n\t// Approve the advertised route\n\t_, err = headscale.ApproveRoutes(\n\t\tgatewayNodeID,\n\t\t[]netip.Prefix{netip.MustParsePrefix(advertiseRoute)},\n\t)\n\trequire.NoError(t, err)\n\n\t// Wait for route approval to propagate\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(ct, err)\n\t\tassert.Len(ct, nodes, 1)\n\n\t\tgatewayNode := nodes[0]\n\t\tassert.Len(ct, gatewayNode.GetApprovedRoutes(), 1)\n\t\tassert.Contains(ct, gatewayNode.GetApprovedRoutes(), advertiseRoute)\n\t}, 10*time.Second, 500*time.Millisecond, \"route approval should propagate to headscale\")\n\n\t// NOW create the OIDC user by having them join\n\t// This is where issue #2888 manifests - the new OIDC node should immediately\n\t// see the gateway's advertised route\n\tt.Logf(\"OIDC user joining at %s\", time.Now().Format(TimestampFormat))\n\n\t// Create OIDC user's tailscale node\n\toidcAdvertiseRoute := \"10.44.0.0/24\"\n\toidcClient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\ttsic.WithAcceptRoutes(),\n\t\ttsic.WithExtraLoginArgs([]string{\"--advertise-routes=\" + oidcAdvertiseRoute}),\n\t)\n\trequire.NoError(t, err)\n\n\t// OIDC login happens automatically via LoginWithURL\n\tloginURL, err := oidcClient.LoginWithURL(headscale.GetEndpoint())\n\trequire.NoError(t, err)\n\n\t_, err = doLoginURL(oidcClient.Hostname(), loginURL)\n\trequire.NoError(t, err)\n\n\tt.Logf(\"OIDC user logged in successfully at %s\", 
time.Now().Format(TimestampFormat))\n\n\t// THE CRITICAL TEST: Verify that the OIDC user's node can IMMEDIATELY\n\t// see the gateway's advertised route WITHOUT needing a client restart.\n\t//\n\t// This is where the bug manifests:\n\t// - Without fix: PrimaryRoutes will be nil/empty\n\t// - With fix: PrimaryRoutes immediately contains the advertised route\n\tt.Logf(\"Verifying OIDC user can immediately see advertised routes at %s\", time.Now().Format(TimestampFormat))\n\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tstatus, err := oidcClient.Status()\n\t\tassert.NoError(ct, err)\n\n\t\t// Find the gateway peer in the OIDC user's peer list\n\t\tvar gatewayPeer *ipnstate.PeerStatus\n\n\t\tfor _, peerKey := range status.Peers() {\n\t\t\tpeer := status.Peer[peerKey]\n\t\t\t// Gateway is the peer that's not the OIDC user\n\t\t\tif peer.UserID != status.Self.UserID {\n\t\t\t\tgatewayPeer = peer\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tassert.NotNil(ct, gatewayPeer, \"OIDC user should see gateway as peer\")\n\n\t\tif gatewayPeer != nil {\n\t\t\t// This is the critical assertion - PrimaryRoutes should NOT be nil\n\t\t\tassert.NotNil(ct, gatewayPeer.PrimaryRoutes,\n\t\t\t\t\"BUG #2888: Gateway peer PrimaryRoutes is nil - ACL policy not applied to new OIDC node!\")\n\n\t\t\tif gatewayPeer.PrimaryRoutes != nil {\n\t\t\t\troutes := gatewayPeer.PrimaryRoutes.AsSlice()\n\t\t\t\tassert.Contains(ct, routes, netip.MustParsePrefix(advertiseRoute),\n\t\t\t\t\t\"OIDC user should immediately see gateway's advertised route %s in PrimaryRoutes\", advertiseRoute)\n\t\t\t\tt.Logf(\"SUCCESS: OIDC user can see advertised route %s in gateway's PrimaryRoutes\", advertiseRoute)\n\t\t\t}\n\n\t\t\t// Also verify AllowedIPs includes the route\n\t\t\tif gatewayPeer.AllowedIPs != nil && gatewayPeer.AllowedIPs.Len() > 0 {\n\t\t\t\tallowedIPs := gatewayPeer.AllowedIPs.AsSlice()\n\t\t\t\tt.Logf(\"Gateway peer AllowedIPs: %v\", allowedIPs)\n\t\t\t}\n\t\t}\n\t}, 15*time.Second, 500*time.Millisecond,\n\t\t\"OIDC user should immediately see gateway's advertised route without client restart (issue #2888)\")\n\n\t// Verify that the Gateway node sees the OIDC node's advertised route (AutoApproveRoutes check)\n\tt.Logf(\"Verifying Gateway user can immediately see OIDC advertised routes at %s\", time.Now().Format(TimestampFormat))\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tstatus, err := gatewayClient.Status()\n\t\tassert.NoError(ct, err)\n\n\t\t// Find the OIDC peer in the Gateway user's peer list\n\t\tvar oidcPeer *ipnstate.PeerStatus\n\n\t\tfor _, peerKey := range status.Peers() {\n\t\t\tpeer := status.Peer[peerKey]\n\t\t\tif peer.UserID != status.Self.UserID {\n\t\t\t\toidcPeer = peer\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tassert.NotNil(ct, oidcPeer, \"Gateway user should see OIDC user as peer\")\n\n\t\tif oidcPeer != nil {\n\t\t\tassert.NotNil(ct, oidcPeer.PrimaryRoutes,\n\t\t\t\t\"BUG: OIDC peer PrimaryRoutes is nil - AutoApproveRoutes failed or overwritten!\")\n\n\t\t\tif oidcPeer.PrimaryRoutes != nil {\n\t\t\t\troutes := oidcPeer.PrimaryRoutes.AsSlice()\n\t\t\t\tassert.Contains(ct, routes, netip.MustParsePrefix(oidcAdvertiseRoute),\n\t\t\t\t\t\"Gateway user should immediately see OIDC's advertised route %s in PrimaryRoutes\", oidcAdvertiseRoute)\n\t\t\t}\n\t\t}\n\t}, 15*time.Second, 500*time.Millisecond,\n\t\t\"Gateway user should immediately see OIDC's advertised route (AutoApproveRoutes check)\")\n\n\t// Additional validation: Verify nodes in headscale match 
expectations\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(ct, err)\n\t\tassert.Len(ct, nodes, 2, \"Should have 2 nodes (gateway + oidcuser)\")\n\n\t\t// Verify OIDC user was created correctly\n\t\tusers, err := headscale.ListUsers()\n\t\tassert.NoError(ct, err)\n\t\t// Note: mockoidc may create additional default users (like jane.doe)\n\t\t// so we check for at least 2 users, not exactly 2\n\t\tassert.GreaterOrEqual(ct, len(users), 2, \"Should have at least 2 users (gateway CLI user + oidcuser)\")\n\n\t\t// Find gateway CLI user\n\t\tvar gatewayUser *v1.User\n\n\t\tfor _, user := range users {\n\t\t\tif user.GetName() == \"gateway\" && user.GetProvider() == \"\" {\n\t\t\t\tgatewayUser = user\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tassert.NotNil(ct, gatewayUser, \"Should have gateway CLI user\")\n\n\t\tif gatewayUser != nil {\n\t\t\tassert.Equal(ct, \"gateway\", gatewayUser.GetName())\n\t\t}\n\n\t\t// Find OIDC user\n\t\tvar oidcUserFound *v1.User\n\n\t\tfor _, user := range users {\n\t\t\tif user.GetName() == \"oidcuser\" && user.GetProvider() == \"oidc\" {\n\t\t\t\toidcUserFound = user\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tassert.NotNil(ct, oidcUserFound, \"Should have OIDC user\")\n\n\t\tif oidcUserFound != nil {\n\t\t\tassert.Equal(ct, \"oidcuser\", oidcUserFound.GetName())\n\t\t\tassert.Equal(ct, \"oidcuser@headscale.net\", oidcUserFound.GetEmail())\n\t\t}\n\t}, 10*time.Second, 500*time.Millisecond, \"headscale should have correct users and nodes\")\n\n\tt.Logf(\"Test completed successfully - issue #2888 fix validated\")\n}\n\n// TestOIDCReloginSameUserRoutesPreserved tests the scenario where:\n// - A node logs in via OIDC and advertises routes\n// - Routes are auto-approved and verified as SERVING\n// - The node logs out\n// - The node logs back in as the same user\n// - Routes should STILL be SERVING (not just approved/available)\n//\n// This test validates the fix for issue #2896:\n// https://github.com/juanfont/headscale/issues/2896\n//\n// Bug: When a node with already-approved routes restarts/re-authenticates,\n// the routes show as \"Approved\" and \"Available\" but NOT \"Serving\" (Primary).\n// A headscale restart would fix it, indicating a state management issue.\nfunc TestOIDCReloginSameUserRoutesPreserved(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tadvertiseRoute := \"10.55.0.0/24\"\n\n\t// Create scenario with same user for both login attempts\n\tscenario, err := NewScenario(ScenarioSpec{\n\t\tOIDCUsers: []mockoidc.MockUser{\n\t\t\toidcMockUser(\"user1\", true), // Initial login\n\t\t\toidcMockUser(\"user1\", true), // Relogin with same user\n\t\t},\n\t})\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\toidcMap := map[string]string{\n\t\t\"HEADSCALE_OIDC_ISSUER\":             scenario.mockOIDC.Issuer(),\n\t\t\"HEADSCALE_OIDC_CLIENT_ID\":          scenario.mockOIDC.ClientID(),\n\t\t\"CREDENTIALS_DIRECTORY_TEST\":        \"/tmp\",\n\t\t\"HEADSCALE_OIDC_CLIENT_SECRET_PATH\": \"${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret\",\n\t}\n\n\terr = scenario.CreateHeadscaleEnvWithLoginURL(\n\t\t[]tsic.Option{\n\t\t\ttsic.WithAcceptRoutes(),\n\t\t},\n\t\thsic.WithTestName(\"oidcrouterelogin\"),\n\t\thsic.WithConfigEnv(oidcMap),\n\t\thsic.WithFileInContainer(\"/tmp/hs_client_oidc_secret\", []byte(scenario.mockOIDC.ClientSecret())),\n\t\thsic.WithACLPolicy(\n\t\t\t&policyv2.Policy{\n\t\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\t\tSources: 
     []policyv2.Alias{policyv2.Wildcard},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{{Alias: policyv2.Wildcard, Ports: []tailcfg.PortRange{tailcfg.PortRangeAny}}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tAutoApprovers: policyv2.AutoApproverPolicy{\n\t\t\t\t\tRoutes: map[netip.Prefix]policyv2.AutoApprovers{\n\t\t\t\t\t\tnetip.MustParsePrefix(advertiseRoute): {usernameApprover(\"user1@headscale.net\")},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\t// Create client with route advertisement\n\tts, err := scenario.CreateTailscaleNode(\n\t\t\"unstable\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\ttsic.WithAcceptRoutes(),\n\t\ttsic.WithExtraLoginArgs([]string{\"--advertise-routes=\" + advertiseRoute}),\n\t)\n\trequire.NoError(t, err)\n\n\t// Initial login as user1\n\tu, err := ts.LoginWithURL(headscale.GetEndpoint())\n\trequire.NoError(t, err)\n\n\t_, err = doLoginURL(ts.Hostname(), u)\n\trequire.NoError(t, err)\n\n\t// Wait for client to be running\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tstatus, err := ts.Status()\n\t\tassert.NoError(ct, err)\n\t\tassert.Equal(ct, \"Running\", status.BackendState)\n\t}, 30*time.Second, 1*time.Second, \"waiting for initial login to complete\")\n\n\t// Step 1: Verify initial route is advertised, approved, and SERVING\n\tt.Logf(\"Step 1: Verifying initial route is advertised, approved, and SERVING at %s\", time.Now().Format(TimestampFormat))\n\n\tvar initialNode *v1.Node\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1, \"Should have exactly 1 node\")\n\n\t\tif len(nodes) == 1 {\n\t\t\tinitialNode = nodes[0]\n\t\t\t// Check: 1 announced, 1 approved, 1 serving (subnet route)\n\t\t\tassert.Lenf(c, initialNode.GetAvailableRoutes(), 1,\n\t\t\t\t\"Node should have 1 available route, got %v\", initialNode.GetAvailableRoutes())\n\t\t\tassert.Lenf(c, initialNode.GetApprovedRoutes(), 1,\n\t\t\t\t\"Node should have 1 approved route, got %v\", initialNode.GetApprovedRoutes())\n\t\t\tassert.Lenf(c, initialNode.GetSubnetRoutes(), 1,\n\t\t\t\t\"Node should have 1 serving (subnet) route, got %v - THIS IS THE BUG if empty\", initialNode.GetSubnetRoutes())\n\t\t\tassert.Contains(c, initialNode.GetSubnetRoutes(), advertiseRoute,\n\t\t\t\t\"Subnet routes should contain %s\", advertiseRoute)\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond, \"initial route should be serving\")\n\n\trequire.NotNil(t, initialNode, \"Initial node should be found\")\n\tinitialNodeID := initialNode.GetId()\n\tt.Logf(\"Initial node ID: %d, Available: %v, Approved: %v, Serving: %v\",\n\t\tinitialNodeID, initialNode.GetAvailableRoutes(), initialNode.GetApprovedRoutes(), initialNode.GetSubnetRoutes())\n\n\t// Step 2: Logout\n\tt.Logf(\"Step 2: Logging out at %s\", time.Now().Format(TimestampFormat))\n\n\terr = ts.Logout()\n\trequire.NoError(t, err)\n\n\t// Wait for logout to complete\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tstatus, err := ts.Status()\n\t\tassert.NoError(ct, err)\n\t\tassert.Equal(ct, \"NeedsLogin\", status.BackendState, \"Expected NeedsLogin state after logout\")\n\t}, 30*time.Second, 1*time.Second, \"waiting for logout to complete\")\n\n\tt.Logf(\"Logout completed, node should still exist in database\")\n\n\t// Verify node still exists (routes should still be in DB)\n\tassert.EventuallyWithT(t, 
func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1, \"Node should persist in database after logout\")\n\t}, 10*time.Second, 500*time.Millisecond, \"node should persist after logout\")\n\n\t// Step 3: Re-authenticate via OIDC as the same user\n\tt.Logf(\"Step 3: Re-authenticating with same user via OIDC at %s\", time.Now().Format(TimestampFormat))\n\n\tu, err = ts.LoginWithURL(headscale.GetEndpoint())\n\trequire.NoError(t, err)\n\n\t_, err = doLoginURL(ts.Hostname(), u)\n\trequire.NoError(t, err)\n\n\t// Wait for client to be running\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tstatus, err := ts.Status()\n\t\tassert.NoError(ct, err)\n\t\tassert.Equal(ct, \"Running\", status.BackendState, \"Expected Running state after relogin\")\n\t}, 30*time.Second, 1*time.Second, \"waiting for relogin to complete\")\n\n\tt.Logf(\"Re-authentication completed at %s\", time.Now().Format(TimestampFormat))\n\n\t// Step 4: THE CRITICAL TEST - Verify routes are STILL SERVING after re-authentication\n\tt.Logf(\"Step 4: Verifying routes are STILL SERVING after re-authentication at %s\", time.Now().Format(TimestampFormat))\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1, \"Should still have exactly 1 node after relogin\")\n\n\t\tif len(nodes) == 1 {\n\t\t\tnode := nodes[0]\n\t\t\tt.Logf(\"After relogin - Available: %v, Approved: %v, Serving: %v\",\n\t\t\t\tnode.GetAvailableRoutes(), node.GetApprovedRoutes(), node.GetSubnetRoutes())\n\n\t\t\t// This is where issue #2896 manifests:\n\t\t\t// - Available shows the route (from Hostinfo.RoutableIPs)\n\t\t\t// - Approved shows the route (from ApprovedRoutes)\n\t\t\t// - BUT Serving (SubnetRoutes/PrimaryRoutes) is EMPTY!\n\t\t\tassert.Lenf(c, node.GetAvailableRoutes(), 1,\n\t\t\t\t\"Node should have 1 available route after relogin, got %v\", node.GetAvailableRoutes())\n\t\t\tassert.Lenf(c, node.GetApprovedRoutes(), 1,\n\t\t\t\t\"Node should have 1 approved route after relogin, got %v\", node.GetApprovedRoutes())\n\t\t\tassert.Lenf(c, node.GetSubnetRoutes(), 1,\n\t\t\t\t\"BUG #2896: Node should have 1 SERVING route after relogin, got %v\", node.GetSubnetRoutes())\n\t\t\tassert.Contains(c, node.GetSubnetRoutes(), advertiseRoute,\n\t\t\t\t\"BUG #2896: Subnet routes should contain %s after relogin\", advertiseRoute)\n\n\t\t\t// Also verify node ID was preserved (same node, not new registration)\n\t\t\tassert.Equal(c, initialNodeID, node.GetId(),\n\t\t\t\t\"Node ID should be preserved after same-user relogin\")\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond,\n\t\t\"BUG #2896: routes should remain SERVING after OIDC logout/relogin with same user\")\n\n\tt.Logf(\"Test completed - verifying issue #2896 fix for OIDC\")\n}\n"
  },
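  {
    "path": "integration/eventuallywitht_sketch_test.go",
    "content": "package integration\n\n// NOTE: Illustrative sketch only; not exercised by the real suite. The\n// OIDC and web flow tests in this package poll headscale state with\n// testify's assert.EventuallyWithT instead of asserting once, because\n// node registration, route approval and map updates all propagate\n// asynchronously. A minimal, self-contained version of that polling\n// pattern is sketched below; fetchNodeCount is a hypothetical stand-in\n// for a call such as headscale.ListNodes().\n\nimport (\n\t\"errors\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\n// fetchNodeCount simulates an eventually consistent backend: the first\n// few calls fail, after which the expected node count is returned.\nfunc fetchNodeCount(calls *atomic.Int32) (int, error) {\n\tif calls.Add(1) < 3 {\n\t\treturn 0, errors.New(\"state not propagated yet\")\n\t}\n\n\treturn 2, nil\n}\n\nfunc TestEventuallyWithTPatternSketch(t *testing.T) {\n\tvar calls atomic.Int32\n\n\t// Each tick runs the closure against a fresh CollectT; collected\n\t// failures only fail the test if no full pass succeeds before the\n\t// waitFor deadline expires.\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tcount, err := fetchNodeCount(&calls)\n\t\tassert.NoError(ct, err)\n\t\tassert.Equal(ct, 2, count, \"expected both nodes to be registered\")\n\t}, 5*time.Second, 100*time.Millisecond, \"waiting for node registration to propagate\")\n}\n"
  },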
  {
    "path": "integration/auth_web_flow_test.go",
    "content": "package integration\n\nimport (\n\t\"net/netip\"\n\t\"slices\"\n\t\"testing\"\n\t\"time\"\n\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/integration/hsic\"\n\t\"github.com/juanfont/headscale/integration/integrationutil\"\n\t\"github.com/samber/lo\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestAuthWebFlowAuthenticationPingAll(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: len(MustTestVersions),\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create scenario: %s\", err)\n\t}\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnvWithLoginURL(\n\t\tnil,\n\t\thsic.WithTestName(\"webauthping\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\tallIps, err := scenario.ListTailscaleClientsIPs()\n\trequireNoErrListClientIPs(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t// assertClientsState(t, allClients)\n\n\tallAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {\n\t\treturn x.String()\n\t})\n\n\tsuccess := pingAllHelper(t, allClients, allAddrs)\n\tt.Logf(\"%d successful pings out of %d\", success, len(allClients)*len(allIps))\n}\n\nfunc TestAuthWebFlowLogoutAndReloginSameUser(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: len(MustTestVersions),\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnvWithLoginURL(\n\t\tnil,\n\t\thsic.WithTestName(\"weblogout\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\tallIps, err := scenario.ListTailscaleClientsIPs()\n\trequireNoErrListClientIPs(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t// assertClientsState(t, allClients)\n\n\tallAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {\n\t\treturn x.String()\n\t})\n\n\tsuccess := pingAllHelper(t, allClients, allAddrs)\n\tt.Logf(\"%d successful pings out of %d\", success, len(allClients)*len(allIps))\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\t// Collect expected node IDs for validation\n\texpectedNodes := collectExpectedNodeIDs(t, allClients)\n\n\t// Validate initial connection state\n\tvalidateInitialConnection(t, headscale, expectedNodes)\n\n\tvar listNodes []*v1.Node\n\n\tt.Logf(\"Validating initial node count after web auth at %s\", time.Now().Format(TimestampFormat))\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tvar err error\n\n\t\tlistNodes, err = headscale.ListNodes()\n\t\tassert.NoError(ct, err, \"Failed to list nodes after web authentication\")\n\t\tassert.Len(ct, listNodes, len(allClients), \"Expected %d nodes after web auth, got %d\", len(allClients), len(listNodes))\n\t}, 30*time.Second, 2*time.Second, \"validating node count matches client count after web authentication\")\n\n\tnodeCountBeforeLogout := len(listNodes)\n\tt.Logf(\"node count before logout: %d\", nodeCountBeforeLogout)\n\n\tclientIPs := make(map[TailscaleClient][]netip.Addr)\n\n\tfor _, client 
:= range allClients {\n\t\tips, err := client.IPs()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to get IPs for client %s: %s\", client.Hostname(), err)\n\t\t}\n\n\t\tclientIPs[client] = ips\n\t}\n\n\tfor _, client := range allClients {\n\t\terr := client.Logout()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to logout client %s: %s\", client.Hostname(), err)\n\t\t}\n\t}\n\n\terr = scenario.WaitForTailscaleLogout()\n\trequireNoErrLogout(t, err)\n\n\t// Validate that all nodes are offline after logout\n\tvalidateLogoutComplete(t, headscale, expectedNodes)\n\n\tt.Logf(\"all clients logged out\")\n\n\tfor _, userName := range spec.Users {\n\t\terr = scenario.RunTailscaleUpWithURL(userName, headscale.GetEndpoint())\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to run tailscale up (%q): %s\", headscale.GetEndpoint(), err)\n\t\t}\n\t}\n\n\tt.Logf(\"all clients logged in again\")\n\n\tt.Logf(\"Validating node persistence after logout at %s\", time.Now().Format(TimestampFormat))\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tvar err error\n\n\t\tlistNodes, err = headscale.ListNodes()\n\t\tassert.NoError(ct, err, \"Failed to list nodes after web flow logout\")\n\t\tassert.Len(ct, listNodes, nodeCountBeforeLogout, \"Node count should remain unchanged after logout - expected %d nodes, got %d\", nodeCountBeforeLogout, len(listNodes))\n\t}, 60*time.Second, 2*time.Second, \"validating node persistence in database after web flow logout\")\n\tt.Logf(\"node count first login: %d, after relogin: %d\", nodeCountBeforeLogout, len(listNodes))\n\n\t// Validate connection state after relogin\n\tvalidateReloginComplete(t, headscale, expectedNodes)\n\n\tallIps, err = scenario.ListTailscaleClientsIPs()\n\trequireNoErrListClientIPs(t, err)\n\n\tallAddrs = lo.Map(allIps, func(x netip.Addr, index int) string {\n\t\treturn x.String()\n\t})\n\n\tsuccess = pingAllHelper(t, allClients, allAddrs)\n\tt.Logf(\"%d successful pings out of %d\", success, len(allClients)*len(allIps))\n\n\tfor _, client := range allClients {\n\t\tips, err := client.IPs()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to get IPs for client %s: %s\", client.Hostname(), err)\n\t\t}\n\n\t\t// let's check that the IPs are the same\n\t\tif len(ips) != len(clientIPs[client]) {\n\t\t\tt.Fatalf(\"IPs changed for client %s\", client.Hostname())\n\t\t}\n\n\t\tfor _, ip := range ips {\n\t\t\tfound := slices.Contains(clientIPs[client], ip)\n\n\t\t\tif !found {\n\t\t\t\tt.Fatalf(\n\t\t\t\t\t\"IPs changed for client %s. 
Used to be %v now %v\",\n\t\t\t\t\tclient.Hostname(),\n\t\t\t\t\tclientIPs[client],\n\t\t\t\t\tips,\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t}\n\n\tt.Logf(\"all clients IPs are the same\")\n}\n\n// TestAuthWebFlowLogoutAndReloginNewUser tests the scenario where multiple Tailscale clients\n// initially authenticate using the web-based authentication flow (where users visit a URL\n// in their browser to authenticate), then all clients log out and log back in as a different user.\n//\n// This test validates the \"user switching\" behavior in headscale's web authentication flow:\n// - Multiple clients authenticate via web flow, each to their respective users (user1, user2)\n// - All clients log out simultaneously\n// - All clients log back in via web flow, but this time they all authenticate as user1\n// - The test verifies that user1 ends up with all the client nodes\n// - The test verifies that user2's original nodes still exist in the database but are offline\n// - The test verifies network connectivity works after the user switch\n//\n// This scenario is important for organizations that need to reassign devices between users\n// or when consolidating multiple user accounts. It ensures that headscale properly handles\n// the security implications of user switching while maintaining node persistence in the database.\n//\n// The test uses headscale's web authentication flow, which is the most user-friendly method\n// where authentication happens through a web browser rather than pre-shared keys or OIDC.\nfunc TestAuthWebFlowLogoutAndReloginNewUser(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: len(MustTestVersions),\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnvWithLoginURL(\n\t\tnil,\n\t\thsic.WithTestName(\"webflowrelnewuser\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\t// Sanity check that IPs were allocated; allIps is re-fetched after\n\t// the user switch before it is used for connectivity checks.\n\tvar allIps []netip.Addr\n\n\tallIps, err = scenario.ListTailscaleClientsIPs()\n\trequireNoErrListClientIPs(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\t// Collect expected node IDs for validation\n\texpectedNodes := collectExpectedNodeIDs(t, allClients)\n\n\t// Validate initial connection state\n\tvalidateInitialConnection(t, headscale, expectedNodes)\n\n\tvar listNodes []*v1.Node\n\n\tt.Logf(\"Validating initial node count after web auth at %s\", time.Now().Format(TimestampFormat))\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tvar err error\n\n\t\tlistNodes, err = headscale.ListNodes()\n\t\tassert.NoError(ct, err, \"Failed to list nodes after initial web authentication\")\n\t\tassert.Len(ct, listNodes, len(allClients), \"Expected %d nodes after web auth, got %d\", len(allClients), len(listNodes))\n\t}, 30*time.Second, 2*time.Second, \"validating node count matches client count after initial web authentication\")\n\n\tnodeCountBeforeLogout := len(listNodes)\n\tt.Logf(\"node count before logout: %d\", nodeCountBeforeLogout)\n\n\t// Log out all clients\n\tfor _, client := range allClients {\n\t\terr := client.Logout()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to logout client %s: %s\", client.Hostname(), err)\n\t\t}\n\t}\n\n\terr = 
scenario.WaitForTailscaleLogout()\n\trequireNoErrLogout(t, err)\n\n\t// Validate that all nodes are offline after logout\n\tvalidateLogoutComplete(t, headscale, expectedNodes)\n\n\tt.Logf(\"all clients logged out\")\n\n\t// Log all clients back in as user1 using web flow\n\t// We manually iterate over all clients and authenticate each one as user1\n\t// This tests the cross-user re-authentication behavior where ALL clients\n\t// (including those originally from user2) are registered to user1\n\tfor _, client := range allClients {\n\t\tloginURL, err := client.LoginWithURL(headscale.GetEndpoint())\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to get login URL for client %s: %s\", client.Hostname(), err)\n\t\t}\n\n\t\tbody, err := doLoginURL(client.Hostname(), loginURL)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to complete login for client %s: %s\", client.Hostname(), err)\n\t\t}\n\n\t\t// Register all clients as user1 (this is where cross-user registration happens)\n\t\t// This simulates: headscale auth register --auth-id <id> --user user1\n\t\tif err := scenario.runHeadscaleRegister(\"user1\", body); err != nil {\n\t\t\tt.Fatalf(\"failed to register client %s as user1: %s\", client.Hostname(), err)\n\t\t}\n\t}\n\n\t// Wait for all clients to reach running state\n\tfor _, client := range allClients {\n\t\terr := client.WaitForRunning(integrationutil.PeerSyncTimeout())\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s tailscale node has not reached running: %s\", client.Hostname(), err)\n\t\t}\n\t}\n\n\tt.Logf(\"all clients logged back in as user1\")\n\n\tvar user1Nodes []*v1.Node\n\n\tt.Logf(\"Validating user1 node count after relogin at %s\", time.Now().Format(TimestampFormat))\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tvar err error\n\n\t\tuser1Nodes, err = headscale.ListNodes(\"user1\")\n\t\tassert.NoError(ct, err, \"Failed to list nodes for user1 after web flow relogin\")\n\t\tassert.Len(ct, user1Nodes, len(allClients), \"User1 should have all %d clients after web flow relogin, got %d nodes\", len(allClients), len(user1Nodes))\n\t}, 60*time.Second, 2*time.Second, \"validating user1 has all client nodes after web flow user switch relogin\")\n\n\t// Collect expected node IDs for user1 after relogin\n\texpectedUser1Nodes := make([]types.NodeID, 0, len(user1Nodes))\n\tfor _, node := range user1Nodes {\n\t\texpectedUser1Nodes = append(expectedUser1Nodes, types.NodeID(node.GetId()))\n\t}\n\n\t// Validate connection state after relogin as user1\n\tvalidateReloginComplete(t, headscale, expectedUser1Nodes)\n\n\t// Validate that user2's old nodes still exist in database (but are expired/offline)\n\t// When CLI registration creates new nodes for user1, user2's old nodes remain\n\tvar user2Nodes []*v1.Node\n\n\tt.Logf(\"Validating user2 old nodes remain in database after CLI registration to user1 at %s\", time.Now().Format(TimestampFormat))\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tvar err error\n\n\t\tuser2Nodes, err = headscale.ListNodes(\"user2\")\n\t\tassert.NoError(ct, err, \"Failed to list nodes for user2 after CLI registration to user1\")\n\t\tassert.Len(ct, user2Nodes, len(allClients)/2, \"User2 should still have %d old nodes (likely expired) after CLI registration to user1, got %d nodes\", len(allClients)/2, len(user2Nodes))\n\t}, 30*time.Second, 2*time.Second, \"validating user2 old nodes remain in database after CLI registration to user1\")\n\n\tt.Logf(\"Validating client login states after web flow user switch at %s\", time.Now().Format(TimestampFormat))\n\n\tfor _, client := range allClients {\n\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) 
{\n\t\t\tstatus, err := client.Status()\n\t\t\tassert.NoError(ct, err, \"Failed to get status for client %s\", client.Hostname())\n\t\t\tassert.Equal(ct, \"user1@test.no\", status.User[status.Self.UserID].LoginName, \"Client %s should be logged in as user1 after web flow user switch, got %s\", client.Hostname(), status.User[status.Self.UserID].LoginName)\n\t\t}, 30*time.Second, 2*time.Second, \"validating %s is logged in as user1 after web flow user switch\", client.Hostname())\n\t}\n\n\t// Test connectivity after user switch\n\tallIps, err = scenario.ListTailscaleClientsIPs()\n\trequireNoErrListClientIPs(t, err)\n\n\tallAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {\n\t\treturn x.String()\n\t})\n\n\tsuccess := pingAllHelper(t, allClients, allAddrs)\n\tt.Logf(\"%d successful pings out of %d after web flow user switch\", success, len(allClients)*len(allIps))\n}\n"
  },
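  {
    "path": "integration/addrset_sketch_test.go",
    "content": "package integration\n\n// NOTE: Illustrative sketch only; not used by the suite. The relogin\n// tests in auth_web_flow_test.go verify that each client keeps the same\n// IPs across logout/login by comparing slice lengths and scanning the\n// refreshed slice with slices.Contains. An order-insensitive set\n// comparison expressing the same idea is sketched here; sameAddrSet is\n// a hypothetical helper, not part of the actual harness.\n\nimport (\n\t\"net/netip\"\n\t\"slices\"\n)\n\n// sameAddrSet reports whether a and b hold exactly the same addresses,\n// ignoring order. The inputs are cloned so the callers' slices are\n// left untouched.\nfunc sameAddrSet(a, b []netip.Addr) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\n\tas := slices.Clone(a)\n\tbs := slices.Clone(b)\n\tslices.SortFunc(as, func(x, y netip.Addr) int { return x.Compare(y) })\n\tslices.SortFunc(bs, func(x, y netip.Addr) int { return x.Compare(y) })\n\n\treturn slices.Equal(as, bs)\n}\n"
  },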
  {
    "path": "integration/cli_test.go",
    "content": "package integration\n\nimport (\n\t\"cmp\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"slices\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\ttcmp \"github.com/google/go-cmp/cmp\"\n\t\"github.com/google/go-cmp/cmp/cmpopts\"\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\tpolicyv2 \"github.com/juanfont/headscale/hscontrol/policy/v2\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/integration/hsic\"\n\t\"github.com/juanfont/headscale/integration/tsic\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/tailcfg\"\n)\n\nfunc executeAndUnmarshal[T any](headscale ControlServer, command []string, result T) error {\n\tstr, err := headscale.Execute(command)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal([]byte(str), result)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to unmarshal: %w\\n command err: %s\", err, str)\n\t}\n\n\treturn nil\n}\n\n// Interface ensuring that we can sort structs from gRPC that\n// have an ID field.\ntype GRPCSortable interface {\n\tGetId() uint64\n}\n\nfunc sortWithID[T GRPCSortable](a, b T) int {\n\treturn cmp.Compare(a.GetId(), b.GetId())\n}\n\nfunc TestUserCommand(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tUsers: []string{\"user1\", \"user2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName(\"cli-user\"))\n\trequire.NoError(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\tvar (\n\t\tlistUsers []*v1.User\n\t\tresult    []string\n\t)\n\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\terr := executeAndUnmarshal(headscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"users\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&listUsers,\n\t\t)\n\t\tassert.NoError(ct, err)\n\n\t\tslices.SortFunc(listUsers, sortWithID)\n\t\tresult = []string{listUsers[0].GetName(), listUsers[1].GetName()}\n\n\t\tassert.Equal(\n\t\t\tct,\n\t\t\t[]string{\"user1\", \"user2\"},\n\t\t\tresult,\n\t\t\t\"Should have user1 and user2 in users list\",\n\t\t)\n\t}, 20*time.Second, 1*time.Second)\n\n\t_, err = headscale.Execute(\n\t\t[]string{\n\t\t\t\"headscale\",\n\t\t\t\"users\",\n\t\t\t\"rename\",\n\t\t\t\"--output=json\",\n\t\t\tfmt.Sprintf(\"--identifier=%d\", listUsers[1].GetId()),\n\t\t\t\"--new-name=newname\",\n\t\t},\n\t)\n\trequire.NoError(t, err)\n\n\tvar listAfterRenameUsers []*v1.User\n\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\terr := executeAndUnmarshal(headscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"users\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&listAfterRenameUsers,\n\t\t)\n\t\tassert.NoError(ct, err)\n\n\t\tslices.SortFunc(listAfterRenameUsers, sortWithID)\n\t\tresult = []string{listAfterRenameUsers[0].GetName(), listAfterRenameUsers[1].GetName()}\n\n\t\tassert.Equal(\n\t\t\tct,\n\t\t\t[]string{\"user1\", \"newname\"},\n\t\t\tresult,\n\t\t\t\"Should have user1 and newname after rename operation\",\n\t\t)\n\t}, 20*time.Second, 1*time.Second)\n\n\tvar listByUsername []*v1.User\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\terr = 
executeAndUnmarshal(headscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"users\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t\t\"--name=user1\",\n\t\t\t},\n\t\t\t&listByUsername,\n\t\t)\n\t\tassert.NoError(c, err)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for user list by username\")\n\n\tslices.SortFunc(listByUsername, sortWithID)\n\n\twant := []*v1.User{\n\t\t{\n\t\t\tId:    1,\n\t\t\tName:  \"user1\",\n\t\t\tEmail: \"user1@test.no\",\n\t\t},\n\t}\n\n\tif diff := tcmp.Diff(want, listByUsername, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, \"CreatedAt\")); diff != \"\" {\n\t\tt.Errorf(\"unexpected users (-want +got):\\n%s\", diff)\n\t}\n\n\tvar listByID []*v1.User\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\terr = executeAndUnmarshal(headscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"users\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t\t\"--identifier=1\",\n\t\t\t},\n\t\t\t&listByID,\n\t\t)\n\t\tassert.NoError(c, err)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for user list by ID\")\n\n\tslices.SortFunc(listByID, sortWithID)\n\n\twant = []*v1.User{\n\t\t{\n\t\t\tId:    1,\n\t\t\tName:  \"user1\",\n\t\t\tEmail: \"user1@test.no\",\n\t\t},\n\t}\n\n\tif diff := tcmp.Diff(want, listByID, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, \"CreatedAt\")); diff != \"\" {\n\t\tt.Errorf(\"unexpected users (-want +got):\\n%s\", diff)\n\t}\n\n\tdeleteResult, err := headscale.Execute(\n\t\t[]string{\n\t\t\t\"headscale\",\n\t\t\t\"users\",\n\t\t\t\"destroy\",\n\t\t\t\"--force\",\n\t\t\t// Delete \"user1\"\n\t\t\t\"--identifier=1\",\n\t\t},\n\t)\n\trequire.NoError(t, err)\n\tassert.Contains(t, deleteResult, \"User destroyed\")\n\n\tvar listAfterIDDelete []*v1.User\n\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\terr := executeAndUnmarshal(headscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"users\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&listAfterIDDelete,\n\t\t)\n\t\tassert.NoError(ct, err)\n\n\t\tslices.SortFunc(listAfterIDDelete, sortWithID)\n\n\t\twant := []*v1.User{\n\t\t\t{\n\t\t\t\tId:    2,\n\t\t\t\tName:  \"newname\",\n\t\t\t\tEmail: \"user2@test.no\",\n\t\t\t},\n\t\t}\n\n\t\tif diff := tcmp.Diff(want, listAfterIDDelete, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, \"CreatedAt\")); diff != \"\" {\n\t\t\tassert.Fail(ct, \"unexpected users\", \"diff (-want +got):\\n%s\", diff)\n\t\t}\n\t}, 20*time.Second, 1*time.Second)\n\n\tdeleteResult, err = headscale.Execute(\n\t\t[]string{\n\t\t\t\"headscale\",\n\t\t\t\"users\",\n\t\t\t\"destroy\",\n\t\t\t\"--force\",\n\t\t\t\"--name=newname\",\n\t\t},\n\t)\n\trequire.NoError(t, err)\n\tassert.Contains(t, deleteResult, \"User destroyed\")\n\n\tvar listAfterNameDelete []v1.User\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\terr = executeAndUnmarshal(headscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"users\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&listAfterNameDelete,\n\t\t)\n\t\tassert.NoError(c, err)\n\t\tassert.Empty(c, listAfterNameDelete)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for user list after name delete\")\n}\n\nfunc TestPreAuthKeyCommand(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tuser := \"preauthkeyspace\"\n\tcount := 3\n\n\tspec := ScenarioSpec{\n\t\tUsers: []string{user},\n\t}\n\n\tscenario, err := 
NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName(\"clipak\"))\n\trequire.NoError(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\tkeys := make([]*v1.PreAuthKey, count)\n\n\trequire.NoError(t, err)\n\n\tfor index := range count {\n\t\tvar preAuthKey v1.PreAuthKey\n\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\terr := executeAndUnmarshal(\n\t\t\t\theadscale,\n\t\t\t\t[]string{\n\t\t\t\t\t\"headscale\",\n\t\t\t\t\t\"preauthkeys\",\n\t\t\t\t\t\"--user\",\n\t\t\t\t\t\"1\",\n\t\t\t\t\t\"create\",\n\t\t\t\t\t\"--reusable\",\n\t\t\t\t\t\"--expiration\",\n\t\t\t\t\t\"24h\",\n\t\t\t\t\t\"--output\",\n\t\t\t\t\t\"json\",\n\t\t\t\t\t\"--tags\",\n\t\t\t\t\t\"tag:test1,tag:test2\",\n\t\t\t\t},\n\t\t\t\t&preAuthKey,\n\t\t\t)\n\t\t\tassert.NoError(c, err)\n\t\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for preauth key creation\")\n\n\t\tkeys[index] = &preAuthKey\n\t}\n\n\tassert.Len(t, keys, 3)\n\n\tvar listedPreAuthKeys []v1.PreAuthKey\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\terr = executeAndUnmarshal(\n\t\t\theadscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"preauthkeys\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&listedPreAuthKeys,\n\t\t)\n\t\tassert.NoError(c, err)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for preauth keys list\")\n\n\t// There is one key created by \"scenario.CreateHeadscaleEnv\"\n\tassert.Len(t, listedPreAuthKeys, 4)\n\n\tassert.Equal(\n\t\tt,\n\t\t[]uint64{keys[0].GetId(), keys[1].GetId(), keys[2].GetId()},\n\t\t[]uint64{\n\t\t\tlistedPreAuthKeys[1].GetId(),\n\t\t\tlistedPreAuthKeys[2].GetId(),\n\t\t\tlistedPreAuthKeys[3].GetId(),\n\t\t},\n\t)\n\n\t// New keys show prefix after listing, so check the created keys instead\n\tassert.NotEmpty(t, keys[0].GetKey())\n\tassert.NotEmpty(t, keys[1].GetKey())\n\tassert.NotEmpty(t, keys[2].GetKey())\n\n\tassert.True(t, listedPreAuthKeys[1].GetExpiration().AsTime().After(time.Now()))\n\tassert.True(t, listedPreAuthKeys[2].GetExpiration().AsTime().After(time.Now()))\n\tassert.True(t, listedPreAuthKeys[3].GetExpiration().AsTime().After(time.Now()))\n\n\tassert.True(\n\t\tt,\n\t\tlistedPreAuthKeys[1].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)),\n\t)\n\tassert.True(\n\t\tt,\n\t\tlistedPreAuthKeys[2].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)),\n\t)\n\tassert.True(\n\t\tt,\n\t\tlistedPreAuthKeys[3].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)),\n\t)\n\n\tfor index := range listedPreAuthKeys {\n\t\tif index == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tassert.Equal(\n\t\t\tt,\n\t\t\t[]string{\"tag:test1\", \"tag:test2\"},\n\t\t\tlistedPreAuthKeys[index].GetAclTags(),\n\t\t)\n\t}\n\n\t// Test key expiry\n\t_, err = headscale.Execute(\n\t\t[]string{\n\t\t\t\"headscale\",\n\t\t\t\"preauthkeys\",\n\t\t\t\"expire\",\n\t\t\t\"--id\",\n\t\t\tstrconv.FormatUint(keys[0].GetId(), 10),\n\t\t},\n\t)\n\trequire.NoError(t, err)\n\n\tvar listedPreAuthKeysAfterExpire []v1.PreAuthKey\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\terr = executeAndUnmarshal(\n\t\t\theadscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"preauthkeys\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&listedPreAuthKeysAfterExpire,\n\t\t)\n\t\tassert.NoError(c, err)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for preauth keys 
list after expire\")\n\n\tassert.True(t, listedPreAuthKeysAfterExpire[1].GetExpiration().AsTime().Before(time.Now()))\n\tassert.True(t, listedPreAuthKeysAfterExpire[2].GetExpiration().AsTime().After(time.Now()))\n\tassert.True(t, listedPreAuthKeysAfterExpire[3].GetExpiration().AsTime().After(time.Now()))\n}\n\nfunc TestPreAuthKeyCommandWithoutExpiry(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tuser := \"pre-auth-key-without-exp-user\"\n\tspec := ScenarioSpec{\n\t\tUsers: []string{user},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName(\"clipaknaexp\"))\n\trequire.NoError(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\tvar preAuthKey v1.PreAuthKey\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\terr = executeAndUnmarshal(\n\t\t\theadscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"preauthkeys\",\n\t\t\t\t\"--user\",\n\t\t\t\t\"1\",\n\t\t\t\t\"create\",\n\t\t\t\t\"--reusable\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&preAuthKey,\n\t\t)\n\t\tassert.NoError(c, err)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for preauth key creation without expiry\")\n\n\tvar listedPreAuthKeys []v1.PreAuthKey\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\terr = executeAndUnmarshal(\n\t\t\theadscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"preauthkeys\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&listedPreAuthKeys,\n\t\t)\n\t\tassert.NoError(c, err)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for preauth keys list\")\n\n\t// There is one key created by \"scenario.CreateHeadscaleEnv\"\n\tassert.Len(t, listedPreAuthKeys, 2)\n\n\tassert.True(t, listedPreAuthKeys[1].GetExpiration().AsTime().After(time.Now()))\n\tassert.True(\n\t\tt,\n\t\tlistedPreAuthKeys[1].GetExpiration().AsTime().Before(time.Now().Add(time.Minute*70)),\n\t)\n}\n\nfunc TestPreAuthKeyCommandReusableEphemeral(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tuser := \"pre-auth-key-reus-ephm-user\"\n\tspec := ScenarioSpec{\n\t\tUsers: []string{user},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName(\"clipakresueeph\"))\n\trequire.NoError(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\tvar preAuthReusableKey v1.PreAuthKey\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\terr = executeAndUnmarshal(\n\t\t\theadscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"preauthkeys\",\n\t\t\t\t\"--user\",\n\t\t\t\t\"1\",\n\t\t\t\t\"create\",\n\t\t\t\t\"--reusable=true\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&preAuthReusableKey,\n\t\t)\n\t\tassert.NoError(c, err)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for reusable preauth key creation\")\n\n\tvar preAuthEphemeralKey v1.PreAuthKey\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\terr = executeAndUnmarshal(\n\t\t\theadscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"preauthkeys\",\n\t\t\t\t\"--user\",\n\t\t\t\t\"1\",\n\t\t\t\t\"create\",\n\t\t\t\t\"--ephemeral=true\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&preAuthEphemeralKey,\n\t\t)\n\t\tassert.NoError(c, err)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for ephemeral preauth key 
creation\")\n\n\tassert.True(t, preAuthEphemeralKey.GetEphemeral())\n\tassert.False(t, preAuthEphemeralKey.GetReusable())\n\n\tvar listedPreAuthKeys []v1.PreAuthKey\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\terr = executeAndUnmarshal(\n\t\t\theadscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"preauthkeys\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&listedPreAuthKeys,\n\t\t)\n\t\tassert.NoError(c, err)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for preauth keys list after reusable/ephemeral creation\")\n\n\t// There is one key created by \"scenario.CreateHeadscaleEnv\"\n\tassert.Len(t, listedPreAuthKeys, 3)\n}\n\nfunc TestPreAuthKeyCorrectUserLoggedInCommand(t *testing.T) {\n\tIntegrationSkip(t)\n\n\t//nolint:goconst // test data, not worth extracting\n\tuser1 := \"user1\"\n\t//nolint:goconst // test data, not worth extracting\n\tuser2 := \"user2\"\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 1,\n\t\tUsers:        []string{user1},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithTestName(\"cli-paklogin\"),\n\t)\n\trequire.NoError(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\tu2, err := headscale.CreateUser(user2)\n\trequire.NoError(t, err)\n\n\tvar user2Key v1.PreAuthKey\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\terr = executeAndUnmarshal(\n\t\t\theadscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"preauthkeys\",\n\t\t\t\t\"--user\",\n\t\t\t\tstrconv.FormatUint(u2.GetId(), 10),\n\t\t\t\t\"create\",\n\t\t\t\t\"--reusable\",\n\t\t\t\t\"--expiration\",\n\t\t\t\t\"24h\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t\t\"--tags\",\n\t\t\t\t\"tag:test1,tag:test2\",\n\t\t\t},\n\t\t\t&user2Key,\n\t\t)\n\t\tassert.NoError(c, err)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for user2 preauth key creation\")\n\n\tvar listNodes []*v1.Node\n\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tvar err error\n\n\t\tlistNodes, err = headscale.ListNodes()\n\t\tassert.NoError(ct, err)\n\t\tassert.Len(ct, listNodes, 1, \"Should have exactly 1 node for user1\")\n\t\tassert.Equal(ct, user1, listNodes[0].GetUser().GetName(), \"Node should belong to user1\")\n\t}, 15*time.Second, 1*time.Second)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\trequire.Len(t, allClients, 1)\n\n\tclient := allClients[0]\n\n\t// Log out from user1\n\terr = client.Logout()\n\trequire.NoError(t, err)\n\n\terr = scenario.WaitForTailscaleLogout()\n\trequire.NoError(t, err)\n\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tstatus, err := client.Status()\n\t\tassert.NoError(ct, err)\n\t\tassert.NotContains(ct, []string{\"Starting\", \"Running\"}, status.BackendState,\n\t\t\t\"Expected node to be logged out, backend state: %s\", status.BackendState)\n\t}, 30*time.Second, 2*time.Second)\n\n\terr = client.Login(headscale.GetEndpoint(), user2Key.GetKey())\n\trequire.NoError(t, err)\n\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tstatus, err := client.Status()\n\t\tassert.NoError(ct, err)\n\t\tassert.Equal(ct, \"Running\", status.BackendState, \"Expected node to be logged in, backend state: %s\", status.BackendState)\n\t\t// With tags-as-identity model, tagged nodes show as TaggedDevices user (2147455555)\n\t\t// The PreAuthKey was created with tags, so the 
node is tagged\n\t\tassert.Equal(ct, \"userid:2147455555\", status.Self.UserID.String(), \"Expected node to be logged in as tagged-devices user\")\n\t}, 30*time.Second, 2*time.Second)\n\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tvar err error\n\n\t\tlistNodes, err = headscale.ListNodes()\n\t\tassert.NoError(ct, err)\n\t\tassert.Len(ct, listNodes, 2, \"Should have 2 nodes after re-login\")\n\t\tassert.Equal(ct, user1, listNodes[0].GetUser().GetName(), \"First node should belong to user1\")\n\t\t// Second node is tagged (created with tagged PreAuthKey), so it shows as \"tagged-devices\"\n\t\tassert.Equal(ct, \"tagged-devices\", listNodes[1].GetUser().GetName(), \"Second node should be tagged-devices\")\n\t}, 20*time.Second, 1*time.Second)\n}\n\nfunc TestTaggedNodesCLIOutput(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tuser1 := \"user1\"\n\tuser2 := \"user2\"\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 1,\n\t\tUsers:        []string{user1},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithTestName(\"tagcli\"),\n\t)\n\trequire.NoError(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\tu2, err := headscale.CreateUser(user2)\n\trequire.NoError(t, err)\n\n\tvar user2Key v1.PreAuthKey\n\n\t// Create a tagged PreAuthKey for user2\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\terr = executeAndUnmarshal(\n\t\t\theadscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"preauthkeys\",\n\t\t\t\t\"--user\",\n\t\t\t\tstrconv.FormatUint(u2.GetId(), 10),\n\t\t\t\t\"create\",\n\t\t\t\t\"--reusable\",\n\t\t\t\t\"--expiration\",\n\t\t\t\t\"24h\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t\t\"--tags\",\n\t\t\t\t\"tag:test1,tag:test2\",\n\t\t\t},\n\t\t\t&user2Key,\n\t\t)\n\t\tassert.NoError(c, err)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for user2 tagged preauth key creation\")\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\trequire.Len(t, allClients, 1)\n\n\tclient := allClients[0]\n\n\t// Log out from user1\n\terr = client.Logout()\n\trequire.NoError(t, err)\n\n\terr = scenario.WaitForTailscaleLogout()\n\trequire.NoError(t, err)\n\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tstatus, err := client.Status()\n\t\tassert.NoError(ct, err)\n\t\tassert.NotContains(ct, []string{\"Starting\", \"Running\"}, status.BackendState,\n\t\t\t\"Expected node to be logged out, backend state: %s\", status.BackendState)\n\t}, 30*time.Second, 2*time.Second)\n\n\t// Log in with the tagged PreAuthKey (from user2, with tags)\n\terr = client.Login(headscale.GetEndpoint(), user2Key.GetKey())\n\trequire.NoError(t, err)\n\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tstatus, err := client.Status()\n\t\tassert.NoError(ct, err)\n\t\tassert.Equal(ct, \"Running\", status.BackendState, \"Expected node to be logged in, backend state: %s\", status.BackendState)\n\t\t// With tags-as-identity model, tagged nodes show as TaggedDevices user (2147455555)\n\t\tassert.Equal(ct, \"userid:2147455555\", status.Self.UserID.String(), \"Expected node to be logged in as tagged-devices user\")\n\t}, 30*time.Second, 2*time.Second)\n\n\t// Wait for the second node to appear\n\tvar listNodes []*v1.Node\n\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tvar err error\n\n\t\tlistNodes, err = headscale.ListNodes()\n\t\tassert.NoError(ct, 
err)\n\t\tassert.Len(ct, listNodes, 2, \"Should have 2 nodes after re-login with tagged key\")\n\t\tassert.Equal(ct, user1, listNodes[0].GetUser().GetName(), \"First node should belong to user1\")\n\t\tassert.Equal(ct, \"tagged-devices\", listNodes[1].GetUser().GetName(), \"Second node should be tagged-devices\")\n\t}, 20*time.Second, 1*time.Second)\n\n\t// Test: tailscale status output should show \"tagged-devices\" not \"userid:2147455555\"\n\t// This is the fix for issue #2970 - the Tailscale client should display user-friendly names\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tstdout, stderr, err := client.Execute([]string{\"tailscale\", \"status\"})\n\t\tassert.NoError(ct, err, \"tailscale status command should succeed, stderr: %s\", stderr)\n\n\t\tt.Logf(\"Tailscale status output:\\n%s\", stdout)\n\n\t\t// The output should contain \"tagged-devices\" for tagged nodes\n\t\tassert.Contains(ct, stdout, \"tagged-devices\", \"Tailscale status should show 'tagged-devices' for tagged nodes\")\n\n\t\t// The output should NOT show the raw numeric userid to the user\n\t\tassert.NotContains(ct, stdout, \"userid:2147455555\", \"Tailscale status should not show numeric userid for tagged nodes\")\n\t}, 20*time.Second, 1*time.Second)\n}\n\nfunc TestApiKeyCommand(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tcount := 5\n\n\tspec := ScenarioSpec{\n\t\tUsers: []string{\"user1\", \"user2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName(\"cli-apikey\"))\n\trequire.NoError(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\tkeys := make([]string, count)\n\n\tfor idx := range count {\n\t\tapiResult, err := headscale.Execute(\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"apikeys\",\n\t\t\t\t\"create\",\n\t\t\t\t\"--expiration\",\n\t\t\t\t\"24h\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t)\n\t\trequire.NoError(t, err)\n\t\tassert.NotEmpty(t, apiResult)\n\n\t\tkeys[idx] = apiResult\n\t}\n\n\tassert.Len(t, keys, 5)\n\n\tvar listedAPIKeys []v1.ApiKey\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\terr = executeAndUnmarshal(headscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"apikeys\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&listedAPIKeys,\n\t\t)\n\t\tassert.NoError(c, err)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for API keys list\")\n\n\tassert.Len(t, listedAPIKeys, 5)\n\n\tassert.Equal(t, uint64(1), listedAPIKeys[0].GetId())\n\tassert.Equal(t, uint64(2), listedAPIKeys[1].GetId())\n\tassert.Equal(t, uint64(3), listedAPIKeys[2].GetId())\n\tassert.Equal(t, uint64(4), listedAPIKeys[3].GetId())\n\tassert.Equal(t, uint64(5), listedAPIKeys[4].GetId())\n\n\tassert.NotEmpty(t, listedAPIKeys[0].GetPrefix())\n\tassert.NotEmpty(t, listedAPIKeys[1].GetPrefix())\n\tassert.NotEmpty(t, listedAPIKeys[2].GetPrefix())\n\tassert.NotEmpty(t, listedAPIKeys[3].GetPrefix())\n\tassert.NotEmpty(t, listedAPIKeys[4].GetPrefix())\n\n\tassert.True(t, listedAPIKeys[0].GetExpiration().AsTime().After(time.Now()))\n\tassert.True(t, listedAPIKeys[1].GetExpiration().AsTime().After(time.Now()))\n\tassert.True(t, listedAPIKeys[2].GetExpiration().AsTime().After(time.Now()))\n\tassert.True(t, listedAPIKeys[3].GetExpiration().AsTime().After(time.Now()))\n\tassert.True(t, 
listedAPIKeys[4].GetExpiration().AsTime().After(time.Now()))\n\n\tassert.True(\n\t\tt,\n\t\tlistedAPIKeys[0].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)),\n\t)\n\tassert.True(\n\t\tt,\n\t\tlistedAPIKeys[1].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)),\n\t)\n\tassert.True(\n\t\tt,\n\t\tlistedAPIKeys[2].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)),\n\t)\n\tassert.True(\n\t\tt,\n\t\tlistedAPIKeys[3].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)),\n\t)\n\tassert.True(\n\t\tt,\n\t\tlistedAPIKeys[4].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)),\n\t)\n\n\texpiredPrefixes := make(map[string]bool)\n\n\t// Expire three keys\n\tfor idx := range 3 {\n\t\t_, err := headscale.Execute(\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"apikeys\",\n\t\t\t\t\"expire\",\n\t\t\t\t\"--prefix\",\n\t\t\t\tlistedAPIKeys[idx].GetPrefix(),\n\t\t\t},\n\t\t)\n\t\trequire.NoError(t, err)\n\n\t\texpiredPrefixes[listedAPIKeys[idx].GetPrefix()] = true\n\t}\n\n\tvar listedAfterExpireAPIKeys []v1.ApiKey\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\terr = executeAndUnmarshal(headscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"apikeys\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&listedAfterExpireAPIKeys,\n\t\t)\n\t\tassert.NoError(c, err)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for API keys list after expire\")\n\n\tfor index := range listedAfterExpireAPIKeys {\n\t\tif _, ok := expiredPrefixes[listedAfterExpireAPIKeys[index].GetPrefix()]; ok {\n\t\t\t// Expired\n\t\t\tassert.True(\n\t\t\t\tt,\n\t\t\t\tlistedAfterExpireAPIKeys[index].GetExpiration().AsTime().Before(time.Now()),\n\t\t\t)\n\t\t} else {\n\t\t\t// Not expired\n\t\t\tassert.False(\n\t\t\t\tt,\n\t\t\t\tlistedAfterExpireAPIKeys[index].GetExpiration().AsTime().Before(time.Now()),\n\t\t\t)\n\t\t}\n\t}\n\n\t_, err = headscale.Execute(\n\t\t[]string{\n\t\t\t\"headscale\",\n\t\t\t\"apikeys\",\n\t\t\t\"delete\",\n\t\t\t\"--prefix\",\n\t\t\tlistedAPIKeys[0].GetPrefix(),\n\t\t})\n\trequire.NoError(t, err)\n\n\tvar listedAPIKeysAfterDelete []v1.ApiKey\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\terr = executeAndUnmarshal(headscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"apikeys\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&listedAPIKeysAfterDelete,\n\t\t)\n\t\tassert.NoError(c, err)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for API keys list after delete\")\n\n\tassert.Len(t, listedAPIKeysAfterDelete, 4)\n\n\t// Test expire by ID (using key at index 0)\n\t_, err = headscale.Execute(\n\t\t[]string{\n\t\t\t\"headscale\",\n\t\t\t\"apikeys\",\n\t\t\t\"expire\",\n\t\t\t\"--id\",\n\t\t\tstrconv.FormatUint(listedAPIKeysAfterDelete[0].GetId(), 10),\n\t\t})\n\trequire.NoError(t, err)\n\n\tvar listedAPIKeysAfterExpireByID []v1.ApiKey\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\terr = executeAndUnmarshal(headscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"apikeys\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&listedAPIKeysAfterExpireByID,\n\t\t)\n\t\tassert.NoError(c, err)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for API keys list after expire by ID\")\n\n\t// Verify the key was expired\n\tfor idx := range listedAPIKeysAfterExpireByID {\n\t\tif listedAPIKeysAfterExpireByID[idx].GetId() == listedAPIKeysAfterDelete[0].GetId() {\n\t\t\tassert.True(t, 
listedAPIKeysAfterExpireByID[idx].GetExpiration().AsTime().Before(time.Now()),\n\t\t\t\t\"Key expired by ID should have expiration in the past\")\n\t\t}\n\t}\n\n\t// Test delete by ID (using key at index 1)\n\tdeletedKeyID := listedAPIKeysAfterExpireByID[1].GetId()\n\t_, err = headscale.Execute(\n\t\t[]string{\n\t\t\t\"headscale\",\n\t\t\t\"apikeys\",\n\t\t\t\"delete\",\n\t\t\t\"--id\",\n\t\t\tstrconv.FormatUint(deletedKeyID, 10),\n\t\t})\n\trequire.NoError(t, err)\n\n\tvar listedAPIKeysAfterDeleteByID []v1.ApiKey\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\terr = executeAndUnmarshal(headscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"apikeys\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&listedAPIKeysAfterDeleteByID,\n\t\t)\n\t\tassert.NoError(c, err)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for API keys list after delete by ID\")\n\n\tassert.Len(t, listedAPIKeysAfterDeleteByID, 3)\n\n\t// Verify the specific key was deleted\n\tfor idx := range listedAPIKeysAfterDeleteByID {\n\t\tassert.NotEqual(t, deletedKeyID, listedAPIKeysAfterDeleteByID[idx].GetId(),\n\t\t\t\"Deleted key should not be present in the list\")\n\t}\n}\n\nfunc TestNodeCommand(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tUsers: []string{\"node-user\", \"other-user\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName(\"cli-node\"))\n\trequire.NoError(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\tregIDs := []string{\n\t\ttypes.MustAuthID().String(),\n\t\ttypes.MustAuthID().String(),\n\t\ttypes.MustAuthID().String(),\n\t\ttypes.MustAuthID().String(),\n\t\ttypes.MustAuthID().String(),\n\t}\n\tnodes := make([]*v1.Node, len(regIDs))\n\n\tfor index, regID := range regIDs {\n\t\t_, err := headscale.Execute(\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"debug\",\n\t\t\t\t\"create-node\",\n\t\t\t\t\"--name\",\n\t\t\t\tfmt.Sprintf(\"node-%d\", index+1),\n\t\t\t\t\"--user\",\n\t\t\t\t\"node-user\",\n\t\t\t\t\"--key\",\n\t\t\t\tregID,\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t)\n\t\trequire.NoError(t, err)\n\n\t\tvar node v1.Node\n\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\terr = executeAndUnmarshal(\n\t\t\t\theadscale,\n\t\t\t\t[]string{\n\t\t\t\t\t\"headscale\",\n\t\t\t\t\t\"auth\",\n\t\t\t\t\t\"register\",\n\t\t\t\t\t\"--user\",\n\t\t\t\t\t\"node-user\",\n\t\t\t\t\t\"--auth-id\",\n\t\t\t\t\tregID,\n\t\t\t\t\t\"--output\",\n\t\t\t\t\t\"json\",\n\t\t\t\t},\n\t\t\t\t&node,\n\t\t\t)\n\t\t\tassert.NoError(c, err)\n\t\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for node registration\")\n\n\t\tnodes[index] = &node\n\t}\n\n\t// nodes is fully populated above, so assert its length directly\n\tassert.Len(t, nodes, len(regIDs), \"Should have correct number of nodes after CLI operations\")\n\n\t// Test listing all nodes after they have been registered\n\tvar listAll []v1.Node\n\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\terr := executeAndUnmarshal(\n\t\t\theadscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"nodes\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&listAll,\n\t\t)\n\t\tassert.NoError(ct, err)\n\t\tassert.Len(ct, listAll, len(regIDs), \"Should list all nodes after CLI operations\")\n\t}, 20*time.Second, 
1*time.Second)\n\n\tassert.Equal(t, uint64(1), listAll[0].GetId())\n\tassert.Equal(t, uint64(2), listAll[1].GetId())\n\tassert.Equal(t, uint64(3), listAll[2].GetId())\n\tassert.Equal(t, uint64(4), listAll[3].GetId())\n\tassert.Equal(t, uint64(5), listAll[4].GetId())\n\n\tassert.Equal(t, \"node-1\", listAll[0].GetName())\n\tassert.Equal(t, \"node-2\", listAll[1].GetName())\n\tassert.Equal(t, \"node-3\", listAll[2].GetName())\n\tassert.Equal(t, \"node-4\", listAll[3].GetName())\n\tassert.Equal(t, \"node-5\", listAll[4].GetName())\n\n\totherUserRegIDs := []string{\n\t\ttypes.MustAuthID().String(),\n\t\ttypes.MustAuthID().String(),\n\t}\n\totherUserMachines := make([]*v1.Node, len(otherUserRegIDs))\n\n\trequire.NoError(t, err)\n\n\tfor index, regID := range otherUserRegIDs {\n\t\t_, err := headscale.Execute(\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"debug\",\n\t\t\t\t\"create-node\",\n\t\t\t\t\"--name\",\n\t\t\t\tfmt.Sprintf(\"otheruser-node-%d\", index+1),\n\t\t\t\t\"--user\",\n\t\t\t\t\"other-user\",\n\t\t\t\t\"--key\",\n\t\t\t\tregID,\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t)\n\t\trequire.NoError(t, err)\n\n\t\tvar node v1.Node\n\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\terr = executeAndUnmarshal(\n\t\t\t\theadscale,\n\t\t\t\t[]string{\n\t\t\t\t\t\"headscale\",\n\t\t\t\t\t\"auth\",\n\t\t\t\t\t\"register\",\n\t\t\t\t\t\"--user\",\n\t\t\t\t\t\"other-user\",\n\t\t\t\t\t\"--auth-id\",\n\t\t\t\t\tregID,\n\t\t\t\t\t\"--output\",\n\t\t\t\t\t\"json\",\n\t\t\t\t},\n\t\t\t\t&node,\n\t\t\t)\n\t\t\tassert.NoError(c, err)\n\t\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for other-user node registration\")\n\n\t\totherUserMachines[index] = &node\n\t}\n\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tassert.Len(ct, otherUserMachines, len(otherUserRegIDs), \"Should have correct number of otherUser machines after CLI operations\")\n\t}, 15*time.Second, 1*time.Second)\n\n\t// Test list all nodes after adding the other-user nodes\n\tvar listAllWithotherUser []v1.Node\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\terr = executeAndUnmarshal(\n\t\t\theadscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"nodes\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&listAllWithotherUser,\n\t\t)\n\t\tassert.NoError(c, err)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for nodes list after adding other-user nodes\")\n\n\t// All nodes, nodes + otherUser\n\tassert.Len(t, listAllWithotherUser, 7)\n\n\tassert.Equal(t, uint64(6), listAllWithotherUser[5].GetId())\n\tassert.Equal(t, uint64(7), listAllWithotherUser[6].GetId())\n\n\tassert.Equal(t, \"otheruser-node-1\", listAllWithotherUser[5].GetName())\n\tassert.Equal(t, \"otheruser-node-2\", listAllWithotherUser[6].GetName())\n\n\t// Test list only the other-user nodes\n\tvar listOnlyotherUserMachineUser []v1.Node\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\terr = executeAndUnmarshal(\n\t\t\theadscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"nodes\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--user\",\n\t\t\t\t\"other-user\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&listOnlyotherUserMachineUser,\n\t\t)\n\t\tassert.NoError(c, err)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for nodes list filtered by other-user\")\n\n\tassert.Len(t, listOnlyotherUserMachineUser, 2)\n\n\tassert.Equal(t, uint64(6), listOnlyotherUserMachineUser[0].GetId())\n\tassert.Equal(t, uint64(7), 
listOnlyotherUserMachineUser[1].GetId())\n\n\tassert.Equal(\n\t\tt,\n\t\t\"otheruser-node-1\",\n\t\tlistOnlyotherUserMachineUser[0].GetName(),\n\t)\n\tassert.Equal(\n\t\tt,\n\t\t\"otheruser-node-2\",\n\t\tlistOnlyotherUserMachineUser[1].GetName(),\n\t)\n\n\t// Delete a node\n\t_, err = headscale.Execute(\n\t\t[]string{\n\t\t\t\"headscale\",\n\t\t\t\"nodes\",\n\t\t\t\"delete\",\n\t\t\t\"--identifier\",\n\t\t\t// Delete node-user's node with ID 4\n\t\t\t\"4\",\n\t\t\t\"--output\",\n\t\t\t\"json\",\n\t\t\t\"--force\",\n\t\t},\n\t)\n\trequire.NoError(t, err)\n\n\t// Test: list main user after node is deleted\n\tvar listOnlyMachineUserAfterDelete []v1.Node\n\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\terr := executeAndUnmarshal(\n\t\t\theadscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"nodes\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--user\",\n\t\t\t\t\"node-user\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&listOnlyMachineUserAfterDelete,\n\t\t)\n\t\tassert.NoError(ct, err)\n\t\tassert.Len(ct, listOnlyMachineUserAfterDelete, 4, \"Should have 4 nodes for node-user after deletion\")\n\t}, 20*time.Second, 1*time.Second)\n}\n\nfunc TestNodeExpireCommand(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tUsers: []string{\"node-expire-user\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName(\"cli-nodeexpire\"))\n\trequire.NoError(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\tregIDs := []string{\n\t\ttypes.MustAuthID().String(),\n\t\ttypes.MustAuthID().String(),\n\t\ttypes.MustAuthID().String(),\n\t\ttypes.MustAuthID().String(),\n\t\ttypes.MustAuthID().String(),\n\t}\n\tnodes := make([]*v1.Node, len(regIDs))\n\n\tfor index, regID := range regIDs {\n\t\t_, err := headscale.Execute(\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"debug\",\n\t\t\t\t\"create-node\",\n\t\t\t\t\"--name\",\n\t\t\t\tfmt.Sprintf(\"node-%d\", index+1),\n\t\t\t\t\"--user\",\n\t\t\t\t\"node-expire-user\",\n\t\t\t\t\"--key\",\n\t\t\t\tregID,\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t)\n\t\trequire.NoError(t, err)\n\n\t\tvar node v1.Node\n\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\terr = executeAndUnmarshal(\n\t\t\t\theadscale,\n\t\t\t\t[]string{\n\t\t\t\t\t\"headscale\",\n\t\t\t\t\t\"auth\",\n\t\t\t\t\t\"register\",\n\t\t\t\t\t\"--user\",\n\t\t\t\t\t\"node-expire-user\",\n\t\t\t\t\t\"--auth-id\",\n\t\t\t\t\tregID,\n\t\t\t\t\t\"--output\",\n\t\t\t\t\t\"json\",\n\t\t\t\t},\n\t\t\t\t&node,\n\t\t\t)\n\t\t\tassert.NoError(c, err)\n\t\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for node-expire-user node registration\")\n\n\t\tnodes[index] = &node\n\t}\n\n\tassert.Len(t, nodes, len(regIDs))\n\n\tvar listAll []v1.Node\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\terr = executeAndUnmarshal(\n\t\t\theadscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"nodes\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&listAll,\n\t\t)\n\t\tassert.NoError(c, err)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for nodes list in expire test\")\n\n\tassert.Len(t, listAll, 5)\n\n\tassert.True(t, listAll[0].GetExpiry().AsTime().IsZero())\n\tassert.True(t, listAll[1].GetExpiry().AsTime().IsZero())\n\tassert.True(t, listAll[2].GetExpiry().AsTime().IsZero())\n\tassert.True(t, 
listAll[3].GetExpiry().AsTime().IsZero())\n\tassert.True(t, listAll[4].GetExpiry().AsTime().IsZero())\n\n\tfor idx := range 3 {\n\t\t_, err := headscale.Execute(\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"nodes\",\n\t\t\t\t\"expire\",\n\t\t\t\t\"--identifier\",\n\t\t\t\tstrconv.FormatUint(listAll[idx].GetId(), 10),\n\t\t\t},\n\t\t)\n\t\trequire.NoError(t, err)\n\t}\n\n\tvar listAllAfterExpiry []v1.Node\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\terr = executeAndUnmarshal(\n\t\t\theadscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"nodes\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&listAllAfterExpiry,\n\t\t)\n\t\tassert.NoError(c, err)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for nodes list after expiry\")\n\n\tassert.Len(t, listAllAfterExpiry, 5)\n\n\tassert.True(t, listAllAfterExpiry[0].GetExpiry().AsTime().Before(time.Now()))\n\tassert.True(t, listAllAfterExpiry[1].GetExpiry().AsTime().Before(time.Now()))\n\tassert.True(t, listAllAfterExpiry[2].GetExpiry().AsTime().Before(time.Now()))\n\tassert.True(t, listAllAfterExpiry[3].GetExpiry().AsTime().IsZero())\n\tassert.True(t, listAllAfterExpiry[4].GetExpiry().AsTime().IsZero())\n}\n\nfunc TestNodeRenameCommand(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tUsers: []string{\"node-rename-command\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName(\"cli-noderename\"))\n\trequire.NoError(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\tregIDs := []string{\n\t\ttypes.MustAuthID().String(),\n\t\ttypes.MustAuthID().String(),\n\t\ttypes.MustAuthID().String(),\n\t\ttypes.MustAuthID().String(),\n\t\ttypes.MustAuthID().String(),\n\t}\n\tnodes := make([]*v1.Node, len(regIDs))\n\n\trequire.NoError(t, err)\n\n\tfor index, regID := range regIDs {\n\t\t_, err := headscale.Execute(\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"debug\",\n\t\t\t\t\"create-node\",\n\t\t\t\t\"--name\",\n\t\t\t\tfmt.Sprintf(\"node-%d\", index+1),\n\t\t\t\t\"--user\",\n\t\t\t\t\"node-rename-command\",\n\t\t\t\t\"--key\",\n\t\t\t\tregID,\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t)\n\t\trequire.NoError(t, err)\n\n\t\tvar node v1.Node\n\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\terr = executeAndUnmarshal(\n\t\t\t\theadscale,\n\t\t\t\t[]string{\n\t\t\t\t\t\"headscale\",\n\t\t\t\t\t\"auth\",\n\t\t\t\t\t\"register\",\n\t\t\t\t\t\"--user\",\n\t\t\t\t\t\"node-rename-command\",\n\t\t\t\t\t\"--auth-id\",\n\t\t\t\t\tregID,\n\t\t\t\t\t\"--output\",\n\t\t\t\t\t\"json\",\n\t\t\t\t},\n\t\t\t\t&node,\n\t\t\t)\n\t\t\tassert.NoError(c, err)\n\t\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for node-rename-command node registration\")\n\n\t\tnodes[index] = &node\n\t}\n\n\tassert.Len(t, nodes, len(regIDs))\n\n\tvar listAll []v1.Node\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\terr = executeAndUnmarshal(\n\t\t\theadscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"nodes\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&listAll,\n\t\t)\n\t\tassert.NoError(c, err)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for nodes list in rename test\")\n\n\tassert.Len(t, listAll, 5)\n\n\tassert.Contains(t, listAll[0].GetGivenName(), \"node-1\")\n\tassert.Contains(t, listAll[1].GetGivenName(), 
\"node-2\")\n\tassert.Contains(t, listAll[2].GetGivenName(), \"node-3\")\n\tassert.Contains(t, listAll[3].GetGivenName(), \"node-4\")\n\tassert.Contains(t, listAll[4].GetGivenName(), \"node-5\")\n\n\tfor idx := range 3 {\n\t\tres, err := headscale.Execute(\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"nodes\",\n\t\t\t\t\"rename\",\n\t\t\t\t\"--identifier\",\n\t\t\t\tstrconv.FormatUint(listAll[idx].GetId(), 10),\n\t\t\t\tfmt.Sprintf(\"newnode-%d\", idx+1),\n\t\t\t},\n\t\t)\n\t\trequire.NoError(t, err)\n\n\t\tassert.Contains(t, res, \"Node renamed\")\n\t}\n\n\tvar listAllAfterRename []v1.Node\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\terr = executeAndUnmarshal(\n\t\t\theadscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"nodes\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&listAllAfterRename,\n\t\t)\n\t\tassert.NoError(c, err)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for nodes list after rename\")\n\n\tassert.Len(t, listAllAfterRename, 5)\n\n\tassert.Equal(t, \"newnode-1\", listAllAfterRename[0].GetGivenName())\n\tassert.Equal(t, \"newnode-2\", listAllAfterRename[1].GetGivenName())\n\tassert.Equal(t, \"newnode-3\", listAllAfterRename[2].GetGivenName())\n\tassert.Contains(t, listAllAfterRename[3].GetGivenName(), \"node-4\")\n\tassert.Contains(t, listAllAfterRename[4].GetGivenName(), \"node-5\")\n\n\t// Test failure for too long names\n\t_, err = headscale.Execute(\n\t\t[]string{\n\t\t\t\"headscale\",\n\t\t\t\"nodes\",\n\t\t\t\"rename\",\n\t\t\t\"--identifier\",\n\t\t\tstrconv.FormatUint(listAll[4].GetId(), 10),\n\t\t\tstrings.Repeat(\"t\", 64),\n\t\t},\n\t)\n\trequire.ErrorContains(t, err, \"must not exceed 63 characters\")\n\n\tvar listAllAfterRenameAttempt []v1.Node\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\terr = executeAndUnmarshal(\n\t\t\theadscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"nodes\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&listAllAfterRenameAttempt,\n\t\t)\n\t\tassert.NoError(c, err)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for nodes list after failed rename attempt\")\n\n\tassert.Len(t, listAllAfterRenameAttempt, 5)\n\n\tassert.Equal(t, \"newnode-1\", listAllAfterRenameAttempt[0].GetGivenName())\n\tassert.Equal(t, \"newnode-2\", listAllAfterRenameAttempt[1].GetGivenName())\n\tassert.Equal(t, \"newnode-3\", listAllAfterRenameAttempt[2].GetGivenName())\n\tassert.Contains(t, listAllAfterRenameAttempt[3].GetGivenName(), \"node-4\")\n\tassert.Contains(t, listAllAfterRenameAttempt[4].GetGivenName(), \"node-5\")\n}\n\nfunc TestPolicyCommand(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tUsers: []string{\"user1\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithTestName(\"cli-policy\"),\n\t\thsic.WithConfigEnv(map[string]string{\n\t\t\t\"HEADSCALE_POLICY_MODE\": \"database\", // test sets/gets policy via CLI\n\t\t}),\n\t)\n\trequire.NoError(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\tp := policyv2.Policy{\n\t\tACLs: []policyv2.ACL{\n\t\t\t{\n\t\t\t\tAction:   \"accept\",\n\t\t\t\tProtocol: \"tcp\",\n\t\t\t\tSources:  []policyv2.Alias{wildcard()},\n\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tTagOwners: 
policyv2.TagOwners{\n\t\t\tpolicyv2.Tag(\"tag:exists\"): policyv2.Owners{usernameOwner(\"user1@\")},\n\t\t},\n\t}\n\n\tpBytes, _ := json.Marshal(p) //nolint:errchkjson\n\n\tpolicyFilePath := \"/etc/headscale/policy.json\"\n\n\terr = headscale.WriteFile(policyFilePath, pBytes)\n\trequire.NoError(t, err)\n\n\t// No policy is present at this time.\n\t// Add a new policy from a file.\n\t_, err = headscale.Execute(\n\t\t[]string{\n\t\t\t\"headscale\",\n\t\t\t\"policy\",\n\t\t\t\"set\",\n\t\t\t\"-f\",\n\t\t\tpolicyFilePath,\n\t\t},\n\t)\n\n\trequire.NoError(t, err)\n\n\t// Get the current policy and check\n\t// if it is the same as the one we set.\n\tvar output *policyv2.Policy\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\terr = executeAndUnmarshal(\n\t\t\theadscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"policy\",\n\t\t\t\t\"get\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&output,\n\t\t)\n\t\tassert.NoError(c, err)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for policy get command\")\n\n\tassert.Len(t, output.TagOwners, 1)\n\tassert.Len(t, output.ACLs, 1)\n}\n\nfunc TestPolicyBrokenConfigCommand(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 1,\n\t\tUsers:        []string{\"user1\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithTestName(\"cli-policybad\"),\n\t\thsic.WithConfigEnv(map[string]string{\n\t\t\t\"HEADSCALE_POLICY_MODE\": \"database\", // test sets invalid policy via CLI\n\t\t}),\n\t)\n\trequire.NoError(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\tp := policyv2.Policy{\n\t\tACLs: []policyv2.ACL{\n\t\t\t{\n\t\t\t\t// This is an unknown action, so it will return an error\n\t\t\t\t// and the config will not be applied.\n\t\t\t\tAction:   \"unknown-action\",\n\t\t\t\tProtocol: \"tcp\",\n\t\t\t\tSources:  []policyv2.Alias{wildcard()},\n\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tTagOwners: policyv2.TagOwners{\n\t\t\tpolicyv2.Tag(\"tag:exists\"): policyv2.Owners{usernameOwner(\"user1@\")},\n\t\t},\n\t}\n\n\tpBytes, _ := json.Marshal(p) //nolint:errchkjson\n\n\tpolicyFilePath := \"/etc/headscale/policy.json\"\n\n\terr = headscale.WriteFile(policyFilePath, pBytes)\n\trequire.NoError(t, err)\n\n\t// No policy is present at this time.\n\t// Add a new policy from a file.\n\t_, err = headscale.Execute(\n\t\t[]string{\n\t\t\t\"headscale\",\n\t\t\t\"policy\",\n\t\t\t\"set\",\n\t\t\t\"-f\",\n\t\t\tpolicyFilePath,\n\t\t},\n\t)\n\trequire.ErrorContains(t, err, `invalid ACL action: \"unknown-action\"`)\n\n\t// The new policy was invalid, the old one should still be in place, which\n\t// is none.\n\t_, err = headscale.Execute(\n\t\t[]string{\n\t\t\t\"headscale\",\n\t\t\t\"policy\",\n\t\t\t\"get\",\n\t\t\t\"--output\",\n\t\t\t\"json\",\n\t\t},\n\t)\n\tassert.ErrorContains(t, err, \"acl policy not found\")\n}\n"
  },
  {
    "path": "integration/control.go",
    "content": "package integration\n\nimport (\n\t\"net/netip\"\n\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\t\"github.com/juanfont/headscale/hscontrol\"\n\tpolicyv2 \"github.com/juanfont/headscale/hscontrol/policy/v2\"\n\t\"github.com/juanfont/headscale/hscontrol/routes\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/integration/hsic\"\n\t\"github.com/ory/dockertest/v3\"\n\t\"tailscale.com/tailcfg\"\n)\n\ntype ControlServer interface {\n\tShutdown() (string, string, error)\n\tSaveLog(path string) (string, string, error)\n\tReadLog() (string, string, error)\n\tSaveProfile(path string) error\n\tExecute(command []string) (string, error)\n\tWriteFile(path string, content []byte) error\n\tConnectToNetwork(network *dockertest.Network) error\n\tGetHealthEndpoint() string\n\tGetEndpoint() string\n\tWaitForRunning() error\n\tCreateUser(user string) (*v1.User, error)\n\tCreateAuthKey(user uint64, reusable bool, ephemeral bool) (*v1.PreAuthKey, error)\n\tCreateAuthKeyWithTags(user uint64, reusable bool, ephemeral bool, tags []string) (*v1.PreAuthKey, error)\n\tCreateAuthKeyWithOptions(opts hsic.AuthKeyOptions) (*v1.PreAuthKey, error)\n\tDeleteAuthKey(id uint64) error\n\tListNodes(users ...string) ([]*v1.Node, error)\n\tDeleteNode(nodeID uint64) error\n\tNodesByUser() (map[string][]*v1.Node, error)\n\tNodesByName() (map[string]*v1.Node, error)\n\tListUsers() ([]*v1.User, error)\n\tMapUsers() (map[string]*v1.User, error)\n\tDeleteUser(userID uint64) error\n\tApproveRoutes(nodeID uint64, routes []netip.Prefix) (*v1.Node, error)\n\tSetNodeTags(nodeID uint64, tags []string) error\n\tGetCert() []byte\n\tGetHostname() string\n\tGetIPInNetwork(network *dockertest.Network) string\n\tSetPolicy(pol *policyv2.Policy) error\n\tGetAllMapReponses() (map[types.NodeID][]tailcfg.MapResponse, error)\n\tPrimaryRoutes() (*routes.DebugRoutes, error)\n\tDebugBatcher() (*hscontrol.DebugBatcherInfo, error)\n\tDebugNodeStore() (map[types.NodeID]types.Node, error)\n\tDebugFilter() ([]tailcfg.FilterRule, error)\n}\n"
  },
  {
    "path": "integration/derp_verify_endpoint_test.go",
    "content": "package integration\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"strconv\"\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/juanfont/headscale/integration/dsic\"\n\t\"github.com/juanfont/headscale/integration/hsic\"\n\t\"github.com/juanfont/headscale/integration/integrationutil\"\n\t\"github.com/juanfont/headscale/integration/tsic\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/derp\"\n\t\"tailscale.com/derp/derphttp\"\n\t\"tailscale.com/net/netmon\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/key\"\n)\n\nfunc TestDERPVerifyEndpoint(t *testing.T) {\n\tIntegrationSkip(t)\n\n\t// Generate random hostname for the headscale instance\n\thash, err := util.GenerateRandomStringDNSSafe(6)\n\trequire.NoError(t, err)\n\n\ttestName := \"derpverify\"\n\thostname := fmt.Sprintf(\"hs-%s-%s\", testName, hash)\n\n\theadscalePort := 8080\n\n\t// Create cert for headscale\n\tcaHeadscale, certHeadscale, keyHeadscale, err := integrationutil.CreateCertificate(hostname)\n\trequire.NoError(t, err)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: len(MustTestVersions),\n\t\tUsers:        []string{\"user1\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\tderper, err := scenario.CreateDERPServer(\"head\",\n\t\tdsic.WithCACert(caHeadscale),\n\t\tdsic.WithVerifyClientURL(fmt.Sprintf(\"https://%s/verify\", net.JoinHostPort(hostname, strconv.Itoa(headscalePort)))),\n\t)\n\trequire.NoError(t, err)\n\n\tderpRegion := tailcfg.DERPRegion{\n\t\tRegionCode: \"test-derpverify\",\n\t\tRegionName: \"TestDerpVerify\",\n\t\tNodes: []*tailcfg.DERPNode{\n\t\t\t{\n\t\t\t\tName:             \"TestDerpVerify\",\n\t\t\t\tRegionID:         900,\n\t\t\t\tHostName:         derper.GetHostname(),\n\t\t\t\tSTUNPort:         derper.GetSTUNPort(),\n\t\t\t\tSTUNOnly:         false,\n\t\t\t\tDERPPort:         derper.GetDERPPort(),\n\t\t\t\tInsecureForTests: true,\n\t\t\t},\n\t\t},\n\t}\n\tderpMap := tailcfg.DERPMap{\n\t\tRegions: map[int]*tailcfg.DERPRegion{\n\t\t\t900: &derpRegion,\n\t\t},\n\t}\n\n\t// WithHostname is used instead of WithTestName because the hostname\n\t// must match the pre-generated TLS certificate created above.\n\t// The test name \"derpverify\" is embedded in the hostname variable.\n\t//\n\t// WithCACert passes the external DERP server's certificate so\n\t// tailscale clients trust it. 
WithCustomTLS and WithDERPConfig\n\t// configure headscale to use the external DERP server created\n\t// above instead of the default embedded one.\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{tsic.WithCACert(derper.GetCert())},\n\t\thsic.WithHostname(hostname),\n\t\thsic.WithPort(headscalePort),\n\t\thsic.WithCustomTLS(caHeadscale, certHeadscale, keyHeadscale),\n\t\thsic.WithDERPConfig(derpMap))\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\tfakeKey := key.NewNode()\n\tDERPVerify(t, fakeKey, derpRegion, false)\n\n\tfor _, client := range allClients {\n\t\tnodeKey, err := client.GetNodePrivateKey()\n\t\trequire.NoError(t, err)\n\t\tDERPVerify(t, *nodeKey, derpRegion, true)\n\t}\n}\n\nfunc DERPVerify(\n\tt *testing.T,\n\tnodeKey key.NodePrivate,\n\tregion tailcfg.DERPRegion,\n\texpectSuccess bool,\n) {\n\tt.Helper()\n\n\tc := derphttp.NewRegionClient(nodeKey, t.Logf, netmon.NewStatic(), func() *tailcfg.DERPRegion {\n\t\treturn &region\n\t})\n\tdefer c.Close()\n\n\tvar result error\n\n\terr := c.Connect(t.Context())\n\tif err != nil {\n\t\tresult = fmt.Errorf(\"client Connect: %w\", err)\n\t}\n\n\tif m, err := c.Recv(); err != nil { //nolint:noinlineerr\n\t\tresult = fmt.Errorf(\"client first Recv: %w\", err)\n\t} else if v, ok := m.(derp.ServerInfoMessage); !ok {\n\t\tresult = fmt.Errorf(\"client first Recv was unexpected type %T\", v) //nolint:err113\n\t}\n\n\tif expectSuccess && result != nil {\n\t\tt.Fatalf(\"DERP verify failed unexpectedly for client %s. Expected success but got error: %v\", nodeKey.Public(), result)\n\t} else if !expectSuccess && result == nil {\n\t\tt.Fatalf(\"DERP verify succeeded unexpectedly for client %s. Expected failure but it succeeded.\", nodeKey.Public())\n\t}\n}\n"
  },
  {
    "path": "integration/dns_test.go",
    "content": "package integration\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/integration/hsic\"\n\t\"github.com/juanfont/headscale/integration/tsic\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/tailcfg\"\n)\n\nfunc TestResolveMagicDNS(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: len(MustTestVersions),\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName(\"magicdns\"))\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t// assertClientsState(t, allClients)\n\n\t// Poor mans cache\n\t_, err = scenario.ListTailscaleClientsFQDNs()\n\trequireNoErrListFQDN(t, err)\n\n\t_, err = scenario.ListTailscaleClientsIPs()\n\trequireNoErrListClientIPs(t, err)\n\n\tfor _, client := range allClients {\n\t\tfor _, peer := range allClients {\n\t\t\t// It is safe to ignore this error as we handled it when caching it\n\t\t\tpeerFQDN, _ := peer.FQDN()\n\n\t\t\tassert.Equal(t, peer.Hostname()+\".headscale.net.\", peerFQDN)\n\n\t\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t\t\tcommand := []string{\n\t\t\t\t\t\"tailscale\",\n\t\t\t\t\t\"ip\", peerFQDN,\n\t\t\t\t}\n\t\t\t\tresult, _, err := client.Execute(command)\n\t\t\t\tassert.NoError(ct, err, \"Failed to execute resolve/ip command %s from %s\", peerFQDN, client.Hostname())\n\n\t\t\t\tips, err := peer.IPs()\n\t\t\t\tassert.NoError(ct, err, \"Failed to get IPs for %s\", peer.Hostname())\n\n\t\t\t\tfor _, ip := range ips {\n\t\t\t\t\tassert.Contains(ct, result, ip.String(), \"IP %s should be found in DNS resolution result from %s to %s\", ip.String(), client.Hostname(), peer.Hostname())\n\t\t\t\t}\n\t\t\t}, 30*time.Second, 2*time.Second)\n\t\t}\n\t}\n}\n\nfunc TestResolveMagicDNSExtraRecordsPath(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 1,\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\tconst erPath = \"/tmp/extra_records.json\"\n\n\textraRecords := make([]tailcfg.DNSRecord, 0, 2)\n\textraRecords = append(extraRecords, tailcfg.DNSRecord{\n\t\tName:  \"test.myvpn.example.com\",\n\t\tType:  \"A\",\n\t\tValue: \"6.6.6.6\",\n\t})\n\tb, _ := json.Marshal(extraRecords) //nolint:errchkjson\n\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{\n\t\ttsic.WithPackages(\"python3\", \"curl\", \"bind-tools\"),\n\t},\n\t\thsic.WithTestName(\"extrarecords\"),\n\t\thsic.WithConfigEnv(map[string]string{\n\t\t\t// Disable global nameservers to make the test run offline.\n\t\t\t\"HEADSCALE_DNS_NAMESERVERS_GLOBAL\": \"\",\n\t\t\t\"HEADSCALE_DNS_EXTRA_RECORDS_PATH\": erPath,\n\t\t}),\n\t\thsic.WithFileInContainer(erPath, b),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t// assertClientsState(t, allClients)\n\n\t// Poor mans cache\n\t_, err = scenario.ListTailscaleClientsFQDNs()\n\trequireNoErrListFQDN(t, err)\n\n\t_, err = 
scenario.ListTailscaleClientsIPs()\n\trequireNoErrListClientIPs(t, err)\n\n\tfor _, client := range allClients {\n\t\tassertCommandOutputContains(t, client, []string{\"dig\", \"test.myvpn.example.com\"}, \"6.6.6.6\")\n\t}\n\n\ths, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\t// Write the file directly into place from the docker API.\n\tb0, _ := json.Marshal([]tailcfg.DNSRecord{ //nolint:errchkjson\n\t\t{\n\t\t\tName:  \"docker.myvpn.example.com\",\n\t\t\tType:  \"A\",\n\t\t\tValue: \"2.2.2.2\",\n\t\t},\n\t})\n\n\terr = hs.WriteFile(erPath, b0)\n\trequire.NoError(t, err)\n\n\tfor _, client := range allClients {\n\t\tassertCommandOutputContains(t, client, []string{\"dig\", \"docker.myvpn.example.com\"}, \"2.2.2.2\")\n\t}\n\n\t// Write a new file and move it to the path to ensure the reload\n\t// works when a file is moved atomically into place.\n\textraRecords = append(extraRecords, tailcfg.DNSRecord{\n\t\tName:  \"otherrecord.myvpn.example.com\",\n\t\tType:  \"A\",\n\t\tValue: \"7.7.7.7\",\n\t})\n\tb2, _ := json.Marshal(extraRecords) //nolint:errchkjson\n\n\terr = hs.WriteFile(erPath+\"2\", b2)\n\trequire.NoError(t, err)\n\t_, err = hs.Execute([]string{\"mv\", erPath + \"2\", erPath})\n\trequire.NoError(t, err)\n\n\tfor _, client := range allClients {\n\t\tassertCommandOutputContains(t, client, []string{\"dig\", \"test.myvpn.example.com\"}, \"6.6.6.6\")\n\t\tassertCommandOutputContains(t, client, []string{\"dig\", \"otherrecord.myvpn.example.com\"}, \"7.7.7.7\")\n\t}\n\n\t// Write a new file and copy it to the path to ensure the reload\n\t// works when a file is copied into place.\n\tb3, _ := json.Marshal([]tailcfg.DNSRecord{ //nolint:errchkjson\n\t\t{\n\t\t\tName:  \"copy.myvpn.example.com\",\n\t\t\tType:  \"A\",\n\t\t\tValue: \"8.8.8.8\",\n\t\t},\n\t})\n\n\terr = hs.WriteFile(erPath+\"3\", b3)\n\trequire.NoError(t, err)\n\t_, err = hs.Execute([]string{\"cp\", erPath + \"3\", erPath})\n\trequire.NoError(t, err)\n\n\tfor _, client := range allClients {\n\t\tassertCommandOutputContains(t, client, []string{\"dig\", \"copy.myvpn.example.com\"}, \"8.8.8.8\")\n\t}\n\n\t// Write in place to ensure pipe-like behaviour works\n\tb4, _ := json.Marshal([]tailcfg.DNSRecord{ //nolint:errchkjson\n\t\t{\n\t\t\tName:  \"docker.myvpn.example.com\",\n\t\t\tType:  \"A\",\n\t\t\tValue: \"9.9.9.9\",\n\t\t},\n\t})\n\tcommand := []string{\"echo\", fmt.Sprintf(\"'%s'\", string(b4)), \">\", erPath}\n\t_, err = hs.Execute([]string{\"bash\", \"-c\", strings.Join(command, \" \")})\n\trequire.NoError(t, err)\n\n\tfor _, client := range allClients {\n\t\tassertCommandOutputContains(t, client, []string{\"dig\", \"docker.myvpn.example.com\"}, \"9.9.9.9\")\n\t}\n\n\t// Delete the file and create a new one to ensure it is picked up again.\n\t_, err = hs.Execute([]string{\"rm\", erPath})\n\trequire.NoError(t, err)\n\n\t// The same records should still resolve, as the cache is not cleared on delete.\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tfor _, client := range allClients {\n\t\t\tresult, _, err := client.Execute([]string{\"dig\", \"docker.myvpn.example.com\"})\n\t\t\tassert.NoError(ct, err)\n\t\t\tassert.Contains(ct, result, \"9.9.9.9\")\n\t\t}\n\t}, 10*time.Second, 1*time.Second)\n\n\t// Write a new file; the backoff mechanism should make the filewatcher pick it up\n\t// again.\n\terr = hs.WriteFile(erPath, b3)\n\trequire.NoError(t, err)\n\n\tfor _, client := range allClients {\n\t\tassertCommandOutputContains(t, client, []string{\"dig\", \"copy.myvpn.example.com\"}, \"8.8.8.8\")\n\t}\n}\n"
  },
  {
    "path": "integration/dockertestutil/build.go",
    "content": "package dockertestutil\n\nimport (\n\t\"context\"\n\t\"os/exec\"\n\t\"time\"\n)\n\n// RunDockerBuildForDiagnostics runs docker build manually to get detailed error output.\n// This is used when a docker build fails to provide more detailed diagnostic information\n// than what dockertest typically provides.\n//\n// Returns the build output regardless of success/failure, and an error if the build failed.\nfunc RunDockerBuildForDiagnostics(contextDir, dockerfile string) (string, error) {\n\t// Use a context with timeout to prevent hanging builds\n\tconst buildTimeout = 10 * time.Minute\n\n\tctx, cancel := context.WithTimeout(context.Background(), buildTimeout)\n\tdefer cancel()\n\n\tcmd := exec.CommandContext(ctx, \"docker\", \"build\", \"--progress=plain\", \"--no-cache\", \"-f\", dockerfile, contextDir)\n\toutput, err := cmd.CombinedOutput()\n\n\treturn string(output), err\n}\n"
  },
  {
    "path": "integration/dockertestutil/config.go",
    "content": "package dockertestutil\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/ory/dockertest/v3\"\n)\n\nconst (\n\t// TimestampFormatRunID is used for generating unique run identifiers\n\t// Format: \"20060102-150405\" provides compact date-time for file/directory names.\n\tTimestampFormatRunID = \"20060102-150405\"\n)\n\n// GetIntegrationRunID returns the run ID for the current integration test session.\n// This is set by the hi tool and passed through environment variables.\nfunc GetIntegrationRunID() string {\n\treturn os.Getenv(\"HEADSCALE_INTEGRATION_RUN_ID\")\n}\n\n// DockerAddIntegrationLabels adds integration test labels to Docker RunOptions.\n// This allows the hi tool to identify containers belonging to specific test runs.\n// This function should be called before passing RunOptions to dockertest functions.\nfunc DockerAddIntegrationLabels(opts *dockertest.RunOptions, testType string) {\n\trunID := GetIntegrationRunID()\n\tif runID == \"\" {\n\t\tpanic(\"HEADSCALE_INTEGRATION_RUN_ID environment variable is required\")\n\t}\n\n\tif opts.Labels == nil {\n\t\topts.Labels = make(map[string]string)\n\t}\n\n\topts.Labels[\"hi.run-id\"] = runID\n\topts.Labels[\"hi.test-type\"] = testType\n}\n\n// GenerateRunID creates a unique run identifier with timestamp and random hash.\n// Format: YYYYMMDD-HHMMSS-HASH (e.g., 20250619-143052-a1b2c3).\nfunc GenerateRunID() string {\n\tnow := time.Now()\n\ttimestamp := now.Format(TimestampFormatRunID)\n\n\t// Add a short random hash to ensure uniqueness\n\trandomHash := util.MustGenerateRandomStringDNSSafe(6)\n\n\treturn fmt.Sprintf(\"%s-%s\", timestamp, randomHash)\n}\n\n// ExtractRunIDFromContainerName extracts the run ID from container name.\n// Expects format: \"prefix-YYYYMMDD-HHMMSS-HASH\".\nfunc ExtractRunIDFromContainerName(containerName string) string {\n\tparts := strings.Split(containerName, \"-\")\n\tif len(parts) >= 3 {\n\t\t// Return the last three parts as the run ID (YYYYMMDD-HHMMSS-HASH)\n\t\treturn strings.Join(parts[len(parts)-3:], \"-\")\n\t}\n\n\tpanic(\"unexpected container name format: \" + containerName)\n}\n\n// IsRunningInContainer checks if the current process is running inside a Docker container.\n// This is used by tests to determine if they should run integration tests.\nfunc IsRunningInContainer() bool {\n\t// Check for the common indicator that we're in a container\n\t// This could be improved with more robust detection if needed\n\t_, err := os.Stat(\"/.dockerenv\")\n\treturn err == nil\n}\n"
  },
  {
    "path": "integration/dockertestutil/execute.go",
    "content": "package dockertestutil\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/ory/dockertest/v3\"\n)\n\nconst dockerExecuteTimeout = time.Second * 10\n\nvar (\n\tErrDockertestCommandFailed  = errors.New(\"dockertest command failed\")\n\tErrDockertestCommandTimeout = errors.New(\"dockertest command timed out\")\n)\n\ntype ExecuteCommandConfig struct {\n\ttimeout time.Duration\n}\n\ntype ExecuteCommandOption func(*ExecuteCommandConfig) error\n\nfunc ExecuteCommandTimeout(timeout time.Duration) ExecuteCommandOption {\n\treturn ExecuteCommandOption(func(conf *ExecuteCommandConfig) error {\n\t\tconf.timeout = timeout\n\t\treturn nil\n\t})\n}\n\n// buffer is a goroutine safe bytes.buffer.\ntype buffer struct {\n\tstore bytes.Buffer\n\tmutex sync.Mutex\n}\n\n// Write appends the contents of p to the buffer, growing the buffer as needed. It returns\n// the number of bytes written.\nfunc (b *buffer) Write(p []byte) (int, error) {\n\tb.mutex.Lock()\n\tdefer b.mutex.Unlock()\n\n\treturn b.store.Write(p)\n}\n\n// String returns the contents of the unread portion of the buffer\n// as a string.\nfunc (b *buffer) String() string {\n\tb.mutex.Lock()\n\tdefer b.mutex.Unlock()\n\n\treturn b.store.String()\n}\n\nfunc ExecuteCommand(\n\tresource *dockertest.Resource,\n\tcmd []string,\n\tenv []string,\n\toptions ...ExecuteCommandOption,\n) (string, string, error) {\n\tstdout := buffer{}\n\tstderr := buffer{}\n\n\texecConfig := ExecuteCommandConfig{\n\t\ttimeout: dockerExecuteTimeout,\n\t}\n\n\tfor _, opt := range options {\n\t\terr := opt(&execConfig)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", fmt.Errorf(\"execute-command/options: %w\", err)\n\t\t}\n\t}\n\n\ttype result struct {\n\t\texitCode int\n\t\terr      error\n\t}\n\n\tresultChan := make(chan result, 1)\n\n\t// Run your long running function in it's own goroutine and pass back it's\n\t// response into our channel.\n\tgo func() {\n\t\texitCode, err := resource.Exec(\n\t\t\tcmd,\n\t\t\tdockertest.ExecOptions{\n\t\t\t\tEnv:    append(env, \"HEADSCALE_LOG_LEVEL=info\"),\n\t\t\t\tStdOut: &stdout,\n\t\t\t\tStdErr: &stderr,\n\t\t\t},\n\t\t)\n\n\t\tresultChan <- result{exitCode, err}\n\t}()\n\n\t// Listen on our channel AND a timeout channel - which ever happens first.\n\tselect {\n\tcase res := <-resultChan:\n\t\tif res.err != nil {\n\t\t\treturn stdout.String(), stderr.String(), fmt.Errorf(\"command failed, stderr: %s: %w\", stderr.String(), res.err)\n\t\t}\n\n\t\tif res.exitCode != 0 {\n\t\t\t// Uncomment for debugging\n\t\t\t// log.Println(\"Command: \", cmd)\n\t\t\t// log.Println(\"stdout: \", stdout.String())\n\t\t\t// log.Println(\"stderr: \", stderr.String())\n\t\t\treturn stdout.String(), stderr.String(), fmt.Errorf(\"command failed, stderr: %s: %w\", stderr.String(), ErrDockertestCommandFailed)\n\t\t}\n\n\t\treturn stdout.String(), stderr.String(), nil\n\tcase <-time.After(execConfig.timeout):\n\t\treturn stdout.String(), stderr.String(), fmt.Errorf(\"command failed, stderr: %s: %w\", stderr.String(), ErrDockertestCommandTimeout)\n\t}\n}\n"
  },
  {
    "path": "integration/dockertestutil/logs.go",
    "content": "package dockertestutil\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\n\t\"github.com/ory/dockertest/v3\"\n\t\"github.com/ory/dockertest/v3/docker\"\n)\n\nconst filePerm = 0o644\n\nfunc WriteLog(\n\tpool *dockertest.Pool,\n\tresource *dockertest.Resource,\n\tstdout io.Writer,\n\tstderr io.Writer,\n) error {\n\treturn pool.Client.Logs(\n\t\tdocker.LogsOptions{\n\t\t\tContext:      context.TODO(),\n\t\t\tContainer:    resource.Container.ID,\n\t\t\tOutputStream: stdout,\n\t\t\tErrorStream:  stderr,\n\t\t\tTail:         \"all\",\n\t\t\tRawTerminal:  false,\n\t\t\tStdout:       true,\n\t\t\tStderr:       true,\n\t\t\tFollow:       false,\n\t\t\tTimestamps:   false,\n\t\t},\n\t)\n}\n\nfunc SaveLog(\n\tpool *dockertest.Pool,\n\tresource *dockertest.Resource,\n\tbasePath string,\n) (string, string, error) {\n\terr := os.MkdirAll(basePath, os.ModePerm)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tvar stdout, stderr bytes.Buffer\n\n\terr = WriteLog(pool, resource, &stdout, &stderr)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tlog.Printf(\"Saving logs for %s to %s\\n\", resource.Container.Name, basePath)\n\n\tstdoutPath := path.Join(basePath, resource.Container.Name+\".stdout.log\")\n\n\terr = os.WriteFile(\n\t\tstdoutPath,\n\t\tstdout.Bytes(),\n\t\tfilePerm,\n\t)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tstderrPath := path.Join(basePath, resource.Container.Name+\".stderr.log\")\n\n\terr = os.WriteFile(\n\t\tstderrPath,\n\t\tstderr.Bytes(),\n\t\tfilePerm,\n\t)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\treturn stdoutPath, stderrPath, nil\n}\n"
  },
  {
    "path": "integration/dockertestutil/network.go",
    "content": "package dockertestutil\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/ory/dockertest/v3\"\n\t\"github.com/ory/dockertest/v3/docker\"\n)\n\nvar ErrContainerNotFound = errors.New(\"container not found\")\n\nfunc GetFirstOrCreateNetwork(pool *dockertest.Pool, name string) (*dockertest.Network, error) {\n\tnetworks, err := pool.NetworksByName(name)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"looking up network names: %w\", err)\n\t}\n\n\tif len(networks) == 0 {\n\t\tif _, err := pool.CreateNetwork(name); err == nil { //nolint:noinlineerr // intentional inline check\n\t\t\t// Create does not give us an updated version of the resource, so we need to\n\t\t\t// get it again.\n\t\t\tnetworks, err := pool.NetworksByName(name)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\treturn &networks[0], nil\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"creating network: %w\", err)\n\t\t}\n\t}\n\n\treturn &networks[0], nil\n}\n\nfunc AddContainerToNetwork(\n\tpool *dockertest.Pool,\n\tnetwork *dockertest.Network,\n\ttestContainer string,\n) error {\n\tcontainers, err := pool.Client.ListContainers(docker.ListContainersOptions{\n\t\tAll: true,\n\t\tFilters: map[string][]string{\n\t\t\t\"name\": {testContainer},\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = pool.Client.ConnectNetwork(network.Network.ID, docker.NetworkConnectionOptions{\n\t\tContainer: containers[0].ID,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// TODO(kradalby): This doesn't work reliably, but calling the exact same functions\n\t// seem to work fine...\n\t// if container, ok := pool.ContainerByName(\"/\" + testContainer); ok {\n\t// \terr := container.ConnectToNetwork(network)\n\t// \tif err != nil {\n\t// \t\treturn err\n\t// \t}\n\t// }\n\n\treturn nil\n}\n\n// RandomFreeHostPort asks the kernel for a free open port that is ready to use.\n// (from https://github.com/phayes/freeport)\nfunc RandomFreeHostPort() (int, error) {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tlistener, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer listener.Close()\n\t//nolint:forcetypeassert\n\treturn listener.Addr().(*net.TCPAddr).Port, nil\n}\n\n// CleanUnreferencedNetworks removes networks that are not referenced by any containers.\nfunc CleanUnreferencedNetworks(pool *dockertest.Pool) error {\n\tfilter := \"name=hs-\"\n\n\tnetworks, err := pool.NetworksByName(filter)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting networks by filter %q: %w\", filter, err)\n\t}\n\n\tfor _, network := range networks {\n\t\tif len(network.Network.Containers) == 0 {\n\t\t\terr := pool.RemoveNetwork(&network)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"removing network %s: %s\", network.Network.Name, err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// CleanImagesInCI removes images if running in CI.\n// It only removes dangling (untagged) images to avoid forcing rebuilds.\n// Tagged images (golang:*, tailscale/tailscale:*, etc.) 
are automatically preserved.\nfunc CleanImagesInCI(pool *dockertest.Pool) error {\n\tif !util.IsCI() {\n\t\tlog.Println(\"Skipping image cleanup outside of CI\")\n\t\treturn nil\n\t}\n\n\timages, err := pool.Client.ListImages(docker.ListImagesOptions{})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting images: %w\", err)\n\t}\n\n\tremovedCount := 0\n\n\tfor _, image := range images {\n\t\t// Only remove dangling (untagged) images to avoid forcing rebuilds\n\t\t// Dangling images have no RepoTags or only have \"<none>:<none>\"\n\t\tif len(image.RepoTags) == 0 || (len(image.RepoTags) == 1 && image.RepoTags[0] == \"<none>:<none>\") {\n\t\t\tlog.Printf(\"Removing dangling image: %s\", image.ID[:12])\n\n\t\t\terr := pool.Client.RemoveImage(image.ID)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Warning: failed to remove image %s: %v\", image.ID[:12], err)\n\t\t\t} else {\n\t\t\t\tremovedCount++\n\t\t\t}\n\t\t}\n\t}\n\n\tif removedCount > 0 {\n\t\tlog.Printf(\"Removed %d dangling images in CI\", removedCount)\n\t} else {\n\t\tlog.Println(\"No dangling images to remove in CI\")\n\t}\n\n\treturn nil\n}\n\n// DockerRestartPolicy sets the restart policy for containers.\nfunc DockerRestartPolicy(config *docker.HostConfig) {\n\tconfig.RestartPolicy = docker.RestartPolicy{\n\t\tName: \"unless-stopped\",\n\t}\n}\n\n// DockerAllowLocalIPv6 allows IPv6 traffic within the container.\nfunc DockerAllowLocalIPv6(config *docker.HostConfig) {\n\tconfig.NetworkMode = \"default\"\n\tconfig.Sysctls = map[string]string{\n\t\t\"net.ipv6.conf.all.disable_ipv6\": \"0\",\n\t}\n}\n\n// DockerAllowNetworkAdministration gives the container network administration capabilities.\nfunc DockerAllowNetworkAdministration(config *docker.HostConfig) {\n\tconfig.CapAdd = append(config.CapAdd, \"NET_ADMIN\")\n\tconfig.Privileged = true\n}\n\n// DockerMemoryLimit sets memory limit and disables OOM kill for containers.\nfunc DockerMemoryLimit(config *docker.HostConfig) {\n\tconfig.Memory = 2 * 1024 * 1024 * 1024 // 2GB in bytes\n\tconfig.OOMKillDisable = true\n}\n"
  },
  {
    "path": "integration/dsic/dsic.go",
    "content": "package dsic\n\nimport (\n\t\"crypto/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/juanfont/headscale/integration/dockertestutil\"\n\t\"github.com/juanfont/headscale/integration/integrationutil\"\n\t\"github.com/ory/dockertest/v3\"\n\t\"github.com/ory/dockertest/v3/docker\"\n)\n\nconst (\n\tdsicHashLength       = 6\n\tdockerContextPath    = \"../.\"\n\tcaCertRoot           = \"/usr/local/share/ca-certificates\"\n\tDERPerCertRoot       = \"/usr/local/share/derper-certs\"\n\tdockerExecuteTimeout = 60 * time.Second\n)\n\nvar errDERPerStatusCodeNotOk = errors.New(\"DERPer status code not OK\")\n\n// DERPServerInContainer represents DERP Server in Container (DSIC).\ntype DERPServerInContainer struct {\n\tversion  string\n\thostname string\n\n\tpool      *dockertest.Pool\n\tcontainer *dockertest.Resource\n\tnetworks  []*dockertest.Network\n\n\tstunPort            int\n\tderpPort            int\n\tcaCerts             [][]byte\n\ttlsCACert           []byte\n\ttlsCert             []byte\n\ttlsKey              []byte\n\twithExtraHosts      []string\n\twithVerifyClientURL string\n\tworkdir             string\n}\n\n// Option represent optional settings that can be given to a\n// DERPer instance.\ntype Option = func(c *DERPServerInContainer)\n\n// WithCACert adds it to the trusted surtificate of the Tailscale container.\nfunc WithCACert(cert []byte) Option {\n\treturn func(dsic *DERPServerInContainer) {\n\t\tdsic.caCerts = append(dsic.caCerts, cert)\n\t}\n}\n\n// WithOrCreateNetwork sets the Docker container network to use with\n// the DERPer instance, if the parameter is nil, a new network,\n// isolating the DERPer, will be created. If a network is\n// passed, the DERPer instance will join the given network.\nfunc WithOrCreateNetwork(network *dockertest.Network) Option {\n\treturn func(dsic *DERPServerInContainer) {\n\t\tif network != nil {\n\t\t\tdsic.networks = append(dsic.networks, network)\n\n\t\t\treturn\n\t\t}\n\n\t\tnetwork, err := dockertestutil.GetFirstOrCreateNetwork(\n\t\t\tdsic.pool,\n\t\t\tdsic.hostname+\"-network\",\n\t\t)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"creating network: %s\", err)\n\t\t}\n\n\t\tdsic.networks = append(dsic.networks, network)\n\t}\n}\n\n// WithDockerWorkdir allows the docker working directory to be set.\nfunc WithDockerWorkdir(dir string) Option {\n\treturn func(tsic *DERPServerInContainer) {\n\t\ttsic.workdir = dir\n\t}\n}\n\n// WithVerifyClientURL sets the URL to verify the client.\nfunc WithVerifyClientURL(url string) Option {\n\treturn func(tsic *DERPServerInContainer) {\n\t\ttsic.withVerifyClientURL = url\n\t}\n}\n\n// WithExtraHosts adds extra hosts to the container.\nfunc WithExtraHosts(hosts []string) Option {\n\treturn func(tsic *DERPServerInContainer) {\n\t\ttsic.withExtraHosts = hosts\n\t}\n}\n\n// buildEntrypoint builds the container entrypoint command based on configuration.\n// It constructs proper wait conditions instead of fixed sleeps:\n// 1. Wait for network to be ready\n// 2. Wait for TLS cert to be written (always written after container start)\n// 3. Wait for CA certs if configured\n// 4. Update CA certificates\n// 5. Run derper with provided arguments.\nfunc (dsic *DERPServerInContainer) buildEntrypoint(derperArgs string) []string {\n\tvar commands []string\n\n\t// Wait for network to be ready\n\tcommands = append(commands, \"while ! 
ip route show default >/dev/null 2>&1; do sleep 0.1; done\")\n\n\t// Wait for TLS cert to be written (always written after container start)\n\tcommands = append(commands,\n\t\tfmt.Sprintf(\"while [ ! -f %s/%s.crt ]; do sleep 0.1; done\", DERPerCertRoot, dsic.hostname))\n\n\t// If CA certs are configured, wait for them to be written\n\tif len(dsic.caCerts) > 0 {\n\t\tcommands = append(commands,\n\t\t\tfmt.Sprintf(\"while [ ! -f %s/user-0.crt ]; do sleep 0.1; done\", caCertRoot))\n\t}\n\n\t// Update CA certificates\n\tcommands = append(commands, \"update-ca-certificates\")\n\n\t// Run derper\n\tcommands = append(commands, \"derper \"+derperArgs)\n\n\treturn []string{\"/bin/sh\", \"-c\", strings.Join(commands, \" ; \")}\n}\n\n// New returns a new DERPServerInContainer instance.\nfunc New(\n\tpool *dockertest.Pool,\n\tversion string,\n\tnetworks []*dockertest.Network,\n\topts ...Option,\n) (*DERPServerInContainer, error) {\n\thash, err := util.GenerateRandomStringDNSSafe(dsicHashLength)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Include run ID in hostname for easier identification of which test run owns this container\n\trunID := dockertestutil.GetIntegrationRunID()\n\n\tvar hostname string\n\n\tif runID != \"\" {\n\t\t// Use last 6 chars of run ID (the random hash part) for brevity\n\t\trunIDShort := runID[len(runID)-6:]\n\t\thostname = fmt.Sprintf(\"derp-%s-%s-%s\", runIDShort, strings.ReplaceAll(version, \".\", \"-\"), hash)\n\t} else {\n\t\thostname = fmt.Sprintf(\"derp-%s-%s\", strings.ReplaceAll(version, \".\", \"-\"), hash)\n\t}\n\n\ttlsCACert, tlsCert, tlsKey, err := integrationutil.CreateCertificate(hostname)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating certificates for derp test: %w\", err)\n\t}\n\n\tdsic := &DERPServerInContainer{\n\t\tversion:   version,\n\t\thostname:  hostname,\n\t\tpool:      pool,\n\t\tnetworks:  networks,\n\t\ttlsCACert: tlsCACert,\n\t\ttlsCert:   tlsCert,\n\t\ttlsKey:    tlsKey,\n\t\tstunPort:  3478, //nolint\n\t\tderpPort:  443,  //nolint\n\t}\n\n\t// Install the CA cert so the DERP server trusts its own certificate\n\t// and any headscale CA certs passed via WithCACert.\n\tdsic.caCerts = append(dsic.caCerts, tlsCACert)\n\n\tfor _, opt := range opts {\n\t\topt(dsic)\n\t}\n\n\tvar cmdArgs strings.Builder\n\tfmt.Fprintf(&cmdArgs, \"--hostname=%s\", hostname)\n\tfmt.Fprintf(&cmdArgs, \" --certmode=manual\")\n\tfmt.Fprintf(&cmdArgs, \" --certdir=%s\", DERPerCertRoot)\n\tfmt.Fprintf(&cmdArgs, \" --a=:%d\", dsic.derpPort)\n\tfmt.Fprintf(&cmdArgs, \" --stun=true\")\n\tfmt.Fprintf(&cmdArgs, \" --stun-port=%d\", dsic.stunPort)\n\n\tif dsic.withVerifyClientURL != \"\" {\n\t\tfmt.Fprintf(&cmdArgs, \" --verify-client-url=%s\", dsic.withVerifyClientURL)\n\t}\n\n\trunOptions := &dockertest.RunOptions{\n\t\tName:       hostname,\n\t\tNetworks:   dsic.networks,\n\t\tExtraHosts: dsic.withExtraHosts,\n\t\tEntrypoint: dsic.buildEntrypoint(cmdArgs.String()),\n\t\tExposedPorts: []string{\n\t\t\t\"80/tcp\",\n\t\t\tfmt.Sprintf(\"%d/tcp\", dsic.derpPort),\n\t\t\tfmt.Sprintf(\"%d/udp\", dsic.stunPort),\n\t\t},\n\t}\n\n\tif dsic.workdir != \"\" {\n\t\trunOptions.WorkingDir = dsic.workdir\n\t}\n\n\t// dockertest isn't very good at handling containers that have already\n\t// been created; this is an attempt to make sure this container isn't\n\t// present.\n\terr = pool.RemoveContainerByName(hostname)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar container *dockertest.Resource\n\n\tbuildOptions := &dockertest.BuildOptions{\n\t\tDockerfile: 
\"Dockerfile.derper\",\n\t\tContextDir: dockerContextPath,\n\t\tBuildArgs:  []docker.BuildArg{},\n\t}\n\n\tswitch version {\n\tcase \"head\":\n\t\tbuildOptions.BuildArgs = append(buildOptions.BuildArgs, docker.BuildArg{\n\t\t\tName:  \"VERSION_BRANCH\",\n\t\t\tValue: \"main\",\n\t\t})\n\tdefault:\n\t\tbuildOptions.BuildArgs = append(buildOptions.BuildArgs, docker.BuildArg{\n\t\t\tName:  \"VERSION_BRANCH\",\n\t\t\tValue: \"v\" + version,\n\t\t})\n\t}\n\t// Add integration test labels if running under hi tool\n\tdockertestutil.DockerAddIntegrationLabels(runOptions, \"derp\")\n\n\tcontainer, err = pool.BuildAndRunWithBuildOptions(\n\t\tbuildOptions,\n\t\trunOptions,\n\t\tdockertestutil.DockerRestartPolicy,\n\t\tdockertestutil.DockerAllowLocalIPv6,\n\t\tdockertestutil.DockerAllowNetworkAdministration,\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"%s starting tailscale DERPer container (version: %s): %w\",\n\t\t\thostname,\n\t\t\tversion,\n\t\t\terr,\n\t\t)\n\t}\n\n\tlog.Printf(\"Created %s container\\n\", hostname)\n\n\tdsic.container = container\n\n\tfor i, cert := range dsic.caCerts {\n\t\terr = dsic.WriteFile(fmt.Sprintf(\"%s/user-%d.crt\", caCertRoot, i), cert)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"writing TLS certificate to container: %w\", err)\n\t\t}\n\t}\n\n\tif len(dsic.tlsCert) != 0 {\n\t\terr = dsic.WriteFile(fmt.Sprintf(\"%s/%s.crt\", DERPerCertRoot, dsic.hostname), dsic.tlsCert)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"writing TLS certificate to container: %w\", err)\n\t\t}\n\t}\n\n\tif len(dsic.tlsKey) != 0 {\n\t\terr = dsic.WriteFile(fmt.Sprintf(\"%s/%s.key\", DERPerCertRoot, dsic.hostname), dsic.tlsKey)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"writing TLS key to container: %w\", err)\n\t\t}\n\t}\n\n\treturn dsic, nil\n}\n\n// Shutdown stops and cleans up the DERPer container.\nfunc (t *DERPServerInContainer) Shutdown() error {\n\terr := t.SaveLog(\"/tmp/control\")\n\tif err != nil {\n\t\tlog.Printf(\n\t\t\t\"saving log from %s: %s\",\n\t\t\tt.hostname,\n\t\t\tfmt.Errorf(\"saving log: %w\", err),\n\t\t)\n\t}\n\n\treturn t.pool.Purge(t.container)\n}\n\n// GetCert returns the CA certificate that clients should trust to\n// verify this DERP server's TLS certificate.\nfunc (t *DERPServerInContainer) GetCert() []byte {\n\treturn t.tlsCACert\n}\n\n// Hostname returns the hostname of the DERPer instance.\nfunc (t *DERPServerInContainer) Hostname() string {\n\treturn t.hostname\n}\n\n// Version returns the running DERPer version of the instance.\nfunc (t *DERPServerInContainer) Version() string {\n\treturn t.version\n}\n\n// ID returns the Docker container ID of the DERPServerInContainer\n// instance.\nfunc (t *DERPServerInContainer) ID() string {\n\treturn t.container.Container.ID\n}\n\nfunc (t *DERPServerInContainer) GetHostname() string {\n\treturn t.hostname\n}\n\n// GetSTUNPort returns the STUN port of the DERPer instance.\nfunc (t *DERPServerInContainer) GetSTUNPort() int {\n\treturn t.stunPort\n}\n\n// GetDERPPort returns the DERP port of the DERPer instance.\nfunc (t *DERPServerInContainer) GetDERPPort() int {\n\treturn t.derpPort\n}\n\n// WaitForRunning blocks until the DERPer instance is ready to be used.\nfunc (t *DERPServerInContainer) WaitForRunning() error {\n\turl := \"https://\" + net.JoinHostPort(t.GetHostname(), strconv.Itoa(t.GetDERPPort())) + \"/\"\n\tlog.Printf(\"waiting for DERPer to be ready at %s\", url)\n\n\tinsecureTransport := http.DefaultTransport.(*http.Transport).Clone()      
//nolint\n\tinsecureTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} //nolint\n\tclient := &http.Client{Transport: insecureTransport}\n\n\treturn t.pool.Retry(func() error {\n\t\tresp, err := client.Get(url) //nolint\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"DERPer is not ready: %w\", err)\n\t\t}\n\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\treturn errDERPerStatusCodeNotOk\n\t\t}\n\n\t\treturn nil\n\t})\n}\n\n// ConnectToNetwork connects the DERPer instance to a network.\nfunc (t *DERPServerInContainer) ConnectToNetwork(network *dockertest.Network) error {\n\treturn t.container.ConnectToNetwork(network)\n}\n\n// WriteFile saves a file inside the container.\nfunc (t *DERPServerInContainer) WriteFile(path string, data []byte) error {\n\treturn integrationutil.WriteFileToContainer(t.pool, t.container, path, data)\n}\n\n// SaveLog saves the current stdout log of the container to a path\n// on the host system.\nfunc (t *DERPServerInContainer) SaveLog(path string) error {\n\t_, _, err := dockertestutil.SaveLog(t.pool, t.container, path)\n\n\treturn err\n}\n"
  },
  {
    "path": "integration/embedded_derp_test.go",
    "content": "package integration\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/integration/hsic\"\n\t\"github.com/juanfont/headscale/integration/tsic\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/key\"\n)\n\ntype ClientsSpec struct {\n\tPlain         int\n\tWebsocketDERP int\n}\n\nfunc TestDERPServerScenario(t *testing.T) {\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 1,\n\t\tUsers:        []string{\"user1\", \"user2\", \"user3\"},\n\t\tNetworks: map[string][]string{\n\t\t\t\"usernet1\": {\"user1\"},\n\t\t\t\"usernet2\": {\"user2\"},\n\t\t\t\"usernet3\": {\"user3\"},\n\t\t},\n\t}\n\n\tderpServerScenario(t, spec, \"derp-tcp\", false, func(scenario *Scenario) {\n\t\tallClients, err := scenario.ListTailscaleClients()\n\t\trequireNoErrListClients(t, err)\n\t\tt.Logf(\"checking %d clients for websocket connections\", len(allClients))\n\n\t\tfor _, client := range allClients {\n\t\t\tif didClientUseWebsocketForDERP(t, client) {\n\t\t\t\tt.Logf(\n\t\t\t\t\t\"client %q used websocket a connection, but was not expected to\",\n\t\t\t\t\tclient.Hostname(),\n\t\t\t\t)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t}\n\n\t\thsServer, err := scenario.Headscale()\n\t\trequireNoErrGetHeadscale(t, err)\n\n\t\tderpRegion := tailcfg.DERPRegion{\n\t\t\tRegionCode: \"test-derpverify\",\n\t\t\tRegionName: \"TestDerpVerify\",\n\t\t\tNodes: []*tailcfg.DERPNode{\n\t\t\t\t{\n\t\t\t\t\tName:             \"TestDerpVerify\",\n\t\t\t\t\tRegionID:         900,\n\t\t\t\t\tHostName:         hsServer.GetHostname(),\n\t\t\t\t\tSTUNPort:         3478,\n\t\t\t\t\tSTUNOnly:         false,\n\t\t\t\t\tDERPPort:         443,\n\t\t\t\t\tInsecureForTests: true,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tfakeKey := key.NewNode()\n\t\tDERPVerify(t, fakeKey, derpRegion, false)\n\t})\n}\n\nfunc TestDERPServerWebsocketScenario(t *testing.T) {\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 1,\n\t\tUsers:        []string{\"user1\", \"user2\", \"user3\"},\n\t\tNetworks: map[string][]string{\n\t\t\t\"usernet1\": {\"user1\"},\n\t\t\t\"usernet2\": {\"user2\"},\n\t\t\t\"usernet3\": {\"user3\"},\n\t\t},\n\t}\n\n\tderpServerScenario(t, spec, \"derp-ws\", true, func(scenario *Scenario) {\n\t\tallClients, err := scenario.ListTailscaleClients()\n\t\trequireNoErrListClients(t, err)\n\t\tt.Logf(\"checking %d clients for websocket connections\", len(allClients))\n\n\t\tfor _, client := range allClients {\n\t\t\tif !didClientUseWebsocketForDERP(t, client) {\n\t\t\t\tt.Logf(\n\t\t\t\t\t\"client %q does not seem to have used a websocket connection, even though it was expected to do so\",\n\t\t\t\t\tclient.Hostname(),\n\t\t\t\t)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t}\n\t})\n}\n\n// This function implements the common parts of a DERP scenario,\n// we *want* it to show up in stacktraces,\n// so marking it as a test helper would be counterproductive.\n//\n//nolint:thelper\nfunc derpServerScenario(\n\tt *testing.T,\n\tspec ScenarioSpec,\n\ttestName string,\n\twebsocket bool,\n\tfurtherAssertions ...func(*Scenario),\n) {\n\tIntegrationSkip(t)\n\n\tscenario, err := NewScenario(spec)\n\trequire.NoError(t, err)\n\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{\n\t\t\ttsic.WithWebsocketDERP(websocket),\n\t\t},\n\t\thsic.WithTestName(testName),\n\t\t// Expose STUN port for DERP NAT traversal.\n\t\thsic.WithExtraPorts([]string{\"3478/udp\"}),\n\t\t// DERP clients expect the server on the standard HTTPS 
port.\n\t\thsic.WithPort(443),\n\t\thsic.WithConfigEnv(map[string]string{\n\t\t\t\"HEADSCALE_DERP_AUTO_UPDATE_ENABLED\":   \"true\",\n\t\t\t\"HEADSCALE_DERP_UPDATE_FREQUENCY\":      \"10s\",\n\t\t\t\"HEADSCALE_LISTEN_ADDR\":                \"0.0.0.0:443\",\n\t\t\t\"HEADSCALE_DERP_SERVER_VERIFY_CLIENTS\": \"true\",\n\t\t}),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\tallHostnames, err := scenario.ListTailscaleClientsFQDNs()\n\trequireNoErrListFQDN(t, err)\n\n\tfor _, client := range allClients {\n\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t\tstatus, err := client.Status()\n\t\t\tassert.NoError(ct, err, \"Failed to get status for client %s\", client.Hostname())\n\n\t\t\tfor _, health := range status.Health {\n\t\t\t\tassert.NotContains(ct, health, \"could not connect to any relay server\",\n\t\t\t\t\t\"Client %s should be connected to DERP relay\", client.Hostname())\n\t\t\t\tassert.NotContains(ct, health, \"could not connect to the 'Headscale Embedded DERP' relay server.\",\n\t\t\t\t\t\"Client %s should be connected to Headscale Embedded DERP\", client.Hostname())\n\t\t\t}\n\t\t}, 30*time.Second, 2*time.Second)\n\t}\n\n\tsuccess := pingDerpAllHelper(t, allClients, allHostnames)\n\tif len(allHostnames)*len(allClients) > success {\n\t\tt.FailNow()\n\n\t\treturn\n\t}\n\n\tfor _, client := range allClients {\n\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t\tstatus, err := client.Status()\n\t\t\tassert.NoError(ct, err, \"Failed to get status for client %s\", client.Hostname())\n\n\t\t\tfor _, health := range status.Health {\n\t\t\t\tassert.NotContains(ct, health, \"could not connect to any relay server\",\n\t\t\t\t\t\"Client %s should be connected to DERP relay after first run\", client.Hostname())\n\t\t\t\tassert.NotContains(ct, health, \"could not connect to the 'Headscale Embedded DERP' relay server.\",\n\t\t\t\t\t\"Client %s should be connected to Headscale Embedded DERP after first run\", client.Hostname())\n\t\t\t}\n\t\t}, 30*time.Second, 2*time.Second)\n\t}\n\n\tt.Logf(\"Run 1: %d successful pings out of %d\", success, len(allClients)*len(allHostnames))\n\n\t// Let the DERP updater run a couple of times to ensure it does not\n\t// break the DERPMap. 
\n\t//nolint:forbidigo // Intentional delay: must wait for DERP updater to run multiple times (interval-based)\n\ttime.Sleep(30 * time.Second)\n\n\tsuccess = pingDerpAllHelper(t, allClients, allHostnames)\n\tif len(allHostnames)*len(allClients) > success {\n\t\tt.Fail()\n\t}\n\n\tfor _, client := range allClients {\n\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t\tstatus, err := client.Status()\n\t\t\tassert.NoError(ct, err, \"Failed to get status for client %s\", client.Hostname())\n\n\t\t\tfor _, health := range status.Health {\n\t\t\t\tassert.NotContains(ct, health, \"could not connect to any relay server\",\n\t\t\t\t\t\"Client %s should be connected to DERP relay after second run\", client.Hostname())\n\t\t\t\tassert.NotContains(ct, health, \"could not connect to the 'Headscale Embedded DERP' relay server.\",\n\t\t\t\t\t\"Client %s should be connected to Headscale Embedded DERP after second run\", client.Hostname())\n\t\t\t}\n\t\t}, 30*time.Second, 2*time.Second)\n\t}\n\n\tt.Logf(\"Run 2: %d successful pings out of %d\", success, len(allClients)*len(allHostnames))\n\n\tfor _, check := range furtherAssertions {\n\t\tcheck(scenario)\n\t}\n}\n"
  },
  {
    "path": "integration/general_test.go",
    "content": "package integration\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/netip\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/juanfont/headscale/integration/hsic\"\n\t\"github.com/juanfont/headscale/integration/integrationutil\"\n\t\"github.com/juanfont/headscale/integration/tsic\"\n\t\"github.com/rs/zerolog/log\"\n\t\"github.com/samber/lo\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"golang.org/x/sync/errgroup\"\n\t\"tailscale.com/client/tailscale/apitype\"\n\t\"tailscale.com/types/key\"\n)\n\nfunc TestPingAllByIP(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: len(MustTestVersions),\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t\tMaxWait:      dockertestMaxWait(),\n\t}\n\n\tscenario, err := NewScenario(spec)\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithTestName(\"pingallbyip\"),\n\t\t// All other tests use the default sequential allocation.\n\t\t// This test uses random allocation to ensure it does not\n\t\t// break basic connectivity.\n\t\thsic.WithIPAllocationStrategy(types.IPAllocationStrategyRandom),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\tallIps, err := scenario.ListTailscaleClientsIPs()\n\trequireNoErrListClientIPs(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\ths, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\t// Extract node IDs for validation\n\texpectedNodes := make([]types.NodeID, 0, len(allClients))\n\tfor _, client := range allClients {\n\t\tstatus := client.MustStatus()\n\t\tnodeID, err := strconv.ParseUint(string(status.Self.ID), 10, 64)\n\t\trequire.NoError(t, err, \"failed to parse node ID\")\n\t\texpectedNodes = append(expectedNodes, types.NodeID(nodeID))\n\t}\n\trequireAllClientsOnline(t, hs, expectedNodes, true, \"all clients should be online across all systems\", 30*time.Second)\n\n\t// assertClientsState(t, allClients)\n\n\tallAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {\n\t\treturn x.String()\n\t})\n\n\t// Get headscale instance for batcher debug check\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\t// Test our DebugBatcher functionality\n\tt.Logf(\"Testing DebugBatcher functionality...\")\n\trequireAllClientsOnline(t, headscale, expectedNodes, true, \"all clients should be connected to the batcher\", 30*time.Second)\n\n\tsuccess := pingAllHelper(t, allClients, allAddrs)\n\tt.Logf(\"%d successful pings out of %d\", success, len(allClients)*len(allIps))\n}\n\nfunc TestPingAllByIPPublicDERP(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: len(MustTestVersions),\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithTestName(\"pingallbyippubderp\"),\n\t\t// Explicitly use public DERP relays instead of the embedded\n\t\t// DERP server to verify connectivity through Tailscale's\n\t\t// infrastructure. 
TLS is disabled because the headscale\n\t\t// server does not need to terminate TLS for this test.\n\t\thsic.WithPublicDERP(),\n\t\thsic.WithoutTLS(),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\tallIps, err := scenario.ListTailscaleClientsIPs()\n\trequireNoErrListClientIPs(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t// assertClientsState(t, allClients)\n\n\tallAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {\n\t\treturn x.String()\n\t})\n\n\tsuccess := pingAllHelper(t, allClients, allAddrs)\n\tt.Logf(\"%d successful pings out of %d\", success, len(allClients)*len(allIps))\n}\n\nfunc TestEphemeral(t *testing.T) {\n\ttestEphemeralWithOptions(t, hsic.WithTestName(\"ephemeral\"))\n}\n\n// TestEphemeralInAlternateTimezone verifies that ephemeral node\n// expiry works correctly when the server runs in a non-UTC timezone.\nfunc TestEphemeralInAlternateTimezone(t *testing.T) {\n\ttestEphemeralWithOptions(\n\t\tt,\n\t\thsic.WithTestName(\"ephemeral-tz\"),\n\t\thsic.WithTimezone(\"America/Los_Angeles\"),\n\t)\n}\n\nfunc testEphemeralWithOptions(t *testing.T, opts ...hsic.Option) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: len(MustTestVersions),\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\theadscale, err := scenario.Headscale(opts...)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tfor _, userName := range spec.Users {\n\t\tuser, err := scenario.CreateUser(userName)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to create user %s: %s\", userName, err)\n\t\t}\n\n\t\terr = scenario.CreateTailscaleNodesInUser(userName, \"all\", spec.NodesPerUser, tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to create tailscale nodes in user %s: %s\", userName, err)\n\t\t}\n\n\t\tkey, err := scenario.CreatePreAuthKey(user.GetId(), true, true)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to create pre-auth key for user %s: %s\", userName, err)\n\t\t}\n\n\t\terr = scenario.RunTailscaleUp(userName, headscale.GetEndpoint(), key.GetKey())\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to run tailscale up for user %s: %s\", userName, err)\n\t\t}\n\t}\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\tallIps, err := scenario.ListTailscaleClientsIPs()\n\trequireNoErrListClientIPs(t, err)\n\n\tallAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {\n\t\treturn x.String()\n\t})\n\n\tsuccess := pingAllHelper(t, allClients, allAddrs)\n\tt.Logf(\"%d successful pings out of %d\", success, len(allClients)*len(allIps))\n\n\tfor _, client := range allClients {\n\t\terr := client.Logout()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to logout client %s: %s\", client.Hostname(), err)\n\t\t}\n\t}\n\n\terr = scenario.WaitForTailscaleLogout()\n\trequireNoErrLogout(t, err)\n\n\tt.Logf(\"all clients logged out\")\n\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(ct, err)\n\t\tassert.Len(ct, nodes, 0, \"All ephemeral nodes should be cleaned up after logout\")\n\t}, 30*time.Second, 2*time.Second)\n}\n\n// TestEphemeral2006DeletedTooQuickly verifies that ephemeral nodes are not\n// deleted by accident 
if they are still online and active.\nfunc TestEphemeral2006DeletedTooQuickly(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: len(MustTestVersions),\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\theadscale, err := scenario.Headscale(\n\t\thsic.WithTestName(\"ephemeral2006\"),\n\t\thsic.WithConfigEnv(map[string]string{\n\t\t\t\"HEADSCALE_EPHEMERAL_NODE_INACTIVITY_TIMEOUT\": \"1m6s\",\n\t\t}),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tfor _, userName := range spec.Users {\n\t\tuser, err := scenario.CreateUser(userName)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to create user %s: %s\", userName, err)\n\t\t}\n\n\t\terr = scenario.CreateTailscaleNodesInUser(userName, \"all\", spec.NodesPerUser, tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to create tailscale nodes in user %s: %s\", userName, err)\n\t\t}\n\n\t\tkey, err := scenario.CreatePreAuthKey(user.GetId(), true, true)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to create pre-auth key for user %s: %s\", userName, err)\n\t\t}\n\n\t\terr = scenario.RunTailscaleUp(userName, headscale.GetEndpoint(), key.GetKey())\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to run tailscale up for user %s: %s\", userName, err)\n\t\t}\n\t}\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\tallIps, err := scenario.ListTailscaleClientsIPs()\n\trequireNoErrListClientIPs(t, err)\n\n\tallAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {\n\t\treturn x.String()\n\t})\n\n\t// All ephemeral nodes should be online and reachable.\n\tsuccess := pingAllHelper(t, allClients, allAddrs)\n\tt.Logf(\"%d successful pings out of %d\", success, len(allClients)*len(allIps))\n\n\t// Take down all clients; this should start an expiry timer for each.\n\tfor _, client := range allClients {\n\t\terr := client.Down()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to take down client %s: %s\", client.Hostname(), err)\n\t\t}\n\t}\n\n\t// Bring the clients back up before the ephemeral expiry timeout fires.\n\t// Nodes should be able to reconnect and work fine.\n\tfor _, client := range allClients {\n\t\terr := client.Up()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to bring up client %s: %s\", client.Hostname(), err)\n\t\t}\n\t}\n\n\t// Wait for clients to sync and be able to ping each other after reconnection\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\terr = scenario.WaitForTailscaleSync()\n\t\tassert.NoError(ct, err)\n\n\t\tsuccess = pingAllHelper(t, allClients, allAddrs)\n\t\tassert.Greater(ct, success, 0, \"Ephemeral nodes should be able to reconnect and ping\")\n\t}, 60*time.Second, 2*time.Second)\n\tt.Logf(\"%d successful pings out of %d\", success, len(allClients)*len(allIps))\n\n\t// Take down all clients; this should start an expiry timer for each.\n\tfor _, client := range allClients {\n\t\terr := client.Down()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to take down client %s: %s\", client.Hostname(), err)\n\t\t}\n\t}\n\n\t// This time wait for all of the nodes to expire and check that they are no longer\n\t// registered.\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tfor _, userName := range spec.Users {\n\t\t\tnodes, err := 
headscale.ListNodes(userName)\n\t\t\tassert.NoError(ct, err)\n\t\t\tassert.Len(ct, nodes, 0, \"Ephemeral nodes should be expired and removed for user %s\", userName)\n\t\t}\n\t}, 4*time.Minute, 10*time.Second)\n\n\tfor _, userName := range spec.Users {\n\t\tnodes, err := headscale.ListNodes(userName)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to list nodes in user %s: %s\", userName, err)\n\t\t}\n\n\t\tif len(nodes) != 0 {\n\t\t\tt.Fatalf(\"expected no nodes, got %d in user %s\", len(nodes), userName)\n\t\t}\n\t}\n}\n\nfunc TestPingAllByHostname(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: len(MustTestVersions),\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName(\"pingallbyname\"))\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t// assertClientsState(t, allClients)\n\n\tallHostnames, err := scenario.ListTailscaleClientsFQDNs()\n\trequireNoErrListFQDN(t, err)\n\n\tsuccess := pingAllHelper(t, allClients, allHostnames)\n\n\tt.Logf(\"%d successful pings out of %d\", success, len(allClients)*len(allHostnames))\n}\n\n// If subtests are parallel, then they will start before setup is run.\n// This might mean we approach setup slightly wrong, but for now, ignore\n// the linter.\n//nolint:tparallel\n// TestTaildrop tests the Taildrop file sharing functionality across multiple scenarios:\n// 1. Same-user transfers: Nodes owned by the same user can send files to each other\n// 2. Cross-user transfers: Nodes owned by different users cannot send files to each other\n// 3. 
Tagged device transfers: Tagged devices cannot send nor receive files\n//\n// Each user gets len(MustTestVersions) nodes to ensure compatibility across all supported versions.\nfunc TestTaildrop(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0, // We'll create nodes manually to control tags\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{},\n\t\thsic.WithTestName(\"taildrop\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\tuserMap, err := headscale.MapUsers()\n\trequire.NoError(t, err)\n\n\tnetworks := scenario.Networks()\n\trequire.NotEmpty(t, networks, \"scenario should have at least one network\")\n\tnetwork := networks[0]\n\n\t// Create untagged nodes for user1 using all test versions\n\tuser1Key, err := scenario.CreatePreAuthKey(userMap[\"user1\"].GetId(), true, false)\n\trequire.NoError(t, err)\n\n\tvar user1Clients []TailscaleClient\n\tfor i, version := range MustTestVersions {\n\t\tt.Logf(\"Creating user1 client %d with version %s\", i, version)\n\t\tclient, err := scenario.CreateTailscaleNode(\n\t\t\tversion,\n\t\t\ttsic.WithNetwork(network),\n\t\t)\n\t\trequire.NoError(t, err)\n\n\t\terr = client.Login(headscale.GetEndpoint(), user1Key.GetKey())\n\t\trequire.NoError(t, err)\n\n\t\terr = client.WaitForRunning(integrationutil.PeerSyncTimeout())\n\t\trequire.NoError(t, err)\n\n\t\tuser1Clients = append(user1Clients, client)\n\t\tscenario.GetOrCreateUser(\"user1\").Clients[client.Hostname()] = client\n\t}\n\n\t// Create untagged nodes for user2 using all test versions\n\tuser2Key, err := scenario.CreatePreAuthKey(userMap[\"user2\"].GetId(), true, false)\n\trequire.NoError(t, err)\n\n\tvar user2Clients []TailscaleClient\n\tfor i, version := range MustTestVersions {\n\t\tt.Logf(\"Creating user2 client %d with version %s\", i, version)\n\t\tclient, err := scenario.CreateTailscaleNode(\n\t\t\tversion,\n\t\t\ttsic.WithNetwork(network),\n\t\t)\n\t\trequire.NoError(t, err)\n\n\t\terr = client.Login(headscale.GetEndpoint(), user2Key.GetKey())\n\t\trequire.NoError(t, err)\n\n\t\terr = client.WaitForRunning(integrationutil.PeerSyncTimeout())\n\t\trequire.NoError(t, err)\n\n\t\tuser2Clients = append(user2Clients, client)\n\t\tscenario.GetOrCreateUser(\"user2\").Clients[client.Hostname()] = client\n\t}\n\n\t// Create a tagged device (tags-as-identity: tags come from PreAuthKey)\n\t// Use \"head\" version to test latest behavior\n\ttaggedKey, err := scenario.CreatePreAuthKeyWithTags(userMap[\"user1\"].GetId(), true, false, []string{\"tag:server\"})\n\trequire.NoError(t, err)\n\n\ttaggedClient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(network),\n\t)\n\trequire.NoError(t, err)\n\n\terr = taggedClient.Login(headscale.GetEndpoint(), taggedKey.GetKey())\n\trequire.NoError(t, err)\n\n\terr = taggedClient.WaitForRunning(integrationutil.PeerSyncTimeout())\n\trequire.NoError(t, err)\n\n\t// Add tagged client to user1 for tracking (though it's tagged, not user-owned)\n\tscenario.GetOrCreateUser(\"user1\").Clients[taggedClient.Hostname()] = taggedClient\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\t// Expected: len(MustTestVersions) for user1 + len(MustTestVersions) for user2 + 1 tagged\n\texpectedClientCount := len(MustTestVersions)*2 + 
1\n\trequire.Len(t, allClients, expectedClientCount,\n\t\t\"should have %d clients: %d user1 + %d user2 + 1 tagged\",\n\t\texpectedClientCount, len(MustTestVersions), len(MustTestVersions))\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t// Cache FQDNs\n\t_, err = scenario.ListTailscaleClientsFQDNs()\n\trequireNoErrListFQDN(t, err)\n\n\t// Install curl on all clients\n\tfor _, client := range allClients {\n\t\tif !strings.Contains(client.Hostname(), \"head\") {\n\t\t\tcommand := []string{\"apk\", \"add\", \"curl\"}\n\t\t\t_, _, err := client.Execute(command)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"failed to install curl on %s, err: %s\", client.Hostname(), err)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Helper to get FileTargets for a client.\n\tgetFileTargets := func(client TailscaleClient) ([]apitype.FileTarget, error) {\n\t\tcurlCommand := []string{\n\t\t\t\"curl\",\n\t\t\t\"--unix-socket\",\n\t\t\t\"/var/run/tailscale/tailscaled.sock\",\n\t\t\t\"http://local-tailscaled.sock/localapi/v0/file-targets\",\n\t\t}\n\t\tresult, _, err := client.Execute(curlCommand)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvar fts []apitype.FileTarget\n\t\tif err := json.Unmarshal([]byte(result), &fts); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse file-targets response: %w (response: %s)\", err, result)\n\t\t}\n\n\t\treturn fts, nil\n\t}\n\n\t// Helper to check if a client is in the FileTargets list\n\tisInFileTargets := func(fts []apitype.FileTarget, targetHostname string) bool {\n\t\tfor _, ft := range fts {\n\t\t\tif strings.Contains(ft.Node.Name, targetHostname) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\t// Test 1: Verify user1 nodes can see each other in FileTargets but not user2 nodes or tagged node\n\tt.Run(\"FileTargets-user1\", func(t *testing.T) {\n\t\tfor _, client := range user1Clients {\n\t\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t\t\tfts, err := getFileTargets(client)\n\t\t\t\tassert.NoError(ct, err)\n\n\t\t\t\t// Should see the other user1 clients\n\t\t\t\tfor _, peer := range user1Clients {\n\t\t\t\t\tif peer.Hostname() == client.Hostname() {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tassert.True(ct, isInFileTargets(fts, peer.Hostname()),\n\t\t\t\t\t\t\"user1 client %s should see user1 peer %s in FileTargets\", client.Hostname(), peer.Hostname())\n\t\t\t\t}\n\n\t\t\t\t// Should NOT see user2 clients\n\t\t\t\tfor _, peer := range user2Clients {\n\t\t\t\t\tassert.False(ct, isInFileTargets(fts, peer.Hostname()),\n\t\t\t\t\t\t\"user1 client %s should NOT see user2 peer %s in FileTargets\", client.Hostname(), peer.Hostname())\n\t\t\t\t}\n\n\t\t\t\t// Should NOT see tagged client\n\t\t\t\tassert.False(ct, isInFileTargets(fts, taggedClient.Hostname()),\n\t\t\t\t\t\"user1 client %s should NOT see tagged client %s in FileTargets\", client.Hostname(), taggedClient.Hostname())\n\t\t\t}, 10*time.Second, 1*time.Second)\n\t\t}\n\t})\n\n\t// Test 2: Verify user2 nodes can see each other in FileTargets but not user1 nodes or tagged node\n\tt.Run(\"FileTargets-user2\", func(t *testing.T) {\n\t\tfor _, client := range user2Clients {\n\t\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t\t\tfts, err := getFileTargets(client)\n\t\t\t\tassert.NoError(ct, err)\n\n\t\t\t\t// Should see the other user2 clients\n\t\t\t\tfor _, peer := range user2Clients {\n\t\t\t\t\tif peer.Hostname() == client.Hostname() {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tassert.True(ct, isInFileTargets(fts, 
peer.Hostname()),\n\t\t\t\t\t\t\"user2 client %s should see user2 peer %s in FileTargets\", client.Hostname(), peer.Hostname())\n\t\t\t\t}\n\n\t\t\t\t// Should NOT see user1 clients\n\t\t\t\tfor _, peer := range user1Clients {\n\t\t\t\t\tassert.False(ct, isInFileTargets(fts, peer.Hostname()),\n\t\t\t\t\t\t\"user2 client %s should NOT see user1 peer %s in FileTargets\", client.Hostname(), peer.Hostname())\n\t\t\t\t}\n\n\t\t\t\t// Should NOT see tagged client\n\t\t\t\tassert.False(ct, isInFileTargets(fts, taggedClient.Hostname()),\n\t\t\t\t\t\"user2 client %s should NOT see tagged client %s in FileTargets\", client.Hostname(), taggedClient.Hostname())\n\t\t\t}, 10*time.Second, 1*time.Second)\n\t\t}\n\t})\n\n\t// Test 3: Verify tagged device has no FileTargets (empty list)\n\tt.Run(\"FileTargets-tagged\", func(t *testing.T) {\n\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t\tfts, err := getFileTargets(taggedClient)\n\t\t\tassert.NoError(ct, err)\n\t\t\tassert.Empty(ct, fts, \"tagged client %s should have no FileTargets\", taggedClient.Hostname())\n\t\t}, 10*time.Second, 1*time.Second)\n\t})\n\n\t// Test 4: Same-user file transfer works (user1 -> user1) for all version combinations\n\tt.Run(\"SameUserTransfer\", func(t *testing.T) {\n\t\tfor _, sender := range user1Clients {\n\t\t\t// Create file on sender\n\t\t\tfilename := fmt.Sprintf(\"file_from_%s\", sender.Hostname())\n\t\t\tcommand := []string{\"touch\", fmt.Sprintf(\"/tmp/%s\", filename)}\n\t\t\t_, _, err := sender.Execute(command)\n\t\t\trequire.NoError(t, err, \"failed to create taildrop file on %s\", sender.Hostname())\n\n\t\t\tfor _, receiver := range user1Clients {\n\t\t\t\tif sender.Hostname() == receiver.Hostname() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\treceiverFQDN, _ := receiver.FQDN()\n\n\t\t\t\tt.Run(fmt.Sprintf(\"%s->%s\", sender.Hostname(), receiver.Hostname()), func(t *testing.T) {\n\t\t\t\t\tsendCommand := []string{\n\t\t\t\t\t\t\"tailscale\", \"file\", \"cp\",\n\t\t\t\t\t\tfmt.Sprintf(\"/tmp/%s\", filename),\n\t\t\t\t\t\tfmt.Sprintf(\"%s:\", receiverFQDN),\n\t\t\t\t\t}\n\n\t\t\t\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t\t\t\t\tt.Logf(\"Sending file from %s to %s\", sender.Hostname(), receiver.Hostname())\n\t\t\t\t\t\t_, _, err := sender.Execute(sendCommand)\n\t\t\t\t\t\tassert.NoError(ct, err)\n\t\t\t\t\t}, 10*time.Second, 1*time.Second)\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\n\t\t// Receive files on all user1 clients\n\t\tfor _, client := range user1Clients {\n\t\t\tgetCommand := []string{\"tailscale\", \"file\", \"get\", \"/tmp/\"}\n\t\t\t_, _, err := client.Execute(getCommand)\n\t\t\trequire.NoError(t, err, \"failed to get taildrop file on %s\", client.Hostname())\n\n\t\t\t// Verify files from all other user1 clients exist\n\t\t\tfor _, peer := range user1Clients {\n\t\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tt.Run(fmt.Sprintf(\"verify-%s-received-from-%s\", client.Hostname(), peer.Hostname()), func(t *testing.T) {\n\t\t\t\t\tlsCommand := []string{\"ls\", fmt.Sprintf(\"/tmp/file_from_%s\", peer.Hostname())}\n\t\t\t\t\tresult, _, err := client.Execute(lsCommand)\n\t\t\t\t\trequire.NoErrorf(t, err, \"failed to ls taildrop file from %s\", peer.Hostname())\n\t\t\t\t\tassert.Equal(t, fmt.Sprintf(\"/tmp/file_from_%s\\n\", peer.Hostname()), result)\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t})\n\n\t// Test 5: Cross-user file transfer fails (user1 -> user2)\n\tt.Run(\"CrossUserTransferBlocked\", func(t *testing.T) {\n\t\tsender := 
user1Clients[0]\n\t\treceiver := user2Clients[0]\n\n\t\t// Create file on sender\n\t\tfilename := fmt.Sprintf(\"cross_user_file_from_%s\", sender.Hostname())\n\t\tcommand := []string{\"touch\", fmt.Sprintf(\"/tmp/%s\", filename)}\n\t\t_, _, err := sender.Execute(command)\n\t\trequire.NoError(t, err, \"failed to create taildrop file on %s\", sender.Hostname())\n\n\t\t// Attempt to send file - this should fail\n\t\treceiverFQDN, _ := receiver.FQDN()\n\t\tsendCommand := []string{\n\t\t\t\"tailscale\", \"file\", \"cp\",\n\t\t\tfmt.Sprintf(\"/tmp/%s\", filename),\n\t\t\tfmt.Sprintf(\"%s:\", receiverFQDN),\n\t\t}\n\n\t\tt.Logf(\"Attempting cross-user file send from %s to %s (should fail)\", sender.Hostname(), receiver.Hostname())\n\t\t_, stderr, err := sender.Execute(sendCommand)\n\n\t\t// The file transfer should fail because user2 is not in user1's FileTargets\n\t\t// Either the command errors, or it silently fails (check stderr for error message)\n\t\tif err != nil {\n\t\t\tt.Logf(\"Cross-user transfer correctly failed with error: %v\", err)\n\t\t} else if strings.Contains(stderr, \"not a valid peer\") || strings.Contains(stderr, \"unknown target\") {\n\t\t\tt.Logf(\"Cross-user transfer correctly rejected: %s\", stderr)\n\t\t} else {\n\t\t\t// Even if command succeeded, verify the file was NOT received\n\t\t\tgetCommand := []string{\"tailscale\", \"file\", \"get\", \"/tmp/\"}\n\t\t\treceiver.Execute(getCommand)\n\n\t\t\tlsCommand := []string{\"ls\", fmt.Sprintf(\"/tmp/%s\", filename)}\n\t\t\t_, _, lsErr := receiver.Execute(lsCommand)\n\t\t\tassert.Error(t, lsErr, \"Cross-user file should NOT have been received\")\n\t\t}\n\t})\n\n\t// Test 6: Tagged device cannot send files\n\tt.Run(\"TaggedCannotSend\", func(t *testing.T) {\n\t\t// Create file on tagged client\n\t\tfilename := fmt.Sprintf(\"file_from_tagged_%s\", taggedClient.Hostname())\n\t\tcommand := []string{\"touch\", fmt.Sprintf(\"/tmp/%s\", filename)}\n\t\t_, _, err := taggedClient.Execute(command)\n\t\trequire.NoError(t, err, \"failed to create taildrop file on tagged client\")\n\n\t\t// Attempt to send to user1 client - should fail because tagged client has no FileTargets\n\t\treceiver := user1Clients[0]\n\t\treceiverFQDN, _ := receiver.FQDN()\n\t\tsendCommand := []string{\n\t\t\t\"tailscale\", \"file\", \"cp\",\n\t\t\tfmt.Sprintf(\"/tmp/%s\", filename),\n\t\t\tfmt.Sprintf(\"%s:\", receiverFQDN),\n\t\t}\n\n\t\tt.Logf(\"Attempting tagged->user file send from %s to %s (should fail)\", taggedClient.Hostname(), receiver.Hostname())\n\t\t_, stderr, err := taggedClient.Execute(sendCommand)\n\n\t\tif err != nil {\n\t\t\tt.Logf(\"Tagged client send correctly failed with error: %v\", err)\n\t\t} else if strings.Contains(stderr, \"not a valid peer\") || strings.Contains(stderr, \"unknown target\") || strings.Contains(stderr, \"no matches for\") {\n\t\t\tt.Logf(\"Tagged client send correctly rejected: %s\", stderr)\n\t\t} else {\n\t\t\t// Verify file was NOT received\n\t\t\tgetCommand := []string{\"tailscale\", \"file\", \"get\", \"/tmp/\"}\n\t\t\treceiver.Execute(getCommand)\n\n\t\t\tlsCommand := []string{\"ls\", fmt.Sprintf(\"/tmp/%s\", filename)}\n\t\t\t_, _, lsErr := receiver.Execute(lsCommand)\n\t\t\tassert.Error(t, lsErr, \"Tagged client's file should NOT have been received\")\n\t\t}\n\t})\n\n\t// Test 7: Tagged device cannot receive files (user1 tries to send to tagged)\n\tt.Run(\"TaggedCannotReceive\", func(t *testing.T) {\n\t\tsender := user1Clients[0]\n\n\t\t// Create file on sender\n\t\tfilename := 
fmt.Sprintf(\"file_to_tagged_from_%s\", sender.Hostname())\n\t\tcommand := []string{\"touch\", fmt.Sprintf(\"/tmp/%s\", filename)}\n\t\t_, _, err := sender.Execute(command)\n\t\trequire.NoError(t, err, \"failed to create taildrop file on %s\", sender.Hostname())\n\n\t\t// Attempt to send to tagged client - should fail because tagged is not in user1's FileTargets\n\t\ttaggedFQDN, _ := taggedClient.FQDN()\n\t\tsendCommand := []string{\n\t\t\t\"tailscale\", \"file\", \"cp\",\n\t\t\tfmt.Sprintf(\"/tmp/%s\", filename),\n\t\t\tfmt.Sprintf(\"%s:\", taggedFQDN),\n\t\t}\n\n\t\tt.Logf(\"Attempting user->tagged file send from %s to %s (should fail)\", sender.Hostname(), taggedClient.Hostname())\n\t\t_, stderr, err := sender.Execute(sendCommand)\n\n\t\tif err != nil {\n\t\t\tt.Logf(\"Send to tagged client correctly failed with error: %v\", err)\n\t\t} else if strings.Contains(stderr, \"not a valid peer\") || strings.Contains(stderr, \"unknown target\") || strings.Contains(stderr, \"no matches for\") {\n\t\t\tt.Logf(\"Send to tagged client correctly rejected: %s\", stderr)\n\t\t} else {\n\t\t\t// Verify file was NOT received by tagged client\n\t\t\tgetCommand := []string{\"tailscale\", \"file\", \"get\", \"/tmp/\"}\n\t\t\ttaggedClient.Execute(getCommand)\n\n\t\t\tlsCommand := []string{\"ls\", fmt.Sprintf(\"/tmp/%s\", filename)}\n\t\t\t_, _, lsErr := taggedClient.Execute(lsCommand)\n\t\t\tassert.Error(t, lsErr, \"File to tagged client should NOT have been received\")\n\t\t}\n\t})\n}\n\nfunc TestUpdateHostnameFromClient(t *testing.T) {\n\tIntegrationSkip(t)\n\n\thostnames := map[string]string{\n\t\t\"1\": \"user1-host\",\n\t\t\"2\": \"user2-host\",\n\t\t\"3\": \"user3-host\",\n\t}\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 3,\n\t\tUsers:        []string{\"user1\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\trequire.NoErrorf(t, err, \"failed to create scenario\")\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName(\"updatehostname\"))\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\t// update hostnames using the up command\n\tfor _, client := range allClients {\n\t\tstatus := client.MustStatus()\n\n\t\tcommand := []string{\n\t\t\t\"tailscale\",\n\t\t\t\"set\",\n\t\t\t\"--hostname=\" + hostnames[string(status.Self.ID)],\n\t\t}\n\t\t_, _, err = client.Execute(command)\n\t\trequire.NoErrorf(t, err, \"failed to set hostname\")\n\t}\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t// Wait for nodestore batch processing to complete\n\t// NodeStore batching timeout is 500ms, so we wait up to 1 second\n\tvar nodes []*v1.Node\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\terr := executeAndUnmarshal(\n\t\t\theadscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"node\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&nodes,\n\t\t)\n\t\tassert.NoError(ct, err)\n\t\tassert.Len(ct, nodes, 3, \"Should have 3 nodes after hostname updates\")\n\n\t\tfor _, node := range nodes {\n\t\t\thostname := hostnames[strconv.FormatUint(node.GetId(), 10)]\n\t\t\tassert.Equal(ct, hostname, node.GetName(), \"Node name should match hostname\")\n\n\t\t\t// GivenName is normalized (lowercase, invalid chars stripped)\n\t\t\tnormalised, err := 
util.NormaliseHostname(hostname)\n\t\t\tassert.NoError(ct, err)\n\t\t\tassert.Equal(ct, normalised, node.GetGivenName(), \"Given name should match FQDN rules\")\n\t\t}\n\t}, 20*time.Second, 1*time.Second)\n\n\t// Rename givenName in nodes\n\tfor _, node := range nodes {\n\t\tgivenName := fmt.Sprintf(\"%d-givenname\", node.GetId())\n\t\t_, err = headscale.Execute(\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"node\",\n\t\t\t\t\"rename\",\n\t\t\t\tgivenName,\n\t\t\t\t\"--identifier\",\n\t\t\t\tstrconv.FormatUint(node.GetId(), 10),\n\t\t\t})\n\t\trequire.NoError(t, err)\n\t}\n\n\t// Verify that the server-side rename is reflected in DNSName while HostName remains unchanged\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t// Build a map of expected DNSNames by node ID\n\t\texpectedDNSNames := make(map[string]string)\n\t\tfor _, node := range nodes {\n\t\t\tnodeID := strconv.FormatUint(node.GetId(), 10)\n\t\t\texpectedDNSNames[nodeID] = fmt.Sprintf(\"%d-givenname.headscale.net.\", node.GetId())\n\t\t}\n\n\t\t// Verify from each client's perspective\n\t\tfor _, client := range allClients {\n\t\t\tstatus, err := client.Status()\n\t\t\tassert.NoError(ct, err)\n\n\t\t\t// Check self node\n\t\t\tselfID := string(status.Self.ID)\n\t\t\texpectedDNS := expectedDNSNames[selfID]\n\t\t\tassert.Equal(ct, expectedDNS, status.Self.DNSName,\n\t\t\t\t\"Self DNSName should be renamed for client %s (ID: %s)\", client.Hostname(), selfID)\n\n\t\t\t// HostName should remain as the original client-reported hostname\n\t\t\toriginalHostname := hostnames[selfID]\n\t\t\tassert.Equal(ct, originalHostname, status.Self.HostName,\n\t\t\t\t\"Self HostName should remain unchanged for client %s (ID: %s)\", client.Hostname(), selfID)\n\n\t\t\t// Check peers\n\t\t\tfor _, peer := range status.Peer {\n\t\t\t\tpeerID := string(peer.ID)\n\t\t\t\tif expectedDNS, ok := expectedDNSNames[peerID]; ok {\n\t\t\t\t\tassert.Equal(ct, expectedDNS, peer.DNSName,\n\t\t\t\t\t\t\"Peer DNSName should be renamed for peer ID %s as seen by client %s\", peerID, client.Hostname())\n\n\t\t\t\t\t// HostName should remain as the original client-reported hostname\n\t\t\t\t\toriginalHostname := hostnames[peerID]\n\t\t\t\t\tassert.Equal(ct, originalHostname, peer.HostName,\n\t\t\t\t\t\t\"Peer HostName should remain unchanged for peer ID %s as seen by client %s\", peerID, client.Hostname())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}, 60*time.Second, 2*time.Second)\n\n\tfor _, client := range allClients {\n\t\tstatus := client.MustStatus()\n\n\t\tcommand := []string{\n\t\t\t\"tailscale\",\n\t\t\t\"set\",\n\t\t\t\"--hostname=\" + hostnames[string(status.Self.ID)] + \"NEW\",\n\t\t}\n\t\t_, _, err = client.Execute(command)\n\t\trequire.NoErrorf(t, err, \"failed to set hostname\")\n\t}\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t// Wait for nodestore batch processing to complete\n\t// NodeStore batching timeout is 500ms, so we wait up to 1 second\n\tassert.Eventually(t, func() bool {\n\t\terr = executeAndUnmarshal(\n\t\t\theadscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"node\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&nodes,\n\t\t)\n\n\t\tif err != nil || len(nodes) != 3 {\n\t\t\treturn false\n\t\t}\n\n\t\tfor _, node := range nodes {\n\t\t\thostname := hostnames[strconv.FormatUint(node.GetId(), 10)]\n\t\t\tgivenName := fmt.Sprintf(\"%d-givenname\", node.GetId())\n\t\t\t// Hostnames are lowercased before being stored, so \"NEW\" becomes \"new\"\n\t\t\tif node.GetName() != 
hostname+\"new\" || node.GetGivenName() != givenName {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}, time.Second, 50*time.Millisecond, \"hostname updates should be reflected in node list with new suffix\")\n}\n\nfunc TestExpireNode(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: len(MustTestVersions),\n\t\tUsers:        []string{\"user1\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName(\"expirenode\"))\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\tallIps, err := scenario.ListTailscaleClientsIPs()\n\trequireNoErrListClientIPs(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t// assertClientsState(t, allClients)\n\n\tallAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {\n\t\treturn x.String()\n\t})\n\n\tsuccess := pingAllHelper(t, allClients, allAddrs)\n\tt.Logf(\"before expire: %d successful pings out of %d\", success, len(allClients)*len(allIps))\n\n\tfor _, client := range allClients {\n\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t\tstatus, err := client.Status()\n\t\t\tassert.NoError(ct, err)\n\n\t\t\t// Assert that we have the original count - self\n\t\t\tassert.Len(ct, status.Peers(), spec.NodesPerUser-1, \"Client %s should see correct number of peers\", client.Hostname())\n\t\t}, 30*time.Second, 1*time.Second)\n\t}\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\t// TODO(kradalby): This is Headscale specific and would not play nicely\n\t// with other implementations of the ControlServer interface\n\tresult, err := headscale.Execute([]string{\n\t\t\"headscale\", \"nodes\", \"expire\", \"--identifier\", \"1\", \"--output\", \"json\",\n\t})\n\trequire.NoError(t, err)\n\n\tvar node v1.Node\n\terr = json.Unmarshal([]byte(result), &node)\n\trequire.NoError(t, err)\n\n\tvar expiredNodeKey key.NodePublic\n\terr = expiredNodeKey.UnmarshalText([]byte(node.GetNodeKey()))\n\trequire.NoError(t, err)\n\n\tt.Logf(\"Node %s with node_key %s has been expired\", node.GetName(), expiredNodeKey.String())\n\n\t// Verify that the expired node has been marked in all peers list.\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tfor _, client := range allClients {\n\t\t\tstatus, err := client.Status()\n\t\t\tassert.NoError(ct, err)\n\n\t\t\tif client.Hostname() != node.GetName() {\n\t\t\t\t// Check if the expired node appears as expired in this client's peer list\n\t\t\t\tfor key, peer := range status.Peer {\n\t\t\t\t\tif key == expiredNodeKey {\n\t\t\t\t\t\tassert.True(ct, peer.Expired, \"Node should be marked as expired for client %s\", client.Hostname())\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}, 3*time.Minute, 10*time.Second)\n\n\tnow := time.Now()\n\n\t// Verify that the expired node has been marked in all peers list.\n\tfor _, client := range allClients {\n\t\tif client.Hostname() == node.GetName() {\n\t\t\tcontinue\n\t\t}\n\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tstatus, err := client.Status()\n\t\t\tassert.NoError(c, err)\n\n\t\t\t// Ensures that the node is present, and that it is expired.\n\t\t\tpeerStatus, ok := status.Peer[expiredNodeKey]\n\t\t\tassert.True(c, ok, \"expired node key should be present in peer list\")\n\n\t\t\tif ok {\n\t\t\t\tassert.NotNil(c, 
peerStatus.Expired)\n\t\t\t\tassert.NotNil(c, peerStatus.KeyExpiry)\n\n\t\t\t\tif peerStatus.KeyExpiry != nil {\n\t\t\t\t\tassert.Truef(\n\t\t\t\t\t\tc,\n\t\t\t\t\t\tpeerStatus.KeyExpiry.Before(now),\n\t\t\t\t\t\t\"node %q should have a key expire before %s, was %s\",\n\t\t\t\t\t\tpeerStatus.HostName,\n\t\t\t\t\t\tnow.String(),\n\t\t\t\t\t\tpeerStatus.KeyExpiry,\n\t\t\t\t\t)\n\t\t\t\t}\n\n\t\t\t\tassert.Truef(\n\t\t\t\t\tc,\n\t\t\t\t\tpeerStatus.Expired,\n\t\t\t\t\t\"node %q should be expired, expired is %v\",\n\t\t\t\t\tpeerStatus.HostName,\n\t\t\t\t\tpeerStatus.Expired,\n\t\t\t\t)\n\n\t\t\t\t_, stderr, _ := client.Execute([]string{\"tailscale\", \"ping\", node.GetName()})\n\t\t\t\tif !strings.Contains(stderr, \"node key has expired\") {\n\t\t\t\t\tc.Errorf(\n\t\t\t\t\t\t\"expected to be unable to ping expired host %q from %q\",\n\t\t\t\t\t\tnode.GetName(),\n\t\t\t\t\t\tclient.Hostname(),\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t}\n\t\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for expired node status to propagate\")\n\t}\n}\n\n// TestSetNodeExpiryInFuture tests setting arbitrary expiration date\n// New expiration date should be stored in the db and propagated to all peers\nfunc TestSetNodeExpiryInFuture(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: len(MustTestVersions),\n\t\tUsers:        []string{\"user1\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName(\"expirenodefuture\"))\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\ttargetExpiry := time.Now().Add(2 * time.Hour).Round(time.Second).UTC()\n\n\tresult, err := headscale.Execute(\n\t\t[]string{\n\t\t\t\"headscale\", \"nodes\", \"expire\",\n\t\t\t\"--identifier\", \"1\",\n\t\t\t\"--output\", \"json\",\n\t\t\t\"--expiry\", targetExpiry.Format(time.RFC3339),\n\t\t},\n\t)\n\trequire.NoError(t, err)\n\n\tvar node v1.Node\n\terr = json.Unmarshal([]byte(result), &node)\n\trequire.NoError(t, err)\n\n\trequire.True(t, node.GetExpiry().AsTime().After(time.Now()))\n\trequire.WithinDuration(t, targetExpiry, node.GetExpiry().AsTime(), 2*time.Second)\n\n\tvar nodeKey key.NodePublic\n\terr = nodeKey.UnmarshalText([]byte(node.GetNodeKey()))\n\trequire.NoError(t, err)\n\n\tfor _, client := range allClients {\n\t\tif client.Hostname() == node.GetName() {\n\t\t\tcontinue\n\t\t}\n\n\t\tassert.EventuallyWithT(\n\t\t\tt, func(ct *assert.CollectT) {\n\t\t\t\tstatus, err := client.Status()\n\t\t\t\tassert.NoError(ct, err)\n\n\t\t\t\tpeerStatus, ok := status.Peer[nodeKey]\n\t\t\t\tassert.True(ct, ok, \"node key should be present in peer list\")\n\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tassert.NotNil(ct, peerStatus.KeyExpiry)\n\t\t\t\tassert.NotNil(ct, peerStatus.Expired)\n\n\t\t\t\tif peerStatus.KeyExpiry != nil {\n\t\t\t\t\tassert.WithinDuration(\n\t\t\t\t\t\tct,\n\t\t\t\t\t\ttargetExpiry,\n\t\t\t\t\t\t*peerStatus.KeyExpiry,\n\t\t\t\t\t\t5*time.Second,\n\t\t\t\t\t\t\"node %q should have key expiry near the requested future time\",\n\t\t\t\t\t\tpeerStatus.HostName,\n\t\t\t\t\t)\n\n\t\t\t\t\tassert.Truef(\n\t\t\t\t\t\tct,\n\t\t\t\t\t\tpeerStatus.KeyExpiry.After(time.Now()),\n\t\t\t\t\t\t\"node %q should have a key expiry timestamp in the 
future\",\n\t\t\t\t\t\tpeerStatus.HostName,\n\t\t\t\t\t)\n\t\t\t\t}\n\n\t\t\t\tassert.Falsef(\n\t\t\t\t\tct,\n\t\t\t\t\tpeerStatus.Expired,\n\t\t\t\t\t\"node %q should not be marked as expired\",\n\t\t\t\t\tpeerStatus.HostName,\n\t\t\t\t)\n\t\t\t}, 3*time.Minute, 5*time.Second, \"Waiting for future expiry to propagate\",\n\t\t)\n\t}\n}\n\n// TestDisableNodeExpiry tests disabling key expiry for a node.\n// First sets an expiry, then disables it and verifies the node never expires.\nfunc TestDisableNodeExpiry(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: len(MustTestVersions),\n\t\tUsers:        []string{\"user1\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName(\"disableexpiry\"))\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\t// First set an expiry on the node.\n\tresult, err := headscale.Execute(\n\t\t[]string{\n\t\t\t\"headscale\", \"nodes\", \"expire\",\n\t\t\t\"--identifier\", \"1\",\n\t\t\t\"--output\", \"json\",\n\t\t\t\"--expiry\", time.Now().Add(time.Hour).Format(time.RFC3339),\n\t\t},\n\t)\n\trequire.NoError(t, err)\n\n\tvar node v1.Node\n\terr = json.Unmarshal([]byte(result), &node)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, node.GetExpiry(), \"node should have an expiry set\")\n\n\t// Now disable the expiry.\n\tresult, err = headscale.Execute(\n\t\t[]string{\n\t\t\t\"headscale\", \"nodes\", \"expire\",\n\t\t\t\"--identifier\", \"1\",\n\t\t\t\"--output\", \"json\",\n\t\t\t\"--disable\",\n\t\t},\n\t)\n\trequire.NoError(t, err)\n\n\tvar nodeDisabled v1.Node\n\terr = json.Unmarshal([]byte(result), &nodeDisabled)\n\trequire.NoError(t, err)\n\n\t// Expiry should be nil (or zero time) when disabled.\n\tif nodeDisabled.GetExpiry() != nil {\n\t\trequire.True(t, nodeDisabled.GetExpiry().AsTime().IsZero(),\n\t\t\t\"node expiry should be zero/nil after disabling\")\n\t}\n\n\tvar nodeKey key.NodePublic\n\terr = nodeKey.UnmarshalText([]byte(nodeDisabled.GetNodeKey()))\n\trequire.NoError(t, err)\n\n\t// Verify peers see the node as not expired.\n\tfor _, client := range allClients {\n\t\tif client.Hostname() == nodeDisabled.GetName() {\n\t\t\tcontinue\n\t\t}\n\n\t\tassert.EventuallyWithT(\n\t\t\tt, func(ct *assert.CollectT) {\n\t\t\t\tstatus, err := client.Status()\n\t\t\t\tassert.NoError(ct, err)\n\n\t\t\t\tpeerStatus, ok := status.Peer[nodeKey]\n\t\t\t\tassert.True(ct, ok, \"node key should be present in peer list\")\n\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// Node should not be expired.\n\t\t\t\tassert.Falsef(\n\t\t\t\t\tct,\n\t\t\t\t\tpeerStatus.Expired,\n\t\t\t\t\t\"node %q should not be marked as expired after disabling expiry\",\n\t\t\t\t\tpeerStatus.HostName,\n\t\t\t\t)\n\t\t\t}, 3*time.Minute, 5*time.Second, \"waiting for disabled expiry to propagate\",\n\t\t)\n\t}\n}\n\nfunc TestNodeOnlineStatus(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: len(MustTestVersions),\n\t\tUsers:        []string{\"user1\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{}, 
hsic.WithTestName(\"online\"))\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\tallIps, err := scenario.ListTailscaleClientsIPs()\n\trequireNoErrListClientIPs(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t// assertClientsState(t, allClients)\n\n\tallAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {\n\t\treturn x.String()\n\t})\n\n\tsuccess := pingAllHelper(t, allClients, allAddrs)\n\tt.Logf(\"before expire: %d successful pings out of %d\", success, len(allClients)*len(allIps))\n\n\tfor _, client := range allClients {\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tstatus, err := client.Status()\n\t\t\tassert.NoError(c, err)\n\n\t\t\t// Assert that we have the original count - self\n\t\t\tassert.Len(c, status.Peers(), len(MustTestVersions)-1)\n\t\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for expected peer count\")\n\t}\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\t// Duration is chosen arbitrarily, 10m is reported in #1561\n\ttestDuration := 12 * time.Minute\n\tstart := time.Now()\n\tend := start.Add(testDuration)\n\n\tlog.Printf(\"Starting online test from %v to %v\", start, end)\n\n\tfor {\n\t\t// Let the test run continuously for X minutes to verify\n\t\t// all nodes stay connected and has the expected status over time.\n\t\tif end.Before(time.Now()) {\n\t\t\treturn\n\t\t}\n\n\t\tvar nodes []*v1.Node\n\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t\tresult, err := headscale.Execute([]string{\n\t\t\t\t\"headscale\", \"nodes\", \"list\", \"--output\", \"json\",\n\t\t\t})\n\t\t\tassert.NoError(ct, err)\n\n\t\t\terr = json.Unmarshal([]byte(result), &nodes)\n\t\t\tassert.NoError(ct, err)\n\n\t\t\t// Verify that headscale reports the nodes as online\n\t\t\tfor _, node := range nodes {\n\t\t\t\t// All nodes should be online\n\t\t\t\tassert.Truef(\n\t\t\t\t\tct,\n\t\t\t\t\tnode.GetOnline(),\n\t\t\t\t\t\"expected %s to have online status in Headscale, marked as offline %s after start\",\n\t\t\t\t\tnode.GetName(),\n\t\t\t\t\ttime.Since(start),\n\t\t\t\t)\n\t\t\t}\n\t\t}, 15*time.Second, 1*time.Second)\n\n\t\t// Verify that all nodes report all nodes to be online\n\t\tfor _, client := range allClients {\n\t\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t\t\tstatus, err := client.Status()\n\t\t\t\tassert.NoError(ct, err)\n\t\t\t\tif status == nil {\n\t\t\t\t\tassert.Fail(ct, \"status is nil\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tfor _, peerKey := range status.Peers() {\n\t\t\t\t\tpeerStatus := status.Peer[peerKey]\n\n\t\t\t\t\t// .Online is only available from CapVer 16, which\n\t\t\t\t\t// is not present in 1.18 which is the lowest we\n\t\t\t\t\t// test.\n\t\t\t\t\tif strings.Contains(client.Hostname(), \"1-18\") {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\t// All peers of this nodes are reporting to be\n\t\t\t\t\t// connected to the control server\n\t\t\t\t\tassert.Truef(\n\t\t\t\t\t\tct,\n\t\t\t\t\t\tpeerStatus.Online,\n\t\t\t\t\t\t\"expected node %s to be marked as online in %s peer list, marked as offline %s after start\",\n\t\t\t\t\t\tpeerStatus.HostName,\n\t\t\t\t\t\tclient.Hostname(),\n\t\t\t\t\t\ttime.Since(start),\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t}, 15*time.Second, 1*time.Second)\n\t\t}\n\n\t\t// Check maximum once per second\n\t\ttime.Sleep(time.Second)\n\t}\n}\n\n// TestPingAllByIPManyUpDown is a variant of the PingAll\n// test which will take the tailscale node 
up and down\n// five times ensuring they are able to restablish connectivity.\nfunc TestPingAllByIPManyUpDown(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: len(MustTestVersions),\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithTestName(\"pingallbyipmany\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\tallIps, err := scenario.ListTailscaleClientsIPs()\n\trequireNoErrListClientIPs(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t// assertClientsState(t, allClients)\n\n\tallAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {\n\t\treturn x.String()\n\t})\n\n\t// Get headscale instance for batcher debug checks\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\t// Initial check: all nodes should be connected to batcher\n\t// Extract node IDs for validation\n\texpectedNodes := make([]types.NodeID, 0, len(allClients))\n\tfor _, client := range allClients {\n\t\tstatus := client.MustStatus()\n\t\tnodeID, err := strconv.ParseUint(string(status.Self.ID), 10, 64)\n\t\trequire.NoError(t, err)\n\t\texpectedNodes = append(expectedNodes, types.NodeID(nodeID))\n\t}\n\trequireAllClientsOnline(t, headscale, expectedNodes, true, \"all clients should be connected to batcher\", 30*time.Second)\n\n\tsuccess := pingAllHelper(t, allClients, allAddrs)\n\tt.Logf(\"%d successful pings out of %d\", success, len(allClients)*len(allIps))\n\n\tfor run := range 3 {\n\t\tt.Logf(\"Starting DownUpPing run %d at %s\", run+1, time.Now().Format(TimestampFormat))\n\n\t\t// Create fresh errgroup with timeout for each run\n\t\tctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)\n\t\twg, _ := errgroup.WithContext(ctx)\n\n\t\tfor _, client := range allClients {\n\t\t\tc := client\n\t\t\twg.Go(func() error {\n\t\t\t\tt.Logf(\"taking down %q\", c.Hostname())\n\t\t\t\treturn c.Down()\n\t\t\t})\n\t\t}\n\n\t\tif err := wg.Wait(); err != nil {\n\t\t\tt.Fatalf(\"failed to take down all nodes: %s\", err)\n\t\t}\n\t\tt.Logf(\"All nodes taken down at %s\", time.Now().Format(TimestampFormat))\n\n\t\t// After taking down all nodes, verify all systems show nodes offline\n\t\trequireAllClientsOnline(t, headscale, expectedNodes, false, fmt.Sprintf(\"Run %d: all nodes should be offline after Down()\", run+1), 120*time.Second)\n\n\t\tfor _, client := range allClients {\n\t\t\tc := client\n\t\t\twg.Go(func() error {\n\t\t\t\tt.Logf(\"bringing up %q\", c.Hostname())\n\t\t\t\treturn c.Up()\n\t\t\t})\n\t\t}\n\n\t\tif err := wg.Wait(); err != nil {\n\t\t\tt.Fatalf(\"failed to bring up all nodes: %s\", err)\n\t\t}\n\t\tt.Logf(\"All nodes brought up at %s\", time.Now().Format(TimestampFormat))\n\n\t\t// After bringing up all nodes, verify batcher shows all reconnected\n\t\trequireAllClientsOnline(t, headscale, expectedNodes, true, fmt.Sprintf(\"Run %d: all nodes should be reconnected after Up()\", run+1), 120*time.Second)\n\n\t\t// Wait for sync and successful pings after nodes come back up\n\t\terr = scenario.WaitForTailscaleSync()\n\t\tassert.NoError(t, err)\n\n\t\tt.Logf(\"All nodes synced up %s\", time.Now().Format(TimestampFormat))\n\n\t\trequireAllClientsOnline(t, headscale, expectedNodes, true, fmt.Sprintf(\"Run %d: all systems should show 
nodes online after reconnection\", run+1), 60*time.Second)\n\n\t\tsuccess := pingAllHelper(t, allClients, allAddrs)\n\t\tassert.Equalf(t, len(allClients)*len(allIps), success, \"%d successful pings out of %d\", success, len(allClients)*len(allIps))\n\n\t\t// Clean up context for this run\n\t\tcancel()\n\t}\n}\n\nfunc Test2118DeletingOnlineNodePanics(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 1,\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithTestName(\"deletenocrash\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\tallIps, err := scenario.ListTailscaleClientsIPs()\n\trequireNoErrListClientIPs(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\tallAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {\n\t\treturn x.String()\n\t})\n\n\tsuccess := pingAllHelper(t, allClients, allAddrs)\n\tt.Logf(\"%d successful pings out of %d\", success, len(allClients)*len(allIps))\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\t// List all nodes; both should be online.\n\tvar nodeList []v1.Node\n\terr = executeAndUnmarshal(\n\t\theadscale,\n\t\t[]string{\n\t\t\t\"headscale\",\n\t\t\t\"nodes\",\n\t\t\t\"list\",\n\t\t\t\"--output\",\n\t\t\t\"json\",\n\t\t},\n\t\t&nodeList,\n\t)\n\trequire.NoError(t, err)\n\tassert.Len(t, nodeList, 2)\n\tassert.True(t, nodeList[0].GetOnline())\n\tassert.True(t, nodeList[1].GetOnline())\n\n\t// Delete the first node, which is online\n\t_, err = headscale.Execute(\n\t\t[]string{\n\t\t\t\"headscale\",\n\t\t\t\"nodes\",\n\t\t\t\"delete\",\n\t\t\t\"--identifier\",\n\t\t\t// Delete the first node in the list\n\t\t\tfmt.Sprintf(\"%d\", nodeList[0].GetId()),\n\t\t\t\"--output\",\n\t\t\t\"json\",\n\t\t\t\"--force\",\n\t\t},\n\t)\n\trequire.NoError(t, err)\n\n\t// Ensure that the node has been deleted and that doing so did not cause a panic.\n\tvar nodeListAfter []v1.Node\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\terr = executeAndUnmarshal(\n\t\t\theadscale,\n\t\t\t[]string{\n\t\t\t\t\"headscale\",\n\t\t\t\t\"nodes\",\n\t\t\t\t\"list\",\n\t\t\t\t\"--output\",\n\t\t\t\t\"json\",\n\t\t\t},\n\t\t\t&nodeListAfter,\n\t\t)\n\t\tassert.NoError(ct, err)\n\t\tassert.Len(ct, nodeListAfter, 1, \"Node should be deleted from list\")\n\t}, 10*time.Second, 1*time.Second)\n\n\terr = executeAndUnmarshal(\n\t\theadscale,\n\t\t[]string{\n\t\t\t\"headscale\",\n\t\t\t\"nodes\",\n\t\t\t\"list\",\n\t\t\t\"--output\",\n\t\t\t\"json\",\n\t\t},\n\t\t&nodeListAfter,\n\t)\n\trequire.NoError(t, err)\n\tassert.Len(t, nodeListAfter, 1)\n\tassert.True(t, nodeListAfter[0].GetOnline())\n\tassert.Equal(t, nodeList[1].GetId(), nodeListAfter[0].GetId())\n}\n"
  },
  {
    "path": "integration/helpers.go",
    "content": "package integration\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"maps\"\n\t\"net/netip\"\n\t\"slices\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/cenkalti/backoff/v5\"\n\t\"github.com/google/go-cmp/cmp\"\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\tpolicyv2 \"github.com/juanfont/headscale/hscontrol/policy/v2\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/juanfont/headscale/integration/integrationutil\"\n\t\"github.com/juanfont/headscale/integration/tsic\"\n\t\"github.com/oauth2-proxy/mockoidc\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/tailcfg\"\n)\n\nconst (\n\t// derpPingTimeout defines the timeout for individual DERP ping operations\n\t// Used in DERP connectivity tests to verify relay server communication.\n\tderpPingTimeout = 2 * time.Second\n\n\t// derpPingCount defines the number of ping attempts for DERP connectivity tests\n\t// Higher count provides better reliability assessment of DERP connectivity.\n\tderpPingCount = 10\n\n\t// TimestampFormat is the standard timestamp format used across all integration tests\n\t// Format: \"2006-01-02T15-04-05.999999999\" provides high precision timestamps\n\t// suitable for debugging and log correlation in integration tests.\n\tTimestampFormat = \"2006-01-02T15-04-05.999999999\"\n\n\t// TimestampFormatRunID is used for generating unique run identifiers\n\t// Format: \"20060102-150405\" provides compact date-time for file/directory names.\n\tTimestampFormatRunID = \"20060102-150405\"\n\n\t// stateOnline is the string representation for online state in logs.\n\tstateOnline = \"online\"\n\t// stateOffline is the string representation for offline state in logs.\n\tstateOffline = \"offline\"\n)\n\nvar errNoNewClientFound = errors.New(\"no new client found\")\n\n// NodeSystemStatus represents the status of a node across different systems.\ntype NodeSystemStatus struct {\n\tBatcher          bool\n\tBatcherConnCount int\n\tMapResponses     bool\n\tNodeStore        bool\n}\n\n// requireNoErrHeadscaleEnv validates that headscale environment creation succeeded.\n// Provides specific error context for headscale environment setup failures.\nfunc requireNoErrHeadscaleEnv(t *testing.T, err error) {\n\tt.Helper()\n\trequire.NoError(t, err, \"failed to create headscale environment\")\n}\n\n// requireNoErrGetHeadscale validates that headscale server retrieval succeeded.\n// Provides specific error context for headscale server access failures.\nfunc requireNoErrGetHeadscale(t *testing.T, err error) {\n\tt.Helper()\n\trequire.NoError(t, err, \"failed to get headscale\")\n}\n\n// requireNoErrListClients validates that client listing operations succeeded.\n// Provides specific error context for client enumeration failures.\nfunc requireNoErrListClients(t *testing.T, err error) {\n\tt.Helper()\n\trequire.NoError(t, err, \"failed to list clients\")\n}\n\n// requireNoErrListClientIPs validates that client IP retrieval succeeded.\n// Provides specific error context for client IP address enumeration failures.\nfunc requireNoErrListClientIPs(t *testing.T, err error) {\n\tt.Helper()\n\trequire.NoError(t, err, \"failed to get client IPs\")\n}\n\n// requireNoErrSync validates that client synchronization operations succeeded.\n// Provides specific error context for client sync failures across the network.\nfunc requireNoErrSync(t 
*testing.T, err error) {\n\tt.Helper()\n\trequire.NoError(t, err, \"failed to have all clients sync up\")\n}\n\n// requireNoErrListFQDN validates that FQDN listing operations succeeded.\n// Provides specific error context for DNS name enumeration failures.\nfunc requireNoErrListFQDN(t *testing.T, err error) {\n\tt.Helper()\n\trequire.NoError(t, err, \"failed to list FQDNs\")\n}\n\n// requireNoErrLogout validates that tailscale node logout operations succeeded.\n// Provides specific error context for client logout failures.\nfunc requireNoErrLogout(t *testing.T, err error) {\n\tt.Helper()\n\trequire.NoError(t, err, \"failed to log out tailscale nodes\")\n}\n\n// collectExpectedNodeIDs extracts node IDs from a list of TailscaleClients for validation purposes.\nfunc collectExpectedNodeIDs(t *testing.T, clients []TailscaleClient) []types.NodeID {\n\tt.Helper()\n\n\texpectedNodes := make([]types.NodeID, 0, len(clients))\n\tfor _, client := range clients {\n\t\tstatus := client.MustStatus()\n\t\tnodeID, err := strconv.ParseUint(string(status.Self.ID), 10, 64)\n\t\trequire.NoError(t, err)\n\n\t\texpectedNodes = append(expectedNodes, types.NodeID(nodeID))\n\t}\n\n\treturn expectedNodes\n}\n\n
// validateInitialConnection performs comprehensive validation after initial client login.\n// Validates that all nodes are online and have proper NetInfo/DERP configuration,\n// essential for ensuring successful initial connection state in relogin tests.\nfunc validateInitialConnection(t *testing.T, headscale ControlServer, expectedNodes []types.NodeID) {\n\tt.Helper()\n\n\trequireAllClientsOnline(t, headscale, expectedNodes, true, \"all clients should be connected after initial login\", 120*time.Second)\n\trequireAllClientsNetInfoAndDERP(t, headscale, expectedNodes, \"all clients should have NetInfo and DERP after initial login\", 3*time.Minute)\n}\n\n// validateLogoutComplete performs comprehensive validation after client logout.\n// Ensures all nodes are properly offline across all headscale systems,\n// critical for validating clean logout state in relogin tests.\nfunc validateLogoutComplete(t *testing.T, headscale ControlServer, expectedNodes []types.NodeID) {\n\tt.Helper()\n\n\trequireAllClientsOnline(t, headscale, expectedNodes, false, \"all nodes should be offline after logout\", 120*time.Second)\n}\n\n// validateReloginComplete performs comprehensive validation after client relogin.\n// Validates that all nodes are back online with proper NetInfo/DERP configuration,\n// ensuring successful relogin state restoration in integration tests.\nfunc validateReloginComplete(t *testing.T, headscale ControlServer, expectedNodes []types.NodeID) {\n\tt.Helper()\n\n\trequireAllClientsOnline(t, headscale, expectedNodes, true, \"all clients should be connected after relogin\", 120*time.Second)\n\trequireAllClientsNetInfoAndDERP(t, headscale, expectedNodes, \"all clients should have NetInfo and DERP after relogin\", 3*time.Minute)\n}\n\n
// requireAllClientsOnline verifies that all expected nodes are in the specified\n// online/offline state across all headscale systems (batcher, mapresponses, nodestore).\nfunc requireAllClientsOnline(t *testing.T, headscale ControlServer, expectedNodes []types.NodeID, expectedOnline bool, message string, timeout time.Duration) {\n\tt.Helper()\n\n\tstartTime := time.Now()\n\n\tstateStr := stateOffline\n\tif expectedOnline {\n\t\tstateStr = stateOnline\n\t}\n\n\tt.Logf(\"requireAllClientsOnline: Starting %s validation for %d nodes at %s - %s\", stateStr, len(expectedNodes), startTime.Format(TimestampFormat), message)\n\n\tif expectedOnline {\n\t\t// For online validation, use the existing logic with full timeout\n\t\trequireAllClientsOnlineWithSingleTimeout(t, headscale, expectedNodes, expectedOnline, message, timeout)\n\t} else {\n\t\t// For offline validation, use staged approach with component-specific timeouts\n\t\trequireAllClientsOfflineStaged(t, headscale, expectedNodes)\n\t}\n\n\tendTime := time.Now()\n\tt.Logf(\"requireAllClientsOnline: Completed %s validation for %d nodes at %s - Duration: %s - %s\", stateStr, len(expectedNodes), endTime.Format(TimestampFormat), endTime.Sub(startTime), message)\n}\n\n
// requireAllClientsOnlineWithSingleTimeout is the original validation logic for online state.\n//\n//nolint:gocyclo // complex validation with multiple node states\nfunc requireAllClientsOnlineWithSingleTimeout(t *testing.T, headscale ControlServer, expectedNodes []types.NodeID, expectedOnline bool, message string, timeout time.Duration) {\n\tt.Helper()\n\n\tvar prevReport string\n\n\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t// Get batcher state\n\t\tdebugInfo, err := headscale.DebugBatcher()\n\t\tassert.NoError(c, err, \"Failed to get batcher debug info\")\n\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t// Get map responses\n\t\tmapResponses, err := headscale.GetAllMapReponses()\n\t\tassert.NoError(c, err, \"Failed to get map responses\")\n\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t// Get nodestore state\n\t\tnodeStore, err := headscale.DebugNodeStore()\n\t\tassert.NoError(c, err, \"Failed to get nodestore debug info\")\n\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t// Validate that all expected nodes are present in nodeStore\n\t\tfor _, nodeID := range expectedNodes {\n\t\t\t_, exists := nodeStore[nodeID]\n\t\t\tassert.True(c, exists, \"Expected node %d not found in nodeStore\", nodeID)\n\t\t}\n\n\t\t// Check that we have map responses for expected nodes\n\t\tmapResponseCount := len(mapResponses)\n\t\texpectedCount := len(expectedNodes)\n\t\tassert.GreaterOrEqual(c, mapResponseCount, expectedCount, \"MapResponses insufficient - expected at least %d responses, got %d\", expectedCount, mapResponseCount)\n\n
\t\t// Build status map for each node\n\t\tnodeStatus := make(map[types.NodeID]NodeSystemStatus)\n\n\t\t// Initialize all expected nodes\n\t\tfor _, nodeID := range expectedNodes {\n\t\t\tnodeStatus[nodeID] = NodeSystemStatus{}\n\t\t}\n\n\t\t// Check batcher state for expected nodes\n\t\tfor _, nodeID := range expectedNodes {\n\t\t\tnodeIDStr := fmt.Sprintf(\"%d\", nodeID)\n\t\t\tif nodeInfo, exists := debugInfo.ConnectedNodes[nodeIDStr]; exists {\n\t\t\t\tif status, exists := nodeStatus[nodeID]; exists {\n\t\t\t\t\tstatus.Batcher = nodeInfo.Connected\n\t\t\t\t\tstatus.BatcherConnCount = nodeInfo.ActiveConnections\n\t\t\t\t\tnodeStatus[nodeID] = status\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Node not found in batcher, mark as disconnected\n\t\t\t\tif status, exists := nodeStatus[nodeID]; exists {\n\t\t\t\t\tstatus.Batcher = false\n\t\t\t\t\tstatus.BatcherConnCount = 0\n\t\t\t\t\tnodeStatus[nodeID] = status\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Check map responses using integrationutil.BuildExpectedOnlineMap\n\t\tonlineFromMaps := make(map[types.NodeID]bool)\n\t\tonlineMap := integrationutil.BuildExpectedOnlineMap(mapResponses)\n\n\t\t// For single node scenarios, we can't validate peer visibility since there are no peers\n\t\tif len(expectedNodes) == 1 {\n\t\t\t// For single node, just check that we have 
map responses for the node\n\t\t\tfor nodeID := range nodeStatus {\n\t\t\t\tif _, exists := onlineMap[nodeID]; exists {\n\t\t\t\t\tonlineFromMaps[nodeID] = true\n\t\t\t\t} else {\n\t\t\t\t\tonlineFromMaps[nodeID] = false\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t// Multi-node scenario: check peer visibility\n\t\t\tfor nodeID := range nodeStatus {\n\t\t\t\t// Initialize as offline - will be set to true only if visible in all relevant peer maps\n\t\t\t\tonlineFromMaps[nodeID] = false\n\n\t\t\t\t// Count how many peer maps should show this node\n\t\t\t\texpectedPeerMaps := 0\n\t\t\t\tfoundOnlinePeerMaps := 0\n\n\t\t\t\tfor id, peerMap := range onlineMap {\n\t\t\t\t\tif id == nodeID {\n\t\t\t\t\t\tcontinue // Skip self-references\n\t\t\t\t\t}\n\n\t\t\t\t\texpectedPeerMaps++\n\n\t\t\t\t\tif online, exists := peerMap[nodeID]; exists && online {\n\t\t\t\t\t\tfoundOnlinePeerMaps++\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Node is considered online if it appears online in all peer maps\n\t\t\t\t// (or if there are no peer maps to check)\n\t\t\t\tif expectedPeerMaps == 0 || foundOnlinePeerMaps == expectedPeerMaps {\n\t\t\t\t\tonlineFromMaps[nodeID] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tassert.Lenf(c, onlineFromMaps, expectedCount, \"MapResponses missing nodes in status check\")\n\n\t\t// Update status with map response data\n\t\tfor nodeID, online := range onlineFromMaps {\n\t\t\tif status, exists := nodeStatus[nodeID]; exists {\n\t\t\t\tstatus.MapResponses = online\n\t\t\t\tnodeStatus[nodeID] = status\n\t\t\t}\n\t\t}\n\n\t\t// Check nodestore state for expected nodes\n\t\tfor _, nodeID := range expectedNodes {\n\t\t\tif node, exists := nodeStore[nodeID]; exists {\n\t\t\t\tif status, exists := nodeStatus[nodeID]; exists {\n\t\t\t\t\t// Check if node is online in nodestore\n\t\t\t\t\tstatus.NodeStore = node.IsOnline != nil && *node.IsOnline\n\t\t\t\t\tnodeStatus[nodeID] = status\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Verify all systems show nodes in expected state and report failures\n\t\tallMatch := true\n\n\t\tvar failureReport strings.Builder\n\n\t\tids := types.NodeIDs(slices.AppendSeq(make([]types.NodeID, 0, len(nodeStatus)), maps.Keys(nodeStatus)))\n\t\tslices.Sort(ids)\n\n\t\tfor _, nodeID := range ids {\n\t\t\tstatus := nodeStatus[nodeID]\n\t\t\tsystemsMatch := (status.Batcher == expectedOnline) &&\n\t\t\t\t(status.MapResponses == expectedOnline) &&\n\t\t\t\t(status.NodeStore == expectedOnline)\n\n\t\t\tif !systemsMatch {\n\t\t\t\tallMatch = false\n\n\t\t\t\tstateStr := stateOffline\n\t\t\t\tif expectedOnline {\n\t\t\t\t\tstateStr = stateOnline\n\t\t\t\t}\n\n\t\t\t\tfailureReport.WriteString(fmt.Sprintf(\"node:%d is not fully %s (timestamp: %s):\\n\", nodeID, stateStr, time.Now().Format(TimestampFormat)))\n\t\t\t\tfailureReport.WriteString(fmt.Sprintf(\"  - batcher: %t (expected: %t)\\n\", status.Batcher, expectedOnline))\n\t\t\t\tfailureReport.WriteString(fmt.Sprintf(\"    - conn count: %d\\n\", status.BatcherConnCount))\n\t\t\t\tfailureReport.WriteString(fmt.Sprintf(\"  - mapresponses: %t (expected: %t, down with at least one peer)\\n\", status.MapResponses, expectedOnline))\n\t\t\t\tfailureReport.WriteString(fmt.Sprintf(\"  - nodestore: %t (expected: %t)\\n\", status.NodeStore, expectedOnline))\n\t\t\t}\n\t\t}\n\n\t\tif !allMatch {\n\t\t\tif diff := cmp.Diff(prevReport, failureReport.String()); diff != \"\" {\n\t\t\t\tt.Logf(\"Node state validation report changed at %s:\", time.Now().Format(TimestampFormat))\n\t\t\t\tt.Logf(\"Previous report:\\n%s\", prevReport)\n\t\t\t\tt.Logf(\"Current 
report:\\n%s\", failureReport.String())\n\t\t\t\tt.Logf(\"Report diff:\\n%s\", diff)\n\n\t\t\t\tprevReport = failureReport.String()\n\t\t\t}\n\n\t\t\tfailureReport.WriteString(fmt.Sprintf(\"validation_timestamp: %s\\n\", time.Now().Format(TimestampFormat)))\n\t\t\t// Note: timeout_remaining not available in this context\n\n\t\t\tassert.Fail(c, failureReport.String())\n\t\t}\n\n\t\tstateStr := stateOffline\n\t\tif expectedOnline {\n\t\t\tstateStr = stateOnline\n\t\t}\n\n\t\tassert.True(c, allMatch, \"Not all %d nodes are %s across all systems (batcher, mapresponses, nodestore)\", len(expectedNodes), stateStr)\n\t}, timeout, 2*time.Second, message)\n}\n\n// requireAllClientsOfflineStaged validates offline state with staged timeouts for different components.\nfunc requireAllClientsOfflineStaged(t *testing.T, headscale ControlServer, expectedNodes []types.NodeID) {\n\tt.Helper()\n\n\t// Stage 1: Verify batcher disconnection (should be immediate)\n\tt.Logf(\"Stage 1: Verifying batcher disconnection for %d nodes\", len(expectedNodes))\n\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tdebugInfo, err := headscale.DebugBatcher()\n\t\tassert.NoError(c, err, \"Failed to get batcher debug info\")\n\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tallBatcherOffline := true\n\n\t\tfor _, nodeID := range expectedNodes {\n\t\t\tnodeIDStr := fmt.Sprintf(\"%d\", nodeID)\n\t\t\tif nodeInfo, exists := debugInfo.ConnectedNodes[nodeIDStr]; exists && nodeInfo.Connected {\n\t\t\t\tallBatcherOffline = false\n\n\t\t\t\tassert.False(c, nodeInfo.Connected, \"Node %d should not be connected in batcher\", nodeID)\n\t\t\t}\n\t\t}\n\n\t\tassert.True(c, allBatcherOffline, \"All nodes should be disconnected from batcher\")\n\t}, 15*time.Second, 1*time.Second, \"batcher disconnection validation\")\n\n\t// Stage 2: Verify nodestore offline status (up to 15 seconds due to disconnect detection delay)\n\tt.Logf(\"Stage 2: Verifying nodestore offline status for %d nodes (allowing for 10s disconnect detection delay)\", len(expectedNodes))\n\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodeStore, err := headscale.DebugNodeStore()\n\t\tassert.NoError(c, err, \"Failed to get nodestore debug info\")\n\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tallNodeStoreOffline := true\n\n\t\tfor _, nodeID := range expectedNodes {\n\t\t\tif node, exists := nodeStore[nodeID]; exists {\n\t\t\t\tisOnline := node.IsOnline != nil && *node.IsOnline\n\t\t\t\tif isOnline {\n\t\t\t\t\tallNodeStoreOffline = false\n\n\t\t\t\t\tassert.False(c, isOnline, \"Node %d should be offline in nodestore\", nodeID)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tassert.True(c, allNodeStoreOffline, \"All nodes should be offline in nodestore\")\n\t}, 20*time.Second, 1*time.Second, \"nodestore offline validation\")\n\n\t// Stage 3: Verify map response propagation (longest delay due to peer update timing)\n\tt.Logf(\"Stage 3: Verifying map response propagation for %d nodes (allowing for peer map update delays)\", len(expectedNodes))\n\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tmapResponses, err := headscale.GetAllMapReponses()\n\t\tassert.NoError(c, err, \"Failed to get map responses\")\n\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tonlineMap := integrationutil.BuildExpectedOnlineMap(mapResponses)\n\t\tallMapResponsesOffline := true\n\n\t\tif len(expectedNodes) == 1 {\n\t\t\t// Single node: check if it appears in map responses\n\t\t\tfor nodeID := range onlineMap {\n\t\t\t\tif slices.Contains(expectedNodes, nodeID) 
{\n\t\t\t\t\tallMapResponsesOffline = false\n\n\t\t\t\t\tassert.Fail(c, fmt.Sprintf(\"Node %d should not appear in map responses\", nodeID))\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t// Multi-node: check peer visibility\n\t\t\tfor _, nodeID := range expectedNodes {\n\t\t\t\tfor id, peerMap := range onlineMap {\n\t\t\t\t\tif id == nodeID {\n\t\t\t\t\t\tcontinue // Skip self-references\n\t\t\t\t\t}\n\n\t\t\t\t\tif online, exists := peerMap[nodeID]; exists && online {\n\t\t\t\t\t\tallMapResponsesOffline = false\n\n\t\t\t\t\t\tassert.False(c, online, \"Node %d should not be visible in node %d's map response\", nodeID, id)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tassert.True(c, allMapResponsesOffline, \"All nodes should be absent from peer map responses\")\n\t}, 60*time.Second, 2*time.Second, \"map response propagation validation\")\n\n\tt.Logf(\"All stages completed: nodes are fully offline across all systems\")\n}\n\n// requireAllClientsNetInfoAndDERP validates that all nodes have NetInfo in the database\n// and a valid DERP server based on the NetInfo. This function follows the pattern of\n// requireAllClientsOnline by using hsic.DebugNodeStore to get the database state.\n//\n//nolint:unparam // timeout is configurable for flexibility even though callers currently use same value\nfunc requireAllClientsNetInfoAndDERP(t *testing.T, headscale ControlServer, expectedNodes []types.NodeID, message string, timeout time.Duration) {\n\tt.Helper()\n\n\tstartTime := time.Now()\n\tt.Logf(\"requireAllClientsNetInfoAndDERP: Starting NetInfo/DERP validation for %d nodes at %s - %s\", len(expectedNodes), startTime.Format(TimestampFormat), message)\n\n\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t// Get nodestore state\n\t\tnodeStore, err := headscale.DebugNodeStore()\n\t\tassert.NoError(c, err, \"Failed to get nodestore debug info\")\n\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t// Validate that all expected nodes are present in nodeStore\n\t\tfor _, nodeID := range expectedNodes {\n\t\t\t_, exists := nodeStore[nodeID]\n\t\t\tassert.True(c, exists, \"Expected node %d not found in nodeStore during NetInfo validation\", nodeID)\n\t\t}\n\n\t\t// Check each expected node\n\t\tfor _, nodeID := range expectedNodes {\n\t\t\tnode, exists := nodeStore[nodeID]\n\t\t\tassert.True(c, exists, \"Node %d not found in nodestore during NetInfo validation\", nodeID)\n\n\t\t\tif !exists {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Validate that the node has Hostinfo\n\t\t\tassert.NotNil(c, node.Hostinfo, \"Node %d (%s) should have Hostinfo for NetInfo validation\", nodeID, node.Hostname)\n\n\t\t\tif node.Hostinfo == nil {\n\t\t\t\tt.Logf(\"Node %d (%s) missing Hostinfo at %s\", nodeID, node.Hostname, time.Now().Format(TimestampFormat))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Validate that the node has NetInfo\n\t\t\tassert.NotNil(c, node.Hostinfo.NetInfo, \"Node %d (%s) should have NetInfo in Hostinfo for DERP connectivity\", nodeID, node.Hostname)\n\n\t\t\tif node.Hostinfo.NetInfo == nil {\n\t\t\t\tt.Logf(\"Node %d (%s) missing NetInfo at %s\", nodeID, node.Hostname, time.Now().Format(TimestampFormat))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Validate that the node has a valid DERP server (PreferredDERP should be > 0)\n\t\t\tpreferredDERP := node.Hostinfo.NetInfo.PreferredDERP\n\t\t\tassert.Positive(c, preferredDERP, \"Node %d (%s) should have a valid DERP server (PreferredDERP > 0) for relay connectivity, got %d\", nodeID, node.Hostname, preferredDERP)\n\n\t\t\tt.Logf(\"Node %d (%s) has valid NetInfo with 
DERP server %d at %s\", nodeID, node.Hostname, preferredDERP, time.Now().Format(TimestampFormat))\n\t\t}\n\t}, timeout, 5*time.Second, message)\n\n\tendTime := time.Now()\n\tduration := endTime.Sub(startTime)\n\tt.Logf(\"requireAllClientsNetInfoAndDERP: Completed NetInfo/DERP validation for %d nodes at %s - Duration: %v - %s\", len(expectedNodes), endTime.Format(TimestampFormat), duration, message)\n}\n\n// assertLastSeenSet validates that a node has a non-nil LastSeen timestamp.\n// Critical for ensuring node activity tracking is functioning properly.\nfunc assertLastSeenSet(t *testing.T, node *v1.Node) {\n\tt.Helper()\n\tassert.NotNil(t, node)\n\tassert.NotNil(t, node.GetLastSeen())\n}\n\nfunc assertLastSeenSetWithCollect(c *assert.CollectT, node *v1.Node) {\n\tassert.NotNil(c, node)\n\tassert.NotNil(c, node.GetLastSeen())\n}\n\n// assertTailscaleNodesLogout verifies that all provided Tailscale clients\n// are in the logged-out state (NeedsLogin).\nfunc assertTailscaleNodesLogout(t assert.TestingT, clients []TailscaleClient) {\n\tif h, ok := t.(interface{ Helper() }); ok {\n\t\th.Helper()\n\t}\n\n\tfor _, client := range clients {\n\t\tstatus, err := client.Status()\n\t\tassert.NoError(t, err, \"failed to get status for client %s\", client.Hostname()) //nolint:testifylint // assert.TestingT interface\n\t\tassert.Equal(t, \"NeedsLogin\", status.BackendState,\n\t\t\t\"client %s should be logged out\", client.Hostname())\n\t}\n}\n\n// pingAllHelper performs ping tests between all clients and addresses, returning success count.\n// This is used to validate network connectivity in integration tests.\n// Returns the total number of successful ping operations.\n//\n//nolint:unparam // opts is variadic for extensibility even though callers currently don't pass options\nfunc pingAllHelper(t *testing.T, clients []TailscaleClient, addrs []string, opts ...tsic.PingOption) int {\n\tt.Helper()\n\n\tsuccess := 0\n\n\tfor _, client := range clients {\n\t\tfor _, addr := range addrs {\n\t\t\terr := client.Ping(addr, opts...)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed to ping %s from %s: %s\", addr, client.Hostname(), err)\n\t\t\t} else {\n\t\t\t\tsuccess++\n\t\t\t}\n\t\t}\n\t}\n\n\treturn success\n}\n\n// pingDerpAllHelper performs DERP-based ping tests between all clients and addresses.\n// This specifically tests connectivity through DERP relay servers, which is important\n// for validating NAT traversal and relay functionality. 
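A minimal usage sketch (assuming\n// allClients and allAddrs were collected from the scenario, as in the tests above):\n//\n//\tsuccess := pingDerpAllHelper(t, allClients, allAddrs)\n//\tt.Logf(\"%d successful DERP pings out of %d\", success, len(allClients)*len(allAddrs))\n//\n// 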
Returns success count.\nfunc pingDerpAllHelper(t *testing.T, clients []TailscaleClient, addrs []string) int {\n\tt.Helper()\n\n\tsuccess := 0\n\n\tfor _, client := range clients {\n\t\tfor _, addr := range addrs {\n\t\t\tif isSelfClient(client, addr) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\terr := client.Ping(\n\t\t\t\taddr,\n\t\t\t\ttsic.WithPingTimeout(derpPingTimeout),\n\t\t\t\ttsic.WithPingCount(derpPingCount),\n\t\t\t\ttsic.WithPingUntilDirect(false),\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"failed to ping %s from %s: %s\", addr, client.Hostname(), err)\n\t\t\t} else {\n\t\t\t\tsuccess++\n\t\t\t}\n\t\t}\n\t}\n\n\treturn success\n}\n\n// isSelfClient determines if the given address belongs to the client itself.\n// Used to avoid self-ping operations in connectivity tests by checking\n// hostname and IP address matches.\nfunc isSelfClient(client TailscaleClient, addr string) bool {\n\tif addr == client.Hostname() {\n\t\treturn true\n\t}\n\n\tips, err := client.IPs()\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tfor _, ip := range ips {\n\t\tif ip.String() == addr {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n// assertClientsState validates the status and netmap of a list of clients for general connectivity.\n// Runs parallel validation of status, netcheck, and netmap for all clients to ensure\n// they have proper network configuration for all-to-all connectivity tests.\n//\n//nolint:unused\nfunc assertClientsState(t *testing.T, clients []TailscaleClient) {\n\tt.Helper()\n\n\tvar wg sync.WaitGroup\n\n\tfor _, client := range clients {\n\t\twg.Add(1)\n\n\t\tc := client // Avoid loop pointer\n\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\n\t\t\tassertValidStatus(t, c)\n\t\t\tassertValidNetcheck(t, c)\n\t\t\tassertValidNetmap(t, c)\n\t\t}()\n\t}\n\n\tt.Logf(\"waiting for client state checks to finish\")\n\twg.Wait()\n}\n\n// assertValidNetmap validates that a client's netmap has all required fields for proper operation.\n// Checks self node and all peers for essential networking data including hostinfo, addresses,\n// endpoints, and DERP configuration. 
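A typical call, mirroring how\n// assertClientsState uses it, passes each client from the scenario:\n//\n//\tfor _, client := range allClients {\n//\t\tassertValidNetmap(t, client)\n//\t}\n//\n// 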
Skips validation for Tailscale versions below 1.56.\n// This test is not suitable for ACL/partial connection tests.\n//\n//nolint:unused\nfunc assertValidNetmap(t *testing.T, client TailscaleClient) {\n\tt.Helper()\n\n\tif !util.TailscaleVersionNewerOrEqual(\"1.56\", client.Version()) {\n\t\tt.Logf(\"%q has version %q, skipping netmap check...\", client.Hostname(), client.Version())\n\n\t\treturn\n\t}\n\n\tt.Logf(\"Checking netmap of %q\", client.Hostname())\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnetmap, err := client.Netmap()\n\t\tassert.NoError(c, err, \"getting netmap for %q\", client.Hostname())\n\n\t\tassert.Truef(c, netmap.SelfNode.Hostinfo().Valid(), \"%q does not have Hostinfo\", client.Hostname())\n\n\t\tif hi := netmap.SelfNode.Hostinfo(); hi.Valid() {\n\t\t\tassert.LessOrEqual(c, 1, netmap.SelfNode.Hostinfo().Services().Len(), \"%q does not have enough services, got: %v\", client.Hostname(), netmap.SelfNode.Hostinfo().Services())\n\t\t}\n\n\t\tassert.NotEmptyf(c, netmap.SelfNode.AllowedIPs(), \"%q does not have any allowed IPs\", client.Hostname())\n\t\tassert.NotEmptyf(c, netmap.SelfNode.Addresses(), \"%q does not have any addresses\", client.Hostname())\n\n\t\tassert.Truef(c, netmap.SelfNode.Online().Get(), \"%q is not online\", client.Hostname())\n\n\t\tassert.Falsef(c, netmap.SelfNode.Key().IsZero(), \"%q does not have a valid NodeKey\", client.Hostname())\n\t\tassert.Falsef(c, netmap.SelfNode.Machine().IsZero(), \"%q does not have a valid MachineKey\", client.Hostname())\n\t\tassert.Falsef(c, netmap.SelfNode.DiscoKey().IsZero(), \"%q does not have a valid DiscoKey\", client.Hostname())\n\n
\t\tfor _, peer := range netmap.Peers {\n\t\t\tassert.NotEqualf(c, \"127.3.3.40:0\", peer.LegacyDERPString(), \"peer (%s) has no home DERP in %q's netmap, got: %s\", peer.ComputedName(), client.Hostname(), peer.LegacyDERPString()) //nolint:staticcheck // SA1019: testing legacy field\n\t\t\tassert.NotEqualf(c, 0, peer.HomeDERP(), \"peer (%s) has no home DERP in %q's netmap, got: %d\", peer.ComputedName(), client.Hostname(), peer.HomeDERP())\n\n\t\t\tassert.Truef(c, peer.Hostinfo().Valid(), \"peer (%s) of %q does not have Hostinfo\", peer.ComputedName(), client.Hostname())\n\n\t\t\tif hi := peer.Hostinfo(); hi.Valid() {\n\t\t\t\tassert.LessOrEqualf(c, 3, peer.Hostinfo().Services().Len(), \"peer (%s) of %q does not have enough services, got: %v\", peer.ComputedName(), client.Hostname(), peer.Hostinfo().Services())\n\n\t\t\t\t// Netinfo is not always set\n\t\t\t\t// assert.Truef(c, hi.NetInfo().Valid(), \"peer (%s) of %q does not have NetInfo\", peer.ComputedName(), client.Hostname())\n\t\t\t\tif ni := hi.NetInfo(); ni.Valid() {\n\t\t\t\t\tassert.NotEqualf(c, 0, ni.PreferredDERP(), \"peer (%s) has no home DERP in %q's netmap, got: %d\", peer.ComputedName(), client.Hostname(), ni.PreferredDERP())\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tassert.NotEmptyf(c, peer.Endpoints(), \"peer (%s) of %q does not have any endpoints\", peer.ComputedName(), client.Hostname())\n\t\t\tassert.NotEmptyf(c, peer.AllowedIPs(), \"peer (%s) of %q does not have any allowed IPs\", peer.ComputedName(), client.Hostname())\n\t\t\tassert.NotEmptyf(c, peer.Addresses(), \"peer (%s) of %q does not have any addresses\", peer.ComputedName(), client.Hostname())\n\n\t\t\tassert.Truef(c, peer.Online().Get(), \"peer (%s) of %q is not online\", peer.ComputedName(), client.Hostname())\n\n\t\t\tassert.Falsef(c, peer.Key().IsZero(), \"peer (%s) of %q does not have a valid NodeKey\", peer.ComputedName(), client.Hostname())\n\t\t\tassert.Falsef(c, peer.Machine().IsZero(), \"peer (%s) of %q does not have a valid MachineKey\", peer.ComputedName(), client.Hostname())\n\t\t\tassert.Falsef(c, peer.DiscoKey().IsZero(), \"peer (%s) of %q does not have a valid DiscoKey\", peer.ComputedName(), client.Hostname())\n\t\t}\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for valid netmap for %q\", client.Hostname())\n}\n\n
// assertValidStatus validates that a client's status has all required fields for proper operation.\n// Checks self and peer status for essential data including hostinfo, tailscale IPs, endpoints,\n// and network map presence. This test is not suitable for ACL/partial connection tests.\n//\n//nolint:unused\nfunc assertValidStatus(t *testing.T, client TailscaleClient) {\n\tt.Helper()\n\n\tstatus, err := client.Status(true)\n\tif err != nil {\n\t\tt.Fatalf(\"getting status for %q: %s\", client.Hostname(), err)\n\t}\n\n\tassert.NotEmptyf(t, status.Self.HostName, \"%q does not have HostName set, likely missing Hostinfo\", client.Hostname())\n\tassert.NotEmptyf(t, status.Self.OS, \"%q does not have OS set, likely missing Hostinfo\", client.Hostname())\n\tassert.NotEmptyf(t, status.Self.Relay, \"%q does not have a relay, likely missing Hostinfo/Netinfo\", client.Hostname())\n\n\tassert.NotEmptyf(t, status.Self.TailscaleIPs, \"%q does not have Tailscale IPs\", client.Hostname())\n\n\t// This seems not to appear until version 1.56\n\tif status.Self.AllowedIPs != nil {\n\t\tassert.NotEmptyf(t, status.Self.AllowedIPs, \"%q does not have any allowed IPs\", client.Hostname())\n\t}\n\n\tassert.NotEmptyf(t, status.Self.Addrs, \"%q does not have any endpoints\", client.Hostname())\n\n\tassert.Truef(t, status.Self.Online, \"%q is not online\", client.Hostname())\n\n\tassert.Truef(t, status.Self.InNetworkMap, \"%q is not in network map\", client.Hostname())\n\n\t// This isn't really relevant for Self as it won't be in its own socket/wireguard.\n\t// assert.Truef(t, status.Self.InMagicSock, \"%q is not tracked by magicsock\", client.Hostname())\n\t// assert.Truef(t, status.Self.InEngine, \"%q is not in wireguard engine\", client.Hostname())\n\n
\tfor _, peer := range status.Peer {\n\t\tassert.NotEmptyf(t, peer.HostName, \"peer (%s) of %q does not have HostName set, likely missing Hostinfo\", peer.DNSName, client.Hostname())\n\t\tassert.NotEmptyf(t, peer.OS, \"peer (%s) of %q does not have OS set, likely missing Hostinfo\", peer.DNSName, client.Hostname())\n\t\tassert.NotEmptyf(t, peer.Relay, \"peer (%s) of %q does not have a relay, likely missing Hostinfo/Netinfo\", peer.DNSName, client.Hostname())\n\n\t\tassert.NotEmptyf(t, peer.TailscaleIPs, \"peer (%s) of %q does not have Tailscale IPs\", peer.DNSName, client.Hostname())\n\n\t\t// This seems not to appear until version 1.56\n\t\tif peer.AllowedIPs != nil {\n\t\t\tassert.NotEmptyf(t, peer.AllowedIPs, \"peer (%s) of %q does not have any allowed IPs\", peer.DNSName, client.Hostname())\n\t\t}\n\n\t\t// Addrs does not seem to appear in the status from peers.\n\t\t// assert.NotEmptyf(t, peer.Addrs, \"peer (%s) of %q does not have any endpoints\", peer.DNSName, client.Hostname())\n\n\t\tassert.Truef(t, peer.Online, \"peer (%s) of %q is not online\", peer.DNSName, client.Hostname())\n\n\t\tassert.Truef(t, peer.InNetworkMap, \"peer (%s) of %q is not in network map\", peer.DNSName, client.Hostname())\n\t\tassert.Truef(t, peer.InMagicSock, \"peer (%s) of %q is not tracked by magicsock\", peer.DNSName, client.Hostname())\n\n\t\t// 
TODO(kradalby): InEngine is only true when a proper tunnel is set up,\n\t\t// there might be some interesting stuff to test here in the future.\n\t\t// assert.Truef(t, peer.InEngine, \"peer (%s) of %q is not in wireguard engine\", peer.DNSName, client.Hostname())\n\t}\n}\n\n// assertValidNetcheck validates that a client has a proper DERP relay configured.\n// Ensures the client has discovered and selected a DERP server for relay functionality,\n// which is essential for NAT traversal and connectivity in restricted networks.\n//\n//nolint:unused\nfunc assertValidNetcheck(t *testing.T, client TailscaleClient) {\n\tt.Helper()\n\n\treport, err := client.Netcheck()\n\tif err != nil {\n\t\tt.Fatalf(\"getting status for %q: %s\", client.Hostname(), err)\n\t}\n\n\tassert.NotEqualf(t, 0, report.PreferredDERP, \"%q does not have a DERP relay\", client.Hostname())\n}\n\n// assertCommandOutputContains executes a command with exponential backoff retry until the output\n// contains the expected string or timeout is reached (10 seconds).\n// This implements eventual consistency patterns and should be used instead of time.Sleep\n// before executing commands that depend on network state propagation.\n//\n// Timeout: 10 seconds with exponential backoff\n// Use cases: DNS resolution, route propagation, policy updates.\nfunc assertCommandOutputContains(t *testing.T, c TailscaleClient, command []string, contains string) {\n\tt.Helper()\n\n\t_, err := backoff.Retry(t.Context(), func() (struct{}, error) {\n\t\tstdout, stderr, err := c.Execute(command)\n\t\tif err != nil {\n\t\t\treturn struct{}{}, fmt.Errorf(\"executing command, stdout: %q stderr: %q, err: %w\", stdout, stderr, err)\n\t\t}\n\n\t\tif !strings.Contains(stdout, contains) {\n\t\t\treturn struct{}{}, fmt.Errorf(\"executing command, expected string %q not found in %q\", contains, stdout) //nolint:err113\n\t\t}\n\n\t\treturn struct{}{}, nil\n\t}, backoff.WithBackOff(backoff.NewExponentialBackOff()), backoff.WithMaxElapsedTime(10*time.Second))\n\n\tassert.NoError(t, err)\n}\n\n// dockertestMaxWait returns the maximum wait time for Docker-based test operations.\n// Uses longer timeouts in CI environments to account for slower resource allocation\n// and higher system load during automated testing.\nfunc dockertestMaxWait() time.Duration {\n\twait := 300 * time.Second //nolint\n\n\tif util.IsCI() {\n\t\twait = 600 * time.Second //nolint\n\t}\n\n\treturn wait\n}\n\n// didClientUseWebsocketForDERP analyzes client logs to determine if WebSocket was used for DERP.\n// Searches for WebSocket connection indicators in client logs to validate\n// DERP relay communication method for debugging connectivity issues.\nfunc didClientUseWebsocketForDERP(t *testing.T, client TailscaleClient) bool {\n\tt.Helper()\n\n\tbuf := &bytes.Buffer{}\n\n\terr := client.WriteLogs(buf, buf)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to fetch client logs: %s: %s\", client.Hostname(), err)\n\t}\n\n\tcount, err := countMatchingLines(buf, func(line string) bool {\n\t\treturn strings.Contains(line, \"websocket: connected to \")\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to process client logs: %s: %s\", client.Hostname(), err)\n\t}\n\n\treturn count > 0\n}\n\n// countMatchingLines counts lines in a reader that match the given predicate function.\n// Uses optimized buffering for log analysis and provides flexible line-by-line\n// filtering for log parsing and pattern matching in integration tests.\nfunc countMatchingLines(in io.Reader, predicate func(string) bool) (int, error) 
{\n\tcount := 0\n\tscanner := bufio.NewScanner(in)\n\t{\n\t\tconst logBufferInitialSize = 1024 << 10 // preallocate 1 MiB\n\n\t\tbuff := make([]byte, logBufferInitialSize)\n\t\tscanner.Buffer(buff, len(buff))\n\t\tscanner.Split(bufio.ScanLines)\n\t}\n\n\tfor scanner.Scan() {\n\t\tif predicate(scanner.Text()) {\n\t\t\tcount += 1\n\t\t}\n\t}\n\n\treturn count, scanner.Err()\n}\n\n// wildcard returns a wildcard alias (*) for use in policy v2 configurations.\n// Provides a convenient helper for creating permissive policy rules.\nfunc wildcard() policyv2.Alias {\n\treturn policyv2.Wildcard\n}\n\n// usernamep returns a pointer to a Username as an Alias for policy v2 configurations.\n// Used in ACL rules to reference specific users in network access policies.\nfunc usernamep(name string) policyv2.Alias {\n\treturn new(policyv2.Username(name))\n}\n\n// hostp returns a pointer to a Host as an Alias for policy v2 configurations.\n// Used in ACL rules to reference specific hosts in network access policies.\nfunc hostp(name string) policyv2.Alias {\n\treturn new(policyv2.Host(name))\n}\n\n// groupp returns a pointer to a Group as an Alias for policy v2 configurations.\n// Used in ACL rules to reference user groups in network access policies.\nfunc groupp(name string) policyv2.Alias {\n\treturn new(policyv2.Group(name))\n}\n\n// tagp returns a pointer to a Tag as an Alias for policy v2 configurations.\n// Used in ACL rules to reference node tags in network access policies.\nfunc tagp(name string) policyv2.Alias {\n\treturn new(policyv2.Tag(name))\n}\n\n// prefixp returns a pointer to a Prefix from a CIDR string for policy v2 configurations.\n// Converts CIDR notation to policy prefix format for network range specifications.\nfunc prefixp(cidr string) policyv2.Alias {\n\tp := policyv2.Prefix(netip.MustParsePrefix(cidr))\n\treturn &p\n}\n\n// aliasWithPorts creates an AliasWithPorts structure from an alias and port ranges.\n// Combines network targets with specific port restrictions for fine-grained\n// access control in policy v2 configurations.\nfunc aliasWithPorts(alias policyv2.Alias, ports ...tailcfg.PortRange) policyv2.AliasWithPorts {\n\treturn policyv2.AliasWithPorts{\n\t\tAlias: alias,\n\t\tPorts: ports,\n\t}\n}\n\n// usernameOwner returns a Username as an Owner for use in TagOwners policies.\n// Specifies which users can assign and manage specific tags in ACL configurations.\nfunc usernameOwner(name string) policyv2.Owner {\n\treturn new(policyv2.Username(name))\n}\n\n// groupOwner returns a Group as an Owner for use in TagOwners policies.\n// Specifies which groups can assign and manage specific tags in ACL configurations.\n//\n//nolint:unused\nfunc groupOwner(name string) policyv2.Owner {\n\treturn new(policyv2.Group(name))\n}\n\n// usernameApprover returns a Username as an AutoApprover for subnet route policies.\n// Specifies which users can automatically approve subnet route advertisements.\nfunc usernameApprover(name string) policyv2.AutoApprover {\n\treturn new(policyv2.Username(name))\n}\n\n// groupApprover returns a Group as an AutoApprover for subnet route policies.\n// Specifies which groups can automatically approve subnet route advertisements.\nfunc groupApprover(name string) policyv2.AutoApprover {\n\treturn new(policyv2.Group(name))\n}\n\n// tagApprover returns a Tag as an AutoApprover for subnet route policies.\n// Specifies which tagged nodes can automatically approve subnet route advertisements.\nfunc tagApprover(name string) policyv2.AutoApprover {\n\treturn 
new(policyv2.Tag(name))\n}\n\n// oidcMockUser creates a MockUser for OIDC authentication testing.\n// Generates consistent test user data with configurable email verification status\n// for validating OIDC integration flows in headscale authentication tests.\nfunc oidcMockUser(username string, emailVerified bool) mockoidc.MockUser {\n\treturn mockoidc.MockUser{\n\t\tSubject:           username,\n\t\tPreferredUsername: username,\n\t\tEmail:             username + \"@headscale.net\",\n\t\tEmailVerified:     emailVerified,\n\t}\n}\n\n// GetUserByName retrieves a user by name from the headscale server.\n// This is a common pattern used when creating preauth keys or managing users.\nfunc GetUserByName(headscale ControlServer, username string) (*v1.User, error) {\n\tusers, err := headscale.ListUsers()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"listing users: %w\", err)\n\t}\n\n\tfor _, u := range users {\n\t\tif u.GetName() == username {\n\t\t\treturn u, nil\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"user %s not found\", username) //nolint:err113\n}\n\n// findNode returns the first node in nodes for which match returns true,\n// or nil if no node matches.\nfunc findNode(nodes []*v1.Node, match func(*v1.Node) bool) *v1.Node {\n\tfor _, n := range nodes {\n\t\tif match(n) {\n\t\t\treturn n\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// FindNewClient finds a client that is in the new list but not in the original list.\n// This is useful when dynamically adding nodes during tests and needing to identify\n// which client was just added.\nfunc FindNewClient(original, updated []TailscaleClient) (TailscaleClient, error) {\n\tfor _, client := range updated {\n\t\tisOriginal := false\n\n\t\tfor _, origClient := range original {\n\t\t\tif client.Hostname() == origClient.Hostname() {\n\t\t\t\tisOriginal = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !isOriginal {\n\t\t\treturn client, nil\n\t\t}\n\t}\n\n\treturn nil, errNoNewClientFound\n}\n\n// AddAndLoginClient adds a new tailscale client to a user and logs it in.\n// This combines the common pattern of:\n// 1. Creating a new node\n// 2. Finding the new node in the client list\n// 3. Getting the user to create a preauth key\n// 4. 
Logging in the new node.\nfunc (s *Scenario) AddAndLoginClient(\n\tt *testing.T,\n\tusername string,\n\tversion string,\n\theadscale ControlServer,\n\ttsOpts ...tsic.Option,\n) (TailscaleClient, error) {\n\tt.Helper()\n\n\t// Get the original client list\n\toriginalClients, err := s.ListTailscaleClients(username)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"listing original clients: %w\", err)\n\t}\n\n\t// Create the new node\n\terr = s.CreateTailscaleNodesInUser(username, version, 1, tsOpts...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating tailscale node: %w\", err)\n\t}\n\n\t// Wait for the new node to appear in the client list\n\tvar newClient TailscaleClient\n\n\t_, err = backoff.Retry(t.Context(), func() (struct{}, error) {\n\t\tupdatedClients, err := s.ListTailscaleClients(username)\n\t\tif err != nil {\n\t\t\treturn struct{}{}, fmt.Errorf(\"listing updated clients: %w\", err)\n\t\t}\n\n\t\tif len(updatedClients) != len(originalClients)+1 {\n\t\t\treturn struct{}{}, fmt.Errorf(\"expected %d clients, got %d\", len(originalClients)+1, len(updatedClients)) //nolint:err113\n\t\t}\n\n\t\tnewClient, err = FindNewClient(originalClients, updatedClients)\n\t\tif err != nil {\n\t\t\treturn struct{}{}, fmt.Errorf(\"finding new client: %w\", err)\n\t\t}\n\n\t\treturn struct{}{}, nil\n\t}, backoff.WithBackOff(backoff.NewConstantBackOff(500*time.Millisecond)), backoff.WithMaxElapsedTime(10*time.Second))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"timeout waiting for new client: %w\", err)\n\t}\n\n\t// Get the user and create preauth key\n\tuser, err := GetUserByName(headscale, username)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"getting user: %w\", err)\n\t}\n\n\tauthKey, err := s.CreatePreAuthKey(user.GetId(), true, false)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating preauth key: %w\", err)\n\t}\n\n\t// Login the new client\n\terr = newClient.Login(headscale.GetEndpoint(), authKey.GetKey())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"logging in new client: %w\", err)\n\t}\n\n\treturn newClient, nil\n}\n\n// MustAddAndLoginClient is like AddAndLoginClient but fails the test on error.\nfunc (s *Scenario) MustAddAndLoginClient(\n\tt *testing.T,\n\tusername string,\n\tversion string,\n\theadscale ControlServer,\n\ttsOpts ...tsic.Option,\n) TailscaleClient {\n\tt.Helper()\n\n\tclient, err := s.AddAndLoginClient(t, username, version, headscale, tsOpts...)\n\trequire.NoError(t, err)\n\n\treturn client\n}\n"
  },
  {
    "path": "integration/hsic/config.go",
    "content": "package hsic\n\nimport \"github.com/juanfont/headscale/hscontrol/types\"\n\nfunc MinimumConfigYAML() string {\n\treturn `\nprivate_key_path: /tmp/private.key\nnoise:\n  private_key_path: /tmp/noise_private.key\n`\n}\n\nfunc DefaultConfigEnv() map[string]string {\n\treturn map[string]string{\n\t\t\"HEADSCALE_LOG_LEVEL\":                         \"trace\",\n\t\t\"HEADSCALE_POLICY_PATH\":                       \"\",\n\t\t\"HEADSCALE_DATABASE_TYPE\":                     \"sqlite\",\n\t\t\"HEADSCALE_DATABASE_SQLITE_PATH\":              \"/tmp/integration_test_db.sqlite3\",\n\t\t\"HEADSCALE_DATABASE_DEBUG\":                    \"0\",\n\t\t\"HEADSCALE_DATABASE_GORM_SLOW_THRESHOLD\":      \"1\",\n\t\t\"HEADSCALE_EPHEMERAL_NODE_INACTIVITY_TIMEOUT\": \"30m\",\n\t\t\"HEADSCALE_PREFIXES_V4\":                       \"100.64.0.0/10\",\n\t\t\"HEADSCALE_PREFIXES_V6\":                       \"fd7a:115c:a1e0::/48\",\n\t\t\"HEADSCALE_DNS_BASE_DOMAIN\":                   \"headscale.net\",\n\t\t\"HEADSCALE_DNS_MAGIC_DNS\":                     \"true\",\n\t\t\"HEADSCALE_DNS_OVERRIDE_LOCAL_DNS\":            \"false\",\n\t\t\"HEADSCALE_DNS_NAMESERVERS_GLOBAL\":            \"127.0.0.11 1.1.1.1\",\n\t\t\"HEADSCALE_PRIVATE_KEY_PATH\":                  \"/tmp/private.key\",\n\t\t\"HEADSCALE_NOISE_PRIVATE_KEY_PATH\":            \"/tmp/noise_private.key\",\n\t\t\"HEADSCALE_METRICS_LISTEN_ADDR\":               \"0.0.0.0:9090\",\n\t\t\"HEADSCALE_DEBUG_PORT\":                        \"40000\",\n\n\t\t// Embedded DERP is the default for test isolation.\n\t\t// Tests should not depend on external DERP infrastructure.\n\t\t// Use WithPublicDERP() to opt out for tests that explicitly\n\t\t// need public DERP relays.\n\t\t\"HEADSCALE_DERP_URLS\":                    \"\",\n\t\t\"HEADSCALE_DERP_AUTO_UPDATE_ENABLED\":     \"false\",\n\t\t\"HEADSCALE_DERP_UPDATE_FREQUENCY\":        \"1m\",\n\t\t\"HEADSCALE_DERP_SERVER_ENABLED\":          \"true\",\n\t\t\"HEADSCALE_DERP_SERVER_REGION_ID\":        \"999\",\n\t\t\"HEADSCALE_DERP_SERVER_REGION_CODE\":      \"headscale\",\n\t\t\"HEADSCALE_DERP_SERVER_REGION_NAME\":      \"Headscale Embedded DERP\",\n\t\t\"HEADSCALE_DERP_SERVER_STUN_LISTEN_ADDR\": \"0.0.0.0:3478\",\n\t\t\"HEADSCALE_DERP_SERVER_PRIVATE_KEY_PATH\": \"/tmp/derp.key\",\n\t\t\"DERP_DEBUG_LOGS\":                        \"true\",\n\t\t\"DERP_PROBER_DEBUG_LOGS\":                 \"true\",\n\n\t\t// a bunch of tests (ACL/Policy) rely on predictable IP alloc,\n\t\t// so ensure the sequential alloc is used by default.\n\t\t\"HEADSCALE_PREFIXES_ALLOCATION\": string(types.IPAllocationStrategySequential),\n\t}\n}\n"
  },
  {
    "path": "integration/hsic/hsic.go",
    "content": "package hsic\n\nimport (\n\t\"archive/tar\"\n\t\"bytes\"\n\t\"cmp\"\n\t\"context\"\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"maps\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/netip\"\n\t\"os\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\t\"github.com/juanfont/headscale/hscontrol\"\n\tpolicyv2 \"github.com/juanfont/headscale/hscontrol/policy/v2\"\n\t\"github.com/juanfont/headscale/hscontrol/routes\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/juanfont/headscale/integration/dockertestutil\"\n\t\"github.com/juanfont/headscale/integration/integrationutil\"\n\t\"github.com/ory/dockertest/v3\"\n\t\"github.com/ory/dockertest/v3/docker\"\n\t\"gopkg.in/yaml.v3\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/util/mak\"\n)\n\nconst (\n\thsicHashLength                = 6\n\tdockerContextPath             = \"../.\"\n\tcaCertRoot                    = \"/usr/local/share/ca-certificates\"\n\taclPolicyPath                 = \"/etc/headscale/acl.hujson\"\n\ttlsCertPath                   = \"/etc/headscale/tls.cert\"\n\ttlsKeyPath                    = \"/etc/headscale/tls.key\"\n\theadscaleDefaultPort          = 8080\n\tIntegrationTestDockerFileName = \"Dockerfile.integration\"\n\tdefaultDirPerm                = 0o755\n)\n\nvar (\n\terrHeadscaleStatusCodeNotOk    = errors.New(\"headscale status code not ok\")\n\terrInvalidHeadscaleImageFormat = errors.New(\"invalid HEADSCALE_INTEGRATION_HEADSCALE_IMAGE format, expected repository:tag\")\n\terrHeadscaleImageRequiredInCI  = errors.New(\"HEADSCALE_INTEGRATION_HEADSCALE_IMAGE must be set in CI\")\n\terrInvalidPostgresImageFormat  = errors.New(\"invalid HEADSCALE_INTEGRATION_POSTGRES_IMAGE format, expected repository:tag\")\n)\n\ntype fileInContainer struct {\n\tpath     string\n\tcontents []byte\n}\n\n// HeadscaleInContainer is an implementation of ControlServer which\n// sets up a Headscale instance inside a container.\ntype HeadscaleInContainer struct {\n\thostname string\n\n\tpool      *dockertest.Pool\n\tcontainer *dockertest.Resource\n\tnetworks  []*dockertest.Network\n\n\tpgContainer *dockertest.Resource\n\n\t// optional config\n\tport             int\n\textraPorts       []string\n\thostMetricsPort  string // Dynamically assigned host port for metrics/pprof access\n\tcaCerts          [][]byte\n\thostPortBindings map[string][]string\n\taclPolicy        *policyv2.Policy\n\tenv              map[string]string\n\ttlsCACert        []byte\n\ttlsCert          []byte\n\ttlsKey           []byte\n\tnoTLS            bool\n\tfilesInContainer []fileInContainer\n\tpostgres         bool\n\tpolicyMode       types.PolicyMode\n}\n\n// Option represent optional settings that can be given to a\n// Headscale instance.\ntype Option = func(c *HeadscaleInContainer)\n\n// WithACLPolicy adds a hscontrol.ACLPolicy policy to the\n// HeadscaleInContainer instance.\nfunc WithACLPolicy(acl *policyv2.Policy) Option {\n\treturn func(hsic *HeadscaleInContainer) {\n\t\tif acl == nil {\n\t\t\treturn\n\t\t}\n\n\t\t// TODO(kradalby): Move somewhere appropriate\n\t\thsic.env[\"HEADSCALE_POLICY_PATH\"] = aclPolicyPath\n\n\t\thsic.aclPolicy = acl\n\t}\n}\n\n// WithCACert adds it to the trusted surtificate of the container.\nfunc WithCACert(cert []byte) Option {\n\treturn func(hsic *HeadscaleInContainer) 
{\n\t\thsic.caCerts = append(hsic.caCerts, cert)\n\t}\n}\n\n// WithoutTLS disables the default TLS configuration.\n// Most tests should not need this. Use only for tests that\n// explicitly need to test non-TLS behavior.\nfunc WithoutTLS() Option {\n\treturn func(hsic *HeadscaleInContainer) {\n\t\thsic.noTLS = true\n\t}\n}\n\n// WithCustomTLS uses the given certificates for the Headscale instance.\n// The caCert is installed into the container's trust store and returned\n// by GetCert() so that clients can trust this server.\nfunc WithCustomTLS(caCert, cert, key []byte) Option {\n\treturn func(hsic *HeadscaleInContainer) {\n\t\thsic.tlsCACert = caCert\n\t\thsic.tlsCert = cert\n\t\thsic.tlsKey = key\n\t\thsic.caCerts = append(hsic.caCerts, caCert)\n\t}\n}\n\n// WithConfigEnv takes a map of environment variables that\n// can be used to override Headscale configuration.\nfunc WithConfigEnv(configEnv map[string]string) Option {\n\treturn func(hsic *HeadscaleInContainer) {\n\t\tmaps.Copy(hsic.env, configEnv)\n\t}\n}\n\n// WithPort sets the port on where to run Headscale.\nfunc WithPort(port int) Option {\n\treturn func(hsic *HeadscaleInContainer) {\n\t\thsic.port = port\n\t}\n}\n\n// WithExtraPorts exposes additional ports on the container (e.g. 3478/udp for STUN).\nfunc WithExtraPorts(ports []string) Option {\n\treturn func(hsic *HeadscaleInContainer) {\n\t\thsic.extraPorts = ports\n\t}\n}\n\nfunc WithHostPortBindings(bindings map[string][]string) Option {\n\treturn func(hsic *HeadscaleInContainer) {\n\t\thsic.hostPortBindings = bindings\n\t}\n}\n\n// WithTestName sets a name for the test, this will be reflected\n// in the Docker container name.\nfunc WithTestName(testName string) Option {\n\treturn func(hsic *HeadscaleInContainer) {\n\t\thash, _ := util.GenerateRandomStringDNSSafe(hsicHashLength)\n\n\t\thostname := fmt.Sprintf(\"hs-%s-%s\", testName, hash)\n\t\thsic.hostname = hostname\n\t}\n}\n\n// WithHostname sets the hostname of the Headscale instance.\nfunc WithHostname(hostname string) Option {\n\treturn func(hsic *HeadscaleInContainer) {\n\t\thsic.hostname = hostname\n\t}\n}\n\n// WithFileInContainer adds a file to the container at the given path.\nfunc WithFileInContainer(path string, contents []byte) Option {\n\treturn func(hsic *HeadscaleInContainer) {\n\t\thsic.filesInContainer = append(hsic.filesInContainer,\n\t\t\tfileInContainer{\n\t\t\t\tpath:     path,\n\t\t\t\tcontents: contents,\n\t\t\t})\n\t}\n}\n\n// WithPostgres spins up a Postgres container and\n// sets it as the main database.\nfunc WithPostgres() Option {\n\treturn func(hsic *HeadscaleInContainer) {\n\t\thsic.postgres = true\n\t}\n}\n\n// WithPolicyMode sets the policy mode for headscale.\nfunc WithPolicyMode(mode types.PolicyMode) Option {\n\treturn func(hsic *HeadscaleInContainer) {\n\t\thsic.policyMode = mode\n\t\thsic.env[\"HEADSCALE_POLICY_MODE\"] = string(mode)\n\t}\n}\n\n// WithIPAllocationStrategy sets the tests IP Allocation strategy.\nfunc WithIPAllocationStrategy(strategy types.IPAllocationStrategy) Option {\n\treturn func(hsic *HeadscaleInContainer) {\n\t\thsic.env[\"HEADSCALE_PREFIXES_ALLOCATION\"] = string(strategy)\n\t}\n}\n\n// WithPublicDERP disables the embedded DERP server and restores\n// the default public DERP relay configuration. 
Use this for tests\n// that explicitly need to test public DERP behavior.\nfunc WithPublicDERP() Option {\n\treturn func(hsic *HeadscaleInContainer) {\n\t\thsic.env[\"HEADSCALE_DERP_URLS\"] = \"https://controlplane.tailscale.com/derpmap/default\"\n\t\thsic.env[\"HEADSCALE_DERP_SERVER_ENABLED\"] = \"false\"\n\t\tdelete(hsic.env, \"HEADSCALE_DERP_SERVER_REGION_ID\")\n\t\tdelete(hsic.env, \"HEADSCALE_DERP_SERVER_REGION_CODE\")\n\t\tdelete(hsic.env, \"HEADSCALE_DERP_SERVER_REGION_NAME\")\n\t\tdelete(hsic.env, \"HEADSCALE_DERP_SERVER_STUN_LISTEN_ADDR\")\n\t\tdelete(hsic.env, \"HEADSCALE_DERP_SERVER_PRIVATE_KEY_PATH\")\n\t\tdelete(hsic.env, \"DERP_DEBUG_LOGS\")\n\t\tdelete(hsic.env, \"DERP_PROBER_DEBUG_LOGS\")\n\t}\n}\n\n// WithDERPConfig configures Headscale use a custom\n// DERP server only.\nfunc WithDERPConfig(derpMap tailcfg.DERPMap) Option {\n\treturn func(hsic *HeadscaleInContainer) {\n\t\tcontents, err := yaml.Marshal(derpMap)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"marshalling DERP map: %s\", err)\n\n\t\t\treturn\n\t\t}\n\n\t\thsic.env[\"HEADSCALE_DERP_PATHS\"] = \"/etc/headscale/derp.yml\"\n\t\thsic.filesInContainer = append(hsic.filesInContainer,\n\t\t\tfileInContainer{\n\t\t\t\tpath:     \"/etc/headscale/derp.yml\",\n\t\t\t\tcontents: contents,\n\t\t\t})\n\n\t\t// Disable global DERP server and embedded DERP server\n\t\thsic.env[\"HEADSCALE_DERP_URLS\"] = \"\"\n\t\thsic.env[\"HEADSCALE_DERP_SERVER_ENABLED\"] = \"false\"\n\n\t\t// Envknob for enabling DERP debug logs\n\t\thsic.env[\"DERP_DEBUG_LOGS\"] = \"true\"\n\t\thsic.env[\"DERP_PROBER_DEBUG_LOGS\"] = \"true\"\n\t}\n}\n\n// WithTuning allows changing the tuning settings easily.\nfunc WithTuning(batchTimeout time.Duration, mapSessionChanSize int) Option {\n\treturn func(hsic *HeadscaleInContainer) {\n\t\thsic.env[\"HEADSCALE_TUNING_BATCH_CHANGE_DELAY\"] = batchTimeout.String()\n\t\thsic.env[\"HEADSCALE_TUNING_NODE_MAPSESSION_BUFFERED_CHAN_SIZE\"] = strconv.Itoa(\n\t\t\tmapSessionChanSize,\n\t\t)\n\t}\n}\n\nfunc WithTimezone(timezone string) Option {\n\treturn func(hsic *HeadscaleInContainer) {\n\t\thsic.env[\"TZ\"] = timezone\n\t}\n}\n\n// buildEntrypoint builds the container entrypoint command based on configuration.\n// It constructs proper wait conditions instead of fixed sleeps:\n// 1. Wait for network to be ready\n// 2. Wait for config.yaml (always written after container start)\n// 3. Wait for CA certs if configured\n// 4. Update CA certificates\n// 5. Run headscale serve\n// 6. Sleep at end to keep container alive for log collection on shutdown.\nfunc (hsic *HeadscaleInContainer) buildEntrypoint() []string {\n\tvar commands []string\n\n\t// Wait for network to be ready\n\tcommands = append(commands, \"while ! ip route show default >/dev/null 2>&1; do sleep 0.1; done\")\n\n\t// Wait for config.yaml to be written (always written after container start)\n\tcommands = append(commands, \"while [ ! -f /etc/headscale/config.yaml ]; do sleep 0.1; done\")\n\n\t// If CA certs are configured, wait for them to be written\n\tif len(hsic.caCerts) > 0 {\n\t\tcommands = append(commands,\n\t\t\tfmt.Sprintf(\"while [ ! 
-f %s/user-0.crt ]; do sleep 0.1; done\", caCertRoot))\n\t}\n\n\t// Update CA certificates\n\tcommands = append(commands, \"update-ca-certificates\")\n\n\t// Run headscale serve\n\tcommands = append(commands, \"/usr/local/bin/headscale serve\")\n\n\t// Keep container alive after headscale exits for log collection\n\tcommands = append(commands, \"/bin/sleep 30\")\n\n\treturn []string{\"/bin/bash\", \"-c\", strings.Join(commands, \" ; \")}\n}\n\n// New returns a new HeadscaleInContainer instance.\n//\n//nolint:gocyclo // complex container setup with many options\nfunc New(\n\tpool *dockertest.Pool,\n\tnetworks []*dockertest.Network,\n\topts ...Option,\n) (*HeadscaleInContainer, error) {\n\thash, err := util.GenerateRandomStringDNSSafe(hsicHashLength)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Include run ID in hostname for easier identification of which test run owns this container\n\trunID := dockertestutil.GetIntegrationRunID()\n\n\tvar hostname string\n\n\tif runID != \"\" {\n\t\t// Use last 6 chars of run ID (the random hash part) for brevity\n\t\trunIDShort := runID[len(runID)-6:]\n\t\thostname = fmt.Sprintf(\"hs-%s-%s\", runIDShort, hash)\n\t} else {\n\t\thostname = \"hs-\" + hash\n\t}\n\n\thsic := &HeadscaleInContainer{\n\t\thostname: hostname,\n\t\tport:     headscaleDefaultPort,\n\n\t\tpool:     pool,\n\t\tnetworks: networks,\n\n\t\tenv:              DefaultConfigEnv(),\n\t\tfilesInContainer: []fileInContainer{},\n\t\tpolicyMode:       types.PolicyModeFile,\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(hsic)\n\t}\n\n\t// TLS is enabled by default for all integration tests.\n\t// Generate a self-signed certificate if TLS was not explicitly\n\t// disabled via WithoutTLS() and no custom cert was provided\n\t// via WithCustomTLS().\n\tif !hsic.noTLS && len(hsic.tlsCert) == 0 {\n\t\tcaCert, cert, key, err := integrationutil.CreateCertificate(hsic.hostname)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"creating default TLS certificates: %w\", err)\n\t\t}\n\n\t\thsic.tlsCACert = caCert\n\t\thsic.tlsCert = cert\n\t\thsic.tlsKey = key\n\n\t\t// Install the CA cert into the headscale container's trust\n\t\t// store so that tools like curl trust the server's own\n\t\t// certificate.\n\t\thsic.caCerts = append(hsic.caCerts, caCert)\n\t}\n\n\tlog.Println(\"NAME: \", hsic.hostname)\n\n\tportProto := fmt.Sprintf(\"%d/tcp\", hsic.port)\n\n\theadscaleBuildOptions := &dockertest.BuildOptions{\n\t\tDockerfile: IntegrationTestDockerFileName,\n\t\tContextDir: dockerContextPath,\n\t}\n\n\tif hsic.postgres {\n\t\thsic.env[\"HEADSCALE_DATABASE_TYPE\"] = \"postgres\"\n\t\thsic.env[\"HEADSCALE_DATABASE_POSTGRES_HOST\"] = \"postgres-\" + hash\n\t\thsic.env[\"HEADSCALE_DATABASE_POSTGRES_USER\"] = \"headscale\"\n\t\thsic.env[\"HEADSCALE_DATABASE_POSTGRES_PASS\"] = \"headscale\"\n\t\thsic.env[\"HEADSCALE_DATABASE_POSTGRES_NAME\"] = \"headscale\"\n\t\tdelete(hsic.env, \"HEADSCALE_DATABASE_SQLITE_PATH\")\n\n\t\t// Determine postgres image - use prebuilt if available, otherwise pull from registry\n\t\tpgRepo := \"postgres\"\n\t\tpgTag := \"latest\"\n\n\t\tif prebuiltImage := os.Getenv(\"HEADSCALE_INTEGRATION_POSTGRES_IMAGE\"); prebuiltImage != \"\" {\n\t\t\trepo, tag, found := strings.Cut(prebuiltImage, \":\")\n\t\t\tif !found {\n\t\t\t\treturn nil, errInvalidPostgresImageFormat\n\t\t\t}\n\n\t\t\tpgRepo = repo\n\t\t\tpgTag = tag\n\t\t}\n\n\t\tpgRunOptions := &dockertest.RunOptions{\n\t\t\tName:       \"postgres-\" + hash,\n\t\t\tRepository: pgRepo,\n\t\t\tTag:        pgTag,\n\t\t\tNetworks:   
networks,\n\t\t\tEnv: []string{\n\t\t\t\t\"POSTGRES_USER=headscale\",\n\t\t\t\t\"POSTGRES_PASSWORD=headscale\",\n\t\t\t\t\"POSTGRES_DB=headscale\",\n\t\t\t},\n\t\t}\n\n\t\t// Add integration test labels if running under hi tool\n\t\tdockertestutil.DockerAddIntegrationLabels(pgRunOptions, \"postgres\")\n\n\t\tpg, err := pool.RunWithOptions(pgRunOptions)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"starting postgres container: %w\", err)\n\t\t}\n\n\t\thsic.pgContainer = pg\n\t}\n\n\tenv := []string{\n\t\t\"HEADSCALE_DEBUG_PROFILING_ENABLED=1\",\n\t\t\"HEADSCALE_DEBUG_PROFILING_PATH=/tmp/profile\",\n\t\t\"HEADSCALE_DEBUG_DUMP_MAPRESPONSE_PATH=/tmp/mapresponses\",\n\t\t\"HEADSCALE_DEBUG_DEADLOCK=1\",\n\t\t\"HEADSCALE_DEBUG_DEADLOCK_TIMEOUT=5s\",\n\t\t\"HEADSCALE_DEBUG_HIGH_CARDINALITY_METRICS=1\",\n\t\t\"HEADSCALE_DEBUG_DUMP_CONFIG=1\",\n\t}\n\tif hsic.hasTLS() {\n\t\thsic.env[\"HEADSCALE_TLS_CERT_PATH\"] = tlsCertPath\n\t\thsic.env[\"HEADSCALE_TLS_KEY_PATH\"] = tlsKeyPath\n\t}\n\n\t// Server URL and Listen Addr should not be overridable outside of\n\t// the configuration passed to docker.\n\thsic.env[\"HEADSCALE_SERVER_URL\"] = hsic.GetEndpoint()\n\thsic.env[\"HEADSCALE_LISTEN_ADDR\"] = fmt.Sprintf(\"0.0.0.0:%d\", hsic.port)\n\n\tfor key, value := range hsic.env {\n\t\tenv = append(env, fmt.Sprintf(\"%s=%s\", key, value))\n\t}\n\n\tlog.Printf(\"ENV: \\n%s\", spew.Sdump(hsic.env))\n\n\trunOptions := &dockertest.RunOptions{\n\t\tName:         hsic.hostname,\n\t\tExposedPorts: append([]string{portProto, \"9090/tcp\"}, hsic.extraPorts...),\n\t\tNetworks:     networks,\n\t\t// Cmd:          []string{\"headscale\", \"serve\"},\n\t\t// TODO(kradalby): Get rid of this hack, we currently need to give ourselves some\n\t\t// time to inject the headscale configuration further down.\n\t\tEntrypoint: hsic.buildEntrypoint(),\n\t\tEnv:        env,\n\t}\n\n\t// Bind metrics port to dynamic host port (kernel assigns free port)\n\tif runOptions.PortBindings == nil {\n\t\trunOptions.PortBindings = map[docker.Port][]docker.PortBinding{}\n\t}\n\n\trunOptions.PortBindings[\"9090/tcp\"] = []docker.PortBinding{\n\t\t{HostPort: \"0\"}, // Let kernel assign a free port\n\t}\n\n\tif len(hsic.hostPortBindings) > 0 {\n\t\tfor port, hostPorts := range hsic.hostPortBindings {\n\t\t\trunOptions.PortBindings[docker.Port(port)] = []docker.PortBinding{}\n\t\t\tfor _, hostPort := range hostPorts {\n\t\t\t\trunOptions.PortBindings[docker.Port(port)] = append(\n\t\t\t\t\trunOptions.PortBindings[docker.Port(port)],\n\t\t\t\t\tdocker.PortBinding{HostPort: hostPort})\n\t\t\t}\n\t\t}\n\t}\n\n\t// dockertest isn't very good at handling containers that have already\n\t// been created; this is an attempt to make sure this container isn't\n\t// present.\n\terr = pool.RemoveContainerByName(hsic.hostname)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Add integration test labels if running under hi tool\n\tdockertestutil.DockerAddIntegrationLabels(runOptions, \"headscale\")\n\n\tvar container *dockertest.Resource\n\n\t// Check if a pre-built image is available via environment variable\n\tprebuiltImage := os.Getenv(\"HEADSCALE_INTEGRATION_HEADSCALE_IMAGE\")\n\n\tif prebuiltImage != \"\" {\n\t\tlog.Printf(\"Using pre-built headscale image: %s\", prebuiltImage)\n\t\t// Parse image into repository and tag\n\t\trepo, tag, ok := strings.Cut(prebuiltImage, \":\")\n\t\tif !ok {\n\t\t\treturn nil, errInvalidHeadscaleImageFormat\n\t\t}\n\n\t\trunOptions.Repository = repo\n\t\trunOptions.Tag = tag\n\n\t\tcontainer, err = 
pool.RunWithOptions(\n\t\t\trunOptions,\n\t\t\tdockertestutil.DockerRestartPolicy,\n\t\t\tdockertestutil.DockerAllowLocalIPv6,\n\t\t\tdockertestutil.DockerAllowNetworkAdministration,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"running pre-built headscale container %q: %w\", prebuiltImage, err)\n\t\t}\n\t} else if util.IsCI() {\n\t\treturn nil, errHeadscaleImageRequiredInCI\n\t} else {\n\t\tcontainer, err = pool.BuildAndRunWithBuildOptions(\n\t\t\theadscaleBuildOptions,\n\t\t\trunOptions,\n\t\t\tdockertestutil.DockerRestartPolicy,\n\t\t\tdockertestutil.DockerAllowLocalIPv6,\n\t\t\tdockertestutil.DockerAllowNetworkAdministration,\n\t\t)\n\t\tif err != nil {\n\t\t\t// Try to get more detailed build output\n\t\t\tlog.Printf(\"Docker build/run failed, attempting to get detailed output...\")\n\n\t\t\tbuildOutput, buildErr := dockertestutil.RunDockerBuildForDiagnostics(dockerContextPath, IntegrationTestDockerFileName)\n\n\t\t\t// Show the last 100 lines of build output to avoid overwhelming the logs\n\t\t\tlines := strings.Split(buildOutput, \"\\n\")\n\n\t\t\tconst maxLines = 100\n\n\t\t\tstartLine := 0\n\t\t\tif len(lines) > maxLines {\n\t\t\t\tstartLine = len(lines) - maxLines\n\t\t\t}\n\n\t\t\trelevantOutput := strings.Join(lines[startLine:], \"\\n\")\n\n\t\t\tif buildErr != nil {\n\t\t\t\t// The diagnostic build also failed - this is the real error\n\t\t\t\treturn nil, fmt.Errorf(\"starting headscale container: %w\\n\\nDocker build failed. Last %d lines of output:\\n%s\", err, maxLines, relevantOutput)\n\t\t\t}\n\n\t\t\tif buildOutput != \"\" {\n\t\t\t\t// Build succeeded on retry but container creation still failed\n\t\t\t\treturn nil, fmt.Errorf(\"starting headscale container: %w\\n\\nDocker build succeeded on retry, but container creation failed. 
Last %d lines of build output:\\n%s\", err, maxLines, relevantOutput)\n\t\t\t}\n\n\t\t\t// No output at all - diagnostic build command may have failed\n\t\t\treturn nil, fmt.Errorf(\"starting headscale container: %w\\n\\nUnable to get diagnostic build output (command may have failed silently)\", err)\n\t\t}\n\t}\n\n\tlog.Printf(\"Created %s container\\n\", hsic.hostname)\n\n\thsic.container = container\n\n\t// Get the dynamically assigned host port for metrics/pprof\n\thsic.hostMetricsPort = container.GetHostPort(\"9090/tcp\")\n\n\tlog.Printf(\n\t\t\"Headscale %s metrics available at http://localhost:%s/metrics (debug at http://localhost:%s/debug/)\\n\",\n\t\thsic.hostname,\n\t\thsic.hostMetricsPort,\n\t\thsic.hostMetricsPort,\n\t)\n\n\t// Write the CA certificates to the container\n\tfor i, cert := range hsic.caCerts {\n\t\terr = hsic.WriteFile(fmt.Sprintf(\"%s/user-%d.crt\", caCertRoot, i), cert)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"writing TLS certificate to container: %w\", err)\n\t\t}\n\t}\n\n\terr = hsic.WriteFile(\"/etc/headscale/config.yaml\", []byte(MinimumConfigYAML()))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"writing headscale config to container: %w\", err)\n\t}\n\n\tif hsic.aclPolicy != nil {\n\t\terr = hsic.writePolicy(hsic.aclPolicy)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"writing policy: %w\", err)\n\t\t}\n\t}\n\n\tif hsic.hasTLS() {\n\t\terr = hsic.WriteFile(tlsCertPath, hsic.tlsCert)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"writing TLS certificate to container: %w\", err)\n\t\t}\n\n\t\terr = hsic.WriteFile(tlsKeyPath, hsic.tlsKey)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"writing TLS key to container: %w\", err)\n\t\t}\n\t}\n\n\tfor _, f := range hsic.filesInContainer {\n\t\terr := hsic.WriteFile(f.path, f.contents)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"writing %q: %w\", f.path, err)\n\t\t}\n\t}\n\n\t// Load the policy into the database, retrying until it succeeds;\n\t// this is needed because the container sleeps before starting headscale.\n\tif hsic.aclPolicy != nil && hsic.policyMode == types.PolicyModeDB {\n\t\terr := pool.Retry(hsic.reloadDatabasePolicy)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"loading database policy on startup: %w\", err)\n\t\t}\n\t}\n\n\treturn hsic, nil\n}\n\nfunc (t *HeadscaleInContainer) ConnectToNetwork(network *dockertest.Network) error {\n\treturn t.container.ConnectToNetwork(network)\n}\n\nfunc (t *HeadscaleInContainer) hasTLS() bool {\n\treturn len(t.tlsCert) != 0 && len(t.tlsKey) != 0\n}\n\n// Shutdown stops and cleans up the Headscale container.\nfunc (t *HeadscaleInContainer) Shutdown() (string, string, error) {\n\tstdoutPath, stderrPath, err := t.SaveLog(\"/tmp/control\")\n\tif err != nil {\n\t\tlog.Printf(\n\t\t\t\"saving log from control: %s\",\n\t\t\tfmt.Errorf(\"saving log from control: %w\", err),\n\t\t)\n\t}\n\n\terr = t.SaveMetrics(fmt.Sprintf(\"/tmp/control/%s_metrics.txt\", t.hostname))\n\tif err != nil {\n\t\tlog.Printf(\n\t\t\t\"saving metrics from control: %s\",\n\t\t\terr,\n\t\t)\n\t}\n\n\t// Send an interrupt signal to the \"headscale\" process inside the container,\n\t// allowing it to shut down gracefully and flush the profile to disk.\n\t// The container will live for a bit longer due to the sleep at the end.\n\terr = t.SendInterrupt()\n\tif err != nil {\n\t\tlog.Printf(\n\t\t\t\"sending graceful interrupt to control: %s\",\n\t\t\tfmt.Errorf(\"sending graceful interrupt to control: %w\", err),\n\t\t)\n\t}\n\n\terr = 
t.SaveProfile(\"/tmp/control\")\n\tif err != nil {\n\t\tlog.Printf(\n\t\t\t\"saving profile from control: %s\",\n\t\t\tfmt.Errorf(\"saving profile from control: %w\", err),\n\t\t)\n\t}\n\n\terr = t.SaveMapResponses(\"/tmp/control\")\n\tif err != nil {\n\t\tlog.Printf(\n\t\t\t\"saving mapresponses from control: %s\",\n\t\t\tfmt.Errorf(\"saving mapresponses from control: %w\", err),\n\t\t)\n\t}\n\n\t// We dont have a database to save if we use postgres\n\tif !t.postgres {\n\t\terr = t.SaveDatabase(\"/tmp/control\")\n\t\tif err != nil {\n\t\t\tlog.Printf(\n\t\t\t\t\"saving database from control: %s\",\n\t\t\t\tfmt.Errorf(\"saving database from control: %w\", err),\n\t\t\t)\n\t\t}\n\t}\n\n\t// Cleanup postgres container if enabled.\n\tif t.postgres {\n\t\t_ = t.pool.Purge(t.pgContainer)\n\t}\n\n\treturn stdoutPath, stderrPath, t.pool.Purge(t.container)\n}\n\n// WriteLogs writes the current stdout/stderr log of the container to\n// the given io.Writers.\nfunc (t *HeadscaleInContainer) WriteLogs(stdout, stderr io.Writer) error {\n\treturn dockertestutil.WriteLog(t.pool, t.container, stdout, stderr)\n}\n\n// ReadLog returns the current stdout and stderr logs from the headscale container.\nfunc (t *HeadscaleInContainer) ReadLog() (string, string, error) {\n\tvar stdout, stderr bytes.Buffer\n\n\terr := dockertestutil.WriteLog(t.pool, t.container, &stdout, &stderr)\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"reading container logs: %w\", err)\n\t}\n\n\treturn stdout.String(), stderr.String(), nil\n}\n\n// SaveLog saves the current stdout log of the container to a path\n// on the host system.\nfunc (t *HeadscaleInContainer) SaveLog(path string) (string, string, error) {\n\treturn dockertestutil.SaveLog(t.pool, t.container, path)\n}\n\nfunc (t *HeadscaleInContainer) SaveMetrics(savePath string) error {\n\treq, err := http.NewRequestWithContext(context.Background(), http.MethodGet, \"http://\"+net.JoinHostPort(t.hostname, \"9090\")+\"/metrics\", nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating metrics request: %w\", err)\n\t}\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting metrics: %w\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tout, err := os.Create(savePath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating file for metrics: %w\", err)\n\t}\n\tdefer out.Close()\n\n\t_, err = io.Copy(out, resp.Body)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"copy response to file: %w\", err)\n\t}\n\n\treturn nil\n}\n\n// extractTarToDirectory extracts a tar archive to a directory.\nfunc extractTarToDirectory(tarData []byte, targetDir string) error {\n\terr := os.MkdirAll(targetDir, defaultDirPerm)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating directory %s: %w\", targetDir, err)\n\t}\n\n\t// Find the top-level directory to strip\n\tvar topLevelDir string\n\n\tfirstPass := tar.NewReader(bytes.NewReader(tarData))\n\tfor {\n\t\theader, err := firstPass.Next()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"reading tar header: %w\", err)\n\t\t}\n\n\t\tif header.Typeflag == tar.TypeDir && topLevelDir == \"\" {\n\t\t\ttopLevelDir = strings.TrimSuffix(header.Name, \"/\")\n\t\t\tbreak\n\t\t}\n\t}\n\n\ttarReader := tar.NewReader(bytes.NewReader(tarData))\n\tfor {\n\t\theader, err := tarReader.Next()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"reading tar header: %w\", err)\n\t\t}\n\n\t\t// Clean the path to prevent directory traversal\n\t\tcleanName := 
filepath.Clean(header.Name)\n\t\tif strings.Contains(cleanName, \"..\") {\n\t\t\tcontinue // Skip potentially dangerous paths\n\t\t}\n\n\t\t// Strip the top-level directory\n\t\tif topLevelDir != \"\" && strings.HasPrefix(cleanName, topLevelDir+\"/\") {\n\t\t\tcleanName = strings.TrimPrefix(cleanName, topLevelDir+\"/\")\n\t\t} else if cleanName == topLevelDir {\n\t\t\t// Skip the top-level directory itself\n\t\t\tcontinue\n\t\t}\n\n\t\t// Skip empty paths after stripping\n\t\tif cleanName == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\ttargetPath := filepath.Join(targetDir, cleanName)\n\n\t\tswitch header.Typeflag {\n\t\tcase tar.TypeDir:\n\t\t\t// Create directory\n\t\t\t//nolint:gosec // G115: header.Mode is trusted from tar archive\n\t\t\terr := os.MkdirAll(targetPath, os.FileMode(header.Mode))\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"creating directory %s: %w\", targetPath, err)\n\t\t\t}\n\t\tcase tar.TypeReg:\n\t\t\t// Ensure parent directories exist\n\t\t\terr := os.MkdirAll(filepath.Dir(targetPath), defaultDirPerm)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"creating parent directories for %s: %w\", targetPath, err)\n\t\t\t}\n\n\t\t\t// Create file\n\t\t\toutFile, err := os.Create(targetPath)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"creating file %s: %w\", targetPath, err)\n\t\t\t}\n\n\t\t\tif _, err := io.Copy(outFile, tarReader); err != nil { //nolint:gosec,noinlineerr // trusted tar from test container\n\t\t\t\toutFile.Close()\n\t\t\t\treturn fmt.Errorf(\"copying file contents: %w\", err)\n\t\t\t}\n\n\t\t\toutFile.Close()\n\n\t\t\t// Set file permissions\n\t\t\tif err := os.Chmod(targetPath, os.FileMode(header.Mode)); err != nil { //nolint:gosec,noinlineerr // safe mode from tar header\n\t\t\t\treturn fmt.Errorf(\"setting file permissions: %w\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (t *HeadscaleInContainer) SaveProfile(savePath string) error {\n\ttarFile, err := t.FetchPath(\"/tmp/profile\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttargetDir := path.Join(savePath, \"pprof\")\n\n\treturn extractTarToDirectory(tarFile, targetDir)\n}\n\nfunc (t *HeadscaleInContainer) SaveMapResponses(savePath string) error {\n\ttarFile, err := t.FetchPath(\"/tmp/mapresponses\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttargetDir := path.Join(savePath, \"mapresponses\")\n\n\treturn extractTarToDirectory(tarFile, targetDir)\n}\n\nfunc (t *HeadscaleInContainer) SaveDatabase(savePath string) error {\n\t// If using PostgreSQL, skip database file extraction\n\tif t.postgres {\n\t\treturn nil\n\t}\n\n\t// Also check for any .sqlite files\n\tsqliteFiles, err := t.Execute([]string{\"find\", \"/tmp\", \"-name\", \"*.sqlite*\", \"-type\", \"f\"})\n\tif err != nil {\n\t\tlog.Printf(\"Warning: could not find sqlite files: %v\", err)\n\t} else {\n\t\tlog.Printf(\"SQLite files found in %s:\\n%s\", t.hostname, sqliteFiles)\n\t}\n\n\t// Check if the database file exists and has a schema\n\tdbPath := \"/tmp/integration_test_db.sqlite3\"\n\n\tfileInfo, err := t.Execute([]string{\"ls\", \"-la\", dbPath})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"database file does not exist at %s: %w\", dbPath, err)\n\t}\n\n\tlog.Printf(\"Database file info: %s\", fileInfo)\n\n\t// Check if the database has any tables (schema)\n\tschemaCheck, err := t.Execute([]string{\"sqlite3\", dbPath, \".schema\"})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"checking database schema (sqlite3 command failed): %w\", err)\n\t}\n\n\tif strings.TrimSpace(schemaCheck) == \"\" {\n\t\treturn 
errors.New(\"database file exists but has no schema (empty database)\") //nolint:err113\n\t}\n\n\ttarFile, err := t.FetchPath(\"/tmp/integration_test_db.sqlite3\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"fetching database file: %w\", err)\n\t}\n\n\t// For database, extract the first regular file (should be the SQLite file)\n\ttarReader := tar.NewReader(bytes.NewReader(tarFile))\n\tfor {\n\t\theader, err := tarReader.Next()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"reading tar header: %w\", err)\n\t\t}\n\n\t\tlog.Printf(\n\t\t\t\"Found file in tar: %s (type: %d, size: %d)\",\n\t\t\theader.Name,\n\t\t\theader.Typeflag,\n\t\t\theader.Size,\n\t\t)\n\n\t\t// Extract the first regular file we find\n\t\tif header.Typeflag == tar.TypeReg {\n\t\t\tdbPath := path.Join(savePath, t.hostname+\".db\")\n\n\t\t\toutFile, err := os.Create(dbPath)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"creating database file: %w\", err)\n\t\t\t}\n\n\t\t\twritten, err := io.Copy(outFile, tarReader) //nolint:gosec // trusted tar from test container\n\t\t\toutFile.Close()\n\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"copying database file: %w\", err)\n\t\t\t}\n\n\t\t\tlog.Printf(\n\t\t\t\t\"Extracted database file: %s (%d bytes written, header claimed %d bytes)\",\n\t\t\t\tdbPath,\n\t\t\t\twritten,\n\t\t\t\theader.Size,\n\t\t\t)\n\n\t\t\t// Check if we actually wrote something\n\t\t\tif written == 0 {\n\t\t\t\treturn fmt.Errorf( //nolint:err113\n\t\t\t\t\t\"database file is empty (size: %d, header size: %d)\",\n\t\t\t\t\twritten,\n\t\t\t\t\theader.Size,\n\t\t\t\t)\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn errors.New(\"no regular file found in database tar archive\") //nolint:err113\n}\n\n// Execute runs a command inside the Headscale container and returns the\n// result of stdout as a string.\nfunc (t *HeadscaleInContainer) Execute(\n\tcommand []string,\n) (string, error) {\n\tstdout, stderr, err := dockertestutil.ExecuteCommand(\n\t\tt.container,\n\t\tcommand,\n\t\t[]string{},\n\t)\n\tif err != nil {\n\t\tlog.Printf(\"command: %v\", command)\n\t\tlog.Printf(\"command stderr: %s\\n\", stderr)\n\n\t\tif stdout != \"\" {\n\t\t\tlog.Printf(\"command stdout: %s\\n\", stdout)\n\t\t}\n\n\t\treturn stdout, fmt.Errorf(\"executing command in docker: %w, stderr: %s\", err, stderr)\n\t}\n\n\treturn stdout, nil\n}\n\n// GetPort returns the docker container port as a string.\nfunc (t *HeadscaleInContainer) GetPort() string {\n\treturn strconv.Itoa(t.port)\n}\n\n// GetHostMetricsPort returns the dynamically assigned host port for metrics/pprof access.\n// This port can be used by operators to access metrics at http://localhost:{port}/metrics\n// and debug endpoints at http://localhost:{port}/debug/ while tests are running.\nfunc (t *HeadscaleInContainer) GetHostMetricsPort() string {\n\treturn t.hostMetricsPort\n}\n\n// GetHealthEndpoint returns a health endpoint for the HeadscaleInContainer\n// instance.\nfunc (t *HeadscaleInContainer) GetHealthEndpoint() string {\n\treturn t.GetEndpoint() + \"/health\"\n}\n\n// GetEndpoint returns the Headscale endpoint for the HeadscaleInContainer.\nfunc (t *HeadscaleInContainer) GetEndpoint() string {\n\treturn t.getEndpoint(false)\n}\n\n// GetIPEndpoint returns the Headscale endpoint using IP address instead of hostname.\nfunc (t *HeadscaleInContainer) GetIPEndpoint() string {\n\treturn t.getEndpoint(true)\n}\n\n// getEndpoint returns the Headscale endpoint, optionally using IP address instead of hostname.\nfunc (t 
*HeadscaleInContainer) getEndpoint(useIP bool) string {\n\tvar host string\n\tif useIP && len(t.networks) > 0 {\n\t\t// Use IP address from the first network\n\t\thost = t.GetIPInNetwork(t.networks[0])\n\t} else {\n\t\thost = t.GetHostname()\n\t}\n\n\thostEndpoint := fmt.Sprintf(\"%s:%d\", host, t.port)\n\n\tif t.hasTLS() {\n\t\treturn \"https://\" + hostEndpoint\n\t}\n\n\treturn \"http://\" + hostEndpoint\n}\n\n// GetCert returns the CA certificate that clients should trust to\n// verify this server's TLS certificate.\nfunc (t *HeadscaleInContainer) GetCert() []byte {\n\treturn t.tlsCACert\n}\n\n// GetHostname returns the hostname of the HeadscaleInContainer.\nfunc (t *HeadscaleInContainer) GetHostname() string {\n\treturn t.hostname\n}\n\n// GetIPInNetwork returns the IP address of the HeadscaleInContainer in the given network.\nfunc (t *HeadscaleInContainer) GetIPInNetwork(network *dockertest.Network) string {\n\treturn t.container.GetIPInNetwork(network)\n}\n\n// WaitForRunning blocks until the Headscale instance is ready to\n// serve clients.\nfunc (t *HeadscaleInContainer) WaitForRunning() error {\n\turl := t.GetHealthEndpoint()\n\n\tlog.Printf(\"waiting for headscale to be ready at %s\", url)\n\n\tclient := &http.Client{}\n\n\tif t.hasTLS() {\n\t\tinsecureTransport := http.DefaultTransport.(*http.Transport).Clone()      //nolint\n\t\tinsecureTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} //nolint\n\t\tclient = &http.Client{Transport: insecureTransport}\n\t}\n\n\treturn t.pool.Retry(func() error {\n\t\tresp, err := client.Get(url) //nolint\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"headscale is not ready: %w\", err)\n\t\t}\n\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\treturn errHeadscaleStatusCodeNotOk\n\t\t}\n\n\t\treturn nil\n\t})\n}\n\n// CreateUser adds a new user to the Headscale instance.\nfunc (t *HeadscaleInContainer) CreateUser(\n\tuser string,\n) (*v1.User, error) {\n\tcommand := []string{\n\t\t\"headscale\",\n\t\t\"users\",\n\t\t\"create\",\n\t\tuser,\n\t\tfmt.Sprintf(\"--email=%s@test.no\", user),\n\t\t\"--output\",\n\t\t\"json\",\n\t}\n\n\tresult, _, err := dockertestutil.ExecuteCommand(\n\t\tt.container,\n\t\tcommand,\n\t\t[]string{},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar u v1.User\n\n\terr = json.Unmarshal([]byte(result), &u)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshalling user: %w\", err)\n\t}\n\n\treturn &u, nil\n}\n\n// AuthKeyOptions defines options for creating an auth key.\ntype AuthKeyOptions struct {\n\t// User is the user ID that owns the auth key. 
If nil and Tags are specified,\n\t// the auth key is owned by the tags only (tags-as-identity model).\n\tUser *uint64\n\t// Reusable indicates if the key can be used multiple times.\n\tReusable bool\n\t// Ephemeral indicates if nodes registered with this key should be ephemeral.\n\tEphemeral bool\n\t// Tags are the tags to assign to the auth key.\n\tTags []string\n}\n\n// CreateAuthKeyWithOptions creates a new \"authorisation key\" with the specified options.\n// This supports both user-owned and tags-only auth keys.\nfunc (t *HeadscaleInContainer) CreateAuthKeyWithOptions(opts AuthKeyOptions) (*v1.PreAuthKey, error) {\n\tcommand := []string{\n\t\t\"headscale\",\n\t}\n\n\t// Only add --user flag if User is specified\n\tif opts.User != nil {\n\t\tcommand = append(command, \"--user\", strconv.FormatUint(*opts.User, 10))\n\t}\n\n\tcommand = append(command,\n\t\t\"preauthkeys\",\n\t\t\"create\",\n\t\t\"--expiration\",\n\t\t\"24h\",\n\t\t\"--output\",\n\t\t\"json\",\n\t)\n\n\tif opts.Reusable {\n\t\tcommand = append(command, \"--reusable\")\n\t}\n\n\tif opts.Ephemeral {\n\t\tcommand = append(command, \"--ephemeral\")\n\t}\n\n\tif len(opts.Tags) > 0 {\n\t\tcommand = append(command, \"--tags\", strings.Join(opts.Tags, \",\"))\n\t}\n\n\tresult, _, err := dockertestutil.ExecuteCommand(\n\t\tt.container,\n\t\tcommand,\n\t\t[]string{},\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"executing create auth key command: %w\", err)\n\t}\n\n\tvar preAuthKey v1.PreAuthKey\n\n\terr = json.Unmarshal([]byte(result), &preAuthKey)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshalling auth key: %w\", err)\n\t}\n\n\treturn &preAuthKey, nil\n}\n\n// CreateAuthKey creates a new \"authorisation key\" for a User that can be used\n// to authorise a TailscaleClient with the Headscale instance.\nfunc (t *HeadscaleInContainer) CreateAuthKey(\n\tuser uint64,\n\treusable bool,\n\tephemeral bool,\n) (*v1.PreAuthKey, error) {\n\treturn t.CreateAuthKeyWithOptions(AuthKeyOptions{\n\t\tUser:      &user,\n\t\tReusable:  reusable,\n\t\tEphemeral: ephemeral,\n\t})\n}\n\n// CreateAuthKeyWithTags creates a new \"authorisation key\" for a User with the specified tags.\n// This is used to create tagged PreAuthKeys for testing the tags-as-identity model.\nfunc (t *HeadscaleInContainer) CreateAuthKeyWithTags(\n\tuser uint64,\n\treusable bool,\n\tephemeral bool,\n\ttags []string,\n) (*v1.PreAuthKey, error) {\n\treturn t.CreateAuthKeyWithOptions(AuthKeyOptions{\n\t\tUser:      &user,\n\t\tReusable:  reusable,\n\t\tEphemeral: ephemeral,\n\t\tTags:      tags,\n\t})\n}\n\n// DeleteAuthKey deletes an \"authorisation key\" by ID.\nfunc (t *HeadscaleInContainer) DeleteAuthKey(\n\tid uint64,\n) error {\n\tcommand := []string{\n\t\t\"headscale\",\n\t\t\"preauthkeys\",\n\t\t\"delete\",\n\t\t\"--id\",\n\t\tstrconv.FormatUint(id, 10),\n\t\t\"--output\",\n\t\t\"json\",\n\t}\n\n\t_, _, err := dockertestutil.ExecuteCommand(\n\t\tt.container,\n\t\tcommand,\n\t\t[]string{},\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"executing delete auth key command: %w\", err)\n\t}\n\n\treturn nil\n}\n\n
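// A hypothetical example creating a tags-only key (no owning user):\n//\n//\tkey, err := hs.CreateAuthKeyWithOptions(AuthKeyOptions{\n//\t\tReusable: true,\n//\t\tTags:     []string{\"tag:server\"},\n//\t})\n\n// ListNodes lists the currently registered Nodes in headscale.\n// Optionally a list of usernames can be passed to list nodes for\n// specific users.\nfunc (t *HeadscaleInContainer) ListNodes(\n\tusers ...string,\n) ([]*v1.Node, error) {\n\tvar ret []*v1.Node\n\n\texecUnmarshal := func(command []string) error {\n\t\tresult, _, err := dockertestutil.ExecuteCommand(\n\t\t\tt.container,\n\t\t\tcommand,\n\t\t\t[]string{},\n\t\t)\n\t\tif err != nil 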
{\n\t\t\treturn fmt.Errorf(\"executing list node command: %w\", err)\n\t\t}\n\n\t\tvar nodes []*v1.Node\n\n\t\terr = json.Unmarshal([]byte(result), &nodes)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling nodes: %w\", err)\n\t\t}\n\n\t\tret = append(ret, nodes...)\n\n\t\treturn nil\n\t}\n\n\tif len(users) == 0 {\n\t\terr := execUnmarshal([]string{\"headscale\", \"nodes\", \"list\", \"--output\", \"json\"})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tfor _, user := range users {\n\t\t\tcommand := []string{\"headscale\", \"--user\", user, \"nodes\", \"list\", \"--output\", \"json\"}\n\n\t\t\terr := execUnmarshal(command)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\tsort.Slice(ret, func(i, j int) bool {\n\t\treturn cmp.Compare(ret[i].GetId(), ret[j].GetId()) == -1\n\t})\n\n\treturn ret, nil\n}\n\nfunc (t *HeadscaleInContainer) DeleteNode(nodeID uint64) error {\n\tcommand := []string{\n\t\t\"headscale\",\n\t\t\"nodes\",\n\t\t\"delete\",\n\t\t\"--identifier\",\n\t\tstrconv.FormatUint(nodeID, 10),\n\t\t\"--output\",\n\t\t\"json\",\n\t\t\"--force\",\n\t}\n\n\t_, _, err := dockertestutil.ExecuteCommand(\n\t\tt.container,\n\t\tcommand,\n\t\t[]string{},\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"executing delete node command: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (t *HeadscaleInContainer) NodesByUser() (map[string][]*v1.Node, error) {\n\tnodes, err := t.ListNodes()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar userMap map[string][]*v1.Node\n\tfor _, node := range nodes {\n\t\tif _, ok := userMap[node.GetUser().GetName()]; !ok {\n\t\t\tmak.Set(&userMap, node.GetUser().GetName(), []*v1.Node{node})\n\t\t} else {\n\t\t\tuserMap[node.GetUser().GetName()] = append(userMap[node.GetUser().GetName()], node)\n\t\t}\n\t}\n\n\treturn userMap, nil\n}\n\nfunc (t *HeadscaleInContainer) NodesByName() (map[string]*v1.Node, error) {\n\tnodes, err := t.ListNodes()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar nameMap map[string]*v1.Node\n\tfor _, node := range nodes {\n\t\tmak.Set(&nameMap, node.GetName(), node)\n\t}\n\n\treturn nameMap, nil\n}\n\n// ListUsers returns a list of users from Headscale.\nfunc (t *HeadscaleInContainer) ListUsers() ([]*v1.User, error) {\n\tcommand := []string{\"headscale\", \"users\", \"list\", \"--output\", \"json\"}\n\n\tresult, _, err := dockertestutil.ExecuteCommand(\n\t\tt.container,\n\t\tcommand,\n\t\t[]string{},\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"executing list users command: %w\", err)\n\t}\n\n\tvar users []*v1.User\n\n\terr = json.Unmarshal([]byte(result), &users)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshalling users: %w\", err)\n\t}\n\n\treturn users, nil\n}\n\n// MapUsers returns a map of users from Headscale. 
It is keyed by the\n// user name.\nfunc (t *HeadscaleInContainer) MapUsers() (map[string]*v1.User, error) {\n\tusers, err := t.ListUsers()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar userMap map[string]*v1.User\n\tfor _, user := range users {\n\t\tmak.Set(&userMap, user.GetName(), user)\n\t}\n\n\treturn userMap, nil\n}\n\n// DeleteUser deletes a user from the Headscale instance.\nfunc (t *HeadscaleInContainer) DeleteUser(userID uint64) error {\n\tcommand := []string{\n\t\t\"headscale\",\n\t\t\"users\",\n\t\t\"delete\",\n\t\t\"--identifier\",\n\t\tstrconv.FormatUint(userID, 10),\n\t\t\"--force\",\n\t\t\"--output\",\n\t\t\"json\",\n\t}\n\n\t_, _, err := dockertestutil.ExecuteCommand(\n\t\tt.container,\n\t\tcommand,\n\t\t[]string{},\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"executing delete user command: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (h *HeadscaleInContainer) SetPolicy(pol *policyv2.Policy) error {\n\terr := h.writePolicy(pol)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"writing policy file: %w\", err)\n\t}\n\n\tswitch h.policyMode {\n\tcase types.PolicyModeDB:\n\t\terr := h.reloadDatabasePolicy()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"reloading database policy: %w\", err)\n\t\t}\n\tcase types.PolicyModeFile:\n\t\terr := h.Reload()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"reloading policy file: %w\", err)\n\t\t}\n\tdefault:\n\t\tpanic(\"policy mode is not valid: \" + h.policyMode)\n\t}\n\n\treturn nil\n}\n\nfunc (h *HeadscaleInContainer) reloadDatabasePolicy() error {\n\t_, err := h.Execute(\n\t\t[]string{\n\t\t\t\"headscale\",\n\t\t\t\"policy\",\n\t\t\t\"set\",\n\t\t\t\"-f\",\n\t\t\taclPolicyPath,\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"setting policy with db command: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (h *HeadscaleInContainer) writePolicy(pol *policyv2.Policy) error {\n\tpBytes, err := json.Marshal(pol)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"marshalling policy: %w\", err)\n\t}\n\n\terr = h.WriteFile(aclPolicyPath, pBytes)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"writing policy to headscale container: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (h *HeadscaleInContainer) PID() (int, error) {\n\t// Use pidof to find the headscale process, which is more reliable than grep\n\t// as it only looks for the actual binary name, not processes that contain\n\t// \"headscale\" in their command line (like the dlv debugger).\n\toutput, err := h.Execute([]string{\"pidof\", \"headscale\"})\n\tif err != nil {\n\t\t// pidof returns exit code 1 when no process is found\n\t\treturn 0, os.ErrNotExist\n\t}\n\n\t// pidof returns space-separated PIDs on a single line\n\tpidStrs := strings.Fields(strings.TrimSpace(output))\n\tif len(pidStrs) == 0 {\n\t\treturn 0, os.ErrNotExist\n\t}\n\n\tpids := make([]int, 0, len(pidStrs))\n\tfor _, pidStr := range pidStrs {\n\t\tpidInt, err := strconv.Atoi(pidStr)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"parsing PID %q: %w\", pidStr, err)\n\t\t}\n\t\t// We don't care about the root pid for the container\n\t\tif pidInt == 1 {\n\t\t\tcontinue\n\t\t}\n\n\t\tpids = append(pids, pidInt)\n\t}\n\n\tswitch len(pids) {\n\tcase 0:\n\t\treturn 0, os.ErrNotExist\n\tcase 1:\n\t\treturn pids[0], nil\n\tdefault:\n\t\t// If we still have multiple PIDs, return the first one as a fallback.\n\t\t// This can happen in edge cases during startup/shutdown.\n\t\treturn pids[0], nil\n\t}\n}\n\n
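// In sketch form, the file-mode update flow used by SetPolicy above is\n// (hypothetical, errors elided):\n//\n//\t_ = h.writePolicy(pol) // write the policy JSON to aclPolicyPath\n//\t_ = h.Reload()         // SIGHUP makes headscale re-read the policy file\n\n// Reload sends a SIGHUP to the headscale process to reload internals,\n// for example Policy from file.\nfunc (h 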
*HeadscaleInContainer) Reload() error {\n\tpid, err := h.PID()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting headscale PID: %w\", err)\n\t}\n\n\t_, err = h.Execute([]string{\"kill\", \"-HUP\", strconv.Itoa(pid)})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"reloading headscale with HUP: %w\", err)\n\t}\n\n\treturn nil\n}\n\n// ApproveRoutes approves routes for a node.\nfunc (t *HeadscaleInContainer) ApproveRoutes(id uint64, routes []netip.Prefix) (*v1.Node, error) {\n\tcommand := []string{\n\t\t\"headscale\", \"nodes\", \"approve-routes\",\n\t\t\"--output\", \"json\",\n\t\t\"--identifier\", strconv.FormatUint(id, 10),\n\t\t\"--routes=\" + strings.Join(util.PrefixesToString(routes), \",\"),\n\t}\n\n\tresult, _, err := dockertestutil.ExecuteCommand(\n\t\tt.container,\n\t\tcommand,\n\t\t[]string{},\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"executing approve routes command (node %d, routes %v): %w\",\n\t\t\tid,\n\t\t\troutes,\n\t\t\terr,\n\t\t)\n\t}\n\n\tvar node *v1.Node\n\n\terr = json.Unmarshal([]byte(result), &node)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshalling node response: %q, error: %w\", result, err)\n\t}\n\n\treturn node, nil\n}\n\n// SetNodeTags sets tags on a node via the headscale CLI.\n// This simulates what the Tailscale admin console UI does - it calls the headscale\n// SetTags API which is exposed via the CLI command: headscale nodes tag -i <id> -t <tags>.\nfunc (t *HeadscaleInContainer) SetNodeTags(nodeID uint64, tags []string) error {\n\tcommand := []string{\n\t\t\"headscale\", \"nodes\", \"tag\",\n\t\t\"--identifier\", strconv.FormatUint(nodeID, 10),\n\t\t\"--output\", \"json\",\n\t}\n\n\t// Add tags - the CLI expects -t flag for each tag or comma-separated\n\tif len(tags) > 0 {\n\t\tcommand = append(command, \"--tags\", strings.Join(tags, \",\"))\n\t} else {\n\t\t// Empty tags to clear all tags\n\t\tcommand = append(command, \"--tags\", \"\")\n\t}\n\n\t_, _, err := dockertestutil.ExecuteCommand(\n\t\tt.container,\n\t\tcommand,\n\t\t[]string{},\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"executing set tags command (node %d, tags %v): %w\", nodeID, tags, err)\n\t}\n\n\treturn nil\n}\n\n// WriteFile saves a file inside the Headscale container.\nfunc (t *HeadscaleInContainer) WriteFile(path string, data []byte) error {\n\treturn integrationutil.WriteFileToContainer(t.pool, t.container, path, data)\n}\n\n// FetchPath gets a path from inside the Headscale container and returns a tar\n// file as a byte array.\nfunc (t *HeadscaleInContainer) FetchPath(path string) ([]byte, error) {\n\treturn integrationutil.FetchPathFromContainer(t.pool, t.container, path)\n}\n\nfunc (t *HeadscaleInContainer) SendInterrupt() error {\n\tpid, err := t.Execute([]string{\"pidof\", \"headscale\"})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = t.Execute([]string{\"kill\", \"-2\", strings.Trim(pid, \"'\\n\")})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n
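// The map-response and Debug* helpers below all follow the same pattern:\n// curl a JSON debug endpoint on localhost:9090 inside the container and\n// unmarshal the response. A hypothetical example:\n//\n//\tinfo, err := hs.DebugBatcher()\n\nfunc (t *HeadscaleInContainer) GetAllMapReponses() (map[types.NodeID][]tailcfg.MapResponse, error) {\n\t// Execute curl inside the container to access the debug endpoint locally\n\tcommand := []string{\n\t\t\"curl\", \"-s\", \"-H\", \"Accept: application/json\", \"http://localhost:9090/debug/mapresponses\",\n\t}\n\n\tresult, err := t.Execute(command)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"fetching mapresponses from debug endpoint: %w\", err)\n\t}\n\n\tvar res map[types.NodeID][]tailcfg.MapResponse\n\tif err := json.Unmarshal([]byte(result), &res); err != nil { 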
//nolint:noinlineerr\n\t\treturn nil, fmt.Errorf(\"decoding routes response: %w\", err)\n\t}\n\n\treturn res, nil\n}\n\n// PrimaryRoutes fetches the primary routes from the debug endpoint.\nfunc (t *HeadscaleInContainer) PrimaryRoutes() (*routes.DebugRoutes, error) {\n\t// Execute curl inside the container to access the debug endpoint locally\n\tcommand := []string{\n\t\t\"curl\", \"-s\", \"-H\", \"Accept: application/json\", \"http://localhost:9090/debug/routes\",\n\t}\n\n\tresult, err := t.Execute(command)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"fetching routes from debug endpoint: %w\", err)\n\t}\n\n\tvar debugRoutes routes.DebugRoutes\n\tif err := json.Unmarshal([]byte(result), &debugRoutes); err != nil { //nolint:noinlineerr\n\t\treturn nil, fmt.Errorf(\"decoding routes response: %w\", err)\n\t}\n\n\treturn &debugRoutes, nil\n}\n\n// DebugBatcher fetches the batcher debug information from the debug endpoint.\nfunc (t *HeadscaleInContainer) DebugBatcher() (*hscontrol.DebugBatcherInfo, error) {\n\t// Execute curl inside the container to access the debug endpoint locally\n\tcommand := []string{\n\t\t\"curl\", \"-s\", \"-H\", \"Accept: application/json\", \"http://localhost:9090/debug/batcher\",\n\t}\n\n\tresult, err := t.Execute(command)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"fetching batcher debug info: %w\", err)\n\t}\n\n\tvar debugInfo hscontrol.DebugBatcherInfo\n\tif err := json.Unmarshal([]byte(result), &debugInfo); err != nil { //nolint:noinlineerr\n\t\treturn nil, fmt.Errorf(\"decoding batcher debug response: %w\", err)\n\t}\n\n\treturn &debugInfo, nil\n}\n\n// DebugNodeStore fetches the NodeStore data from the debug endpoint.\nfunc (t *HeadscaleInContainer) DebugNodeStore() (map[types.NodeID]types.Node, error) {\n\t// Execute curl inside the container to access the debug endpoint locally\n\tcommand := []string{\n\t\t\"curl\", \"-s\", \"-H\", \"Accept: application/json\", \"http://localhost:9090/debug/nodestore\",\n\t}\n\n\tresult, err := t.Execute(command)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"fetching nodestore debug info: %w\", err)\n\t}\n\n\tvar nodeStore map[types.NodeID]types.Node\n\tif err := json.Unmarshal([]byte(result), &nodeStore); err != nil { //nolint:noinlineerr\n\t\treturn nil, fmt.Errorf(\"decoding nodestore debug response: %w\", err)\n\t}\n\n\treturn nodeStore, nil\n}\n\n// DebugFilter fetches the current filter rules from the debug endpoint.\nfunc (t *HeadscaleInContainer) DebugFilter() ([]tailcfg.FilterRule, error) {\n\t// Execute curl inside the container to access the debug endpoint locally\n\tcommand := []string{\n\t\t\"curl\", \"-s\", \"-H\", \"Accept: application/json\", \"http://localhost:9090/debug/filter\",\n\t}\n\n\tresult, err := t.Execute(command)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"fetching filter from debug endpoint: %w\", err)\n\t}\n\n\tvar filterRules []tailcfg.FilterRule\n\tif err := json.Unmarshal([]byte(result), &filterRules); err != nil { //nolint:noinlineerr\n\t\treturn nil, fmt.Errorf(\"decoding filter response: %w\", err)\n\t}\n\n\treturn filterRules, nil\n}\n\n// DebugPolicy fetches the current policy from the debug endpoint.\nfunc (t *HeadscaleInContainer) DebugPolicy() (string, error) {\n\t// Execute curl inside the container to access the debug endpoint locally\n\tcommand := []string{\n\t\t\"curl\", \"-s\", \"http://localhost:9090/debug/policy\",\n\t}\n\n\tresult, err := t.Execute(command)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"fetching policy from debug endpoint: %w\", 
err)\n\t}\n\n\treturn result, nil\n}\n"
  },
  {
    "path": "integration/integrationutil/util.go",
    "content": "package integrationutil\n\nimport (\n\t\"archive/tar\"\n\t\"bytes\"\n\t\"crypto/rand\"\n\t\"crypto/rsa\"\n\t\"crypto/x509\"\n\t\"crypto/x509/pkix\"\n\t\"encoding/pem\"\n\t\"fmt\"\n\t\"io\"\n\t\"math/big\"\n\t\"path/filepath\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/juanfont/headscale/integration/dockertestutil\"\n\t\"github.com/ory/dockertest/v3\"\n\t\"github.com/ory/dockertest/v3/docker\"\n\t\"tailscale.com/tailcfg\"\n)\n\n// PeerSyncTimeout returns the timeout for peer synchronization based on environment:\n// 60s for dev, 120s for CI.\nfunc PeerSyncTimeout() time.Duration {\n\tif util.IsCI() {\n\t\treturn 120 * time.Second\n\t}\n\n\treturn 60 * time.Second\n}\n\n// PeerSyncRetryInterval returns the retry interval for peer synchronization checks.\nfunc PeerSyncRetryInterval() time.Duration {\n\treturn 100 * time.Millisecond\n}\n\nfunc WriteFileToContainer(\n\tpool *dockertest.Pool,\n\tcontainer *dockertest.Resource,\n\tpath string,\n\tdata []byte,\n) error {\n\tdirPath, fileName := filepath.Split(path)\n\n\tfile := bytes.NewReader(data)\n\n\tbuf := bytes.NewBuffer([]byte{})\n\n\ttarWriter := tar.NewWriter(buf)\n\n\theader := &tar.Header{\n\t\tName: fileName,\n\t\tSize: file.Size(),\n\t\t// Mode:    int64(stat.Mode()),\n\t\t// ModTime: stat.ModTime(),\n\t}\n\n\terr := tarWriter.WriteHeader(header)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"writing file header to tar: %w\", err)\n\t}\n\n\t_, err = io.Copy(tarWriter, file)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"copying file to tar: %w\", err)\n\t}\n\n\terr = tarWriter.Close()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"closing tar: %w\", err)\n\t}\n\n\t// Ensure the directory is present inside the container\n\t_, _, err = dockertestutil.ExecuteCommand(\n\t\tcontainer,\n\t\t[]string{\"mkdir\", \"-p\", dirPath},\n\t\t[]string{},\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"ensuring directory: %w\", err)\n\t}\n\n\terr = pool.Client.UploadToContainer(\n\t\tcontainer.Container.ID,\n\t\tdocker.UploadToContainerOptions{\n\t\t\tNoOverwriteDirNonDir: false,\n\t\t\tPath:                 dirPath,\n\t\t\tInputStream:          bytes.NewReader(buf.Bytes()),\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc FetchPathFromContainer(\n\tpool *dockertest.Pool,\n\tcontainer *dockertest.Resource,\n\tpath string,\n) ([]byte, error) {\n\tbuf := bytes.NewBuffer([]byte{})\n\n\terr := pool.Client.DownloadFromContainer(\n\t\tcontainer.Container.ID,\n\t\tdocker.DownloadFromContainerOptions{\n\t\t\tOutputStream: buf,\n\t\t\tPath:         path,\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}\n\n// nolint\n// CreateCertificate generates a CA certificate and a server certificate\n// signed by that CA for the given hostname. 
It returns the CA certificate\n// PEM (for trust stores), server certificate PEM, and server private key\n// PEM.\nfunc CreateCertificate(hostname string) (caCertPEM, certPEM, keyPEM []byte, err error) {\n\t// From:\n\t// https://shaneutt.com/blog/golang-ca-and-signed-cert-go/\n\n\tca := &x509.Certificate{\n\t\tSerialNumber: big.NewInt(2019),\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{\"Headscale testing INC\"},\n\t\t\tCountry:      []string{\"NL\"},\n\t\t\tLocality:     []string{\"Leiden\"},\n\t\t},\n\t\tNotBefore: time.Now(),\n\t\tNotAfter:  time.Now().Add(60 * time.Hour),\n\t\tIsCA:      true,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{\n\t\t\tx509.ExtKeyUsageClientAuth,\n\t\t\tx509.ExtKeyUsageServerAuth,\n\t\t},\n\t\tKeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,\n\t\tBasicConstraintsValid: true,\n\t}\n\n\tcaPrivKey, err := rsa.GenerateKey(rand.Reader, 4096)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tcaBytes, err := x509.CreateCertificate(\n\t\trand.Reader,\n\t\tca,\n\t\tca,\n\t\t&caPrivKey.PublicKey,\n\t\tcaPrivKey,\n\t)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tcaPEM := new(bytes.Buffer)\n\terr = pem.Encode(caPEM, &pem.Block{\n\t\tType:  \"CERTIFICATE\",\n\t\tBytes: caBytes,\n\t})\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tcert := &x509.Certificate{\n\t\tSerialNumber: big.NewInt(1658),\n\t\tSubject: pkix.Name{\n\t\t\tCommonName:   hostname,\n\t\t\tOrganization: []string{\"Headscale testing INC\"},\n\t\t\tCountry:      []string{\"NL\"},\n\t\t\tLocality:     []string{\"Leiden\"},\n\t\t},\n\t\tNotBefore:    time.Now(),\n\t\tNotAfter:     time.Now().Add(60 * time.Minute),\n\t\tSubjectKeyId: []byte{1, 2, 3, 4, 6},\n\t\tExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},\n\t\tKeyUsage:     x509.KeyUsageDigitalSignature,\n\t\tDNSNames:     []string{hostname},\n\t}\n\n\tcertPrivKey, err := rsa.GenerateKey(rand.Reader, 4096)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tcertBytes, err := x509.CreateCertificate(\n\t\trand.Reader,\n\t\tcert,\n\t\tca,\n\t\t&certPrivKey.PublicKey,\n\t\tcaPrivKey,\n\t)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tserverCertPEM := new(bytes.Buffer)\n\terr = pem.Encode(serverCertPEM, &pem.Block{\n\t\tType:  \"CERTIFICATE\",\n\t\tBytes: certBytes,\n\t})\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tcertPrivKeyPEM := new(bytes.Buffer)\n\terr = pem.Encode(certPrivKeyPEM, &pem.Block{\n\t\tType:  \"RSA PRIVATE KEY\",\n\t\tBytes: x509.MarshalPKCS1PrivateKey(certPrivKey),\n\t})\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\treturn caPEM.Bytes(), serverCertPEM.Bytes(), certPrivKeyPEM.Bytes(), nil\n}\n\nfunc BuildExpectedOnlineMap(all map[types.NodeID][]tailcfg.MapResponse) map[types.NodeID]map[types.NodeID]bool {\n\tres := make(map[types.NodeID]map[types.NodeID]bool)\n\tfor nid, mrs := range all {\n\t\tres[nid] = make(map[types.NodeID]bool)\n\n\t\tfor _, mr := range mrs {\n\t\t\tfor _, peer := range mr.Peers {\n\t\t\t\tif peer.Online != nil {\n\t\t\t\t\tres[nid][types.NodeID(peer.ID)] = *peer.Online //nolint:gosec // safe conversion for peer ID\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor _, peer := range mr.PeersChanged {\n\t\t\t\tif peer.Online != nil {\n\t\t\t\t\tres[nid][types.NodeID(peer.ID)] = *peer.Online //nolint:gosec // safe conversion for peer ID\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor _, peer := range mr.PeersChangedPatch {\n\t\t\t\tif peer.Online != nil 
{\n\t\t\t\t\tres[nid][types.NodeID(peer.NodeID)] = *peer.Online //nolint:gosec // safe conversion for peer ID\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn res\n}\n"
  },
  {
    "path": "integration/route_test.go",
    "content": "package integration\n\nimport (\n\t\"cmp\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"maps\"\n\t\"net/netip\"\n\t\"slices\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\tcmpdiff \"github.com/google/go-cmp/cmp\"\n\t\"github.com/google/go-cmp/cmp/cmpopts\"\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\tpolicyv2 \"github.com/juanfont/headscale/hscontrol/policy/v2\"\n\t\"github.com/juanfont/headscale/hscontrol/routes\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/juanfont/headscale/integration/hsic\"\n\t\"github.com/juanfont/headscale/integration/integrationutil\"\n\t\"github.com/juanfont/headscale/integration/tsic\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\txmaps \"golang.org/x/exp/maps\"\n\t\"tailscale.com/ipn/ipnstate\"\n\t\"tailscale.com/net/tsaddr\"\n\t\"tailscale.com/tailcfg\"\n\t\"tailscale.com/types/ipproto\"\n\t\"tailscale.com/types/views\"\n\t\"tailscale.com/util/must\"\n\t\"tailscale.com/util/slicesx\"\n\t\"tailscale.com/wgengine/filter\"\n)\n\nvar allPorts = filter.PortRange{First: 0, Last: 0xffff}\n\n// This test is both testing the routes command and the propagation of\n// routes.\nfunc TestEnablingRoutes(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 3,\n\t\tUsers:        []string{\"user1\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoErrorf(t, err, \"failed to create scenario: %s\", err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{tsic.WithAcceptRoutes()},\n\t\thsic.WithTestName(\"rt-enable\"))\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\texpectedRoutes := map[string]string{\n\t\t\"1\": \"10.0.0.0/24\",\n\t\t\"2\": \"10.0.1.0/24\",\n\t\t\"3\": \"10.0.2.0/24\",\n\t}\n\n\t// advertise routes using the up command\n\tfor _, client := range allClients {\n\t\tstatus := client.MustStatus()\n\t\tcommand := []string{\n\t\t\t\"tailscale\",\n\t\t\t\"set\",\n\t\t\t\"--advertise-routes=\" + expectedRoutes[string(status.Self.ID)],\n\t\t}\n\t\t_, _, err = client.Execute(command)\n\t\trequire.NoErrorf(t, err, \"failed to advertise route: %s\", err)\n\t}\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\tvar nodes []*v1.Node\n\t// Wait for route advertisements to propagate to NodeStore\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tvar err error\n\n\t\tnodes, err = headscale.ListNodes()\n\t\tassert.NoError(ct, err)\n\n\t\tfor _, node := range nodes {\n\t\t\tassert.Len(ct, node.GetAvailableRoutes(), 1)\n\t\t\tassert.Empty(ct, node.GetApprovedRoutes())\n\t\t\tassert.Empty(ct, node.GetSubnetRoutes())\n\t\t}\n\t}, 10*time.Second, 100*time.Millisecond, \"route advertisements should propagate to all nodes\")\n\n\t// Verify that no routes has been sent to the client,\n\t// they are not yet enabled.\n\tfor _, client := range allClients {\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tstatus, err := client.Status()\n\t\t\tassert.NoError(c, err)\n\n\t\t\tfor _, peerKey := range status.Peers() {\n\t\t\t\tpeerStatus := status.Peer[peerKey]\n\n\t\t\t\tassert.Nil(c, peerStatus.PrimaryRoutes)\n\t\t\t}\n\t\t}, 5*time.Second, 
200*time.Millisecond, \"Verifying no routes are active before approval\")\n\t}\n\n\tfor _, node := range nodes {\n\t\t_, err := headscale.ApproveRoutes(\n\t\t\tnode.GetId(),\n\t\t\tutil.MustStringsToPrefixes(node.GetAvailableRoutes()),\n\t\t)\n\t\trequire.NoError(t, err)\n\t}\n\n\t// Wait for route approvals to propagate to NodeStore\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tvar err error\n\n\t\tnodes, err = headscale.ListNodes()\n\t\tassert.NoError(ct, err)\n\n\t\tfor _, node := range nodes {\n\t\t\tassert.Len(ct, node.GetAvailableRoutes(), 1)\n\t\t\tassert.Len(ct, node.GetApprovedRoutes(), 1)\n\t\t\tassert.Len(ct, node.GetSubnetRoutes(), 1)\n\t\t}\n\t}, 10*time.Second, 100*time.Millisecond, \"route approvals should propagate to all nodes\")\n\n\t// Wait for route state changes to propagate to clients\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t// Verify that the clients can see the new routes\n\t\tfor _, client := range allClients {\n\t\t\tstatus, err := client.Status()\n\t\t\tassert.NoError(c, err)\n\n\t\t\tfor _, peerKey := range status.Peers() {\n\t\t\t\tpeerStatus := status.Peer[peerKey]\n\n\t\t\t\tassert.NotNil(c, peerStatus.PrimaryRoutes)\n\t\t\t\tassert.NotNil(c, peerStatus.AllowedIPs)\n\n\t\t\t\tif peerStatus.AllowedIPs != nil {\n\t\t\t\t\tassert.Len(c, peerStatus.AllowedIPs.AsSlice(), 3)\n\t\t\t\t}\n\n\t\t\t\trequirePeerSubnetRoutesWithCollect(c, peerStatus, []netip.Prefix{netip.MustParsePrefix(expectedRoutes[string(peerStatus.ID)])})\n\t\t\t}\n\t\t}\n\t}, 10*time.Second, 500*time.Millisecond, \"clients should see new routes\")\n\n\t_, err = headscale.ApproveRoutes(\n\t\t1,\n\t\t[]netip.Prefix{netip.MustParsePrefix(\"10.0.1.0/24\")},\n\t)\n\trequire.NoError(t, err)\n\n\t_, err = headscale.ApproveRoutes(\n\t\t2,\n\t\t[]netip.Prefix{},\n\t)\n\trequire.NoError(t, err)\n\n\t// Wait for route state changes to propagate to nodes\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tvar err error\n\n\t\tnodes, err = headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\n\t\tfor _, node := range nodes {\n\t\t\tif node.GetId() == 1 {\n\t\t\t\tassert.Len(c, node.GetAvailableRoutes(), 1) // 10.0.0.0/24\n\t\t\t\tassert.Len(c, node.GetApprovedRoutes(), 1)  // 10.0.1.0/24\n\t\t\t\tassert.Empty(c, node.GetSubnetRoutes())\n\t\t\t} else if node.GetId() == 2 {\n\t\t\t\tassert.Len(c, node.GetAvailableRoutes(), 1) // 10.0.1.0/24\n\t\t\t\tassert.Empty(c, node.GetApprovedRoutes())\n\t\t\t\tassert.Empty(c, node.GetSubnetRoutes())\n\t\t\t} else {\n\t\t\t\tassert.Len(c, node.GetAvailableRoutes(), 1) // 10.0.2.0/24\n\t\t\t\tassert.Len(c, node.GetApprovedRoutes(), 1)  // 10.0.2.0/24\n\t\t\t\tassert.Len(c, node.GetSubnetRoutes(), 1)    // 10.0.2.0/24\n\t\t\t}\n\t\t}\n\t}, 10*time.Second, 500*time.Millisecond, \"route state changes should propagate to nodes\")\n\n\t// Verify that the clients can see the new routes\n\tfor _, client := range allClients {\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tstatus, err := client.Status()\n\t\t\tassert.NoError(c, err)\n\n\t\t\tfor _, peerKey := range status.Peers() {\n\t\t\t\tpeerStatus := status.Peer[peerKey]\n\n\t\t\t\tswitch peerStatus.ID {\n\t\t\t\tcase \"1\":\n\t\t\t\t\trequirePeerSubnetRoutesWithCollect(c, peerStatus, nil)\n\t\t\t\tcase \"2\":\n\t\t\t\t\trequirePeerSubnetRoutesWithCollect(c, peerStatus, nil)\n\t\t\t\tdefault:\n\t\t\t\t\trequirePeerSubnetRoutesWithCollect(c, peerStatus, []netip.Prefix{netip.MustParsePrefix(\"10.0.2.0/24\")})\n\t\t\t\t}\n\t\t\t}\n\t\t}, 5*time.Second, 
200*time.Millisecond, \"Verifying final route state visible to clients\")\n\t}\n}\n\n//nolint:gocyclo // complex HA failover test scenario\nfunc TestHASubnetRouterFailover(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpropagationTime := 60 * time.Second\n\n\t// Helper function to validate primary routes table state\n\tvalidatePrimaryRoutes := func(t *testing.T, headscale ControlServer, expectedRoutes *routes.DebugRoutes, message string) {\n\t\tt.Helper()\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tprimaryRoutesState, err := headscale.PrimaryRoutes()\n\t\t\tassert.NoError(c, err)\n\n\t\t\tif diff := cmpdiff.Diff(expectedRoutes, primaryRoutesState, util.PrefixComparer); diff != \"\" {\n\t\t\t\tt.Log(message)\n\t\t\t\tt.Errorf(\"validatePrimaryRoutes mismatch (-want +got):\\n%s\", diff)\n\t\t\t}\n\t\t}, propagationTime, 200*time.Millisecond, \"Validating primary routes table\")\n\t}\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 3,\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t\tNetworks: map[string][]string{\n\t\t\t\"usernet1\": {\"user1\"},\n\t\t\t\"usernet2\": {\"user2\"},\n\t\t},\n\t\tExtraService: map[string][]extraServiceFunc{\n\t\t\t\"usernet1\": {Webservice},\n\t\t},\n\t\t// We build the head image with curl and traceroute, so only use\n\t\t// that for this test.\n\t\tVersions: []string{\"head\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\trequire.NoErrorf(t, err, \"failed to create scenario: %s\", err)\n\t// defer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{tsic.WithAcceptRoutes()},\n\t\thsic.WithTestName(\"rt-hafailover\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\tprefp, err := scenario.SubnetOfNetwork(\"usernet1\")\n\trequire.NoError(t, err)\n\n\tpref := *prefp\n\tt.Logf(\"usernet1 prefix: %s\", pref.String())\n\n\tusernet1, err := scenario.Network(\"usernet1\")\n\trequire.NoError(t, err)\n\n\tservices, err := scenario.Services(\"usernet1\")\n\trequire.NoError(t, err)\n\trequire.Len(t, services, 1)\n\n\tweb := services[0]\n\twebip := netip.MustParseAddr(web.GetIPInNetwork(usernet1))\n\tweburl := fmt.Sprintf(\"http://%s/etc/hostname\", webip)\n\tt.Logf(\"webservice: %s, %s\", webip.String(), weburl)\n\n\t// Sort nodes by ID\n\tsort.SliceStable(allClients, func(i, j int) bool {\n\t\tstatusI := allClients[i].MustStatus()\n\t\tstatusJ := allClients[j].MustStatus()\n\n\t\treturn statusI.Self.ID < statusJ.Self.ID\n\t})\n\n\t// This is ok because the scenario creates users in order, so the first\n\t// three nodes, which are the subnet routers, belong to the first user,\n\t// and the client below belongs to the second.\n\tsubRouter1 := allClients[0]\n\tsubRouter2 := allClients[1]\n\tsubRouter3 := allClients[2]\n\n\tclient := allClients[3]\n\n\tt.Logf(\"%s (%s) picked as client\", client.Hostname(), client.MustID())\n\tt.Logf(\"=== Initial Route Advertisement - Setting up HA configuration with 3 routers ===\")\n\tt.Logf(\"[%s] Starting test section\", time.Now().Format(TimestampFormat))\n\tt.Logf(\"  - Router 1 (%s): Advertising route %s - will become PRIMARY when approved\", subRouter1.Hostname(), pref.String())\n\tt.Logf(\"  - Router 2 (%s): Advertising route %s - will be STANDBY when approved\", subRouter2.Hostname(), pref.String())\n\tt.Logf(\"  - Router 3 (%s): 
Advertising route %s - will be STANDBY when approved\", subRouter3.Hostname(), pref.String())\n\tt.Logf(\"  Expected: All 3 routers advertise the same route for redundancy, but only one will be primary at a time\")\n\n\tfor _, client := range allClients[:3] {\n\t\tcommand := []string{\n\t\t\t\"tailscale\",\n\t\t\t\"set\",\n\t\t\t\"--advertise-routes=\" + pref.String(),\n\t\t}\n\t\t_, _, err = client.Execute(command)\n\t\trequire.NoErrorf(t, err, \"failed to advertise route: %s\", err)\n\t}\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t// Wait for route configuration changes after advertising routes\n\tvar nodes []*v1.Node\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err = headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 6)\n\t\trequire.GreaterOrEqual(t, len(nodes), 3, \"need at least 3 nodes to avoid panic\")\n\t\trequireNodeRouteCountWithCollect(c, nodes[0], 1, 0, 0)\n\t\trequireNodeRouteCountWithCollect(c, nodes[1], 1, 0, 0)\n\t\trequireNodeRouteCountWithCollect(c, nodes[2], 1, 0, 0)\n\t}, propagationTime, 200*time.Millisecond, \"Waiting for route advertisements: All 3 routers should have advertised routes (available=1) but none approved yet (approved=0, subnet=0)\")\n\n\t// Verify that no routes have been sent to the clients;\n\t// they are not yet approved.\n\tfor _, client := range allClients {\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tstatus, err := client.Status()\n\t\t\tassert.NoError(c, err)\n\n\t\t\tfor _, peerKey := range status.Peers() {\n\t\t\t\tpeerStatus := status.Peer[peerKey]\n\n\t\t\t\tassert.Nil(c, peerStatus.PrimaryRoutes)\n\t\t\t\trequirePeerSubnetRoutesWithCollect(c, peerStatus, nil)\n\t\t\t}\n\t\t}, propagationTime, 200*time.Millisecond, \"Verifying no routes are active before approval\")\n\t}\n\n\t// Declare variables that will be used across multiple EventuallyWithT blocks\n\tvar (\n\t\tsrs1, srs2, srs3 *ipnstate.Status\n\t\tclientStatus     *ipnstate.Status\n\t\tsrs1PeerStatus   *ipnstate.PeerStatus\n\t\tsrs2PeerStatus   *ipnstate.PeerStatus\n\t\tsrs3PeerStatus   *ipnstate.PeerStatus\n\t)\n\n\t// Helper function to check test failure and print route map if needed\n\tcheckFailureAndPrintRoutes := func(t *testing.T, client TailscaleClient) { //nolint:thelper\n\t\tif t.Failed() {\n\t\t\tt.Logf(\"[%s] Test failed at this checkpoint\", time.Now().Format(TimestampFormat))\n\n\t\t\tstatus, err := client.Status()\n\t\t\tif err == nil {\n\t\t\t\tprintCurrentRouteMap(t, xmaps.Values(status.Peer)...)\n\t\t\t}\n\n\t\t\tt.FailNow()\n\t\t}\n\t}\n\n\t// Validate primary routes table state - no routes approved yet\n\tvalidatePrimaryRoutes(t, headscale, &routes.DebugRoutes{\n\t\tAvailableRoutes: map[types.NodeID][]netip.Prefix{},\n\t\tPrimaryRoutes:   map[string]types.NodeID{}, // No primary routes yet\n\t}, \"Primary routes table should be empty (no approved routes yet)\")\n\n\tcheckFailureAndPrintRoutes(t, client)\n\n\t// Enable route on node 1\n\tt.Logf(\"=== Approving route on router 1 (%s) - Single router mode (no HA yet) ===\", subRouter1.Hostname())\n\tt.Logf(\"[%s] Starting test section\", time.Now().Format(TimestampFormat))\n\tt.Logf(\"  Expected: Router 1 becomes PRIMARY with route %s active\", pref.String())\n\tt.Logf(\"  Expected: Routers 2 & 3 remain with advertised but unapproved routes\")\n\tt.Logf(\"  Expected: Client can access webservice through router 1 only\")\n\n\t_, err = headscale.ApproveRoutes(\n\t\tMustFindNode(subRouter1.Hostname(), 
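\n\t\t// ApproveRoutes takes a node ID plus the full set of prefixes to approve;\n\t\t// MustFindNode resolves the router's hostname to its *v1.Node entry from\n\t\t// the most recent ListNodes snapshot.\n\t\t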
nodes).GetId(),\n\t\t[]netip.Prefix{pref},\n\t)\n\trequire.NoError(t, err)\n\n\t// Wait for route approval on first subnet router\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err = headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 6)\n\t\trequire.GreaterOrEqual(t, len(nodes), 3, \"need at least 3 nodes to avoid panic\")\n\t\trequireNodeRouteCountWithCollect(c, nodes[0], 1, 1, 1)\n\t\trequireNodeRouteCountWithCollect(c, nodes[1], 1, 0, 0)\n\t\trequireNodeRouteCountWithCollect(c, nodes[2], 1, 0, 0)\n\t}, propagationTime, 200*time.Millisecond, \"Router 1 approval verification: Should be PRIMARY (available=1, approved=1, subnet=1), others still unapproved (available=1, approved=0, subnet=0)\")\n\n\t// Verify that the client has routes from the primary machine and can access\n\t// the webservice.\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tsrs1 = subRouter1.MustStatus()\n\t\tsrs2 = subRouter2.MustStatus()\n\t\tsrs3 = subRouter3.MustStatus()\n\t\tclientStatus = client.MustStatus()\n\n\t\tsrs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey]\n\t\tsrs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey]\n\t\tsrs3PeerStatus = clientStatus.Peer[srs3.Self.PublicKey]\n\n\t\tassert.NotNil(c, srs1PeerStatus, \"Router 1 peer should exist\")\n\t\tassert.NotNil(c, srs2PeerStatus, \"Router 2 peer should exist\")\n\t\tassert.NotNil(c, srs3PeerStatus, \"Router 3 peer should exist\")\n\n\t\tif srs1PeerStatus == nil || srs2PeerStatus == nil || srs3PeerStatus == nil {\n\t\t\treturn\n\t\t}\n\n\t\tassert.True(c, srs1PeerStatus.Online, \"Router 1 should be online and serving as PRIMARY\")\n\t\tassert.True(c, srs2PeerStatus.Online, \"Router 2 should be online but NOT serving routes (unapproved)\")\n\t\tassert.True(c, srs3PeerStatus.Online, \"Router 3 should be online but NOT serving routes (unapproved)\")\n\n\t\tassert.Nil(c, srs2PeerStatus.PrimaryRoutes)\n\t\tassert.Nil(c, srs3PeerStatus.PrimaryRoutes)\n\t\tassert.NotNil(c, srs1PeerStatus.PrimaryRoutes)\n\n\t\trequirePeerSubnetRoutesWithCollect(c, srs1PeerStatus, []netip.Prefix{pref})\n\t\trequirePeerSubnetRoutesWithCollect(c, srs2PeerStatus, nil)\n\t\trequirePeerSubnetRoutesWithCollect(c, srs3PeerStatus, nil)\n\n\t\tif srs1PeerStatus.PrimaryRoutes != nil {\n\t\t\tt.Logf(\"got list: %v, want in: %v\", srs1PeerStatus.PrimaryRoutes.AsSlice(), pref)\n\t\t\tassert.Contains(c,\n\t\t\t\tsrs1PeerStatus.PrimaryRoutes.AsSlice(),\n\t\t\t\tpref,\n\t\t\t)\n\t\t}\n\t}, propagationTime, 200*time.Millisecond, \"Verifying Router 1 is PRIMARY with routes after approval\")\n\n\tt.Logf(\"=== Validating connectivity through PRIMARY router 1 (%s) to webservice at %s ===\", must.Get(subRouter1.IPv4()).String(), webip.String())\n\tt.Logf(\"[%s] Starting test section\", time.Now().Format(TimestampFormat))\n\tt.Logf(\"  Expected: Traffic flows through router 1 as it's the only approved route\")\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tresult, err := client.Curl(weburl)\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, result, 13)\n\t}, propagationTime, 200*time.Millisecond, \"Verifying client can reach webservice through router 1\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\ttr, err := client.Traceroute(webip)\n\t\tassert.NoError(c, err)\n\n\t\tip, err := subRouter1.IPv4()\n\t\tif !assert.NoError(c, err, \"failed to get IPv4 for subRouter1\") {\n\t\t\treturn\n\t\t}\n\n\t\tassertTracerouteViaIPWithCollect(c, tr, ip)\n\t}, propagationTime, 200*time.Millisecond, \"Verifying traceroute goes 
through router 1\")\n\n\t// Validate primary routes table state - router 1 is primary\n\tvalidatePrimaryRoutes(t, headscale, &routes.DebugRoutes{\n\t\tAvailableRoutes: map[types.NodeID][]netip.Prefix{\n\t\t\ttypes.NodeID(MustFindNode(subRouter1.Hostname(), nodes).GetId()): {pref},\n\t\t\t// Note: Router 2 and 3 are available but not approved\n\t\t},\n\t\tPrimaryRoutes: map[string]types.NodeID{\n\t\t\tpref.String(): types.NodeID(MustFindNode(subRouter1.Hostname(), nodes).GetId()),\n\t\t},\n\t}, \"Router 1 should be primary for route \"+pref.String())\n\n\tcheckFailureAndPrintRoutes(t, client)\n\n\t// Enable route on node 2, now we will have a HA subnet router\n\tt.Logf(\"=== Enabling High Availability by approving route on router 2 (%s) ===\", subRouter2.Hostname())\n\tt.Logf(\"[%s] Starting test section\", time.Now().Format(TimestampFormat))\n\tt.Logf(\"  Current state: Router 1 is PRIMARY and actively serving traffic\")\n\tt.Logf(\"  Expected: Router 2 becomes STANDBY (approved but not primary)\")\n\tt.Logf(\"  Expected: Router 1 remains PRIMARY (no flapping - stability preferred)\")\n\tt.Logf(\"  Expected: HA is now active - if router 1 fails, router 2 can take over\")\n\n\t_, err = headscale.ApproveRoutes(\n\t\tMustFindNode(subRouter2.Hostname(), nodes).GetId(),\n\t\t[]netip.Prefix{pref},\n\t)\n\trequire.NoError(t, err)\n\n\t// Wait for route approval on second subnet router\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err = headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 6)\n\n\t\tif len(nodes) >= 3 {\n\t\t\trequireNodeRouteCountWithCollect(c, nodes[0], 1, 1, 1)\n\t\t\trequireNodeRouteCountWithCollect(c, nodes[1], 1, 1, 0)\n\t\t\trequireNodeRouteCountWithCollect(c, nodes[2], 1, 0, 0)\n\t\t}\n\t}, 3*time.Second, 200*time.Millisecond, \"HA setup verification: Router 2 approved as STANDBY (available=1, approved=1, subnet=0), Router 1 stays PRIMARY (subnet=1)\")\n\n\t// Verify that the client has routes from the primary machine\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tsrs1 = subRouter1.MustStatus()\n\t\tsrs2 = subRouter2.MustStatus()\n\t\tsrs3 = subRouter3.MustStatus()\n\t\tclientStatus = client.MustStatus()\n\n\t\tsrs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey]\n\t\tsrs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey]\n\t\tsrs3PeerStatus = clientStatus.Peer[srs3.Self.PublicKey]\n\n\t\tassert.NotNil(c, srs1PeerStatus, \"Router 1 peer should exist\")\n\t\tassert.NotNil(c, srs2PeerStatus, \"Router 2 peer should exist\")\n\t\tassert.NotNil(c, srs3PeerStatus, \"Router 3 peer should exist\")\n\n\t\tif srs1PeerStatus == nil || srs2PeerStatus == nil || srs3PeerStatus == nil {\n\t\t\treturn\n\t\t}\n\n\t\tassert.True(c, srs1PeerStatus.Online, \"Router 1 should be online and remain PRIMARY\")\n\t\tassert.True(c, srs2PeerStatus.Online, \"Router 2 should be online and now approved as STANDBY\")\n\t\tassert.True(c, srs3PeerStatus.Online, \"Router 3 should be online but still unapproved\")\n\n\t\tassert.Nil(c, srs2PeerStatus.PrimaryRoutes)\n\t\tassert.Nil(c, srs3PeerStatus.PrimaryRoutes)\n\t\tassert.NotNil(c, srs1PeerStatus.PrimaryRoutes)\n\n\t\trequirePeerSubnetRoutesWithCollect(c, srs1PeerStatus, []netip.Prefix{pref})\n\t\trequirePeerSubnetRoutesWithCollect(c, srs2PeerStatus, nil)\n\t\trequirePeerSubnetRoutesWithCollect(c, srs3PeerStatus, nil)\n\n\t\tif srs1PeerStatus.PrimaryRoutes != nil {\n\t\t\tt.Logf(\"got list: %v, want in: %v\", srs1PeerStatus.PrimaryRoutes.AsSlice(), 
pref)\n\t\t\tassert.Contains(c,\n\t\t\t\tsrs1PeerStatus.PrimaryRoutes.AsSlice(),\n\t\t\t\tpref,\n\t\t\t)\n\t\t}\n\t}, propagationTime, 200*time.Millisecond, \"Verifying Router 1 remains PRIMARY after Router 2 approval\")\n\n\t// Validate primary routes table state - router 1 still primary, router 2 approved but standby\n\tvalidatePrimaryRoutes(t, headscale, &routes.DebugRoutes{\n\t\tAvailableRoutes: map[types.NodeID][]netip.Prefix{\n\t\t\ttypes.NodeID(MustFindNode(subRouter1.Hostname(), nodes).GetId()): {pref},\n\t\t\ttypes.NodeID(MustFindNode(subRouter2.Hostname(), nodes).GetId()): {pref},\n\t\t\t// Note: Router 3 is available but not approved\n\t\t},\n\t\tPrimaryRoutes: map[string]types.NodeID{\n\t\t\tpref.String(): types.NodeID(MustFindNode(subRouter1.Hostname(), nodes).GetId()),\n\t\t},\n\t}, \"Router 1 should remain primary after router 2 approval\")\n\n\tcheckFailureAndPrintRoutes(t, client)\n\n\tt.Logf(\"=== Validating HA configuration - Router 1 PRIMARY, Router 2 STANDBY ===\")\n\tt.Logf(\"[%s] Starting test section\", time.Now().Format(TimestampFormat))\n\tt.Logf(\"  Current routing: Traffic through router 1 (%s) to %s\", must.Get(subRouter1.IPv4()), webip.String())\n\tt.Logf(\"  Expected: Router 1 continues to handle all traffic (no change from before)\")\n\tt.Logf(\"  Expected: Router 2 is ready to take over if router 1 fails\")\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tresult, err := client.Curl(weburl)\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, result, 13)\n\t}, propagationTime, 200*time.Millisecond, \"Verifying client can reach webservice through router 1 in HA mode\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\ttr, err := client.Traceroute(webip)\n\t\tassert.NoError(c, err)\n\n\t\tip, err := subRouter1.IPv4()\n\t\tif !assert.NoError(c, err, \"failed to get IPv4 for subRouter1\") {\n\t\t\treturn\n\t\t}\n\n\t\tassertTracerouteViaIPWithCollect(c, tr, ip)\n\t}, propagationTime, 200*time.Millisecond, \"Verifying traceroute still goes through router 1 in HA mode\")\n\n\t// Validate primary routes table state - router 1 primary, router 2 approved (standby)\n\tvalidatePrimaryRoutes(t, headscale, &routes.DebugRoutes{\n\t\tAvailableRoutes: map[types.NodeID][]netip.Prefix{\n\t\t\ttypes.NodeID(MustFindNode(subRouter1.Hostname(), nodes).GetId()): {pref},\n\t\t\ttypes.NodeID(MustFindNode(subRouter2.Hostname(), nodes).GetId()): {pref},\n\t\t\t// Note: Router 3 is available but not approved\n\t\t},\n\t\tPrimaryRoutes: map[string]types.NodeID{\n\t\t\tpref.String(): types.NodeID(MustFindNode(subRouter1.Hostname(), nodes).GetId()),\n\t\t},\n\t}, \"Router 1 primary with router 2 as standby\")\n\n\tcheckFailureAndPrintRoutes(t, client)\n\n\t// Enable route on node 3, now we will have a second standby and all will\n\t// be enabled.\n\tt.Logf(\"=== Adding second STANDBY router by approving route on router 3 (%s) ===\", subRouter3.Hostname())\n\tt.Logf(\"[%s] Starting test section\", time.Now().Format(TimestampFormat))\n\tt.Logf(\"  Current state: Router 1 PRIMARY, Router 2 STANDBY\")\n\tt.Logf(\"  Expected: Router 3 becomes second STANDBY (approved but not primary)\")\n\tt.Logf(\"  Expected: Router 1 remains PRIMARY, Router 2 remains first STANDBY\")\n\tt.Logf(\"  Expected: Full HA configuration with 1 PRIMARY + 2 STANDBY routers\")\n\n\t_, err = headscale.ApproveRoutes(\n\t\tMustFindNode(subRouter3.Hostname(), nodes).GetId(),\n\t\t[]netip.Prefix{pref},\n\t)\n\trequire.NoError(t, err)\n\n\t// Wait for route approval on third subnet 
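router.\n\t// The (available, approved, subnet) triples asserted via\n\t// requireNodeRouteCountWithCollect throughout this test read as:\n\t//   1,1,1 = PRIMARY (approved and actively serving)\n\t//   1,1,0 = STANDBY (approved, not serving)\n\t//   1,0,0 = advertised only (not approved)\n\t// Here approval should land on the third subnet 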
router\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err = headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 6)\n\t\trequire.GreaterOrEqual(t, len(nodes), 3, \"need at least 3 nodes to avoid panic\")\n\t\trequireNodeRouteCountWithCollect(c, nodes[0], 1, 1, 1)\n\t\trequireNodeRouteCountWithCollect(c, nodes[1], 1, 1, 0)\n\t\trequireNodeRouteCountWithCollect(c, nodes[2], 1, 1, 0)\n\t}, 3*time.Second, 200*time.Millisecond, \"Full HA verification: Router 3 approved as second STANDBY (available=1, approved=1, subnet=0), Router 1 PRIMARY, Router 2 first STANDBY\")\n\n\t// Verify that the client has routes from the primary machine\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tsrs1 = subRouter1.MustStatus()\n\t\tsrs2 = subRouter2.MustStatus()\n\t\tsrs3 = subRouter3.MustStatus()\n\t\tclientStatus = client.MustStatus()\n\n\t\tsrs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey]\n\t\tsrs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey]\n\t\tsrs3PeerStatus = clientStatus.Peer[srs3.Self.PublicKey]\n\n\t\tassert.NotNil(c, srs1PeerStatus, \"Router 1 peer should exist\")\n\t\tassert.NotNil(c, srs2PeerStatus, \"Router 2 peer should exist\")\n\t\tassert.NotNil(c, srs3PeerStatus, \"Router 3 peer should exist\")\n\n\t\tif srs1PeerStatus == nil || srs2PeerStatus == nil || srs3PeerStatus == nil {\n\t\t\treturn\n\t\t}\n\n\t\tassert.True(c, srs1PeerStatus.Online, \"Router 1 should be online and remain PRIMARY\")\n\t\tassert.True(c, srs2PeerStatus.Online, \"Router 2 should be online as first STANDBY\")\n\t\tassert.True(c, srs3PeerStatus.Online, \"Router 3 should be online as second STANDBY\")\n\n\t\tassert.Nil(c, srs2PeerStatus.PrimaryRoutes)\n\t\tassert.Nil(c, srs3PeerStatus.PrimaryRoutes)\n\t\tassert.NotNil(c, srs1PeerStatus.PrimaryRoutes)\n\n\t\trequirePeerSubnetRoutesWithCollect(c, srs1PeerStatus, []netip.Prefix{pref})\n\t\trequirePeerSubnetRoutesWithCollect(c, srs2PeerStatus, nil)\n\t\trequirePeerSubnetRoutesWithCollect(c, srs3PeerStatus, nil)\n\n\t\tif srs1PeerStatus.PrimaryRoutes != nil {\n\t\t\tt.Logf(\"got list: %v, want in: %v\", srs1PeerStatus.PrimaryRoutes.AsSlice(), pref)\n\t\t\tassert.Contains(c,\n\t\t\t\tsrs1PeerStatus.PrimaryRoutes.AsSlice(),\n\t\t\t\tpref,\n\t\t\t)\n\t\t}\n\t}, propagationTime, 200*time.Millisecond, \"Verifying full HA with 3 routers: Router 1 PRIMARY, Routers 2 & 3 STANDBY\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tresult, err := client.Curl(weburl)\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, result, 13)\n\t}, propagationTime, 200*time.Millisecond, \"Verifying client can reach webservice through router 1 with full HA\")\n\n\t// Wait for traceroute to work correctly through the expected router\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\ttr, err := client.Traceroute(webip)\n\t\tassert.NoError(c, err)\n\n\t\t// Get the expected router IP - use a more robust approach to handle temporary disconnections\n\t\tips, err := subRouter1.IPs()\n\t\tassert.NoError(c, err)\n\t\tassert.NotEmpty(c, ips, \"subRouter1 should have IP addresses\")\n\n\t\tvar expectedIP netip.Addr\n\n\t\tfor _, ip := range ips {\n\t\t\tif ip.Is4() {\n\t\t\t\texpectedIP = ip\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tassert.True(c, expectedIP.IsValid(), \"subRouter1 should have a valid IPv4 address\")\n\n\t\tassertTracerouteViaIPWithCollect(c, tr, expectedIP)\n\t}, propagationTime, 200*time.Millisecond, \"Verifying traffic still flows through PRIMARY router 1 with full HA setup active\")\n\n\t// Validate primary 
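routes via headscale.PrimaryRoutes(), which returns the server's\n\t// routes.DebugRoutes snapshot. A populated value looks roughly like this\n\t// (illustrative only; node IDs depend on creation order):\n\t//\n\t//   AvailableRoutes: {1: [pref], 2: [pref], 3: [pref]}\n\t//   PrimaryRoutes:   {pref.String(): 1}\n\t//\n\t// Validate primary 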
routes table state - all 3 routers approved, router 1 still primary\n\tvalidatePrimaryRoutes(t, headscale, &routes.DebugRoutes{\n\t\tAvailableRoutes: map[types.NodeID][]netip.Prefix{\n\t\t\ttypes.NodeID(MustFindNode(subRouter1.Hostname(), nodes).GetId()): {pref},\n\t\t\ttypes.NodeID(MustFindNode(subRouter2.Hostname(), nodes).GetId()): {pref},\n\t\t\ttypes.NodeID(MustFindNode(subRouter3.Hostname(), nodes).GetId()): {pref},\n\t\t},\n\t\tPrimaryRoutes: map[string]types.NodeID{\n\t\t\tpref.String(): types.NodeID(MustFindNode(subRouter1.Hostname(), nodes).GetId()),\n\t\t},\n\t}, \"Router 1 primary with all 3 routers approved\")\n\n\tcheckFailureAndPrintRoutes(t, client)\n\n\t// Take down the current primary\n\tt.Logf(\"=== FAILOVER TEST: Taking down PRIMARY router 1 (%s) ===\", subRouter1.Hostname())\n\tt.Logf(\"[%s] Starting test section\", time.Now().Format(TimestampFormat))\n\tt.Logf(\"  Current state: Router 1 PRIMARY (serving traffic), Router 2 & 3 STANDBY\")\n\tt.Logf(\"  Action: Shutting down router 1 to simulate failure\")\n\tt.Logf(\"  Expected: Router 2 (%s) should automatically become new PRIMARY\", subRouter2.Hostname())\n\tt.Logf(\"  Expected: Router 3 remains STANDBY\")\n\tt.Logf(\"  Expected: Traffic seamlessly fails over to router 2\")\n\n\terr = subRouter1.Down()\n\trequire.NoError(t, err)\n\n\t// Wait for router status changes after r1 goes down\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tsrs2 = subRouter2.MustStatus()\n\t\tclientStatus = client.MustStatus()\n\n\t\tsrs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey]\n\t\tsrs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey]\n\t\tsrs3PeerStatus = clientStatus.Peer[srs3.Self.PublicKey]\n\n\t\tassert.NotNil(c, srs1PeerStatus, \"Router 1 peer should exist\")\n\t\tassert.NotNil(c, srs2PeerStatus, \"Router 2 peer should exist\")\n\t\tassert.NotNil(c, srs3PeerStatus, \"Router 3 peer should exist\")\n\n\t\tif srs1PeerStatus == nil || srs2PeerStatus == nil || srs3PeerStatus == nil {\n\t\t\treturn\n\t\t}\n\n\t\tassert.False(c, srs1PeerStatus.Online, \"r1 should be offline\")\n\t\tassert.True(c, srs2PeerStatus.Online, \"r2 should be online\")\n\t\tassert.True(c, srs3PeerStatus.Online, \"r3 should be online\")\n\n\t\tassert.Nil(c, srs1PeerStatus.PrimaryRoutes)\n\t\tassert.NotNil(c, srs2PeerStatus.PrimaryRoutes)\n\t\tassert.Nil(c, srs3PeerStatus.PrimaryRoutes)\n\n\t\trequirePeerSubnetRoutesWithCollect(c, srs1PeerStatus, nil)\n\t\trequirePeerSubnetRoutesWithCollect(c, srs2PeerStatus, []netip.Prefix{pref})\n\t\trequirePeerSubnetRoutesWithCollect(c, srs3PeerStatus, nil)\n\n\t\tif srs2PeerStatus.PrimaryRoutes != nil {\n\t\t\tassert.Contains(c,\n\t\t\t\tsrs2PeerStatus.PrimaryRoutes.AsSlice(),\n\t\t\t\tpref,\n\t\t\t)\n\t\t}\n\t}, propagationTime, 200*time.Millisecond, \"Failover verification: Router 1 offline, Router 2 should be new PRIMARY with routes, Router 3 still STANDBY\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tresult, err := client.Curl(weburl)\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, result, 13)\n\t}, propagationTime, 200*time.Millisecond, \"Verifying client can reach webservice through router 2 after failover\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\ttr, err := client.Traceroute(webip)\n\t\tassert.NoError(c, err)\n\n\t\tip, err := subRouter2.IPv4()\n\t\tif !assert.NoError(c, err, \"failed to get IPv4 for subRouter2\") {\n\t\t\treturn\n\t\t}\n\n\t\tassertTracerouteViaIPWithCollect(c, tr, ip)\n\t}, propagationTime, 200*time.Millisecond, \"Verifying traceroute 
goes through router 2 after failover\")\n\n\t// Validate primary routes table state - router 2 is now primary after router 1 failure\n\tvalidatePrimaryRoutes(t, headscale, &routes.DebugRoutes{\n\t\tAvailableRoutes: map[types.NodeID][]netip.Prefix{\n\t\t\t// Router 1 is disconnected, so not in AvailableRoutes\n\t\t\ttypes.NodeID(MustFindNode(subRouter2.Hostname(), nodes).GetId()): {pref},\n\t\t\ttypes.NodeID(MustFindNode(subRouter3.Hostname(), nodes).GetId()): {pref},\n\t\t},\n\t\tPrimaryRoutes: map[string]types.NodeID{\n\t\t\tpref.String(): types.NodeID(MustFindNode(subRouter2.Hostname(), nodes).GetId()),\n\t\t},\n\t}, \"Router 2 should be primary after router 1 failure\")\n\n\tcheckFailureAndPrintRoutes(t, client)\n\n\t// Take down subnet router 2, leaving none available\n\tt.Logf(\"=== FAILOVER TEST: Taking down NEW PRIMARY router 2 (%s) ===\", subRouter2.Hostname())\n\tt.Logf(\"[%s] Starting test section\", time.Now().Format(TimestampFormat))\n\tt.Logf(\"  Current state: Router 1 OFFLINE, Router 2 PRIMARY (serving traffic), Router 3 STANDBY\")\n\tt.Logf(\"  Action: Shutting down router 2 to simulate cascading failure\")\n\tt.Logf(\"  Expected: Router 3 (%s) should become new PRIMARY (last remaining router)\", subRouter3.Hostname())\n\tt.Logf(\"  Expected: With only 1 router left, HA is effectively disabled\")\n\tt.Logf(\"  Expected: Traffic continues through router 3\")\n\n\terr = subRouter2.Down()\n\trequire.NoError(t, err)\n\n\t// Wait for router status changes after r2 goes down\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tclientStatus, err = client.Status()\n\t\tassert.NoError(c, err)\n\n\t\tsrs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey]\n\t\tsrs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey]\n\t\tsrs3PeerStatus = clientStatus.Peer[srs3.Self.PublicKey]\n\n\t\tassert.NotNil(c, srs1PeerStatus, \"Router 1 peer should exist\")\n\t\tassert.NotNil(c, srs2PeerStatus, \"Router 2 peer should exist\")\n\t\tassert.NotNil(c, srs3PeerStatus, \"Router 3 peer should exist\")\n\n\t\tif srs1PeerStatus == nil || srs2PeerStatus == nil || srs3PeerStatus == nil {\n\t\t\treturn\n\t\t}\n\n\t\tassert.False(c, srs1PeerStatus.Online, \"Router 1 should still be offline\")\n\t\tassert.False(c, srs2PeerStatus.Online, \"Router 2 should now be offline after failure\")\n\t\tassert.True(c, srs3PeerStatus.Online, \"Router 3 should be online and taking over as PRIMARY\")\n\n\t\tassert.Nil(c, srs1PeerStatus.PrimaryRoutes)\n\t\tassert.Nil(c, srs2PeerStatus.PrimaryRoutes)\n\t\tassert.NotNil(c, srs3PeerStatus.PrimaryRoutes)\n\n\t\trequirePeerSubnetRoutesWithCollect(c, srs1PeerStatus, nil)\n\t\trequirePeerSubnetRoutesWithCollect(c, srs2PeerStatus, nil)\n\t\trequirePeerSubnetRoutesWithCollect(c, srs3PeerStatus, []netip.Prefix{pref})\n\t}, propagationTime, 200*time.Millisecond, \"Second failover verification: Router 1 & 2 offline, Router 3 should be new PRIMARY (last router standing) with routes\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tresult, err := client.Curl(weburl)\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, result, 13)\n\t}, propagationTime, 200*time.Millisecond, \"Verifying client can reach webservice through router 3 after second failover\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\ttr, err := client.Traceroute(webip)\n\t\tassert.NoError(c, err)\n\n\t\tip, err := subRouter3.IPv4()\n\t\tif !assert.NoError(c, err, \"failed to get IPv4 for subRouter3\") {\n\t\t\treturn\n\t\t}\n\n\t\tassertTracerouteViaIPWithCollect(c, tr, ip)\n\t}, 
propagationTime, 200*time.Millisecond, \"Verifying traceroute goes through router 3 after second failover\")\n\n\t// Validate primary routes table state - router 3 is now primary after router 2 failure\n\tvalidatePrimaryRoutes(t, headscale, &routes.DebugRoutes{\n\t\tAvailableRoutes: map[types.NodeID][]netip.Prefix{\n\t\t\t// Routers 1 and 2 are disconnected, so not in AvailableRoutes\n\t\t\ttypes.NodeID(MustFindNode(subRouter3.Hostname(), nodes).GetId()): {pref},\n\t\t},\n\t\tPrimaryRoutes: map[string]types.NodeID{\n\t\t\tpref.String(): types.NodeID(MustFindNode(subRouter3.Hostname(), nodes).GetId()),\n\t\t},\n\t}, \"Router 3 should be primary after router 2 failure\")\n\n\tcheckFailureAndPrintRoutes(t, client)\n\n\t// Bring up subnet router 1, making the route available from there.\n\tt.Logf(\"=== RECOVERY TEST: Bringing router 1 (%s) back online ===\", subRouter1.Hostname())\n\tt.Logf(\"[%s] Starting test section\", time.Now().Format(TimestampFormat))\n\tt.Logf(\"  Current state: Router 1 OFFLINE, Router 2 OFFLINE, Router 3 PRIMARY (only router)\")\n\tt.Logf(\"  Action: Starting router 1 to restore HA capability\")\n\tt.Logf(\"  Expected: Router 3 remains PRIMARY (stability - no unnecessary failover)\")\n\tt.Logf(\"  Expected: Router 1 becomes STANDBY (ready for HA)\")\n\tt.Logf(\"  Expected: HA is restored with 2 routers available\")\n\n\terr = subRouter1.Up()\n\trequire.NoError(t, err)\n\n\t// Wait for router status changes after r1 comes back up\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tclientStatus, err = client.Status()\n\t\tassert.NoError(c, err)\n\n\t\tsrs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey]\n\t\tsrs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey]\n\t\tsrs3PeerStatus = clientStatus.Peer[srs3.Self.PublicKey]\n\n\t\tassert.NotNil(c, srs1PeerStatus, \"Router 1 peer should exist\")\n\t\tassert.NotNil(c, srs2PeerStatus, \"Router 2 peer should exist\")\n\t\tassert.NotNil(c, srs3PeerStatus, \"Router 3 peer should exist\")\n\n\t\tif srs1PeerStatus == nil || srs2PeerStatus == nil || srs3PeerStatus == nil {\n\t\t\treturn\n\t\t}\n\n\t\tassert.True(c, srs1PeerStatus.Online, \"Router 1 should be back online as STANDBY\")\n\t\tassert.False(c, srs2PeerStatus.Online, \"Router 2 should still be offline\")\n\t\tassert.True(c, srs3PeerStatus.Online, \"Router 3 should remain online as PRIMARY\")\n\n\t\tassert.Nil(c, srs1PeerStatus.PrimaryRoutes)\n\t\tassert.Nil(c, srs2PeerStatus.PrimaryRoutes)\n\t\tassert.NotNil(c, srs3PeerStatus.PrimaryRoutes)\n\n\t\trequirePeerSubnetRoutesWithCollect(c, srs1PeerStatus, nil)\n\t\trequirePeerSubnetRoutesWithCollect(c, srs2PeerStatus, nil)\n\t\trequirePeerSubnetRoutesWithCollect(c, srs3PeerStatus, []netip.Prefix{pref})\n\n\t\tif srs3PeerStatus.PrimaryRoutes != nil {\n\t\t\tassert.Contains(c,\n\t\t\t\tsrs3PeerStatus.PrimaryRoutes.AsSlice(),\n\t\t\t\tpref,\n\t\t\t)\n\t\t}\n\t}, propagationTime, 200*time.Millisecond, \"Recovery verification: Router 1 back online as STANDBY, Router 3 remains PRIMARY (no flapping) with routes\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tresult, err := client.Curl(weburl)\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, result, 13)\n\t}, propagationTime, 200*time.Millisecond, \"Verifying client can still reach webservice through router 3 after router 1 recovery\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\ttr, err := client.Traceroute(webip)\n\t\tassert.NoError(c, err)\n\n\t\tip, err := subRouter3.IPv4()\n\t\tif !assert.NoError(c, err, \"failed to get IPv4 for 
subRouter3\") {\n\t\t\treturn\n\t\t}\n\n\t\tassertTracerouteViaIPWithCollect(c, tr, ip)\n\t}, propagationTime, 200*time.Millisecond, \"Verifying traceroute still goes through router 3 after router 1 recovery\")\n\n\t// Validate primary routes table state - router 3 remains primary after router 1 comes back\n\tvalidatePrimaryRoutes(t, headscale, &routes.DebugRoutes{\n\t\tAvailableRoutes: map[types.NodeID][]netip.Prefix{\n\t\t\ttypes.NodeID(MustFindNode(subRouter1.Hostname(), nodes).GetId()): {pref},\n\t\t\t// Router 2 is still disconnected\n\t\t\ttypes.NodeID(MustFindNode(subRouter3.Hostname(), nodes).GetId()): {pref},\n\t\t},\n\t\tPrimaryRoutes: map[string]types.NodeID{\n\t\t\tpref.String(): types.NodeID(MustFindNode(subRouter3.Hostname(), nodes).GetId()),\n\t\t},\n\t}, \"Router 3 should remain primary after router 1 recovery\")\n\n\tcheckFailureAndPrintRoutes(t, client)\n\n\t// Bring up subnet router 2, should result in no change.\n\tt.Logf(\"=== FULL RECOVERY TEST: Bringing router 2 (%s) back online ===\", subRouter2.Hostname())\n\tt.Logf(\"[%s] Starting test section\", time.Now().Format(TimestampFormat))\n\tt.Logf(\"  Current state: Router 1 STANDBY, Router 2 OFFLINE, Router 3 PRIMARY\")\n\tt.Logf(\"  Action: Starting router 2 to restore full HA (3 routers)\")\n\tt.Logf(\"  Expected: Router 3 (%s) remains PRIMARY (stability - avoid unnecessary failovers)\", subRouter3.Hostname())\n\tt.Logf(\"  Expected: Router 1 (%s) remains first STANDBY\", subRouter1.Hostname())\n\tt.Logf(\"  Expected: Router 2 (%s) becomes second STANDBY\", subRouter2.Hostname())\n\tt.Logf(\"  Expected: Full HA restored with all 3 routers online\")\n\n\terr = subRouter2.Up()\n\trequire.NoError(t, err)\n\n\t// Wait for nodestore batch processing to complete and online status to be updated\n\t// NodeStore batching timeout is 500ms, so we wait up to 10 seconds for all routers to be online\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tclientStatus, err = client.Status()\n\t\tassert.NoError(c, err)\n\n\t\tsrs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey]\n\t\tsrs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey]\n\t\tsrs3PeerStatus = clientStatus.Peer[srs3.Self.PublicKey]\n\n\t\tassert.NotNil(c, srs1PeerStatus, \"Router 1 peer should exist\")\n\t\tassert.NotNil(c, srs2PeerStatus, \"Router 2 peer should exist\")\n\t\tassert.NotNil(c, srs3PeerStatus, \"Router 3 peer should exist\")\n\n\t\tif srs1PeerStatus == nil || srs2PeerStatus == nil || srs3PeerStatus == nil {\n\t\t\treturn\n\t\t}\n\n\t\tassert.True(c, srs1PeerStatus.Online, \"Router 1 should be online as STANDBY\")\n\t\tassert.True(c, srs2PeerStatus.Online, \"Router 2 should be back online as STANDBY\")\n\t\tassert.True(c, srs3PeerStatus.Online, \"Router 3 should remain online as PRIMARY\")\n\n\t\tassert.Nil(c, srs1PeerStatus.PrimaryRoutes)\n\t\tassert.Nil(c, srs2PeerStatus.PrimaryRoutes)\n\t\tassert.NotNil(c, srs3PeerStatus.PrimaryRoutes)\n\n\t\trequirePeerSubnetRoutesWithCollect(c, srs1PeerStatus, nil)\n\t\trequirePeerSubnetRoutesWithCollect(c, srs2PeerStatus, nil)\n\t\trequirePeerSubnetRoutesWithCollect(c, srs3PeerStatus, []netip.Prefix{pref})\n\n\t\tif srs3PeerStatus.PrimaryRoutes != nil {\n\t\t\tassert.Contains(c,\n\t\t\t\tsrs3PeerStatus.PrimaryRoutes.AsSlice(),\n\t\t\t\tpref,\n\t\t\t)\n\t\t}\n\t}, 10*time.Second, 500*time.Millisecond, \"Full recovery verification: All 3 routers online, Router 3 remains PRIMARY (no flapping) with routes\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tresult, err := 
client.Curl(weburl)\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, result, 13)\n\t}, propagationTime, 200*time.Millisecond, \"Verifying client can reach webservice through router 3 after full recovery\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\ttr, err := client.Traceroute(webip)\n\t\tassert.NoError(c, err)\n\n\t\tip, err := subRouter3.IPv4()\n\t\tif !assert.NoError(c, err, \"failed to get IPv4 for subRouter3\") {\n\t\t\treturn\n\t\t}\n\n\t\tassertTracerouteViaIPWithCollect(c, tr, ip)\n\t}, propagationTime, 200*time.Millisecond, \"Verifying traceroute goes through router 3 after full recovery\")\n\n\t// Validate primary routes table state - router 3 remains primary after all routers back online\n\tvalidatePrimaryRoutes(t, headscale, &routes.DebugRoutes{\n\t\tAvailableRoutes: map[types.NodeID][]netip.Prefix{\n\t\t\ttypes.NodeID(MustFindNode(subRouter1.Hostname(), nodes).GetId()): {pref},\n\t\t\ttypes.NodeID(MustFindNode(subRouter2.Hostname(), nodes).GetId()): {pref},\n\t\t\ttypes.NodeID(MustFindNode(subRouter3.Hostname(), nodes).GetId()): {pref},\n\t\t},\n\t\tPrimaryRoutes: map[string]types.NodeID{\n\t\t\tpref.String(): types.NodeID(MustFindNode(subRouter3.Hostname(), nodes).GetId()),\n\t\t},\n\t}, \"Router 3 should remain primary after full recovery\")\n\n\tcheckFailureAndPrintRoutes(t, client)\n\n\tt.Logf(\"=== ROUTE DISABLE TEST: Removing approved route from PRIMARY router 3 (%s) ===\", subRouter3.Hostname())\n\tt.Logf(\"[%s] Starting test section\", time.Now().Format(TimestampFormat))\n\tt.Logf(\"  Current state: Router 1 STANDBY, Router 2 STANDBY, Router 3 PRIMARY\")\n\tt.Logf(\"  Action: Disabling route approval on router 3 (route still advertised but not approved)\")\n\tt.Logf(\"  Expected: Router 1 (%s) should become new PRIMARY (lowest ID with approved route)\", subRouter1.Hostname())\n\tt.Logf(\"  Expected: Router 2 (%s) remains STANDBY\", subRouter2.Hostname())\n\tt.Logf(\"  Expected: Router 3 (%s) goes to advertised-only state (no longer serving)\", subRouter3.Hostname())\n\t_, err = headscale.ApproveRoutes(MustFindNode(subRouter3.Hostname(), nodes).GetId(), []netip.Prefix{})\n\trequire.NoError(t, err)\n\n\t// Wait for nodestore batch processing and route state changes to complete\n\t// NodeStore batching timeout is 500ms, so we wait up to 10 seconds for route failover\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err = headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 6)\n\n\t\t// After disabling route on r3, r1 should become primary with 1 subnet route\n\t\trequireNodeRouteCountWithCollect(c, MustFindNode(subRouter1.Hostname(), nodes), 1, 1, 1)\n\t\trequireNodeRouteCountWithCollect(c, MustFindNode(subRouter2.Hostname(), nodes), 1, 1, 0)\n\t\trequireNodeRouteCountWithCollect(c, MustFindNode(subRouter3.Hostname(), nodes), 1, 0, 0)\n\t}, 10*time.Second, 500*time.Millisecond, \"Route disable verification: Router 3 route disabled, Router 1 should be new PRIMARY, Router 2 STANDBY\")\n\n\t// Verify that the route is announced from subnet router 1\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tclientStatus, err = client.Status()\n\t\tassert.NoError(c, err)\n\n\t\tsrs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey]\n\t\tsrs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey]\n\t\tsrs3PeerStatus = clientStatus.Peer[srs3.Self.PublicKey]\n\n\t\tassert.NotNil(c, srs1PeerStatus, \"Router 1 peer should exist\")\n\t\tassert.NotNil(c, srs2PeerStatus, \"Router 2 peer should exist\")\n\t\tassert.NotNil(c, srs3PeerStatus, \"Router 3 
peer should exist\")\n\n\t\tif srs1PeerStatus == nil || srs2PeerStatus == nil || srs3PeerStatus == nil {\n\t\t\treturn\n\t\t}\n\n\t\tassert.NotNil(c, srs1PeerStatus.PrimaryRoutes)\n\t\tassert.Nil(c, srs2PeerStatus.PrimaryRoutes)\n\t\tassert.Nil(c, srs3PeerStatus.PrimaryRoutes)\n\n\t\trequirePeerSubnetRoutesWithCollect(c, srs1PeerStatus, []netip.Prefix{pref})\n\t\trequirePeerSubnetRoutesWithCollect(c, srs2PeerStatus, nil)\n\t\trequirePeerSubnetRoutesWithCollect(c, srs3PeerStatus, nil)\n\n\t\tif srs1PeerStatus.PrimaryRoutes != nil {\n\t\t\tassert.Contains(c,\n\t\t\t\tsrs1PeerStatus.PrimaryRoutes.AsSlice(),\n\t\t\t\tpref,\n\t\t\t)\n\t\t}\n\t}, propagationTime, 200*time.Millisecond, \"Verifying Router 1 becomes PRIMARY after Router 3 route disabled\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tresult, err := client.Curl(weburl)\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, result, 13)\n\t}, propagationTime, 200*time.Millisecond, \"Verifying client can reach webservice through router 1 after route disable\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\ttr, err := client.Traceroute(webip)\n\t\tassert.NoError(c, err)\n\n\t\tip, err := subRouter1.IPv4()\n\t\tif !assert.NoError(c, err, \"failed to get IPv4 for subRouter1\") {\n\t\t\treturn\n\t\t}\n\n\t\tassertTracerouteViaIPWithCollect(c, tr, ip)\n\t}, propagationTime, 200*time.Millisecond, \"Verifying traceroute goes through router 1 after route disable\")\n\n\t// Validate primary routes table state - router 1 is primary after router 3 route disabled\n\tvalidatePrimaryRoutes(t, headscale, &routes.DebugRoutes{\n\t\tAvailableRoutes: map[types.NodeID][]netip.Prefix{\n\t\t\ttypes.NodeID(MustFindNode(subRouter1.Hostname(), nodes).GetId()): {pref},\n\t\t\ttypes.NodeID(MustFindNode(subRouter2.Hostname(), nodes).GetId()): {pref},\n\t\t\t// Router 3's route is no longer approved, so not in AvailableRoutes\n\t\t},\n\t\tPrimaryRoutes: map[string]types.NodeID{\n\t\t\tpref.String(): types.NodeID(MustFindNode(subRouter1.Hostname(), nodes).GetId()),\n\t\t},\n\t}, \"Router 1 should be primary after router 3 route disabled\")\n\n\tcheckFailureAndPrintRoutes(t, client)\n\n\t// Disable the route of subnet router 1, making it failover to 2\n\tt.Logf(\"=== ROUTE DISABLE TEST: Removing approved route from NEW PRIMARY router 1 (%s) ===\", subRouter1.Hostname())\n\tt.Logf(\"[%s] Starting test section\", time.Now().Format(TimestampFormat))\n\tt.Logf(\"  Current state: Router 1 PRIMARY, Router 2 STANDBY, Router 3 advertised-only\")\n\tt.Logf(\"  Action: Disabling route approval on router 1\")\n\tt.Logf(\"  Expected: Router 2 (%s) should become new PRIMARY (only remaining approved route)\", subRouter2.Hostname())\n\tt.Logf(\"  Expected: Router 1 (%s) goes to advertised-only state\", subRouter1.Hostname())\n\tt.Logf(\"  Expected: Router 3 (%s) remains advertised-only\", subRouter3.Hostname())\n\t_, err = headscale.ApproveRoutes(MustFindNode(subRouter1.Hostname(), nodes).GetId(), []netip.Prefix{})\n\n\t// Wait for nodestore batch processing and route state changes to complete\n\t// NodeStore batching timeout is 500ms, so we wait up to 10 seconds for route failover\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err = headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 6)\n\n\t\t// After disabling route on r1, r2 should become primary with 1 subnet route\n\t\trequireNodeRouteCountWithCollect(c, MustFindNode(subRouter1.Hostname(), nodes), 1, 0, 0)\n\t\trequireNodeRouteCountWithCollect(c, 
MustFindNode(subRouter2.Hostname(), nodes), 1, 1, 1)\n\t\trequireNodeRouteCountWithCollect(c, MustFindNode(subRouter3.Hostname(), nodes), 1, 0, 0)\n\t}, 10*time.Second, 500*time.Millisecond, \"Second route disable verification: Router 1 route disabled, Router 2 should be new PRIMARY\")\n\n\t// Verify that the route is now announced from subnet router 2\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tclientStatus, err = client.Status()\n\t\tassert.NoError(c, err)\n\n\t\tsrs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey]\n\t\tsrs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey]\n\t\tsrs3PeerStatus = clientStatus.Peer[srs3.Self.PublicKey]\n\n\t\tassert.NotNil(c, srs1PeerStatus, \"Router 1 peer should exist\")\n\t\tassert.NotNil(c, srs2PeerStatus, \"Router 2 peer should exist\")\n\t\tassert.NotNil(c, srs3PeerStatus, \"Router 3 peer should exist\")\n\n\t\tif srs1PeerStatus == nil || srs2PeerStatus == nil || srs3PeerStatus == nil {\n\t\t\treturn\n\t\t}\n\n\t\tassert.Nil(c, srs1PeerStatus.PrimaryRoutes)\n\t\tassert.NotNil(c, srs2PeerStatus.PrimaryRoutes)\n\t\tassert.Nil(c, srs3PeerStatus.PrimaryRoutes)\n\n\t\trequirePeerSubnetRoutesWithCollect(c, srs1PeerStatus, nil)\n\t\trequirePeerSubnetRoutesWithCollect(c, srs2PeerStatus, []netip.Prefix{pref})\n\t\trequirePeerSubnetRoutesWithCollect(c, srs3PeerStatus, nil)\n\n\t\tif srs2PeerStatus.PrimaryRoutes != nil {\n\t\t\tassert.Contains(c,\n\t\t\t\tsrs2PeerStatus.PrimaryRoutes.AsSlice(),\n\t\t\t\tpref,\n\t\t\t)\n\t\t}\n\t}, propagationTime, 200*time.Millisecond, \"Verifying Router 2 becomes PRIMARY after Router 1 route disabled\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tresult, err := client.Curl(weburl)\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, result, 13)\n\t}, propagationTime, 200*time.Millisecond, \"Verifying client can reach webservice through router 2 after second route disable\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\ttr, err := client.Traceroute(webip)\n\t\tassert.NoError(c, err)\n\n\t\tip, err := subRouter2.IPv4()\n\t\tif !assert.NoError(c, err, \"failed to get IPv4 for subRouter2\") {\n\t\t\treturn\n\t\t}\n\n\t\tassertTracerouteViaIPWithCollect(c, tr, ip)\n\t}, propagationTime, 200*time.Millisecond, \"Verifying traceroute goes through router 2 after second route disable\")\n\n\t// Validate primary routes table state - router 2 is primary after router 1 route disabled\n\tvalidatePrimaryRoutes(t, headscale, &routes.DebugRoutes{\n\t\tAvailableRoutes: map[types.NodeID][]netip.Prefix{\n\t\t\t// Router 1's route is no longer approved, so not in AvailableRoutes\n\t\t\ttypes.NodeID(MustFindNode(subRouter2.Hostname(), nodes).GetId()): {pref},\n\t\t\t// Router 3's route is still not approved\n\t\t},\n\t\tPrimaryRoutes: map[string]types.NodeID{\n\t\t\tpref.String(): types.NodeID(MustFindNode(subRouter2.Hostname(), nodes).GetId()),\n\t\t},\n\t}, \"Router 2 should be primary after router 1 route disabled\")\n\n\tcheckFailureAndPrintRoutes(t, client)\n\n\t// Re-enable the route of subnet router 1; no primary change expected\n\tt.Logf(\"=== ROUTE RE-ENABLE TEST: Re-approving route on router 1 (%s) ===\", subRouter1.Hostname())\n\tt.Logf(\"[%s] Starting test section\", time.Now().Format(TimestampFormat))\n\tt.Logf(\"  Current state: Router 1 advertised-only, Router 2 PRIMARY, Router 3 advertised-only\")\n\tt.Logf(\"  Action: Re-enabling route approval on router 1\")\n\tt.Logf(\"  Expected: Router 2 (%s) remains PRIMARY (stability - no unnecessary flapping)\", subRouter2.Hostname())\n\tt.Logf(\"  
Expected: Router 1 (%s) becomes STANDBY (approved but not primary)\", subRouter1.Hostname())\n\tt.Logf(\"  Expected: HA fully restored with Router 2 PRIMARY and Router 1 STANDBY\")\n\n\tr1Node := MustFindNode(subRouter1.Hostname(), nodes)\n\t_, err = headscale.ApproveRoutes(\n\t\tr1Node.GetId(),\n\t\tutil.MustStringsToPrefixes(r1Node.GetAvailableRoutes()),\n\t)\n\trequire.NoError(t, err)\n\n\t// Wait for route state changes after re-enabling r1\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err = headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 6)\n\n\t\trequireNodeRouteCountWithCollect(c, MustFindNode(subRouter1.Hostname(), nodes), 1, 1, 0)\n\t\trequireNodeRouteCountWithCollect(c, MustFindNode(subRouter2.Hostname(), nodes), 1, 1, 1)\n\t\trequireNodeRouteCountWithCollect(c, MustFindNode(subRouter3.Hostname(), nodes), 1, 0, 0)\n\t}, propagationTime, 200*time.Millisecond, \"Re-enable verification: Router 1 approved as STANDBY, Router 2 remains PRIMARY (no flapping), full HA restored\")\n\n\t// Verify that router 2 still announces the route as primary\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tclientStatus, err = client.Status()\n\t\tassert.NoError(c, err)\n\n\t\tsrs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey]\n\t\tsrs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey]\n\t\tsrs3PeerStatus = clientStatus.Peer[srs3.Self.PublicKey]\n\n\t\tassert.NotNil(c, srs1PeerStatus, \"Router 1 peer should exist\")\n\t\tassert.NotNil(c, srs2PeerStatus, \"Router 2 peer should exist\")\n\t\tassert.NotNil(c, srs3PeerStatus, \"Router 3 peer should exist\")\n\n\t\tif srs1PeerStatus == nil || srs2PeerStatus == nil || srs3PeerStatus == nil {\n\t\t\treturn\n\t\t}\n\n\t\tassert.Nil(c, srs1PeerStatus.PrimaryRoutes)\n\t\tassert.NotNil(c, srs2PeerStatus.PrimaryRoutes)\n\t\tassert.Nil(c, srs3PeerStatus.PrimaryRoutes)\n\n\t\tif srs2PeerStatus.PrimaryRoutes != nil {\n\t\t\tassert.Contains(c,\n\t\t\t\tsrs2PeerStatus.PrimaryRoutes.AsSlice(),\n\t\t\t\tpref,\n\t\t\t)\n\t\t}\n\t}, propagationTime, 200*time.Millisecond, \"Verifying Router 2 remains PRIMARY after Router 1 route re-enabled\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tresult, err := client.Curl(weburl)\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, result, 13)\n\t}, propagationTime, 200*time.Millisecond, \"Verifying client can reach webservice through router 2 after route re-enable\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\ttr, err := client.Traceroute(webip)\n\t\tassert.NoError(c, err)\n\n\t\tip, err := subRouter2.IPv4()\n\t\tif !assert.NoError(c, err, \"failed to get IPv4 for subRouter2\") {\n\t\t\treturn\n\t\t}\n\n\t\tassertTracerouteViaIPWithCollect(c, tr, ip)\n\t}, propagationTime, 200*time.Millisecond, \"Verifying traceroute still goes through router 2 after route re-enable\")\n\n\t// Validate primary routes table state after router 1 re-approval\n\tvalidatePrimaryRoutes(t, headscale, &routes.DebugRoutes{\n\t\tAvailableRoutes: map[types.NodeID][]netip.Prefix{\n\t\t\ttypes.NodeID(MustFindNode(subRouter1.Hostname(), nodes).GetId()): {pref},\n\t\t\ttypes.NodeID(MustFindNode(subRouter2.Hostname(), nodes).GetId()): {pref},\n\t\t\t// Router 3 route is still not approved\n\t\t},\n\t\tPrimaryRoutes: map[string]types.NodeID{\n\t\t\tpref.String(): types.NodeID(MustFindNode(subRouter2.Hostname(), nodes).GetId()),\n\t\t},\n\t}, \"Router 2 should remain primary after router 1 re-approval\")\n\n\tcheckFailureAndPrintRoutes(t, client)\n\n\t// Enable route on node 3, we now have all 
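routers approved again.\n\t// Primary selection is sticky: re-approving a route on a standby or\n\t// advertised-only router must not steal primary status from the current\n\t// holder; failover only happens when the primary goes offline or loses\n\t// its approval. Enable route on node 3, so we again have all 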
routes re-enabled\n\tt.Logf(\"=== ROUTE RE-ENABLE TEST: Re-approving route on router 3 (%s) - Full HA Restoration ===\", subRouter3.Hostname())\n\tt.Logf(\"[%s] Starting test section\", time.Now().Format(TimestampFormat))\n\tt.Logf(\"  Current state: Router 1 STANDBY, Router 2 PRIMARY, Router 3 advertised-only\")\n\tt.Logf(\"  Action: Re-enabling route approval on router 3\")\n\tt.Logf(\"  Expected: Router 2 (%s) remains PRIMARY (stability preferred)\", subRouter2.Hostname())\n\tt.Logf(\"  Expected: Routers 1 & 3 are both STANDBY\")\n\tt.Logf(\"  Expected: Full HA restored with all 3 routers available\")\n\n\tr3Node := MustFindNode(subRouter3.Hostname(), nodes)\n\t_, err = headscale.ApproveRoutes(\n\t\tr3Node.GetId(),\n\t\tutil.MustStringsToPrefixes(r3Node.GetAvailableRoutes()),\n\t)\n\trequire.NoError(t, err)\n\n\t// Wait for route state changes after re-enabling r3\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err = headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 6)\n\t\trequire.GreaterOrEqual(t, len(nodes), 3, \"need at least 3 nodes to avoid panic\")\n\t\t// After router 3 re-approval: Router 2 remains PRIMARY, Routers 1&3 are STANDBY\n\t\t// SubnetRoutes should only show routes for PRIMARY node (actively serving)\n\t\trequireNodeRouteCountWithCollect(c, nodes[0], 1, 1, 0) // Router 1: STANDBY (available, approved, but not serving)\n\t\trequireNodeRouteCountWithCollect(c, nodes[1], 1, 1, 1) // Router 2: PRIMARY (available, approved, and serving)\n\t\trequireNodeRouteCountWithCollect(c, nodes[2], 1, 1, 0) // Router 3: STANDBY (available, approved, but not serving)\n\t}, propagationTime, 200*time.Millisecond, \"Waiting for route state after router 3 re-approval\")\n\n\t// Validate primary routes table state after router 3 re-approval\n\tvalidatePrimaryRoutes(t, headscale, &routes.DebugRoutes{\n\t\tAvailableRoutes: map[types.NodeID][]netip.Prefix{\n\t\t\ttypes.NodeID(MustFindNode(subRouter1.Hostname(), nodes).GetId()): {pref},\n\t\t\ttypes.NodeID(MustFindNode(subRouter2.Hostname(), nodes).GetId()): {pref},\n\t\t\ttypes.NodeID(MustFindNode(subRouter3.Hostname(), nodes).GetId()): {pref},\n\t\t},\n\t\tPrimaryRoutes: map[string]types.NodeID{\n\t\t\tpref.String(): types.NodeID(MustFindNode(subRouter2.Hostname(), nodes).GetId()),\n\t\t},\n\t}, \"Router 2 should remain primary after router 3 re-approval\")\n\n\tcheckFailureAndPrintRoutes(t, client)\n}\n\n// TestSubnetRouteACL verifies that subnet routes are distributed\n// as expected when ACLs are activated.\n// It covers the issue reported in\n// https://github.com/juanfont/headscale/issues/1604\nfunc TestSubnetRouteACL(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tuser := \"user4\"\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 2,\n\t\tUsers:        []string{user},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoErrorf(t, err, \"failed to create scenario: %s\", err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{\n\t\ttsic.WithAcceptRoutes(),\n\t}, hsic.WithTestName(\"rt-subnetacl\"), hsic.WithACLPolicy(\n\t\t&policyv2.Policy{\n\t\t\tGroups: policyv2.Groups{\n\t\t\t\tpolicyv2.Group(\"group:admins\"): []policyv2.Username{policyv2.Username(user + \"@\")},\n\t\t\t},\n\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t{\n\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\tSources: []policyv2.Alias{groupp(\"group:admins\")},\n\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\taliasWithPorts(groupp(\"group:admins\"), 
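\n\t\t\t\t\t\t// First rule: members of group:admins may reach each other on any\n\t\t\t\t\t\t// port; the second rule below additionally grants 10.33.0.0/16.\n\t\t\t\t\t\t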
tailcfg.PortRangeAny),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\tSources: []policyv2.Alias{groupp(\"group:admins\")},\n\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\taliasWithPorts(prefixp(\"10.33.0.0/16\"), tailcfg.PortRangeAny),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t))\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\texpectedRoutes := map[string]string{\n\t\t\"1\": \"10.33.0.0/16\",\n\t}\n\n\t// Sort nodes by ID\n\tsort.SliceStable(allClients, func(i, j int) bool {\n\t\tstatusI := allClients[i].MustStatus()\n\t\tstatusJ := allClients[j].MustStatus()\n\n\t\treturn statusI.Self.ID < statusJ.Self.ID\n\t})\n\n\tsubRouter1 := allClients[0]\n\n\tclient := allClients[1]\n\n\tfor _, client := range allClients {\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tstatus, err := client.Status()\n\t\t\tassert.NoError(c, err)\n\n\t\t\tif route, ok := expectedRoutes[string(status.Self.ID)]; ok {\n\t\t\t\tcommand := []string{\n\t\t\t\t\t\"tailscale\",\n\t\t\t\t\t\"set\",\n\t\t\t\t\t\"--advertise-routes=\" + route,\n\t\t\t\t}\n\t\t\t\t_, _, err = client.Execute(command)\n\t\t\t\tassert.NoErrorf(c, err, \"failed to advertise route: %s\", err)\n\t\t\t}\n\t\t}, 5*time.Second, 200*time.Millisecond, \"Configuring route advertisements\")\n\t}\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t// Wait for route advertisements to propagate to the server\n\tvar nodes []*v1.Node\n\n\trequire.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tvar err error\n\n\t\tnodes, err = headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 2)\n\n\t\t// Find the node that should have the route by checking node IDs\n\t\tvar (\n\t\t\trouteNode *v1.Node\n\t\t\totherNode *v1.Node\n\t\t)\n\n\t\tfor _, node := range nodes {\n\t\t\tnodeIDStr := strconv.FormatUint(node.GetId(), 10)\n\t\t\tif _, shouldHaveRoute := expectedRoutes[nodeIDStr]; shouldHaveRoute {\n\t\t\t\trouteNode = node\n\t\t\t} else {\n\t\t\t\totherNode = node\n\t\t\t}\n\t\t}\n\n\t\tassert.NotNil(c, routeNode, \"could not find node that should have route\")\n\t\tassert.NotNil(c, otherNode, \"could not find node that should not have route\")\n\n\t\t// After NodeStore fix: routes are properly tracked in route manager\n\t\t// This test uses a policy with NO auto-approvers, so routes should be:\n\t\t// announced=1, approved=0, subnet=0 (routes announced but not approved)\n\t\trequireNodeRouteCountWithCollect(c, routeNode, 1, 0, 0)\n\t\trequireNodeRouteCountWithCollect(c, otherNode, 0, 0, 0)\n\t}, 10*time.Second, 100*time.Millisecond, \"route advertisements should propagate to server\")\n\n\t// Verify that no routes have been sent to the clients;\n\t// they are not yet approved.\n\tfor _, client := range allClients {\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tstatus, err := client.Status()\n\t\t\tassert.NoError(c, err)\n\n\t\t\tfor _, peerKey := range status.Peers() {\n\t\t\t\tpeerStatus := status.Peer[peerKey]\n\n\t\t\t\tassert.Nil(c, peerStatus.PrimaryRoutes)\n\t\t\t\trequirePeerSubnetRoutesWithCollect(c, peerStatus, nil)\n\t\t\t}\n\t\t}, 5*time.Second, 200*time.Millisecond, \"Verifying no routes are active before approval\")\n\t}\n\n\t_, err = 
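\n\t// Approve the advertised 10.33.0.0/16 on node 1. A rough CLI equivalent\n\t// (a sketch; the test calls ApproveRoutes on the control server directly)\n\t// would be:\n\t//\n\t//   headscale nodes approve-routes --identifier 1 --routes 10.33.0.0/16\n\t//\n\t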
headscale.ApproveRoutes(\n\t\t1,\n\t\t[]netip.Prefix{netip.MustParsePrefix(expectedRoutes[\"1\"])},\n\t)\n\trequire.NoError(t, err)\n\n\t// Wait for route state changes to propagate to nodes\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err = headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 2)\n\n\t\trequireNodeRouteCountWithCollect(c, nodes[0], 1, 1, 1)\n\t\trequireNodeRouteCountWithCollect(c, nodes[1], 0, 0, 0)\n\t}, 10*time.Second, 500*time.Millisecond, \"route state changes should propagate to nodes\")\n\n\t// Verify that the client has routes from the primary machine\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tsrs1, err := subRouter1.Status()\n\t\tassert.NoError(c, err)\n\n\t\tclientStatus, err := client.Status()\n\t\tassert.NoError(c, err)\n\n\t\tsrs1PeerStatus := clientStatus.Peer[srs1.Self.PublicKey]\n\n\t\tassert.NotNil(c, srs1PeerStatus, \"Router 1 peer should exist\")\n\n\t\tif srs1PeerStatus == nil {\n\t\t\treturn\n\t\t}\n\n\t\trequirePeerSubnetRoutesWithCollect(c, srs1PeerStatus, []netip.Prefix{netip.MustParsePrefix(expectedRoutes[\"1\"])})\n\t}, 5*time.Second, 200*time.Millisecond, \"Verifying client can see subnet routes from router\")\n\n\t// Wait for packet filter updates to propagate to client netmap\n\twantClientFilter := []filter.Match{\n\t\t{\n\t\t\tIPProto: views.SliceOf([]ipproto.Proto{\n\t\t\t\tipproto.TCP, ipproto.UDP, ipproto.ICMPv4, ipproto.ICMPv6,\n\t\t\t}),\n\t\t\tSrcs: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"100.64.0.1/32\"),\n\t\t\t\tnetip.MustParsePrefix(\"100.64.0.2/32\"),\n\t\t\t\tnetip.MustParsePrefix(\"fd7a:115c:a1e0::1/128\"),\n\t\t\t\tnetip.MustParsePrefix(\"fd7a:115c:a1e0::2/128\"),\n\t\t\t},\n\t\t\tDsts: []filter.NetPortRange{\n\t\t\t\t{\n\t\t\t\t\tNet:   netip.MustParsePrefix(\"100.64.0.2/32\"),\n\t\t\t\t\tPorts: allPorts,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tNet:   netip.MustParsePrefix(\"fd7a:115c:a1e0::2/128\"),\n\t\t\t\t\tPorts: allPorts,\n\t\t\t\t},\n\t\t\t},\n\t\t\tCaps: []filter.CapMatch{},\n\t\t},\n\t}\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tclientNm, err := client.Netmap()\n\t\tassert.NoError(c, err)\n\n\t\tif diff := cmpdiff.Diff(wantClientFilter, clientNm.PacketFilter, util.ViewSliceIPProtoComparer, util.PrefixComparer); diff != \"\" {\n\t\t\tassert.Fail(c, fmt.Sprintf(\"Client (%s) filter, unexpected result (-want +got):\\n%s\", client.Hostname(), diff))\n\t\t}\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for client packet filter to update\")\n\n\t// Wait for packet filter updates to propagate to subnet router netmap\n\t// The two ACL rules (group:admins -> group:admins:* and group:admins -> 10.33.0.0/16:*)\n\t// are merged into one filter rule since they share the same SrcIPs and IPProto.\n\twantSubnetFilter := []filter.Match{\n\t\t{\n\t\t\tIPProto: views.SliceOf([]ipproto.Proto{\n\t\t\t\tipproto.TCP, ipproto.UDP, ipproto.ICMPv4, ipproto.ICMPv6,\n\t\t\t}),\n\t\t\tSrcs: []netip.Prefix{\n\t\t\t\tnetip.MustParsePrefix(\"100.64.0.1/32\"),\n\t\t\t\tnetip.MustParsePrefix(\"100.64.0.2/32\"),\n\t\t\t\tnetip.MustParsePrefix(\"fd7a:115c:a1e0::1/128\"),\n\t\t\t\tnetip.MustParsePrefix(\"fd7a:115c:a1e0::2/128\"),\n\t\t\t},\n\t\t\tDsts: []filter.NetPortRange{\n\t\t\t\t{\n\t\t\t\t\tNet:   netip.MustParsePrefix(\"100.64.0.1/32\"),\n\t\t\t\t\tPorts: allPorts,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tNet:   netip.MustParsePrefix(\"fd7a:115c:a1e0::1/128\"),\n\t\t\t\t\tPorts: allPorts,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tNet:   
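\n\t\t\t\t\t// The approved subnet appears as a destination in the router's own\n\t\t\t\t\t// filter so that it is allowed to forward traffic into 10.33.0.0/16.\n\t\t\t\t\t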
netip.MustParsePrefix(\"10.33.0.0/16\"),\n\t\t\t\t\tPorts: allPorts,\n\t\t\t\t},\n\t\t\t},\n\t\t\tCaps: []filter.CapMatch{},\n\t\t},\n\t}\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tsubnetNm, err := subRouter1.Netmap()\n\t\tassert.NoError(c, err)\n\n\t\tif diff := cmpdiff.Diff(wantSubnetFilter, subnetNm.PacketFilter, util.ViewSliceIPProtoComparer, util.PrefixComparer); diff != \"\" {\n\t\t\tassert.Fail(c, fmt.Sprintf(\"Subnet (%s) filter, unexpected result (-want +got):\\n%s\", subRouter1.Hostname(), diff))\n\t\t}\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for subnet router packet filter to update\")\n}\n\n// TestEnablingExitRoutes tests enabling exit routes for clients.\n// Its more or less the same as TestEnablingRoutes, but with the --advertise-exit-node flag\n// set during login instead of set.\nfunc TestEnablingExitRoutes(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tuser := \"user2\" //nolint:goconst // test-specific value, not related to userToDelete constant\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 2,\n\t\tUsers:        []string{user},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoErrorf(t, err, \"failed to create scenario\")\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{\n\t\ttsic.WithExtraLoginArgs([]string{\"--advertise-exit-node\"}),\n\t}, hsic.WithTestName(\"rt-exitroute\"))\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\tvar nodes []*v1.Node\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tvar err error\n\n\t\tnodes, err = headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 2)\n\n\t\trequireNodeRouteCountWithCollect(c, nodes[0], 2, 0, 0)\n\t\trequireNodeRouteCountWithCollect(c, nodes[1], 2, 0, 0)\n\t}, 10*time.Second, 200*time.Millisecond, \"Waiting for route advertisements to propagate\")\n\n\t// Verify that no routes has been sent to the client,\n\t// they are not yet enabled.\n\tfor _, client := range allClients {\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tstatus, err := client.Status()\n\t\t\tassert.NoError(c, err)\n\n\t\t\tfor _, peerKey := range status.Peers() {\n\t\t\t\tpeerStatus := status.Peer[peerKey]\n\n\t\t\t\tassert.Nil(c, peerStatus.PrimaryRoutes)\n\t\t\t}\n\t\t}, 5*time.Second, 200*time.Millisecond, \"Verifying no exit routes are active before approval\")\n\t}\n\n\t// Enable all routes, but do v4 on one and v6 on other to ensure they\n\t// are both added since they are exit routes.\n\t_, err = headscale.ApproveRoutes(\n\t\tnodes[0].GetId(),\n\t\t[]netip.Prefix{tsaddr.AllIPv4()},\n\t)\n\trequire.NoError(t, err)\n\t_, err = headscale.ApproveRoutes(\n\t\tnodes[1].GetId(),\n\t\t[]netip.Prefix{tsaddr.AllIPv6()},\n\t)\n\trequire.NoError(t, err)\n\n\t// Wait for route state changes to propagate\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err = headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 2)\n\n\t\trequireNodeRouteCountWithCollect(c, nodes[0], 2, 2, 2)\n\t\trequireNodeRouteCountWithCollect(c, nodes[1], 2, 2, 2)\n\t}, 10*time.Second, 500*time.Millisecond, \"route state changes should propagate to both nodes\")\n\n\t// Wait for route state changes to propagate to 
clients\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t// Verify that the clients can see the new routes\n\t\tfor _, client := range allClients {\n\t\t\tstatus, err := client.Status()\n\t\t\tassert.NoError(c, err)\n\n\t\t\tfor _, peerKey := range status.Peers() {\n\t\t\t\tpeerStatus := status.Peer[peerKey]\n\n\t\t\t\tassert.NotNil(c, peerStatus.AllowedIPs)\n\n\t\t\t\tif peerStatus.AllowedIPs != nil {\n\t\t\t\t\tassert.Len(c, peerStatus.AllowedIPs.AsSlice(), 4)\n\t\t\t\t\tassert.Contains(c, peerStatus.AllowedIPs.AsSlice(), tsaddr.AllIPv4())\n\t\t\t\t\tassert.Contains(c, peerStatus.AllowedIPs.AsSlice(), tsaddr.AllIPv6())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}, 10*time.Second, 500*time.Millisecond, \"clients should see new routes\")\n}\n\n// TestSubnetRouterMultiNetwork is an evolution of the subnet router test.\n// This test will set up multiple docker networks and use two isolated tailscale\n// clients and a service available in one of the networks to validate that a\n// subnet router is working as expected.\nfunc TestSubnetRouterMultiNetwork(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 1,\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t\tNetworks: map[string][]string{\n\t\t\t\"usernet1\": {\"user1\"},\n\t\t\t\"usernet2\": {\"user2\"},\n\t\t},\n\t\tExtraService: map[string][]extraServiceFunc{\n\t\t\t\"usernet1\": {Webservice},\n\t\t},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoErrorf(t, err, \"failed to create scenario: %s\", err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{tsic.WithAcceptRoutes()},\n\t\thsic.WithTestName(\"rt-multinet\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\tassert.NotNil(t, headscale)\n\n\tpref, err := scenario.SubnetOfNetwork(\"usernet1\")\n\trequire.NoError(t, err)\n\n\tvar user1c, user2c TailscaleClient\n\n\tfor _, c := range allClients {\n\t\ts := c.MustStatus()\n\t\tif s.User[s.Self.UserID].LoginName == \"user1@test.no\" {\n\t\t\tuser1c = c\n\t\t}\n\n\t\tif s.User[s.Self.UserID].LoginName == \"user2@test.no\" {\n\t\t\tuser2c = c\n\t\t}\n\t}\n\n\trequire.NotNil(t, user1c)\n\trequire.NotNil(t, user2c)\n\n\t// Advertise the route for the Docker subnet of user1\n\tcommand := []string{\n\t\t\"tailscale\",\n\t\t\"set\",\n\t\t\"--advertise-routes=\" + pref.String(),\n\t}\n\t_, _, err = user1c.Execute(command)\n\trequire.NoErrorf(t, err, \"failed to advertise route: %s\", err)\n\n\tvar nodes []*v1.Node\n\t// Wait for route advertisements to propagate to NodeStore\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tvar err error\n\n\t\tnodes, err = headscale.ListNodes()\n\t\tassert.NoError(ct, err)\n\t\tassert.Len(ct, nodes, 2)\n\t\trequireNodeRouteCountWithCollect(ct, nodes[0], 1, 0, 0)\n\t}, 10*time.Second, 100*time.Millisecond, \"route advertisements should propagate\")\n\n\t// Verify that no routes have been sent to the client;\n\t// they are not yet enabled.\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tstatus, err := user1c.Status()\n\t\tassert.NoError(c, err)\n\n\t\tfor _, peerKey := range status.Peers() {\n\t\t\tpeerStatus := status.Peer[peerKey]\n\n\t\t\tassert.Nil(c, peerStatus.PrimaryRoutes)\n\t\t\trequirePeerSubnetRoutesWithCollect(c, peerStatus, nil)\n\t\t}\n\t}, 
5*time.Second, 200*time.Millisecond, \"Verifying no routes are active before approval\")\n\n\t// Enable route\n\t_, err = headscale.ApproveRoutes(\n\t\tnodes[0].GetId(),\n\t\t[]netip.Prefix{*pref},\n\t)\n\trequire.NoError(t, err)\n\n\t// Wait for route state changes to propagate to nodes\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tvar err error\n\n\t\tnodes, err = headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 2)\n\t\trequireNodeRouteCountWithCollect(c, nodes[0], 1, 1, 1)\n\t}, 10*time.Second, 500*time.Millisecond, \"route state changes should propagate to nodes\")\n\n\t// Verify that the routes have been sent to the client\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tstatus, err := user2c.Status()\n\t\tassert.NoError(c, err)\n\n\t\tfor _, peerKey := range status.Peers() {\n\t\t\tpeerStatus := status.Peer[peerKey]\n\n\t\t\tif peerStatus.PrimaryRoutes != nil {\n\t\t\t\tassert.Contains(c, peerStatus.PrimaryRoutes.AsSlice(), *pref)\n\t\t\t}\n\n\t\t\trequirePeerSubnetRoutesWithCollect(c, peerStatus, []netip.Prefix{*pref})\n\t\t}\n\t}, 10*time.Second, 500*time.Millisecond, \"routes should be visible to client\")\n\n\tusernet1, err := scenario.Network(\"usernet1\")\n\trequire.NoError(t, err)\n\n\tservices, err := scenario.Services(\"usernet1\")\n\trequire.NoError(t, err)\n\trequire.Len(t, services, 1)\n\n\tweb := services[0]\n\twebip := netip.MustParseAddr(web.GetIPInNetwork(usernet1))\n\n\turl := fmt.Sprintf(\"http://%s/etc/hostname\", webip)\n\tt.Logf(\"url from %s to %s\", user2c.Hostname(), url)\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tresult, err := user2c.Curl(url)\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, result, 13)\n\t}, 5*time.Second, 200*time.Millisecond, \"Verifying client can reach webservice through subnet route\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\ttr, err := user2c.Traceroute(webip)\n\t\tassert.NoError(c, err)\n\n\t\tip, err := user1c.IPv4()\n\t\tif !assert.NoError(c, err, \"failed to get IPv4 for user1c\") {\n\t\t\treturn\n\t\t}\n\n\t\tassertTracerouteViaIPWithCollect(c, tr, ip)\n\t}, 5*time.Second, 200*time.Millisecond, \"Verifying traceroute goes through subnet router\")\n}\n\nfunc TestSubnetRouterMultiNetworkExitNode(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 1,\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t\tNetworks: map[string][]string{\n\t\t\t\"usernet1\": {\"user1\"},\n\t\t\t\"usernet2\": {\"user2\"},\n\t\t},\n\t\tExtraService: map[string][]extraServiceFunc{\n\t\t\t\"usernet1\": {Webservice},\n\t\t},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoErrorf(t, err, \"failed to create scenario: %s\", err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{},\n\t\thsic.WithTestName(\"rt-multinetexit\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\tassert.NotNil(t, headscale)\n\n\tvar user1c, user2c TailscaleClient\n\n\tfor _, c := range allClients {\n\t\ts := c.MustStatus()\n\t\tif s.User[s.Self.UserID].LoginName == \"user1@test.no\" {\n\t\t\tuser1c = c\n\t\t}\n\n\t\tif s.User[s.Self.UserID].LoginName == \"user2@test.no\" {\n\t\t\tuser2c = c\n\t\t}\n\t}\n\n\trequire.NotNil(t, user1c)\n\trequire.NotNil(t, user2c)\n\n\t// 
Advertise the node of user1 as an exit node\n\tcommand := []string{\n\t\t\"tailscale\",\n\t\t\"set\",\n\t\t\"--advertise-exit-node\",\n\t}\n\t_, _, err = user1c.Execute(command)\n\trequire.NoErrorf(t, err, \"failed to advertise exit node: %s\", err)\n\n\tvar nodes []*v1.Node\n\t// Wait for route advertisements to propagate to NodeStore\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\tvar err error\n\n\t\tnodes, err = headscale.ListNodes()\n\t\tassert.NoError(ct, err)\n\t\tassert.Len(ct, nodes, 2)\n\t\trequireNodeRouteCountWithCollect(ct, nodes[0], 2, 0, 0)\n\t}, 10*time.Second, 100*time.Millisecond, \"route advertisements should propagate\")\n\n\t// Verify that no routes have been sent to the client;\n\t// they are not yet enabled.\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tstatus, err := user1c.Status()\n\t\tassert.NoError(c, err)\n\n\t\tfor _, peerKey := range status.Peers() {\n\t\t\tpeerStatus := status.Peer[peerKey]\n\n\t\t\tassert.Nil(c, peerStatus.PrimaryRoutes)\n\t\t\trequirePeerSubnetRoutesWithCollect(c, peerStatus, nil)\n\t\t}\n\t}, 5*time.Second, 200*time.Millisecond, \"Verifying no routes sent to client before approval\")\n\n\t// Enable route\n\t_, err = headscale.ApproveRoutes(nodes[0].GetId(), []netip.Prefix{tsaddr.AllIPv4()})\n\trequire.NoError(t, err)\n\n\t// Wait for route state changes to propagate to nodes\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err = headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 2)\n\t\trequireNodeRouteCountWithCollect(c, nodes[0], 2, 2, 2)\n\t}, 10*time.Second, 500*time.Millisecond, \"route state changes should propagate to nodes\")\n\n\t// Verify that the routes have been sent to the client\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tstatus, err := user2c.Status()\n\t\tassert.NoError(c, err)\n\n\t\tfor _, peerKey := range status.Peers() {\n\t\t\tpeerStatus := status.Peer[peerKey]\n\n\t\t\trequirePeerSubnetRoutesWithCollect(c, peerStatus, []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()})\n\t\t}\n\t}, 10*time.Second, 500*time.Millisecond, \"routes should be visible to client\")\n\n\t// Tell user2c to use user1c as an exit node.\n\tcommand = []string{\n\t\t\"tailscale\",\n\t\t\"set\",\n\t\t\"--exit-node\",\n\t\tuser1c.Hostname(),\n\t}\n\t_, _, err = user2c.Execute(command)\n\trequire.NoErrorf(t, err, \"failed to set exit node: %s\", err)\n\n\tusernet1, err := scenario.Network(\"usernet1\")\n\trequire.NoError(t, err)\n\n\tservices, err := scenario.Services(\"usernet1\")\n\trequire.NoError(t, err)\n\trequire.Len(t, services, 1)\n\n\tweb := services[0]\n\twebip := netip.MustParseAddr(web.GetIPInNetwork(usernet1))\n\n\t// We can't mess too much with IP forwarding in containers, so\n\t// we settle for a simple ping here.\n\t// Direct is false since we use internal DERP which means we\n\t// can't discover a direct path between docker networks.\n\terr = user2c.Ping(webip.String(),\n\t\ttsic.WithPingUntilDirect(false),\n\t\ttsic.WithPingCount(1),\n\t\ttsic.WithPingTimeout(7*time.Second),\n\t)\n\trequire.NoError(t, err)\n}\n\nfunc MustFindNode(hostname string, nodes []*v1.Node) *v1.Node {\n\tfor _, node := range nodes {\n\t\tif node.GetName() == hostname {\n\t\t\treturn node\n\t\t}\n\t}\n\n\tpanic(\"node not found\")\n}\n\n// TestAutoApproveMultiNetwork tests auto approving of routes\n// by setting up two networks where network1 has three subnet\n// routers:\n// - routerUsernet1: advertising the docker network\n// - routerSubRoute: advertising a 
subroute, a /24 inside an auto-approved /16\n// - routerExitNode: advertising an exit node\n//\n// Each router is tested step by step through the following scenarios\n//   - Policy is set to auto approve the node's route\n//   - Node advertises route and it is verified that it is auto approved and sent to nodes\n//   - Policy is changed to _not_ auto approve the route\n//   - Verify that peers can still see the node\n//   - Disable route, making it unavailable\n//   - Verify that peers can no longer use the node\n//   - Policy is changed back to auto approve the route, check that already existing routes are approved.\n//   - Verify that routes can now be seen by peers.\n//\n//nolint:gocyclo // complex multi-network auto-approve test scenario\nfunc TestAutoApproveMultiNetwork(t *testing.T) {\n\tIntegrationSkip(t)\n\n\t// Timeout for EventuallyWithT assertions.\n\t// Set generously to account for CI infrastructure variability.\n\tassertTimeout := 60 * time.Second\n\n\tbigRoute := netip.MustParsePrefix(\"10.42.0.0/16\")\n\tsubRoute := netip.MustParsePrefix(\"10.42.7.0/24\")\n\tnotApprovedRoute := netip.MustParsePrefix(\"192.168.0.0/24\")\n\n\ttests := []struct {\n\t\tname     string\n\t\tpol      *policyv2.Policy\n\t\tapprover string\n\t\tspec     ScenarioSpec\n\t\twithURL  bool\n\t}{\n\t\t{\n\t\t\tname: \"authkey-tag\",\n\t\t\tpol: &policyv2.Policy{\n\t\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{wildcard()},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tTagOwners: policyv2.TagOwners{\n\t\t\t\t\tpolicyv2.Tag(\"tag:approve\"): policyv2.Owners{usernameOwner(\"user1@\")},\n\t\t\t\t},\n\t\t\t\tAutoApprovers: policyv2.AutoApproverPolicy{\n\t\t\t\t\tRoutes: map[netip.Prefix]policyv2.AutoApprovers{\n\t\t\t\t\t\tbigRoute: {tagApprover(\"tag:approve\")},\n\t\t\t\t\t},\n\t\t\t\t\tExitNode: policyv2.AutoApprovers{tagApprover(\"tag:approve\")},\n\t\t\t\t},\n\t\t\t},\n\t\t\tapprover: \"tag:approve\",\n\t\t\tspec: ScenarioSpec{\n\t\t\t\tNodesPerUser: 3,\n\t\t\t\tUsers:        []string{\"user1\", \"user2\"},\n\t\t\t\tNetworks: map[string][]string{\n\t\t\t\t\t\"usernet1\": {\"user1\"},\n\t\t\t\t\t\"usernet2\": {\"user2\"},\n\t\t\t\t},\n\t\t\t\tExtraService: map[string][]extraServiceFunc{\n\t\t\t\t\t\"usernet1\": {Webservice},\n\t\t\t\t},\n\t\t\t\t// We build the head image with curl and traceroute, so only use\n\t\t\t\t// that for this test.\n\t\t\t\tVersions: []string{\"head\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"authkey-user\",\n\t\t\tpol: &policyv2.Policy{\n\t\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{wildcard()},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tAutoApprovers: policyv2.AutoApproverPolicy{\n\t\t\t\t\tRoutes: map[netip.Prefix]policyv2.AutoApprovers{\n\t\t\t\t\t\tbigRoute: {usernameApprover(\"user1@\")},\n\t\t\t\t\t},\n\t\t\t\t\tExitNode: policyv2.AutoApprovers{usernameApprover(\"user1@\")},\n\t\t\t\t},\n\t\t\t},\n\t\t\tapprover: \"user1@\",\n\t\t\tspec: ScenarioSpec{\n\t\t\t\tNodesPerUser: 3,\n\t\t\t\tUsers:        []string{\"user1\", \"user2\"},\n\t\t\t\tNetworks: map[string][]string{\n\t\t\t\t\t\"usernet1\": {\"user1\"},\n\t\t\t\t\t\"usernet2\": {\"user2\"},\n\t\t\t\t},\n\t\t\t\tExtraService: 
map[string][]extraServiceFunc{\n\t\t\t\t\t\"usernet1\": {Webservice},\n\t\t\t\t},\n\t\t\t\t// We build the head image with curl and traceroute, so only use\n\t\t\t\t// that for this test.\n\t\t\t\tVersions: []string{\"head\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"authkey-group\",\n\t\t\tpol: &policyv2.Policy{\n\t\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{wildcard()},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tGroups: policyv2.Groups{\n\t\t\t\t\tpolicyv2.Group(\"group:approve\"): []policyv2.Username{policyv2.Username(\"user1@\")},\n\t\t\t\t},\n\t\t\t\tAutoApprovers: policyv2.AutoApproverPolicy{\n\t\t\t\t\tRoutes: map[netip.Prefix]policyv2.AutoApprovers{\n\t\t\t\t\t\tbigRoute: {groupApprover(\"group:approve\")},\n\t\t\t\t\t},\n\t\t\t\t\tExitNode: policyv2.AutoApprovers{groupApprover(\"group:approve\")},\n\t\t\t\t},\n\t\t\t},\n\t\t\tapprover: \"group:approve\",\n\t\t\tspec: ScenarioSpec{\n\t\t\t\tNodesPerUser: 3,\n\t\t\t\tUsers:        []string{\"user1\", \"user2\"},\n\t\t\t\tNetworks: map[string][]string{\n\t\t\t\t\t\"usernet1\": {\"user1\"},\n\t\t\t\t\t\"usernet2\": {\"user2\"},\n\t\t\t\t},\n\t\t\t\tExtraService: map[string][]extraServiceFunc{\n\t\t\t\t\t\"usernet1\": {Webservice},\n\t\t\t\t},\n\t\t\t\t// We build the head image with curl and traceroute, so only use\n\t\t\t\t// that for this test.\n\t\t\t\tVersions: []string{\"head\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"webauth-user\",\n\t\t\tpol: &policyv2.Policy{\n\t\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{wildcard()},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tAutoApprovers: policyv2.AutoApproverPolicy{\n\t\t\t\t\tRoutes: map[netip.Prefix]policyv2.AutoApprovers{\n\t\t\t\t\t\tbigRoute: {usernameApprover(\"user1@\")},\n\t\t\t\t\t},\n\t\t\t\t\tExitNode: policyv2.AutoApprovers{usernameApprover(\"user1@\")},\n\t\t\t\t},\n\t\t\t},\n\t\t\tapprover: \"user1@\",\n\t\t\tspec: ScenarioSpec{\n\t\t\t\tNodesPerUser: 3,\n\t\t\t\tUsers:        []string{\"user1\", \"user2\"},\n\t\t\t\tNetworks: map[string][]string{\n\t\t\t\t\t\"usernet1\": {\"user1\"},\n\t\t\t\t\t\"usernet2\": {\"user2\"},\n\t\t\t\t},\n\t\t\t\tExtraService: map[string][]extraServiceFunc{\n\t\t\t\t\t\"usernet1\": {Webservice},\n\t\t\t\t},\n\t\t\t\t// We build the head image with curl and traceroute, so only use\n\t\t\t\t// that for this test.\n\t\t\t\tVersions: []string{\"head\"},\n\t\t\t},\n\t\t\twithURL: true,\n\t\t},\n\t\t{\n\t\t\tname: \"webauth-tag\",\n\t\t\tpol: &policyv2.Policy{\n\t\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{wildcard()},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tTagOwners: policyv2.TagOwners{\n\t\t\t\t\tpolicyv2.Tag(\"tag:approve\"): policyv2.Owners{usernameOwner(\"user1@\")},\n\t\t\t\t},\n\t\t\t\tAutoApprovers: policyv2.AutoApproverPolicy{\n\t\t\t\t\tRoutes: map[netip.Prefix]policyv2.AutoApprovers{\n\t\t\t\t\t\tbigRoute: {tagApprover(\"tag:approve\")},\n\t\t\t\t\t},\n\t\t\t\t\tExitNode: 
policyv2.AutoApprovers{tagApprover(\"tag:approve\")},\n\t\t\t\t},\n\t\t\t},\n\t\t\tapprover: \"tag:approve\",\n\t\t\tspec: ScenarioSpec{\n\t\t\t\tNodesPerUser: 3,\n\t\t\t\tUsers:        []string{\"user1\", \"user2\"},\n\t\t\t\tNetworks: map[string][]string{\n\t\t\t\t\t\"usernet1\": {\"user1\"},\n\t\t\t\t\t\"usernet2\": {\"user2\"},\n\t\t\t\t},\n\t\t\t\tExtraService: map[string][]extraServiceFunc{\n\t\t\t\t\t\"usernet1\": {Webservice},\n\t\t\t\t},\n\t\t\t\t// We build the head image with curl and traceroute, so only use\n\t\t\t\t// that for this test.\n\t\t\t\tVersions: []string{\"head\"},\n\t\t\t},\n\t\t\twithURL: true,\n\t\t},\n\t\t{\n\t\t\tname: \"webauth-group\",\n\t\t\tpol: &policyv2.Policy{\n\t\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: []policyv2.Alias{wildcard()},\n\t\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRangeAny),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tGroups: policyv2.Groups{\n\t\t\t\t\tpolicyv2.Group(\"group:approve\"): []policyv2.Username{policyv2.Username(\"user1@\")},\n\t\t\t\t},\n\t\t\t\tAutoApprovers: policyv2.AutoApproverPolicy{\n\t\t\t\t\tRoutes: map[netip.Prefix]policyv2.AutoApprovers{\n\t\t\t\t\t\tbigRoute: {groupApprover(\"group:approve\")},\n\t\t\t\t\t},\n\t\t\t\t\tExitNode: policyv2.AutoApprovers{groupApprover(\"group:approve\")},\n\t\t\t\t},\n\t\t\t},\n\t\t\tapprover: \"group:approve\",\n\t\t\tspec: ScenarioSpec{\n\t\t\t\tNodesPerUser: 3,\n\t\t\t\tUsers:        []string{\"user1\", \"user2\"},\n\t\t\t\tNetworks: map[string][]string{\n\t\t\t\t\t\"usernet1\": {\"user1\"},\n\t\t\t\t\t\"usernet2\": {\"user2\"},\n\t\t\t\t},\n\t\t\t\tExtraService: map[string][]extraServiceFunc{\n\t\t\t\t\t\"usernet1\": {Webservice},\n\t\t\t\t},\n\t\t\t\t// We build the head image with curl and traceroute, so only use\n\t\t\t\t// that for this test.\n\t\t\t\tVersions: []string{\"head\"},\n\t\t\t},\n\t\t\twithURL: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tfor _, polMode := range []types.PolicyMode{types.PolicyModeDB, types.PolicyModeFile} {\n\t\t\tfor _, advertiseDuringUp := range []bool{false, true} {\n\t\t\t\tname := fmt.Sprintf(\"%s-advertiseduringup-%t-pol-%s\", tt.name, advertiseDuringUp, polMode)\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\t// Create a deep copy of the policy to avoid mutating the shared test case.\n\t\t\t\t\t// Each subtest modifies AutoApprovers.Routes (add then delete), so we need\n\t\t\t\t\t// an isolated copy to prevent state leakage between sequential test runs.\n\t\t\t\t\tpol := &policyv2.Policy{\n\t\t\t\t\t\tACLs:      slices.Clone(tt.pol.ACLs),\n\t\t\t\t\t\tGroups:    maps.Clone(tt.pol.Groups),\n\t\t\t\t\t\tTagOwners: maps.Clone(tt.pol.TagOwners),\n\t\t\t\t\t\tAutoApprovers: policyv2.AutoApproverPolicy{\n\t\t\t\t\t\t\tExitNode: slices.Clone(tt.pol.AutoApprovers.ExitNode),\n\t\t\t\t\t\t\tRoutes:   maps.Clone(tt.pol.AutoApprovers.Routes),\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\n\t\t\t\t\tscenario, err := NewScenario(tt.spec)\n\n\t\t\t\t\trequire.NoErrorf(t, err, \"failed to create scenario: %s\", err)\n\t\t\t\t\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\t\t\t\t\tvar nodes []*v1.Node\n\n\t\t\t\t\topts := []hsic.Option{\n\t\t\t\t\t\thsic.WithTestName(\"autoapprovemulti\"),\n\t\t\t\t\t\thsic.WithACLPolicy(pol),\n\t\t\t\t\t\thsic.WithPolicyMode(polMode), // test iterates over file and DB policy modes\n\t\t\t\t\t}\n\n\t\t\t\t\ttsOpts := []tsic.Option{\n\t\t\t\t\t\ttsic.WithAcceptRoutes(),\n\t\t\t\t\t}\n\n\t\t\t\t\troute, 
err := scenario.SubnetOfNetwork(\"usernet1\")\n\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\t// For tag-based approvers, nodes must be tagged with that tag\n\t\t\t\t\t// (tags-as-identity model: tagged nodes are identified by their tags)\n\t\t\t\t\tvar (\n\t\t\t\t\t\tpreAuthKeyTags []string\n\t\t\t\t\t\twebauthTagUser string\n\t\t\t\t\t)\n\n\t\t\t\t\tif strings.HasPrefix(tt.approver, \"tag:\") {\n\t\t\t\t\t\tpreAuthKeyTags = []string{tt.approver}\n\t\t\t\t\t\tif tt.withURL {\n\t\t\t\t\t\t\t// For webauth, only user1 can request tags (per tagOwners policy)\n\t\t\t\t\t\t\twebauthTagUser = \"user1\" //nolint:goconst // test value, not a constant\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\terr = scenario.createHeadscaleEnvWithTags(tt.withURL, tsOpts, preAuthKeyTags, webauthTagUser,\n\t\t\t\t\t\topts...,\n\t\t\t\t\t)\n\t\t\t\t\trequireNoErrHeadscaleEnv(t, err)\n\n\t\t\t\t\tallClients, err := scenario.ListTailscaleClients()\n\t\t\t\t\trequireNoErrListClients(t, err)\n\n\t\t\t\t\terr = scenario.WaitForTailscaleSync()\n\t\t\t\t\trequireNoErrSync(t, err)\n\n\t\t\t\t\tservices, err := scenario.Services(\"usernet1\")\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\trequire.Len(t, services, 1)\n\n\t\t\t\t\tusernet1, err := scenario.Network(\"usernet1\")\n\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\theadscale, err := scenario.Headscale()\n\t\t\t\t\trequireNoErrGetHeadscale(t, err)\n\t\t\t\t\tassert.NotNil(t, headscale)\n\n\t\t\t\t\t// Add the Docker network route to the auto-approvers\n\t\t\t\t\t// Keep existing auto-approvers (like bigRoute) in place\n\t\t\t\t\tvar approvers policyv2.AutoApprovers\n\n\t\t\t\t\tswitch {\n\t\t\t\t\tcase strings.HasPrefix(tt.approver, \"tag:\"):\n\t\t\t\t\t\tapprovers = append(approvers, tagApprover(tt.approver))\n\t\t\t\t\tcase strings.HasPrefix(tt.approver, \"group:\"):\n\t\t\t\t\t\tapprovers = append(approvers, groupApprover(tt.approver))\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tapprovers = append(approvers, usernameApprover(tt.approver))\n\t\t\t\t\t}\n\t\t\t\t\t// pol.AutoApprovers.Routes is already initialized in the deep copy above\n\t\t\t\t\tprefix := *route\n\t\t\t\t\tpol.AutoApprovers.Routes[prefix] = approvers\n\t\t\t\t\terr = headscale.SetPolicy(pol)\n\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\tif advertiseDuringUp {\n\t\t\t\t\t\ttsOpts = append(tsOpts,\n\t\t\t\t\t\t\ttsic.WithExtraLoginArgs([]string{\"--advertise-routes=\" + route.String()}),\n\t\t\t\t\t\t)\n\t\t\t\t\t}\n\n\t\t\t\t\t// For webauth with tag approver, the node needs to advertise the tag during registration\n\t\t\t\t\t// (tags-as-identity model: webauth nodes can use --advertise-tags if authorized by tagOwners)\n\t\t\t\t\tif tt.withURL && strings.HasPrefix(tt.approver, \"tag:\") {\n\t\t\t\t\t\ttsOpts = append(tsOpts, tsic.WithTags([]string{tt.approver}))\n\t\t\t\t\t}\n\n\t\t\t\t\ttsOpts = append(tsOpts, tsic.WithNetwork(usernet1))\n\n\t\t\t\t\t// This whole dance is to add a node _after_ all the other nodes\n\t\t\t\t\t// with an additional tsOpt which advertises the route as part\n\t\t\t\t\t// of the `tailscale up` command. 
If we do this as part of the\n\t\t\t\t\t// scenario creation, it will be added to all nodes and turn\n\t\t\t\t\t// into a HA node, which isn't something we are testing here.\n\t\t\t\t\trouterUsernet1, err := scenario.CreateTailscaleNode(\"head\", tsOpts...)\n\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\tdefer func() {\n\t\t\t\t\t\t_, _, err := routerUsernet1.Shutdown()\n\t\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\t}()\n\n\t\t\t\t\tif tt.withURL {\n\t\t\t\t\t\tu, err := routerUsernet1.LoginWithURL(headscale.GetEndpoint())\n\t\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\t\tbody, err := doLoginURL(routerUsernet1.Hostname(), u)\n\t\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\t\terr = scenario.runHeadscaleRegister(\"user1\", body)\n\t\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\t\t// Wait for the client to sync with the server after webauth registration.\n\t\t\t\t\t\t// Unlike authkey login which blocks until complete, webauth registration\n\t\t\t\t\t\t// happens on the server side and the client needs time to receive the network map.\n\t\t\t\t\t\terr = routerUsernet1.WaitForRunning(integrationutil.PeerSyncTimeout())\n\t\t\t\t\t\trequire.NoError(t, err, \"webauth client failed to reach Running state\")\n\t\t\t\t\t} else {\n\t\t\t\t\t\tuserMap, err := headscale.MapUsers()\n\t\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\t\t// If the approver is a tag, create a tagged PreAuthKey\n\t\t\t\t\t\t// (tags-as-identity model: tags come from PreAuthKey, not --advertise-tags)\n\t\t\t\t\t\tvar pak *v1.PreAuthKey\n\t\t\t\t\t\tif strings.HasPrefix(tt.approver, \"tag:\") {\n\t\t\t\t\t\t\tpak, err = scenario.CreatePreAuthKeyWithTags(userMap[\"user1\"].GetId(), false, false, []string{tt.approver})\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tpak, err = scenario.CreatePreAuthKey(userMap[\"user1\"].GetId(), false, false)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\t\terr = routerUsernet1.Login(headscale.GetEndpoint(), pak.GetKey())\n\t\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\t}\n\t\t\t\t\t// extra creation end.\n\n\t\t\t\t\t// Wait for the node to be fully running before getting its ID\n\t\t\t\t\t// This is especially important for webauth flow where login is asynchronous\n\t\t\t\t\terr = routerUsernet1.WaitForRunning(30 * time.Second)\n\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\t// Wait for bidirectional peer synchronization.\n\t\t\t\t\t// Both the router and all existing clients must see each other.\n\t\t\t\t\t// This is critical for connectivity - without this, the WireGuard\n\t\t\t\t\t// tunnels may not be established despite peers appearing in netmaps.\n\n\t\t\t\t\t// Router waits for all existing clients\n\t\t\t\t\terr = routerUsernet1.WaitForPeers(len(allClients), 60*time.Second, 1*time.Second)\n\t\t\t\t\trequire.NoError(t, err, \"router failed to see all peers\")\n\n\t\t\t\t\t// All clients wait for the router (they should see 6 peers including the router)\n\t\t\t\t\tfor _, existingClient := range allClients {\n\t\t\t\t\t\terr = existingClient.WaitForPeers(len(allClients), 60*time.Second, 1*time.Second)\n\t\t\t\t\t\trequire.NoErrorf(t, err, \"client %s failed to see all peers including router\", existingClient.Hostname())\n\t\t\t\t\t}\n\n\t\t\t\t\trouterUsernet1ID := routerUsernet1.MustID()\n\n\t\t\t\t\tweb := services[0]\n\t\t\t\t\twebip := netip.MustParseAddr(web.GetIPInNetwork(usernet1))\n\t\t\t\t\tweburl := fmt.Sprintf(\"http://%s/etc/hostname\", webip)\n\t\t\t\t\tt.Logf(\"webservice: %s, %s\", webip.String(), weburl)\n\n\t\t\t\t\t// Sort nodes by 
ID\n\t\t\t\t\tsort.SliceStable(allClients, func(i, j int) bool {\n\t\t\t\t\t\tstatusI := allClients[i].MustStatus()\n\t\t\t\t\t\tstatusJ := allClients[j].MustStatus()\n\n\t\t\t\t\t\treturn statusI.Self.ID < statusJ.Self.ID\n\t\t\t\t\t})\n\n\t\t\t\t\t// This is OK because the scenario creates the users in order, so the\n\t\t\t\t\t// first three nodes belong to user1 and act as the subnet routers,\n\t\t\t\t\t// while the remaining nodes belong to the second user.\n\t\t\t\t\trouterSubRoute := allClients[1]\n\t\t\t\t\trouterExitNode := allClients[2]\n\n\t\t\t\t\tclient := allClients[3]\n\n\t\t\t\t\tif !advertiseDuringUp {\n\t\t\t\t\t\t// Advertise the route for the Docker subnet of user1\n\t\t\t\t\t\tcommand := []string{\n\t\t\t\t\t\t\t\"tailscale\",\n\t\t\t\t\t\t\t\"set\",\n\t\t\t\t\t\t\t\"--advertise-routes=\" + route.String(),\n\t\t\t\t\t\t}\n\t\t\t\t\t\t_, _, err = routerUsernet1.Execute(command)\n\t\t\t\t\t\trequire.NoErrorf(t, err, \"failed to advertise route: %s\", err)\n\t\t\t\t\t}\n\n\t\t\t\t\t// Wait for route state changes to propagate.\n\t\t\t\t\t// Use a longer timeout to account for CI infrastructure variability -\n\t\t\t\t\t// when advertiseDuringUp=true, routes are sent during registration and may\n\t\t\t\t\t// take longer to propagate through the server's auto-approval logic in slow\n\t\t\t\t\t// environments.\n\t\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t\t// This route should be auto-approved, so the node is expected to have a route\n\t\t\t\t\t\t// for all counts.\n\t\t\t\t\t\tnodes, err := headscale.ListNodes()\n\t\t\t\t\t\tassert.NoError(c, err)\n\n\t\t\t\t\t\trouterNode := MustFindNode(routerUsernet1.Hostname(), nodes)\n\t\t\t\t\t\tt.Logf(\"Initial auto-approval check - Router node %s: announced=%v, approved=%v, subnet=%v\",\n\t\t\t\t\t\t\trouterNode.GetName(),\n\t\t\t\t\t\t\trouterNode.GetAvailableRoutes(),\n\t\t\t\t\t\t\trouterNode.GetApprovedRoutes(),\n\t\t\t\t\t\t\trouterNode.GetSubnetRoutes())\n\n\t\t\t\t\t\trequireNodeRouteCountWithCollect(c, routerNode, 1, 1, 1)\n\t\t\t\t\t}, assertTimeout, 500*time.Millisecond, \"Initial route auto-approval: Route should be approved via policy\")\n\n\t\t\t\t\t// Verify that the routes have been sent to the client.\n\t\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t\tstatus, err := client.Status()\n\t\t\t\t\t\tassert.NoError(c, err)\n\n\t\t\t\t\t\t// Debug output to understand peer visibility\n\t\t\t\t\t\tt.Logf(\"Client %s sees %d peers\", client.Hostname(), len(status.Peers()))\n\n\t\t\t\t\t\trouterPeerFound := false\n\n\t\t\t\t\t\tfor _, peerKey := range status.Peers() {\n\t\t\t\t\t\t\tpeerStatus := status.Peer[peerKey]\n\n\t\t\t\t\t\t\tif peerStatus.ID == routerUsernet1ID.StableID() {\n\t\t\t\t\t\t\t\trouterPeerFound = true\n\n\t\t\t\t\t\t\t\tt.Logf(\"Client sees router peer %s (ID=%s): AllowedIPs=%v, PrimaryRoutes=%v\",\n\t\t\t\t\t\t\t\t\tpeerStatus.HostName,\n\t\t\t\t\t\t\t\t\tpeerStatus.ID,\n\t\t\t\t\t\t\t\t\tpeerStatus.AllowedIPs,\n\t\t\t\t\t\t\t\t\tpeerStatus.PrimaryRoutes)\n\n\t\t\t\t\t\t\t\tassert.NotNil(c, peerStatus.PrimaryRoutes)\n\n\t\t\t\t\t\t\t\tif peerStatus.PrimaryRoutes != nil {\n\t\t\t\t\t\t\t\t\tassert.Contains(c, peerStatus.PrimaryRoutes.AsSlice(), *route)\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\trequirePeerSubnetRoutesWithCollect(c, peerStatus, []netip.Prefix{*route})\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\trequirePeerSubnetRoutesWithCollect(c, peerStatus, nil)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tassert.True(c, routerPeerFound, \"Client should see the 
router peer\")\n\t\t\t\t\t}, assertTimeout, 200*time.Millisecond, \"Verifying routes sent to client after auto-approval\")\n\n\t\t\t\t\t// Verify WireGuard tunnel connectivity to the router before testing route.\n\t\t\t\t\t// The client may have the route in its netmap but the actual tunnel may not\n\t\t\t\t\t// be established yet, especially in CI environments with higher latency.\n\t\t\t\t\trouterIPv4, err := routerUsernet1.IPv4()\n\t\t\t\t\trequire.NoError(t, err, \"failed to get router IPv4\")\n\t\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t\terr := client.Ping(\n\t\t\t\t\t\t\trouterIPv4.String(),\n\t\t\t\t\t\t\ttsic.WithPingUntilDirect(false), // DERP relay is fine\n\t\t\t\t\t\t\ttsic.WithPingCount(1),\n\t\t\t\t\t\t\ttsic.WithPingTimeout(5*time.Second),\n\t\t\t\t\t\t)\n\t\t\t\t\t\tassert.NoError(c, err, \"ping to router should succeed\")\n\t\t\t\t\t}, assertTimeout, 200*time.Millisecond, \"Verifying WireGuard tunnel to router is established\")\n\n\t\t\t\t\turl := fmt.Sprintf(\"http://%s/etc/hostname\", webip)\n\t\t\t\t\tt.Logf(\"url from %s to %s\", client.Hostname(), url)\n\n\t\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\t\t\tassert.NoError(c, err)\n\t\t\t\t\t\tassert.Len(c, result, 13)\n\t\t\t\t\t}, assertTimeout, 200*time.Millisecond, \"Verifying client can reach webservice through auto-approved route\")\n\n\t\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t\ttr, err := client.Traceroute(webip)\n\t\t\t\t\t\tassert.NoError(c, err)\n\n\t\t\t\t\t\tip, err := routerUsernet1.IPv4()\n\t\t\t\t\t\tif !assert.NoError(c, err, \"failed to get IPv4 for routerUsernet1\") {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tassertTracerouteViaIPWithCollect(c, tr, ip)\n\t\t\t\t\t}, assertTimeout, 200*time.Millisecond, \"Verifying traceroute goes through auto-approved router\")\n\n\t\t\t\t\t// Remove the auto approval from the policy, any routes already enabled should be allowed.\n\t\t\t\t\tprefix = *route\n\t\t\t\t\tdelete(pol.AutoApprovers.Routes, prefix)\n\t\t\t\t\terr = headscale.SetPolicy(pol)\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\tt.Logf(\"Policy updated: removed auto-approver for route %s\", prefix)\n\n\t\t\t\t\t// Wait for route state changes to propagate\n\t\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t\t// Routes already approved should remain approved even after policy change\n\t\t\t\t\t\tnodes, err = headscale.ListNodes()\n\t\t\t\t\t\tassert.NoError(c, err)\n\n\t\t\t\t\t\trouterNode := MustFindNode(routerUsernet1.Hostname(), nodes)\n\t\t\t\t\t\tt.Logf(\"After policy removal - Router node %s: announced=%v, approved=%v, subnet=%v\",\n\t\t\t\t\t\t\trouterNode.GetName(),\n\t\t\t\t\t\t\trouterNode.GetAvailableRoutes(),\n\t\t\t\t\t\t\trouterNode.GetApprovedRoutes(),\n\t\t\t\t\t\t\trouterNode.GetSubnetRoutes())\n\n\t\t\t\t\t\trequireNodeRouteCountWithCollect(c, routerNode, 1, 1, 1)\n\t\t\t\t\t}, assertTimeout, 500*time.Millisecond, \"Routes should remain approved after auto-approver removal\")\n\n\t\t\t\t\t// Verify that the routes have been sent to the client.\n\t\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t\tstatus, err := client.Status()\n\t\t\t\t\t\tassert.NoError(c, err)\n\n\t\t\t\t\t\tfor _, peerKey := range status.Peers() {\n\t\t\t\t\t\t\tpeerStatus := status.Peer[peerKey]\n\n\t\t\t\t\t\t\tif peerStatus.ID == routerUsernet1ID.StableID() {\n\t\t\t\t\t\t\t\tassert.NotNil(c, 
peerStatus.PrimaryRoutes)\n\n\t\t\t\t\t\t\t\tif peerStatus.PrimaryRoutes != nil {\n\t\t\t\t\t\t\t\t\tassert.Contains(c, peerStatus.PrimaryRoutes.AsSlice(), *route)\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\trequirePeerSubnetRoutesWithCollect(c, peerStatus, []netip.Prefix{*route})\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\trequirePeerSubnetRoutesWithCollect(c, peerStatus, nil)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}, assertTimeout, 200*time.Millisecond, \"Verifying routes remain after policy change\")\n\n\t\t\t\t\turl = fmt.Sprintf(\"http://%s/etc/hostname\", webip)\n\t\t\t\t\tt.Logf(\"url from %s to %s\", client.Hostname(), url)\n\n\t\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\t\t\tassert.NoError(c, err)\n\t\t\t\t\t\tassert.Len(c, result, 13)\n\t\t\t\t\t}, assertTimeout, 200*time.Millisecond, \"Verifying client can still reach webservice after policy change\")\n\n\t\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t\ttr, err := client.Traceroute(webip)\n\t\t\t\t\t\tassert.NoError(c, err)\n\n\t\t\t\t\t\tip, err := routerUsernet1.IPv4()\n\t\t\t\t\t\tif !assert.NoError(c, err, \"failed to get IPv4 for routerUsernet1\") {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tassertTracerouteViaIPWithCollect(c, tr, ip)\n\t\t\t\t\t}, assertTimeout, 200*time.Millisecond, \"Verifying traceroute still goes through router after policy change\")\n\n\t\t\t\t\t// Disable the route, making it unavailable since it is no longer auto-approved\n\t\t\t\t\t_, err = headscale.ApproveRoutes(\n\t\t\t\t\t\tMustFindNode(routerUsernet1.Hostname(), nodes).GetId(),\n\t\t\t\t\t\t[]netip.Prefix{},\n\t\t\t\t\t)\n\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\t// Wait for route state changes to propagate\n\t\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t\t// The route approval was just removed, so the node should no longer\n\t\t\t\t\t\t// have any approved or subnet routes.\n\t\t\t\t\t\tnodes, err = headscale.ListNodes()\n\t\t\t\t\t\tassert.NoError(c, err)\n\t\t\t\t\t\trequireNodeRouteCountWithCollect(c, MustFindNode(routerUsernet1.Hostname(), nodes), 1, 0, 0)\n\t\t\t\t\t}, assertTimeout, 500*time.Millisecond, \"route state changes should propagate\")\n\n\t\t\t\t\t// Verify that the routes have been removed from the client.\n\t\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t\tstatus, err := client.Status()\n\t\t\t\t\t\tassert.NoError(c, err)\n\n\t\t\t\t\t\tfor _, peerKey := range status.Peers() {\n\t\t\t\t\t\t\tpeerStatus := status.Peer[peerKey]\n\t\t\t\t\t\t\trequirePeerSubnetRoutesWithCollect(c, peerStatus, nil)\n\t\t\t\t\t\t}\n\t\t\t\t\t}, assertTimeout, 200*time.Millisecond, \"Verifying routes disabled after route removal\")\n\n\t\t\t\t\t// Add the route back to the auto approver in the policy; the route should\n\t\t\t\t\t// now become available again.\n\t\t\t\t\tvar newApprovers policyv2.AutoApprovers\n\n\t\t\t\t\tswitch {\n\t\t\t\t\tcase strings.HasPrefix(tt.approver, \"tag:\"):\n\t\t\t\t\t\tnewApprovers = append(newApprovers, tagApprover(tt.approver))\n\t\t\t\t\tcase strings.HasPrefix(tt.approver, \"group:\"):\n\t\t\t\t\t\tnewApprovers = append(newApprovers, groupApprover(tt.approver))\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tnewApprovers = append(newApprovers, usernameApprover(tt.approver))\n\t\t\t\t\t}\n\t\t\t\t\t// pol.AutoApprovers.Routes is already initialized in the deep copy above\n\t\t\t\t\tprefix = *route\n\t\t\t\t\tpol.AutoApprovers.Routes[prefix] = newApprovers\n\t\t\t\t\terr = 
headscale.SetPolicy(pol)\n\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\t// Wait for route state changes to propagate\n\t\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t\t// The route should be auto-approved again, so the node is expected to\n\t\t\t\t\t\t// have a route for all counts.\n\t\t\t\t\t\tnodes, err = headscale.ListNodes()\n\t\t\t\t\t\tassert.NoError(c, err)\n\t\t\t\t\t\trequireNodeRouteCountWithCollect(c, MustFindNode(routerUsernet1.Hostname(), nodes), 1, 1, 1)\n\t\t\t\t\t}, assertTimeout, 500*time.Millisecond, \"route state changes should propagate\")\n\n\t\t\t\t\t// Verify that the routes have been sent to the client.\n\t\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t\tstatus, err := client.Status()\n\t\t\t\t\t\tassert.NoError(c, err)\n\n\t\t\t\t\t\tfor _, peerKey := range status.Peers() {\n\t\t\t\t\t\t\tpeerStatus := status.Peer[peerKey]\n\n\t\t\t\t\t\t\tif peerStatus.ID == routerUsernet1ID.StableID() {\n\t\t\t\t\t\t\t\tassert.NotNil(c, peerStatus.PrimaryRoutes)\n\n\t\t\t\t\t\t\t\tif peerStatus.PrimaryRoutes != nil {\n\t\t\t\t\t\t\t\t\tassert.Contains(c, peerStatus.PrimaryRoutes.AsSlice(), *route)\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\trequirePeerSubnetRoutesWithCollect(c, peerStatus, []netip.Prefix{*route})\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\trequirePeerSubnetRoutesWithCollect(c, peerStatus, nil)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}, assertTimeout, 200*time.Millisecond, \"Verifying routes re-enabled after policy re-approval\")\n\n\t\t\t\t\turl = fmt.Sprintf(\"http://%s/etc/hostname\", webip)\n\t\t\t\t\tt.Logf(\"url from %s to %s\", client.Hostname(), url)\n\n\t\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t\tresult, err := client.Curl(url)\n\t\t\t\t\t\tassert.NoError(c, err)\n\t\t\t\t\t\tassert.Len(c, result, 13)\n\t\t\t\t\t}, assertTimeout, 200*time.Millisecond, \"Verifying client can reach webservice after route re-approval\")\n\n\t\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t\ttr, err := client.Traceroute(webip)\n\t\t\t\t\t\tassert.NoError(c, err)\n\n\t\t\t\t\t\tip, err := routerUsernet1.IPv4()\n\t\t\t\t\t\tif !assert.NoError(c, err, \"failed to get IPv4 for routerUsernet1\") {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tassertTracerouteViaIPWithCollect(c, tr, ip)\n\t\t\t\t\t}, assertTimeout, 200*time.Millisecond, \"Verifying traceroute goes through router after re-approval\")\n\n\t\t\t\t\t// Advertise and validate a subnet of an auto-approved route, a /24 inside the\n\t\t\t\t\t// auto-approved /16.\n\t\t\t\t\tcommand := []string{\n\t\t\t\t\t\t\"tailscale\",\n\t\t\t\t\t\t\"set\",\n\t\t\t\t\t\t\"--advertise-routes=\" + subRoute.String(),\n\t\t\t\t\t}\n\t\t\t\t\t_, _, err = routerSubRoute.Execute(command)\n\t\t\t\t\trequire.NoErrorf(t, err, \"failed to advertise route: %s\", err)\n\n\t\t\t\t\t// Wait for route state changes to propagate\n\t\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t\t// These routes should be auto-approved, so the nodes are expected to\n\t\t\t\t\t\t// have routes for all counts.\n\t\t\t\t\t\tnodes, err = headscale.ListNodes()\n\t\t\t\t\t\tassert.NoError(c, err)\n\t\t\t\t\t\trequireNodeRouteCountWithCollect(c, MustFindNode(routerUsernet1.Hostname(), nodes), 1, 1, 1)\n\t\t\t\t\t\trequireNodeRouteCountWithCollect(c, nodes[1], 1, 1, 1)\n\t\t\t\t\t}, assertTimeout, 500*time.Millisecond, \"route state changes should propagate\")\n\n\t\t\t\t\t// Verify that the routes have been sent to the 
client.\n\t\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t\tstatus, err := client.Status()\n\t\t\t\t\t\tassert.NoError(c, err)\n\n\t\t\t\t\t\tfor _, peerKey := range status.Peers() {\n\t\t\t\t\t\t\tpeerStatus := status.Peer[peerKey]\n\n\t\t\t\t\t\t\tif peerStatus.ID == routerUsernet1ID.StableID() {\n\t\t\t\t\t\t\t\tif peerStatus.PrimaryRoutes != nil {\n\t\t\t\t\t\t\t\t\tassert.Contains(c, peerStatus.PrimaryRoutes.AsSlice(), *route)\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\trequirePeerSubnetRoutesWithCollect(c, peerStatus, []netip.Prefix{*route})\n\t\t\t\t\t\t\t} else if peerStatus.ID == \"2\" {\n\t\t\t\t\t\t\t\tif peerStatus.PrimaryRoutes != nil {\n\t\t\t\t\t\t\t\t\tassert.Contains(c, peerStatus.PrimaryRoutes.AsSlice(), subRoute)\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\trequirePeerSubnetRoutesWithCollect(c, peerStatus, []netip.Prefix{subRoute})\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\trequirePeerSubnetRoutesWithCollect(c, peerStatus, nil)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}, assertTimeout, 200*time.Millisecond, \"Verifying sub-route propagated to client\")\n\n\t\t\t\t\t// Advertising a route that is not auto-approved will not end up anywhere\n\t\t\t\t\tcommand = []string{\n\t\t\t\t\t\t\"tailscale\",\n\t\t\t\t\t\t\"set\",\n\t\t\t\t\t\t\"--advertise-routes=\" + notApprovedRoute.String(),\n\t\t\t\t\t}\n\t\t\t\t\t_, _, err = routerSubRoute.Execute(command)\n\t\t\t\t\trequire.NoErrorf(t, err, \"failed to advertise route: %s\", err)\n\n\t\t\t\t\t// Wait for route state changes to propagate\n\t\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t\t// The new route is not auto-approved, so only the previously approved\n\t\t\t\t\t\t// route remains; no new subnet route should appear.\n\t\t\t\t\t\tnodes, err = headscale.ListNodes()\n\t\t\t\t\t\tassert.NoError(c, err)\n\t\t\t\t\t\trequireNodeRouteCountWithCollect(c, MustFindNode(routerUsernet1.Hostname(), nodes), 1, 1, 1)\n\t\t\t\t\t\trequireNodeRouteCountWithCollect(c, nodes[1], 1, 1, 0)\n\t\t\t\t\t\trequireNodeRouteCountWithCollect(c, nodes[2], 0, 0, 0)\n\t\t\t\t\t}, assertTimeout, 500*time.Millisecond, \"route state changes should propagate\")\n\n\t\t\t\t\t// Verify that the routes have been sent to the client.\n\t\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t\tstatus, err := client.Status()\n\t\t\t\t\t\tassert.NoError(c, err)\n\n\t\t\t\t\t\tfor _, peerKey := range status.Peers() {\n\t\t\t\t\t\t\tpeerStatus := status.Peer[peerKey]\n\n\t\t\t\t\t\t\tif peerStatus.ID == routerUsernet1ID.StableID() {\n\t\t\t\t\t\t\t\tassert.NotNil(c, peerStatus.PrimaryRoutes)\n\n\t\t\t\t\t\t\t\tif peerStatus.PrimaryRoutes != nil {\n\t\t\t\t\t\t\t\t\tassert.Contains(c, peerStatus.PrimaryRoutes.AsSlice(), *route)\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\trequirePeerSubnetRoutesWithCollect(c, peerStatus, []netip.Prefix{*route})\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\trequirePeerSubnetRoutesWithCollect(c, peerStatus, nil)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}, assertTimeout, 200*time.Millisecond, \"Verifying unapproved route not propagated\")\n\n\t\t\t\t\t// Exit routes are also automatically approved\n\t\t\t\t\tcommand = []string{\n\t\t\t\t\t\t\"tailscale\",\n\t\t\t\t\t\t\"set\",\n\t\t\t\t\t\t\"--advertise-exit-node\",\n\t\t\t\t\t}\n\t\t\t\t\t_, _, err = routerExitNode.Execute(command)\n\t\t\t\t\trequire.NoErrorf(t, err, \"failed to advertise exit node: %s\", err)\n\n\t\t\t\t\t// Wait for route state changes to propagate\n\t\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t\tnodes, err = 
headscale.ListNodes()\n\t\t\t\t\t\tassert.NoError(c, err)\n\t\t\t\t\t\trequireNodeRouteCountWithCollect(c, MustFindNode(routerUsernet1.Hostname(), nodes), 1, 1, 1)\n\t\t\t\t\t\trequireNodeRouteCountWithCollect(c, nodes[1], 1, 1, 0)\n\t\t\t\t\t\trequireNodeRouteCountWithCollect(c, nodes[2], 2, 2, 2)\n\t\t\t\t\t}, assertTimeout, 500*time.Millisecond, \"route state changes should propagate\")\n\n\t\t\t\t\t// Verify that the routes have been sent to the client.\n\t\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\t\tstatus, err := client.Status()\n\t\t\t\t\t\tassert.NoError(c, err)\n\n\t\t\t\t\t\tfor _, peerKey := range status.Peers() {\n\t\t\t\t\t\t\tpeerStatus := status.Peer[peerKey]\n\n\t\t\t\t\t\t\tif peerStatus.ID == routerUsernet1ID.StableID() {\n\t\t\t\t\t\t\t\tif peerStatus.PrimaryRoutes != nil {\n\t\t\t\t\t\t\t\t\tassert.Contains(c, peerStatus.PrimaryRoutes.AsSlice(), *route)\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\trequirePeerSubnetRoutesWithCollect(c, peerStatus, []netip.Prefix{*route})\n\t\t\t\t\t\t\t} else if peerStatus.ID == \"3\" {\n\t\t\t\t\t\t\t\trequirePeerSubnetRoutesWithCollect(c, peerStatus, []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()})\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\trequirePeerSubnetRoutesWithCollect(c, peerStatus, nil)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}, assertTimeout, 200*time.Millisecond, \"Verifying exit node routes propagated to client\")\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n}\n\n// assertTracerouteViaIPWithCollect is a version of assertTracerouteViaIP that works with assert.CollectT.\nfunc assertTracerouteViaIPWithCollect(c *assert.CollectT, tr util.Traceroute, ip netip.Addr) {\n\tassert.NotNil(c, tr)\n\tassert.True(c, tr.Success)\n\tassert.NoError(c, tr.Err) //nolint:testifylint // using assert.CollectT\n\tassert.NotEmpty(c, tr.Route)\n\t// Since we're inside EventuallyWithT, we can't use require.Greater with t\n\t// but assert.NotEmpty above ensures len(tr.Route) > 0\n\tif len(tr.Route) > 0 {\n\t\tassert.Equal(c, tr.Route[0].IP.String(), ip.String())\n\t}\n}\n\nfunc SortPeerStatus(a, b *ipnstate.PeerStatus) int {\n\treturn cmp.Compare(a.ID, b.ID)\n}\n\nfunc printCurrentRouteMap(t *testing.T, routers ...*ipnstate.PeerStatus) {\n\tt.Helper()\n\tt.Logf(\"== Current routing map ==\")\n\tslices.SortFunc(routers, SortPeerStatus)\n\n\tfor _, router := range routers {\n\t\tgot := filterNonRoutes(router)\n\t\tt.Logf(\"  Router %s (%s) is serving:\", router.HostName, router.ID)\n\t\tt.Logf(\"    AllowedIPs: %v\", got)\n\n\t\tif router.PrimaryRoutes != nil {\n\t\t\tt.Logf(\"    PrimaryRoutes: %v\", router.PrimaryRoutes.AsSlice())\n\t\t}\n\t}\n}\n\n// filterNonRoutes returns the list of routes that a [ipnstate.PeerStatus] is serving.\nfunc filterNonRoutes(status *ipnstate.PeerStatus) []netip.Prefix {\n\treturn slicesx.Filter(nil, status.AllowedIPs.AsSlice(), func(p netip.Prefix) bool {\n\t\tif tsaddr.IsExitRoute(p) {\n\t\t\treturn true\n\t\t}\n\n\t\treturn !slices.ContainsFunc(status.TailscaleIPs, p.Contains)\n\t})\n}\n\nfunc requirePeerSubnetRoutesWithCollect(c *assert.CollectT, status *ipnstate.PeerStatus, expected []netip.Prefix) {\n\tif status.AllowedIPs.Len() <= 2 && len(expected) != 0 {\n\t\tassert.Fail(c, fmt.Sprintf(\"peer %s (%s) has no subnet routes, expected %v\", status.HostName, status.ID, expected))\n\t\treturn\n\t}\n\n\tif len(expected) == 0 {\n\t\texpected = []netip.Prefix{}\n\t}\n\n\tgot := filterNonRoutes(status)\n\n\tif diff := cmpdiff.Diff(expected, got, util.PrefixComparer, cmpopts.EquateEmpty()); diff != \"\" 
{\n\t\tassert.Fail(c, fmt.Sprintf(\"peer %s (%s) subnet routes, unexpected result (-want +got):\\n%s\", status.HostName, status.ID, diff))\n\t}\n}\n\nfunc requireNodeRouteCountWithCollect(c *assert.CollectT, node *v1.Node, announced, approved, subnet int) {\n\tassert.Lenf(c, node.GetAvailableRoutes(), announced, \"expected %q announced routes(%v) to have %d route, had %d\", node.GetName(), node.GetAvailableRoutes(), announced, len(node.GetAvailableRoutes()))\n\tassert.Lenf(c, node.GetApprovedRoutes(), approved, \"expected %q approved routes(%v) to have %d route, had %d\", node.GetName(), node.GetApprovedRoutes(), approved, len(node.GetApprovedRoutes()))\n\tassert.Lenf(c, node.GetSubnetRoutes(), subnet, \"expected %q subnet routes(%v) to have %d route, had %d\", node.GetName(), node.GetSubnetRoutes(), subnet, len(node.GetSubnetRoutes()))\n}\n\n// TestSubnetRouteACLFiltering tests that a node can only access subnet routes\n// that are explicitly allowed in the ACL.\nfunc TestSubnetRouteACLFiltering(t *testing.T) {\n\tIntegrationSkip(t)\n\n\t// Use router and node users for better clarity\n\trouterUser := \"router\"\n\tnodeUser := \"node\"\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 1,\n\t\tUsers:        []string{routerUser, nodeUser},\n\t\tNetworks: map[string][]string{\n\t\t\t\"usernet1\": {routerUser, nodeUser},\n\t\t},\n\t\tExtraService: map[string][]extraServiceFunc{\n\t\t\t\"usernet1\": {Webservice},\n\t\t},\n\t\t// We build the head image with curl and traceroute, so only use\n\t\t// that for this test.\n\t\tVersions: []string{\"head\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoErrorf(t, err, \"failed to create scenario: %s\", err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\t// Set up the ACL policy that allows the node to access only one of the subnet routes (10.10.10.0/24)\n\taclPolicyStr := `{\n\t\t\"hosts\": {\n\t\t\t\"router\": \"100.64.0.1/32\",\n\t\t\t\"node\": \"100.64.0.2/32\"\n\t\t},\n\t\t\"acls\": [\n\t\t\t{\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"src\": [\n\t\t\t\t\t\"*\"\n\t\t\t\t],\n\t\t\t\t\"dst\": [\n\t\t\t\t\t\"router:8000\"\n\t\t\t\t]\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"action\": \"accept\",\n\t\t\t\t\"src\": [\n\t\t\t\t\t\"node\"\n\t\t\t\t],\n\t\t\t\t\"dst\": [\n\t\t\t\t\t\"*:*\"\n\t\t\t\t]\n\t\t\t}\n\t\t]\n\t}`\n\n\troute, err := scenario.SubnetOfNetwork(\"usernet1\")\n\trequire.NoError(t, err)\n\n\tservices, err := scenario.Services(\"usernet1\")\n\trequire.NoError(t, err)\n\trequire.Len(t, services, 1)\n\n\tusernet1, err := scenario.Network(\"usernet1\")\n\trequire.NoError(t, err)\n\n\tweb := services[0]\n\twebip := netip.MustParseAddr(web.GetIPInNetwork(usernet1))\n\tweburl := fmt.Sprintf(\"http://%s/etc/hostname\", webip)\n\tt.Logf(\"webservice: %s, %s\", webip.String(), weburl)\n\n\taclPolicy := &policyv2.Policy{}\n\terr = json.Unmarshal([]byte(aclPolicyStr), aclPolicy)\n\trequire.NoError(t, err)\n\n\terr = scenario.CreateHeadscaleEnv([]tsic.Option{\n\t\ttsic.WithAcceptRoutes(),\n\t}, hsic.WithTestName(\"routeaclfilter\"),\n\t\thsic.WithACLPolicy(aclPolicy),\n\t\thsic.WithPolicyMode(types.PolicyModeDB), // test updates policy at runtime via CLI\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\t// Get the router and node clients by user\n\trouterClients, err := scenario.ListTailscaleClients(routerUser)\n\trequire.NoError(t, err)\n\trequire.Len(t, routerClients, 1)\n\trouterClient := 
routerClients[0]\n\n\tnodeClients, err := scenario.ListTailscaleClients(nodeUser)\n\trequire.NoError(t, err)\n\trequire.Len(t, nodeClients, 1)\n\tnodeClient := nodeClients[0]\n\n\trouterIP, err := routerClient.IPv4()\n\trequire.NoError(t, err, \"failed to get router IPv4\")\n\tnodeIP, err := nodeClient.IPv4()\n\trequire.NoError(t, err, \"failed to get node IPv4\")\n\n\taclPolicy.Hosts = policyv2.Hosts{\n\t\tpolicyv2.Host(routerUser): policyv2.Prefix(must.Get(routerIP.Prefix(32))),\n\t\tpolicyv2.Host(nodeUser):   policyv2.Prefix(must.Get(nodeIP.Prefix(32))),\n\t}\n\taclPolicy.ACLs[1].Destinations = []policyv2.AliasWithPorts{\n\t\taliasWithPorts(prefixp(route.String()), tailcfg.PortRangeAny),\n\t}\n\trequire.NoError(t, headscale.SetPolicy(aclPolicy))\n\n\t// Set up the subnet routes for the router\n\troutes := []netip.Prefix{\n\t\t*route,                                 // This should be accessible by the client\n\t\tnetip.MustParsePrefix(\"10.10.11.0/24\"), // These should NOT be accessible\n\t\tnetip.MustParsePrefix(\"10.10.12.0/24\"),\n\t}\n\n\trouteArg := \"--advertise-routes=\" + routes[0].String() + \",\" + routes[1].String() + \",\" + routes[2].String()\n\tcommand := []string{\n\t\t\"tailscale\",\n\t\t\"set\",\n\t\trouteArg,\n\t}\n\n\t_, _, err = routerClient.Execute(command)\n\trequire.NoErrorf(t, err, \"failed to advertise routes: %s\", err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\tvar routerNode, nodeNode *v1.Node\n\t// Wait for route advertisements to propagate to NodeStore\n\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t// List nodes and verify the router has 3 available routes\n\t\tnodes, err := headscale.NodesByUser()\n\t\tassert.NoError(ct, err)\n\t\tassert.Len(ct, nodes, 2)\n\n\t\t// Find the router node\n\t\trouterNode = nodes[routerUser][0]\n\t\tnodeNode = nodes[nodeUser][0]\n\n\t\tassert.NotNil(ct, routerNode, \"Router node not found\")\n\t\tassert.NotNil(ct, nodeNode, \"Client node not found\")\n\n\t\t// Check that the router has 3 routes available but not approved yet\n\t\trequireNodeRouteCountWithCollect(ct, routerNode, 3, 0, 0)\n\t\trequireNodeRouteCountWithCollect(ct, nodeNode, 0, 0, 0)\n\t}, 10*time.Second, 100*time.Millisecond, \"route advertisements should propagate to router node\")\n\n\t// Approve all routes for the router\n\t_, err = headscale.ApproveRoutes(\n\t\trouterNode.GetId(),\n\t\tutil.MustStringsToPrefixes(routerNode.GetAvailableRoutes()),\n\t)\n\trequire.NoError(t, err)\n\n\t// Wait for route state changes to propagate\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t// List nodes and verify the router has 3 available routes\n\t\tvar err error\n\n\t\tnodes, err := headscale.NodesByUser()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 2)\n\n\t\t// Find the router node\n\t\trouterNode = nodes[routerUser][0]\n\n\t\t// Check that the router has 3 routes now approved and available\n\t\trequireNodeRouteCountWithCollect(c, routerNode, 3, 3, 3)\n\t}, 15*time.Second, 500*time.Millisecond, \"route state changes should propagate\")\n\n\t// Now check the client node status\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodeStatus, err := nodeClient.Status()\n\t\tassert.NoError(c, err)\n\n\t\trouterStatus, err := routerClient.Status()\n\t\tassert.NoError(c, err)\n\n\t\t// Check that the node can see the subnet routes from the router\n\t\trouterPeerStatus := nodeStatus.Peer[routerStatus.Self.PublicKey]\n\n\t\t// The node should only have 1 subnet 
route\n\t\trequirePeerSubnetRoutesWithCollect(c, routerPeerStatus, []netip.Prefix{*route})\n\t}, 5*time.Second, 200*time.Millisecond, \"Verifying node sees filtered subnet routes\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tresult, err := nodeClient.Curl(weburl)\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, result, 13)\n\t}, 60*time.Second, 200*time.Millisecond, \"Verifying node can reach webservice through allowed route\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\ttr, err := nodeClient.Traceroute(webip)\n\t\tassert.NoError(c, err)\n\n\t\tip, err := routerClient.IPv4()\n\t\tif !assert.NoError(c, err, \"failed to get IPv4 for routerClient\") {\n\t\t\treturn\n\t\t}\n\n\t\tassertTracerouteViaIPWithCollect(c, tr, ip)\n\t}, 60*time.Second, 200*time.Millisecond, \"Verifying traceroute goes through router\")\n}\n"
  },
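  {
    "path": "integration/routes_example_test.go",
    "content": "package integration\n\nimport (\n\t\"net/netip\"\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/integration/hsic\"\n\t\"github.com/stretchr/testify/require\"\n)\n\n// This file is an illustrative sketch and not part of the original test\n// suite: it shows the minimal advertise-then-approve flow for a subnet\n// router using the Scenario helpers in this package. The user name, test\n// name and route prefix are arbitrary example values.\nfunc TestExampleSubnetRouterApproval(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tscenario, err := NewScenario(ScenarioSpec{\n\t\tUsers:        []string{\"exampleuser\"},\n\t\tNodesPerUser: 1,\n\t})\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(nil, hsic.WithTestName(\"routesexample\"))\n\trequire.NoError(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\tclients, err := scenario.ListTailscaleClients(\"exampleuser\")\n\trequire.NoError(t, err)\n\trouter := clients[0]\n\n\t// Advertising a prefix makes it show up as an available, but not yet\n\t// approved, route on the headscale side.\n\t_, _, err = router.Execute([]string{\n\t\t\"tailscale\", \"set\", \"--advertise-routes=10.33.0.0/24\",\n\t})\n\trequire.NoError(t, err)\n\n\t// Approval is an explicit server-side step; peers do not receive the\n\t// route until ApproveRoutes has been called. The full tests poll with\n\t// assert.EventuallyWithT for the advertisement to propagate; this\n\t// sketch skips that because it only needs the node ID.\n\tnodes, err := headscale.NodesByUser()\n\trequire.NoError(t, err)\n\trouterNode := nodes[\"exampleuser\"][0]\n\n\t_, err = headscale.ApproveRoutes(\n\t\trouterNode.GetId(),\n\t\t[]netip.Prefix{netip.MustParsePrefix(\"10.33.0.0/24\")},\n\t)\n\trequire.NoError(t, err)\n}\n"
  },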
  {
    "path": "integration/run.sh",
    "content": "#!/usr/bin/env ksh\n\nrun_tests() {\n\ttest_name=$1\n\tnum_tests=$2\n\n\tsuccess_count=0\n\tfailure_count=0\n\truntimes=()\n\n\techo \"-------------------\"\n\techo \"Running Tests for $test_name\"\n\n\tfor ((i = 1; i <= num_tests; i++)); do\n\t\tdocker network prune -f >/dev/null 2>&1\n\t\tdocker rm headscale-test-suite >/dev/null 2>&1 || true\n\t\tdocker kill \"$(docker ps -q)\" >/dev/null 2>&1 || true\n\n\t\techo \"Run $i\"\n\n\t\tstart=$(date +%s)\n\t\tdocker run \\\n\t\t\t--tty --rm \\\n\t\t\t--volume ~/.cache/hs-integration-go:/go \\\n\t\t\t--name headscale-test-suite \\\n\t\t\t--volume \"$PWD:$PWD\" -w \"$PWD\"/integration \\\n\t\t\t--volume /var/run/docker.sock:/var/run/docker.sock \\\n\t\t\t--volume \"$PWD\"/control_logs:/tmp/control \\\n\t\t\t-e \"HEADSCALE_INTEGRATION_POSTGRES\" \\\n\t\t\tgolang:1 \\\n\t\t\tgo test ./... \\\n\t\t\t-failfast \\\n\t\t\t-timeout 120m \\\n\t\t\t-parallel 1 \\\n\t\t\t-run \"^$test_name\\$\" >./control_logs/\"$test_name\"_\"$i\".log 2>&1\n\t\tstatus=$?\n\t\tend=$(date +%s)\n\n\t\truntime=$((end - start))\n\t\truntimes+=(\"$runtime\")\n\n\t\tif [ \"$status\" -eq 0 ]; then\n\t\t\t((success_count++))\n\t\telse\n\t\t\t((failure_count++))\n\t\tfi\n\tdone\n\n\techo \"-------------------\"\n\techo \"Test Summary for $test_name\"\n\techo \"-------------------\"\n\techo \"Total Tests: $num_tests\"\n\techo \"Successful Tests: $success_count\"\n\techo \"Failed Tests: $failure_count\"\n\techo \"Runtimes in seconds: ${runtimes[*]}\"\n\techo\n}\n\n# Check if both arguments are provided\nif [ $# -ne 2 ]; then\n\techo \"Usage: $0 <test_name> <num_tests>\"\n\texit 1\nfi\n\ntest_name=$1\nnum_tests=$2\n\ndocker network prune -f\n\nif [ \"$test_name\" = \"all\" ]; then\n\trg --regexp \"func (Test.+)\\(.*\" ./integration/ --replace '$1' --no-line-number --no-filename --no-heading | sort | while read -r test_name; do\n\t\trun_tests \"$test_name\" \"$num_tests\"\n\tdone\nelse\n\trun_tests \"$test_name\" \"$num_tests\"\nfi\n"
  },
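  {
    "path": "integration/scaffold_example_test.go",
    "content": "package integration\n\nimport (\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/integration/hsic\"\n\t\"github.com/juanfont/headscale/integration/tsic\"\n\t\"github.com/stretchr/testify/require\"\n)\n\n// This file is an illustrative sketch and not part of the original test\n// suite: it spells out, step by step, the manual Scenario lifecycle that\n// CreateHeadscaleEnv otherwise performs in a single call. The user and\n// test names are arbitrary example values.\nfunc TestExampleScenarioLifecycle(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tscenario, err := NewScenario(ScenarioSpec{})\n\trequire.NoError(t, err)\n\t// ShutdownAssertNoPanics also scans the captured container logs for\n\t// panics before tearing everything down.\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\t// 1. Start the control server.\n\theadscale, err := scenario.Headscale(hsic.WithTestName(\"lifecycleexample\"))\n\trequire.NoError(t, err)\n\n\t// 2. Create a user and a reusable, non-ephemeral preauth key for it.\n\tuser, err := scenario.CreateUser(\"demo-user\")\n\trequire.NoError(t, err)\n\n\tkey, err := scenario.CreatePreAuthKey(user.GetId(), true, false)\n\trequire.NoError(t, err)\n\n\t// 3. Boot one tailscale container in the default network and log it in.\n\terr = scenario.CreateTailscaleNodesInUser(\n\t\t\"demo-user\", \"unstable\", 1,\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t)\n\trequire.NoError(t, err)\n\n\terr = scenario.RunTailscaleUp(\"demo-user\", headscale.GetEndpoint(), key.GetKey())\n\trequire.NoError(t, err)\n\n\t// 4. Block until every client sees every other client; with a single\n\t// node this returns immediately, it is shown here for completeness.\n\terr = scenario.WaitForTailscaleSync()\n\trequire.NoError(t, err)\n}\n"
  },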
  {
    "path": "integration/scenario.go",
    "content": "package integration\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/cookiejar\"\n\t\"net/netip\"\n\t\"net/url\"\n\t\"os\"\n\t\"slices\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\t\"github.com/juanfont/headscale/hscontrol/capver\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/juanfont/headscale/integration/dockertestutil\"\n\t\"github.com/juanfont/headscale/integration/dsic\"\n\t\"github.com/juanfont/headscale/integration/hsic\"\n\t\"github.com/juanfont/headscale/integration/integrationutil\"\n\t\"github.com/juanfont/headscale/integration/tsic\"\n\t\"github.com/oauth2-proxy/mockoidc\"\n\t\"github.com/ory/dockertest/v3\"\n\t\"github.com/ory/dockertest/v3/docker\"\n\t\"github.com/puzpuzpuz/xsync/v4\"\n\t\"github.com/samber/lo\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\txmaps \"golang.org/x/exp/maps\"\n\t\"golang.org/x/sync/errgroup\"\n\t\"tailscale.com/envknob\"\n\t\"tailscale.com/util/mak\"\n\t\"tailscale.com/util/multierr\"\n)\n\nconst (\n\tscenarioHashLength = 6\n)\n\nvar usePostgresForTest = envknob.Bool(\"HEADSCALE_INTEGRATION_POSTGRES\")\n\nvar (\n\terrNoHeadscaleAvailable = errors.New(\"no headscale available\")\n\terrNoUserAvailable      = errors.New(\"no user available\")\n\terrNoClientFound        = errors.New(\"client not found\")\n\n\t// AllVersions represents a list of Tailscale versions the suite\n\t// uses to test compatibility with the ControlServer.\n\t//\n\t// The list contains two special cases, \"head\" and \"unstable\" which\n\t// points to the current tip of Tailscale's main branch and the latest\n\t// released unstable version.\n\t//\n\t// The rest of the version represents Tailscale versions that can be\n\t// found in Tailscale's apt repository.\n\tAllVersions = append([]string{\"head\", \"unstable\"}, capver.TailscaleLatestMajorMinor(capver.SupportedMajorMinorVersions, true)...)\n\n\t// MustTestVersions is the minimum set of versions we should test.\n\t// At the moment, this is arbitrarily chosen as:\n\t//\n\t// - Two unstable (HEAD and unstable)\n\t// - Two latest versions\n\t// - Two oldest supported version.\n\tMustTestVersions = append(\n\t\tAllVersions[0:4],\n\t\tAllVersions[len(AllVersions)-2:]...,\n\t)\n)\n\n// User represents a User in the ControlServer and a map of TailscaleClient's\n// associated with the User.\ntype User struct {\n\tClients map[string]TailscaleClient\n\n\tcreateWaitGroup errgroup.Group\n\tjoinWaitGroup   errgroup.Group\n\tsyncWaitGroup   errgroup.Group\n}\n\n// Scenario is a representation of an environment with one ControlServer and\n// one or more User's and its associated TailscaleClients.\n// A Scenario is intended to simplify setting up a new testcase for testing\n// a ControlServer with TailscaleClients.\n// TODO(kradalby): make control server configurable, test correctness with Tailscale SaaS.\ntype Scenario struct {\n\t// TODO(kradalby): support multiple headcales for later, currently only\n\t// use one.\n\tcontrolServers *xsync.Map[string, ControlServer]\n\tderpServers    []*dsic.DERPServerInContainer\n\n\tusers map[string]*User\n\n\tpool          *dockertest.Pool\n\tnetworks      map[string]*dockertest.Network\n\tmockOIDC      scenarioOIDC\n\textraServices map[string][]*dockertest.Resource\n\n\tmu 
sync.Mutex\n\n\tspec          ScenarioSpec\n\tuserToNetwork map[string]*dockertest.Network\n\n\ttestHashPrefix     string\n\ttestDefaultNetwork string\n}\n\n// ScenarioSpec describes the users, nodes, and network topology to\n// set up for a given scenario.\ntype ScenarioSpec struct {\n\t// Users is a list of usernames that will be created.\n\t// Each created user will get nodes equivalent to NodesPerUser.\n\tUsers []string\n\n\t// NodesPerUser is how many nodes should be attached to each user.\n\tNodesPerUser int\n\n\t// Networks, if set, is the separate Docker networks that should be\n\t// created and a list of the users that should be placed in those networks.\n\t// If not set, a single network will be created and all users+nodes will be\n\t// added there.\n\t// Please note that Docker networks are not necessarily routable and\n\t// connections between them might fall back to DERP.\n\tNetworks map[string][]string\n\n\t// ExtraService, if set, is a map of network to additional\n\t// container services that should be set up. These container services\n\t// typically don't run Tailscale, e.g. a web service to test a subnet router.\n\tExtraService map[string][]extraServiceFunc\n\n\t// Versions is a specific list of versions to use for the test.\n\tVersions []string\n\n\t// OIDCSkipUserCreation, if true, skips creating users via headscale CLI\n\t// during environment setup. Useful for OIDC tests where the SSH policy\n\t// references users by name, since OIDC login creates users automatically\n\t// and pre-creating them via CLI causes duplicate user records.\n\tOIDCSkipUserCreation bool\n\n\t// OIDCUsers, if populated, will start a Mock OIDC server and populate\n\t// the user login stack with the given users.\n\t// If the NodesPerUser is set, it should align with this list to ensure\n\t// the correct users are logged in.\n\t// This is because the MockOIDC server can only serve login\n\t// requests based on a queue it has been given on startup.\n\t// We currently only populate it with one login request per user.\n\tOIDCUsers     []mockoidc.MockUser\n\tOIDCAccessTTL time.Duration\n\n\tMaxWait time.Duration\n}\n\nfunc (s *Scenario) prefixedNetworkName(name string) string {\n\treturn s.testHashPrefix + \"-\" + name\n}\n\n// NewScenario creates a test Scenario which can be used to bootstrap a ControlServer with\n// a set of Users and TailscaleClients.\nfunc NewScenario(spec ScenarioSpec) (*Scenario, error) {\n\tpool, err := dockertest.NewPool(\"\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"connecting to docker: %w\", err)\n\t}\n\n\t// Opportunity to clean up unreferenced networks.\n\t// This might be a no op, but it is worth a try as we sometimes\n\t// don't clean up nicely after ourselves.\n\t_ = dockertestutil.CleanUnreferencedNetworks(pool)\n\t_ = dockertestutil.CleanImagesInCI(pool)\n\n\tif spec.MaxWait == 0 {\n\t\tpool.MaxWait = dockertestMaxWait()\n\t} else {\n\t\tpool.MaxWait = spec.MaxWait\n\t}\n\n\ttestHashPrefix := \"hs-\" + util.MustGenerateRandomStringDNSSafe(scenarioHashLength)\n\ts := &Scenario{\n\t\tcontrolServers: xsync.NewMap[string, ControlServer](),\n\t\tusers:          make(map[string]*User),\n\n\t\tpool: pool,\n\t\tspec: spec,\n\n\t\ttestHashPrefix:     testHashPrefix,\n\t\ttestDefaultNetwork: testHashPrefix + \"-default\",\n\t}\n\n\tvar userToNetwork map[string]*dockertest.Network\n\n\tif len(spec.Networks) != 0 {\n\t\tfor name, users := range s.spec.Networks {\n\t\t\tnetworkName := testHashPrefix + \"-\" + name\n\n\t\t\tnetwork, err := 
s.AddNetwork(networkName)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tfor _, user := range users {\n\t\t\t\tif n2, ok := userToNetwork[user]; ok {\n\t\t\t\t\treturn nil, fmt.Errorf(\"users can only have nodes placed in one network: %s into %s but already in %s\", user, network.Network.Name, n2.Network.Name) //nolint:err113\n\t\t\t\t}\n\n\t\t\t\tmak.Set(&userToNetwork, user, network)\n\t\t\t}\n\t\t}\n\t} else {\n\t\t_, err := s.AddNetwork(s.testDefaultNetwork)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tfor network, extras := range spec.ExtraService {\n\t\tfor _, extra := range extras {\n\t\t\tsvc, err := extra(s, network)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tmak.Set(&s.extraServices, s.prefixedNetworkName(network), append(s.extraServices[s.prefixedNetworkName(network)], svc))\n\t\t}\n\t}\n\n\ts.userToNetwork = userToNetwork\n\n\tif len(spec.OIDCUsers) != 0 {\n\t\tttl := defaultAccessTTL\n\t\tif spec.OIDCAccessTTL != 0 {\n\t\t\tttl = spec.OIDCAccessTTL\n\t\t}\n\n\t\terr = s.runMockOIDC(ttl, spec.OIDCUsers)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn s, nil\n}\n\nfunc (s *Scenario) AddNetwork(name string) (*dockertest.Network, error) {\n\tnetwork, err := dockertestutil.GetFirstOrCreateNetwork(s.pool, name)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating or getting network: %w\", err)\n\t}\n\n\t// We run the test suite in a docker container that calls a couple of endpoints for\n\t// readiness checks, this ensures that we can run the tests with individual networks\n\t// and have the client reach the different containers.\n\t// The container name includes the run ID to support multiple concurrent test runs.\n\ttestSuiteName := \"headscale-test-suite\"\n\tif runID := dockertestutil.GetIntegrationRunID(); runID != \"\" {\n\t\ttestSuiteName = \"headscale-test-suite-\" + runID\n\t}\n\n\terr = dockertestutil.AddContainerToNetwork(s.pool, network, testSuiteName)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"adding test suite container to network: %w\", err)\n\t}\n\n\tmak.Set(&s.networks, name, network)\n\n\treturn network, nil\n}\n\nfunc (s *Scenario) Networks() []*dockertest.Network {\n\tif len(s.networks) == 0 {\n\t\tpanic(\"Scenario.Networks called with empty network list\")\n\t}\n\n\treturn xmaps.Values(s.networks)\n}\n\nfunc (s *Scenario) Network(name string) (*dockertest.Network, error) {\n\tnet, ok := s.networks[s.prefixedNetworkName(name)]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"no network named: %s\", name) //nolint:err113\n\t}\n\n\treturn net, nil\n}\n\nfunc (s *Scenario) SubnetOfNetwork(name string) (*netip.Prefix, error) {\n\tnet, ok := s.networks[s.prefixedNetworkName(name)]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"no network named: %s\", name) //nolint:err113\n\t}\n\n\tif len(net.Network.IPAM.Config) == 0 {\n\t\treturn nil, fmt.Errorf(\"no IPAM config found in network: %s\", name) //nolint:err113\n\t}\n\n\tpref, err := netip.ParsePrefix(net.Network.IPAM.Config[0].Subnet)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &pref, nil\n}\n\nfunc (s *Scenario) Services(name string) ([]*dockertest.Resource, error) {\n\tres, ok := s.extraServices[s.prefixedNetworkName(name)]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"no network named: %s\", name) //nolint:err113\n\t}\n\n\treturn res, nil\n}\n\nfunc (s *Scenario) ShutdownAssertNoPanics(t *testing.T) {\n\tt.Helper()\n\n\tdefer func() { _ = dockertestutil.CleanUnreferencedNetworks(s.pool) }()\n\tdefer func() { _ = 
dockertestutil.CleanImagesInCI(s.pool) }()\n\n\ts.controlServers.Range(func(_ string, control ControlServer) bool {\n\t\tstdoutPath, stderrPath, err := control.Shutdown()\n\t\tif err != nil {\n\t\t\tlog.Printf(\n\t\t\t\t\"shutting down control: %s\",\n\t\t\t\tfmt.Errorf(\"tearing down control: %w\", err),\n\t\t\t)\n\t\t}\n\n\t\tif t != nil {\n\t\t\tstdout, err := os.ReadFile(stdoutPath)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.NotContains(t, string(stdout), \"panic\")\n\n\t\t\tstderr, err := os.ReadFile(stderrPath)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.NotContains(t, string(stderr), \"panic\")\n\t\t}\n\n\t\treturn true\n\t})\n\n\ts.mu.Lock()\n\n\tfor userName, user := range s.users {\n\t\tfor _, client := range user.Clients {\n\t\t\tlog.Printf(\"removing client %s in user %s\", client.Hostname(), userName)\n\n\t\t\tstdoutPath, stderrPath, err := client.Shutdown()\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"tearing down client: %s\", err)\n\t\t\t}\n\n\t\t\tif t != nil {\n\t\t\t\tstdout, err := os.ReadFile(stdoutPath)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.NotContains(t, string(stdout), \"panic\")\n\n\t\t\t\tstderr, err := os.ReadFile(stderrPath)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.NotContains(t, string(stderr), \"panic\")\n\t\t\t}\n\t\t}\n\t}\n\n\ts.mu.Unlock()\n\n\tfor _, derp := range s.derpServers {\n\t\terr := derp.Shutdown()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"tearing down derp server: %s\", err)\n\t\t}\n\t}\n\n\tfor _, svcs := range s.extraServices {\n\t\tfor _, svc := range svcs {\n\t\t\terr := svc.Close()\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"tearing down service %q: %s\", svc.Container.Name, err)\n\t\t\t}\n\t\t}\n\t}\n\n\tif s.mockOIDC.r != nil {\n\t\terr := s.mockOIDC.r.Close()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"tearing down oidc server: %s\", err)\n\t\t}\n\t}\n\n\tfor _, network := range s.networks {\n\t\terr := network.Close()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"tearing down network: %s\", err)\n\t\t}\n\t}\n}\n\n// Shutdown shuts down and cleans up all the containers (ControlServer, TailscaleClient)\n// and networks associated with it.\n// In addition, it will save the logs of the ControlServer to `/tmp/control` in the\n// environment running the tests.\nfunc (s *Scenario) Shutdown() {\n\ts.ShutdownAssertNoPanics(nil)\n}\n\n// Users returns the names of all users associated with the Scenario.\nfunc (s *Scenario) Users() []string {\n\tusers := make([]string, 0, len(s.users))\n\tfor user := range s.users {\n\t\tusers = append(users, user)\n\t}\n\n\treturn users\n}\n\n/// Headscale related stuff\n// Note: These functions assume that there is a _single_ headscale instance for now\n\n// Headscale returns a ControlServer instance based on hsic (HeadscaleInContainer).\n// If the Scenario already has an instance, the pointer to the running container\n// will be returned; otherwise a new instance will be created.\n// TODO(kradalby): make port and headscale configurable, multiple instances support?\nfunc (s *Scenario) Headscale(opts ...hsic.Option) (ControlServer, error) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tif headscale, ok := s.controlServers.Load(\"headscale\"); ok {\n\t\treturn headscale, nil\n\t}\n\n\tif usePostgresForTest {\n\t\topts = append(opts, hsic.WithPostgres())\n\t}\n\n\theadscale, err := hsic.New(s.pool, s.Networks(), opts...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating headscale container: %w\", err)\n\t}\n\n\terr = headscale.WaitForRunning()\n\tif err != nil 
{\n\t\treturn nil, fmt.Errorf(\"reaching headscale container: %w\", err)\n\t}\n\n\ts.controlServers.Store(\"headscale\", headscale)\n\n\treturn headscale, nil\n}\n\n// Pool returns the dockertest pool for the scenario.\nfunc (s *Scenario) Pool() *dockertest.Pool {\n\treturn s.pool\n}\n\n// GetOrCreateUser gets or creates a user in the scenario.\nfunc (s *Scenario) GetOrCreateUser(userStr string) *User {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tif user, ok := s.users[userStr]; ok {\n\t\treturn user\n\t}\n\n\tuser := &User{\n\t\tClients: make(map[string]TailscaleClient),\n\t}\n\ts.users[userStr] = user\n\n\treturn user\n}\n\n// CreatePreAuthKey creates a \"pre authorised key\" in the\n// Headscale instance on behalf of the Scenario.\nfunc (s *Scenario) CreatePreAuthKey(\n\tuser uint64,\n\treusable bool,\n\tephemeral bool,\n) (*v1.PreAuthKey, error) {\n\tif headscale, err := s.Headscale(); err == nil { //nolint:noinlineerr\n\t\tkey, err := headscale.CreateAuthKey(user, reusable, ephemeral)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"creating auth key: %w\", err)\n\t\t}\n\n\t\treturn key, nil\n\t}\n\n\treturn nil, fmt.Errorf(\"creating auth key: %w\", errNoHeadscaleAvailable)\n}\n\n// CreatePreAuthKeyWithOptions creates a \"pre authorised key\" with the specified options\n// in the Headscale instance on behalf of the Scenario.\nfunc (s *Scenario) CreatePreAuthKeyWithOptions(opts hsic.AuthKeyOptions) (*v1.PreAuthKey, error) {\n\theadscale, err := s.Headscale()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating preauth key with options: %w\", errNoHeadscaleAvailable)\n\t}\n\n\tkey, err := headscale.CreateAuthKeyWithOptions(opts)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating preauth key with options: %w\", err)\n\t}\n\n\treturn key, nil\n}\n\n// CreatePreAuthKeyWithTags creates a \"pre authorised key\" with the specified tags\n// in the Headscale instance on behalf of the Scenario.\nfunc (s *Scenario) CreatePreAuthKeyWithTags(\n\tuser uint64,\n\treusable bool,\n\tephemeral bool,\n\ttags []string,\n) (*v1.PreAuthKey, error) {\n\theadscale, err := s.Headscale()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating preauth key with tags: %w\", errNoHeadscaleAvailable)\n\t}\n\n\tkey, err := headscale.CreateAuthKeyWithTags(user, reusable, ephemeral, tags)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating preauth key with tags: %w\", err)\n\t}\n\n\treturn key, nil\n}\n\n// CreateUser creates a User in the\n// Headscale instance on behalf of the Scenario.\nfunc (s *Scenario) CreateUser(user string) (*v1.User, error) {\n\tif headscale, err := s.Headscale(); err == nil { //nolint:noinlineerr\n\t\tu, err := headscale.CreateUser(user)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"creating user: %w\", err)\n\t\t}\n\n\t\ts.mu.Lock()\n\t\ts.users[user] = &User{\n\t\t\tClients: make(map[string]TailscaleClient),\n\t\t}\n\t\ts.mu.Unlock()\n\n\t\treturn u, nil\n\t}\n\n\treturn nil, fmt.Errorf(\"creating user: %w\", errNoHeadscaleAvailable)\n}\n\n/// Client related stuff\n\nfunc (s *Scenario) CreateTailscaleNode(\n\tversion string,\n\topts ...tsic.Option,\n) (TailscaleClient, error) {\n\theadscale, err := s.Headscale()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating tailscale node (version: %s): %w\", version, err)\n\t}\n\n\tcert := headscale.GetCert()\n\thostname := headscale.GetHostname()\n\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\topts = 
append(opts,\n\t\ttsic.WithCACert(cert),\n\t\ttsic.WithHeadscaleName(hostname),\n\t)\n\n\ttsClient, err := tsic.New(\n\t\ts.pool,\n\t\tversion,\n\t\topts...,\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"creating tailscale node: %w\",\n\t\t\terr,\n\t\t)\n\t}\n\n\terr = tsClient.WaitForNeedsLogin(integrationutil.PeerSyncTimeout())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"waiting for tailscaled (%s) to need login: %w\",\n\t\t\ttsClient.Hostname(),\n\t\t\terr,\n\t\t)\n\t}\n\n\treturn tsClient, nil\n}\n\n// CreateTailscaleNodesInUser creates and adds a new TailscaleClient to a\n// User in the Scenario.\nfunc (s *Scenario) CreateTailscaleNodesInUser(\n\tuserStr string,\n\trequestedVersion string,\n\tcount int,\n\topts ...tsic.Option,\n) error {\n\tif user, ok := s.users[userStr]; ok {\n\t\tvar versions []string\n\n\t\tfor i := range count {\n\t\t\tversion := requestedVersion\n\t\t\tif requestedVersion == \"all\" {\n\t\t\t\tif s.spec.Versions != nil {\n\t\t\t\t\tversion = s.spec.Versions[i%len(s.spec.Versions)]\n\t\t\t\t} else {\n\t\t\t\t\tversion = MustTestVersions[i%len(MustTestVersions)]\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tversions = append(versions, version)\n\n\t\t\theadscale, err := s.Headscale()\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"creating tailscale node (version: %s): %w\", version, err)\n\t\t\t}\n\n\t\t\tcert := headscale.GetCert()\n\t\t\thostname := headscale.GetHostname()\n\n\t\t\t// Determine which network this tailscale client will be in\n\t\t\tvar network *dockertest.Network\n\t\t\tif s.userToNetwork != nil && s.userToNetwork[userStr] != nil {\n\t\t\t\tnetwork = s.userToNetwork[userStr]\n\t\t\t} else {\n\t\t\t\tnetwork = s.networks[s.testDefaultNetwork]\n\t\t\t}\n\n\t\t\t// Get headscale IP in this network for /etc/hosts fallback DNS\n\t\t\theadscaleIP := headscale.GetIPInNetwork(network)\n\t\t\textraHosts := []string{hostname + \":\" + headscaleIP}\n\n\t\t\ts.mu.Lock()\n\n\t\t\topts = append(opts,\n\t\t\t\ttsic.WithCACert(cert),\n\t\t\t\ttsic.WithHeadscaleName(hostname),\n\t\t\t\ttsic.WithExtraHosts(extraHosts),\n\t\t\t)\n\n\t\t\ts.mu.Unlock()\n\n\t\t\tuser.createWaitGroup.Go(func() error {\n\t\t\t\ts.mu.Lock()\n\t\t\t\ttsClient, err := tsic.New(\n\t\t\t\t\ts.pool,\n\t\t\t\t\tversion,\n\t\t\t\t\topts...,\n\t\t\t\t)\n\t\t\t\ts.mu.Unlock()\n\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\n\t\t\t\t\t\t\"creating tailscale node: %w\",\n\t\t\t\t\t\terr,\n\t\t\t\t\t)\n\t\t\t\t}\n\n\t\t\t\terr = tsClient.WaitForNeedsLogin(integrationutil.PeerSyncTimeout())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\n\t\t\t\t\t\t\"waiting for tailscaled (%s) to need login: %w\",\n\t\t\t\t\t\ttsClient.Hostname(),\n\t\t\t\t\t\terr,\n\t\t\t\t\t)\n\t\t\t\t}\n\n\t\t\t\ts.mu.Lock()\n\n\t\t\t\tuser.Clients[tsClient.Hostname()] = tsClient\n\n\t\t\t\ts.mu.Unlock()\n\n\t\t\t\treturn nil\n\t\t\t})\n\t\t}\n\n\t\terr := user.createWaitGroup.Wait()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tlog.Printf(\"testing versions %v, MustTestVersions %v\", lo.Uniq(versions), MustTestVersions)\n\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"adding tailscale node: %w\", errNoUserAvailable)\n}\n\n// RunTailscaleUp will log in all of the TailscaleClients associated with a\n// User to the given ControlServer (by URL).\nfunc (s *Scenario) RunTailscaleUp(\n\tuserStr, loginServer, authKey string,\n) error {\n\tif user, ok := s.users[userStr]; ok {\n\t\tfor _, client := range user.Clients {\n\t\t\tc := client\n\n\t\t\tuser.joinWaitGroup.Go(func() error 
{\n\t\t\t\treturn c.Login(loginServer, authKey)\n\t\t\t})\n\t\t}\n\n\t\terr := user.joinWaitGroup.Wait()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, client := range user.Clients {\n\t\t\terr := client.WaitForRunning(integrationutil.PeerSyncTimeout())\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"%s bringing up tailscale node: %w\", client.Hostname(), err)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"bringing up tailscale node: %w\", errNoUserAvailable)\n}\n\n// CountTailscale returns the total number of TailscaleClients in a Scenario.\n// This is the sum of Users x TailscaleClients.\nfunc (s *Scenario) CountTailscale() int {\n\tcount := 0\n\n\tfor _, user := range s.users {\n\t\tcount += len(user.Clients)\n\t}\n\n\treturn count\n}\n\n// WaitForTailscaleSync blocks execution until every TailscaleClient reports\n// having all other TailscaleClients present in its netmap.NetworkMap.\nfunc (s *Scenario) WaitForTailscaleSync() error {\n\ttsCount := s.CountTailscale()\n\n\terr := s.WaitForTailscaleSyncWithPeerCount(tsCount-1, integrationutil.PeerSyncTimeout(), integrationutil.PeerSyncRetryInterval())\n\tif err != nil {\n\t\tfor _, user := range s.users {\n\t\t\tfor _, client := range user.Clients {\n\t\t\t\tpeers, allOnline, _ := client.FailingPeersAsString()\n\t\t\t\tif !allOnline {\n\t\t\t\t\tlog.Println(peers)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn err\n}\n\n// WaitForTailscaleSyncPerUser blocks execution until each TailscaleClient has the expected\n// number of peers for its user. This is useful for policies like autogroup:self where nodes\n// only see same-user peers, not all nodes in the network.\nfunc (s *Scenario) WaitForTailscaleSyncPerUser(timeout, retryInterval time.Duration) error {\n\tvar allErrors []error\n\n\tfor _, user := range s.users {\n\t\t// Calculate expected peer count: number of nodes in this user minus 1 (self)\n\t\texpectedPeers := len(user.Clients) - 1\n\n\t\tfor _, client := range user.Clients {\n\t\t\tc := client\n\t\t\texpectedCount := expectedPeers\n\n\t\t\tuser.syncWaitGroup.Go(func() error {\n\t\t\t\treturn c.WaitForPeers(expectedCount, timeout, retryInterval)\n\t\t\t})\n\t\t}\n\n\t\terr := user.syncWaitGroup.Wait()\n\t\tif err != nil {\n\t\t\tallErrors = append(allErrors, err)\n\t\t}\n\t}\n\n\tif len(allErrors) > 0 {\n\t\treturn multierr.New(allErrors...)\n\t}\n\n\treturn nil\n}\n\n// WaitForTailscaleSyncWithPeerCount blocks execution until every TailscaleClient reports\n// having the given number of peers present in its netmap.NetworkMap.\nfunc (s *Scenario) WaitForTailscaleSyncWithPeerCount(peerCount int, timeout, retryInterval time.Duration) error {\n\tvar allErrors []error\n\n\tfor _, user := range s.users {\n\t\tfor _, client := range user.Clients {\n\t\t\tc := client\n\n\t\t\tuser.syncWaitGroup.Go(func() error {\n\t\t\t\treturn c.WaitForPeers(peerCount, timeout, retryInterval)\n\t\t\t})\n\t\t}\n\n\t\terr := user.syncWaitGroup.Wait()\n\t\tif err != nil {\n\t\t\tallErrors = append(allErrors, err)\n\t\t}\n\t}\n\n\tif len(allErrors) > 0 {\n\t\treturn multierr.New(allErrors...)\n\t}\n\n\treturn nil\n}\n\nfunc (s *Scenario) CreateHeadscaleEnvWithLoginURL(\n\ttsOpts []tsic.Option,\n\topts ...hsic.Option,\n) error {\n\treturn s.createHeadscaleEnv(true, tsOpts, opts...)\n}\n\nfunc (s *Scenario) CreateHeadscaleEnv(\n\ttsOpts []tsic.Option,\n\topts ...hsic.Option,\n) error {\n\treturn s.createHeadscaleEnv(false, tsOpts, opts...)\n}\n\n// createHeadscaleEnv starts the headscale environment and the clients\n// 
according to the ScenarioSpec passed to the Scenario.\nfunc (s *Scenario) createHeadscaleEnv(\n\twithURL bool,\n\ttsOpts []tsic.Option,\n\topts ...hsic.Option,\n) error {\n\treturn s.createHeadscaleEnvWithTags(withURL, tsOpts, nil, \"\", opts...)\n}\n\n// createHeadscaleEnvWithTags starts the headscale environment and the clients\n// according to the ScenarioSpec passed to the Scenario. If preAuthKeyTags is\n// non-empty and withURL is false, the tags will be applied to the PreAuthKey\n// (tags-as-identity model).\n//\n// For webauth (withURL=true), if webauthTagUser is non-empty and preAuthKeyTags\n// is non-empty, only nodes belonging to that user will request tags via\n// --advertise-tags. This is necessary because tagOwners ACL controls which\n// users can request specific tags.\nfunc (s *Scenario) createHeadscaleEnvWithTags(\n\twithURL bool,\n\ttsOpts []tsic.Option,\n\tpreAuthKeyTags []string,\n\twebauthTagUser string,\n\topts ...hsic.Option,\n) error {\n\theadscale, err := s.Headscale(opts...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, user := range s.spec.Users {\n\t\tvar u *v1.User\n\n\t\tif s.spec.OIDCSkipUserCreation {\n\t\t\t// Only register locally — OIDC login will create the headscale user.\n\t\t\ts.mu.Lock()\n\t\t\ts.users[user] = &User{Clients: make(map[string]TailscaleClient)}\n\t\t\ts.mu.Unlock()\n\t\t} else {\n\t\t\tu, err = s.CreateUser(user)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tvar userOpts []tsic.Option\n\t\tif s.userToNetwork != nil {\n\t\t\tuserOpts = append(tsOpts, tsic.WithNetwork(s.userToNetwork[user]))\n\t\t} else {\n\t\t\tuserOpts = append(tsOpts, tsic.WithNetwork(s.networks[s.testDefaultNetwork]))\n\t\t}\n\n\t\t// For webauth with tags, only apply tags to the specified webauthTagUser\n\t\t// (other users may not be authorized via tagOwners)\n\t\tif withURL && webauthTagUser != \"\" && len(preAuthKeyTags) > 0 && user == webauthTagUser {\n\t\t\tuserOpts = append(userOpts, tsic.WithTags(preAuthKeyTags))\n\t\t}\n\n\t\terr = s.CreateTailscaleNodesInUser(user, \"all\", s.spec.NodesPerUser, userOpts...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif withURL {\n\t\t\terr = s.RunTailscaleUpWithURL(user, headscale.GetEndpoint())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\t// Use tagged PreAuthKey if tags are provided (tags-as-identity model)\n\t\t\tvar key *v1.PreAuthKey\n\t\t\tif len(preAuthKeyTags) > 0 {\n\t\t\t\tkey, err = s.CreatePreAuthKeyWithTags(u.GetId(), true, false, preAuthKeyTags)\n\t\t\t} else {\n\t\t\t\tkey, err = s.CreatePreAuthKey(u.GetId(), true, false)\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\terr = s.RunTailscaleUp(user, headscale.GetEndpoint(), key.GetKey())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (s *Scenario) RunTailscaleUpWithURL(userStr, loginServer string) error {\n\tlog.Printf(\"running tailscale up for user %s\", userStr)\n\n\tif user, ok := s.users[userStr]; ok {\n\t\tfor _, client := range user.Clients {\n\t\t\ttsc := client\n\n\t\t\tuser.joinWaitGroup.Go(func() error {\n\t\t\t\tloginURL, err := tsc.LoginWithURL(loginServer)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"%s running tailscale up: %s\", tsc.Hostname(), err)\n\t\t\t\t}\n\n\t\t\t\tbody, err := doLoginURL(tsc.Hostname(), loginURL)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t// If the URL is not a OIDC URL, then we need to\n\t\t\t\t// run the register command to fully log in the client.\n\t\t\t\tif 
!strings.Contains(loginURL.String(), \"/oidc/\") {\n\t\t\t\t\t_ = s.runHeadscaleRegister(userStr, body)\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t})\n\n\t\t\tlog.Printf(\"client %s is ready\", client.Hostname())\n\t\t}\n\n\t\terr := user.joinWaitGroup.Wait()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, client := range user.Clients {\n\t\t\terr := client.WaitForRunning(integrationutil.PeerSyncTimeout())\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\n\t\t\t\t\t\"%s tailscale node has not reached running: %w\",\n\t\t\t\t\tclient.Hostname(),\n\t\t\t\t\terr,\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"bringing up tailscale node: %w\", errNoUserAvailable)\n}\n\ntype debugJar struct {\n\tinner *cookiejar.Jar\n\tmu    sync.RWMutex\n\tstore map[string]map[string]map[string]*http.Cookie // domain -> path -> name -> cookie\n}\n\nfunc newDebugJar() (*debugJar, error) {\n\tjar, err := cookiejar.New(nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &debugJar{\n\t\tinner: jar,\n\t\tstore: make(map[string]map[string]map[string]*http.Cookie),\n\t}, nil\n}\n\nfunc (j *debugJar) SetCookies(u *url.URL, cookies []*http.Cookie) {\n\tj.inner.SetCookies(u, cookies)\n\n\tj.mu.Lock()\n\tdefer j.mu.Unlock()\n\n\tfor _, c := range cookies {\n\t\tif c == nil || c.Name == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tdomain := c.Domain\n\t\tif domain == \"\" {\n\t\t\tdomain = u.Hostname()\n\t\t}\n\n\t\tpath := c.Path\n\t\tif path == \"\" {\n\t\t\tpath = \"/\"\n\t\t}\n\n\t\tif _, ok := j.store[domain]; !ok {\n\t\t\tj.store[domain] = make(map[string]map[string]*http.Cookie)\n\t\t}\n\n\t\tif _, ok := j.store[domain][path]; !ok {\n\t\t\tj.store[domain][path] = make(map[string]*http.Cookie)\n\t\t}\n\n\t\tj.store[domain][path][c.Name] = copyCookie(c)\n\t}\n}\n\nfunc (j *debugJar) Cookies(u *url.URL) []*http.Cookie {\n\treturn j.inner.Cookies(u)\n}\n\nfunc (j *debugJar) Dump(w io.Writer) {\n\tj.mu.RLock()\n\tdefer j.mu.RUnlock()\n\n\tfor domain, paths := range j.store {\n\t\tfmt.Fprintf(w, \"Domain: %s\\n\", domain)\n\n\t\tfor path, byName := range paths {\n\t\t\tfmt.Fprintf(w, \"  Path: %s\\n\", path)\n\n\t\t\tfor _, c := range byName {\n\t\t\t\tfmt.Fprintf(\n\t\t\t\t\tw, \"    %s=%s; Expires=%v; Secure=%v; HttpOnly=%v; SameSite=%v\\n\",\n\t\t\t\t\tc.Name, c.Value, c.Expires, c.Secure, c.HttpOnly, c.SameSite,\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc copyCookie(c *http.Cookie) *http.Cookie {\n\tcc := *c\n\treturn &cc\n}\n\nfunc newLoginHTTPClient(hostname string) (*http.Client, error) {\n\thc := &http.Client{\n\t\tTransport: LoggingRoundTripper{Hostname: hostname},\n\t}\n\n\tjar, err := newDebugJar()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%s creating cookiejar: %w\", hostname, err)\n\t}\n\n\thc.Jar = jar\n\n\treturn hc, nil\n}\n\n// doLoginURL visits the given login URL and returns the body as a string.\nfunc doLoginURL(hostname string, loginURL *url.URL) (string, error) {\n\tlog.Printf(\"%s login url: %s\\n\", hostname, loginURL.String())\n\n\thc, err := newLoginHTTPClient(hostname)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tbody, _, err := doLoginURLWithClient(hostname, loginURL, hc, true)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn body, nil\n}\n\n// doLoginURLWithClient performs the login request using the provided HTTP client.\n// When followRedirects is false, it will return the first redirect without following it.\nfunc doLoginURLWithClient(hostname string, loginURL *url.URL, hc *http.Client, followRedirects bool) 
(\n\tstring,\n\t*url.URL,\n\terror,\n) {\n\tif hc == nil {\n\t\treturn \"\", nil, fmt.Errorf(\"%s http client is nil\", hostname) //nolint:err113\n\t}\n\n\tif loginURL == nil {\n\t\treturn \"\", nil, fmt.Errorf(\"%s login url is nil\", hostname) //nolint:err113\n\t}\n\n\tlog.Printf(\"%s logging in with url: %s\", hostname, loginURL.String())\n\n\tctx := context.Background()\n\n\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, loginURL.String(), nil)\n\tif err != nil {\n\t\treturn \"\", nil, fmt.Errorf(\"%s creating http request: %w\", hostname, err)\n\t}\n\n\toriginalRedirect := hc.CheckRedirect\n\tif !followRedirects {\n\t\thc.CheckRedirect = func(req *http.Request, via []*http.Request) error {\n\t\t\treturn http.ErrUseLastResponse\n\t\t}\n\t}\n\n\tdefer func() {\n\t\thc.CheckRedirect = originalRedirect\n\t}()\n\n\tresp, err := hc.Do(req)\n\tif err != nil {\n\t\treturn \"\", nil, fmt.Errorf(\"%s sending http request: %w\", hostname, err)\n\t}\n\tdefer resp.Body.Close()\n\n\tbodyBytes, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", nil, fmt.Errorf(\"%s reading response body: %w\", hostname, err)\n\t}\n\n\tbody := string(bodyBytes)\n\n\tvar redirectURL *url.URL\n\tif resp.StatusCode >= http.StatusMultipleChoices && resp.StatusCode < http.StatusBadRequest {\n\t\tredirectURL, err = resp.Location()\n\t\tif err != nil {\n\t\t\treturn body, nil, fmt.Errorf(\"%s resolving redirect location: %w\", hostname, err)\n\t\t}\n\t}\n\n\tif followRedirects && resp.StatusCode != http.StatusOK {\n\t\tlog.Printf(\"body: %s\", body)\n\n\t\treturn body, redirectURL, fmt.Errorf(\"%s unexpected status code %d\", hostname, resp.StatusCode) //nolint:err113\n\t}\n\n\tif resp.StatusCode >= http.StatusBadRequest {\n\t\tlog.Printf(\"body: %s\", body)\n\n\t\treturn body, redirectURL, fmt.Errorf(\"%s unexpected status code %d\", hostname, resp.StatusCode) //nolint:err113\n\t}\n\n\tif hc.Jar != nil {\n\t\tif jar, ok := hc.Jar.(*debugJar); ok {\n\t\t\tjar.Dump(os.Stdout)\n\t\t} else {\n\t\t\tlog.Printf(\"cookies: %+v\", hc.Jar.Cookies(loginURL))\n\t\t}\n\t}\n\n\treturn body, redirectURL, nil\n}\n\nvar errParseAuthPage = errors.New(\"parsing auth page\")\n\nfunc (s *Scenario) runHeadscaleRegister(userStr string, body string) error {\n\t// see api.go HTML template\n\tcodeSep := strings.Split(body, \"</code>\")\n\tif len(codeSep) != 2 {\n\t\treturn errParseAuthPage\n\t}\n\n\tkeySep := strings.Split(codeSep[0], \"--auth-id \")\n\tif len(keySep) != 2 {\n\t\treturn errParseAuthPage\n\t}\n\n\tkey := keySep[1]\n\tkey = strings.SplitN(key, \" \", 2)[0]\n\tlog.Printf(\"registering node %s\", key)\n\n\tif headscale, err := s.Headscale(); err == nil { //nolint:noinlineerr\n\t\t_, err = headscale.Execute(\n\t\t\t[]string{\"headscale\", \"auth\", \"register\", \"--user\", userStr, \"--auth-id\", key},\n\t\t)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"registering node: %s\", err)\n\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"finding headscale: %w\", errNoHeadscaleAvailable)\n}\n\ntype LoggingRoundTripper struct {\n\tHostname string\n}\n\nfunc (t LoggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {\n\tnoTls := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // nolint\n\t}\n\n\tresp, err := noTls.RoundTrip(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.Printf(`\n---\n%s - method: %s | url: %s\n%s - status: %d | cookies: %+v\n---\n`, t.Hostname, req.Method, req.URL.String(), t.Hostname, resp.StatusCode, 
resp.Cookies())\n\n\treturn resp, nil\n}\n\n// GetIPs returns all netip.Addr of TailscaleClients associated with a User\n// in a Scenario.\nfunc (s *Scenario) GetIPs(user string) ([]netip.Addr, error) {\n\tvar ips []netip.Addr\n\n\tif ns, ok := s.users[user]; ok {\n\t\tfor _, client := range ns.Clients {\n\t\t\tclientIps, err := client.IPs()\n\t\t\tif err != nil {\n\t\t\t\treturn ips, fmt.Errorf(\"getting IPs: %w\", err)\n\t\t\t}\n\n\t\t\tips = append(ips, clientIps...)\n\t\t}\n\n\t\treturn ips, nil\n\t}\n\n\treturn ips, fmt.Errorf(\"getting IPs: %w\", errNoUserAvailable)\n}\n\n// GetClients returns all TailscaleClients associated with a User in a Scenario.\nfunc (s *Scenario) GetClients(user string) ([]TailscaleClient, error) {\n\tvar clients []TailscaleClient\n\n\tif ns, ok := s.users[user]; ok {\n\t\tfor _, client := range ns.Clients {\n\t\t\tclients = append(clients, client)\n\t\t}\n\n\t\treturn clients, nil\n\t}\n\n\treturn clients, fmt.Errorf(\"getting clients: %w\", errNoUserAvailable)\n}\n\n// ListTailscaleClients returns a list of TailscaleClients given the Users\n// passed as parameters.\nfunc (s *Scenario) ListTailscaleClients(users ...string) ([]TailscaleClient, error) {\n\tvar allClients []TailscaleClient\n\n\tif len(users) == 0 {\n\t\tusers = s.Users()\n\t}\n\n\tfor _, user := range users {\n\t\tclients, err := s.GetClients(user)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tallClients = append(allClients, clients...)\n\t}\n\n\treturn allClients, nil\n}\n\n// FindTailscaleClientByIP returns a TailscaleClient associated with an IP address\n// if it exists.\nfunc (s *Scenario) FindTailscaleClientByIP(ip netip.Addr) (TailscaleClient, error) {\n\tclients, err := s.ListTailscaleClients()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, client := range clients {\n\t\tips, _ := client.IPs()\n\t\tif slices.Contains(ips, ip) {\n\t\t\treturn client, nil\n\t\t}\n\t}\n\n\treturn nil, errNoClientFound\n}\n\n// ListTailscaleClientsIPs returns a list of netip.Addr based on Users\n// passed as parameters.\nfunc (s *Scenario) ListTailscaleClientsIPs(users ...string) ([]netip.Addr, error) {\n\tvar allIps []netip.Addr\n\n\tif len(users) == 0 {\n\t\tusers = s.Users()\n\t}\n\n\tfor _, user := range users {\n\t\tips, err := s.GetIPs(user)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tallIps = append(allIps, ips...)\n\t}\n\n\treturn allIps, nil\n}\n\n// ListTailscaleClientsFQDNs returns a list of FQDN based on Users\n// passed as parameters.\nfunc (s *Scenario) ListTailscaleClientsFQDNs(users ...string) ([]string, error) {\n\tallFQDNs := make([]string, 0)\n\n\tclients, err := s.ListTailscaleClients(users...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, client := range clients {\n\t\tfqdn, err := client.FQDN()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tallFQDNs = append(allFQDNs, fqdn)\n\t}\n\n\treturn allFQDNs, nil\n}\n\n// WaitForTailscaleLogout blocks execution until all TailscaleClients have\n// logged out of the ControlServer.\nfunc (s *Scenario) WaitForTailscaleLogout() error {\n\tfor _, user := range s.users {\n\t\tfor _, client := range user.Clients {\n\t\t\tc := client\n\n\t\t\tuser.syncWaitGroup.Go(func() error {\n\t\t\t\treturn c.WaitForNeedsLogin(integrationutil.PeerSyncTimeout())\n\t\t\t})\n\t\t}\n\n\t\terr := user.syncWaitGroup.Wait()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// CreateDERPServer creates a new DERP server in a container.\nfunc (s *Scenario) CreateDERPServer(version string, opts 
...dsic.Option) (*dsic.DERPServerInContainer, error) {\n\tderp, err := dsic.New(s.pool, version, s.Networks(), opts...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating DERP server: %w\", err)\n\t}\n\n\terr = derp.WaitForRunning()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"reaching DERP server: %w\", err)\n\t}\n\n\ts.derpServers = append(s.derpServers, derp)\n\n\treturn derp, nil\n}\n\ntype scenarioOIDC struct {\n\tr   *dockertest.Resource\n\tcfg *types.OIDCConfig\n}\n\nfunc (o *scenarioOIDC) Issuer() string {\n\tif o.cfg == nil {\n\t\tpanic(\"OIDC has not been created\")\n\t}\n\n\treturn o.cfg.Issuer\n}\n\nfunc (o *scenarioOIDC) ClientSecret() string {\n\tif o.cfg == nil {\n\t\tpanic(\"OIDC has not been created\")\n\t}\n\n\treturn o.cfg.ClientSecret\n}\n\nfunc (o *scenarioOIDC) ClientID() string {\n\tif o.cfg == nil {\n\t\tpanic(\"OIDC has not been created\")\n\t}\n\n\treturn o.cfg.ClientID\n}\n\nconst (\n\tdockerContextPath      = \"../.\"\n\thsicOIDCMockHashLength = 6\n\tdefaultAccessTTL       = 10 * time.Minute\n)\n\nvar errStatusCodeNotOK = errors.New(\"status code not OK\")\n\nfunc (s *Scenario) runMockOIDC(accessTTL time.Duration, users []mockoidc.MockUser) error {\n\tport, err := dockertestutil.RandomFreeHostPort()\n\tif err != nil {\n\t\tlog.Fatalf(\"finding open port: %s\", err)\n\t}\n\n\tportNotation := fmt.Sprintf(\"%d/tcp\", port)\n\n\thash, _ := util.GenerateRandomStringDNSSafe(hsicOIDCMockHashLength)\n\n\thostname := \"hs-oidcmock-\" + hash\n\n\tusersJSON, err := json.Marshal(users)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmockOidcOptions := &dockertest.RunOptions{\n\t\tName:         hostname,\n\t\tCmd:          []string{\"headscale\", \"mockoidc\"},\n\t\tExposedPorts: []string{portNotation},\n\t\tPortBindings: map[docker.Port][]docker.PortBinding{\n\t\t\tdocker.Port(portNotation): {{HostPort: strconv.Itoa(port)}},\n\t\t},\n\t\tNetworks: s.Networks(),\n\t\tEnv: []string{\n\t\t\t\"MOCKOIDC_ADDR=\" + hostname,\n\t\t\tfmt.Sprintf(\"MOCKOIDC_PORT=%d\", port),\n\t\t\t\"MOCKOIDC_CLIENT_ID=superclient\",\n\t\t\t\"MOCKOIDC_CLIENT_SECRET=supersecret\",\n\t\t\t\"MOCKOIDC_ACCESS_TTL=\" + accessTTL.String(),\n\t\t\t\"MOCKOIDC_USERS=\" + string(usersJSON),\n\t\t},\n\t}\n\n\theadscaleBuildOptions := &dockertest.BuildOptions{\n\t\tDockerfile: hsic.IntegrationTestDockerFileName,\n\t\tContextDir: dockerContextPath,\n\t}\n\n\terr = s.pool.RemoveContainerByName(hostname)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.mockOIDC = scenarioOIDC{}\n\n\t// Add integration test labels if running under hi tool\n\tdockertestutil.DockerAddIntegrationLabels(mockOidcOptions, \"oidc\")\n\n\tif pmockoidc, err := s.pool.BuildAndRunWithBuildOptions( //nolint:noinlineerr\n\t\theadscaleBuildOptions,\n\t\tmockOidcOptions,\n\t\tdockertestutil.DockerRestartPolicy); err == nil {\n\t\ts.mockOIDC.r = pmockoidc\n\t} else {\n\t\treturn err\n\t}\n\n\t// headscale needs to set up the provider with a specific\n\t// IP addr to ensure we get the correct config from the well-known\n\t// endpoint.\n\tnetwork := s.Networks()[0]\n\tipAddr := s.mockOIDC.r.GetIPInNetwork(network)\n\n\tlog.Println(\"Waiting for headscale mock oidc to be ready for tests\")\n\n\thostEndpoint := net.JoinHostPort(ipAddr, strconv.Itoa(port))\n\n\tif err := s.pool.Retry(func() error { //nolint:noinlineerr\n\t\toidcConfigURL := fmt.Sprintf(\"http://%s/oidc/.well-known/openid-configuration\", hostEndpoint)\n\t\thttpClient := &http.Client{}\n\t\tctx := context.Background()\n\t\treq, _ := http.NewRequestWithContext(ctx, http.MethodGet, 
oidcConfigURL, nil)\n\n\t\tresp, err := httpClient.Do(req)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"headscale mock OIDC is not ready: %s\\n\", err)\n\n\t\t\treturn err\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\treturn errStatusCodeNotOK\n\t\t}\n\n\t\treturn nil\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\ts.mockOIDC.cfg = &types.OIDCConfig{\n\t\tIssuer: fmt.Sprintf(\n\t\t\t\"http://%s/oidc\",\n\t\t\thostEndpoint,\n\t\t),\n\t\tClientID:                   \"superclient\",\n\t\tClientSecret:               \"supersecret\",\n\t\tOnlyStartIfOIDCIsAvailable: true,\n\t}\n\n\tlog.Printf(\"headscale mock oidc is ready for tests at %s\", hostEndpoint)\n\n\treturn nil\n}\n\ntype extraServiceFunc func(*Scenario, string) (*dockertest.Resource, error)\n\nfunc Webservice(s *Scenario, networkName string) (*dockertest.Resource, error) {\n\thash := util.MustGenerateRandomStringDNSSafe(hsicOIDCMockHashLength)\n\n\thostname := \"hs-webservice-\" + hash\n\n\tnetwork, ok := s.networks[s.prefixedNetworkName(networkName)]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"network does not exist: %s\", networkName) //nolint:err113\n\t}\n\n\twebOpts := &dockertest.RunOptions{\n\t\tName:     hostname,\n\t\tCmd:      []string{\"/bin/sh\", \"-c\", \"cd / ; python3 -m http.server --bind :: 80\"},\n\t\tNetworks: []*dockertest.Network{network},\n\t\tEnv:      []string{},\n\t}\n\n\t// Add integration test labels if running under hi tool\n\tdockertestutil.DockerAddIntegrationLabels(webOpts, \"web\")\n\n\twebBOpts := &dockertest.BuildOptions{\n\t\tDockerfile: hsic.IntegrationTestDockerFileName,\n\t\tContextDir: dockerContextPath,\n\t}\n\n\tweb, err := s.pool.BuildAndRunWithBuildOptions(\n\t\twebBOpts,\n\t\twebOpts,\n\t\tdockertestutil.DockerRestartPolicy)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// The service is only reachable from inside its Docker network; tests\n\t// reach it through a router node advertising the network's subnet.\n\treturn web, nil\n}\n"
  },
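  {
    "path": "integration/scenario_networks_example_test.go",
    "content": "package integration\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\n// This file is an illustrative sketch and not part of the original test\n// suite: it shows how ScenarioSpec.Networks and ScenarioSpec.ExtraService\n// compose into a two-network topology with a plain HTTP container placed\n// next to one of the users. Network and user names are arbitrary example\n// values.\nfunc TestExampleMultiNetworkSpec(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 1,\n\t\tUsers:        []string{\"usernet1\", \"usernet2\"},\n\t\t// Each user lives in exactly one Docker network; traffic between the\n\t\t// networks is not necessarily routable and may fall back to DERP.\n\t\tNetworks: map[string][]string{\n\t\t\t\"usernet1\": {\"usernet1\"},\n\t\t\t\"usernet2\": {\"usernet2\"},\n\t\t},\n\t\t// Webservice starts a python http.server container inside the given\n\t\t// network; tests typically reach it through an advertised subnet route.\n\t\tExtraService: map[string][]extraServiceFunc{\n\t\t\t\"usernet1\": {Webservice},\n\t\t},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\t// Network names are transparently prefixed with the per-run hash, so\n\t// lookups use the plain name from the spec.\n\tprefix, err := scenario.SubnetOfNetwork(\"usernet1\")\n\trequire.NoError(t, err)\n\trequire.NotNil(t, prefix)\n\n\tservices, err := scenario.Services(\"usernet1\")\n\trequire.NoError(t, err)\n\trequire.Len(t, services, 1)\n}\n"
  },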
  {
    "path": "integration/scenario_test.go",
    "content": "package integration\n\nimport (\n\t\"testing\"\n\n\t\"github.com/juanfont/headscale/integration/dockertestutil\"\n\t\"github.com/juanfont/headscale/integration/hsic\"\n\t\"github.com/juanfont/headscale/integration/tsic\"\n\t\"github.com/stretchr/testify/require\"\n)\n\n// This file is intended to \"test the test framework\", by proxy it will also test\n// some Headscale/Tailscale stuff, but mostly in very simple ways.\n\nfunc IntegrationSkip(t *testing.T) {\n\tt.Helper()\n\n\tif !dockertestutil.IsRunningInContainer() {\n\t\tt.Skip(\"not running in docker, skipping\")\n\t}\n\n\tif testing.Short() {\n\t\tt.Skip(\"skipping integration tests due to short flag\")\n\t}\n}\n\n// If subtests are parallel, then they will start before setup is run.\n// This might mean we approach setup slightly wrong, but for now, ignore\n// the linter\n// nolint:tparallel\nfunc TestHeadscale(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tvar err error\n\n\tuser := \"test-space\"\n\n\tscenario, err := NewScenario(ScenarioSpec{})\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\tt.Run(\"start-headscale\", func(t *testing.T) {\n\t\theadscale, err := scenario.Headscale(hsic.WithTestName(\"scenariohs\"))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to create start headcale: %s\", err)\n\t\t}\n\n\t\terr = headscale.WaitForRunning()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"headscale failed to become ready: %s\", err)\n\t\t}\n\t})\n\n\tt.Run(\"create-user\", func(t *testing.T) {\n\t\t_, err := scenario.CreateUser(user)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to create user: %s\", err)\n\t\t}\n\n\t\tif _, ok := scenario.users[user]; !ok {\n\t\t\tt.Fatalf(\"user is not in scenario\")\n\t\t}\n\t})\n\n\tt.Run(\"create-auth-key\", func(t *testing.T) {\n\t\t_, err := scenario.CreatePreAuthKey(1, true, false)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to create preauthkey: %s\", err)\n\t\t}\n\t})\n}\n\n// If subtests are parallel, then they will start before setup is run.\n// This might mean we approach setup slightly wrong, but for now, ignore\n// the linter\n// nolint:tparallel\nfunc TestTailscaleNodesJoiningHeadcale(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tvar err error\n\n\tuser := \"join-node-test\"\n\n\tcount := 1\n\n\tscenario, err := NewScenario(ScenarioSpec{})\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\tt.Run(\"start-headscale\", func(t *testing.T) {\n\t\theadscale, err := scenario.Headscale(hsic.WithTestName(\"scenariojoin\"))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to create start headcale: %s\", err)\n\t\t}\n\n\t\terr = headscale.WaitForRunning()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"headscale failed to become ready: %s\", err)\n\t\t}\n\t})\n\n\tt.Run(\"create-user\", func(t *testing.T) {\n\t\t_, err := scenario.CreateUser(user)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to create user: %s\", err)\n\t\t}\n\n\t\tif _, ok := scenario.users[user]; !ok {\n\t\t\tt.Fatalf(\"user is not in scenario\")\n\t\t}\n\t})\n\n\tt.Run(\"create-tailscale\", func(t *testing.T) {\n\t\terr := scenario.CreateTailscaleNodesInUser(user, \"unstable\", count, tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to add tailscale nodes: %s\", err)\n\t\t}\n\n\t\tif clients := len(scenario.users[user].Clients); clients != count {\n\t\t\tt.Fatalf(\"wrong number of tailscale clients: %d != %d\", clients, count)\n\t\t}\n\t})\n\n\tt.Run(\"join-headscale\", func(t *testing.T) {\n\t\tkey, err := 
scenario.CreatePreAuthKey(1, true, false)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to create preauthkey: %s\", err)\n\t\t}\n\n\t\theadscale, err := scenario.Headscale()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to get headscale: %s\", err)\n\t\t}\n\n\t\terr = scenario.RunTailscaleUp(\n\t\t\tuser,\n\t\t\theadscale.GetEndpoint(),\n\t\t\tkey.GetKey(),\n\t\t)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to login: %s\", err)\n\t\t}\n\t})\n\n\tt.Run(\"get-ips\", func(t *testing.T) {\n\t\tips, err := scenario.GetIPs(user)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to get tailscale ips: %s\", err)\n\t\t}\n\n\t\tif len(ips) != count*2 {\n\t\t\tt.Fatalf(\"got the wrong number of tailscale ips, %d != %d\", len(ips), count*2)\n\t\t}\n\t})\n}\n"
  },
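  {
    "path": "integration/ssh_policy_example_test.go",
    "content": "package integration\n\nimport (\n\t\"testing\"\n\n\tpolicyv2 \"github.com/juanfont/headscale/hscontrol/policy/v2\"\n\t\"tailscale.com/tailcfg\"\n)\n\n// This file is an illustrative sketch and not part of the original test\n// suite: it assembles the smallest SSH policy that lets a user's devices\n// SSH to each other via autogroup:self, then exercises it with the\n// sshScenario helper. Group and test names are arbitrary example values.\nfunc TestExampleSSHSelfOnlyPolicy(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tscenario := sshScenario(t,\n\t\t&policyv2.Policy{\n\t\t\tGroups: policyv2.Groups{\n\t\t\t\tpolicyv2.Group(\"group:example\"): []policyv2.Username{policyv2.Username(\"user1@\")},\n\t\t\t},\n\t\t\t// A broad TCP ACL is still required so the underlying\n\t\t\t// connections are allowed; SSH rules only gate the SSH layer.\n\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t{\n\t\t\t\t\tAction:   \"accept\",\n\t\t\t\t\tProtocol: \"tcp\",\n\t\t\t\t\tSources:  []policyv2.Alias{wildcard()},\n\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRangeAny),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSSHs: []policyv2.SSH{\n\t\t\t\t{\n\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\tSources: policyv2.SSHSrcAliases{groupp(\"group:example\")},\n\t\t\t\t\t// autogroup:self restricts destinations to devices\n\t\t\t\t\t// owned by the connecting user.\n\t\t\t\t\tDestinations: policyv2.SSHDstAliases{new(policyv2.AutoGroupSelf)},\n\t\t\t\t\tUsers:        []policyv2.SSHUser{policyv2.SSHUser(\"ssh-it-user\")},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"ssh-example\",\n\t\t2,\n\t)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\tuser1Clients, err := scenario.ListTailscaleClients(\"user1\")\n\trequireNoErrListClients(t, err)\n\n\t// Same-user pairs should connect; the assert helper verifies the\n\t// remote hostname over tailscale ssh.\n\tfor _, client := range user1Clients {\n\t\tfor _, peer := range user1Clients {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tassertSSHHostname(t, client, peer)\n\t\t}\n\t}\n}\n"
  },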
  {
    "path": "integration/ssh_test.go",
    "content": "package integration\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net/url\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\tpolicyv2 \"github.com/juanfont/headscale/hscontrol/policy/v2\"\n\t\"github.com/juanfont/headscale/integration/dockertestutil\"\n\t\"github.com/juanfont/headscale/integration/hsic\"\n\t\"github.com/juanfont/headscale/integration/tsic\"\n\t\"github.com/oauth2-proxy/mockoidc\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/tailcfg\"\n)\n\nfunc isSSHNoAccessStdError(stderr string) bool {\n\treturn strings.Contains(stderr, \"Permission denied (tailscale)\") ||\n\t\t// Since https://github.com/tailscale/tailscale/pull/14853\n\t\tstrings.Contains(stderr, \"failed to evaluate SSH policy\") ||\n\t\t// Since https://github.com/tailscale/tailscale/pull/16127\n\t\t// Covers both \"to this node\" and \"as user <name>\" variants.\n\t\tstrings.Contains(stderr, \"tailnet policy does not permit you to SSH\")\n}\n\nfunc sshScenario(t *testing.T, policy *policyv2.Policy, testName string, clientsPerUser int) *Scenario {\n\tt.Helper()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: clientsPerUser,\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t}\n\tscenario, err := NewScenario(spec)\n\trequire.NoError(t, err)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{\n\t\t\ttsic.WithSSH(),\n\n\t\t\t// Alpine containers dont have ip6tables set up, which causes\n\t\t\t// tailscaled to stop configuring the wgengine, causing it\n\t\t\t// to not configure DNS.\n\t\t\ttsic.WithNetfilter(\"off\"),\n\t\t\ttsic.WithPackages(\"openssh\"),\n\t\t\ttsic.WithExtraCommands(\"adduser ssh-it-user\"),\n\t\t\ttsic.WithDockerWorkdir(\"/\"),\n\t\t},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(testName),\n\t)\n\trequire.NoError(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequire.NoError(t, err)\n\n\t_, err = scenario.ListTailscaleClientsFQDNs()\n\trequire.NoError(t, err)\n\n\treturn scenario\n}\n\nfunc TestSSHOneUserToAll(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tscenario := sshScenario(t,\n\t\t&policyv2.Policy{\n\t\t\tGroups: policyv2.Groups{\n\t\t\t\tpolicyv2.Group(\"group:integration-test\"): []policyv2.Username{policyv2.Username(\"user1@\")},\n\t\t\t},\n\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t{\n\t\t\t\t\tAction:   \"accept\",\n\t\t\t\t\tProtocol: \"tcp\",\n\t\t\t\t\tSources:  []policyv2.Alias{wildcard()},\n\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRangeAny),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSSHs: []policyv2.SSH{\n\t\t\t\t{\n\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\tSources: policyv2.SSHSrcAliases{groupp(\"group:integration-test\")},\n\t\t\t\t\t// Use autogroup:member and autogroup:tagged instead of wildcard\n\t\t\t\t\t// since wildcard (*) is no longer supported for SSH destinations\n\t\t\t\t\tDestinations: policyv2.SSHDstAliases{\n\t\t\t\t\t\tnew(policyv2.AutoGroupMember),\n\t\t\t\t\t\tnew(policyv2.AutoGroupTagged),\n\t\t\t\t\t},\n\t\t\t\t\tUsers: []policyv2.SSHUser{policyv2.SSHUser(\"ssh-it-user\")},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"ssh-onetoall\",\n\t\tlen(MustTestVersions),\n\t)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\tuser1Clients, err := scenario.ListTailscaleClients(\"user1\")\n\trequireNoErrListClients(t, err)\n\n\tuser2Clients, err := scenario.ListTailscaleClients(\"user2\")\n\trequireNoErrListClients(t, err)\n\n\terr = 
scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t_, err = scenario.ListTailscaleClientsFQDNs()\n\trequireNoErrListFQDN(t, err)\n\n\tfor _, client := range user1Clients {\n\t\tfor _, peer := range allClients {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tassertSSHHostname(t, client, peer)\n\t\t}\n\t}\n\n\tfor _, client := range user2Clients {\n\t\tfor _, peer := range allClients {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tassertSSHPermissionDenied(t, client, peer)\n\t\t}\n\t}\n}\n\n// TestSSHMultipleUsersAllToAll tests that, with autogroup:self as the destination,\n// users can SSH to their own devices but not to other users' devices.\nfunc TestSSHMultipleUsersAllToAll(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tscenario := sshScenario(t,\n\t\t&policyv2.Policy{\n\t\t\tGroups: policyv2.Groups{\n\t\t\t\tpolicyv2.Group(\"group:integration-test\"): []policyv2.Username{policyv2.Username(\"user1@\"), policyv2.Username(\"user2@\")},\n\t\t\t},\n\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t{\n\t\t\t\t\tAction:   \"accept\",\n\t\t\t\t\tProtocol: \"tcp\",\n\t\t\t\t\tSources:  []policyv2.Alias{wildcard()},\n\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRangeAny),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSSHs: []policyv2.SSH{\n\t\t\t\t{\n\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\tSources: policyv2.SSHSrcAliases{groupp(\"group:integration-test\")},\n\t\t\t\t\t// Use autogroup:self to allow users to SSH to their own devices.\n\t\t\t\t\t// Username destinations (e.g., \"user1@\") now require the source\n\t\t\t\t\t// to be that exact same user only. For group-to-group SSH access,\n\t\t\t\t\t// use autogroup:self instead.\n\t\t\t\t\tDestinations: policyv2.SSHDstAliases{new(policyv2.AutoGroupSelf)},\n\t\t\t\t\tUsers:        []policyv2.SSHUser{policyv2.SSHUser(\"ssh-it-user\")},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"ssh-multiall\",\n\t\tlen(MustTestVersions),\n\t)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\tnsOneClients, err := scenario.ListTailscaleClients(\"user1\")\n\trequireNoErrListClients(t, err)\n\n\tnsTwoClients, err := scenario.ListTailscaleClients(\"user2\")\n\trequireNoErrListClients(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t_, err = scenario.ListTailscaleClientsFQDNs()\n\trequireNoErrListFQDN(t, err)\n\n\t// With autogroup:self, users can SSH to their own devices, but not to other users' devices.\n\t// Test that user1's devices can SSH to each other\n\tfor _, client := range nsOneClients {\n\t\tfor _, peer := range nsOneClients {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tassertSSHHostname(t, client, peer)\n\t\t}\n\t}\n\n\t// Test that user2's devices can SSH to each other\n\tfor _, client := range nsTwoClients {\n\t\tfor _, peer := range nsTwoClients {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tassertSSHHostname(t, client, peer)\n\t\t}\n\t}\n\n\t// Test that user1 cannot SSH to user2's devices (autogroup:self only allows same-user)\n\tfor _, client := range nsOneClients {\n\t\tfor _, peer := range nsTwoClients {\n\t\t\tassertSSHPermissionDenied(t, client, peer)\n\t\t}\n\t}\n\n\t// Test that user2 cannot SSH to user1's devices (autogroup:self only allows same-user)\n\tfor _, client := range nsTwoClients {\n\t\tfor _, peer := range nsOneClients {\n\t\t\tassertSSHPermissionDenied(t, client, 
peer)\n\t\t}\n\t}\n}\n\nfunc TestSSHNoSSHConfigured(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tscenario := sshScenario(t,\n\t\t&policyv2.Policy{\n\t\t\tGroups: policyv2.Groups{\n\t\t\t\tpolicyv2.Group(\"group:integration-test\"): []policyv2.Username{policyv2.Username(\"user1@\")},\n\t\t\t},\n\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t{\n\t\t\t\t\tAction:   \"accept\",\n\t\t\t\t\tProtocol: \"tcp\",\n\t\t\t\t\tSources:  []policyv2.Alias{wildcard()},\n\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRangeAny),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSSHs: []policyv2.SSH{},\n\t\t},\n\t\t\"ssh-nosshcfg\",\n\t\tlen(MustTestVersions),\n\t)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t_, err = scenario.ListTailscaleClientsFQDNs()\n\trequireNoErrListFQDN(t, err)\n\n\tfor _, client := range allClients {\n\t\tfor _, peer := range allClients {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tassertSSHPermissionDenied(t, client, peer)\n\t\t}\n\t}\n}\n\nfunc TestSSHIsBlockedInACL(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tscenario := sshScenario(t,\n\t\t&policyv2.Policy{\n\t\t\tGroups: policyv2.Groups{\n\t\t\t\tpolicyv2.Group(\"group:integration-test\"): []policyv2.Username{policyv2.Username(\"user1@\")},\n\t\t\t},\n\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t{\n\t\t\t\t\tAction:   \"accept\",\n\t\t\t\t\tProtocol: \"tcp\",\n\t\t\t\t\tSources:  []policyv2.Alias{wildcard()},\n\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRange{First: 80, Last: 80}),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSSHs: []policyv2.SSH{\n\t\t\t\t{\n\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\tSources:      policyv2.SSHSrcAliases{groupp(\"group:integration-test\")},\n\t\t\t\t\tDestinations: policyv2.SSHDstAliases{new(policyv2.AutoGroupSelf)},\n\t\t\t\t\tUsers:        []policyv2.SSHUser{policyv2.SSHUser(\"ssh-it-user\")},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"ssh-blocked\",\n\t\tlen(MustTestVersions),\n\t)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t_, err = scenario.ListTailscaleClientsFQDNs()\n\trequireNoErrListFQDN(t, err)\n\n\tfor _, client := range allClients {\n\t\tfor _, peer := range allClients {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tassertSSHTimeout(t, client, peer)\n\t\t}\n\t}\n}\n\nfunc TestSSHUserOnlyIsolation(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tscenario := sshScenario(t,\n\t\t&policyv2.Policy{\n\t\t\tGroups: policyv2.Groups{\n\t\t\t\tpolicyv2.Group(\"group:ssh1\"): []policyv2.Username{policyv2.Username(\"user1@\")},\n\t\t\t\tpolicyv2.Group(\"group:ssh2\"): []policyv2.Username{policyv2.Username(\"user2@\")},\n\t\t\t},\n\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t{\n\t\t\t\t\tAction:   \"accept\",\n\t\t\t\t\tProtocol: \"tcp\",\n\t\t\t\t\tSources:  []policyv2.Alias{wildcard()},\n\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRangeAny),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSSHs: []policyv2.SSH{\n\t\t\t\t// Use autogroup:self to allow users in each group to SSH to their own devices.\n\t\t\t\t// Username destinations (e.g., \"user1@\") require the source 
to be that\n\t\t\t\t// exact same user only, not a group containing that user.\n\t\t\t\t{\n\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\tSources:      policyv2.SSHSrcAliases{groupp(\"group:ssh1\")},\n\t\t\t\t\tDestinations: policyv2.SSHDstAliases{new(policyv2.AutoGroupSelf)},\n\t\t\t\t\tUsers:        []policyv2.SSHUser{policyv2.SSHUser(\"ssh-it-user\")},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\tSources:      policyv2.SSHSrcAliases{groupp(\"group:ssh2\")},\n\t\t\t\t\tDestinations: policyv2.SSHDstAliases{new(policyv2.AutoGroupSelf)},\n\t\t\t\t\tUsers:        []policyv2.SSHUser{policyv2.SSHUser(\"ssh-it-user\")},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"ssh-isolation\",\n\t\tlen(MustTestVersions),\n\t)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\tssh1Clients, err := scenario.ListTailscaleClients(\"user1\")\n\trequireNoErrListClients(t, err)\n\n\tssh2Clients, err := scenario.ListTailscaleClients(\"user2\")\n\trequireNoErrListClients(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t_, err = scenario.ListTailscaleClientsFQDNs()\n\trequireNoErrListFQDN(t, err)\n\n\tfor _, client := range ssh1Clients {\n\t\tfor _, peer := range ssh2Clients {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tassertSSHPermissionDenied(t, client, peer)\n\t\t}\n\t}\n\n\tfor _, client := range ssh2Clients {\n\t\tfor _, peer := range ssh1Clients {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tassertSSHPermissionDenied(t, client, peer)\n\t\t}\n\t}\n\n\tfor _, client := range ssh1Clients {\n\t\tfor _, peer := range ssh1Clients {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tassertSSHHostname(t, client, peer)\n\t\t}\n\t}\n\n\tfor _, client := range ssh2Clients {\n\t\tfor _, peer := range ssh2Clients {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tassertSSHHostname(t, client, peer)\n\t\t}\n\t}\n}\n\nfunc doSSH(t *testing.T, client TailscaleClient, peer TailscaleClient) (string, string, error) {\n\tt.Helper()\n\treturn doSSHWithRetry(t, client, peer, true)\n}\n\nfunc doSSHWithoutRetry(t *testing.T, client TailscaleClient, peer TailscaleClient) (string, string, error) {\n\tt.Helper()\n\treturn doSSHWithRetry(t, client, peer, false)\n}\n\nfunc doSSHWithRetry(t *testing.T, client TailscaleClient, peer TailscaleClient, retry bool) (string, string, error) {\n\tt.Helper()\n\n\treturn doSSHWithRetryAsUser(t, client, peer, \"ssh-it-user\", retry)\n}\n\nfunc doSSHWithRetryAsUser(\n\tt *testing.T,\n\tclient TailscaleClient,\n\tpeer TailscaleClient,\n\tsshUser string,\n\tretry bool,\n) (string, string, error) {\n\tt.Helper()\n\n\tpeerFQDN, _ := peer.FQDN()\n\n\tcommand := []string{\n\t\t\"/usr/bin/ssh\", \"-o StrictHostKeyChecking=no\", \"-o ConnectTimeout=1\",\n\t\tfmt.Sprintf(\"%s@%s\", sshUser, peerFQDN),\n\t\t\"'hostname'\",\n\t}\n\n\tlog.Printf(\"Running from %s to %s as %s\", client.Hostname(), peer.Hostname(), sshUser)\n\tlog.Printf(\"Command: %s\", strings.Join(command, \" \"))\n\n\tvar (\n\t\tresult, stderr string\n\t\terr            error\n\t)\n\n\tif retry {\n\t\t// Use assert.EventuallyWithT to retry SSH connections for success cases\n\t\tassert.EventuallyWithT(t, func(ct *assert.CollectT) {\n\t\t\tresult, stderr, err = client.Execute(command)\n\n\t\t\t// If we get a permission denied error, we can fail immediately\n\t\t\t// since that is something we won't recover from by retrying.\n\t\t\tif err != nil && 
isSSHNoAccessStdError(stderr) {\n\t\t\t\treturn // Don't retry permission denied errors\n\t\t\t}\n\n\t\t\t// For all other errors, assert no error to trigger retry\n\t\t\tassert.NoError(ct, err)\n\t\t}, 10*time.Second, 200*time.Millisecond)\n\t} else {\n\t\t// For failure cases, just execute once\n\t\tresult, stderr, err = client.Execute(command)\n\t}\n\n\treturn result, stderr, err\n}\n\nfunc assertSSHHostname(t *testing.T, client TailscaleClient, peer TailscaleClient) {\n\tt.Helper()\n\n\tresult, _, err := doSSH(t, client, peer)\n\trequire.NoError(t, err)\n\n\trequire.Contains(t, peer.ContainerID(), strings.ReplaceAll(result, \"\\n\", \"\"))\n}\n\nfunc assertSSHPermissionDenied(t *testing.T, client TailscaleClient, peer TailscaleClient) {\n\tt.Helper()\n\n\tresult, stderr, err := doSSHWithoutRetry(t, client, peer)\n\n\tassert.Empty(t, result)\n\n\tassertSSHNoAccessStdError(t, err, stderr)\n}\n\nfunc assertSSHTimeout(t *testing.T, client TailscaleClient, peer TailscaleClient) {\n\tt.Helper()\n\n\tresult, stderr, _ := doSSHWithoutRetry(t, client, peer)\n\n\tassert.Empty(t, result)\n\n\tif !strings.Contains(stderr, \"Connection timed out\") &&\n\t\t!strings.Contains(stderr, \"Operation timed out\") {\n\t\tt.Fatalf(\"connection did not time out\")\n\t}\n}\n\nfunc assertSSHNoAccessStdError(t *testing.T, err error, stderr string) {\n\tt.Helper()\n\trequire.Error(t, err)\n\n\tif !isSSHNoAccessStdError(stderr) {\n\t\tt.Errorf(\"expected stderr output suggesting access denied, got: %s\", stderr)\n\t}\n}\n\nfunc doSSHAsUser(t *testing.T, client TailscaleClient, peer TailscaleClient, sshUser string) (string, string, error) {\n\tt.Helper()\n\n\treturn doSSHWithRetryAsUser(t, client, peer, sshUser, true)\n}\n\nfunc assertSSHHostnameAsUser(t *testing.T, client TailscaleClient, peer TailscaleClient, sshUser string) {\n\tt.Helper()\n\n\tresult, _, err := doSSHAsUser(t, client, peer, sshUser)\n\trequire.NoError(t, err)\n\n\trequire.Contains(t, peer.ContainerID(), strings.ReplaceAll(result, \"\\n\", \"\"))\n}\n\nfunc assertSSHPermissionDeniedAsUser(t *testing.T, client TailscaleClient, peer TailscaleClient, sshUser string) {\n\tt.Helper()\n\n\tresult, stderr, err := doSSHWithRetryAsUser(t, client, peer, sshUser, false)\n\n\tassert.Empty(t, result)\n\n\tassertSSHNoAccessStdError(t, err, stderr)\n}\n\n// TestSSHAutogroupSelf tests that SSH with autogroup:self works correctly:\n// - Users can SSH to their own devices\n// - Users cannot SSH to other users' devices.\nfunc TestSSHAutogroupSelf(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tscenario := sshScenario(t,\n\t\t&policyv2.Policy{\n\t\t\tACLs: []policyv2.ACL{\n\t\t\t\t{\n\t\t\t\t\tAction:   \"accept\",\n\t\t\t\t\tProtocol: \"tcp\",\n\t\t\t\t\tSources:  []policyv2.Alias{wildcard()},\n\t\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRangeAny),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSSHs: []policyv2.SSH{\n\t\t\t\t{\n\t\t\t\t\tAction: \"accept\",\n\t\t\t\t\tSources: policyv2.SSHSrcAliases{\n\t\t\t\t\t\tnew(policyv2.AutoGroupMember),\n\t\t\t\t\t},\n\t\t\t\t\tDestinations: policyv2.SSHDstAliases{\n\t\t\t\t\t\tnew(policyv2.AutoGroupSelf),\n\t\t\t\t\t},\n\t\t\t\t\tUsers: []policyv2.SSHUser{policyv2.SSHUser(\"ssh-it-user\")},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"ssh-agself\",\n\t\t2, // 2 clients per user\n\t)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\tuser1Clients, err := scenario.ListTailscaleClients(\"user1\")\n\trequireNoErrListClients(t, err)\n\n\tuser2Clients, err := 
scenario.ListTailscaleClients(\"user2\")\n\trequireNoErrListClients(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t// Test that user1's devices can SSH to each other\n\tfor _, client := range user1Clients {\n\t\tfor _, peer := range user1Clients {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tassertSSHHostname(t, client, peer)\n\t\t}\n\t}\n\n\t// Test that user2's devices can SSH to each other\n\tfor _, client := range user2Clients {\n\t\tfor _, peer := range user2Clients {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tassertSSHHostname(t, client, peer)\n\t\t}\n\t}\n\n\t// Test that user1 cannot SSH to user2's devices\n\tfor _, client := range user1Clients {\n\t\tfor _, peer := range user2Clients {\n\t\t\tassertSSHPermissionDenied(t, client, peer)\n\t\t}\n\t}\n\n\t// Test that user2 cannot SSH to user1's devices\n\tfor _, client := range user2Clients {\n\t\tfor _, peer := range user1Clients {\n\t\t\tassertSSHPermissionDenied(t, client, peer)\n\t\t}\n\t}\n}\n\ntype sshCheckResult struct {\n\tstdout string\n\tstderr string\n\terr    error\n}\n\n// doSSHCheck runs SSH in a goroutine with a longer timeout, returning a channel\n// for the result. The SSH command will block while waiting for auth approval in\n// check mode.\nfunc doSSHCheck(\n\tt *testing.T,\n\tclient TailscaleClient,\n\tpeer TailscaleClient,\n) chan sshCheckResult {\n\tt.Helper()\n\n\tpeerFQDN, _ := peer.FQDN()\n\n\tcommand := []string{\n\t\t\"/usr/bin/ssh\", \"-o StrictHostKeyChecking=no\", \"-o ConnectTimeout=30\",\n\t\tfmt.Sprintf(\"%s@%s\", \"ssh-it-user\", peerFQDN),\n\t\t\"'hostname'\",\n\t}\n\n\tlog.Printf(\n\t\t\"[SSH check] Running from %s to %s\",\n\t\tclient.Hostname(),\n\t\tpeer.Hostname(),\n\t)\n\n\tch := make(chan sshCheckResult, 1)\n\n\tgo func() {\n\t\tstdout, stderr, err := client.Execute(\n\t\t\tcommand,\n\t\t\tdockertestutil.ExecuteCommandTimeout(60*time.Second),\n\t\t)\n\t\tch <- sshCheckResult{stdout, stderr, err}\n\t}()\n\n\treturn ch\n}\n\n// findSSHCheckAuthID polls headscale container logs for the SSH action auth-id.\n// The SSH action handler logs \"SSH action follow-up\" with the auth_id on the\n// follow-up request (where auth_id is non-empty).\nfunc findSSHCheckAuthID(t *testing.T, headscale ControlServer) string {\n\tt.Helper()\n\n\tvar authID string\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t_, stderr, err := headscale.ReadLog()\n\t\tassert.NoError(c, err)\n\n\t\tfor line := range strings.SplitSeq(stderr, \"\\n\") {\n\t\t\tif !strings.Contains(line, \"SSH action follow-up\") {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif idx := strings.Index(line, \"auth_id=\"); idx != -1 {\n\t\t\t\tstart := idx + len(\"auth_id=\")\n\n\t\t\t\tend := strings.IndexByte(line[start:], ' ')\n\t\t\t\tif end == -1 {\n\t\t\t\t\tend = len(line[start:])\n\t\t\t\t}\n\n\t\t\t\tauthID = line[start : start+end]\n\t\t\t}\n\t\t}\n\n\t\tassert.NotEmpty(c, authID, \"auth-id not found in headscale logs\")\n\t}, 10*time.Second, 500*time.Millisecond, \"waiting for SSH check auth-id in headscale logs\")\n\n\treturn authID\n}\n\n// sshCheckPolicy returns a policy with SSH \"check\" mode for group:integration-test\n// targeting autogroup:member and autogroup:tagged destinations.\nfunc sshCheckPolicy() *policyv2.Policy {\n\treturn &policyv2.Policy{\n\t\tGroups: policyv2.Groups{\n\t\t\tpolicyv2.Group(\"group:integration-test\"): 
[]policyv2.Username{\n\t\t\t\tpolicyv2.Username(\"user1@\"),\n\t\t\t},\n\t\t},\n\t\tACLs: []policyv2.ACL{\n\t\t\t{\n\t\t\t\tAction:   \"accept\",\n\t\t\t\tProtocol: \"tcp\",\n\t\t\t\tSources:  []policyv2.Alias{wildcard()},\n\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tSSHs: []policyv2.SSH{\n\t\t\t{\n\t\t\t\tAction:  \"check\",\n\t\t\t\tSources: policyv2.SSHSrcAliases{groupp(\"group:integration-test\")},\n\t\t\t\tDestinations: policyv2.SSHDstAliases{\n\t\t\t\t\tnew(policyv2.AutoGroupMember),\n\t\t\t\t\tnew(policyv2.AutoGroupTagged),\n\t\t\t\t},\n\t\t\t\tUsers: []policyv2.SSHUser{policyv2.SSHUser(\"ssh-it-user\")},\n\t\t\t},\n\t\t},\n\t}\n}\n\n// sshCheckPolicyWithPeriod returns a policy with SSH \"check\" mode and a\n// specified checkPeriod for session duration.\nfunc sshCheckPolicyWithPeriod(period time.Duration) *policyv2.Policy {\n\treturn &policyv2.Policy{\n\t\tGroups: policyv2.Groups{\n\t\t\tpolicyv2.Group(\"group:integration-test\"): []policyv2.Username{\n\t\t\t\tpolicyv2.Username(\"user1@\"),\n\t\t\t},\n\t\t},\n\t\tACLs: []policyv2.ACL{\n\t\t\t{\n\t\t\t\tAction:   \"accept\",\n\t\t\t\tProtocol: \"tcp\",\n\t\t\t\tSources:  []policyv2.Alias{wildcard()},\n\t\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRangeAny),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tSSHs: []policyv2.SSH{\n\t\t\t{\n\t\t\t\tAction:  \"check\",\n\t\t\t\tSources: policyv2.SSHSrcAliases{groupp(\"group:integration-test\")},\n\t\t\t\tDestinations: policyv2.SSHDstAliases{\n\t\t\t\t\tnew(policyv2.AutoGroupMember),\n\t\t\t\t\tnew(policyv2.AutoGroupTagged),\n\t\t\t\t},\n\t\t\t\tUsers:       []policyv2.SSHUser{policyv2.SSHUser(\"ssh-it-user\")},\n\t\t\t\tCheckPeriod: &policyv2.SSHCheckPeriod{Duration: period},\n\t\t\t},\n\t\t},\n\t}\n}\n\n// findNewSSHCheckAuthID polls headscale logs for an SSH check auth-id\n// that differs from excludeID. 
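(E.g., the first approval's auth-id is passed as\n// excludeID by TestSSHCheckModeCheckPeriodCLI below.) Used to verify\n// re-authentication after session expiry.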
\nfunc findNewSSHCheckAuthID(\n\tt *testing.T,\n\theadscale ControlServer,\n\texcludeID string,\n) string {\n\tt.Helper()\n\n\tvar authID string\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t_, stderr, err := headscale.ReadLog()\n\t\tassert.NoError(c, err)\n\n\t\tfor line := range strings.SplitSeq(stderr, \"\\n\") {\n\t\t\tif !strings.Contains(line, \"SSH action follow-up\") {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif idx := strings.Index(line, \"auth_id=\"); idx != -1 {\n\t\t\t\tstart := idx + len(\"auth_id=\")\n\n\t\t\t\tend := strings.IndexByte(line[start:], ' ')\n\t\t\t\tif end == -1 {\n\t\t\t\t\tend = len(line[start:])\n\t\t\t\t}\n\n\t\t\t\tid := line[start : start+end]\n\t\t\t\tif id != excludeID {\n\t\t\t\t\tauthID = id\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tassert.NotEmpty(c, authID, \"new auth-id not found in headscale logs\")\n\t}, 10*time.Second, 500*time.Millisecond, \"waiting for new SSH check auth-id\")\n\n\treturn authID\n}\n\nfunc TestSSHOneUserToOneCheckModeCLI(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tscenario := sshScenario(t, sshCheckPolicy(), \"ssh-checkcli\", 1)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\tuser1Clients, err := scenario.ListTailscaleClients(\"user1\")\n\trequireNoErrListClients(t, err)\n\n\tuser2Clients, err := scenario.ListTailscaleClients(\"user2\")\n\trequireNoErrListClients(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t_, err = scenario.ListTailscaleClientsFQDNs()\n\trequireNoErrListFQDN(t, err)\n\n\t// user1 can SSH (via check) to all peers\n\tfor _, client := range user1Clients {\n\t\tfor _, peer := range allClients {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Start SSH — will block waiting for check auth\n\t\t\tsshResult := doSSHCheck(t, client, peer)\n\n\t\t\t// Find the auth-id from headscale logs\n\t\t\tauthID := findSSHCheckAuthID(t, headscale)\n\n\t\t\t// Approve via CLI\n\t\t\t_, err := headscale.Execute(\n\t\t\t\t[]string{\n\t\t\t\t\t\"headscale\", \"auth\", \"approve\",\n\t\t\t\t\t\"--auth-id\", authID,\n\t\t\t\t},\n\t\t\t)\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Wait for SSH to complete\n\t\t\tselect {\n\t\t\tcase result := <-sshResult:\n\t\t\t\trequire.NoError(t, result.err)\n\t\t\t\trequire.Contains(\n\t\t\t\t\tt,\n\t\t\t\t\tpeer.ContainerID(),\n\t\t\t\t\tstrings.ReplaceAll(result.stdout, \"\\n\", \"\"),\n\t\t\t\t)\n\t\t\tcase <-time.After(30 * time.Second):\n\t\t\t\tt.Fatal(\"SSH did not complete after auth approval\")\n\t\t\t}\n\t\t}\n\t}\n\n\t// user2 cannot SSH — not in the check policy group\n\tfor _, client := range user2Clients {\n\t\tfor _, peer := range allClients {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tassertSSHPermissionDenied(t, client, peer)\n\t\t}\n\t}\n}\n
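\n// The OIDC variant below drives the same check flow, but completes the\n// approval by visiting headscale's /auth/<auth-id> URL against the mock OIDC\n// provider instead of approving via the CLI.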
\n\nfunc TestSSHOneUserToOneCheckModeOIDC(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser:         1,\n\t\tUsers:                []string{\"user1\", \"user2\"},\n\t\tOIDCSkipUserCreation: true,\n\t\tOIDCUsers: []mockoidc.MockUser{\n\t\t\t// First 2: consumed during node registration\n\t\t\toidcMockUser(\"user1\", true),\n\t\t\toidcMockUser(\"user2\", true),\n\t\t\t// Extra: consumed during SSH check auth flows.\n\t\t\t// Each SSH check pops one user from the queue.\n\t\t\toidcMockUser(\"user1\", true),\n\t\t},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\toidcMap := map[string]string{\n\t\t\"HEADSCALE_OIDC_ISSUER\":             scenario.mockOIDC.Issuer(),\n\t\t\"HEADSCALE_OIDC_CLIENT_ID\":          scenario.mockOIDC.ClientID(),\n\t\t\"CREDENTIALS_DIRECTORY_TEST\":        \"/tmp\",\n\t\t\"HEADSCALE_OIDC_CLIENT_SECRET_PATH\": \"${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret\",\n\t}\n\n\terr = scenario.CreateHeadscaleEnvWithLoginURL(\n\t\t[]tsic.Option{\n\t\t\ttsic.WithSSH(),\n\t\t\ttsic.WithNetfilter(\"off\"),\n\t\t\ttsic.WithPackages(\"openssh\"),\n\t\t\ttsic.WithExtraCommands(\"adduser ssh-it-user\"),\n\t\t\ttsic.WithDockerWorkdir(\"/\"),\n\t\t},\n\t\thsic.WithACLPolicy(sshCheckPolicy()),\n\t\thsic.WithTestName(\"sshcheckoidc\"),\n\t\thsic.WithConfigEnv(oidcMap),\n\t\thsic.WithFileInContainer(\n\t\t\t\"/tmp/hs_client_oidc_secret\",\n\t\t\t[]byte(scenario.mockOIDC.ClientSecret()),\n\t\t),\n\t)\n\trequire.NoError(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\tuser1Clients, err := scenario.ListTailscaleClients(\"user1\")\n\trequireNoErrListClients(t, err)\n\n\tuser2Clients, err := scenario.ListTailscaleClients(\"user2\")\n\trequireNoErrListClients(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t_, err = scenario.ListTailscaleClientsFQDNs()\n\trequireNoErrListFQDN(t, err)\n\n\t// user1 can SSH (via check) to all peers\n\tfor _, client := range user1Clients {\n\t\tfor _, peer := range allClients {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Start SSH — will block waiting for check auth\n\t\t\tsshResult := doSSHCheck(t, client, peer)\n\n\t\t\t// Find the auth-id from headscale logs\n\t\t\tauthID := findSSHCheckAuthID(t, headscale)\n\n\t\t\t// Build auth URL and visit it to trigger OIDC flow.\n\t\t\t// The mock OIDC server auto-authenticates from the user queue.\n\t\t\tauthURL := headscale.GetEndpoint() + \"/auth/\" + authID\n\t\t\tparsedURL, err := url.Parse(authURL)\n\t\t\trequire.NoError(t, err)\n\n\t\t\t_, err = doLoginURL(\"ssh-check-oidc\", parsedURL)\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Wait for SSH to complete\n\t\t\tselect {\n\t\t\tcase result := <-sshResult:\n\t\t\t\trequire.NoError(t, result.err)\n\t\t\t\trequire.Contains(\n\t\t\t\t\tt,\n\t\t\t\t\tpeer.ContainerID(),\n\t\t\t\t\tstrings.ReplaceAll(result.stdout, \"\\n\", \"\"),\n\t\t\t\t)\n\t\t\tcase <-time.After(30 * time.Second):\n\t\t\t\tt.Fatal(\"SSH did not complete after OIDC auth\")\n\t\t\t}\n\t\t}\n\t}\n\n\t// user2 cannot SSH — not in the check policy group\n\tfor _, client := range user2Clients {\n\t\tfor _, peer := range allClients {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tassertSSHPermissionDenied(t, client, peer)\n\t\t}\n\t}\n}\n\n// TestSSHCheckModeUnapprovedTimeout verifies that SSH in check mode is rejected\n// when nobody approves the auth request and the registration cache entry expires.\nfunc TestSSHCheckModeUnapprovedTimeout(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 1,\n\t\tUsers:        []string{\"user1\", \"user2\"},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = 
scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{\n\t\t\ttsic.WithSSH(),\n\t\t\ttsic.WithNetfilter(\"off\"),\n\t\t\ttsic.WithPackages(\"openssh\"),\n\t\t\ttsic.WithExtraCommands(\"adduser ssh-it-user\"),\n\t\t\ttsic.WithDockerWorkdir(\"/\"),\n\t\t},\n\t\thsic.WithACLPolicy(sshCheckPolicy()),\n\t\thsic.WithTestName(\"sshchecktimeout\"),\n\t\thsic.WithConfigEnv(map[string]string{\n\t\t\t\"HEADSCALE_TUNING_REGISTER_CACHE_EXPIRATION\": \"15s\",\n\t\t\t\"HEADSCALE_TUNING_REGISTER_CACHE_CLEANUP\":    \"5s\",\n\t\t}),\n\t)\n\trequire.NoError(t, err)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\tuser1Clients, err := scenario.ListTailscaleClients(\"user1\")\n\trequireNoErrListClients(t, err)\n\n\tuser2Clients, err := scenario.ListTailscaleClients(\"user2\")\n\trequireNoErrListClients(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t_, err = scenario.ListTailscaleClientsFQDNs()\n\trequireNoErrListFQDN(t, err)\n\n\t// user1 attempts SSH — enters check flow, but nobody approves\n\tfor _, client := range user1Clients {\n\t\tfor _, peer := range allClients {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tsshResult := doSSHCheck(t, client, peer)\n\n\t\t\t// Confirm the check flow was entered\n\t\t\t_ = findSSHCheckAuthID(t, headscale)\n\n\t\t\t// Do NOT approve — wait for cache expiry and SSH rejection\n\t\t\tselect {\n\t\t\tcase result := <-sshResult:\n\t\t\t\trequire.Error(t, result.err, \"SSH should be rejected when unapproved\")\n\t\t\t\tassert.Empty(t, result.stdout, \"no command output expected on rejection\")\n\t\t\tcase <-time.After(60 * time.Second):\n\t\t\t\tt.Fatal(\"SSH did not complete after cache expiry timeout\")\n\t\t\t}\n\t\t}\n\t}\n\n\t// user2 still gets immediate Permission Denied\n\tfor _, client := range user2Clients {\n\t\tfor _, peer := range allClients {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tassertSSHPermissionDenied(t, client, peer)\n\t\t}\n\t}\n}\n\n// TestSSHCheckModeCheckPeriodCLI verifies that after approval with a short\n// checkPeriod, the session expires and the next SSH connection requires\n// re-authentication via a new check flow.\nfunc TestSSHCheckModeCheckPeriodCLI(t *testing.T) {\n\tIntegrationSkip(t)\n\n\t// 1 minute is the documented minimum checkPeriod\n\tscenario := sshScenario(t, sshCheckPolicyWithPeriod(time.Minute), \"ssh-checkperiod\", 1)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\tuser1Clients, err := scenario.ListTailscaleClients(\"user1\")\n\trequireNoErrListClients(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t_, err = scenario.ListTailscaleClientsFQDNs()\n\trequireNoErrListFQDN(t, err)\n\n\t// === Phase 1: First SSH check — approve, verify success ===\n\tfor _, client := range user1Clients {\n\t\tfor _, peer := range allClients {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tsshResult := doSSHCheck(t, client, peer)\n\t\t\tfirstAuthID := findSSHCheckAuthID(t, headscale)\n\n\t\t\t_, err := headscale.Execute(\n\t\t\t\t[]string{\n\t\t\t\t\t\"headscale\", \"auth\", \"approve\",\n\t\t\t\t\t\"--auth-id\", firstAuthID,\n\t\t\t\t},\n\t\t\t)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tselect 
{\n\t\t\tcase result := <-sshResult:\n\t\t\t\trequire.NoError(t, result.err, \"first SSH should succeed after approval\")\n\t\t\t\trequire.Contains(\n\t\t\t\t\tt,\n\t\t\t\t\tpeer.ContainerID(),\n\t\t\t\t\tstrings.ReplaceAll(result.stdout, \"\\n\", \"\"),\n\t\t\t\t)\n\t\t\tcase <-time.After(30 * time.Second):\n\t\t\t\tt.Fatal(\"first SSH did not complete after auth approval\")\n\t\t\t}\n\n\t\t\t// === Phase 2: Wait for checkPeriod to expire ===\n\t\t\t//nolint:forbidigo // Intentional sleep: waiting for the check period session\n\t\t\t// to expire. This is a time-based expiry, not a pollable condition — the\n\t\t\t// Tailscale client caches the approval for SessionDuration and only\n\t\t\t// re-triggers the check flow after it elapses.\n\t\t\ttime.Sleep(70 * time.Second)\n\n\t\t\t// === Phase 3: Second SSH — must re-authenticate ===\n\t\t\tsshResult2 := doSSHCheck(t, client, peer)\n\t\t\tsecondAuthID := findNewSSHCheckAuthID(t, headscale, firstAuthID)\n\n\t\t\trequire.NotEqual(\n\t\t\t\tt,\n\t\t\t\tfirstAuthID,\n\t\t\t\tsecondAuthID,\n\t\t\t\t\"second SSH should trigger a new auth flow after checkPeriod expiry\",\n\t\t\t)\n\n\t\t\t_, err = headscale.Execute(\n\t\t\t\t[]string{\n\t\t\t\t\t\"headscale\", \"auth\", \"approve\",\n\t\t\t\t\t\"--auth-id\", secondAuthID,\n\t\t\t\t},\n\t\t\t)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tselect {\n\t\t\tcase result := <-sshResult2:\n\t\t\t\trequire.NoError(t, result.err, \"second SSH should succeed after re-approval\")\n\t\t\t\trequire.Contains(\n\t\t\t\t\tt,\n\t\t\t\t\tpeer.ContainerID(),\n\t\t\t\t\tstrings.ReplaceAll(result.stdout, \"\\n\", \"\"),\n\t\t\t\t)\n\t\t\tcase <-time.After(30 * time.Second):\n\t\t\t\tt.Fatal(\"second SSH did not complete after re-auth approval\")\n\t\t\t}\n\t\t}\n\t}\n}\n\n// TestSSHCheckModeAutoApprove verifies that after SSH check approval, a second\n// SSH within the checkPeriod is auto-approved without requiring manual approval.\nfunc TestSSHCheckModeAutoApprove(t *testing.T) {\n\tIntegrationSkip(t)\n\n\t// 5 minute checkPeriod — long enough not to expire during test\n\tscenario := sshScenario(t, sshCheckPolicyWithPeriod(5*time.Minute), \"ssh-autoapprove\", 1)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\tuser1Clients, err := scenario.ListTailscaleClients(\"user1\")\n\trequireNoErrListClients(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t_, err = scenario.ListTailscaleClientsFQDNs()\n\trequireNoErrListFQDN(t, err)\n\n\t// === Phase 1: First SSH check — approve, verify success ===\n\tfor _, client := range user1Clients {\n\t\tfor _, peer := range allClients {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tsshResult := doSSHCheck(t, client, peer)\n\t\t\tfirstAuthID := findSSHCheckAuthID(t, headscale)\n\n\t\t\t_, err := headscale.Execute(\n\t\t\t\t[]string{\n\t\t\t\t\t\"headscale\", \"auth\", \"approve\",\n\t\t\t\t\t\"--auth-id\", firstAuthID,\n\t\t\t\t},\n\t\t\t)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tselect {\n\t\t\tcase result := <-sshResult:\n\t\t\t\trequire.NoError(t, result.err, \"first SSH should succeed after approval\")\n\t\t\t\trequire.Contains(\n\t\t\t\t\tt,\n\t\t\t\t\tpeer.ContainerID(),\n\t\t\t\t\tstrings.ReplaceAll(result.stdout, \"\\n\", \"\"),\n\t\t\t\t)\n\t\t\tcase <-time.After(30 * time.Second):\n\t\t\t\tt.Fatal(\"first SSH did not complete after auth 
approval\")\n\t\t\t}\n\n\t\t\t// === Phase 2: Immediate retry — should auto-approve ===\n\t\t\tresult, _, err := doSSH(t, client, peer)\n\t\t\trequire.NoError(t, err, \"second SSH should auto-approve without manual auth\")\n\t\t\trequire.Contains(\n\t\t\t\tt,\n\t\t\t\tpeer.ContainerID(),\n\t\t\t\tstrings.ReplaceAll(result, \"\\n\", \"\"),\n\t\t\t)\n\t\t}\n\t}\n}\n\n// TestSSHCheckModeNegativeCLI verifies that `headscale auth reject`\n// properly denies an SSH check.\nfunc TestSSHCheckModeNegativeCLI(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tscenario := sshScenario(t, sshCheckPolicy(), \"ssh-negcli\", 1)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\tallClients, err := scenario.ListTailscaleClients()\n\trequireNoErrListClients(t, err)\n\n\tuser1Clients, err := scenario.ListTailscaleClients(\"user1\")\n\trequireNoErrListClients(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequire.NoError(t, err)\n\n\terr = scenario.WaitForTailscaleSync()\n\trequireNoErrSync(t, err)\n\n\t_, err = scenario.ListTailscaleClientsFQDNs()\n\trequireNoErrListFQDN(t, err)\n\n\tfor _, client := range user1Clients {\n\t\tfor _, peer := range allClients {\n\t\t\tif client.Hostname() == peer.Hostname() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tsshResult := doSSHCheck(t, client, peer)\n\t\t\tauthID := findSSHCheckAuthID(t, headscale)\n\n\t\t\t// Reject via CLI\n\t\t\t_, err := headscale.Execute(\n\t\t\t\t[]string{\n\t\t\t\t\t\"headscale\", \"auth\", \"reject\",\n\t\t\t\t\t\"--auth-id\", authID,\n\t\t\t\t},\n\t\t\t)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tselect {\n\t\t\tcase result := <-sshResult:\n\t\t\t\trequire.Error(t, result.err, \"SSH should be rejected\")\n\t\t\t\tassert.Empty(t, result.stdout, \"no command output expected on rejection\")\n\t\t\tcase <-time.After(30 * time.Second):\n\t\t\t\tt.Fatal(\"SSH did not complete after auth rejection\")\n\t\t\t}\n\t\t}\n\t}\n}\n\n// TestSSHLocalpart tests that SSH with localpart:*@<domain> works correctly.\n// localpart maps the local-part of each user's OIDC email to an OS user,\n// so user1@headscale.net can SSH as local user \"user1\".\n// This requires OIDC login so that users have real email addresses.\nfunc TestSSHLocalpart(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tbaseACLs := []policyv2.ACL{\n\t\t{\n\t\t\tAction:   \"accept\",\n\t\t\tProtocol: \"tcp\",\n\t\t\tSources:  []policyv2.Alias{wildcard()},\n\t\t\tDestinations: []policyv2.AliasWithPorts{\n\t\t\t\taliasWithPorts(wildcard(), tailcfg.PortRangeAny),\n\t\t\t},\n\t\t},\n\t}\n\n\ttests := []struct {\n\t\tname   string\n\t\tpolicy *policyv2.Policy\n\t\ttestFn func(t *testing.T, scenario *Scenario)\n\t}{\n\t\t{\n\t\t\tname: \"MemberAndTagged\",\n\t\t\tpolicy: &policyv2.Policy{\n\t\t\t\tACLs: baseACLs,\n\t\t\t\tSSHs: []policyv2.SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: policyv2.SSHSrcAliases{new(policyv2.AutoGroupMember)},\n\t\t\t\t\t\tDestinations: policyv2.SSHDstAliases{\n\t\t\t\t\t\t\tnew(policyv2.AutoGroupMember),\n\t\t\t\t\t\t\tnew(policyv2.AutoGroupTagged),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tUsers: []policyv2.SSHUser{\"localpart:*@headscale.net\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\ttestFn: func(t *testing.T, scenario *Scenario) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tuser1Clients, err := scenario.ListTailscaleClients(\"user1\")\n\t\t\t\trequireNoErrListClients(t, err)\n\n\t\t\t\tuser2Clients, err := scenario.ListTailscaleClients(\"user2\")\n\t\t\t\trequireNoErrListClients(t, err)\n\n\t\t\t\t// user1 can SSH to user2's nodes as \"user1\" (localpart of 
user1@headscale.net)\n\t\t\t\tfor _, client := range user1Clients {\n\t\t\t\t\tfor _, peer := range user2Clients {\n\t\t\t\t\t\tassertSSHHostnameAsUser(t, client, peer, \"user1\")\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// user2 can SSH to user1's nodes as \"user2\" (localpart of user2@headscale.net)\n\t\t\t\tfor _, client := range user2Clients {\n\t\t\t\t\tfor _, peer := range user1Clients {\n\t\t\t\t\t\tassertSSHHostnameAsUser(t, client, peer, \"user2\")\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// user1 CANNOT SSH as \"user2\" — no rule maps user1's IPs to user2\n\t\t\t\tfor _, client := range user1Clients {\n\t\t\t\t\tfor _, peer := range user2Clients {\n\t\t\t\t\t\tassertSSHPermissionDeniedAsUser(t, client, peer, \"user2\")\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// user2 CANNOT SSH as \"user1\" — no rule maps user2's IPs to user1\n\t\t\t\tfor _, client := range user2Clients {\n\t\t\t\t\tfor _, peer := range user1Clients {\n\t\t\t\t\t\tassertSSHPermissionDeniedAsUser(t, client, peer, \"user1\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"AutogroupSelf\",\n\t\t\tpolicy: &policyv2.Policy{\n\t\t\t\tACLs: baseACLs,\n\t\t\t\tSSHs: []policyv2.SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:       \"accept\",\n\t\t\t\t\t\tSources:      policyv2.SSHSrcAliases{new(policyv2.AutoGroupMember)},\n\t\t\t\t\t\tDestinations: policyv2.SSHDstAliases{new(policyv2.AutoGroupSelf)},\n\t\t\t\t\t\tUsers:        []policyv2.SSHUser{\"localpart:*@headscale.net\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\ttestFn: func(t *testing.T, scenario *Scenario) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tuser1Clients, err := scenario.ListTailscaleClients(\"user1\")\n\t\t\t\trequireNoErrListClients(t, err)\n\n\t\t\t\tuser2Clients, err := scenario.ListTailscaleClients(\"user2\")\n\t\t\t\trequireNoErrListClients(t, err)\n\n\t\t\t\t// With autogroup:self, cross-user SSH should be denied regardless of localpart.\n\t\t\t\t// user1 cannot SSH to user2's nodes as \"user1\"\n\t\t\t\tfor _, client := range user1Clients {\n\t\t\t\t\tfor _, peer := range user2Clients {\n\t\t\t\t\t\tassertSSHPermissionDeniedAsUser(t, client, peer, \"user1\")\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// user2 cannot SSH to user1's nodes as \"user2\"\n\t\t\t\tfor _, client := range user2Clients {\n\t\t\t\t\tfor _, peer := range user1Clients {\n\t\t\t\t\t\tassertSSHPermissionDeniedAsUser(t, client, peer, \"user2\")\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// user1 also cannot SSH to user2's nodes as \"user2\"\n\t\t\t\tfor _, client := range user1Clients {\n\t\t\t\t\tfor _, peer := range user2Clients {\n\t\t\t\t\t\tassertSSHPermissionDeniedAsUser(t, client, peer, \"user2\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"LocalpartPlusRoot\",\n\t\t\tpolicy: &policyv2.Policy{\n\t\t\t\tACLs: baseACLs,\n\t\t\t\tSSHs: []policyv2.SSH{\n\t\t\t\t\t{\n\t\t\t\t\t\tAction:  \"accept\",\n\t\t\t\t\t\tSources: policyv2.SSHSrcAliases{new(policyv2.AutoGroupMember)},\n\t\t\t\t\t\tDestinations: policyv2.SSHDstAliases{\n\t\t\t\t\t\t\tnew(policyv2.AutoGroupMember),\n\t\t\t\t\t\t\tnew(policyv2.AutoGroupTagged),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tUsers: []policyv2.SSHUser{\n\t\t\t\t\t\t\t\"localpart:*@headscale.net\",\n\t\t\t\t\t\t\t\"root\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\ttestFn: func(t *testing.T, scenario *Scenario) {\n\t\t\t\tt.Helper()\n\n\t\t\t\tuser1Clients, err := scenario.ListTailscaleClients(\"user1\")\n\t\t\t\trequireNoErrListClients(t, err)\n\n\t\t\t\tuser2Clients, err := scenario.ListTailscaleClients(\"user2\")\n\t\t\t\trequireNoErrListClients(t, 
err)\n\n\t\t\t\t// localpart works: user1 can SSH to user2's nodes as \"user1\"\n\t\t\t\tfor _, client := range user1Clients {\n\t\t\t\t\tfor _, peer := range user2Clients {\n\t\t\t\t\t\tassertSSHHostnameAsUser(t, client, peer, \"user1\")\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// root also works: user1 can SSH to user2's nodes as \"root\"\n\t\t\t\tfor _, client := range user1Clients {\n\t\t\t\t\tfor _, peer := range user2Clients {\n\t\t\t\t\t\tassertSSHHostnameAsUser(t, client, peer, \"root\")\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// user2 can SSH as \"user2\" (localpart)\n\t\t\t\tfor _, client := range user2Clients {\n\t\t\t\t\tfor _, peer := range user1Clients {\n\t\t\t\t\t\tassertSSHHostnameAsUser(t, client, peer, \"user2\")\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// user2 can SSH as \"root\"\n\t\t\t\tfor _, client := range user2Clients {\n\t\t\t\t\tfor _, peer := range user1Clients {\n\t\t\t\t\t\tassertSSHHostnameAsUser(t, client, peer, \"root\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tspec := ScenarioSpec{\n\t\t\t\tNodesPerUser: 1,\n\t\t\t\tUsers:        []string{\"user1\", \"user2\"},\n\t\t\t\tOIDCUsers: []mockoidc.MockUser{\n\t\t\t\t\toidcMockUser(\"user1\", true),\n\t\t\t\t\toidcMockUser(\"user2\", true),\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tscenario, err := NewScenario(spec)\n\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\t\t\toidcMap := map[string]string{\n\t\t\t\t\"HEADSCALE_OIDC_ISSUER\":             scenario.mockOIDC.Issuer(),\n\t\t\t\t\"HEADSCALE_OIDC_CLIENT_ID\":          scenario.mockOIDC.ClientID(),\n\t\t\t\t\"CREDENTIALS_DIRECTORY_TEST\":        \"/tmp\",\n\t\t\t\t\"HEADSCALE_OIDC_CLIENT_SECRET_PATH\": \"${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret\",\n\t\t\t}\n\n\t\t\terr = scenario.CreateHeadscaleEnvWithLoginURL(\n\t\t\t\t[]tsic.Option{\n\t\t\t\t\ttsic.WithSSH(),\n\t\t\t\t\ttsic.WithNetfilter(\"off\"),\n\t\t\t\t\ttsic.WithPackages(\"openssh\"),\n\t\t\t\t\ttsic.WithExtraCommands(\"adduser user1\", \"adduser user2\"),\n\t\t\t\t\ttsic.WithDockerWorkdir(\"/\"),\n\t\t\t\t},\n\t\t\t\thsic.WithTestName(\"sshlocalpart\"),\n\t\t\t\thsic.WithACLPolicy(tt.policy),\n\t\t\t\thsic.WithConfigEnv(oidcMap),\n\t\t\t\thsic.WithFileInContainer(\"/tmp/hs_client_oidc_secret\", []byte(scenario.mockOIDC.ClientSecret())),\n\t\t\t)\n\t\t\trequireNoErrHeadscaleEnv(t, err)\n\n\t\t\terr = scenario.WaitForTailscaleSync()\n\t\t\trequireNoErrSync(t, err)\n\n\t\t\t_, err = scenario.ListTailscaleClientsFQDNs()\n\t\t\trequireNoErrListFQDN(t, err)\n\n\t\t\ttt.testFn(t, scenario)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "integration/tags_test.go",
    "content": "package integration\n\nimport (\n\t\"sort\"\n\t\"testing\"\n\t\"time\"\n\n\tv1 \"github.com/juanfont/headscale/gen/go/headscale/v1\"\n\tpolicyv2 \"github.com/juanfont/headscale/hscontrol/policy/v2\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/juanfont/headscale/integration/hsic\"\n\t\"github.com/juanfont/headscale/integration/tsic\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"tailscale.com/tailcfg\"\n)\n\nconst tagTestUser = \"taguser\"\n\n// =============================================================================\n// Helper Functions\n// =============================================================================\n\n// tagsTestPolicy creates a policy for tag tests with:\n// - tag:valid-owned: owned by the specified user\n// - tag:second: owned by the specified user\n// - tag:valid-unowned: owned by \"other-user\" (not the test user)\n// - tag:nonexistent is deliberately NOT defined.\nfunc tagsTestPolicy() *policyv2.Policy {\n\treturn &policyv2.Policy{\n\t\tTagOwners: policyv2.TagOwners{\n\t\t\t\"tag:valid-owned\":   policyv2.Owners{new(policyv2.Username(tagTestUser + \"@\"))},\n\t\t\t\"tag:second\":        policyv2.Owners{new(policyv2.Username(tagTestUser + \"@\"))},\n\t\t\t\"tag:valid-unowned\": policyv2.Owners{new(policyv2.Username(\"other-user@\"))},\n\t\t\t// Note: tag:nonexistent deliberately NOT defined\n\t\t},\n\t\tACLs: []policyv2.ACL{\n\t\t\t{\n\t\t\t\tAction:       \"accept\",\n\t\t\t\tSources:      []policyv2.Alias{policyv2.Wildcard},\n\t\t\t\tDestinations: []policyv2.AliasWithPorts{{Alias: policyv2.Wildcard, Ports: []tailcfg.PortRange{tailcfg.PortRangeAny}}},\n\t\t\t},\n\t\t},\n\t}\n}\n\n// tagsEqual compares two tag slices as unordered sets.\nfunc tagsEqual(actual, expected []string) bool {\n\tif len(actual) != len(expected) {\n\t\treturn false\n\t}\n\n\tsortedActual := append([]string{}, actual...)\n\tsortedExpected := append([]string{}, expected...)\n\n\tsort.Strings(sortedActual)\n\tsort.Strings(sortedExpected)\n\n\tfor i := range sortedActual {\n\t\tif sortedActual[i] != sortedExpected[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\n// assertNodeHasTagsWithCollect asserts that a node has exactly the expected tags (order-independent).\nfunc assertNodeHasTagsWithCollect(c *assert.CollectT, node *v1.Node, expectedTags []string) {\n\tactualTags := node.GetTags()\n\tsortedActual := append([]string{}, actualTags...)\n\tsortedExpected := append([]string{}, expectedTags...)\n\n\tsort.Strings(sortedActual)\n\tsort.Strings(sortedExpected)\n\tassert.Equal(c, sortedExpected, sortedActual, \"Node %s tags mismatch\", node.GetName())\n}\n\n// assertNodeHasNoTagsWithCollect asserts that a node has no tags.\nfunc assertNodeHasNoTagsWithCollect(c *assert.CollectT, node *v1.Node) {\n\tassert.Empty(c, node.GetTags(), \"Node %s should have no tags, but has: %v\", node.GetName(), node.GetTags())\n}\n\n// assertNodeSelfHasTagsWithCollect asserts that a client's self view has exactly the expected tags.\n// This validates that tag updates have propagated to the node's own status (issue #2978).\nfunc assertNodeSelfHasTagsWithCollect(c *assert.CollectT, client TailscaleClient, expectedTags []string) {\n\tstatus, err := client.Status()\n\t//nolint:testifylint // must use assert with CollectT in EventuallyWithT\n\tassert.NoError(c, err, \"failed to get client status\")\n\n\tif status == nil || status.Self == nil {\n\t\tassert.Fail(c, \"client status or self is nil\")\n\t\treturn\n\t}\n\n\tvar 
actualTagsSlice []string\n\n\tif status.Self.Tags != nil {\n\t\tfor _, tag := range status.Self.Tags.All() {\n\t\t\tactualTagsSlice = append(actualTagsSlice, tag)\n\t\t}\n\t}\n\n\tsortedActual := append([]string{}, actualTagsSlice...)\n\tsortedExpected := append([]string{}, expectedTags...)\n\n\tsort.Strings(sortedActual)\n\tsort.Strings(sortedExpected)\n\tassert.Equal(c, sortedExpected, sortedActual, \"Client %s self tags mismatch\", client.Hostname())\n}\n\n// =============================================================================\n// Test Suite 2: Auth Key WITH Pre-assigned Tags\n// =============================================================================\n\n// TestTagsAuthKeyWithTagRequestDifferentTag tests that requesting a different tag\n// than what the auth key provides results in registration failure.\n//\n// Test 2.1: Request different tag than key provides\n// Setup: Run `tailscale up --advertise-tags=\"tag:second\" --auth-key AUTH_KEY_WITH_TAG`\n// Expected: Registration fails with error containing \"requested tags [tag:second] are invalid or not permitted\".\nfunc TestTagsAuthKeyWithTagRequestDifferentTag(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0, // We'll create the node manually\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-authkey-diff\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\tuserMap, err := headscale.MapUsers()\n\trequire.NoError(t, err)\n\n\tuserID := userMap[tagTestUser].GetId()\n\n\t// Create a tagged PreAuthKey with tag:valid-owned\n\tauthKey, err := scenario.CreatePreAuthKeyWithTags(userID, false, false, []string{\"tag:valid-owned\"})\n\trequire.NoError(t, err)\n\tt.Logf(\"Created tagged PreAuthKey with tags: %v\", authKey.GetAclTags())\n\n\t// Create a tailscale client that will try to use --advertise-tags with a DIFFERENT tag\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\ttsic.WithExtraLoginArgs([]string{\"--advertise-tags=tag:second\"}),\n\t)\n\trequire.NoError(t, err)\n\n\t// Login should fail because the advertised tags don't match the auth key's tags\n\terr = client.Login(headscale.GetEndpoint(), authKey.GetKey())\n\n\t// Document actual behavior - we expect this to fail\n\tif err != nil {\n\t\tt.Logf(\"Test 2.1 PASS: Registration correctly rejected with error: %v\", err)\n\t\tassert.ErrorContains(t, err, \"requested tags\")\n\t} else {\n\t\t// If it succeeded, document this unexpected behavior\n\t\tt.Logf(\"Test 2.1 UNEXPECTED: Registration succeeded when it should have failed\")\n\n\t\t// Check what tags the node actually has\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tnodes, err := headscale.ListNodes()\n\t\t\tassert.NoError(c, err)\n\n\t\t\tif len(nodes) == 1 {\n\t\t\t\tt.Logf(\"Node registered with tags: %v (expected rejection)\", nodes[0].GetTags())\n\t\t\t}\n\t\t}, 10*time.Second, 500*time.Millisecond, \"checking node state\")\n\n\t\tt.Fail()\n\t}\n}\n\n// TestTagsAuthKeyWithTagNoAdvertiseFlag tests that registering with a tagged auth key\n// but no --advertise-tags flag results in the node inheriting the key's tags.\n//\n// Test 2.2: 
Register with no advertise-tags flag\n// Setup: Run `tailscale up --auth-key AUTH_KEY_WITH_TAG` (no --advertise-tags)\n// Expected: Registration succeeds, node has [\"tag:valid-owned\"] (inherited from key).\nfunc TestTagsAuthKeyWithTagNoAdvertiseFlag(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-authkey-inherit\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\tuserMap, err := headscale.MapUsers()\n\trequire.NoError(t, err)\n\n\tuserID := userMap[tagTestUser].GetId()\n\n\t// Create a tagged PreAuthKey with tag:valid-owned\n\tauthKey, err := scenario.CreatePreAuthKeyWithTags(userID, false, false, []string{\"tag:valid-owned\"})\n\trequire.NoError(t, err)\n\tt.Logf(\"Created tagged PreAuthKey with tags: %v\", authKey.GetAclTags())\n\n\t// Create a tailscale client WITHOUT --advertise-tags\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\t// Note: NO WithExtraLoginArgs for --advertise-tags\n\t)\n\trequire.NoError(t, err)\n\n\t// Login with the tagged PreAuthKey\n\terr = client.Login(headscale.GetEndpoint(), authKey.GetKey())\n\trequire.NoError(t, err)\n\n\t// Wait for node to be registered and verify it has the key's tags\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1, \"Should have exactly 1 node\")\n\n\t\tif len(nodes) == 1 {\n\t\t\tnode := nodes[0]\n\t\t\tt.Logf(\"Node registered with tags: %v\", node.GetTags())\n\t\t\tassertNodeHasTagsWithCollect(c, node, []string{\"tag:valid-owned\"})\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond, \"verifying node inherited tags from auth key\")\n\n\tt.Logf(\"Test 2.2 completed - node inherited tags from auth key\")\n}\n\n// TestTagsAuthKeyWithTagCannotAddViaCLI tests that nodes registered with a tagged auth key\n// cannot add additional tags via the client CLI.\n//\n// Test 2.3: Cannot add tags via CLI after registration\n// Setup:\n//  1. Register with --auth-key AUTH_KEY_WITH_TAG\n//  2. 
Run `tailscale up --advertise-tags=\"tag:valid-owned,tag:second\" --auth-key AUTH_KEY_WITH_TAG`\n//\n// Expected: Command fails with error containing \"requested tags [tag:second] are invalid or not permitted\".\nfunc TestTagsAuthKeyWithTagCannotAddViaCLI(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-authkey-noadd\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\tuserMap, err := headscale.MapUsers()\n\trequire.NoError(t, err)\n\n\tuserID := userMap[tagTestUser].GetId()\n\n\t// Create a tagged PreAuthKey with tag:valid-owned\n\tauthKey, err := scenario.CreatePreAuthKeyWithTags(userID, false, false, []string{\"tag:valid-owned\"})\n\trequire.NoError(t, err)\n\n\t// Create and register a tailscale client\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t)\n\trequire.NoError(t, err)\n\n\t// Initial login\n\terr = client.Login(headscale.GetEndpoint(), authKey.GetKey())\n\trequire.NoError(t, err)\n\n\t// Wait for initial registration\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1)\n\n\t\tif len(nodes) == 1 {\n\t\t\tassertNodeHasTagsWithCollect(c, nodes[0], []string{\"tag:valid-owned\"})\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond, \"waiting for initial registration\")\n\n\tt.Logf(\"Node registered with tag:valid-owned, now attempting to add tag:second via CLI\")\n\n\t// Attempt to add additional tags via tailscale up\n\tcommand := []string{\n\t\t\"tailscale\", \"up\",\n\t\t\"--login-server=\" + headscale.GetEndpoint(),\n\t\t\"--authkey=\" + authKey.GetKey(),\n\t\t\"--advertise-tags=tag:valid-owned,tag:second\",\n\t}\n\t_, stderr, err := client.Execute(command)\n\n\t// Document actual behavior\n\tif err != nil {\n\t\tt.Logf(\"Test 2.3 PASS: CLI correctly rejected adding tags: %v, stderr: %s\", err, stderr)\n\t} else {\n\t\tt.Logf(\"Test 2.3: CLI command succeeded, checking if tags actually changed\")\n\n\t\t// Check if tags actually changed\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tnodes, err := headscale.ListNodes()\n\t\t\tassert.NoError(c, err)\n\n\t\t\tif len(nodes) == 1 {\n\t\t\t\t// If still only has original tag, that's the expected behavior\n\t\t\t\tif tagsEqual(nodes[0].GetTags(), []string{\"tag:valid-owned\"}) {\n\t\t\t\t\tt.Logf(\"Test 2.3 PASS: Tags unchanged after CLI attempt: %v\", nodes[0].GetTags())\n\t\t\t\t} else {\n\t\t\t\t\tt.Logf(\"Test 2.3 FAIL: Tags changed unexpectedly to: %v\", nodes[0].GetTags())\n\t\t\t\t\tassert.Fail(c, \"Tags should not have changed\")\n\t\t\t\t}\n\t\t\t}\n\t\t}, 10*time.Second, 500*time.Millisecond, \"verifying tags unchanged\")\n\t}\n}\n\n// TestTagsAuthKeyWithTagCannotChangeViaCLI tests that nodes registered with a tagged auth key\n// cannot change to a completely different tag set via the client CLI.\n//\n// Test 2.4: Cannot change to different tag set via CLI\n// Setup:\n//  1. Register with --auth-key AUTH_KEY_WITH_TAG\n//  2. 
Run `tailscale up --advertise-tags=\"tag:second\" --auth-key AUTH_KEY_WITH_TAG`\n//\n// Expected: Command fails, tags remain [\"tag:valid-owned\"].\nfunc TestTagsAuthKeyWithTagCannotChangeViaCLI(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-authkey-nochange\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\tuserMap, err := headscale.MapUsers()\n\trequire.NoError(t, err)\n\n\tuserID := userMap[tagTestUser].GetId()\n\n\t// Create a tagged PreAuthKey with tag:valid-owned\n\tauthKey, err := scenario.CreatePreAuthKeyWithTags(userID, false, false, []string{\"tag:valid-owned\"})\n\trequire.NoError(t, err)\n\n\t// Create and register a tailscale client\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t)\n\trequire.NoError(t, err)\n\n\t// Initial login\n\terr = client.Login(headscale.GetEndpoint(), authKey.GetKey())\n\trequire.NoError(t, err)\n\n\t// Wait for initial registration\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1)\n\t}, 30*time.Second, 500*time.Millisecond, \"waiting for initial registration\")\n\n\tt.Logf(\"Node registered, now attempting to change to different tag via CLI\")\n\n\t// Attempt to change to a different tag via tailscale up\n\tcommand := []string{\n\t\t\"tailscale\", \"up\",\n\t\t\"--login-server=\" + headscale.GetEndpoint(),\n\t\t\"--authkey=\" + authKey.GetKey(),\n\t\t\"--advertise-tags=tag:second\",\n\t}\n\t_, stderr, err := client.Execute(command)\n\n\t// Document actual behavior\n\tif err != nil {\n\t\tt.Logf(\"Test 2.4 PASS: CLI correctly rejected changing tags: %v, stderr: %s\", err, stderr)\n\t} else {\n\t\tt.Logf(\"Test 2.4: CLI command succeeded, checking if tags actually changed\")\n\n\t\t// Check if tags remain unchanged\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tnodes, err := headscale.ListNodes()\n\t\t\tassert.NoError(c, err)\n\n\t\t\tif len(nodes) == 1 {\n\t\t\t\tif tagsEqual(nodes[0].GetTags(), []string{\"tag:valid-owned\"}) {\n\t\t\t\t\tt.Logf(\"Test 2.4 PASS: Tags unchanged: %v\", nodes[0].GetTags())\n\t\t\t\t} else {\n\t\t\t\t\tt.Logf(\"Test 2.4 FAIL: Tags changed unexpectedly to: %v\", nodes[0].GetTags())\n\t\t\t\t\tassert.Fail(c, \"Tags should not have changed\")\n\t\t\t\t}\n\t\t\t}\n\t\t}, 10*time.Second, 500*time.Millisecond, \"verifying tags unchanged\")\n\t}\n}\n\n// TestTagsAuthKeyWithTagAdminOverrideReauthPreserves tests that admin-assigned tags\n// are preserved even after reauthentication - admin decisions are authoritative.\n//\n// Test 2.5: Admin assignment is preserved through reauth\n// Setup:\n//  1. Register with --auth-key AUTH_KEY_WITH_TAG\n//  2. Assign [\"tag:second\"] via headscale CLI\n//  3. 
Run `tailscale up --auth-key AUTH_KEY_WITH_TAG --force-reauth`\n//\n// Expected: After step 2 tags are [\"tag:second\"], after step 3 tags remain [\"tag:second\"].\nfunc TestTagsAuthKeyWithTagAdminOverrideReauthPreserves(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-authkey-admin\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\tuserMap, err := headscale.MapUsers()\n\trequire.NoError(t, err)\n\n\tuserID := userMap[tagTestUser].GetId()\n\n\t// Create a tagged PreAuthKey with tag:valid-owned\n\tauthKey, err := scenario.CreatePreAuthKeyWithTags(userID, true, false, []string{\"tag:valid-owned\"})\n\trequire.NoError(t, err)\n\n\t// Create and register a tailscale client\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t)\n\trequire.NoError(t, err)\n\n\t// Initial login\n\terr = client.Login(headscale.GetEndpoint(), authKey.GetKey())\n\trequire.NoError(t, err)\n\n\t// Wait for initial registration and get node ID\n\tvar nodeID uint64\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1)\n\n\t\tif len(nodes) == 1 {\n\t\t\tnodeID = nodes[0].GetId()\n\t\t\tassertNodeHasTagsWithCollect(c, nodes[0], []string{\"tag:valid-owned\"})\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond, \"waiting for initial registration\")\n\n\tt.Logf(\"Step 1 complete: Node %d registered with tag:valid-owned\", nodeID)\n\n\t// Step 2: Admin assigns different tags via headscale CLI\n\terr = headscale.SetNodeTags(nodeID, []string{\"tag:second\"})\n\trequire.NoError(t, err)\n\n\t// Verify admin assignment took effect (server-side)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\n\t\tif len(nodes) == 1 {\n\t\t\tt.Logf(\"After admin assignment, server tags are: %v\", nodes[0].GetTags())\n\t\t\tassertNodeHasTagsWithCollect(c, nodes[0], []string{\"tag:second\"})\n\t\t}\n\t}, 10*time.Second, 500*time.Millisecond, \"verifying admin tag assignment on server\")\n\n\t// Verify admin assignment propagated to node's self view (issue #2978)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tassertNodeSelfHasTagsWithCollect(c, client, []string{\"tag:second\"})\n\t}, 30*time.Second, 500*time.Millisecond, \"verifying admin tag assignment propagated to node self\")\n\n\tt.Logf(\"Step 2 complete: Admin assigned tag:second (verified on both server and node self)\")\n\n\t// Step 3: Force reauthentication\n\tcommand := []string{\n\t\t\"tailscale\", \"up\",\n\t\t\"--login-server=\" + headscale.GetEndpoint(),\n\t\t\"--authkey=\" + authKey.GetKey(),\n\t\t\"--force-reauth\",\n\t}\n\t//nolint:errcheck // Intentionally ignoring error - we check results below\n\tclient.Execute(command)\n\n\t// Verify admin tags are preserved even after reauth - admin decisions are authoritative (server-side)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.GreaterOrEqual(c, 
len(nodes), 1, \"Should have at least 1 node\")\n\n\t\tif len(nodes) >= 1 {\n\t\t\t// Find the most recently updated node (in case a new one was created)\n\t\t\tnode := nodes[len(nodes)-1]\n\t\t\tt.Logf(\"After reauth, server tags are: %v\", node.GetTags())\n\n\t\t\t// Expected: admin-assigned tags are preserved through reauth\n\t\t\tassertNodeHasTagsWithCollect(c, node, []string{\"tag:second\"})\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond, \"admin tags should be preserved after reauth on server\")\n\n\t// Verify admin tags are preserved in node's self view after reauth (issue #2978)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tassertNodeSelfHasTagsWithCollect(c, client, []string{\"tag:second\"})\n\t}, 30*time.Second, 500*time.Millisecond, \"admin tags should be preserved after reauth in node self\")\n\n\tt.Logf(\"Test 2.5 PASS: Admin tags preserved through reauth (admin decisions are authoritative)\")\n}\n\n// TestTagsAuthKeyWithTagCLICannotModifyAdminTags tests that the client CLI\n// cannot modify admin-assigned tags.\n//\n// Test 2.6: Client CLI cannot modify admin-assigned tags\n// Setup:\n//  1. Register with --auth-key AUTH_KEY_WITH_TAG\n//  2. Assign [\"tag:valid-owned\", \"tag:second\"] via headscale CLI\n//  3. Run `tailscale up --advertise-tags=\"tag:valid-owned\" --auth-key AUTH_KEY_WITH_TAG`\n//\n// Expected: Command either fails or is no-op, tags remain [\"tag:valid-owned\", \"tag:second\"].\nfunc TestTagsAuthKeyWithTagCLICannotModifyAdminTags(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-authkey-noadmin\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\tuserMap, err := headscale.MapUsers()\n\trequire.NoError(t, err)\n\n\tuserID := userMap[tagTestUser].GetId()\n\n\t// Create a tagged PreAuthKey with tag:valid-owned\n\tauthKey, err := scenario.CreatePreAuthKeyWithTags(userID, true, false, []string{\"tag:valid-owned\"})\n\trequire.NoError(t, err)\n\n\t// Create and register a tailscale client\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t)\n\trequire.NoError(t, err)\n\n\t// Initial login\n\terr = client.Login(headscale.GetEndpoint(), authKey.GetKey())\n\trequire.NoError(t, err)\n\n\t// Wait for initial registration and get node ID\n\tvar nodeID uint64\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1)\n\n\t\tif len(nodes) == 1 {\n\t\t\tnodeID = nodes[0].GetId()\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond, \"waiting for initial registration\")\n\n\t// Step 2: Admin assigns multiple tags via headscale CLI\n\terr = headscale.SetNodeTags(nodeID, []string{\"tag:valid-owned\", \"tag:second\"})\n\trequire.NoError(t, err)\n\n\t// Verify admin assignment (server-side)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\n\t\tif len(nodes) == 1 {\n\t\t\tassertNodeHasTagsWithCollect(c, nodes[0], []string{\"tag:valid-owned\", \"tag:second\"})\n\t\t}\n\t}, 
10*time.Second, 500*time.Millisecond, \"verifying admin tag assignment on server\")\n\n\t// Verify admin assignment propagated to node's self view (issue #2978)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tassertNodeSelfHasTagsWithCollect(c, client, []string{\"tag:valid-owned\", \"tag:second\"})\n\t}, 30*time.Second, 500*time.Millisecond, \"verifying admin tag assignment propagated to node self\")\n\n\tt.Logf(\"Admin assigned both tags, now attempting to reduce via CLI\")\n\n\t// Step 3: Attempt to reduce tags via CLI\n\tcommand := []string{\n\t\t\"tailscale\", \"up\",\n\t\t\"--login-server=\" + headscale.GetEndpoint(),\n\t\t\"--authkey=\" + authKey.GetKey(),\n\t\t\"--advertise-tags=tag:valid-owned\",\n\t}\n\t_, stderr, err := client.Execute(command)\n\n\tt.Logf(\"CLI command result: err=%v, stderr=%s\", err, stderr)\n\n\t// Verify admin tags are preserved - CLI should not be able to reduce them (server-side)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1, \"Should have exactly 1 node\")\n\n\t\tif len(nodes) == 1 {\n\t\t\tt.Logf(\"After CLI attempt, server tags are: %v\", nodes[0].GetTags())\n\n\t\t\t// Expected: tags should remain unchanged (admin wins)\n\t\t\tassertNodeHasTagsWithCollect(c, nodes[0], []string{\"tag:valid-owned\", \"tag:second\"})\n\t\t}\n\t}, 10*time.Second, 500*time.Millisecond, \"admin tags should be preserved after CLI attempt on server\")\n\n\t// Verify admin tags are preserved in node's self view (issue #2978)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tassertNodeSelfHasTagsWithCollect(c, client, []string{\"tag:valid-owned\", \"tag:second\"})\n\t}, 30*time.Second, 500*time.Millisecond, \"admin tags should be preserved after CLI attempt in node self\")\n\n\tt.Logf(\"Test 2.6 PASS: Admin tags preserved - CLI cannot modify admin-assigned tags\")\n}\n\n// =============================================================================\n// Test Suite 3: Auth Key WITHOUT Tags\n// =============================================================================\n\n// TestTagsAuthKeyWithoutTagCannotRequestTags tests that nodes cannot request tags\n// when using an auth key that has no tags.\n//\n// Test 3.1: Cannot request tags with tagless key\n// Setup: Run `tailscale up --advertise-tags=\"tag:valid-owned\" --auth-key AUTH_KEY_WITHOUT_TAG`\n// Expected: Registration fails with error containing \"requested tags [tag:valid-owned] are invalid or not permitted\".\nfunc TestTagsAuthKeyWithoutTagCannotRequestTags(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-nokey-req\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\tuserMap, err := headscale.MapUsers()\n\trequire.NoError(t, err)\n\n\tuserID := userMap[tagTestUser].GetId()\n\n\t// Create an auth key WITHOUT tags\n\tauthKey, err := scenario.CreatePreAuthKey(userID, false, false)\n\trequire.NoError(t, err)\n\tt.Logf(\"Created PreAuthKey without tags\")\n\n\t// Create a tailscale client that will try to request tags\n\tclient, err := 
scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\ttsic.WithExtraLoginArgs([]string{\"--advertise-tags=tag:valid-owned\"}),\n\t)\n\trequire.NoError(t, err)\n\n\t// Login should fail because the auth key has no tags\n\terr = client.Login(headscale.GetEndpoint(), authKey.GetKey())\n\tif err != nil {\n\t\tt.Logf(\"Test 3.1 PASS: Registration correctly rejected: %v\", err)\n\t\tassert.ErrorContains(t, err, \"requested tags\")\n\t} else {\n\t\t// If it succeeded, document this unexpected behavior\n\t\tt.Logf(\"Test 3.1 UNEXPECTED: Registration succeeded when it should have failed\")\n\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tnodes, err := headscale.ListNodes()\n\t\t\tassert.NoError(c, err)\n\n\t\t\tif len(nodes) == 1 {\n\t\t\t\tt.Logf(\"Node registered with tags: %v (expected rejection)\", nodes[0].GetTags())\n\t\t\t}\n\t\t}, 10*time.Second, 500*time.Millisecond, \"checking node state\")\n\n\t\tt.Fail()\n\t}\n}\n\n// TestTagsAuthKeyWithoutTagRegisterNoTags tests that registering with a tagless auth key\n// and no --advertise-tags results in a node with no tags.\n//\n// Test 3.2: Register with no tags\n// Setup: Run `tailscale up --auth-key AUTH_KEY_WITHOUT_TAG` (no --advertise-tags)\n// Expected: Registration succeeds, node has no tags (empty tag set).\nfunc TestTagsAuthKeyWithoutTagRegisterNoTags(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-nokey-noreg\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\tuserMap, err := headscale.MapUsers()\n\trequire.NoError(t, err)\n\n\tuserID := userMap[tagTestUser].GetId()\n\n\t// Create an auth key WITHOUT tags\n\tauthKey, err := scenario.CreatePreAuthKey(userID, false, false)\n\trequire.NoError(t, err)\n\n\t// Create a tailscale client without --advertise-tags\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t)\n\trequire.NoError(t, err)\n\n\t// Login should succeed\n\terr = client.Login(headscale.GetEndpoint(), authKey.GetKey())\n\trequire.NoError(t, err)\n\n\t// Verify node has no tags\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1)\n\n\t\tif len(nodes) == 1 {\n\t\t\tt.Logf(\"Node registered with tags: %v\", nodes[0].GetTags())\n\t\t\tassertNodeHasNoTagsWithCollect(c, nodes[0])\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond, \"verifying node has no tags\")\n\n\tt.Logf(\"Test 3.2 completed - node registered without tags\")\n}\n\n// TestTagsAuthKeyWithoutTagCannotAddViaCLI tests that nodes registered with a tagless\n// auth key cannot add tags via the client CLI.\n//\n// Test 3.3: Cannot add tags via CLI after registration\n// Setup:\n//  1. Register with --auth-key AUTH_KEY_WITHOUT_TAG\n//  2. 
Run `tailscale up --advertise-tags=\"tag:valid-owned\" --auth-key AUTH_KEY_WITHOUT_TAG`\n//\n// Expected: Command fails, node remains with no tags.\nfunc TestTagsAuthKeyWithoutTagCannotAddViaCLI(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-nokey-noadd\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\tuserMap, err := headscale.MapUsers()\n\trequire.NoError(t, err)\n\n\tuserID := userMap[tagTestUser].GetId()\n\n\t// Create an auth key WITHOUT tags\n\tauthKey, err := scenario.CreatePreAuthKey(userID, true, false)\n\trequire.NoError(t, err)\n\n\t// Create and register a tailscale client\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t)\n\trequire.NoError(t, err)\n\n\t// Initial login\n\terr = client.Login(headscale.GetEndpoint(), authKey.GetKey())\n\trequire.NoError(t, err)\n\n\t// Wait for initial registration\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1)\n\n\t\tif len(nodes) == 1 {\n\t\t\tassertNodeHasNoTagsWithCollect(c, nodes[0])\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond, \"waiting for initial registration\")\n\n\tt.Logf(\"Node registered without tags, attempting to add via CLI\")\n\n\t// Attempt to add tags via tailscale up\n\tcommand := []string{\n\t\t\"tailscale\", \"up\",\n\t\t\"--login-server=\" + headscale.GetEndpoint(),\n\t\t\"--authkey=\" + authKey.GetKey(),\n\t\t\"--advertise-tags=tag:valid-owned\",\n\t}\n\t_, stderr, err := client.Execute(command)\n\n\t// Document actual behavior\n\tif err != nil {\n\t\tt.Logf(\"Test 3.3 PASS: CLI correctly rejected adding tags: %v, stderr: %s\", err, stderr)\n\t} else {\n\t\tt.Logf(\"Test 3.3: CLI command succeeded, checking if tags actually changed\")\n\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tnodes, err := headscale.ListNodes()\n\t\t\tassert.NoError(c, err)\n\n\t\t\tif len(nodes) == 1 {\n\t\t\t\tif len(nodes[0].GetTags()) == 0 {\n\t\t\t\t\tt.Logf(\"Test 3.3 PASS: Tags still empty after CLI attempt\")\n\t\t\t\t} else {\n\t\t\t\t\tt.Logf(\"Test 3.3 FAIL: Tags changed to: %v\", nodes[0].GetTags())\n\t\t\t\t\tassert.Fail(c, \"Tags should not have changed\")\n\t\t\t\t}\n\t\t\t}\n\t\t}, 10*time.Second, 500*time.Millisecond, \"verifying tags unchanged\")\n\t}\n}\n\n// TestTagsAuthKeyWithoutTagCLINoOpAfterAdminWithReset tests that the client CLI\n// is a no-op after admin tag assignment, even with --reset flag.\n//\n// Test 3.4: CLI no-op after admin tag assignment (with --reset)\n// Setup:\n//  1. Register with --auth-key AUTH_KEY_WITHOUT_TAG\n//  2. Assign [\"tag:valid-owned\"] via headscale CLI\n//  3. 
Run `tailscale up --auth-key AUTH_KEY_WITHOUT_TAG --reset`\n//\n// Expected: Command is no-op, tags remain [\"tag:valid-owned\"].\nfunc TestTagsAuthKeyWithoutTagCLINoOpAfterAdminWithReset(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-nokey-reset\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\tuserMap, err := headscale.MapUsers()\n\trequire.NoError(t, err)\n\n\tuserID := userMap[tagTestUser].GetId()\n\n\t// Create an auth key WITHOUT tags\n\tauthKey, err := scenario.CreatePreAuthKey(userID, true, false)\n\trequire.NoError(t, err)\n\n\t// Create and register a tailscale client\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t)\n\trequire.NoError(t, err)\n\n\t// Initial login\n\terr = client.Login(headscale.GetEndpoint(), authKey.GetKey())\n\trequire.NoError(t, err)\n\n\t// Wait for initial registration and get node ID\n\tvar nodeID uint64\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1)\n\n\t\tif len(nodes) == 1 {\n\t\t\tnodeID = nodes[0].GetId()\n\t\t\tassertNodeHasNoTagsWithCollect(c, nodes[0])\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond, \"waiting for initial registration\")\n\n\t// Step 2: Admin assigns tags\n\terr = headscale.SetNodeTags(nodeID, []string{\"tag:valid-owned\"})\n\trequire.NoError(t, err)\n\n\t// Verify admin assignment (server-side)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\n\t\tif len(nodes) == 1 {\n\t\t\tassertNodeHasTagsWithCollect(c, nodes[0], []string{\"tag:valid-owned\"})\n\t\t}\n\t}, 10*time.Second, 500*time.Millisecond, \"verifying admin tag assignment on server\")\n\n\t// Verify admin assignment propagated to node's self view (issue #2978)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tassertNodeSelfHasTagsWithCollect(c, client, []string{\"tag:valid-owned\"})\n\t}, 30*time.Second, 500*time.Millisecond, \"verifying admin tag assignment propagated to node self\")\n\n\tt.Logf(\"Admin assigned tag, now running CLI with --reset\")\n\n\t// Step 3: Run tailscale up with --reset\n\tcommand := []string{\n\t\t\"tailscale\", \"up\",\n\t\t\"--login-server=\" + headscale.GetEndpoint(),\n\t\t\"--authkey=\" + authKey.GetKey(),\n\t\t\"--reset\",\n\t}\n\t_, stderr, err := client.Execute(command)\n\tt.Logf(\"CLI --reset result: err=%v, stderr=%s\", err, stderr)\n\n\t// Verify admin tags are preserved - --reset should not remove them (server-side)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1, \"Should have exactly 1 node\")\n\n\t\tif len(nodes) == 1 {\n\t\t\tt.Logf(\"After --reset, server tags are: %v\", nodes[0].GetTags())\n\t\t\tassertNodeHasTagsWithCollect(c, nodes[0], []string{\"tag:valid-owned\"})\n\t\t}\n\t}, 10*time.Second, 500*time.Millisecond, \"admin tags should be preserved after --reset on server\")\n\n\t// Verify admin tags are preserved 
in node's self view after --reset (issue #2978)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tassertNodeSelfHasTagsWithCollect(c, client, []string{\"tag:valid-owned\"})\n\t}, 30*time.Second, 500*time.Millisecond, \"admin tags should be preserved after --reset in node self\")\n\n\tt.Logf(\"Test 3.4 PASS: Admin tags preserved after --reset\")\n}\n\n// TestTagsAuthKeyWithoutTagCLINoOpAfterAdminWithEmptyAdvertise tests that the client CLI\n// is a no-op after admin tag assignment, even with empty --advertise-tags.\n//\n// Test 3.5: CLI no-op after admin tag assignment (with empty advertise-tags)\n// Setup:\n//  1. Register with --auth-key AUTH_KEY_WITHOUT_TAG\n//  2. Assign [\"tag:valid-owned\"] via headscale CLI\n//  3. Run `tailscale up --auth-key AUTH_KEY_WITHOUT_TAG --advertise-tags=\"\"`\n//\n// Expected: Command is no-op, tags remain [\"tag:valid-owned\"].\nfunc TestTagsAuthKeyWithoutTagCLINoOpAfterAdminWithEmptyAdvertise(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-nokey-empty\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\tuserMap, err := headscale.MapUsers()\n\trequire.NoError(t, err)\n\n\tuserID := userMap[tagTestUser].GetId()\n\n\t// Create an auth key WITHOUT tags\n\tauthKey, err := scenario.CreatePreAuthKey(userID, true, false)\n\trequire.NoError(t, err)\n\n\t// Create and register a tailscale client\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t)\n\trequire.NoError(t, err)\n\n\t// Initial login\n\terr = client.Login(headscale.GetEndpoint(), authKey.GetKey())\n\trequire.NoError(t, err)\n\n\t// Wait for initial registration and get node ID\n\tvar nodeID uint64\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1)\n\n\t\tif len(nodes) == 1 {\n\t\t\tnodeID = nodes[0].GetId()\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond, \"waiting for initial registration\")\n\n\t// Step 2: Admin assigns tags\n\terr = headscale.SetNodeTags(nodeID, []string{\"tag:valid-owned\"})\n\trequire.NoError(t, err)\n\n\t// Verify admin assignment (server-side)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\n\t\tif len(nodes) == 1 {\n\t\t\tassertNodeHasTagsWithCollect(c, nodes[0], []string{\"tag:valid-owned\"})\n\t\t}\n\t}, 10*time.Second, 500*time.Millisecond, \"verifying admin tag assignment on server\")\n\n\t// Verify admin assignment propagated to node's self view (issue #2978)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tassertNodeSelfHasTagsWithCollect(c, client, []string{\"tag:valid-owned\"})\n\t}, 30*time.Second, 500*time.Millisecond, \"verifying admin tag assignment propagated to node self\")\n\n\tt.Logf(\"Admin assigned tag, now running CLI with empty --advertise-tags\")\n\n\t// Step 3: Run tailscale up with empty --advertise-tags\n\tcommand := []string{\n\t\t\"tailscale\", \"up\",\n\t\t\"--login-server=\" + headscale.GetEndpoint(),\n\t\t\"--authkey=\" + 
authKey.GetKey(),\n\t\t\"--advertise-tags=\",\n\t}\n\t_, stderr, err := client.Execute(command)\n\tt.Logf(\"CLI empty advertise-tags result: err=%v, stderr=%s\", err, stderr)\n\n\t// Verify admin tags are preserved - empty --advertise-tags should not remove them (server-side)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1, \"Should have exactly 1 node\")\n\n\t\tif len(nodes) == 1 {\n\t\t\tt.Logf(\"After empty --advertise-tags, server tags are: %v\", nodes[0].GetTags())\n\t\t\tassertNodeHasTagsWithCollect(c, nodes[0], []string{\"tag:valid-owned\"})\n\t\t}\n\t}, 10*time.Second, 500*time.Millisecond, \"admin tags should be preserved after empty --advertise-tags on server\")\n\n\t// Verify admin tags are preserved in node's self view after empty --advertise-tags (issue #2978)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tassertNodeSelfHasTagsWithCollect(c, client, []string{\"tag:valid-owned\"})\n\t}, 30*time.Second, 500*time.Millisecond, \"admin tags should be preserved after empty --advertise-tags in node self\")\n\n\tt.Logf(\"Test 3.5 PASS: Admin tags preserved after empty --advertise-tags\")\n}\n\n// TestTagsAuthKeyWithoutTagCLICannotReduceAdminMultiTag tests that the client CLI\n// cannot reduce an admin-assigned multi-tag set.\n//\n// Test 3.6: Client CLI cannot reduce admin-assigned multi-tag set\n// Setup:\n//  1. Register with --auth-key AUTH_KEY_WITHOUT_TAG\n//  2. Assign [\"tag:valid-owned\", \"tag:second\"] via headscale CLI\n//  3. Run `tailscale up --advertise-tags=\"tag:valid-owned\" --auth-key AUTH_KEY_WITHOUT_TAG`\n//\n// Expected: Command is no-op (or fails), tags remain [\"tag:valid-owned\", \"tag:second\"].\nfunc TestTagsAuthKeyWithoutTagCLICannotReduceAdminMultiTag(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-nokey-reduce\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\tuserMap, err := headscale.MapUsers()\n\trequire.NoError(t, err)\n\n\tuserID := userMap[tagTestUser].GetId()\n\n\t// Create an auth key WITHOUT tags\n\tauthKey, err := scenario.CreatePreAuthKey(userID, true, false)\n\trequire.NoError(t, err)\n\n\t// Create and register a tailscale client\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t)\n\trequire.NoError(t, err)\n\n\t// Initial login\n\terr = client.Login(headscale.GetEndpoint(), authKey.GetKey())\n\trequire.NoError(t, err)\n\n\t// Wait for initial registration and get node ID\n\tvar nodeID uint64\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1)\n\n\t\tif len(nodes) == 1 {\n\t\t\tnodeID = nodes[0].GetId()\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond, \"waiting for initial registration\")\n\n\t// Step 2: Admin assigns multiple tags\n\terr = headscale.SetNodeTags(nodeID, []string{\"tag:valid-owned\", \"tag:second\"})\n\trequire.NoError(t, err)\n\n\t// Verify admin assignment 
(server-side)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\n\t\tif len(nodes) == 1 {\n\t\t\tassertNodeHasTagsWithCollect(c, nodes[0], []string{\"tag:valid-owned\", \"tag:second\"})\n\t\t}\n\t}, 10*time.Second, 500*time.Millisecond, \"verifying admin tag assignment on server\")\n\n\t// Verify admin assignment propagated to node's self view (issue #2978)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tassertNodeSelfHasTagsWithCollect(c, client, []string{\"tag:valid-owned\", \"tag:second\"})\n\t}, 30*time.Second, 500*time.Millisecond, \"verifying admin tag assignment propagated to node self\")\n\n\tt.Logf(\"Admin assigned both tags, now attempting to reduce via CLI\")\n\n\t// Step 3: Attempt to reduce tags via CLI\n\tcommand := []string{\n\t\t\"tailscale\", \"up\",\n\t\t\"--login-server=\" + headscale.GetEndpoint(),\n\t\t\"--authkey=\" + authKey.GetKey(),\n\t\t\"--advertise-tags=tag:valid-owned\",\n\t}\n\t_, stderr, err := client.Execute(command)\n\tt.Logf(\"CLI reduce result: err=%v, stderr=%s\", err, stderr)\n\n\t// Verify admin tags are preserved - CLI should not be able to reduce them (server-side)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1, \"Should have exactly 1 node\")\n\n\t\tif len(nodes) == 1 {\n\t\t\tt.Logf(\"After CLI reduce attempt, server tags are: %v\", nodes[0].GetTags())\n\t\t\tassertNodeHasTagsWithCollect(c, nodes[0], []string{\"tag:valid-owned\", \"tag:second\"})\n\t\t}\n\t}, 10*time.Second, 500*time.Millisecond, \"admin tags should be preserved after CLI reduce attempt on server\")\n\n\t// Verify admin tags are preserved in node's self view after CLI reduce attempt (issue #2978)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tassertNodeSelfHasTagsWithCollect(c, client, []string{\"tag:valid-owned\", \"tag:second\"})\n\t}, 30*time.Second, 500*time.Millisecond, \"admin tags should be preserved after CLI reduce attempt in node self\")\n\n\tt.Logf(\"Test 3.6 PASS: Admin tags preserved - CLI cannot reduce admin-assigned multi-tag set\")\n}\n\n// =============================================================================\n// Test Suite 1: User Login Authentication (Web Auth Flow)\n// =============================================================================\n\n// TestTagsUserLoginOwnedTagAtRegistration tests that a user can advertise an owned tag\n// during web auth registration.\n//\n// Test 1.1: Advertise owned tag at registration\n// Setup: Web auth login with --advertise-tags=\"tag:valid-owned\"\n// Expected: Node has [\"tag:valid-owned\"].\nfunc TestTagsUserLoginOwnedTagAtRegistration(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0, // We'll create the node manually\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnvWithLoginURL(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-webauth-owned\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\t// Create a tailscale client with --advertise-tags\n\tclient, err := 
scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\ttsic.WithExtraLoginArgs([]string{\"--advertise-tags=tag:valid-owned\"}),\n\t)\n\trequire.NoError(t, err)\n\n\t// Login via web auth flow\n\tloginURL, err := client.LoginWithURL(headscale.GetEndpoint())\n\trequire.NoError(t, err)\n\n\t// Complete the web auth by visiting the login URL\n\tbody, err := doLoginURL(client.Hostname(), loginURL)\n\trequire.NoError(t, err)\n\n\t// Register the node via headscale CLI\n\terr = scenario.runHeadscaleRegister(tagTestUser, body)\n\trequire.NoError(t, err)\n\n\t// Wait for client to be running\n\terr = client.WaitForRunning(120 * time.Second)\n\trequire.NoError(t, err)\n\n\t// Verify node has the advertised tag\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1, \"Should have exactly 1 node\")\n\n\t\tif len(nodes) == 1 {\n\t\t\tt.Logf(\"Node registered with tags: %v\", nodes[0].GetTags())\n\t\t\tassertNodeHasTagsWithCollect(c, nodes[0], []string{\"tag:valid-owned\"})\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond, \"verifying node has advertised tag\")\n\n\tt.Logf(\"Test 1.1 completed - web auth with owned tag succeeded\")\n}\n\n// TestTagsUserLoginNonExistentTagAtRegistration tests that advertising a non-existent tag\n// during web auth registration fails.\n//\n// Test 1.2: Advertise non-existent tag at registration\n// Setup: Web auth login with --advertise-tags=\"tag:nonexistent\"\n// Expected: Registration fails - node should not be registered OR should have no tags.\nfunc TestTagsUserLoginNonExistentTagAtRegistration(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnvWithLoginURL(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-webauth-nonexist\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\t// Create a tailscale client with non-existent tag\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\ttsic.WithExtraLoginArgs([]string{\"--advertise-tags=tag:nonexistent\"}),\n\t)\n\trequire.NoError(t, err)\n\n\t// Login via web auth flow\n\tloginURL, err := client.LoginWithURL(headscale.GetEndpoint())\n\trequire.NoError(t, err)\n\n\t// Complete the web auth by visiting the login URL\n\tbody, err := doLoginURL(client.Hostname(), loginURL)\n\trequire.NoError(t, err)\n\n\t// Register the node via headscale CLI - this should fail due to non-existent tag\n\terr = scenario.runHeadscaleRegister(tagTestUser, body)\n\n\t// We expect registration to fail with an error about invalid/unauthorized tags\n\tif err != nil {\n\t\tt.Logf(\"Test 1.2 PASS: Registration correctly rejected with error: %v\", err)\n\t\tassert.ErrorContains(t, err, \"requested tags\")\n\t} else {\n\t\t// Check the result - if registration succeeded, the node should not have the invalid tag\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tnodes, err := headscale.ListNodes()\n\t\t\tassert.NoError(c, err, \"Should be able to list nodes\")\n\n\t\t\tif len(nodes) == 0 {\n\t\t\t\tt.Logf(\"Test 1.2 PASS: 
Registration rejected - no nodes registered\")\n\t\t\t} else {\n\t\t\t\t// If a node was registered, it should NOT have the non-existent tag\n\t\t\t\tassert.NotContains(c, nodes[0].GetTags(), \"tag:nonexistent\",\n\t\t\t\t\t\"Non-existent tag should not be applied to node\")\n\t\t\t\tt.Logf(\"Test 1.2: Node registered with tags: %v (non-existent tag correctly rejected)\", nodes[0].GetTags())\n\t\t\t}\n\t\t}, 10*time.Second, 500*time.Millisecond, \"checking node registration result\")\n\t}\n}\n\n// TestTagsUserLoginUnownedTagAtRegistration tests that advertising an unowned tag\n// during web auth registration is rejected.\n//\n// Test 1.3: Advertise unowned tag at registration\n// Setup: Web auth login with --advertise-tags=\"tag:valid-unowned\"\n// Expected: Registration fails - node should not be registered OR should have no tags.\nfunc TestTagsUserLoginUnownedTagAtRegistration(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnvWithLoginURL(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-webauth-unowned\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\t// Create a tailscale client with unowned tag (tag:valid-unowned is owned by \"other-user\", not \"taguser\")\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\ttsic.WithExtraLoginArgs([]string{\"--advertise-tags=tag:valid-unowned\"}),\n\t)\n\trequire.NoError(t, err)\n\n\t// Login via web auth flow\n\tloginURL, err := client.LoginWithURL(headscale.GetEndpoint())\n\trequire.NoError(t, err)\n\n\t// Complete the web auth\n\tbody, err := doLoginURL(client.Hostname(), loginURL)\n\trequire.NoError(t, err)\n\n\t// Register the node - should fail or reject the unowned tag\n\t_ = scenario.runHeadscaleRegister(tagTestUser, body)\n\n\t// Check the result - user should NOT be able to claim an unowned tag\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err, \"Should be able to list nodes\")\n\n\t\t// Either: no nodes registered (ideal), or node registered without the unowned tag\n\t\tif len(nodes) == 0 {\n\t\t\tt.Logf(\"Test 1.3 PASS: Registration rejected - no nodes registered\")\n\t\t} else {\n\t\t\t// If a node was registered, it should NOT have the unowned tag\n\t\t\tassert.NotContains(c, nodes[0].GetTags(), \"tag:valid-unowned\",\n\t\t\t\t\"Unowned tag should not be applied to node (tag:valid-unowned is owned by other-user)\")\n\t\t\tt.Logf(\"Test 1.3: Node registered with tags: %v (unowned tag correctly rejected)\", nodes[0].GetTags())\n\t\t}\n\t}, 10*time.Second, 500*time.Millisecond, \"checking node registration result\")\n}\n\n// TestTagsUserLoginAddTagViaCLIReauth tests that a user can add tags via CLI reauthentication.\n//\n// Test 1.4: Add tag via CLI reauthentication\n// Setup:\n//  1. Register with --advertise-tags=\"tag:valid-owned\"\n//  2. 
Run tailscale up --advertise-tags=\"tag:valid-owned,tag:second\"\n//\n// Expected: Triggers full reauthentication, node has both tags.\nfunc TestTagsUserLoginAddTagViaCLIReauth(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnvWithLoginURL(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-webauth-addtag\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\t// Step 1: Create and register with one tag\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\ttsic.WithExtraLoginArgs([]string{\"--advertise-tags=tag:valid-owned\"}),\n\t)\n\trequire.NoError(t, err)\n\n\tloginURL, err := client.LoginWithURL(headscale.GetEndpoint())\n\trequire.NoError(t, err)\n\n\tbody, err := doLoginURL(client.Hostname(), loginURL)\n\trequire.NoError(t, err)\n\n\terr = scenario.runHeadscaleRegister(tagTestUser, body)\n\trequire.NoError(t, err)\n\n\terr = client.WaitForRunning(120 * time.Second)\n\trequire.NoError(t, err)\n\n\t// Verify initial tag\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\n\t\tif len(nodes) == 1 {\n\t\t\tt.Logf(\"Initial tags: %v\", nodes[0].GetTags())\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond, \"checking initial tags\")\n\n\t// Step 2: Try to add second tag via CLI\n\tt.Logf(\"Attempting to add second tag via CLI reauth\")\n\n\tcommand := []string{\n\t\t\"tailscale\", \"up\",\n\t\t\"--login-server=\" + headscale.GetEndpoint(),\n\t\t\"--advertise-tags=tag:valid-owned,tag:second\",\n\t}\n\t_, stderr, err := client.Execute(command)\n\tt.Logf(\"CLI result: err=%v, stderr=%s\", err, stderr)\n\n\t// Check final state - EventuallyWithT handles waiting for propagation\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\n\t\tif len(nodes) >= 1 {\n\t\t\tt.Logf(\"Test 1.4: After CLI, tags are: %v\", nodes[0].GetTags())\n\n\t\t\tif tagsEqual(nodes[0].GetTags(), []string{\"tag:valid-owned\", \"tag:second\"}) {\n\t\t\t\tt.Logf(\"Test 1.4 PASS: Both tags present after reauth\")\n\t\t\t} else {\n\t\t\t\tt.Logf(\"Test 1.4: Tags are %v (may require manual reauth completion)\", nodes[0].GetTags())\n\t\t\t}\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond, \"checking tags after CLI\")\n}\n\n// TestTagsUserLoginRemoveTagViaCLIReauth tests that a user can remove tags via CLI reauthentication.\n//\n// Test 1.5: Remove tag via CLI reauthentication\n// Setup:\n//  1. Register with --advertise-tags=\"tag:valid-owned,tag:second\"\n//  2. 
Run tailscale up --advertise-tags=\"tag:valid-owned\"\n//\n// Expected: Triggers full reauthentication, node has only [\"tag:valid-owned\"].\nfunc TestTagsUserLoginRemoveTagViaCLIReauth(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnvWithLoginURL(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-webauth-rmtag\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\t// Step 1: Create and register with two tags\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\ttsic.WithExtraLoginArgs([]string{\"--advertise-tags=tag:valid-owned,tag:second\"}),\n\t)\n\trequire.NoError(t, err)\n\n\tloginURL, err := client.LoginWithURL(headscale.GetEndpoint())\n\trequire.NoError(t, err)\n\n\tbody, err := doLoginURL(client.Hostname(), loginURL)\n\trequire.NoError(t, err)\n\n\terr = scenario.runHeadscaleRegister(tagTestUser, body)\n\trequire.NoError(t, err)\n\n\terr = client.WaitForRunning(120 * time.Second)\n\trequire.NoError(t, err)\n\n\t// Verify initial tags\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\n\t\tif len(nodes) == 1 {\n\t\t\tt.Logf(\"Initial tags: %v\", nodes[0].GetTags())\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond, \"checking initial tags\")\n\n\t// Step 2: Try to remove second tag via CLI\n\tt.Logf(\"Attempting to remove tag via CLI reauth\")\n\n\tcommand := []string{\n\t\t\"tailscale\", \"up\",\n\t\t\"--login-server=\" + headscale.GetEndpoint(),\n\t\t\"--advertise-tags=tag:valid-owned\",\n\t}\n\t_, stderr, err := client.Execute(command)\n\tt.Logf(\"CLI result: err=%v, stderr=%s\", err, stderr)\n\n\t// Check final state - EventuallyWithT handles waiting for propagation\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\n\t\tif len(nodes) >= 1 {\n\t\t\tt.Logf(\"Test 1.5: After CLI, tags are: %v\", nodes[0].GetTags())\n\n\t\t\tif tagsEqual(nodes[0].GetTags(), []string{\"tag:valid-owned\"}) {\n\t\t\t\tt.Logf(\"Test 1.5 PASS: Only one tag after removal\")\n\t\t\t}\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond, \"checking tags after CLI\")\n}\n\n// TestTagsUserLoginCLINoOpAfterAdminAssignment tests that CLI advertise-tags becomes\n// a no-op after admin tag assignment.\n//\n// Test 1.6: CLI advertise-tags becomes no-op after admin tag assignment\n// Setup:\n//  1. Register with --advertise-tags=\"tag:valid-owned\"\n//  2. Assign [\"tag:second\"] via headscale CLI\n//  3. 
Run tailscale up --advertise-tags=\"tag:valid-owned\"\n//\n// Expected: Step 3 does NOT trigger reauthentication, tags remain [\"tag:second\"].\nfunc TestTagsUserLoginCLINoOpAfterAdminAssignment(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnvWithLoginURL(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-webauth-adminwin\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\t// Step 1: Register with one tag\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\ttsic.WithExtraLoginArgs([]string{\"--advertise-tags=tag:valid-owned\"}),\n\t)\n\trequire.NoError(t, err)\n\n\tloginURL, err := client.LoginWithURL(headscale.GetEndpoint())\n\trequire.NoError(t, err)\n\n\tbody, err := doLoginURL(client.Hostname(), loginURL)\n\trequire.NoError(t, err)\n\n\terr = scenario.runHeadscaleRegister(tagTestUser, body)\n\trequire.NoError(t, err)\n\n\terr = client.WaitForRunning(120 * time.Second)\n\trequire.NoError(t, err)\n\n\t// Get node ID\n\tvar nodeID uint64\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1)\n\n\t\tif len(nodes) == 1 {\n\t\t\tnodeID = nodes[0].GetId()\n\t\t\tt.Logf(\"Step 1: Node %d registered with tags: %v\", nodeID, nodes[0].GetTags())\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond, \"waiting for initial registration\")\n\n\t// Step 2: Admin assigns different tag\n\terr = headscale.SetNodeTags(nodeID, []string{\"tag:second\"})\n\trequire.NoError(t, err)\n\n\t// Verify admin assignment (server-side)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\n\t\tif len(nodes) == 1 {\n\t\t\tt.Logf(\"Step 2: After admin assignment, server tags: %v\", nodes[0].GetTags())\n\t\t\tassertNodeHasTagsWithCollect(c, nodes[0], []string{\"tag:second\"})\n\t\t}\n\t}, 10*time.Second, 500*time.Millisecond, \"verifying admin assignment on server\")\n\n\t// Verify admin assignment propagated to node's self view (issue #2978)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tassertNodeSelfHasTagsWithCollect(c, client, []string{\"tag:second\"})\n\t}, 30*time.Second, 500*time.Millisecond, \"verifying admin assignment propagated to node self\")\n\n\t// Step 3: Try to change tags via CLI\n\tcommand := []string{\n\t\t\"tailscale\", \"up\",\n\t\t\"--login-server=\" + headscale.GetEndpoint(),\n\t\t\"--advertise-tags=tag:valid-owned\",\n\t}\n\t_, stderr, err := client.Execute(command)\n\tt.Logf(\"Step 3 CLI result: err=%v, stderr=%s\", err, stderr)\n\n\t// Verify admin tags are preserved - CLI advertise-tags should be a no-op after admin assignment (server-side)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1, \"Should have exactly 1 node\")\n\n\t\tif len(nodes) == 1 {\n\t\t\tt.Logf(\"Step 3: After CLI, server tags are: %v\", nodes[0].GetTags())\n\t\t\tassertNodeHasTagsWithCollect(c, nodes[0], []string{\"tag:second\"})\n\t\t}\n\t}, 10*time.Second, 500*time.Millisecond, 
\"admin tags should be preserved - CLI advertise-tags should be no-op on server\")\n\n\t// Verify admin tags are preserved in node's self view after CLI attempt (issue #2978)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tassertNodeSelfHasTagsWithCollect(c, client, []string{\"tag:second\"})\n\t}, 30*time.Second, 500*time.Millisecond, \"admin tags should be preserved - CLI advertise-tags should be no-op in node self\")\n\n\tt.Logf(\"Test 1.6 PASS: Admin tags preserved (CLI was no-op)\")\n}\n\n// TestTagsUserLoginCLICannotRemoveAdminTags tests that CLI cannot remove admin-assigned tags.\n//\n// Test 1.7: CLI cannot remove admin-assigned tags\n// Setup:\n//  1. Register with --advertise-tags=\"tag:valid-owned\"\n//  2. Assign [\"tag:valid-owned\", \"tag:second\"] via headscale CLI\n//  3. Run tailscale up --advertise-tags=\"tag:valid-owned\"\n//\n// Expected: Command is no-op, tags remain [\"tag:valid-owned\", \"tag:second\"].\nfunc TestTagsUserLoginCLICannotRemoveAdminTags(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnvWithLoginURL(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-webauth-norem\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\t// Step 1: Register with one tag\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\ttsic.WithExtraLoginArgs([]string{\"--advertise-tags=tag:valid-owned\"}),\n\t)\n\trequire.NoError(t, err)\n\n\tloginURL, err := client.LoginWithURL(headscale.GetEndpoint())\n\trequire.NoError(t, err)\n\n\tbody, err := doLoginURL(client.Hostname(), loginURL)\n\trequire.NoError(t, err)\n\n\terr = scenario.runHeadscaleRegister(tagTestUser, body)\n\trequire.NoError(t, err)\n\n\terr = client.WaitForRunning(120 * time.Second)\n\trequire.NoError(t, err)\n\n\t// Get node ID\n\tvar nodeID uint64\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1)\n\n\t\tif len(nodes) == 1 {\n\t\t\tnodeID = nodes[0].GetId()\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond, \"waiting for initial registration\")\n\n\t// Step 2: Admin assigns both tags\n\terr = headscale.SetNodeTags(nodeID, []string{\"tag:valid-owned\", \"tag:second\"})\n\trequire.NoError(t, err)\n\n\t// Verify admin assignment (server-side)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\n\t\tif len(nodes) == 1 {\n\t\t\tt.Logf(\"After admin assignment, server tags: %v\", nodes[0].GetTags())\n\t\t\tassertNodeHasTagsWithCollect(c, nodes[0], []string{\"tag:valid-owned\", \"tag:second\"})\n\t\t}\n\t}, 10*time.Second, 500*time.Millisecond, \"verifying admin assignment on server\")\n\n\t// Verify admin assignment propagated to node's self view (issue #2978)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tassertNodeSelfHasTagsWithCollect(c, client, []string{\"tag:valid-owned\", \"tag:second\"})\n\t}, 30*time.Second, 500*time.Millisecond, \"verifying admin assignment propagated to node self\")\n\n\t// Step 3: Try to reduce tags via CLI\n\tcommand := 
[]string{\n\t\t\"tailscale\", \"up\",\n\t\t\"--login-server=\" + headscale.GetEndpoint(),\n\t\t\"--advertise-tags=tag:valid-owned\",\n\t}\n\t_, stderr, err := client.Execute(command)\n\tt.Logf(\"CLI result: err=%v, stderr=%s\", err, stderr)\n\n\t// Verify admin tags are preserved - CLI should not be able to remove admin-assigned tags (server-side)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1, \"Should have exactly 1 node\")\n\n\t\tif len(nodes) == 1 {\n\t\t\tt.Logf(\"Test 1.7: After CLI, server tags are: %v\", nodes[0].GetTags())\n\t\t\tassertNodeHasTagsWithCollect(c, nodes[0], []string{\"tag:valid-owned\", \"tag:second\"})\n\t\t}\n\t}, 10*time.Second, 500*time.Millisecond, \"admin tags should be preserved - CLI cannot remove them on server\")\n\n\t// Verify admin tags are preserved in node's self view after CLI attempt (issue #2978)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tassertNodeSelfHasTagsWithCollect(c, client, []string{\"tag:valid-owned\", \"tag:second\"})\n\t}, 30*time.Second, 500*time.Millisecond, \"admin tags should be preserved - CLI cannot remove them in node self\")\n\n\tt.Logf(\"Test 1.7 PASS: Admin tags preserved (CLI cannot remove)\")\n}\n\n// =============================================================================\n// Test Suite 2 (continued): Additional Auth Key WITH Tags Tests\n// =============================================================================\n\n// TestTagsAuthKeyWithTagRequestNonExistentTag tests that requesting a non-existent tag\n// with a tagged auth key results in registration failure.\n//\n// Test 2.7: Request non-existent tag with tagged key\n// Setup: Run `tailscale up --advertise-tags=\"tag:nonexistent\" --auth-key AUTH_KEY_WITH_TAG`\n// Expected: Registration fails with error containing \"requested tags\".\nfunc TestTagsAuthKeyWithTagRequestNonExistentTag(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-authkey-nonexist\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\tuserMap, err := headscale.MapUsers()\n\trequire.NoError(t, err)\n\n\tuserID := userMap[tagTestUser].GetId()\n\n\t// Create a tagged PreAuthKey with tag:valid-owned\n\tauthKey, err := scenario.CreatePreAuthKeyWithTags(userID, false, false, []string{\"tag:valid-owned\"})\n\trequire.NoError(t, err)\n\tt.Logf(\"Created tagged PreAuthKey with tags: %v\", authKey.GetAclTags())\n\n\t// Create a tailscale client that will try to use --advertise-tags with a NON-EXISTENT tag\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\ttsic.WithExtraLoginArgs([]string{\"--advertise-tags=tag:nonexistent\"}),\n\t)\n\trequire.NoError(t, err)\n\n\t// Login should fail because ANY advertise-tags is rejected for PreAuthKey registrations\n\terr = client.Login(headscale.GetEndpoint(), authKey.GetKey())\n\tif err != nil {\n\t\tt.Logf(\"Test 2.7 PASS: Registration correctly rejected with error: %v\", err)\n\t\tassert.ErrorContains(t, err, \"requested tags\")\n\t} else 
{\n\t\tt.Logf(\"Test 2.7 UNEXPECTED: Registration succeeded when it should have failed\")\n\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tnodes, err := headscale.ListNodes()\n\t\t\tassert.NoError(c, err)\n\n\t\t\tif len(nodes) == 1 {\n\t\t\t\tt.Logf(\"Node registered with tags: %v (expected rejection)\", nodes[0].GetTags())\n\t\t\t}\n\t\t}, 10*time.Second, 500*time.Millisecond, \"checking node state\")\n\n\t\tt.Fail()\n\t}\n}\n\n// TestTagsAuthKeyWithTagRequestUnownedTag tests that requesting an unowned tag\n// with a tagged auth key results in registration failure.\n//\n// Test 2.8: Request unowned tag with tagged key\n// Setup: Run `tailscale up --advertise-tags=\"tag:valid-unowned\" --auth-key AUTH_KEY_WITH_TAG`\n// Expected: Registration fails with error containing \"requested tags\".\nfunc TestTagsAuthKeyWithTagRequestUnownedTag(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-authkey-unowned\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\tuserMap, err := headscale.MapUsers()\n\trequire.NoError(t, err)\n\n\tuserID := userMap[tagTestUser].GetId()\n\n\t// Create a tagged PreAuthKey with tag:valid-owned\n\tauthKey, err := scenario.CreatePreAuthKeyWithTags(userID, false, false, []string{\"tag:valid-owned\"})\n\trequire.NoError(t, err)\n\tt.Logf(\"Created tagged PreAuthKey with tags: %v\", authKey.GetAclTags())\n\n\t// Create a tailscale client that will try to use --advertise-tags with an UNOWNED tag\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\ttsic.WithExtraLoginArgs([]string{\"--advertise-tags=tag:valid-unowned\"}),\n\t)\n\trequire.NoError(t, err)\n\n\t// Login should fail because ANY advertise-tags is rejected for PreAuthKey registrations\n\terr = client.Login(headscale.GetEndpoint(), authKey.GetKey())\n\tif err != nil {\n\t\tt.Logf(\"Test 2.8 PASS: Registration correctly rejected with error: %v\", err)\n\t\tassert.ErrorContains(t, err, \"requested tags\")\n\t} else {\n\t\tt.Logf(\"Test 2.8 UNEXPECTED: Registration succeeded when it should have failed\")\n\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tnodes, err := headscale.ListNodes()\n\t\t\tassert.NoError(c, err)\n\n\t\t\tif len(nodes) == 1 {\n\t\t\t\tt.Logf(\"Node registered with tags: %v (expected rejection)\", nodes[0].GetTags())\n\t\t\t}\n\t\t}, 10*time.Second, 500*time.Millisecond, \"checking node state\")\n\n\t\tt.Fail()\n\t}\n}\n\n// =============================================================================\n// Test Suite 3 (continued): Additional Auth Key WITHOUT Tags Tests\n// =============================================================================\n\n// TestTagsAuthKeyWithoutTagRequestNonExistentTag tests that requesting a non-existent tag\n// with a tagless auth key results in registration failure.\n//\n// Test 3.7: Request non-existent tag with tagless key\n// Setup: Run `tailscale up --advertise-tags=\"tag:nonexistent\" --auth-key AUTH_KEY_WITHOUT_TAG`\n// Expected: Registration fails with error containing \"requested tags\".\nfunc 
TestTagsAuthKeyWithoutTagRequestNonExistentTag(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-nokey-nonexist\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\tuserMap, err := headscale.MapUsers()\n\trequire.NoError(t, err)\n\n\tuserID := userMap[tagTestUser].GetId()\n\n\t// Create an auth key WITHOUT tags\n\tauthKey, err := scenario.CreatePreAuthKey(userID, false, false)\n\trequire.NoError(t, err)\n\tt.Logf(\"Created PreAuthKey without tags\")\n\n\t// Create a tailscale client that will try to request a NON-EXISTENT tag\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\ttsic.WithExtraLoginArgs([]string{\"--advertise-tags=tag:nonexistent\"}),\n\t)\n\trequire.NoError(t, err)\n\n\t// Login should fail because ANY advertise-tags is rejected for PreAuthKey registrations\n\terr = client.Login(headscale.GetEndpoint(), authKey.GetKey())\n\tif err != nil {\n\t\tt.Logf(\"Test 3.7 PASS: Registration correctly rejected: %v\", err)\n\t\tassert.ErrorContains(t, err, \"requested tags\")\n\t} else {\n\t\tt.Logf(\"Test 3.7 UNEXPECTED: Registration succeeded when it should have failed\")\n\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tnodes, err := headscale.ListNodes()\n\t\t\tassert.NoError(c, err)\n\n\t\t\tif len(nodes) == 1 {\n\t\t\t\tt.Logf(\"Node registered with tags: %v (expected rejection)\", nodes[0].GetTags())\n\t\t\t}\n\t\t}, 10*time.Second, 500*time.Millisecond, \"checking node state\")\n\n\t\tt.Fail()\n\t}\n}\n\n// TestTagsAuthKeyWithoutTagRequestUnownedTag tests that requesting an unowned tag\n// with a tagless auth key results in registration failure.\n//\n// Test 3.8: Request unowned tag with tagless key\n// Setup: Run `tailscale up --advertise-tags=\"tag:valid-unowned\" --auth-key AUTH_KEY_WITHOUT_TAG`\n// Expected: Registration fails with error containing \"requested tags\".\nfunc TestTagsAuthKeyWithoutTagRequestUnownedTag(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-nokey-unowned\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\tuserMap, err := headscale.MapUsers()\n\trequire.NoError(t, err)\n\n\tuserID := userMap[tagTestUser].GetId()\n\n\t// Create an auth key WITHOUT tags\n\tauthKey, err := scenario.CreatePreAuthKey(userID, false, false)\n\trequire.NoError(t, err)\n\tt.Logf(\"Created PreAuthKey without tags\")\n\n\t// Create a tailscale client that will try to request an UNOWNED tag\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\ttsic.WithExtraLoginArgs([]string{\"--advertise-tags=tag:valid-unowned\"}),\n\t)\n\trequire.NoError(t, 
err)\n\n\t// Login should fail because ANY advertise-tags is rejected for PreAuthKey registrations\n\terr = client.Login(headscale.GetEndpoint(), authKey.GetKey())\n\tif err != nil {\n\t\tt.Logf(\"Test 3.8 PASS: Registration correctly rejected: %v\", err)\n\t\tassert.ErrorContains(t, err, \"requested tags\")\n\t} else {\n\t\tt.Logf(\"Test 3.8 UNEXPECTED: Registration succeeded when it should have failed\")\n\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tnodes, err := headscale.ListNodes()\n\t\t\tassert.NoError(c, err)\n\n\t\t\tif len(nodes) == 1 {\n\t\t\t\tt.Logf(\"Node registered with tags: %v (expected rejection)\", nodes[0].GetTags())\n\t\t\t}\n\t\t}, 10*time.Second, 500*time.Millisecond, \"checking node state\")\n\n\t\tt.Fail()\n\t}\n}\n\n// =============================================================================\n// Test Suite 4: Admin API (SetNodeTags) Validation Tests\n// =============================================================================\n\n// TestTagsAdminAPICannotSetNonExistentTag tests that the admin API rejects\n// setting a tag that doesn't exist in the policy.\n//\n// Test 4.1: Admin cannot set non-existent tag\n// Setup: Create node, then call SetNodeTags with [\"tag:nonexistent\"]\n// Expected: SetNodeTags returns error.\nfunc TestTagsAdminAPICannotSetNonExistentTag(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-admin-nonexist\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\tuserMap, err := headscale.MapUsers()\n\trequire.NoError(t, err)\n\n\tuserID := userMap[tagTestUser].GetId()\n\n\t// Create a tagged PreAuthKey to register a node\n\tauthKey, err := scenario.CreatePreAuthKeyWithTags(userID, false, false, []string{\"tag:valid-owned\"})\n\trequire.NoError(t, err)\n\n\t// Create and register a tailscale client\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t)\n\trequire.NoError(t, err)\n\n\terr = client.Login(headscale.GetEndpoint(), authKey.GetKey())\n\trequire.NoError(t, err)\n\n\t// Wait for registration and get node ID\n\tvar nodeID uint64\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1)\n\n\t\tif len(nodes) == 1 {\n\t\t\tnodeID = nodes[0].GetId()\n\t\t\tt.Logf(\"Node %d registered with tags: %v\", nodeID, nodes[0].GetTags())\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond, \"waiting for registration\")\n\n\t// Try to set a non-existent tag via admin API - should fail\n\terr = headscale.SetNodeTags(nodeID, []string{\"tag:nonexistent\"})\n\n\trequire.Error(t, err, \"SetNodeTags should fail for non-existent tag\")\n\tt.Logf(\"Test 4.1 PASS: Admin API correctly rejected non-existent tag: %v\", err)\n}\n\n// TestTagsAdminAPICanSetUnownedTag tests that the admin API CAN set a tag\n// that exists in policy but is owned by a different user.\n// Admin has full authority over tags - ownership only matters for client requests.\n//\n// Test 4.2: Admin CAN set unowned tag (admin has full authority)\n// Setup: Create node, 
then call SetNodeTags with [\"tag:valid-unowned\"]\n// Expected: SetNodeTags succeeds (admin can assign any existing tag).\nfunc TestTagsAdminAPICanSetUnownedTag(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-admin-unowned\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\tuserMap, err := headscale.MapUsers()\n\trequire.NoError(t, err)\n\n\tuserID := userMap[tagTestUser].GetId()\n\n\t// Create a tagged PreAuthKey to register a node\n\tauthKey, err := scenario.CreatePreAuthKeyWithTags(userID, false, false, []string{\"tag:valid-owned\"})\n\trequire.NoError(t, err)\n\n\t// Create and register a tailscale client\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t)\n\trequire.NoError(t, err)\n\n\terr = client.Login(headscale.GetEndpoint(), authKey.GetKey())\n\trequire.NoError(t, err)\n\n\t// Wait for registration and get node ID\n\tvar nodeID uint64\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1)\n\n\t\tif len(nodes) == 1 {\n\t\t\tnodeID = nodes[0].GetId()\n\t\t\tt.Logf(\"Node %d registered with tags: %v\", nodeID, nodes[0].GetTags())\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond, \"waiting for registration\")\n\n\t// Admin sets an \"unowned\" tag - should SUCCEED because admin has full authority\n\t// (tag:valid-unowned is owned by other-user, but admin can assign it)\n\terr = headscale.SetNodeTags(nodeID, []string{\"tag:valid-unowned\"})\n\trequire.NoError(t, err, \"SetNodeTags should succeed for admin setting any existing tag\")\n\n\t// Verify the tag was applied (server-side)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1)\n\n\t\tif len(nodes) == 1 {\n\t\t\tassertNodeHasTagsWithCollect(c, nodes[0], []string{\"tag:valid-unowned\"})\n\t\t}\n\t}, 10*time.Second, 500*time.Millisecond, \"verifying unowned tag was applied on server\")\n\n\t// Verify the tag was propagated to node's self view (issue #2978)\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tassertNodeSelfHasTagsWithCollect(c, client, []string{\"tag:valid-unowned\"})\n\t}, 30*time.Second, 500*time.Millisecond, \"verifying unowned tag propagated to node self\")\n\n\tt.Logf(\"Test 4.2 PASS: Admin API correctly allowed setting unowned tag\")\n}\n\n// TestTagsAdminAPICannotRemoveAllTags tests that the admin API rejects\n// removing all tags from a node (would orphan the node).\n//\n// Test 4.3: Admin cannot remove all tags\n// Setup: Create tagged node, then call SetNodeTags with []\n// Expected: SetNodeTags returns error.\nfunc TestTagsAdminAPICannotRemoveAllTags(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = 
scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-admin-empty\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\tuserMap, err := headscale.MapUsers()\n\trequire.NoError(t, err)\n\n\tuserID := userMap[tagTestUser].GetId()\n\n\t// Create a tagged PreAuthKey to register a node\n\tauthKey, err := scenario.CreatePreAuthKeyWithTags(userID, false, false, []string{\"tag:valid-owned\"})\n\trequire.NoError(t, err)\n\n\t// Create and register a tailscale client\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t)\n\trequire.NoError(t, err)\n\n\terr = client.Login(headscale.GetEndpoint(), authKey.GetKey())\n\trequire.NoError(t, err)\n\n\t// Wait for registration and get node ID\n\tvar nodeID uint64\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1)\n\n\t\tif len(nodes) == 1 {\n\t\t\tnodeID = nodes[0].GetId()\n\t\t\tt.Logf(\"Node %d registered with tags: %v\", nodeID, nodes[0].GetTags())\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond, \"waiting for registration\")\n\n\t// Try to remove all tags - should fail\n\terr = headscale.SetNodeTags(nodeID, []string{})\n\n\trequire.Error(t, err, \"SetNodeTags should fail when trying to remove all tags\")\n\tt.Logf(\"Test 4.3 PASS: Admin API correctly rejected removing all tags: %v\", err)\n\n\t// Verify original tags are preserved\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1)\n\n\t\tif len(nodes) == 1 {\n\t\t\tassertNodeHasTagsWithCollect(c, nodes[0], []string{\"tag:valid-owned\"})\n\t\t}\n\t}, 10*time.Second, 500*time.Millisecond, \"verifying original tags preserved\")\n}\n\n// assertNetmapSelfHasTagsWithCollect asserts that the client's netmap self node has expected tags.\n// This validates at a deeper level than status - directly from tailscale debug netmap.\nfunc assertNetmapSelfHasTagsWithCollect(c *assert.CollectT, client TailscaleClient, expectedTags []string) {\n\tnm, err := client.Netmap()\n\t//nolint:testifylint // must use assert with CollectT in EventuallyWithT\n\tassert.NoError(c, err, \"failed to get client netmap\")\n\n\tif nm == nil {\n\t\tassert.Fail(c, \"client netmap is nil\")\n\t\treturn\n\t}\n\n\tvar actualTagsSlice []string\n\n\tif nm.SelfNode.Valid() {\n\t\tfor _, tag := range nm.SelfNode.Tags().All() { //nolint:unqueryvet // not SQLBoiler, tailcfg iterator\n\t\t\tactualTagsSlice = append(actualTagsSlice, tag)\n\t\t}\n\t}\n\n\tsortedActual := append([]string{}, actualTagsSlice...)\n\tsortedExpected := append([]string{}, expectedTags...)\n\n\tsort.Strings(sortedActual)\n\tsort.Strings(sortedExpected)\n\tassert.Equal(c, sortedExpected, sortedActual, \"Client %s netmap self tags mismatch\", client.Hostname())\n}\n\n// TestTagsIssue2978ReproTagReplacement specifically tests issue #2978:\n// When tags are changed on the server, the node's self view should update.\n// This test performs multiple tag replacements and checks for immediate propagation.\n//\n// Issue scenario (from nblock's report):\n// 1. Node registers via CLI auth with --advertise-tags=tag:foo\n// 2. Admin changes tag to tag:bar via headscale CLI/API\n// 3. 
Node's self view should show tag:bar (not tag:foo).\n//\n// This test uses web auth with --advertise-tags to match the reporter's flow.\nfunc TestTagsIssue2978ReproTagReplacement(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\t// Use CreateHeadscaleEnvWithLoginURL for web auth flow\n\terr = scenario.CreateHeadscaleEnvWithLoginURL(\n\t\t[]tsic.Option{\n\t\t\ttsic.WithExtraLoginArgs([]string{\"--advertise-tags=tag:valid-owned\"}),\n\t\t},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-issue-2978\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\t// Create a tailscale client with --advertise-tags (matching nblock's \"cli auth with --advertise-tags=tag:foo\")\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\ttsic.WithExtraLoginArgs([]string{\"--advertise-tags=tag:valid-owned\"}),\n\t)\n\trequire.NoError(t, err)\n\n\t// Login via web auth flow (this is \"cli auth\" - tailscale up triggers web auth)\n\tloginURL, err := client.LoginWithURL(headscale.GetEndpoint())\n\trequire.NoError(t, err)\n\n\t// Complete the web auth by visiting the login URL\n\tbody, err := doLoginURL(client.Hostname(), loginURL)\n\trequire.NoError(t, err)\n\n\t// Register the node via headscale CLI\n\terr = scenario.runHeadscaleRegister(tagTestUser, body)\n\trequire.NoError(t, err)\n\n\t// Wait for client to be running\n\terr = client.WaitForRunning(120 * time.Second)\n\trequire.NoError(t, err)\n\n\t// Wait for initial registration with tag:valid-owned\n\tvar nodeID uint64\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1)\n\n\t\tif len(nodes) == 1 {\n\t\t\tnodeID = nodes[0].GetId()\n\t\t\tassertNodeHasTagsWithCollect(c, nodes[0], []string{\"tag:valid-owned\"})\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond, \"waiting for initial registration\")\n\n\t// Verify client initially sees tag:valid-owned\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tassertNodeSelfHasTagsWithCollect(c, client, []string{\"tag:valid-owned\"})\n\t}, 30*time.Second, 500*time.Millisecond, \"client should see initial tag\")\n\n\tt.Logf(\"Step 1: Node %d registered via web auth with --advertise-tags=tag:valid-owned, client sees it\", nodeID)\n\n\t// Step 2: Admin changes tag to tag:second (FIRST CALL - this is \"tag:bar\" in issue terms)\n\t// According to issue #2978, the first SetNodeTags call updates the server but\n\t// the client's self view does NOT update until a SECOND call with the same tag.\n\tt.Log(\"Step 2: Calling SetNodeTags FIRST time with tag:second\")\n\n\terr = headscale.SetNodeTags(nodeID, []string{\"tag:second\"})\n\trequire.NoError(t, err)\n\n\t// Verify server-side update happened\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\n\t\tif len(nodes) == 1 {\n\t\t\tassertNodeHasTagsWithCollect(c, nodes[0], []string{\"tag:second\"})\n\t\t}\n\t}, 10*time.Second, 500*time.Millisecond, \"server should show tag:second after first call\")\n\n\tt.Log(\"Step 2a: Server shows tag:second after first call\")\n\n\t// CRITICAL BUG CHECK: 
According to nblock, after the first SetNodeTags call,\n\t// the client's self view does NOT update even after waiting ~1 minute.\n\t// We wait 10 seconds and check - if the client STILL shows the OLD tag,\n\t// that demonstrates the bug. If the client shows the NEW tag, the bug is fixed.\n\tt.Log(\"Step 2b: Waiting 10 seconds to see if client self view updates (bug: it should NOT)\")\n\t//nolint:forbidigo // intentional sleep to demonstrate bug timing - client should get update immediately, not after waiting\n\ttime.Sleep(10 * time.Second)\n\n\t// Check client status after waiting\n\tstatus, err := client.Status()\n\trequire.NoError(t, err)\n\n\tvar selfTagsAfterFirstCall []string\n\n\tif status.Self != nil && status.Self.Tags != nil {\n\t\tfor _, tag := range status.Self.Tags.All() {\n\t\t\tselfTagsAfterFirstCall = append(selfTagsAfterFirstCall, tag)\n\t\t}\n\t}\n\n\tt.Logf(\"Step 2c: Client self tags after FIRST SetNodeTags + 10s wait: %v\", selfTagsAfterFirstCall)\n\n\t// Also check netmap\n\tnm, nmErr := client.Netmap()\n\n\tvar netmapTagsAfterFirstCall []string\n\n\tif nmErr == nil && nm != nil && nm.SelfNode.Valid() {\n\t\tfor _, tag := range nm.SelfNode.Tags().All() { //nolint:unqueryvet // not SQLBoiler, tailcfg iterator\n\t\t\tnetmapTagsAfterFirstCall = append(netmapTagsAfterFirstCall, tag)\n\t\t}\n\t}\n\n\tt.Logf(\"Step 2d: Client netmap self tags after FIRST SetNodeTags + 10s wait: %v\", netmapTagsAfterFirstCall)\n\n\t// Step 3: Call SetNodeTags AGAIN with the SAME tag (SECOND CALL)\n\t// According to nblock, this second call with the same tag triggers the update.\n\tt.Log(\"Step 3: Calling SetNodeTags SECOND time with SAME tag:second\")\n\n\terr = headscale.SetNodeTags(nodeID, []string{\"tag:second\"})\n\trequire.NoError(t, err)\n\n\t// Now the client should see the update quickly (within a few seconds)\n\tt.Log(\"Step 3a: Verifying client self view updates after SECOND call\")\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tassertNodeSelfHasTagsWithCollect(c, client, []string{\"tag:second\"})\n\t}, 10*time.Second, 500*time.Millisecond, \"client status.Self should update to tag:second after SECOND call\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tassertNetmapSelfHasTagsWithCollect(c, client, []string{\"tag:second\"})\n\t}, 10*time.Second, 500*time.Millisecond, \"client netmap.SelfNode should update to tag:second after SECOND call\")\n\n\tt.Log(\"Step 3b: Client self view updated to tag:second after SECOND call\")\n\n\t// Step 4: Do another tag change to verify the pattern repeats\n\tt.Log(\"Step 4: Calling SetNodeTags FIRST time with tag:valid-unowned\")\n\n\terr = headscale.SetNodeTags(nodeID, []string{\"tag:valid-unowned\"})\n\trequire.NoError(t, err)\n\n\t// Verify server-side update\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\n\t\tif len(nodes) == 1 {\n\t\t\tassertNodeHasTagsWithCollect(c, nodes[0], []string{\"tag:valid-unowned\"})\n\t\t}\n\t}, 10*time.Second, 500*time.Millisecond, \"server should show tag:valid-unowned\")\n\n\tt.Log(\"Step 4a: Server shows tag:valid-unowned after first call\")\n\n\t// Wait and check - bug means client still shows old tag\n\tt.Log(\"Step 4b: Waiting 10 seconds to see if client self view updates (bug: it should NOT)\")\n\t//nolint:forbidigo // intentional sleep to demonstrate bug timing - client should get update immediately, not after waiting\n\ttime.Sleep(10 * time.Second)\n\n\tstatus, err = 
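client.Status()\n\trequire.NoError(t, err)\n\n\t// Re-sample the client's own view after the deliberate sleep: per the bug\n\t// report, a stale value here (old tag still visible) demonstrates #2978,\n\t// while a fresh value means the fix is in place.\n\tstatus, err = 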
client.Status()\n\trequire.NoError(t, err)\n\n\tvar selfTagsAfterSecondChange []string\n\n\tif status.Self != nil && status.Self.Tags != nil {\n\t\tfor _, tag := range status.Self.Tags.All() {\n\t\t\tselfTagsAfterSecondChange = append(selfTagsAfterSecondChange, tag)\n\t\t}\n\t}\n\n\tt.Logf(\"Step 4c: Client self tags after FIRST SetNodeTags(tag:valid-unowned) + 10s wait: %v\", selfTagsAfterSecondChange)\n\n\t// Step 5: Call SetNodeTags AGAIN with the SAME tag\n\tt.Log(\"Step 5: Calling SetNodeTags SECOND time with SAME tag:valid-unowned\")\n\n\terr = headscale.SetNodeTags(nodeID, []string{\"tag:valid-unowned\"})\n\trequire.NoError(t, err)\n\n\t// Now the client should see the update quickly\n\tt.Log(\"Step 5a: Verifying client self view updates after SECOND call\")\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tassertNodeSelfHasTagsWithCollect(c, client, []string{\"tag:valid-unowned\"})\n\t}, 10*time.Second, 500*time.Millisecond, \"client status.Self should update to tag:valid-unowned after SECOND call\")\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tassertNetmapSelfHasTagsWithCollect(c, client, []string{\"tag:valid-unowned\"})\n\t}, 10*time.Second, 500*time.Millisecond, \"client netmap.SelfNode should update to tag:valid-unowned after SECOND call\")\n\n\tt.Log(\"Test complete - see logs for bug reproduction details\")\n}\n\n// TestTagsAdminAPICannotSetInvalidFormat tests that the admin API rejects\n// tags that don't have the correct format (must start with \"tag:\").\n//\n// Test 4.4: Admin cannot set invalid format tag\n// Setup: Create node, then call SetNodeTags with [\"invalid-no-prefix\"]\n// Expected: SetNodeTags returns error.\nfunc TestTagsAdminAPICannotSetInvalidFormat(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-admin-invalid\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\tuserMap, err := headscale.MapUsers()\n\trequire.NoError(t, err)\n\n\tuserID := userMap[tagTestUser].GetId()\n\n\t// Create a tagged PreAuthKey to register a node\n\tauthKey, err := scenario.CreatePreAuthKeyWithTags(userID, false, false, []string{\"tag:valid-owned\"})\n\trequire.NoError(t, err)\n\n\t// Create and register a tailscale client\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t)\n\trequire.NoError(t, err)\n\n\terr = client.Login(headscale.GetEndpoint(), authKey.GetKey())\n\trequire.NoError(t, err)\n\n\t// Wait for registration and get node ID\n\tvar nodeID uint64\n\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1)\n\n\t\tif len(nodes) == 1 {\n\t\t\tnodeID = nodes[0].GetId()\n\t\t\tt.Logf(\"Node %d registered with tags: %v\", nodeID, nodes[0].GetTags())\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond, \"waiting for registration\")\n\n\t// Try to set a tag without the \"tag:\" prefix - should fail\n\terr = headscale.SetNodeTags(nodeID, []string{\"invalid-no-prefix\"})\n\n\trequire.Error(t, err, \"SetNodeTags should fail for invalid tag 
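format\")\n\n\t// Illustrative extra probe (assumption: the same format validation rejects\n\t// any string lacking the \"tag:\" prefix, not just this particular one).\n\t// Logged only, not asserted.\n\tif probeErr := headscale.SetNodeTags(nodeID, []string{\"also-no-prefix\"}); probeErr != nil {\n\t\tt.Logf(\"second malformed tag also rejected: %v\", probeErr)\n\t}\n\n\trequire.Error(t, err, \"SetNodeTags should fail for invalid tag 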
format\")\n\tt.Logf(\"Test 4.4 PASS: Admin API correctly rejected invalid tag format: %v\", err)\n\n\t// Verify original tags are preserved\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1)\n\n\t\tif len(nodes) == 1 {\n\t\t\tassertNodeHasTagsWithCollect(c, nodes[0], []string{\"tag:valid-owned\"})\n\t\t}\n\t}, 10*time.Second, 500*time.Millisecond, \"verifying original tags preserved\")\n}\n\n// =============================================================================\n// Test for Issue #2979: Reauth to untag a device\n// =============================================================================\n\n// TestTagsUserLoginReauthWithEmptyTagsRemovesAllTags tests that reauthenticating\n// with an empty tag list (--advertise-tags= --force-reauth) removes all tags\n// and returns ownership to the user.\n//\n// Bug #2979: Reauth to untag a device keeps it tagged\n// Setup: Register a node with tags via user login, then reauth with --advertise-tags= --force-reauth\n// Expected: Node should have no tags and ownership should return to the user.\n//\n// Note: This only works with --force-reauth because without it, the Tailscale\n// client doesn't trigger a full reauth to the server - it only updates local state.\nfunc TestTagsUserLoginReauthWithEmptyTagsRemovesAllTags(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tt.Run(\"with force-reauth\", func(t *testing.T) {\n\t\ttc := struct {\n\t\t\tname        string\n\t\t\ttestName    string\n\t\t\tforceReauth bool\n\t\t}{\n\t\t\tname:        \"with force-reauth\",\n\t\t\ttestName:    \"with-force-reauth\",\n\t\t\tforceReauth: true,\n\t\t}\n\t\tpolicy := tagsTestPolicy()\n\n\t\tspec := ScenarioSpec{\n\t\t\tNodesPerUser: 0,\n\t\t\tUsers:        []string{tagTestUser},\n\t\t}\n\n\t\tscenario, err := NewScenario(spec)\n\n\t\trequire.NoError(t, err)\n\t\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\t\terr = scenario.CreateHeadscaleEnvWithLoginURL(\n\t\t\t[]tsic.Option{},\n\t\t\thsic.WithACLPolicy(policy),\n\t\t\thsic.WithTestName(\"tags-reauth-untag-2979-\"+tc.testName),\n\t\t)\n\t\trequireNoErrHeadscaleEnv(t, err)\n\n\t\theadscale, err := scenario.Headscale()\n\t\trequireNoErrGetHeadscale(t, err)\n\n\t\t// Step 1: Create and register a node with tags\n\t\tt.Logf(\"Step 1: Registering node with tags\")\n\n\t\tclient, err := scenario.CreateTailscaleNode(\n\t\t\t\"head\",\n\t\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\t\ttsic.WithExtraLoginArgs([]string{\"--advertise-tags=tag:valid-owned,tag:second\"}),\n\t\t)\n\t\trequire.NoError(t, err)\n\n\t\tloginURL, err := client.LoginWithURL(headscale.GetEndpoint())\n\t\trequire.NoError(t, err)\n\n\t\tbody, err := doLoginURL(client.Hostname(), loginURL)\n\t\trequire.NoError(t, err)\n\n\t\terr = scenario.runHeadscaleRegister(tagTestUser, body)\n\t\trequire.NoError(t, err)\n\n\t\terr = client.WaitForRunning(120 * time.Second)\n\t\trequire.NoError(t, err)\n\n\t\t// Verify initial tags\n\t\tvar initialNodeID uint64\n\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tnodes, err := headscale.ListNodes()\n\t\t\tassert.NoError(c, err)\n\t\t\tassert.Len(c, nodes, 1, \"Expected exactly one node\")\n\n\t\t\tif len(nodes) == 1 {\n\t\t\t\tnode := nodes[0]\n\t\t\t\tinitialNodeID = node.GetId()\n\t\t\t\tt.Logf(\"Initial state - Node ID: %d, Tags: %v, User: %s\",\n\t\t\t\t\tnode.GetId(), node.GetTags(), node.GetUser().GetName())\n\n\t\t\t\t// Verify node has the expected 
tags\n\t\t\t\tassertNodeHasTagsWithCollect(c, node, []string{\"tag:valid-owned\", \"tag:second\"})\n\t\t\t}\n\t\t}, 30*time.Second, 500*time.Millisecond, \"checking initial tags\")\n\n\t\t// Step 2: Reauth with empty tags to remove all tags\n\t\tt.Logf(\"Step 2: Reauthenticating with empty tag list to untag device (%s)\", tc.name)\n\n\t\tif tc.forceReauth {\n\t\t\t// Manually run tailscale up with --force-reauth and empty tags\n\t\t\t// This will output a login URL that we need to complete\n\t\t\t// Include --hostname to match the initial login command\n\t\t\tcommand := []string{\n\t\t\t\t\"tailscale\", \"up\",\n\t\t\t\t\"--login-server=\" + headscale.GetEndpoint(),\n\t\t\t\t\"--hostname=\" + client.Hostname(),\n\t\t\t\t\"--advertise-tags=\",\n\t\t\t\t\"--force-reauth\",\n\t\t\t}\n\n\t\t\tstdout, stderr, _ := client.Execute(command)\n\t\t\tt.Logf(\"Reauth command stderr: %s\", stderr)\n\n\t\t\t// Parse the login URL from the command output\n\t\t\tloginURL, err := util.ParseLoginURLFromCLILogin(stdout + stderr)\n\t\t\trequire.NoError(t, err, \"Failed to parse login URL from reauth command\")\n\t\t\tt.Logf(\"Reauth login URL: %s\", loginURL)\n\n\t\t\tbody, err := doLoginURL(client.Hostname(), loginURL)\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = scenario.runHeadscaleRegister(tagTestUser, body)\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = client.WaitForRunning(120 * time.Second)\n\t\t\trequire.NoError(t, err)\n\t\t\tt.Logf(\"Completed reauth with empty tags\")\n\t\t} else {\n\t\t\t// Without force-reauth, just try tailscale up\n\t\t\t// Include --hostname to match the initial login command\n\t\t\tcommand := []string{\n\t\t\t\t\"tailscale\", \"up\",\n\t\t\t\t\"--login-server=\" + headscale.GetEndpoint(),\n\t\t\t\t\"--hostname=\" + client.Hostname(),\n\t\t\t\t\"--advertise-tags=\",\n\t\t\t}\n\t\t\tstdout, stderr, err := client.Execute(command)\n\t\t\tt.Logf(\"CLI reauth result: err=%v, stdout=%s, stderr=%s\", err, stdout, stderr)\n\t\t}\n\n\t\t// Step 3: Verify tags are removed and ownership is returned to user\n\t\t// This is the key assertion for bug #2979\n\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\tnodes, err := headscale.ListNodes()\n\t\t\tassert.NoError(c, err)\n\n\t\t\tif len(nodes) >= 1 {\n\t\t\t\tnode := nodes[0]\n\t\t\t\tt.Logf(\"After reauth - Node ID: %d, Tags: %v, User: %s\",\n\t\t\t\t\tnode.GetId(), node.GetTags(), node.GetUser().GetName())\n\n\t\t\t\t// Assert: Node should have NO tags\n\t\t\t\tassertNodeHasNoTagsWithCollect(c, node)\n\n\t\t\t\t// Assert: Node should be owned by the user (not tagged-devices)\n\t\t\t\tassert.Equal(c, tagTestUser, node.GetUser().GetName(),\n\t\t\t\t\t\"Node ownership should return to user %s after untagging\", tagTestUser)\n\n\t\t\t\t// Verify the node ID is still the same (not a new registration)\n\t\t\t\tassert.Equal(c, initialNodeID, node.GetId(),\n\t\t\t\t\t\"Node ID should remain the same after reauth\")\n\n\t\t\t\tif len(node.GetTags()) == 0 && node.GetUser().GetName() == tagTestUser {\n\t\t\t\t\tt.Logf(\"Test #2979 (%s) PASS: Node successfully untagged and ownership returned to user\", tc.name)\n\t\t\t\t} else {\n\t\t\t\t\tt.Logf(\"Test #2979 (%s) FAIL: Expected no tags and user=%s, got tags=%v user=%s\",\n\t\t\t\t\t\ttc.name, tagTestUser, node.GetTags(), node.GetUser().GetName())\n\t\t\t\t}\n\t\t\t}\n\t\t}, 60*time.Second, 1*time.Second, \"verifying tags removed and ownership returned\")\n\t})\n}\n\n// =============================================================================\n// Test Suite 5: Auth Key WITHOUT User 
(Tags-Only Ownership)\n// =============================================================================\n\n// TestTagsAuthKeyWithoutUserInheritsTags tests that when an auth key without a user\n// (tags-only) is used without --advertise-tags, the node inherits the key's tags.\n//\n// Test 5.1: Auth key without user, no --advertise-tags flag\n// Setup: Run `tailscale up --auth-key AUTH_KEY_WITH_TAGS_NO_USER`\n// Expected: Node registers with the tags from the auth key.\nfunc TestTagsAuthKeyWithoutUserInheritsTags(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-authkey-no-user-inherit\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\t// Create an auth key with tags but WITHOUT a user\n\tauthKey, err := scenario.CreatePreAuthKeyWithOptions(hsic.AuthKeyOptions{\n\t\tUser:      nil,\n\t\tReusable:  false,\n\t\tEphemeral: false,\n\t\tTags:      []string{\"tag:valid-owned\"},\n\t})\n\trequire.NoError(t, err)\n\tt.Logf(\"Created tags-only PreAuthKey with tags: %v\", authKey.GetAclTags())\n\n\t// Create a tailscale client WITHOUT --advertise-tags\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\t// Note: NO WithExtraLoginArgs for --advertise-tags\n\t)\n\trequire.NoError(t, err)\n\n\t// Login with the tags-only auth key\n\terr = client.Login(headscale.GetEndpoint(), authKey.GetKey())\n\trequire.NoError(t, err)\n\n\t// Wait for node to be registered and verify it has the key's tags\n\t// Note: Tags-only nodes don't have a user, so we list all nodes\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1, \"Should have exactly 1 node\")\n\n\t\tif len(nodes) == 1 {\n\t\t\tnode := nodes[0]\n\t\t\tt.Logf(\"Node registered with tags: %v\", node.GetTags())\n\t\t\tassertNodeHasTagsWithCollect(c, node, []string{\"tag:valid-owned\"})\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond, \"verifying node inherited tags from auth key\")\n\n\tt.Logf(\"Test 5.1 PASS: Node inherited tags from tags-only auth key\")\n}\n\n// TestTagsAuthKeyWithoutUserRejectsAdvertisedTags tests that when an auth key without\n// a user (tags-only) is used WITH --advertise-tags, the registration is rejected.\n// PreAuthKey registrations do not allow client-requested tags.\n//\n// Test 5.2: Auth key without user, with --advertise-tags (should be rejected)\n// Setup: Run `tailscale up --advertise-tags=\"tag:second\" --auth-key AUTH_KEY_WITH_TAGS_NO_USER`\n// Expected: Registration fails with error containing \"requested tags\".\nfunc TestTagsAuthKeyWithoutUserRejectsAdvertisedTags(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = 
scenario.CreateHeadscaleEnv(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-authkey-no-user-reject-advertise\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\t// Create an auth key with tags but WITHOUT a user\n\tauthKey, err := scenario.CreatePreAuthKeyWithOptions(hsic.AuthKeyOptions{\n\t\tUser:      nil,\n\t\tReusable:  false,\n\t\tEphemeral: false,\n\t\tTags:      []string{\"tag:valid-owned\"},\n\t})\n\trequire.NoError(t, err)\n\tt.Logf(\"Created tags-only PreAuthKey with tags: %v\", authKey.GetAclTags())\n\n\t// Create a tailscale client WITH --advertise-tags for a DIFFERENT tag\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t\ttsic.WithExtraLoginArgs([]string{\"--advertise-tags=tag:second\"}),\n\t)\n\trequire.NoError(t, err)\n\n\t// Login should fail because ANY advertise-tags is rejected for PreAuthKey registrations\n\terr = client.Login(headscale.GetEndpoint(), authKey.GetKey())\n\tif err != nil {\n\t\tt.Logf(\"Test 5.2 PASS: Registration correctly rejected with error: %v\", err)\n\t\tassert.ErrorContains(t, err, \"requested tags\")\n\t} else {\n\t\tt.Logf(\"Test 5.2 UNEXPECTED: Registration succeeded when it should have failed\")\n\t\tt.Fail()\n\t}\n}\n\n// =============================================================================\n// Test Suite 6: Tagged→User Conversion via CLI Register (#3038)\n// =============================================================================\n\n// TestTagsAuthKeyConvertToUserViaCLIRegister reproduces the panic from\n// issue #3038: register a node with a tags-only preauthkey (no user), then\n// convert it to a user-owned node via \"headscale auth register --auth-id <id> --user <user>\".\n// The crash happens in the mapper's generateUserProfiles when node.User is nil\n// after the tag→user conversion in processReauthTags.\n//\n// The key detail is using a tags-only PreAuthKey (User: nil). When created under\n// a user, the node inherits User from the PreAuthKey and the bug is masked.\nfunc TestTagsAuthKeyConvertToUserViaCLIRegister(t *testing.T) {\n\tIntegrationSkip(t)\n\n\tpolicy := tagsTestPolicy()\n\n\tspec := ScenarioSpec{\n\t\tNodesPerUser: 0,\n\t\tUsers:        []string{tagTestUser},\n\t}\n\n\tscenario, err := NewScenario(spec)\n\n\trequire.NoError(t, err)\n\tdefer scenario.ShutdownAssertNoPanics(t)\n\n\terr = scenario.CreateHeadscaleEnvWithLoginURL(\n\t\t[]tsic.Option{},\n\t\thsic.WithACLPolicy(policy),\n\t\thsic.WithTestName(\"tags-authkey-to-user-cli-3038\"),\n\t)\n\trequireNoErrHeadscaleEnv(t, err)\n\n\theadscale, err := scenario.Headscale()\n\trequireNoErrGetHeadscale(t, err)\n\n\t// Step 1: Create a tags-only preauthkey WITHOUT a user.\n\t// This is the critical detail: when PreAuthKey.UserID is nil, the node\n\t// enters the NodeStore with node.User == nil. 
The processReauthTags\n\t// conversion then sets UserID but not User, leaving it nil for the mapper.\n\tauthKey, err := scenario.CreatePreAuthKeyWithOptions(hsic.AuthKeyOptions{\n\t\tUser:      nil,\n\t\tReusable:  false,\n\t\tEphemeral: false,\n\t\tTags:      []string{\"tag:valid-owned\"},\n\t})\n\trequire.NoError(t, err)\n\tt.Logf(\"Created tags-only PreAuthKey (no user) with tags: %v\", authKey.GetAclTags())\n\n\tclient, err := scenario.CreateTailscaleNode(\n\t\t\"head\",\n\t\ttsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),\n\t)\n\trequire.NoError(t, err)\n\n\terr = client.Login(headscale.GetEndpoint(), authKey.GetKey())\n\trequire.NoError(t, err)\n\n\terr = client.WaitForRunning(120 * time.Second)\n\trequire.NoError(t, err)\n\n\t// Verify initial state: node is tagged\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1)\n\n\t\tif len(nodes) == 1 {\n\t\t\tassertNodeHasTagsWithCollect(c, nodes[0], []string{\"tag:valid-owned\"})\n\t\t\tt.Logf(\"Initial state - Node ID: %d, Tags: %v\", nodes[0].GetId(), nodes[0].GetTags())\n\t\t}\n\t}, 30*time.Second, 500*time.Millisecond, \"node should be tagged initially\")\n\n\t// Step 2: Force reauth with empty tags (triggers web auth flow)\n\tcommand := []string{\n\t\t\"tailscale\", \"up\",\n\t\t\"--login-server=\" + headscale.GetEndpoint(),\n\t\t\"--hostname=\" + client.Hostname(),\n\t\t\"--advertise-tags=\",\n\t\t\"--force-reauth\",\n\t}\n\n\tstdout, stderr, _ := client.Execute(command)\n\tt.Logf(\"Reauth command output: stdout=%s stderr=%s\", stdout, stderr)\n\n\tloginURL, err := util.ParseLoginURLFromCLILogin(stdout + stderr)\n\trequire.NoError(t, err, \"Failed to parse login URL from reauth command\")\n\n\tbody, err := doLoginURL(client.Hostname(), loginURL)\n\trequire.NoError(t, err)\n\n\t// Step 3: Register via CLI with user (this is the exact step that triggers the panic)\n\terr = scenario.runHeadscaleRegister(tagTestUser, body)\n\trequire.NoError(t, err)\n\n\terr = client.WaitForRunning(120 * time.Second)\n\trequire.NoError(t, err)\n\n\t// Step 4: Verify node is now user-owned and the mapper didn't panic.\n\t// The panic would occur when the mapper builds the MapResponse and calls\n\t// node.Owner().Model().ID with a nil User pointer.\n\t// ShutdownAssertNoPanics in the defer catches any panics in headscale logs.\n\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\tnodes, err := headscale.ListNodes()\n\t\tassert.NoError(c, err)\n\t\tassert.Len(c, nodes, 1)\n\n\t\tif len(nodes) == 1 {\n\t\t\tassertNodeHasNoTagsWithCollect(c, nodes[0])\n\t\t\tassert.Equal(c, tagTestUser, nodes[0].GetUser().GetName(),\n\t\t\t\t\"Node ownership should be returned to user after untagging\")\n\t\t\tt.Logf(\"After conversion - Node ID: %d, Tags: %v, User: %s\",\n\t\t\t\tnodes[0].GetId(), nodes[0].GetTags(), nodes[0].GetUser().GetName())\n\t\t}\n\t}, 60*time.Second, 1*time.Second, \"node should be user-owned after conversion via CLI register\")\n}\n"
  },
  {
    "path": "integration/tailscale.go",
    "content": "package integration\n\nimport (\n\t\"io\"\n\t\"net/netip\"\n\t\"net/url\"\n\t\"time\"\n\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/juanfont/headscale/integration/dockertestutil\"\n\t\"github.com/juanfont/headscale/integration/tsic\"\n\t\"tailscale.com/ipn/ipnstate\"\n\t\"tailscale.com/net/netcheck\"\n\t\"tailscale.com/types/key\"\n\t\"tailscale.com/types/netmap\"\n\t\"tailscale.com/wgengine/filter\"\n)\n\n// nolint\ntype TailscaleClient interface {\n\tHostname() string\n\tShutdown() (string, string, error)\n\tVersion() string\n\tExecute(\n\t\tcommand []string,\n\t\toptions ...dockertestutil.ExecuteCommandOption,\n\t) (string, string, error)\n\tLogin(loginServer, authKey string) error\n\tLoginWithURL(loginServer string) (*url.URL, error)\n\tLogout() error\n\tRestart() error\n\tUp() error\n\tDown() error\n\tIPs() ([]netip.Addr, error)\n\tMustIPs() []netip.Addr\n\tIPv4() (netip.Addr, error)\n\tMustIPv4() netip.Addr\n\tMustIPv6() netip.Addr\n\tFQDN() (string, error)\n\tMustFQDN() string\n\tStatus(...bool) (*ipnstate.Status, error)\n\tMustStatus() *ipnstate.Status\n\tNetmap() (*netmap.NetworkMap, error)\n\tDebugDERPRegion(region string) (*ipnstate.DebugDERPRegionReport, error)\n\tGetNodePrivateKey() (*key.NodePrivate, error)\n\tNetcheck() (*netcheck.Report, error)\n\tWaitForNeedsLogin(timeout time.Duration) error\n\tWaitForRunning(timeout time.Duration) error\n\tWaitForPeers(expected int, timeout, retryInterval time.Duration) error\n\tPing(hostnameOrIP string, opts ...tsic.PingOption) error\n\tCurl(url string, opts ...tsic.CurlOption) (string, error)\n\tCurlFailFast(url string) (string, error)\n\tTraceroute(netip.Addr) (util.Traceroute, error)\n\tContainerID() string\n\tMustID() types.NodeID\n\tReadFile(path string) ([]byte, error)\n\tPacketFilter() ([]filter.Match, error)\n\n\t// FailingPeersAsString returns a formatted-ish multi-line-string of peers in the client\n\t// and a bool indicating if the clients online count and peer count is equal.\n\tFailingPeersAsString() (string, bool, error)\n\n\tWriteLogs(stdout, stderr io.Writer) error\n}\n"
  },
  {
    "path": "integration/tsic/tsic.go",
    "content": "package tsic\n\nimport (\n\t\"archive/tar\"\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net/netip\"\n\t\"net/url\"\n\t\"os\"\n\t\"reflect\"\n\t\"runtime/debug\"\n\t\"slices\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/cenkalti/backoff/v5\"\n\t\"github.com/juanfont/headscale/hscontrol/types\"\n\t\"github.com/juanfont/headscale/hscontrol/util\"\n\t\"github.com/juanfont/headscale/integration/dockertestutil\"\n\t\"github.com/juanfont/headscale/integration/integrationutil\"\n\t\"github.com/ory/dockertest/v3\"\n\t\"github.com/ory/dockertest/v3/docker\"\n\t\"tailscale.com/ipn\"\n\t\"tailscale.com/ipn/ipnstate\"\n\t\"tailscale.com/ipn/store/mem\"\n\t\"tailscale.com/net/netcheck\"\n\t\"tailscale.com/paths\"\n\t\"tailscale.com/types/key\"\n\t\"tailscale.com/types/netmap\"\n\t\"tailscale.com/util/multierr\"\n\t\"tailscale.com/wgengine/filter\"\n)\n\nconst (\n\ttsicHashLength       = 6\n\tdefaultPingTimeout   = 200 * time.Millisecond\n\tdefaultPingCount     = 5\n\tdockerContextPath    = \"../.\"\n\tcaCertRoot           = \"/usr/local/share/ca-certificates\"\n\tdockerExecuteTimeout = 60 * time.Second\n)\n\nvar (\n\terrTailscalePingFailed             = errors.New(\"ping failed\")\n\terrTailscalePingNotDERP            = errors.New(\"ping not via DERP\")\n\terrTailscaleNotLoggedIn            = errors.New(\"tailscale not logged in\")\n\terrTailscaleWrongPeerCount         = errors.New(\"wrong peer count\")\n\terrTailscaleCannotUpWithoutAuthkey = errors.New(\"cannot up without authkey\")\n\terrInvalidClientConfig             = errors.New(\"verifiably invalid client config requested\")\n\terrInvalidTailscaleImageFormat     = errors.New(\"invalid HEADSCALE_INTEGRATION_TAILSCALE_IMAGE format, expected repository:tag\")\n\terrTailscaleImageRequiredInCI      = errors.New(\"HEADSCALE_INTEGRATION_TAILSCALE_IMAGE must be set in CI for HEAD version\")\n\terrContainerNotInitialized         = errors.New(\"container not initialized\")\n\terrFQDNNotYetAvailable             = errors.New(\"FQDN not yet available\")\n)\n\nconst (\n\tVersionHead = \"head\"\n)\n\nfunc errTailscaleStatus(hostname string, err error) error {\n\treturn fmt.Errorf(\"%s failed to fetch tailscale status: %w\", hostname, err)\n}\n\n// TailscaleInContainer is an implementation of TailscaleClient which\n// sets up a Tailscale instance inside a container.\ntype TailscaleInContainer struct {\n\tversion  string\n\thostname string\n\n\tpool      *dockertest.Pool\n\tcontainer *dockertest.Resource\n\tnetwork   *dockertest.Network\n\n\t// \"cache\"\n\tips  []netip.Addr\n\tfqdn string\n\n\t// optional config\n\tcaCerts           [][]byte\n\theadscaleHostname string\n\twithWebsocketDERP bool\n\twithSSH           bool\n\twithTags          []string\n\twithEntrypoint    []string\n\twithExtraHosts    []string\n\tworkdir           string\n\tnetfilter         string\n\textraLoginArgs    []string\n\twithAcceptRoutes  bool\n\twithPackages      []string // Alpine packages to install at container start\n\twithWebserverPort int      // Port for built-in HTTP server (0 = disabled)\n\twithExtraCommands []string // Extra shell commands to run before tailscaled\n\n\t// build options, solely for HEAD\n\tbuildConfig TailscaleInContainerBuildConfig\n}\n\ntype TailscaleInContainerBuildConfig struct {\n\ttags []string\n}\n\n// Option represent optional settings that can be given to a\n// Tailscale instance.\ntype Option = func(c *TailscaleInContainer)\n\n// WithCACert adds it to the trusted 
CA certificates of the Tailscale container.\nfunc WithCACert(cert []byte) Option {\n\treturn func(tsic *TailscaleInContainer) {\n\t\ttsic.caCerts = append(tsic.caCerts, cert)\n\t}\n}\n\n// WithNetwork sets the Docker container network to use with\n// the Tailscale instance.\nfunc WithNetwork(network *dockertest.Network) Option {\n\treturn func(tsic *TailscaleInContainer) {\n\t\ttsic.network = network\n\t}\n}\n\n// WithHeadscaleName sets the name of the headscale instance,\n// mostly useful in combination with TLS and WithCACert.\nfunc WithHeadscaleName(hsName string) Option {\n\treturn func(tsic *TailscaleInContainer) {\n\t\ttsic.headscaleHostname = hsName\n\t}\n}\n\n// WithTags associates the given tags with the Tailscale instance.\nfunc WithTags(tags []string) Option {\n\treturn func(tsic *TailscaleInContainer) {\n\t\ttsic.withTags = tags\n\t}\n}\n\n// WithWebsocketDERP toggles a development knob to\n// force-enable the DERP connection through the new websocket protocol.\nfunc WithWebsocketDERP(enabled bool) Option {\n\treturn func(tsic *TailscaleInContainer) {\n\t\ttsic.withWebsocketDERP = enabled\n\t}\n}\n\n// WithSSH enables SSH for the Tailscale instance.\nfunc WithSSH() Option {\n\treturn func(tsic *TailscaleInContainer) {\n\t\ttsic.withSSH = true\n\t}\n}\n\n// WithDockerWorkdir allows the docker working directory to be set.\nfunc WithDockerWorkdir(dir string) Option {\n\treturn func(tsic *TailscaleInContainer) {\n\t\ttsic.workdir = dir\n\t}\n}\n\nfunc WithExtraHosts(hosts []string) Option {\n\treturn func(tsic *TailscaleInContainer) {\n\t\ttsic.withExtraHosts = hosts\n\t}\n}\n\n// WithDockerEntrypoint allows the docker entrypoint of the container\n// to be overridden. This is a dangerous option which can make\n// the container not work as intended, as a typo might prevent\n// tailscaled and other processes from starting.\n// Use with caution.\nfunc WithDockerEntrypoint(args []string) Option {\n\treturn func(tsic *TailscaleInContainer) {\n\t\ttsic.withEntrypoint = args\n\t}\n}\n\n// WithNetfilter configures Tailscale's --netfilter-mode parameter,\n// allowing us to turn off modification of ip[6]tables/nftables.\n// It takes: \"on\", \"off\", \"nodivert\".\nfunc WithNetfilter(state string) Option {\n\treturn func(tsic *TailscaleInContainer) {\n\t\ttsic.netfilter = state\n\t}\n}\n\n// WithBuildTag adds an additional value to the `-tags=` parameter\n// of the Go compiler, allowing callers to customize the Tailscale client build.\n// This option is only meaningful when invoked on **HEAD** versions of the client.\n// Attempting to use it with any other version is a bug in the calling code.\nfunc WithBuildTag(tag string) Option {\n\treturn func(tsic *TailscaleInContainer) {\n\t\tif tsic.version != VersionHead {\n\t\t\tpanic(errInvalidClientConfig)\n\t\t}\n\n\t\ttsic.buildConfig.tags = append(\n\t\t\ttsic.buildConfig.tags, tag,\n\t\t)\n\t}\n}\n\n// WithExtraLoginArgs adds additional arguments to the `tailscale up` command\n// as part of the Login function.\nfunc WithExtraLoginArgs(args []string) Option {\n\treturn func(tsic *TailscaleInContainer) {\n\t\ttsic.extraLoginArgs = append(tsic.extraLoginArgs, args...)\n\t}\n}\n\n// WithAcceptRoutes tells the node to accept incoming routes.\nfunc WithAcceptRoutes() Option {\n\treturn func(tsic *TailscaleInContainer) {\n\t\ttsic.withAcceptRoutes = true\n\t}\n}\n\n// WithPackages specifies Alpine packages to install when the container starts.\n// This requires internet access and uses `apk add`. 
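A typical call\n// (illustrative) looks like WithPackages(\"curl\", \"bind-tools\").\n// 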
Common packages:\n// - \"python3\" for HTTP server\n// - \"curl\" for HTTP client\n// - \"bind-tools\" for dig command\n// - \"iptables\", \"ip6tables\" for firewall rules\n// Note: Tests using this option require internet access and cannot use\n// the built-in DERP server in offline mode.\nfunc WithPackages(packages ...string) Option {\n\treturn func(tsic *TailscaleInContainer) {\n\t\ttsic.withPackages = append(tsic.withPackages, packages...)\n\t}\n}\n\n// WithWebserver starts a Python HTTP server on the specified port\n// alongside tailscaled. This is useful for testing subnet routing\n// and ACL connectivity. Automatically adds \"python3\" to packages if needed.\n// The server serves files from the root directory (/).\nfunc WithWebserver(port int) Option {\n\treturn func(tsic *TailscaleInContainer) {\n\t\ttsic.withWebserverPort = port\n\t}\n}\n\n// WithExtraCommands adds extra shell commands to run before tailscaled starts.\n// Commands are run after package installation and CA certificate updates.\nfunc WithExtraCommands(commands ...string) Option {\n\treturn func(tsic *TailscaleInContainer) {\n\t\ttsic.withExtraCommands = append(tsic.withExtraCommands, commands...)\n\t}\n}\n\n// buildEntrypoint constructs the container entrypoint command based on\n// configured options (packages, webserver, etc.).\nfunc (t *TailscaleInContainer) buildEntrypoint() []string {\n\tvar commands []string\n\n\t// Wait for network to be ready\n\tcommands = append(commands, \"while ! ip route show default >/dev/null 2>&1; do sleep 0.1; done\")\n\n\t// If CA certs are configured, wait for them to be written by the Go code\n\t// (certs are written after container start via tsic.WriteFile)\n\tif len(t.caCerts) > 0 {\n\t\tcommands = append(commands,\n\t\t\tfmt.Sprintf(\"while [ ! 
-f %s/user-0.crt ]; do sleep 0.1; done\", caCertRoot))\n\t}\n\n\t// Install packages if requested (requires internet access)\n\tpackages := t.withPackages\n\tif t.withWebserverPort > 0 && !slices.Contains(packages, \"python3\") {\n\t\tpackages = append(packages, \"python3\")\n\t}\n\n\tif len(packages) > 0 {\n\t\tcommands = append(commands, \"apk add --no-cache \"+strings.Join(packages, \" \"))\n\t}\n\n\t// Update CA certificates\n\tcommands = append(commands, \"update-ca-certificates\")\n\n\t// Run extra commands if any\n\tcommands = append(commands, t.withExtraCommands...)\n\n\t// Start webserver in background if requested\n\t// Use subshell to avoid & interfering with command joining\n\tif t.withWebserverPort > 0 {\n\t\tcommands = append(commands,\n\t\t\tfmt.Sprintf(\"(python3 -m http.server --bind :: %d &)\", t.withWebserverPort))\n\t}\n\n\t// Start tailscaled (must be last as it's the foreground process)\n\tcommands = append(commands, \"tailscaled --tun=tsdev --verbose=10\")\n\n\treturn []string{\"/bin/sh\", \"-c\", strings.Join(commands, \" ; \")}\n}\n\n// New returns a new TailscaleInContainer instance.\n//\n//nolint:gocyclo // complex container setup with many options\nfunc New(\n\tpool *dockertest.Pool,\n\tversion string,\n\topts ...Option,\n) (*TailscaleInContainer, error) {\n\thash, err := util.GenerateRandomStringDNSSafe(tsicHashLength)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Include run ID in hostname for easier identification of which test run owns this container\n\trunID := dockertestutil.GetIntegrationRunID()\n\n\tvar hostname string\n\n\tif runID != \"\" {\n\t\t// Use last 6 chars of run ID (the random hash part) for brevity\n\t\trunIDShort := runID[len(runID)-6:]\n\t\thostname = fmt.Sprintf(\"ts-%s-%s-%s\", runIDShort, strings.ReplaceAll(version, \".\", \"-\"), hash)\n\t} else {\n\t\thostname = fmt.Sprintf(\"ts-%s-%s\", strings.ReplaceAll(version, \".\", \"-\"), hash)\n\t}\n\n\ttsic := &TailscaleInContainer{\n\t\tversion:  version,\n\t\thostname: hostname,\n\n\t\tpool: pool,\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(tsic)\n\t}\n\n\t// Build the entrypoint command dynamically based on options.\n\t// Only build if no custom entrypoint was provided via WithDockerEntrypoint.\n\tif len(tsic.withEntrypoint) == 0 {\n\t\ttsic.withEntrypoint = tsic.buildEntrypoint()\n\t}\n\n\tif tsic.network == nil {\n\t\treturn nil, fmt.Errorf(\"no network set, called from: \\n%s\", string(debug.Stack())) //nolint:err113\n\t}\n\n\ttailscaleOptions := &dockertest.RunOptions{\n\t\tName:       hostname,\n\t\tNetworks:   []*dockertest.Network{tsic.network},\n\t\tEntrypoint: tsic.withEntrypoint,\n\t\tExtraHosts: tsic.withExtraHosts,\n\t\tEnv:        []string{},\n\t}\n\n\tif tsic.withWebsocketDERP {\n\t\tif version != VersionHead {\n\t\t\treturn tsic, errInvalidClientConfig\n\t\t}\n\n\t\tWithBuildTag(\"ts_debug_websockets\")(tsic)\n\n\t\ttailscaleOptions.Env = append(\n\t\t\ttailscaleOptions.Env,\n\t\t\tfmt.Sprintf(\"TS_DEBUG_DERP_WS_CLIENT=%t\", tsic.withWebsocketDERP),\n\t\t)\n\t}\n\n\ttailscaleOptions.ExtraHosts = append(tailscaleOptions.ExtraHosts,\n\t\t\"host.docker.internal:host-gateway\")\n\n\tif tsic.workdir != \"\" {\n\t\ttailscaleOptions.WorkingDir = tsic.workdir\n\t}\n\n\t// dockertest isn't very good at handling containers that have already\n\t// been created; this is an attempt to make sure this container isn't\n\t// present.\n\terr = pool.RemoveContainerByName(hostname)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Add integration test labels if running under hi 
tool\n\tdockertestutil.DockerAddIntegrationLabels(tailscaleOptions, \"tailscale\")\n\n\tvar container *dockertest.Resource\n\n\tif version != VersionHead {\n\t\t// build options are not meaningful with pre-existing images,\n\t\t// let's not lead anyone astray by pretending otherwise.\n\t\tdefaultBuildConfig := TailscaleInContainerBuildConfig{}\n\n\t\thasBuildConfig := !reflect.DeepEqual(defaultBuildConfig, tsic.buildConfig)\n\t\tif hasBuildConfig {\n\t\t\treturn tsic, errInvalidClientConfig\n\t\t}\n\t}\n\n\tswitch version {\n\tcase VersionHead:\n\t\t// Check if a pre-built image is available via environment variable\n\t\tprebuiltImage := os.Getenv(\"HEADSCALE_INTEGRATION_TAILSCALE_IMAGE\")\n\n\t\t// If custom build tags are required (e.g., for websocket DERP), we cannot use\n\t\t// the pre-built image as it won't have the necessary code compiled in.\n\t\thasBuildTags := len(tsic.buildConfig.tags) > 0\n\t\tif hasBuildTags && prebuiltImage != \"\" {\n\t\t\tlog.Printf(\"Ignoring pre-built image %s because custom build tags are required: %v\",\n\t\t\t\tprebuiltImage, tsic.buildConfig.tags)\n\t\t\tprebuiltImage = \"\"\n\t\t}\n\n\t\tif prebuiltImage != \"\" {\n\t\t\tlog.Printf(\"Using pre-built tailscale image: %s\", prebuiltImage)\n\n\t\t\t// Parse image into repository and tag\n\t\t\trepo, tag, ok := strings.Cut(prebuiltImage, \":\")\n\t\t\tif !ok {\n\t\t\t\treturn nil, errInvalidTailscaleImageFormat\n\t\t\t}\n\n\t\t\ttailscaleOptions.Repository = repo\n\t\t\ttailscaleOptions.Tag = tag\n\n\t\t\tcontainer, err = pool.RunWithOptions(\n\t\t\t\ttailscaleOptions,\n\t\t\t\tdockertestutil.DockerRestartPolicy,\n\t\t\t\tdockertestutil.DockerAllowLocalIPv6,\n\t\t\t\tdockertestutil.DockerAllowNetworkAdministration,\n\t\t\t\tdockertestutil.DockerMemoryLimit,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"running pre-built tailscale container %q: %w\", prebuiltImage, err)\n\t\t\t}\n\t\t} else if util.IsCI() && !hasBuildTags {\n\t\t\t// In CI, we require a pre-built image unless custom build tags are needed\n\t\t\treturn nil, errTailscaleImageRequiredInCI\n\t\t} else {\n\t\t\tbuildOptions := &dockertest.BuildOptions{\n\t\t\t\tDockerfile: \"Dockerfile.tailscale-HEAD\",\n\t\t\t\tContextDir: dockerContextPath,\n\t\t\t\tBuildArgs:  []docker.BuildArg{},\n\t\t\t}\n\n\t\t\tbuildTags := strings.Join(tsic.buildConfig.tags, \",\")\n\t\t\tif len(buildTags) > 0 {\n\t\t\t\tbuildOptions.BuildArgs = append(\n\t\t\t\t\tbuildOptions.BuildArgs,\n\t\t\t\t\tdocker.BuildArg{\n\t\t\t\t\t\tName:  \"BUILD_TAGS\",\n\t\t\t\t\t\tValue: buildTags,\n\t\t\t\t\t},\n\t\t\t\t)\n\t\t\t}\n\n\t\t\tcontainer, err = pool.BuildAndRunWithBuildOptions(\n\t\t\t\tbuildOptions,\n\t\t\t\ttailscaleOptions,\n\t\t\t\tdockertestutil.DockerRestartPolicy,\n\t\t\t\tdockertestutil.DockerAllowLocalIPv6,\n\t\t\t\tdockertestutil.DockerAllowNetworkAdministration,\n\t\t\t\tdockertestutil.DockerMemoryLimit,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\t// Try to get more detailed build output\n\t\t\t\tlog.Printf(\"Docker build failed for %s, attempting to get detailed output...\", hostname)\n\n\t\t\t\tbuildOutput, buildErr := dockertestutil.RunDockerBuildForDiagnostics(dockerContextPath, \"Dockerfile.tailscale-HEAD\")\n\n\t\t\t\t// Show the last 100 lines of build output to avoid overwhelming the logs\n\t\t\t\tlines := strings.Split(buildOutput, \"\\n\")\n\n\t\t\t\tconst maxLines = 100\n\n\t\t\t\tstartLine := 0\n\t\t\t\tif len(lines) > maxLines {\n\t\t\t\t\tstartLine = len(lines) - maxLines\n\t\t\t\t}\n\n\t\t\t\trelevantOutput := 
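strings.Join(lines[startLine:], \"\\n\")\n\n\t\t\t\t// The diagnostic rebuild may behave differently from the original\n\t\t\t\t// attempt (e.g. due to layer caching or a transient failure), which\n\t\t\t\t// is why the branches below distinguish \"rebuild failed\", \"rebuild\n\t\t\t\t// succeeded but the run failed\", and \"no output at all\".\n\t\t\t\trelevantOutput = 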
strings.Join(lines[startLine:], \"\\n\")\n\n\t\t\t\tif buildErr != nil {\n\t\t\t\t\t// The diagnostic build also failed - this is the real error\n\t\t\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\t\t\"%s could not start tailscale container (version: %s): %w\\n\\nDocker build failed. Last %d lines of output:\\n%s\",\n\t\t\t\t\t\thostname,\n\t\t\t\t\t\tversion,\n\t\t\t\t\t\terr,\n\t\t\t\t\t\tmaxLines,\n\t\t\t\t\t\trelevantOutput,\n\t\t\t\t\t)\n\t\t\t\t}\n\n\t\t\t\tif buildOutput != \"\" {\n\t\t\t\t\t// Build succeeded on retry but container creation still failed\n\t\t\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\t\t\"%s could not start tailscale container (version: %s): %w\\n\\nDocker build succeeded on retry, but container creation failed. Last %d lines of build output:\\n%s\",\n\t\t\t\t\t\thostname,\n\t\t\t\t\t\tversion,\n\t\t\t\t\t\terr,\n\t\t\t\t\t\tmaxLines,\n\t\t\t\t\t\trelevantOutput,\n\t\t\t\t\t)\n\t\t\t\t}\n\n\t\t\t\t// No output at all - diagnostic build command may have failed\n\t\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\t\"%s could not start tailscale container (version: %s): %w\\n\\nUnable to get diagnostic build output (command may have failed silently)\",\n\t\t\t\t\thostname,\n\t\t\t\t\tversion,\n\t\t\t\t\terr,\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\tcase \"unstable\":\n\t\ttailscaleOptions.Repository = \"tailscale/tailscale\"\n\t\ttailscaleOptions.Tag = version\n\n\t\tcontainer, err = pool.RunWithOptions(\n\t\t\ttailscaleOptions,\n\t\t\tdockertestutil.DockerRestartPolicy,\n\t\t\tdockertestutil.DockerAllowLocalIPv6,\n\t\t\tdockertestutil.DockerAllowNetworkAdministration,\n\t\t\tdockertestutil.DockerMemoryLimit,\n\t\t)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Docker run failed for %s (unstable), error: %v\", hostname, err)\n\t\t}\n\tdefault:\n\t\ttailscaleOptions.Repository = \"tailscale/tailscale\"\n\t\ttailscaleOptions.Tag = \"v\" + version\n\n\t\tcontainer, err = pool.RunWithOptions(\n\t\t\ttailscaleOptions,\n\t\t\tdockertestutil.DockerRestartPolicy,\n\t\t\tdockertestutil.DockerAllowLocalIPv6,\n\t\t\tdockertestutil.DockerAllowNetworkAdministration,\n\t\t\tdockertestutil.DockerMemoryLimit,\n\t\t)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Docker run failed for %s (version: v%s), error: %v\", hostname, version, err)\n\t\t}\n\t}\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"%s could not start tailscale container (version: %s): %w\",\n\t\t\thostname,\n\t\t\tversion,\n\t\t\terr,\n\t\t)\n\t}\n\n\tlog.Printf(\"Created %s container\\n\", hostname)\n\n\ttsic.container = container\n\n\tfor i, cert := range tsic.caCerts {\n\t\terr = tsic.WriteFile(fmt.Sprintf(\"%s/user-%d.crt\", caCertRoot, i), cert)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"writing TLS certificate to container: %w\", err)\n\t\t}\n\t}\n\n\treturn tsic, nil\n}\n\n// Shutdown stops and cleans up the Tailscale container.\nfunc (t *TailscaleInContainer) Shutdown() (string, string, error) {\n\tstdoutPath, stderrPath, err := t.SaveLog(\"/tmp/control\")\n\tif err != nil {\n\t\tlog.Printf(\n\t\t\t\"saving log from %s: %s\",\n\t\t\tt.hostname,\n\t\t\tfmt.Errorf(\"saving log: %w\", err),\n\t\t)\n\t}\n\n\treturn stdoutPath, stderrPath, t.pool.Purge(t.container)\n}\n\n// Hostname returns the hostname of the Tailscale instance.\nfunc (t *TailscaleInContainer) Hostname() string {\n\treturn t.hostname\n}\n\n// Version returns the running Tailscale version of the instance.\nfunc (t *TailscaleInContainer) Version() string {\n\treturn t.version\n}\n\n// ContainerID returns the Docker container ID of the TailscaleInContainer\n// 
instance.\nfunc (t *TailscaleInContainer) ContainerID() string {\n\treturn t.container.Container.ID\n}\n\n// Execute runs a command inside the Tailscale container and returns\n// stdout and stderr as strings.\nfunc (t *TailscaleInContainer) Execute(\n\tcommand []string,\n\toptions ...dockertestutil.ExecuteCommandOption,\n) (string, string, error) {\n\tstdout, stderr, err := dockertestutil.ExecuteCommand(\n\t\tt.container,\n\t\tcommand,\n\t\t[]string{},\n\t\toptions...,\n\t)\n\tif err != nil {\n\t\t// log.Printf(\"command issued: %s\", strings.Join(command, \" \"))\n\t\t// log.Printf(\"command stderr: %s\\n\", stderr)\n\t\tif stdout != \"\" {\n\t\t\tlog.Printf(\"command stdout: %s\\n\", stdout)\n\t\t}\n\n\t\tif strings.Contains(stderr, \"NeedsLogin\") {\n\t\t\treturn stdout, stderr, errTailscaleNotLoggedIn\n\t\t}\n\n\t\treturn stdout, stderr, err\n\t}\n\n\treturn stdout, stderr, nil\n}\n\n// Logs retrieves the container logs.\nfunc (t *TailscaleInContainer) Logs(stdout, stderr io.Writer) error {\n\treturn dockertestutil.WriteLog(\n\t\tt.pool,\n\t\tt.container,\n\t\tstdout, stderr,\n\t)\n}\n\nfunc (t *TailscaleInContainer) buildLoginCommand(\n\tloginServer, authKey string,\n) []string {\n\tcommand := []string{\n\t\t\"tailscale\",\n\t\t\"up\",\n\t\t\"--login-server=\" + loginServer,\n\t\t\"--hostname=\" + t.hostname,\n\t\tfmt.Sprintf(\"--accept-routes=%t\", t.withAcceptRoutes),\n\t}\n\n\tif authKey != \"\" {\n\t\tcommand = append(command, \"--authkey=\"+authKey)\n\t}\n\n\tif t.extraLoginArgs != nil {\n\t\tcommand = append(command, t.extraLoginArgs...)\n\t}\n\n\tif t.withSSH {\n\t\tcommand = append(command, \"--ssh\")\n\t}\n\n\tif t.netfilter != \"\" {\n\t\tcommand = append(command, \"--netfilter-mode=\"+t.netfilter)\n\t}\n\n\tif len(t.withTags) > 0 {\n\t\tcommand = append(command,\n\t\t\t\"--advertise-tags=\"+strings.Join(t.withTags, \",\"),\n\t\t)\n\t}\n\n\treturn command\n}\n\n// Login runs the login routine on the given Tailscale instance.\n// This login mechanism uses the authorised key for authentication.\nfunc (t *TailscaleInContainer) Login(\n\tloginServer, authKey string,\n) error {\n\tcommand := t.buildLoginCommand(loginServer, authKey)\n\n\tif _, _, err := t.Execute(command, dockertestutil.ExecuteCommandTimeout(dockerExecuteTimeout)); err != nil { //nolint:noinlineerr\n\t\treturn fmt.Errorf(\n\t\t\t\"%s failed to join tailscale client (%s): %w\",\n\t\t\tt.hostname,\n\t\t\tstrings.Join(command, \" \"),\n\t\t\terr,\n\t\t)\n\t}\n\n\treturn nil\n}\n\n// LoginWithURL runs the login routine on the given Tailscale instance.\n// This login mechanism uses the web + command line flow for authentication.\nfunc (t *TailscaleInContainer) LoginWithURL(\n\tloginServer string,\n) (*url.URL, error) {\n\tcommand := t.buildLoginCommand(loginServer, \"\")\n\n\tstdout, stderr, err := t.Execute(command)\n\tif errors.Is(err, errTailscaleNotLoggedIn) {\n\t\treturn nil, errTailscaleCannotUpWithoutAuthkey\n\t}\n\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tlog.Printf(\"join command: %q\", strings.Join(command, \" \"))\n\t\t}\n\t}()\n\n\tloginURL, err := util.ParseLoginURLFromCLILogin(stdout + stderr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn loginURL, nil\n}\n\n// Logout runs the logout routine on the given Tailscale instance.\nfunc (t *TailscaleInContainer) Logout() error {\n\t_, _, err := t.Execute([]string{\"tailscale\", \"logout\"})\n\tif err != nil {\n\t\treturn err\n\t}\n\n
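\t// Confirm the client actually reports being logged out before\n\t// waiting for the backend to settle into the NeedsLogin state.\n\tstdout, stderr, _ := t.Execute([]string{\"tailscale\", \"status\"})\n\tif !strings.Contains(stdout+stderr, 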
\"Logged out.\") {\n\t\treturn fmt.Errorf(\"logging out, stdout: %s, stderr: %s\", stdout, stderr) //nolint:err113\n\t}\n\n\treturn t.waitForBackendState(\"NeedsLogin\", integrationutil.PeerSyncTimeout())\n}\n\n// Restart restarts the Tailscale container using Docker API.\n// This simulates a container restart (e.g., docker restart or Kubernetes pod restart).\n// The container's entrypoint will re-execute, which typically includes running\n// \"tailscale up\" with any auth keys stored in environment variables.\nfunc (t *TailscaleInContainer) Restart() error {\n\tif t.container == nil {\n\t\treturn errContainerNotInitialized\n\t}\n\n\t// Use Docker API to restart the container\n\terr := t.pool.Client.RestartContainer(t.container.Container.ID, 30)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"restarting container %s: %w\", t.hostname, err)\n\t}\n\n\t// Wait for the container to be back up and tailscaled to be ready\n\t// We use exponential backoff to poll until we can successfully execute a command\n\t_, err = backoff.Retry(context.Background(), func() (struct{}, error) {\n\t\t// Try to execute a simple command to verify the container is responsive\n\t\t_, _, err := t.Execute([]string{\"tailscale\", \"version\"}, dockertestutil.ExecuteCommandTimeout(5*time.Second))\n\t\tif err != nil {\n\t\t\treturn struct{}{}, fmt.Errorf(\"container not ready: %w\", err)\n\t\t}\n\n\t\treturn struct{}{}, nil\n\t}, backoff.WithBackOff(backoff.NewExponentialBackOff()), backoff.WithMaxElapsedTime(30*time.Second))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"timeout waiting for container %s to restart and become ready: %w\", t.hostname, err)\n\t}\n\n\treturn nil\n}\n\n// Up runs `tailscale up` with no arguments.\nfunc (t *TailscaleInContainer) Up() error {\n\tcommand := []string{\n\t\t\"tailscale\",\n\t\t\"up\",\n\t}\n\n\tif _, _, err := t.Execute(command, dockertestutil.ExecuteCommandTimeout(dockerExecuteTimeout)); err != nil { //nolint:noinlineerr\n\t\treturn fmt.Errorf(\n\t\t\t\"%s failed to bring tailscale client up (%s): %w\",\n\t\t\tt.hostname,\n\t\t\tstrings.Join(command, \" \"),\n\t\t\terr,\n\t\t)\n\t}\n\n\treturn nil\n}\n\n// Down runs `tailscale down` with no arguments.\nfunc (t *TailscaleInContainer) Down() error {\n\tcommand := []string{\n\t\t\"tailscale\",\n\t\t\"down\",\n\t}\n\n\tif _, _, err := t.Execute(command, dockertestutil.ExecuteCommandTimeout(dockerExecuteTimeout)); err != nil { //nolint:noinlineerr\n\t\treturn fmt.Errorf(\n\t\t\t\"%s failed to bring tailscale client down (%s): %w\",\n\t\t\tt.hostname,\n\t\t\tstrings.Join(command, \" \"),\n\t\t\terr,\n\t\t)\n\t}\n\n\treturn nil\n}\n\n// IPs returns the netip.Addr of the Tailscale instance.\nfunc (t *TailscaleInContainer) IPs() ([]netip.Addr, error) {\n\tif len(t.ips) != 0 {\n\t\treturn t.ips, nil\n\t}\n\n\t// Retry with exponential backoff to handle eventual consistency\n\tips, err := backoff.Retry(context.Background(), func() ([]netip.Addr, error) {\n\t\tcommand := []string{\n\t\t\t\"tailscale\",\n\t\t\t\"ip\",\n\t\t}\n\n\t\tresult, _, err := t.Execute(command)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"%s failed to get IPs: %w\", t.hostname, err)\n\t\t}\n\n\t\tips := make([]netip.Addr, 0)\n\n\t\tfor address := range strings.SplitSeq(result, \"\\n\") {\n\t\t\taddress = strings.TrimSuffix(address, \"\\n\")\n\t\t\tif len(address) < 1 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tip, err := netip.ParseAddr(address)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"parsing IP %s: %w\", address, err)\n\t\t\t}\n\n\t\t\tips = append(ips, 
ip)\n\t\t}\n\n\t\tif len(ips) == 0 {\n\t\t\treturn nil, fmt.Errorf(\"no IPs returned yet for %s\", t.hostname) //nolint:err113\n\t\t}\n\n\t\treturn ips, nil\n\t}, backoff.WithBackOff(backoff.NewExponentialBackOff()), backoff.WithMaxElapsedTime(10*time.Second))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"getting IPs for %s after retries: %w\", t.hostname, err)\n\t}\n\n\treturn ips, nil\n}\n\nfunc (t *TailscaleInContainer) MustIPs() []netip.Addr {\n\tips, err := t.IPs()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn ips\n}\n\n// IPv4 returns the IPv4 address of the Tailscale instance.\nfunc (t *TailscaleInContainer) IPv4() (netip.Addr, error) {\n\tips, err := t.IPs()\n\tif err != nil {\n\t\treturn netip.Addr{}, err\n\t}\n\n\tfor _, ip := range ips {\n\t\tif ip.Is4() {\n\t\t\treturn ip, nil\n\t\t}\n\t}\n\n\treturn netip.Addr{}, fmt.Errorf(\"no IPv4 address found for %s\", t.hostname) //nolint:err113\n}\n\nfunc (t *TailscaleInContainer) MustIPv4() netip.Addr {\n\tip, err := t.IPv4()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn ip\n}\n\nfunc (t *TailscaleInContainer) MustIPv6() netip.Addr {\n\tfor _, ip := range t.MustIPs() {\n\t\tif ip.Is6() {\n\t\t\treturn ip\n\t\t}\n\t}\n\n\tpanic(\"no ipv6 found\")\n}\n\n// Status returns the ipnstate.Status of the Tailscale instance.\nfunc (t *TailscaleInContainer) Status(save ...bool) (*ipnstate.Status, error) {\n\tcommand := []string{\n\t\t\"tailscale\",\n\t\t\"status\",\n\t\t\"--json\",\n\t}\n\n\tresult, _, err := t.Execute(command)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"executing tailscale status command: %w\", err)\n\t}\n\n\tvar status ipnstate.Status\n\n\terr = json.Unmarshal([]byte(result), &status)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshalling tailscale status: %w\", err)\n\t}\n\n\terr = os.WriteFile(fmt.Sprintf(\"/tmp/control/%s_status.json\", t.hostname), []byte(result), 0o755) //nolint:gosec // test infrastructure log files\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"saving status to /tmp/control: %w\", err)\n\t}\n\n\treturn &status, err\n}\n\n// MustStatus returns the ipnstate.Status of the Tailscale instance.\nfunc (t *TailscaleInContainer) MustStatus() *ipnstate.Status {\n\tstatus, err := t.Status()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn status\n}\n\n// MustID returns the ID of the Tailscale instance.\nfunc (t *TailscaleInContainer) MustID() types.NodeID {\n\tstatus, err := t.Status()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tid, err := strconv.ParseUint(string(status.Self.ID), 10, 64)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"parsing ID: %s\", err))\n\t}\n\n\treturn types.NodeID(id)\n}\n\n// Netmap returns the current Netmap (netmap.NetworkMap) of the Tailscale instance.\n// Only works with Tailscale 1.56 and newer.\n// Panics if the version is lower than the minimum.\nfunc (t *TailscaleInContainer) Netmap() (*netmap.NetworkMap, error) {\n\tif !util.TailscaleVersionNewerOrEqual(\"1.56\", t.version) {\n\t\tpanic(\"tsic.Netmap() called with unsupported version: \" + t.version)\n\t}\n\n\tcommand := []string{\n\t\t\"tailscale\",\n\t\t\"debug\",\n\t\t\"netmap\",\n\t}\n\n\tresult, stderr, err := t.Execute(command)\n\tif err != nil {\n\t\tfmt.Printf(\"stderr: %s\\n\", stderr)\n\t\treturn nil, fmt.Errorf(\"executing tailscale debug netmap command: %w\", err)\n\t}\n\n\tvar nm netmap.NetworkMap\n\n\terr = json.Unmarshal([]byte(result), &nm)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshalling tailscale netmap: %w\", err)\n\t}\n\n
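\t// Persist the netmap to /tmp/control so it can be inspected after the test run.\n\terr = 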
os.WriteFile(fmt.Sprintf(\"/tmp/control/%s_netmap.json\", t.hostname), []byte(result), 0o755) //nolint:gosec // test infrastructure log files\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"saving netmap to /tmp/control: %w\", err)\n\t}\n\n\treturn &nm, err\n}\n\n// Netmap returns the current Netmap (netmap.NetworkMap) of the Tailscale instance.\n// This implementation is based on getting the netmap from `tailscale debug watch-ipn`\n// as there seem to be some weirdness omitting endpoint and DERP info if we use\n// Patch updates.\n// This implementation works on all supported versions.\n// func (t *TailscaleInContainer) Netmap() (*netmap.NetworkMap, error) {\n// \t// watch-ipn will only give an update if something is happening,\n// \t// since we send keep alives, the worst case for this should be\n// \t// 1 minute, but set a slightly more conservative time.\n// \tctx, _ := context.WithTimeout(context.Background(), 3*time.Minute)\n\n// \tnotify, err := t.watchIPN(ctx)\n// \tif err != nil {\n// \t\treturn nil, err\n// \t}\n\n// \tif notify.NetMap == nil {\n// \t\treturn nil, fmt.Errorf(\"no netmap present in ipn.Notify\")\n// \t}\n\n// \treturn notify.NetMap, nil\n// }\n\n// watchIPN watches `tailscale debug watch-ipn` for a ipn.Notify object until\n// it gets one that has a netmap.NetworkMap.\n//\n//nolint:unused\nfunc (t *TailscaleInContainer) watchIPN(ctx context.Context) (*ipn.Notify, error) {\n\tpr, pw := io.Pipe()\n\n\ttype result struct {\n\t\tnotify *ipn.Notify\n\t\terr    error\n\t}\n\n\tresultChan := make(chan result, 1)\n\n\t// There is no good way to kill the goroutine with watch-ipn,\n\t// so make a nice func to send a kill command to issue when\n\t// we are done.\n\tkillWatcher := func() {\n\t\tstdout, stderr, err := t.Execute([]string{\n\t\t\t\"/bin/sh\", \"-c\", `kill $(ps aux | grep \"tailscale debug watch-ipn\" | grep -v grep | awk '{print $1}') || true`,\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Printf(\"killing tailscale watcher, \\nstdout: %s\\nstderr: %s\\nerr: %s\", stdout, stderr, err)\n\t\t}\n\t}\n\n\tgo func() {\n\t\t_, _ = t.container.Exec(\n\t\t\t// Prior to 1.56, the initial \"Connected.\" message was printed to stdout,\n\t\t\t// filter out with grep.\n\t\t\t[]string{\"/bin/sh\", \"-c\", `tailscale debug watch-ipn | grep -v \"Connected.\"`},\n\t\t\tdockertest.ExecOptions{\n\t\t\t\t// The interesting output is sent to stdout, so ignore stderr.\n\t\t\t\tStdOut: pw,\n\t\t\t\t// StdErr: pw,\n\t\t\t},\n\t\t)\n\t}()\n\n\tgo func() {\n\t\tdecoder := json.NewDecoder(pr)\n\t\tfor decoder.More() {\n\t\t\tvar notify ipn.Notify\n\n\t\t\terr := decoder.Decode(&notify)\n\t\t\tif err != nil {\n\t\t\t\tresultChan <- result{nil, fmt.Errorf(\"parse notify: %w\", err)}\n\t\t\t}\n\n\t\t\tif notify.NetMap != nil {\n\t\t\t\tresultChan <- result{&notify, nil}\n\t\t\t}\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\tkillWatcher()\n\n\t\treturn nil, ctx.Err()\n\n\tcase result := <-resultChan:\n\t\tkillWatcher()\n\n\t\tif result.err != nil {\n\t\t\treturn nil, result.err\n\t\t}\n\n\t\treturn result.notify, nil\n\t}\n}\n\nfunc (t *TailscaleInContainer) DebugDERPRegion(region string) (*ipnstate.DebugDERPRegionReport, error) {\n\tif !util.TailscaleVersionNewerOrEqual(\"1.34\", t.version) {\n\t\tpanic(\"tsic.DebugDERPRegion() called with unsupported version: \" + t.version)\n\t}\n\n\tcommand := []string{\n\t\t\"tailscale\",\n\t\t\"debug\",\n\t\t\"derp\",\n\t\tregion,\n\t}\n\n\tresult, stderr, err := t.Execute(command)\n\tif err != nil {\n\t\tfmt.Printf(\"stderr: %s\\n\", stderr) // 
nolint\n\n\t\treturn nil, fmt.Errorf(\"executing tailscale debug derp command: %w\", err)\n\t}\n\n\tvar report ipnstate.DebugDERPRegionReport\n\n\terr = json.Unmarshal([]byte(result), &report)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshalling tailscale derp region report: %w\", err)\n\t}\n\n\treturn &report, err\n}\n\n// Netcheck returns the current Netcheck Report (netcheck.Report) of the Tailscale instance.\nfunc (t *TailscaleInContainer) Netcheck() (*netcheck.Report, error) {\n\tcommand := []string{\n\t\t\"tailscale\",\n\t\t\"netcheck\",\n\t\t\"--format=json\",\n\t}\n\n\tresult, stderr, err := t.Execute(command)\n\tif err != nil {\n\t\tfmt.Printf(\"stderr: %s\\n\", stderr)\n\t\treturn nil, fmt.Errorf(\"executing tailscale debug netcheck command: %w\", err)\n\t}\n\n\tvar nm netcheck.Report\n\n\terr = json.Unmarshal([]byte(result), &nm)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshalling tailscale netcheck: %w\", err)\n\t}\n\n\treturn &nm, err\n}\n\n// FQDN returns the FQDN as a string of the Tailscale instance.\nfunc (t *TailscaleInContainer) FQDN() (string, error) {\n\tif t.fqdn != \"\" {\n\t\treturn t.fqdn, nil\n\t}\n\n\t// Retry with exponential backoff to handle eventual consistency\n\tfqdn, err := backoff.Retry(context.Background(), func() (string, error) {\n\t\tstatus, err := t.Status()\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"getting status: %w\", err)\n\t\t}\n\n\t\tif status.Self.DNSName == \"\" {\n\t\t\treturn \"\", errFQDNNotYetAvailable\n\t\t}\n\n\t\treturn status.Self.DNSName, nil\n\t}, backoff.WithBackOff(backoff.NewExponentialBackOff()), backoff.WithMaxElapsedTime(10*time.Second))\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"getting FQDN for %s after retries: %w\", t.hostname, err)\n\t}\n\n\treturn fqdn, nil\n}\n\n// MustFQDN returns the FQDN as a string of the Tailscale instance, panicking on error.\nfunc (t *TailscaleInContainer) MustFQDN() string {\n\tfqdn, err := t.FQDN()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn fqdn\n}\n\n// FailingPeersAsString returns a formatted-ish multi-line-string of peers in the client\n// and a bool indicating if the clients online count and peer count is equal.\nfunc (t *TailscaleInContainer) FailingPeersAsString() (string, bool, error) {\n\tstatus, err := t.Status()\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"getting FQDN: %w\", err)\n\t}\n\n\tvar b strings.Builder\n\n\tfmt.Fprintf(&b, \"Peers of %s\\n\", t.hostname)\n\tfmt.Fprint(&b, \"Hostname\\tOnline\\tLastSeen\\n\")\n\n\tpeerCount := len(status.Peers())\n\tonlineCount := 0\n\n\tfor _, peerKey := range status.Peers() {\n\t\tpeer := status.Peer[peerKey]\n\n\t\tif peer.Online {\n\t\t\tonlineCount++\n\t\t}\n\n\t\tfmt.Fprintf(&b, \"%s\\t%t\\t%s\\n\", peer.HostName, peer.Online, peer.LastSeen)\n\t}\n\n\tfmt.Fprintf(&b, \"Peer Count: %d, Online Count: %d\\n\\n\", peerCount, onlineCount)\n\n\treturn b.String(), peerCount == onlineCount, nil\n}\n\n// WaitForNeedsLogin blocks until the Tailscale (tailscaled) instance has\n// started and needs to be logged into.\nfunc (t *TailscaleInContainer) WaitForNeedsLogin(timeout time.Duration) error {\n\treturn t.waitForBackendState(\"NeedsLogin\", timeout)\n}\n\n// WaitForRunning blocks until the Tailscale (tailscaled) instance is logged in\n// and ready to be used.\nfunc (t *TailscaleInContainer) WaitForRunning(timeout time.Duration) error {\n\treturn t.waitForBackendState(\"Running\", timeout)\n}\n\nfunc (t *TailscaleInContainer) waitForBackendState(state string, timeout time.Duration) 
error {\n\tticker := time.NewTicker(integrationutil.PeerSyncRetryInterval())\n\tdefer ticker.Stop()\n\n\tctx, cancel := context.WithTimeout(context.Background(), timeout)\n\tdefer cancel()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn fmt.Errorf(\"timeout waiting for backend state %s on %s after %v\", state, t.hostname, timeout) //nolint:err113\n\t\tcase <-ticker.C:\n\t\t\tstatus, err := t.Status()\n\t\t\tif err != nil {\n\t\t\t\tcontinue // Keep retrying on status errors\n\t\t\t}\n\n\t\t\t// ipnstate.Status.CurrentTailnet was added in Tailscale 1.22.0\n\t\t\t// https://github.com/tailscale/tailscale/pull/3865\n\t\t\t//\n\t\t\t// Before that, we can check the BackendState to see if the\n\t\t\t// tailscaled daemon is connected to the control system.\n\t\t\tif status.BackendState == state {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n}\n\n// WaitForPeers blocks until the expected number of peers is present in the\n// peer list of the Tailscale instance and all of them report as Online.\n//\n// The method verifies that:\n// - The client has the expected peer count\n// - All peers are Online\n// - All peers have a hostname\n// - All peers have a DERP relay assigned\n//\n// Uses multierr to collect all validation errors.\nfunc (t *TailscaleInContainer) WaitForPeers(expected int, timeout, retryInterval time.Duration) error {\n\tticker := time.NewTicker(retryInterval)\n\tdefer ticker.Stop()\n\n\tctx, cancel := context.WithTimeout(context.Background(), timeout)\n\tdefer cancel()\n\n\tvar lastErrs []error\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tif len(lastErrs) > 0 {\n\t\t\t\treturn fmt.Errorf(\"timeout waiting for %d peers on %s after %v, errors: %w\", expected, t.hostname, timeout, multierr.New(lastErrs...))\n\t\t\t}\n\n\t\t\treturn fmt.Errorf(\"timeout waiting for %d peers on %s after %v\", expected, t.hostname, timeout) //nolint:err113\n\t\tcase <-ticker.C:\n\t\t\tstatus, err := t.Status()\n\t\t\tif err != nil {\n\t\t\t\tlastErrs = []error{errTailscaleStatus(t.hostname, err)}\n\t\t\t\tcontinue // Keep retrying on status errors\n\t\t\t}\n\n\t\t\tif peers := status.Peers(); len(peers) != expected {\n\t\t\t\tlastErrs = []error{fmt.Errorf(\n\t\t\t\t\t\"%s err: %w expected %d, got %d\",\n\t\t\t\t\tt.hostname,\n\t\t\t\t\terrTailscaleWrongPeerCount,\n\t\t\t\t\texpected,\n\t\t\t\t\tlen(peers),\n\t\t\t\t)}\n\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Verify that each peer of the node is Online and\n\t\t\t// has a hostname and a DERP relay.\n\t\t\tvar peerErrors []error\n\n\t\t\tfor _, peerKey := range status.Peers() {\n\t\t\t\tpeer := status.Peer[peerKey]\n\n\t\t\t\tif !peer.Online {\n\t\t\t\t\tpeerErrors = append(peerErrors, fmt.Errorf(\"[%s] peer count correct, but %s is not online\", t.hostname, peer.HostName)) //nolint:err113\n\t\t\t\t}\n\n\t\t\t\tif peer.HostName == \"\" {\n\t\t\t\t\tpeerErrors = append(peerErrors, fmt.Errorf(\"[%s] peer count correct, but %s does not have a Hostname\", t.hostname, peer.HostName)) //nolint:err113\n\t\t\t\t}\n\n\t\t\t\tif peer.Relay == \"\" {\n\t\t\t\t\tpeerErrors = append(peerErrors, fmt.Errorf(\"[%s] peer count correct, but %s does not have a DERP\", t.hostname, peer.HostName)) //nolint:err113\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif len(peerErrors) > 0 {\n\t\t\t\tlastErrs = peerErrors\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\ntype (\n\t// PingOption represents optional settings that can be given\n\t// to ping another host.\n\tPingOption = func(args *pingArgs)\n\n\tpingArgs struct {\n\t\ttimeout time.Duration\n\t\tcount   int\n\t\tdirect  
bool\n\t}\n)\n\n// WithPingTimeout sets the timeout for the ping command.\nfunc WithPingTimeout(timeout time.Duration) PingOption {\n\treturn func(args *pingArgs) {\n\t\targs.timeout = timeout\n\t}\n}\n\n// WithPingCount sets the count of pings to attempt.\nfunc WithPingCount(count int) PingOption {\n\treturn func(args *pingArgs) {\n\t\targs.count = count\n\t}\n}\n\n// WithPingUntilDirect decides if the ping should only succeed\n// if a direct connection is established or if a successful\n// DERP ping is sufficient.\nfunc WithPingUntilDirect(direct bool) PingOption {\n\treturn func(args *pingArgs) {\n\t\targs.direct = direct\n\t}\n}\n\n// Ping executes the Tailscale ping command and pings a hostname\n// or IP. It accepts a series of PingOption.\n// TODO(kradalby): Make multiping, go routine magic.\nfunc (t *TailscaleInContainer) Ping(hostnameOrIP string, opts ...PingOption) error {\n\targs := pingArgs{\n\t\ttimeout: defaultPingTimeout,\n\t\tcount:   defaultPingCount,\n\t\tdirect:  true,\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(&args)\n\t}\n\n\tcommand := make([]string, 0, 6)\n\tcommand = append(command,\n\t\t\"tailscale\", \"ping\",\n\t\tfmt.Sprintf(\"--timeout=%s\", args.timeout),\n\t\tfmt.Sprintf(\"--c=%d\", args.count),\n\t\t\"--until-direct=\"+strconv.FormatBool(args.direct),\n\t\thostnameOrIP,\n\t)\n\n\tresult, _, err := t.Execute(\n\t\tcommand,\n\t\tdockertestutil.ExecuteCommandTimeout(\n\t\t\ttime.Duration(int64(args.timeout)*int64(args.count)),\n\t\t),\n\t)\n\tif err != nil {\n\t\tlog.Printf(\"command: %v\", command)\n\t\tlog.Printf(\n\t\t\t\"running ping command from %s to %s, err: %s\",\n\t\t\tt.Hostname(),\n\t\t\thostnameOrIP,\n\t\t\terr,\n\t\t)\n\n\t\treturn err\n\t}\n\n\tif strings.Contains(result, \"is local\") {\n\t\treturn nil\n\t}\n\n\tif !strings.Contains(result, \"pong\") {\n\t\treturn errTailscalePingFailed\n\t}\n\n\tif !args.direct {\n\t\tif strings.Contains(result, \"via DERP\") {\n\t\t\treturn nil\n\t\t} else {\n\t\t\treturn errTailscalePingNotDERP\n\t\t}\n\t}\n\n\treturn nil\n}\n\ntype (\n\t// CurlOption represents optional settings that can be given\n\t// to curl another host.\n\tCurlOption = func(args *curlArgs)\n\n\tcurlArgs struct {\n\t\tconnectionTimeout time.Duration\n\t\tmaxTime           time.Duration\n\t\tretry             int\n\t\tretryDelay        time.Duration\n\t\tretryMaxTime      time.Duration\n\t}\n)\n\n// WithCurlConnectionTimeout sets the timeout for each connection started\n// by curl.\nfunc WithCurlConnectionTimeout(timeout time.Duration) CurlOption {\n\treturn func(args *curlArgs) {\n\t\targs.connectionTimeout = timeout\n\t}\n}\n\n// WithCurlMaxTime sets the max time for a transfer for each connection started\n// by curl.\nfunc WithCurlMaxTime(t time.Duration) CurlOption {\n\treturn func(args *curlArgs) {\n\t\targs.maxTime = t\n\t}\n}\n\n// WithCurlRetry sets the number of times curl will retry a failed connection.\nfunc WithCurlRetry(ret int) CurlOption {\n\treturn func(args *curlArgs) {\n\t\targs.retry = ret\n\t}\n}\n\nconst (\n\tdefaultConnectionTimeout = 1 * time.Second\n\tdefaultMaxTime           = 3 * time.Second\n\tdefaultRetry             = 3\n\tdefaultRetryDelay        = 200 * time.Millisecond\n\tdefaultRetryMaxTime      = 5 * time.Second\n)\n\n
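// Example (illustrative sketch): relax the transfer limits when fetching\n// from a slow endpoint:\n//\n//\tbody, err := ts.Curl(\"http://peer:80/\", WithCurlMaxTime(10*time.Second))\n\n// Curl executes the Tailscale curl command and curls a hostname\n// or IP. 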
It accepts a series of CurlOption.\nfunc (t *TailscaleInContainer) Curl(url string, opts ...CurlOption) (string, error) {\n\targs := curlArgs{\n\t\tconnectionTimeout: defaultConnectionTimeout,\n\t\tmaxTime:           defaultMaxTime,\n\t\tretry:             defaultRetry,\n\t\tretryDelay:        defaultRetryDelay,\n\t\tretryMaxTime:      defaultRetryMaxTime,\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(&args)\n\t}\n\n\tcommand := []string{\n\t\t\"curl\",\n\t\t\"--silent\",\n\t\t\"--connect-timeout\", strconv.Itoa(int(args.connectionTimeout.Seconds())),\n\t\t\"--max-time\", strconv.Itoa(int(args.maxTime.Seconds())),\n\t\t\"--retry\", strconv.Itoa(args.retry),\n\t\t\"--retry-delay\", strconv.Itoa(int(args.retryDelay.Seconds())),\n\t\t\"--retry-max-time\", strconv.Itoa(int(args.retryMaxTime.Seconds())),\n\t\turl,\n\t}\n\n\tvar result string\n\n\tresult, _, err := t.Execute(command)\n\tif err != nil {\n\t\tlog.Printf(\n\t\t\t\"running curl command from %s to %s, err: %s\",\n\t\t\tt.Hostname(),\n\t\t\turl,\n\t\t\terr,\n\t\t)\n\n\t\treturn result, err\n\t}\n\n\treturn result, nil\n}\n\n// CurlFailFast executes the Tailscale curl command with aggressive timeouts\n// optimized for testing expected connection failures. It uses minimal timeouts\n// to quickly detect blocked connections without waiting for multiple retries.\nfunc (t *TailscaleInContainer) CurlFailFast(url string) (string, error) {\n\t// Use aggressive timeouts for fast failure detection\n\treturn t.Curl(url,\n\t\tWithCurlConnectionTimeout(1*time.Second),\n\t\tWithCurlMaxTime(2*time.Second),\n\t\tWithCurlRetry(1))\n}\n\nfunc (t *TailscaleInContainer) Traceroute(ip netip.Addr) (util.Traceroute, error) {\n\tcommand := []string{\n\t\t\"traceroute\",\n\t\tip.String(),\n\t}\n\n\tvar result util.Traceroute\n\n\tstdout, stderr, err := t.Execute(command)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\n\tresult, err = util.ParseTraceroute(stdout + stderr)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\n\treturn result, nil\n}\n\n// WriteFile saves a file inside the Tailscale container.\nfunc (t *TailscaleInContainer) WriteFile(path string, data []byte) error {\n\treturn integrationutil.WriteFileToContainer(t.pool, t.container, path, data)\n}\n\n// SaveLog saves the container's current stdout and stderr logs to paths\n// on the host system.\nfunc (t *TailscaleInContainer) SaveLog(path string) (string, string, error) {\n\t// TODO(kradalby): Assert if tailscale logs contain panics.\n\t// NOTE(enoperm): \`t.WriteLog | countMatchingLines\`\n\t// is probably most of what is needed for that,\n\t// but I'd rather not change the behaviour here,\n\t// as it may affect all the other tests\n\t// I have not otherwise touched.\n\treturn dockertestutil.SaveLog(t.pool, t.container, path)\n}\n\n// WriteLogs writes the current stdout/stderr log of the container to\n// the given io.Writers.\nfunc (t *TailscaleInContainer) WriteLogs(stdout, stderr io.Writer) error {\n\treturn dockertestutil.WriteLog(t.pool, t.container, stdout, stderr)\n}\n\n// ReadFile reads a file from the Tailscale container.\n// It returns the content of the file as a byte slice.\nfunc (t *TailscaleInContainer) ReadFile(path string) ([]byte, error) {\n\ttarBytes, err := integrationutil.FetchPathFromContainer(t.pool, t.container, path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"reading file from container: %w\", err)\n\t}\n\n\tvar out bytes.Buffer\n\n\ttr := tar.NewReader(bytes.NewReader(tarBytes))\n\tfor {\n\t\thdr, err := tr.Next()\n\t\tif err == io.EOF {\n\t\t\tbreak // End of 
archive\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"reading tar header: %w\", err)\n\t\t}\n\n\t\tif !strings.Contains(path, hdr.Name) {\n\t\t\treturn nil, fmt.Errorf(\"file not found in tar archive, looking for: %s, header was: %s\", path, hdr.Name) //nolint:err113\n\t\t}\n\n\t\tif _, err := io.Copy(&out, tr); err != nil { //nolint:gosec,noinlineerr // trusted tar from test container\n\t\t\treturn nil, fmt.Errorf(\"copying file to buffer: %w\", err)\n\t\t}\n\n\t\t// Only support reading the first file\n\t\tbreak //nolint:staticcheck // SA4004: intentional - only read first file\n\t}\n\n\tif out.Len() == 0 {\n\t\treturn nil, errors.New(\"file is empty\") //nolint:err113\n\t}\n\n\treturn out.Bytes(), nil\n}\n\nfunc (t *TailscaleInContainer) GetNodePrivateKey() (*key.NodePrivate, error) {\n\tstate, err := t.ReadFile(paths.DefaultTailscaledStateFile())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"reading state file: %w\", err)\n\t}\n\n\tstore := &mem.Store{}\n\tif err = store.LoadFromJSON(state); err != nil { //nolint:noinlineerr\n\t\treturn nil, fmt.Errorf(\"unmarshalling state file: %w\", err)\n\t}\n\n\tcurrentProfileKey, err := store.ReadState(ipn.CurrentProfileStateKey)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"reading current profile state key: %w\", err)\n\t}\n\n\tcurrentProfile, err := store.ReadState(ipn.StateKey(currentProfileKey))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"reading current profile state: %w\", err)\n\t}\n\n\tp := &ipn.Prefs{}\n\tif err = json.Unmarshal(currentProfile, &p); err != nil { //nolint:noinlineerr\n\t\treturn nil, fmt.Errorf(\"unmarshalling current profile state: %w\", err)\n\t}\n\n\treturn &p.Persist.PrivateNodeKey, nil\n}\n\n// PacketFilter returns the current packet filter rules from the client's network map.\n// This is useful for verifying that policy changes have propagated to the client.\nfunc (t *TailscaleInContainer) PacketFilter() ([]filter.Match, error) {\n\tif !util.TailscaleVersionNewerOrEqual(\"1.56\", t.version) {\n\t\treturn nil, fmt.Errorf(\"tsic.PacketFilter() requires Tailscale 1.56+, current version: %s\", t.version) //nolint:err113\n\t}\n\n\tnm, err := t.Netmap()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"getting netmap: %w\", err)\n\t}\n\n\treturn nm.PacketFilter, nil\n}\n
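\n// An illustrative check (sketch only): after changing the policy on the\n// headscale side, a test could assert that the filter visible on the client\n// has propagated:\n//\n//\tmatches, err := ts.PacketFilter()\n//\tif err == nil && len(matches) == 0 {\n//\t\t// the client received an empty (deny-all) filter\n//\t}\n"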
  },
  {
    "path": "mkdocs.yml",
    "content": "---\nsite_name: Headscale\nsite_url: https://juanfont.github.io/headscale/\nedit_uri: blob/main/docs/ # Change the master branch to main as we are using main as a main branch\nsite_author: Headscale authors\nsite_description: >-\n  An open source, self-hosted implementation of the Tailscale control server.\n\n# Repository\nrepo_name: juanfont/headscale\nrepo_url: https://github.com/juanfont/headscale\n\n# Copyright\ncopyright: Copyright &copy; 2026 Headscale authors\n\n# Configuration\ntheme:\n  name: material\n  features:\n    - announce.dismiss\n    - content.action.edit\n    - content.action.view\n    - content.code.annotate\n    - content.code.copy\n    # - content.tabs.link\n    - content.tooltips\n    # - header.autohide\n    # - navigation.expand\n    - navigation.footer\n    - navigation.indexes\n    # - navigation.instant\n    # - navigation.prune\n    - navigation.sections\n    - navigation.tabs\n    # - navigation.tabs.sticky\n    - navigation.top\n    - navigation.tracking\n    - search.highlight\n    - search.share\n    - search.suggest\n    - toc.follow\n    # - toc.integrate\n  palette:\n    - media: \"(prefers-color-scheme)\"\n      toggle:\n        icon: material/brightness-auto\n        name: Switch to light mode\n    - media: \"(prefers-color-scheme: light)\"\n      scheme: default\n      primary: white\n      toggle:\n        icon: material/brightness-7\n        name: Switch to dark mode\n    - media: \"(prefers-color-scheme: dark)\"\n      scheme: slate\n      toggle:\n        icon: material/brightness-4\n        name: Switch to system preference\n  font:\n    text: Roboto\n    code: Roboto Mono\n  favicon: assets/favicon.png\n  logo: assets/logo/headscale3-dots.svg\n\n# Excludes\nexclude_docs: |\n  /requirements.txt\n\n# Plugins\nplugins:\n  - search:\n      separator: '[\\s\\-,:!=\\[\\]()\"`/]+|\\.(?!\\d)|&[lg]t;|(?!\\b)(?=[A-Z][a-z])'\n  - macros:\n  - include-markdown:\n  - minify:\n      minify_html: true\n  - mike:\n  - social: {}\n  - redirects:\n      redirect_maps:\n        acls.md: ref/acls.md\n        android-client.md: usage/connect/android.md\n        apple-client.md: usage/connect/apple.md\n        dns-records.md: ref/dns.md\n        exit-node.md: ref/routes.md\n        faq.md: about/faq.md\n        iOS-client.md: usage/connect/apple.md#ios\n        oidc.md: ref/oidc.md\n        ref/exit-node.md: ref/routes.md\n        ref/remote-cli.md: ref/api.md#grpc\n        remote-cli.md: ref/api.md#grpc\n        reverse-proxy.md: ref/integration/reverse-proxy.md\n        tls.md: ref/tls.md\n        web-ui.md: ref/integration/web-ui.md\n        windows-client.md: usage/connect/windows.md\n\n# Customization\nextra:\n  version:\n    alias: true\n    provider: mike\n  annotate:\n    json: [.s2]\n  social:\n    - icon: fontawesome/brands/github\n      link: https://github.com/juanfont/headscale\n    - icon: material/coffee\n      link: https://ko-fi.com/headscale\n    - icon: fontawesome/brands/docker\n      link: https://github.com/juanfont/headscale/pkgs/container/headscale\n    - icon: fontawesome/brands/discord\n      link: https://discord.gg/c84AZQhmpx\n  headscale:\n    version: 0.28.0\n\n# Extensions\nmarkdown_extensions:\n  - abbr\n  - admonition\n  - attr_list\n  - def_list\n  - footnotes\n  - md_in_html\n  - toc:\n      permalink: true\n  - pymdownx.arithmatex:\n      generic: true\n  - pymdownx.betterem:\n      smart_enable: all\n  - pymdownx.caret\n  - pymdownx.details\n  - pymdownx.emoji:\n      emoji_generator: 
!!python/name:material.extensions.emoji.to_svg\n      emoji_index: !!python/name:material.extensions.emoji.twemoji\n  - pymdownx.highlight:\n      anchor_linenums: true\n      line_spans: __span\n      pygments_lang_class: true\n  - pymdownx.inlinehilite\n  - pymdownx.keys\n  - pymdownx.magiclink:\n      repo_url_shorthand: true\n      user: squidfunk\n      repo: mkdocs-material\n  - pymdownx.mark\n  - pymdownx.smartsymbols\n  - pymdownx.superfences:\n      custom_fences:\n        - name: mermaid\n          class: mermaid\n          format: !!python/name:pymdownx.superfences.fence_code_format\n  - pymdownx.tabbed:\n      alternate_style: true\n  - pymdownx.tasklist:\n      custom_checkbox: true\n  - pymdownx.tilde\n\n# Page tree\nnav:\n  - Welcome: index.md\n  - About:\n      - FAQ: about/faq.md\n      - Features: about/features.md\n      - Clients: about/clients.md\n      - Getting help: about/help.md\n      - Releases: about/releases.md\n      - Contributing: about/contributing.md\n      - Sponsor: about/sponsor.md\n\n  - Setup:\n      - Requirements and Assumptions: setup/requirements.md\n      - Installation:\n          - Official releases: setup/install/official.md\n          - Community packages: setup/install/community.md\n          - Container: setup/install/container.md\n          - Build from source: setup/install/source.md\n      - Upgrade: setup/upgrade.md\n  - Usage:\n      - Getting started: usage/getting-started.md\n      - Connect a node:\n          - Android: usage/connect/android.md\n          - Apple: usage/connect/apple.md\n          - Windows: usage/connect/windows.md\n  - Reference:\n      - Configuration: ref/configuration.md\n      - Registration methods: ref/registration.md\n      - OpenID Connect: ref/oidc.md\n      - Routes: ref/routes.md\n      - TLS: ref/tls.md\n      - ACLs: ref/acls.md\n      - DNS: ref/dns.md\n      - DERP: ref/derp.md\n      - API: ref/api.md\n      - Tags: ref/tags.md\n      - Debug: ref/debug.md\n      - Integration:\n          - Reverse proxy: ref/integration/reverse-proxy.md\n          - Web UI: ref/integration/web-ui.md\n          - Tools: ref/integration/tools.md\n"
  },
  {
    "path": "nix/README.md",
    "content": "# Headscale NixOS Module\n\nThis directory contains the NixOS module for Headscale.\n\n## Rationale\n\nThe module is maintained in this repository to keep the code and module\nsynchronized at the same commit. This allows faster iteration and ensures the\nmodule stays compatible with the latest Headscale changes. All changes should\naim to be upstreamed to nixpkgs.\n\n## Files\n\n- **[`module.nix`](./module.nix)** - The NixOS module implementation\n- **[`example-configuration.nix`](./example-configuration.nix)** - Example\n  configuration demonstrating all major features\n- **[`tests/`](./tests/)** - NixOS integration tests\n\n## Usage\n\nAdd to your flake inputs:\n\n```nix\ninputs.headscale.url = \"github:juanfont/headscale\";\n```\n\nThen import the module:\n\n```nix\nimports = [ inputs.headscale.nixosModules.default ];\n```\n\nSee [`example-configuration.nix`](./example-configuration.nix) for configuration\noptions.\n\n## Upstream\n\n- [nixpkgs module](https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/networking/headscale.nix)\n- [nixpkgs package](https://github.com/NixOS/nixpkgs/blob/master/pkgs/by-name/he/headscale/package.nix)\n\nThe module in this repository may be newer than the nixpkgs version.\n"
  },
  {
    "path": "nix/example-configuration.nix",
    "content": "# Example NixOS configuration using the headscale module\n#\n# This file demonstrates how to use the headscale NixOS module from this flake.\n# To use in your own configuration, add this to your flake.nix inputs:\n#\n#   inputs.headscale.url = \"github:juanfont/headscale\";\n#\n# Then import the module:\n#\n#   imports = [ inputs.headscale.nixosModules.default ];\n#\n\n{ config, pkgs, ... }:\n\n{\n  # Import the headscale module\n  # In a real configuration, this would come from the flake input\n  # imports = [ inputs.headscale.nixosModules.default ];\n\n  services.headscale = {\n    enable = true;\n\n    # Optional: Use a specific package (defaults to pkgs.headscale)\n    # package = pkgs.headscale;\n\n    # Listen on all interfaces (default is 127.0.0.1)\n    address = \"0.0.0.0\";\n    port = 8080;\n\n    settings = {\n      # The URL clients will connect to\n      server_url = \"https://headscale.example.com\";\n\n      # IP prefixes for the tailnet\n      # These use the freeform settings - you can set any headscale config option\n      prefixes = {\n        v4 = \"100.64.0.0/10\";\n        v6 = \"fd7a:115c:a1e0::/48\";\n        allocation = \"sequential\";\n      };\n\n      # DNS configuration with MagicDNS\n      dns = {\n        magic_dns = true;\n        base_domain = \"tailnet.example.com\";\n\n        # Whether to override client's local DNS settings (default: true)\n        # When true, nameservers.global must be set\n        override_local_dns = true;\n\n        nameservers = {\n          global = [ \"1.1.1.1\" \"8.8.8.8\" ];\n        };\n      };\n\n      # DERP (relay) configuration\n      derp = {\n        # Use default Tailscale DERP servers\n        urls = [ \"https://controlplane.tailscale.com/derpmap/default\" ];\n        auto_update_enabled = true;\n        update_frequency = \"24h\";\n\n        # Optional: Run your own DERP server\n        # server = {\n        #   enabled = true;\n        #   region_id = 999;\n        #   stun_listen_addr = \"0.0.0.0:3478\";\n        # };\n      };\n\n      # Database configuration (SQLite is recommended)\n      database = {\n        type = \"sqlite\";\n        sqlite = {\n          path = \"/var/lib/headscale/db.sqlite\";\n          write_ahead_log = true;\n        };\n\n        # PostgreSQL example (not recommended for new deployments)\n        # type = \"postgres\";\n        # postgres = {\n        #   host = \"localhost\";\n        #   port = 5432;\n        #   name = \"headscale\";\n        #   user = \"headscale\";\n        #   password_file = \"/run/secrets/headscale-db-password\";\n        # };\n      };\n\n      # Logging configuration\n      log = {\n        level = \"info\";\n        format = \"text\";\n      };\n\n      # Optional: OIDC authentication\n      # oidc = {\n      #   issuer = \"https://accounts.google.com\";\n      #   client_id = \"your-client-id\";\n      #   client_secret_path = \"/run/secrets/oidc-client-secret\";\n      #   scope = [ \"openid\" \"profile\" \"email\" ];\n      #   allowed_domains = [ \"example.com\" ];\n      # };\n\n      # Optional: Let's Encrypt TLS certificates\n      # tls_letsencrypt_hostname = \"headscale.example.com\";\n      # tls_letsencrypt_challenge_type = \"HTTP-01\";\n\n      # Optional: Provide your own TLS certificates\n      # tls_cert_path = \"/path/to/cert.pem\";\n      # tls_key_path = \"/path/to/key.pem\";\n\n      # ACL policy configuration\n      policy = {\n        mode = \"file\";\n        path = \"/var/lib/headscale/policy.hujson\";\n      };\n\n 
     # You can add ANY headscale configuration option here thanks to freeform settings\n      # For example, experimental features or settings not explicitly defined above:\n      # experimental_feature = true;\n      # custom_setting = \"value\";\n    };\n  };\n\n  # Optional: Open firewall ports\n  networking.firewall = {\n    allowedTCPPorts = [ 8080 ];\n    # If running a DERP server:\n    # allowedUDPPorts = [ 3478 ];\n  };\n\n  # Optional: Use with nginx reverse proxy for TLS termination\n  # services.nginx = {\n  #   enable = true;\n  #   virtualHosts.\"headscale.example.com\" = {\n  #     enableACME = true;\n  #     forceSSL = true;\n  #     locations.\"/\" = {\n  #       proxyPass = \"http://127.0.0.1:8080\";\n  #       proxyWebsockets = true;\n  #     };\n  #   };\n  # };\n}\n"
  },
  {
    "path": "nix/module.nix",
    "content": "{ config\n, lib\n, pkgs\n, ...\n}:\nlet\n  cfg = config.services.headscale;\n\n  dataDir = \"/var/lib/headscale\";\n  runDir = \"/run/headscale\";\n\n  cliConfig = {\n    # Turn off update checks since the origin of our package\n    # is nixpkgs and not Github.\n    disable_check_updates = true;\n\n    unix_socket = \"${runDir}/headscale.sock\";\n  };\n\n  settingsFormat = pkgs.formats.yaml { };\n  cliConfigFile = settingsFormat.generate \"headscale.yaml\" cliConfig;\n\n  assertRemovedOption = option: message: {\n    assertion = !lib.hasAttrByPath option cfg;\n    message =\n      \"The option `services.headscale.${lib.options.showOption option}` was removed. \" + message;\n  };\nin\n{\n  # Disable the upstream NixOS module to prevent conflicts\n  disabledModules = [ \"services/networking/headscale.nix\" ];\n\n  options = {\n    services.headscale = {\n      enable = lib.mkEnableOption \"headscale, Open Source coordination server for Tailscale\";\n\n      package = lib.mkPackageOption pkgs \"headscale\" { };\n\n      configFile = lib.mkOption {\n        type = lib.types.path;\n        readOnly = true;\n        default = settingsFormat.generate \"headscale.yaml\" cfg.settings;\n        defaultText = lib.literalExpression ''(pkgs.formats.yaml { }).generate \"headscale.yaml\" config.services.headscale.settings'';\n        description = ''\n          Path to the configuration file of headscale.\n        '';\n      };\n\n      user = lib.mkOption {\n        default = \"headscale\";\n        type = lib.types.str;\n        description = ''\n          User account under which headscale runs.\n\n          ::: {.note}\n          If left as the default value this user will automatically be created\n          on system activation, otherwise you are responsible for\n          ensuring the user exists before the headscale service starts.\n          :::\n        '';\n      };\n\n      group = lib.mkOption {\n        default = \"headscale\";\n        type = lib.types.str;\n        description = ''\n          Group under which headscale runs.\n\n          ::: {.note}\n          If left as the default value this group will automatically be created\n          on system activation, otherwise you are responsible for\n          ensuring the user exists before the headscale service starts.\n          :::\n        '';\n      };\n\n      address = lib.mkOption {\n        type = lib.types.str;\n        default = \"127.0.0.1\";\n        description = ''\n          Listening address of headscale.\n        '';\n        example = \"0.0.0.0\";\n      };\n\n      port = lib.mkOption {\n        type = lib.types.port;\n        default = 8080;\n        description = ''\n          Listening port of headscale.\n        '';\n        example = 443;\n      };\n\n      settings = lib.mkOption {\n        description = ''\n          Overrides to {file}`config.yaml` as a Nix attribute set.\n          Check the [example config](https://github.com/juanfont/headscale/blob/main/config-example.yaml)\n          for possible options.\n        '';\n        type = lib.types.submodule {\n          freeformType = settingsFormat.type;\n\n          options = {\n            server_url = lib.mkOption {\n              type = lib.types.str;\n              default = \"http://127.0.0.1:8080\";\n              description = ''\n                The url clients will connect to.\n              '';\n              example = \"https://myheadscale.example.com:443\";\n            };\n\n            noise.private_key_path = lib.mkOption {\n        
      type = lib.types.path;\n              default = \"${dataDir}/noise_private.key\";\n              description = ''\n                Path to noise private key file, generated automatically if it does not exist.\n              '';\n            };\n\n            prefixes =\n              let\n                prefDesc = ''\n                  Each prefix consists of either an IPv4 or IPv6 address,\n                  and the associated prefix length, delimited by a slash.\n                  It must be within IP ranges supported by the Tailscale\n                  client - i.e., subnets of 100.64.0.0/10 and fd7a:115c:a1e0::/48.\n                '';\n              in\n              {\n                v4 = lib.mkOption {\n                  type = lib.types.str;\n                  default = \"100.64.0.0/10\";\n                  description = prefDesc;\n                };\n\n                v6 = lib.mkOption {\n                  type = lib.types.str;\n                  default = \"fd7a:115c:a1e0::/48\";\n                  description = prefDesc;\n                };\n\n                allocation = lib.mkOption {\n                  type = lib.types.enum [\n                    \"sequential\"\n                    \"random\"\n                  ];\n                  example = \"random\";\n                  default = \"sequential\";\n                  description = ''\n                    Strategy used for allocation of IPs to nodes, available options:\n                    - sequential (default): assigns the next free IP from the previous given IP.\n                    - random: assigns the next free IP from a pseudo-random IP generator (crypto/rand).\n                  '';\n                };\n              };\n\n            derp = {\n              urls = lib.mkOption {\n                type = lib.types.listOf lib.types.str;\n                default = [ \"https://controlplane.tailscale.com/derpmap/default\" ];\n                description = ''\n                  List of urls containing DERP maps.\n                  See [How Tailscale works](https://tailscale.com/blog/how-tailscale-works/) for more information on DERP maps.\n                '';\n              };\n\n              paths = lib.mkOption {\n                type = lib.types.listOf lib.types.path;\n                default = [ ];\n                description = ''\n                  List of file paths containing DERP maps.\n                  See [How Tailscale works](https://tailscale.com/blog/how-tailscale-works/) for more information on DERP maps.\n                '';\n              };\n\n              auto_update_enabled = lib.mkOption {\n                type = lib.types.bool;\n                default = true;\n                description = ''\n                  Whether to automatically update DERP maps on a set frequency.\n                '';\n                example = false;\n              };\n\n              update_frequency = lib.mkOption {\n                type = lib.types.str;\n                default = \"24h\";\n                description = ''\n                  Frequency to update DERP maps.\n                '';\n                example = \"5m\";\n              };\n\n              server.private_key_path = lib.mkOption {\n                type = lib.types.path;\n                default = \"${dataDir}/derp_server_private.key\";\n                description = ''\n                  Path to derp private key file, generated automatically if it does not exist.\n                '';\n              };\n            };\n\n            
ephemeral_node_inactivity_timeout = lib.mkOption {\n              type = lib.types.str;\n              default = \"30m\";\n              description = ''\n                Time before an inactive ephemeral node is deleted.\n              '';\n              example = \"5m\";\n            };\n\n            database = {\n              type = lib.mkOption {\n                type = lib.types.enum [\n                  \"sqlite\"\n                  \"sqlite3\"\n                  \"postgres\"\n                ];\n                example = \"postgres\";\n                default = \"sqlite\";\n                description = ''\n                  Database engine to use.\n                  Please note that using Postgres is highly discouraged as it is only supported for legacy reasons.\n                  All new development, testing and optimisations are done with SQLite in mind.\n                '';\n              };\n\n              sqlite = {\n                path = lib.mkOption {\n                  type = lib.types.nullOr lib.types.str;\n                  default = \"${dataDir}/db.sqlite\";\n                  description = \"Path to the sqlite3 database file.\";\n                };\n\n                write_ahead_log = lib.mkOption {\n                  type = lib.types.bool;\n                  default = true;\n                  description = ''\n                    Enable WAL mode for SQLite. This is recommended for production environments.\n                    <https://www.sqlite.org/wal.html>\n                  '';\n                  example = true;\n                };\n              };\n\n              postgres = {\n                host = lib.mkOption {\n                  type = lib.types.nullOr lib.types.str;\n                  default = null;\n                  example = \"127.0.0.1\";\n                  description = \"Database host address.\";\n                };\n\n                port = lib.mkOption {\n                  type = lib.types.nullOr lib.types.port;\n                  default = null;\n                  example = 5432;\n                  description = \"Database host port.\";\n                };\n\n                name = lib.mkOption {\n                  type = lib.types.nullOr lib.types.str;\n                  default = null;\n                  example = \"headscale\";\n                  description = \"Database name.\";\n                };\n\n                user = lib.mkOption {\n                  type = lib.types.nullOr lib.types.str;\n                  default = null;\n                  example = \"headscale\";\n                  description = \"Database user.\";\n                };\n\n                password_file = lib.mkOption {\n                  type = lib.types.nullOr lib.types.path;\n                  default = null;\n                  example = \"/run/keys/headscale-dbpassword\";\n                  description = ''\n                    A file containing the password corresponding to\n                    {option}\`database.user\`.\n                  '';\n                };\n              };\n            };\n\n            log = {\n              level = lib.mkOption {\n                type = lib.types.str;\n                default = \"info\";\n                description = ''\n                  headscale log level.\n                '';\n                example = \"debug\";\n              };\n\n              format = lib.mkOption {\n                type = lib.types.str;\n                default = \"text\";\n                description = ''\n                  headscale log 
format.\n                '';\n                example = \"json\";\n              };\n            };\n\n            dns = {\n              magic_dns = lib.mkOption {\n                type = lib.types.bool;\n                default = true;\n                description = ''\n                  Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).\n                '';\n                example = false;\n              };\n\n              base_domain = lib.mkOption {\n                type = lib.types.str;\n                default = \"\";\n                description = ''\n                  Defines the base domain to create the hostnames for MagicDNS.\n                  This domain must be different from the {option}`server_url`\n                  domain.\n                  {option}`base_domain` must be a FQDN, without the trailing dot.\n                  The FQDN of the hosts will be `hostname.base_domain` (e.g.\n                  `myhost.tailnet.example.com`).\n                '';\n                example = \"tailnet.example.com\";\n              };\n\n              override_local_dns = lib.mkOption {\n                type = lib.types.bool;\n                default = true;\n                description = ''\n                  Whether to [override clients' DNS servers](https://tailscale.com/kb/1054/dns#override-dns-servers).\n                '';\n                example = false;\n              };\n\n              nameservers = {\n                global = lib.mkOption {\n                  type = lib.types.listOf lib.types.str;\n                  default = [ ];\n                  description = ''\n                    List of nameservers to pass to Tailscale clients.\n                  '';\n                };\n              };\n\n              split = lib.mkOption {\n                type = lib.types.attrsOf (lib.types.listOf lib.types.str);\n                default = { };\n                description = ''\n                  Split DNS configuration (map of domains and which DNS server to use for each).\n                  See <https://tailscale.com/kb/1054/dns/>.\n                '';\n                example = {\n                  \"foo.bar.com\" = [ \"1.1.1.1\" ];\n                };\n              };\n\n              extra_records = lib.mkOption {\n                type = lib.types.nullOr (\n                  lib.types.listOf (\n                    lib.types.submodule {\n                      options = {\n                        name = lib.mkOption {\n                          type = lib.types.str;\n                          description = \"DNS record name.\";\n                          example = \"grafana.tailnet.example.com\";\n                        };\n                        type = lib.mkOption {\n                          type = lib.types.enum [\n                            \"A\"\n                            \"AAAA\"\n                          ];\n                          description = \"DNS record type.\";\n                          example = \"A\";\n                        };\n                        value = lib.mkOption {\n                          type = lib.types.str;\n                          description = \"DNS record value (IP address).\";\n                          example = \"100.64.0.3\";\n                        };\n                      };\n                    }\n                  )\n                );\n                default = null;\n                description = ''\n                  Extra DNS records to expose to clients.\n                '';\n                
example = ''\n                  [ {\n                    name = \"grafana.tailnet.example.com\";\n                    type = \"A\";\n                    value = \"100.64.0.3\";\n                  } ]\n                '';\n              };\n\n              search_domains = lib.mkOption {\n                type = lib.types.listOf lib.types.str;\n                default = [ ];\n                description = ''\n                  Search domains to inject to Tailscale clients.\n                '';\n                example = [ \"mydomain.internal\" ];\n              };\n            };\n\n            oidc = {\n              issuer = lib.mkOption {\n                type = lib.types.str;\n                default = \"\";\n                description = ''\n                  URL to OpenID issuer.\n                '';\n                example = \"https://openid.example.com\";\n              };\n\n              client_id = lib.mkOption {\n                type = lib.types.str;\n                default = \"\";\n                description = ''\n                  OpenID Connect client ID.\n                '';\n              };\n\n              client_secret_path = lib.mkOption {\n                type = lib.types.nullOr lib.types.str;\n                default = null;\n                description = ''\n                  Path to OpenID Connect client secret file. Expands environment variables in format ''${VAR}.\n                '';\n              };\n\n              scope = lib.mkOption {\n                type = lib.types.listOf lib.types.str;\n                default = [\n                  \"openid\"\n                  \"profile\"\n                  \"email\"\n                ];\n                description = ''\n                  Scopes used in the OIDC flow.\n                '';\n              };\n\n              extra_params = lib.mkOption {\n                type = lib.types.attrsOf lib.types.str;\n                default = { };\n                description = ''\n                  Custom query parameters to send with the Authorize Endpoint request.\n                '';\n                example = {\n                  domain_hint = \"example.com\";\n                };\n              };\n\n              allowed_domains = lib.mkOption {\n                type = lib.types.listOf lib.types.str;\n                default = [ ];\n                description = ''\n                  Allowed principal domains. 
If an authenticated user's domain\n                  is not in this list, the authentication request will be rejected.\n                '';\n                example = [ \"example.com\" ];\n              };\n\n              allowed_users = lib.mkOption {\n                type = lib.types.listOf lib.types.str;\n                default = [ ];\n                description = ''\n                  Users allowed to authenticate even if not in allowed_domains.\n                '';\n                example = [ \"alice@example.com\" ];\n              };\n\n              pkce = {\n                enabled = lib.mkOption {\n                  type = lib.types.bool;\n                  default = false;\n                  description = ''\n                    Enable or disable PKCE (Proof Key for Code Exchange) support.\n                    PKCE adds an additional layer of security to the OAuth 2.0\n                    authorization code flow by preventing authorization code\n                    interception attacks.\n                    See <https://datatracker.ietf.org/doc/html/rfc7636>.\n                  '';\n                  example = true;\n                };\n\n                method = lib.mkOption {\n                  type = lib.types.str;\n                  default = \"S256\";\n                  description = ''\n                    PKCE method to use:\n                      - plain: Use plain code verifier\n                      - S256: Use SHA256 hashed code verifier (default, recommended)\n                  '';\n                };\n              };\n            };\n\n            tls_letsencrypt_hostname = lib.mkOption {\n              type = lib.types.nullOr lib.types.str;\n              default = \"\";\n              description = ''\n                Domain name to request a TLS certificate for.\n              '';\n            };\n\n            tls_letsencrypt_challenge_type = lib.mkOption {\n              type = lib.types.enum [\n                \"TLS-ALPN-01\"\n                \"HTTP-01\"\n              ];\n              default = \"HTTP-01\";\n              description = ''\n                Type of ACME challenge to use. Currently supported types:\n                `HTTP-01` or `TLS-ALPN-01`.\n              '';\n            };\n\n            tls_letsencrypt_listen = lib.mkOption {\n              type = lib.types.nullOr lib.types.str;\n              default = \":http\";\n              description = ''\n                When the HTTP-01 challenge is chosen, letsencrypt must set up a\n                verification endpoint, and it will be listening on:\n                `:http = port 80`.\n              '';\n            };\n\n            tls_cert_path = lib.mkOption {\n              type = lib.types.nullOr lib.types.path;\n              default = null;\n              description = ''\n                Path to an already created certificate.\n              '';\n            };\n\n            tls_key_path = lib.mkOption {\n              type = lib.types.nullOr lib.types.path;\n              default = null;\n              description = ''\n                Path to the key for an already created certificate.\n              '';\n            };\n\n            policy = {\n              mode = lib.mkOption {\n                type = lib.types.enum [\n                  \"file\"\n                  \"database\"\n                ];\n                default = \"file\";\n                description = ''\n                  The mode, either \"file\" or \"database\", defines\n                  where the ACL policies are stored and read 
from.\n                '';\n              };\n\n              path = lib.mkOption {\n                type = lib.types.nullOr lib.types.path;\n                default = null;\n                description = ''\n                  If the mode is set to \"file\", the path to a\n                  HuJSON file containing ACL policies.\n                '';\n              };\n            };\n          };\n        };\n      };\n    };\n  };\n\n  imports = with lib; [\n    (mkRenamedOptionModule\n      [ \"services\" \"headscale\" \"derp\" \"autoUpdate\" ]\n      [ \"services\" \"headscale\" \"settings\" \"derp\" \"auto_update_enabled\" ]\n    )\n    (mkRenamedOptionModule\n      [ \"services\" \"headscale\" \"derp\" \"auto_update_enable\" ]\n      [ \"services\" \"headscale\" \"settings\" \"derp\" \"auto_update_enabled\" ]\n    )\n    (mkRenamedOptionModule\n      [ \"services\" \"headscale\" \"derp\" \"paths\" ]\n      [ \"services\" \"headscale\" \"settings\" \"derp\" \"paths\" ]\n    )\n    (mkRenamedOptionModule\n      [ \"services\" \"headscale\" \"derp\" \"updateFrequency\" ]\n      [ \"services\" \"headscale\" \"settings\" \"derp\" \"update_frequency\" ]\n    )\n    (mkRenamedOptionModule\n      [ \"services\" \"headscale\" \"derp\" \"urls\" ]\n      [ \"services\" \"headscale\" \"settings\" \"derp\" \"urls\" ]\n    )\n    (mkRenamedOptionModule\n      [ \"services\" \"headscale\" \"ephemeralNodeInactivityTimeout\" ]\n      [ \"services\" \"headscale\" \"settings\" \"ephemeral_node_inactivity_timeout\" ]\n    )\n    (mkRenamedOptionModule\n      [ \"services\" \"headscale\" \"logLevel\" ]\n      [ \"services\" \"headscale\" \"settings\" \"log\" \"level\" ]\n    )\n    (mkRenamedOptionModule\n      [ \"services\" \"headscale\" \"openIdConnect\" \"clientId\" ]\n      [ \"services\" \"headscale\" \"settings\" \"oidc\" \"client_id\" ]\n    )\n    (mkRenamedOptionModule\n      [ \"services\" \"headscale\" \"openIdConnect\" \"clientSecretFile\" ]\n      [ \"services\" \"headscale\" \"settings\" \"oidc\" \"client_secret_path\" ]\n    )\n    (mkRenamedOptionModule\n      [ \"services\" \"headscale\" \"openIdConnect\" \"issuer\" ]\n      [ \"services\" \"headscale\" \"settings\" \"oidc\" \"issuer\" ]\n    )\n    (mkRenamedOptionModule\n      [ \"services\" \"headscale\" \"serverUrl\" ]\n      [ \"services\" \"headscale\" \"settings\" \"server_url\" ]\n    )\n    (mkRenamedOptionModule\n      [ \"services\" \"headscale\" \"tls\" \"certFile\" ]\n      [ \"services\" \"headscale\" \"settings\" \"tls_cert_path\" ]\n    )\n    (mkRenamedOptionModule\n      [ \"services\" \"headscale\" \"tls\" \"keyFile\" ]\n      [ \"services\" \"headscale\" \"settings\" \"tls_key_path\" ]\n    )\n    (mkRenamedOptionModule\n      [ \"services\" \"headscale\" \"tls\" \"letsencrypt\" \"challengeType\" ]\n      [ \"services\" \"headscale\" \"settings\" \"tls_letsencrypt_challenge_type\" ]\n    )\n    (mkRenamedOptionModule\n      [ \"services\" \"headscale\" \"tls\" \"letsencrypt\" \"hostname\" ]\n      [ \"services\" \"headscale\" \"settings\" \"tls_letsencrypt_hostname\" ]\n    )\n    (mkRenamedOptionModule\n      [ \"services\" \"headscale\" \"tls\" \"letsencrypt\" \"httpListen\" ]\n      [ \"services\" \"headscale\" \"settings\" \"tls_letsencrypt_listen\" ]\n    )\n\n    (mkRemovedOptionModule [ \"services\" \"headscale\" \"openIdConnect\" \"domainMap\" ] ''\n      Headscale no longer uses domain_map. 
If you're using an old version of headscale, you can still set this option via services.headscale.settings.oidc.domain_map.\n    '')\n  ];\n\n  config = lib.mkIf cfg.enable {\n    assertions = [\n      {\n        assertion = with cfg.settings; dns.magic_dns -> dns.base_domain != \"\";\n        message = \"dns.base_domain must be set when using MagicDNS\";\n      }\n      {\n        assertion = with cfg.settings; dns.override_local_dns -> dns.nameservers.global != [ ];\n        message = \"dns.nameservers.global must be set when overriding local DNS\";\n      }\n      (assertRemovedOption [ \"settings\" \"acl_policy_path\" ] \"Use `policy.path` instead.\")\n      (assertRemovedOption [ \"settings\" \"db_host\" ] \"Use `database.postgres.host` instead.\")\n      (assertRemovedOption [ \"settings\" \"db_name\" ] \"Use `database.postgres.name` instead.\")\n      (assertRemovedOption [\n        \"settings\"\n        \"db_password_file\"\n      ] \"Use `database.postgres.password_file` instead.\")\n      (assertRemovedOption [ \"settings\" \"db_path\" ] \"Use `database.sqlite.path` instead.\")\n      (assertRemovedOption [ \"settings\" \"db_port\" ] \"Use `database.postgres.port` instead.\")\n      (assertRemovedOption [ \"settings\" \"db_type\" ] \"Use `database.type` instead.\")\n      (assertRemovedOption [ \"settings\" \"db_user\" ] \"Use `database.postgres.user` instead.\")\n      (assertRemovedOption [ \"settings\" \"dns_config\" ] \"Use `dns` instead.\")\n      (assertRemovedOption [ \"settings\" \"dns_config\" \"domains\" ] \"Use `dns.search_domains` instead.\")\n      (assertRemovedOption [\n        \"settings\"\n        \"dns_config\"\n        \"nameservers\"\n      ] \"Use `dns.nameservers.global` instead.\")\n      (assertRemovedOption [\n        \"settings\"\n        \"oidc\"\n        \"strip_email_domain\"\n      ] \"The strip_email_domain option was removed upstream.\")\n    ];\n\n    services.headscale.settings = lib.mkMerge [\n      cliConfig\n      {\n        listen_addr = lib.mkDefault \"${cfg.address}:${toString cfg.port}\";\n\n        tls_letsencrypt_cache_dir = \"${dataDir}/.cache\";\n      }\n    ];\n\n    environment = {\n      # Headscale CLI needs a minimal config to be able to locate the unix socket\n      # to talk to the server instance.\n      etc.\"headscale/config.yaml\".source = cliConfigFile;\n\n      systemPackages = [ cfg.package ];\n    };\n\n    users.groups.headscale = lib.mkIf (cfg.group == \"headscale\") { };\n\n    users.users.headscale = lib.mkIf (cfg.user == \"headscale\") {\n      description = \"headscale user\";\n      home = dataDir;\n      group = cfg.group;\n      isSystemUser = true;\n    };\n\n    systemd.services.headscale = {\n      description = \"headscale coordination server for Tailscale\";\n      wants = [ \"network-online.target\" ];\n      after = [ \"network-online.target\" ];\n      wantedBy = [ \"multi-user.target\" ];\n\n      script = ''\n        ${lib.optionalString (cfg.settings.database.postgres.password_file != null) ''\n          export HEADSCALE_DATABASE_POSTGRES_PASS=\"$(head -n1 ${lib.escapeShellArg cfg.settings.database.postgres.password_file})\"\n        ''}\n\n        exec ${lib.getExe cfg.package} serve --config ${cfg.configFile}\n      '';\n\n      serviceConfig =\n        let\n          capabilityBoundingSet = [ \"CAP_CHOWN\" ] ++ lib.optional (cfg.port < 1024) \"CAP_NET_BIND_SERVICE\";\n        in\n        {\n          Restart = \"always\";\n          RestartSec = \"5s\";\n          Type = \"simple\";\n          User 
= cfg.user;\n          Group = cfg.group;\n\n          # Hardening options\n          RuntimeDirectory = \"headscale\";\n          # Allow headscale group access so users can be added and use the CLI.\n          RuntimeDirectoryMode = \"0750\";\n\n          StateDirectory = \"headscale\";\n          StateDirectoryMode = \"0750\";\n\n          ProtectSystem = \"strict\";\n          ProtectHome = true;\n          PrivateTmp = true;\n          PrivateDevices = true;\n          ProtectKernelTunables = true;\n          ProtectControlGroups = true;\n          RestrictSUIDSGID = true;\n          PrivateMounts = true;\n          ProtectKernelModules = true;\n          ProtectKernelLogs = true;\n          ProtectHostname = true;\n          ProtectClock = true;\n          ProtectProc = \"invisible\";\n          ProcSubset = \"pid\";\n          RestrictNamespaces = true;\n          RemoveIPC = true;\n          UMask = \"0077\";\n\n          CapabilityBoundingSet = capabilityBoundingSet;\n          AmbientCapabilities = capabilityBoundingSet;\n          NoNewPrivileges = true;\n          LockPersonality = true;\n          RestrictRealtime = true;\n          SystemCallFilter = [\n            \"@system-service\"\n            \"~@privileged\"\n            \"@chown\"\n          ];\n          SystemCallArchitectures = \"native\";\n          RestrictAddressFamilies = \"AF_INET AF_INET6 AF_UNIX\";\n        };\n    };\n  };\n\n  meta.maintainers = with lib.maintainers; [\n    kradalby\n    misterio77\n  ];\n}\n"
  },
  {
    "path": "nix/tests/headscale.nix",
    "content": "{ pkgs, lib, ... }:\nlet\n  tls-cert = pkgs.runCommand \"selfSignedCerts\" { buildInputs = [ pkgs.openssl ]; } ''\n    openssl req \\\n      -x509 -newkey rsa:4096 -sha256 -days 365 \\\n      -nodes -out cert.pem -keyout key.pem \\\n      -subj '/CN=headscale' -addext \"subjectAltName=DNS:headscale\"\n\n    mkdir -p $out\n    cp key.pem cert.pem $out\n  '';\nin\n{\n  name = \"headscale\";\n  meta.maintainers = with lib.maintainers; [\n    kradalby\n    misterio77\n  ];\n\n  nodes =\n    let\n      headscalePort = 8080;\n      stunPort = 3478;\n      peer = {\n        services.tailscale.enable = true;\n        security.pki.certificateFiles = [ \"${tls-cert}/cert.pem\" ];\n      };\n    in\n    {\n      peer1 = peer;\n      peer2 = peer;\n\n      headscale = {\n        services = {\n          headscale = {\n            enable = true;\n            port = headscalePort;\n            settings = {\n              server_url = \"https://headscale\";\n              ip_prefixes = [ \"100.64.0.0/10\" ];\n              derp = {\n                server = {\n                  enabled = true;\n                  region_id = 999;\n                  stun_listen_addr = \"0.0.0.0:${toString stunPort}\";\n                };\n                urls = [ ];\n              };\n              dns = {\n                base_domain = \"tailnet\";\n                extra_records = [\n                  {\n                    name = \"foo.bar\";\n                    type = \"A\";\n                    value = \"100.64.0.2\";\n                  }\n                ];\n                override_local_dns = false;\n              };\n            };\n          };\n          nginx = {\n            enable = true;\n            virtualHosts.headscale = {\n              addSSL = true;\n              sslCertificate = \"${tls-cert}/cert.pem\";\n              sslCertificateKey = \"${tls-cert}/key.pem\";\n              locations.\"/\" = {\n                proxyPass = \"http://127.0.0.1:${toString headscalePort}\";\n                proxyWebsockets = true;\n              };\n            };\n          };\n        };\n        networking.firewall = {\n          allowedTCPPorts = [\n            80\n            443\n          ];\n          allowedUDPPorts = [ stunPort ];\n        };\n        environment.systemPackages = [ pkgs.headscale ];\n      };\n    };\n\n  testScript = ''\n    start_all()\n    headscale.wait_for_unit(\"headscale\")\n    headscale.wait_for_open_port(443)\n\n    # Create headscale user and preauth-key\n    headscale.succeed(\"headscale users create test\")\n    authkey = headscale.succeed(\"headscale preauthkeys -u 1 create --reusable\")\n\n    # Connect peers\n    up_cmd = f\"tailscale up --login-server 'https://headscale' --auth-key {authkey}\"\n    peer1.execute(up_cmd)\n    peer2.execute(up_cmd)\n\n    # Check that they are reachable from the tailnet\n    peer1.wait_until_succeeds(\"tailscale ping peer2\")\n    peer2.wait_until_succeeds(\"tailscale ping peer1.tailnet\")\n    assert (res := peer1.wait_until_succeeds(\"${lib.getExe pkgs.dig} +short foo.bar\").strip()) == \"100.64.0.2\", f\"Domain {res} did not match 100.64.0.2\"\n  '';\n}\n"
  },
  {
    "path": "packaging/README.md",
    "content": "# Packaging\n\nWe use [nFPM](https://nfpm.goreleaser.com/) for making `.deb` packages.\n\nThis folder contains files we need to package with these releases.\n"
  },
  {
    "path": "packaging/deb/postinst",
    "content": "#!/bin/sh\n# postinst script for headscale.\n\nset -e\n\n# Summary of how this script can be called:\n#        * <postinst> 'configure' <most-recently-configured-version>\n#        * <old-postinst> 'abort-upgrade' <new version>\n#        * <conflictor's-postinst> 'abort-remove' 'in-favour' <package>\n#          <new-version>\n#        * <postinst> 'abort-remove'\n#        * <deconfigured's-postinst> 'abort-deconfigure' 'in-favour'\n#          <failed-install-package> <version> 'removing'\n#          <conflicting-package> <version>\n# for details, see https://www.debian.org/doc/debian-policy/ or\n# the debian-policy package.\n\nHEADSCALE_USER=\"headscale\"\nHEADSCALE_GROUP=\"headscale\"\nHEADSCALE_HOME_DIR=\"/var/lib/headscale\"\nHEADSCALE_SHELL=\"/usr/sbin/nologin\"\nHEADSCALE_SERVICE=\"headscale.service\"\n\ncase \"$1\" in\n    configure)\n      groupadd --force --system \"$HEADSCALE_GROUP\"\n      if ! id -u \"$HEADSCALE_USER\" >/dev/null 2>&1; then\n        useradd --system --shell \"$HEADSCALE_SHELL\" \\\n          --gid \"$HEADSCALE_GROUP\" --home-dir \"$HEADSCALE_HOME_DIR\" \\\n          --comment \"headscale default user\" \"$HEADSCALE_USER\"\n      fi\n\n      if dpkg --compare-versions \"$2\" lt-nl \"0.27\"; then\n        # < 0.24.0-beta.1 used /home/headscale as home and /bin/sh as shell.\n        # The directory /home/headscale was not created by the package or\n        # useradd but the service always used /var/lib/headscale which was\n        # always shipped by the package as empty directory. Previous versions\n        # of the package did not update the user account properties.\n        usermod --home \"$HEADSCALE_HOME_DIR\" --shell \"$HEADSCALE_SHELL\" \\\n          \"$HEADSCALE_USER\" >/dev/null\n      fi\n\n      if dpkg --compare-versions \"$2\" lt-nl \"0.27\" \\\n        && [ $(id --user \"$HEADSCALE_USER\") -ge 1000 ] \\\n        && [ $(id --group \"$HEADSCALE_GROUP\") -ge 1000 ]; then\n        # < 0.26.0-beta.1 created a regular user/group to run headscale.\n        # Previous versions of the package did not migrate to system uid/gid.\n        # Assume that the *default* uid/gid range is in use and only run this\n        # migration when the current uid/gid is allocated in the user range.\n        # Create a temporary system user/group to guarantee the allocation of a\n        # uid/gid in the system range. 
Assign this new uid/gid to the existing\n        # user and group and remove the temporary user/group afterwards.\n        tmp_name=\"headscaletmp\"\n        useradd --system --no-log-init --no-create-home --shell \"$HEADSCALE_SHELL\" \"$tmp_name\"\n        tmp_uid=\"$(id --user \"$tmp_name\")\"\n        tmp_gid=\"$(id --group \"$tmp_name\")\"\n        usermod --non-unique --uid \"$tmp_uid\" --gid \"$tmp_gid\" \"$HEADSCALE_USER\"\n        groupmod --non-unique --gid \"$tmp_gid\" \"$HEADSCALE_GROUP\"\n        userdel --force \"$tmp_name\"\n      fi\n\n      # Enable service and keep track of its state\n      if deb-systemd-helper --quiet was-enabled \"$HEADSCALE_SERVICE\"; then\n        deb-systemd-helper enable \"$HEADSCALE_SERVICE\" >/dev/null || true\n      else\n        deb-systemd-helper update-state \"$HEADSCALE_SERVICE\" >/dev/null || true\n      fi\n\n      # Bounce service\n      if [ -d /run/systemd/system ]; then\n        systemctl --system daemon-reload >/dev/null || true\n        if [ -n \"$2\" ]; then\n          deb-systemd-invoke restart \"$HEADSCALE_SERVICE\" >/dev/null || true\n        else\n          deb-systemd-invoke start \"$HEADSCALE_SERVICE\" >/dev/null || true\n        fi\n      fi\n    ;;\n\n    abort-upgrade|abort-remove|abort-deconfigure)\n    ;;\n\n    *)\n        echo \"postinst called with unknown argument '$1'\" >&2\n        exit 1\n    ;;\nesac\n"
  },
  {
    "path": "packaging/deb/postrm",
    "content": "#!/bin/sh\n# postrm script for headscale.\n\nset -e\n\n# Summary of how this script can be called:\n#        * <postrm> 'remove'\n#        * <postrm> 'purge'\n#        * <old-postrm> 'upgrade' <new-version>\n#        * <new-postrm> 'failed-upgrade' <old-version>\n#        * <new-postrm> 'abort-install'\n#        * <new-postrm> 'abort-install' <old-version>\n#        * <new-postrm> 'abort-upgrade' <old-version>\n#        * <disappearer's-postrm> 'disappear' <overwriter>\n#          <overwriter-version>\n# for details, see https://www.debian.org/doc/debian-policy/ or\n# the debian-policy package.\n\n\ncase \"$1\" in\n    remove)\n      if [ -d /run/systemd/system ]; then\n        systemctl --system daemon-reload >/dev/null || true\n      fi\n    ;;\n\n    purge)\n      userdel headscale\n      rm -rf /var/lib/headscale\n      if [ -x \"/usr/bin/deb-systemd-helper\" ]; then\n        deb-systemd-helper purge headscale.service >/dev/null || true\n      fi\n    ;;\n\n    upgrade|failed-upgrade|abort-install|abort-upgrade|disappear)\n    ;;\n\n    *)\n        echo \"postrm called with unknown argument '$1'\" >&2\n        exit 1\n    ;;\nesac\n"
  },
  {
    "path": "packaging/deb/prerm",
    "content": "#!/bin/sh\n# prerm script for headscale.\n\nset -e\n\n# Summary of how this script can be called:\n#        * <prerm> 'remove'\n#        * <old-prerm> 'upgrade' <new-version>\n#        * <new-prerm> 'failed-upgrade' <old-version>\n#        * <conflictor's-prerm> 'remove' 'in-favour' <package> <new-version>\n#        * <deconfigured's-prerm> 'deconfigure' 'in-favour'\n#          <package-being-installed> <version> 'removing'\n#          <conflicting-package> <version>\n# for details, see https://www.debian.org/doc/debian-policy/ or\n# the debian-policy package.\n\n\ncase \"$1\" in\n    remove)\n      if [ -d /run/systemd/system ]; then\n          deb-systemd-invoke stop headscale.service >/dev/null || true\n      fi\n    ;;\n    upgrade|deconfigure)\n    ;;\n\n    failed-upgrade)\n    ;;\n\n    *)\n        echo \"prerm called with unknown argument '$1'\" >&2\n        exit 1\n    ;;\nesac\n"
  },
  {
    "path": "packaging/systemd/headscale.service",
    "content": "[Unit]\nAfter=network.target\nDescription=headscale coordination server for Tailscale\nX-Restart-Triggers=/etc/headscale/config.yaml\n\n[Service]\nType=simple\nUser=headscale\nGroup=headscale\nExecStart=/usr/bin/headscale serve\nExecReload=/usr/bin/kill -HUP $MAINPID\nRestart=always\nRestartSec=5\n\nWorkingDirectory=/var/lib/headscale\nReadWritePaths=/var/lib/headscale\n\nAmbientCapabilities=CAP_NET_BIND_SERVICE CAP_CHOWN\nCapabilityBoundingSet=CAP_NET_BIND_SERVICE CAP_CHOWN\nLockPersonality=true\nNoNewPrivileges=true\nPrivateDevices=true\nPrivateMounts=true\nPrivateTmp=true\nProcSubset=pid\nProtectClock=true\nProtectControlGroups=true\nProtectHome=true\nProtectHostname=true\nProtectKernelLogs=true\nProtectKernelModules=true\nProtectKernelTunables=true\nProtectProc=invisible\nProtectSystem=strict\nRemoveIPC=true\nRestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX\nRestrictNamespaces=true\nRestrictRealtime=true\nRestrictSUIDSGID=true\nRuntimeDirectory=headscale\nRuntimeDirectoryMode=0750\nStateDirectory=headscale\nStateDirectoryMode=0750\nSystemCallArchitectures=native\nSystemCallFilter=@chown\nSystemCallFilter=@system-service\nSystemCallFilter=~@privileged\nUMask=0077\n\n[Install]\nWantedBy=multi-user.target\n"
  },
  {
    "path": "proto/buf.yaml",
    "content": "version: v1\nlint:\n  use:\n    - DEFAULT\nbreaking:\n  use:\n    - FILE\n\ndeps:\n  - buf.build/googleapis/googleapis\n  - buf.build/grpc-ecosystem/grpc-gateway\n  - buf.build/ufoundit-dev/protoc-gen-gorm\n"
  },
  {
    "path": "proto/headscale/v1/apikey.proto",
    "content": "syntax = \"proto3\";\npackage headscale.v1;\noption go_package = \"github.com/juanfont/headscale/gen/go/v1\";\n\nimport \"google/protobuf/timestamp.proto\";\n\nmessage ApiKey {\n  uint64 id = 1;\n  string prefix = 2;\n  google.protobuf.Timestamp expiration = 3;\n  google.protobuf.Timestamp created_at = 4;\n  google.protobuf.Timestamp last_seen = 5;\n}\n\nmessage CreateApiKeyRequest { google.protobuf.Timestamp expiration = 1; }\n\nmessage CreateApiKeyResponse { string api_key = 1; }\n\nmessage ExpireApiKeyRequest {\n  string prefix = 1;\n  uint64 id = 2;\n}\n\nmessage ExpireApiKeyResponse {}\n\nmessage ListApiKeysRequest {}\n\nmessage ListApiKeysResponse { repeated ApiKey api_keys = 1; }\n\nmessage DeleteApiKeyRequest {\n  string prefix = 1;\n  uint64 id = 2;\n}\n\nmessage DeleteApiKeyResponse {}\n"
  },
  {
    "path": "proto/headscale/v1/auth.proto",
    "content": "syntax = \"proto3\";\npackage headscale.v1;\noption go_package = \"github.com/juanfont/headscale/gen/go/v1\";\n\nimport \"headscale/v1/node.proto\";\n\nmessage AuthRegisterRequest {\n  string user = 1;\n  string auth_id = 2;\n}\n\nmessage AuthRegisterResponse {\n  Node node = 1;\n}\n\nmessage AuthApproveRequest {\n  string auth_id = 1;\n}\n\nmessage AuthApproveResponse {}\n\nmessage AuthRejectRequest {\n  string auth_id = 1;\n}\n\nmessage AuthRejectResponse {}\n"
  },
  {
    "path": "proto/headscale/v1/device.proto",
    "content": "syntax = \"proto3\";\npackage headscale.v1;\noption go_package = \"github.com/juanfont/headscale/gen/go/v1\";\n\nimport \"google/protobuf/timestamp.proto\";\n\n// This is a potential reimplementation of Tailscale's API\n// https://github.com/tailscale/tailscale/blob/main/api.md\n\nmessage Latency {\n  float latency_ms = 1;\n  bool preferred = 2;\n}\n\nmessage ClientSupports {\n  bool hair_pinning = 1;\n  bool ipv6 = 2;\n  bool pcp = 3;\n  bool pmp = 4;\n  bool udp = 5;\n  bool upnp = 6;\n}\n\nmessage ClientConnectivity {\n  repeated string endpoints = 1;\n  string derp = 2;\n  bool mapping_varies_by_dest_ip = 3;\n  map<string, Latency> latency = 4;\n  ClientSupports client_supports = 5;\n}\n\nmessage GetDeviceRequest { string id = 1; }\n\nmessage GetDeviceResponse {\n  repeated string addresses = 1;\n  string id = 2;\n  string user = 3;\n  string name = 4;\n  string hostname = 5;\n  string client_version = 6;\n  bool update_available = 7;\n  string os = 8;\n  google.protobuf.Timestamp created = 9;\n  google.protobuf.Timestamp last_seen = 10;\n  bool key_expiry_disabled = 11;\n  google.protobuf.Timestamp expires = 12;\n  bool authorized = 13;\n  bool is_external = 14;\n  string machine_key = 15;\n  string node_key = 16;\n  bool blocks_incoming_connections = 17;\n  repeated string enabled_routes = 18;\n  repeated string advertised_routes = 19;\n  ClientConnectivity client_connectivity = 20;\n}\n\nmessage DeleteDeviceRequest { string id = 1; }\n\nmessage DeleteDeviceResponse {}\n\nmessage GetDeviceRoutesRequest { string id = 1; }\n\nmessage GetDeviceRoutesResponse {\n  repeated string enabled_routes = 1;\n  repeated string advertised_routes = 2;\n}\n\nmessage EnableDeviceRoutesRequest {\n  string id = 1;\n  repeated string routes = 2;\n}\n\nmessage EnableDeviceRoutesResponse {\n  repeated string enabled_routes = 1;\n  repeated string advertised_routes = 2;\n}\n"
  },
  {
    "path": "proto/headscale/v1/headscale.proto",
    "content": "syntax = \"proto3\";\npackage headscale.v1;\noption go_package = \"github.com/juanfont/headscale/gen/go/v1\";\n\nimport \"google/api/annotations.proto\";\n\nimport \"headscale/v1/user.proto\";\nimport \"headscale/v1/preauthkey.proto\";\nimport \"headscale/v1/node.proto\";\nimport \"headscale/v1/apikey.proto\";\nimport \"headscale/v1/auth.proto\";\nimport \"headscale/v1/policy.proto\";\n\nservice HeadscaleService {\n  // --- User start ---\n  rpc CreateUser(CreateUserRequest) returns (CreateUserResponse) {\n    option (google.api.http) = {\n      post : \"/api/v1/user\"\n      body : \"*\"\n    };\n  }\n\n  rpc RenameUser(RenameUserRequest) returns (RenameUserResponse) {\n    option (google.api.http) = {\n      post : \"/api/v1/user/{old_id}/rename/{new_name}\"\n    };\n  }\n\n  rpc DeleteUser(DeleteUserRequest) returns (DeleteUserResponse) {\n    option (google.api.http) = {\n      delete : \"/api/v1/user/{id}\"\n    };\n  }\n\n  rpc ListUsers(ListUsersRequest) returns (ListUsersResponse) {\n    option (google.api.http) = {\n      get : \"/api/v1/user\"\n    };\n  }\n  // --- User end ---\n\n  // --- PreAuthKeys start ---\n  rpc CreatePreAuthKey(CreatePreAuthKeyRequest)\n      returns (CreatePreAuthKeyResponse) {\n    option (google.api.http) = {\n      post : \"/api/v1/preauthkey\"\n      body : \"*\"\n    };\n  }\n\n  rpc ExpirePreAuthKey(ExpirePreAuthKeyRequest)\n      returns (ExpirePreAuthKeyResponse) {\n    option (google.api.http) = {\n      post : \"/api/v1/preauthkey/expire\"\n      body : \"*\"\n    };\n  }\n\n  rpc DeletePreAuthKey(DeletePreAuthKeyRequest)\n      returns (DeletePreAuthKeyResponse) {\n    option (google.api.http) = {\n      delete : \"/api/v1/preauthkey\"\n    };\n  }\n\n  rpc ListPreAuthKeys(ListPreAuthKeysRequest)\n      returns (ListPreAuthKeysResponse) {\n    option (google.api.http) = {\n      get : \"/api/v1/preauthkey\"\n    };\n  }\n  // --- PreAuthKeys end ---\n\n  // --- Node start ---\n  rpc DebugCreateNode(DebugCreateNodeRequest)\n      returns (DebugCreateNodeResponse) {\n    option (google.api.http) = {\n      post : \"/api/v1/debug/node\"\n      body : \"*\"\n    };\n  }\n\n  rpc GetNode(GetNodeRequest) returns (GetNodeResponse) {\n    option (google.api.http) = {\n      get : \"/api/v1/node/{node_id}\"\n    };\n  }\n\n  rpc SetTags(SetTagsRequest) returns (SetTagsResponse) {\n    option (google.api.http) = {\n      post : \"/api/v1/node/{node_id}/tags\"\n      body : \"*\"\n    };\n  }\n\n  rpc SetApprovedRoutes(SetApprovedRoutesRequest)\n      returns (SetApprovedRoutesResponse) {\n    option (google.api.http) = {\n      post : \"/api/v1/node/{node_id}/approve_routes\"\n      body : \"*\"\n    };\n  }\n\n  rpc RegisterNode(RegisterNodeRequest) returns (RegisterNodeResponse) {\n    option (google.api.http) = {\n      post : \"/api/v1/node/register\"\n    };\n  }\n\n  rpc DeleteNode(DeleteNodeRequest) returns (DeleteNodeResponse) {\n    option (google.api.http) = {\n      delete : \"/api/v1/node/{node_id}\"\n    };\n  }\n\n  rpc ExpireNode(ExpireNodeRequest) returns (ExpireNodeResponse) {\n    option (google.api.http) = {\n      post : \"/api/v1/node/{node_id}/expire\"\n    };\n  }\n\n  rpc RenameNode(RenameNodeRequest) returns (RenameNodeResponse) {\n    option (google.api.http) = {\n      post : \"/api/v1/node/{node_id}/rename/{new_name}\"\n    };\n  }\n\n  rpc ListNodes(ListNodesRequest) returns (ListNodesResponse) {\n    option (google.api.http) = {\n      get : \"/api/v1/node\"\n    };\n  }\n\n  rpc 
BackfillNodeIPs(BackfillNodeIPsRequest)\n      returns (BackfillNodeIPsResponse) {\n    option (google.api.http) = {\n      post : \"/api/v1/node/backfillips\"\n    };\n  }\n\n  // --- Node end ---\n\n  // --- Auth start ---\n  rpc AuthRegister(AuthRegisterRequest) returns (AuthRegisterResponse) {\n    option (google.api.http) = {\n      post : \"/api/v1/auth/register\"\n      body : \"*\"\n    };\n  }\n\n  rpc AuthApprove(AuthApproveRequest) returns (AuthApproveResponse) {\n    option (google.api.http) = {\n      post : \"/api/v1/auth/approve\"\n      body : \"*\"\n    };\n  }\n\n  rpc AuthReject(AuthRejectRequest) returns (AuthRejectResponse) {\n    option (google.api.http) = {\n      post : \"/api/v1/auth/reject\"\n      body : \"*\"\n    };\n  }\n  // --- Auth end ---\n\n  // --- ApiKeys start ---\n  rpc CreateApiKey(CreateApiKeyRequest) returns (CreateApiKeyResponse) {\n    option (google.api.http) = {\n      post : \"/api/v1/apikey\"\n      body : \"*\"\n    };\n  }\n\n  rpc ExpireApiKey(ExpireApiKeyRequest) returns (ExpireApiKeyResponse) {\n    option (google.api.http) = {\n      post : \"/api/v1/apikey/expire\"\n      body : \"*\"\n    };\n  }\n\n  rpc ListApiKeys(ListApiKeysRequest) returns (ListApiKeysResponse) {\n    option (google.api.http) = {\n      get : \"/api/v1/apikey\"\n    };\n  }\n\n  rpc DeleteApiKey(DeleteApiKeyRequest) returns (DeleteApiKeyResponse) {\n    option (google.api.http) = {\n      delete : \"/api/v1/apikey/{prefix}\"\n    };\n  }\n  // --- ApiKeys end ---\n\n  // --- Policy start ---\n  rpc GetPolicy(GetPolicyRequest) returns (GetPolicyResponse) {\n    option (google.api.http) = {\n      get : \"/api/v1/policy\"\n    };\n  }\n\n  rpc SetPolicy(SetPolicyRequest) returns (SetPolicyResponse) {\n    option (google.api.http) = {\n      put : \"/api/v1/policy\"\n      body : \"*\"\n    };\n  }\n  // --- Policy end ---\n\n  // --- Health start ---\n  rpc Health(HealthRequest) returns (HealthResponse) {\n    option (google.api.http) = {\n      get : \"/api/v1/health\"\n    };\n  }\n  // --- Health end ---\n\n  // Implement Tailscale API\n  // rpc GetDevice(GetDeviceRequest) returns(GetDeviceResponse) {\n  //     option(google.api.http) = {\n  //         get : \"/api/v1/device/{id}\"\n  //     };\n  // }\n\n  // rpc DeleteDevice(DeleteDeviceRequest) returns(DeleteDeviceResponse) {\n  //     option(google.api.http) = {\n  //         delete : \"/api/v1/device/{id}\"\n  //     };\n  // }\n\n  // rpc GetDeviceRoutes(GetDeviceRoutesRequest)\n  // returns(GetDeviceRoutesResponse) {\n  //     option(google.api.http) = {\n  //         get : \"/api/v1/device/{id}/routes\"\n  //     };\n  // }\n\n  // rpc EnableDeviceRoutes(EnableDeviceRoutesRequest)\n  // returns(EnableDeviceRoutesResponse) {\n  //     option(google.api.http) = {\n  //         post : \"/api/v1/device/{id}/routes\"\n  //     };\n  // }\n}\n\nmessage HealthRequest {}\n\nmessage HealthResponse {\n  bool database_connectivity = 1;\n}\n"
  },
  {
    "path": "proto/headscale/v1/node.proto",
    "content": "syntax = \"proto3\";\npackage headscale.v1;\n\nimport \"google/protobuf/timestamp.proto\";\nimport \"headscale/v1/preauthkey.proto\";\nimport \"headscale/v1/user.proto\";\n\noption go_package = \"github.com/juanfont/headscale/gen/go/v1\";\n\nenum RegisterMethod {\n  REGISTER_METHOD_UNSPECIFIED = 0;\n  REGISTER_METHOD_AUTH_KEY = 1;\n  REGISTER_METHOD_CLI = 2;\n  REGISTER_METHOD_OIDC = 3;\n}\n\nmessage Node {\n  // 9: removal of last_successful_update\n  reserved 9;\n\n  uint64 id = 1;\n  string machine_key = 2;\n  string node_key = 3;\n  string disco_key = 4;\n  repeated string ip_addresses = 5;\n  string name = 6;\n  User user = 7;\n\n  google.protobuf.Timestamp last_seen = 8;\n  google.protobuf.Timestamp expiry = 10;\n\n  PreAuthKey pre_auth_key = 11;\n\n  google.protobuf.Timestamp created_at = 12;\n\n  RegisterMethod register_method = 13;\n\n  reserved 14 to 20;\n  // google.protobuf.Timestamp updated_at = 14;\n  // google.protobuf.Timestamp deleted_at = 15;\n\n  // bytes host_info      = 15;\n  // bytes endpoints      = 16;\n  // bytes enabled_routes = 17;\n\n  // Deprecated\n  // repeated string forced_tags = 18;\n  // repeated string invalid_tags = 19;\n  // repeated string valid_tags = 20;\n  string given_name = 21;\n  bool online = 22;\n  repeated string approved_routes = 23;\n  repeated string available_routes = 24;\n  repeated string subnet_routes = 25;\n  repeated string tags = 26;\n}\n\nmessage RegisterNodeRequest {\n  string user = 1;\n  string key = 2;\n}\n\nmessage RegisterNodeResponse {\n  Node node = 1;\n}\n\nmessage GetNodeRequest {\n  uint64 node_id = 1;\n}\n\nmessage GetNodeResponse {\n  Node node = 1;\n}\n\nmessage SetTagsRequest {\n  uint64 node_id = 1;\n  repeated string tags = 2;\n}\n\nmessage SetTagsResponse {\n  Node node = 1;\n}\n\nmessage SetApprovedRoutesRequest {\n  uint64 node_id = 1;\n  repeated string routes = 2;\n}\n\nmessage SetApprovedRoutesResponse {\n  Node node = 1;\n}\n\nmessage DeleteNodeRequest {\n  uint64 node_id = 1;\n}\n\nmessage DeleteNodeResponse {}\n\nmessage ExpireNodeRequest {\n  uint64 node_id = 1;\n  google.protobuf.Timestamp expiry = 2;\n  // When true, sets expiry to null (node will never expire).\n  bool disable_expiry = 3;\n}\n\nmessage ExpireNodeResponse {\n  Node node = 1;\n}\n\nmessage RenameNodeRequest {\n  uint64 node_id = 1;\n  string new_name = 2;\n}\n\nmessage RenameNodeResponse {\n  Node node = 1;\n}\n\nmessage ListNodesRequest {\n  string user = 1;\n}\n\nmessage ListNodesResponse {\n  repeated Node nodes = 1;\n}\n\nmessage DebugCreateNodeRequest {\n  string user = 1;\n  string key = 2;\n  string name = 3;\n  repeated string routes = 4;\n}\n\nmessage DebugCreateNodeResponse {\n  Node node = 1;\n}\n\nmessage BackfillNodeIPsRequest {\n  bool confirmed = 1;\n}\n\nmessage BackfillNodeIPsResponse {\n  repeated string changes = 1;\n}\n"
  },
  {
    "path": "proto/headscale/v1/policy.proto",
    "content": "syntax = \"proto3\";\npackage headscale.v1;\noption go_package = \"github.com/juanfont/headscale/gen/go/v1\";\n\nimport \"google/protobuf/timestamp.proto\";\n\nmessage SetPolicyRequest { string policy = 1; }\n\nmessage SetPolicyResponse {\n  string policy = 1;\n  google.protobuf.Timestamp updated_at = 2;\n}\n\nmessage GetPolicyRequest {}\n\nmessage GetPolicyResponse {\n  string policy = 1;\n  google.protobuf.Timestamp updated_at = 2;\n}\n"
  },
  {
    "path": "proto/headscale/v1/preauthkey.proto",
    "content": "syntax = \"proto3\";\npackage headscale.v1;\n\nimport \"google/protobuf/timestamp.proto\";\nimport \"headscale/v1/user.proto\";\n\noption go_package = \"github.com/juanfont/headscale/gen/go/v1\";\n\nmessage PreAuthKey {\n  User user = 1;\n  uint64 id = 2;\n  string key = 3;\n  bool reusable = 4;\n  bool ephemeral = 5;\n  bool used = 6;\n  google.protobuf.Timestamp expiration = 7;\n  google.protobuf.Timestamp created_at = 8;\n  repeated string acl_tags = 9;\n}\n\nmessage CreatePreAuthKeyRequest {\n  uint64 user = 1;\n  bool reusable = 2;\n  bool ephemeral = 3;\n  google.protobuf.Timestamp expiration = 4;\n  repeated string acl_tags = 5;\n}\n\nmessage CreatePreAuthKeyResponse {\n  PreAuthKey pre_auth_key = 1;\n}\n\nmessage ExpirePreAuthKeyRequest {\n  uint64 id = 1;\n}\n\nmessage ExpirePreAuthKeyResponse {}\n\nmessage DeletePreAuthKeyRequest {\n  uint64 id = 1;\n}\n\nmessage DeletePreAuthKeyResponse {}\n\nmessage ListPreAuthKeysRequest {}\n\nmessage ListPreAuthKeysResponse {\n  repeated PreAuthKey pre_auth_keys = 1;\n}\n"
  },
  {
    "path": "proto/headscale/v1/user.proto",
    "content": "syntax = \"proto3\";\npackage headscale.v1;\noption go_package = \"github.com/juanfont/headscale/gen/go/v1\";\n\nimport \"google/protobuf/timestamp.proto\";\n\nmessage User {\n  uint64 id = 1;\n  string name = 2;\n  google.protobuf.Timestamp created_at = 3;\n  string display_name = 4;\n  string email = 5;\n  string provider_id = 6;\n  string provider = 7;\n  string profile_pic_url = 8;\n}\n\nmessage CreateUserRequest {\n  string name = 1;\n  string display_name = 2;\n  string email = 3;\n  string picture_url = 4;\n}\n\nmessage CreateUserResponse { User user = 1; }\n\nmessage RenameUserRequest {\n  uint64 old_id = 1;\n  string new_name = 2;\n}\n\nmessage RenameUserResponse { User user = 1; }\n\nmessage DeleteUserRequest { uint64 id = 1; }\n\nmessage DeleteUserResponse {}\n\nmessage ListUsersRequest {\n  uint64 id = 1;\n  string name = 2;\n  string email = 3;\n}\n\nmessage ListUsersResponse { repeated User users = 1; }\n"
  },
  {
    "path": "swagger.go",
    "content": "package headscale\n\nimport (\n\t\"bytes\"\n\t_ \"embed\"\n\t\"html/template\"\n\t\"net/http\"\n\n\t\"github.com/rs/zerolog/log\"\n)\n\n//go:embed gen/openapiv2/headscale/v1/headscale.swagger.json\nvar apiV1JSON []byte\n\nfunc SwaggerUI(\n\twriter http.ResponseWriter,\n\treq *http.Request,\n) {\n\tswaggerTemplate := template.Must(template.New(\"swagger\").Parse(`\n<html>\n\t<head>\n\t<link rel=\"stylesheet\" type=\"text/css\" href=\"https://unpkg.com/swagger-ui-dist@3/swagger-ui.css\">\n\t<link rel=\"icon\" href=\"/favicon.ico\">\n\t<script src=\"https://unpkg.com/swagger-ui-dist@3/swagger-ui-standalone-preset.js\"></script>\n\t<script src=\"https://unpkg.com/swagger-ui-dist@3/swagger-ui-bundle.js\" charset=\"UTF-8\"></script>\n\t</head>\n\t<body>\n\t<div id=\"swagger-ui\"></div>\n\t<script>\n\t\twindow.addEventListener('load', (event) => {\n\t\t\tconst ui = SwaggerUIBundle({\n\t\t\t    url: \"/swagger/v1/openapiv2.json\",\n\t\t\t    dom_id: '#swagger-ui',\n\t\t\t    presets: [\n\t\t\t      SwaggerUIBundle.presets.apis,\n\t\t\t      SwaggerUIBundle.SwaggerUIStandalonePreset\n\t\t\t    ],\n\t\t\t\tplugins: [\n                \tSwaggerUIBundle.plugins.DownloadUrl\n            \t],\n\t\t\t\tdeepLinking: true,\n\t\t\t\t// TODO(kradalby): Figure out why this does not work\n\t\t\t\t// layout: \"StandaloneLayout\",\n\t\t\t  })\n\t\t\twindow.ui = ui\n\t\t});\n\t</script>\n\t</body>\n</html>`))\n\n\tvar payload bytes.Buffer\n\tif err := swaggerTemplate.Execute(&payload, struct{}{}); err != nil { //nolint:noinlineerr\n\t\tlog.Error().\n\t\t\tCaller().\n\t\t\tErr(err).\n\t\t\tMsg(\"Could not render Swagger\")\n\n\t\twriter.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\t\twriter.WriteHeader(http.StatusInternalServerError)\n\n\t\t_, err := writer.Write([]byte(\"Could not render Swagger\"))\n\t\tif err != nil {\n\t\t\tlog.Error().\n\t\t\t\tCaller().\n\t\t\t\tErr(err).\n\t\t\t\tMsg(\"Failed to write response\")\n\t\t}\n\n\t\treturn\n\t}\n\n\twriter.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\twriter.WriteHeader(http.StatusOK)\n\n\t_, err := writer.Write(payload.Bytes())\n\tif err != nil {\n\t\tlog.Error().\n\t\t\tCaller().\n\t\t\tErr(err).\n\t\t\tMsg(\"Failed to write response\")\n\t}\n}\n\nfunc SwaggerAPIv1(\n\twriter http.ResponseWriter,\n\treq *http.Request,\n) {\n\twriter.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\twriter.WriteHeader(http.StatusOK)\n\n\tif _, err := writer.Write(apiV1JSON); err != nil { //nolint:noinlineerr\n\t\tlog.Error().\n\t\t\tCaller().\n\t\t\tErr(err).\n\t\t\tMsg(\"Failed to write response\")\n\t}\n}\n"
  },
  {
    "path": "tools/capver/main.go",
    "content": "package main\n\n//go:generate go run main.go\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"go/format\"\n\t\"io\"\n\t\"log\"\n\t\"net/http\"\n\t\"os\"\n\t\"regexp\"\n\t\"slices\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\n\txmaps \"golang.org/x/exp/maps\"\n\t\"tailscale.com/tailcfg\"\n)\n\nconst (\n\tghcrTokenURL                = \"https://ghcr.io/token?service=ghcr.io&scope=repository:tailscale/tailscale:pull\" //nolint:gosec\n\tghcrTagsURL                 = \"https://ghcr.io/v2/tailscale/tailscale/tags/list?n=10000\"\n\trawFileURL                  = \"https://github.com/tailscale/tailscale/raw/refs/tags/%s/tailcfg/tailcfg.go\"\n\toutputFile                  = \"../../hscontrol/capver/capver_generated.go\"\n\ttestFile                    = \"../../hscontrol/capver/capver_test_data.go\"\n\tfallbackCapVer              = 90\n\tmaxTestCases                = 4\n\tsupportedMajorMinorVersions = 10\n\tfilePermissions             = 0o600\n\tsemverMatchGroups           = 4\n\tlatest3Count                = 3\n\tlatest2Count                = 2\n)\n\nvar errUnexpectedStatusCode = errors.New(\"unexpected status code\")\n\n// GHCRTokenResponse represents the response from GHCR token endpoint.\ntype GHCRTokenResponse struct {\n\tToken string `json:\"token\"`\n}\n\n// GHCRTagsResponse represents the response from GHCR tags list endpoint.\ntype GHCRTagsResponse struct {\n\tName string   `json:\"name\"`\n\tTags []string `json:\"tags\"`\n}\n\n// getGHCRToken fetches an anonymous token from GHCR for accessing public container images.\nfunc getGHCRToken(ctx context.Context) (string, error) {\n\tclient := &http.Client{}\n\n\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, ghcrTokenURL, nil)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error creating token request: %w\", err)\n\t}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error fetching GHCR token: %w\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn \"\", fmt.Errorf(\"%w: %d\", errUnexpectedStatusCode, resp.StatusCode)\n\t}\n\n\tbody, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error reading token response: %w\", err)\n\t}\n\n\tvar tokenResp GHCRTokenResponse\n\n\terr = json.Unmarshal(body, &tokenResp)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error parsing token response: %w\", err)\n\t}\n\n\treturn tokenResp.Token, nil\n}\n\n// getGHCRTags fetches all available tags from GHCR for tailscale/tailscale.\nfunc getGHCRTags(ctx context.Context) ([]string, error) {\n\ttoken, err := getGHCRToken(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get GHCR token: %w\", err)\n\t}\n\n\tclient := &http.Client{}\n\n\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, ghcrTagsURL, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error creating tags request: %w\", err)\n\t}\n\n\treq.Header.Set(\"Authorization\", \"Bearer \"+token)\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error fetching tags: %w\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"%w: %d\", errUnexpectedStatusCode, resp.StatusCode)\n\t}\n\n\tbody, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error reading tags response: %w\", err)\n\t}\n\n\tvar tagsResp GHCRTagsResponse\n\n\terr = json.Unmarshal(body, &tagsResp)\n\tif err != nil {\n\t\treturn nil, 
fmt.Errorf(\"error parsing tags response: %w\", err)\n\t}\n\n\treturn tagsResp.Tags, nil\n}\n\n// semverRegex matches semantic version tags like v1.90.0 or v1.90.1.\nvar semverRegex = regexp.MustCompile(`^v(\\d+)\\.(\\d+)\\.(\\d+)$`)\n\n// parseSemver extracts major, minor, patch from a semver tag.\n// Returns -1 for all values if not a valid semver.\nfunc parseSemver(tag string) (int, int, int) {\n\tmatches := semverRegex.FindStringSubmatch(tag)\n\tif len(matches) != semverMatchGroups {\n\t\treturn -1, -1, -1\n\t}\n\n\tmajor, _ := strconv.Atoi(matches[1])\n\tminor, _ := strconv.Atoi(matches[2])\n\tpatch, _ := strconv.Atoi(matches[3])\n\n\treturn major, minor, patch\n}\n\n// getMinorVersionsFromTags processes container tags and returns a map of minor versions\n// to the first available patch version for each minor.\n// For example: {\"v1.90\": \"v1.90.0\", \"v1.92\": \"v1.92.0\"}.\nfunc getMinorVersionsFromTags(tags []string) map[string]string {\n\t// Map minor version (e.g., \"v1.90\") to lowest patch version available\n\tminorToLowestPatch := make(map[string]struct {\n\t\tpatch   int\n\t\tfullVer string\n\t})\n\n\tfor _, tag := range tags {\n\t\tmajor, minor, patch := parseSemver(tag)\n\t\tif major < 0 {\n\t\t\tcontinue // Not a semver tag\n\t\t}\n\n\t\tminorKey := fmt.Sprintf(\"v%d.%d\", major, minor)\n\n\t\texisting, exists := minorToLowestPatch[minorKey]\n\t\tif !exists || patch < existing.patch {\n\t\t\tminorToLowestPatch[minorKey] = struct {\n\t\t\t\tpatch   int\n\t\t\t\tfullVer string\n\t\t\t}{\n\t\t\t\tpatch:   patch,\n\t\t\t\tfullVer: tag,\n\t\t\t}\n\t\t}\n\t}\n\n\t// Convert to simple map\n\tresult := make(map[string]string)\n\tfor minorVer, info := range minorToLowestPatch {\n\t\tresult[minorVer] = info.fullVer\n\t}\n\n\treturn result\n}\n\n// getCapabilityVersions fetches container tags from GHCR, identifies minor versions,\n// and fetches the capability version for each from the Tailscale source.\nfunc getCapabilityVersions(ctx context.Context) (map[string]tailcfg.CapabilityVersion, error) {\n\t// Fetch container tags from GHCR\n\ttags, err := getGHCRTags(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get container tags: %w\", err)\n\t}\n\n\tlog.Printf(\"Found %d container tags\", len(tags))\n\n\t// Get minor versions with their representative patch versions\n\tminorVersions := getMinorVersionsFromTags(tags)\n\tlog.Printf(\"Found %d minor versions\", len(minorVersions))\n\n\t// Regular expression to find the CurrentCapabilityVersion line\n\tre := regexp.MustCompile(`const CurrentCapabilityVersion CapabilityVersion = (\\d+)`)\n\n\tversions := make(map[string]tailcfg.CapabilityVersion)\n\tclient := &http.Client{}\n\n\tfor minorVer, patchVer := range minorVersions {\n\t\t// Fetch the raw Go file for the patch version\n\t\trawURL := fmt.Sprintf(rawFileURL, patchVer)\n\n\t\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, rawURL, nil) //nolint:gosec\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Warning: failed to create request for %s: %v\", patchVer, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tresp, err := client.Do(req)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Warning: failed to fetch %s: %v\", patchVer, err)\n\t\t\tcontinue\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\tlog.Printf(\"Warning: got status %d for %s\", resp.StatusCode, patchVer)\n\t\t\tcontinue\n\t\t}\n\n\t\tbody, err := io.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Warning: failed to read response for %s: %v\", patchVer, 
err)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Find the CurrentCapabilityVersion\n\t\tmatches := re.FindStringSubmatch(string(body))\n\t\tif len(matches) > 1 {\n\t\t\tcapabilityVersionStr := matches[1]\n\t\t\tcapabilityVersion, _ := strconv.Atoi(capabilityVersionStr)\n\t\t\tversions[minorVer] = tailcfg.CapabilityVersion(capabilityVersion)\n\t\t\tlog.Printf(\"  %s (from %s): capVer %d\", minorVer, patchVer, capabilityVersion)\n\t\t}\n\t}\n\n\treturn versions, nil\n}\n\nfunc calculateMinSupportedCapabilityVersion(versions map[string]tailcfg.CapabilityVersion) tailcfg.CapabilityVersion {\n\t// Since we now store minor versions directly, just sort and take the oldest of the latest N\n\tminorVersions := xmaps.Keys(versions)\n\tsort.Strings(minorVersions)\n\n\tsupportedCount := min(len(minorVersions), supportedMajorMinorVersions)\n\n\tif supportedCount == 0 {\n\t\treturn fallbackCapVer\n\t}\n\n\t// The minimum supported version is the oldest of the latest 10\n\toldestSupportedMinor := minorVersions[len(minorVersions)-supportedCount]\n\n\treturn versions[oldestSupportedMinor]\n}\n\nfunc writeCapabilityVersionsToFile(versions map[string]tailcfg.CapabilityVersion, minSupportedCapVer tailcfg.CapabilityVersion) error {\n\t// Generate the Go code as a string\n\tvar content strings.Builder\n\tcontent.WriteString(\"package capver\\n\\n\")\n\tcontent.WriteString(\"// Generated DO NOT EDIT\\n\\n\")\n\tcontent.WriteString(`import \"tailscale.com/tailcfg\"`)\n\tcontent.WriteString(\"\\n\\n\")\n\tcontent.WriteString(\"var tailscaleToCapVer = map[string]tailcfg.CapabilityVersion{\\n\")\n\n\tsortedVersions := xmaps.Keys(versions)\n\tsort.Strings(sortedVersions)\n\n\tfor _, version := range sortedVersions {\n\t\tfmt.Fprintf(&content, \"\\t\\\"%s\\\": %d,\\n\", version, versions[version])\n\t}\n\n\tcontent.WriteString(\"}\\n\")\n\n\tcontent.WriteString(\"\\n\\n\")\n\tcontent.WriteString(\"var capVerToTailscaleVer = map[tailcfg.CapabilityVersion]string{\\n\")\n\n\tcapVarToTailscaleVer := make(map[tailcfg.CapabilityVersion]string)\n\n\tfor _, v := range sortedVersions {\n\t\tcapabilityVersion := versions[v]\n\n\t\t// If it is already set, skip and continue,\n\t\t// we only want the first tailscale version per\n\t\t// capability version.\n\t\tif _, ok := capVarToTailscaleVer[capabilityVersion]; ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tcapVarToTailscaleVer[capabilityVersion] = v\n\t}\n\n\tcapsSorted := xmaps.Keys(capVarToTailscaleVer)\n\tslices.Sort(capsSorted)\n\n\tfor _, capVer := range capsSorted {\n\t\tfmt.Fprintf(&content, \"\\t%d:\\t\\t\\\"%s\\\",\\n\", capVer, capVarToTailscaleVer[capVer])\n\t}\n\n\tcontent.WriteString(\"}\\n\\n\")\n\n\t// Add the SupportedMajorMinorVersions constant\n\tcontent.WriteString(\"// SupportedMajorMinorVersions is the number of major.minor Tailscale versions supported.\\n\")\n\tfmt.Fprintf(&content, \"const SupportedMajorMinorVersions = %d\\n\\n\", supportedMajorMinorVersions)\n\n\t// Add the MinSupportedCapabilityVersion constant\n\tcontent.WriteString(\"// MinSupportedCapabilityVersion represents the minimum capability version\\n\")\n\tcontent.WriteString(\"// supported by this Headscale instance (latest 10 minor versions)\\n\")\n\tfmt.Fprintf(&content, \"const MinSupportedCapabilityVersion tailcfg.CapabilityVersion = %d\\n\", minSupportedCapVer)\n\n\t// Format the generated code\n\tformatted, err := format.Source([]byte(content.String()))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error formatting Go code: %w\", err)\n\t}\n\n\t// Write to file\n\terr = os.WriteFile(outputFile, formatted, 
filePermissions)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error writing file: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc writeTestDataFile(versions map[string]tailcfg.CapabilityVersion, minSupportedCapVer tailcfg.CapabilityVersion) error {\n\t// Sort minor versions\n\tminorVersions := xmaps.Keys(versions)\n\tsort.Strings(minorVersions)\n\n\t// Take latest N\n\tsupportedCount := min(len(minorVersions), supportedMajorMinorVersions)\n\n\tlatest10 := minorVersions[len(minorVersions)-supportedCount:]\n\tlatest3 := minorVersions[len(minorVersions)-min(latest3Count, len(minorVersions)):]\n\tlatest2 := minorVersions[len(minorVersions)-min(latest2Count, len(minorVersions)):]\n\n\t// Generate test data file content\n\tvar content strings.Builder\n\tcontent.WriteString(\"package capver\\n\\n\")\n\tcontent.WriteString(\"// Generated DO NOT EDIT\\n\\n\")\n\tcontent.WriteString(\"import \\\"tailscale.com/tailcfg\\\"\\n\\n\")\n\n\t// Generate complete test struct for TailscaleLatestMajorMinor\n\tcontent.WriteString(\"var tailscaleLatestMajorMinorTests = []struct {\\n\")\n\tcontent.WriteString(\"\\tn        int\\n\")\n\tcontent.WriteString(\"\\tstripV   bool\\n\")\n\tcontent.WriteString(\"\\texpected []string\\n\")\n\tcontent.WriteString(\"}{\\n\")\n\n\t// Latest 3 with v prefix\n\tcontent.WriteString(\"\\t{3, false, []string{\")\n\n\tfor i, version := range latest3 {\n\t\tcontent.WriteString(fmt.Sprintf(\"\\\"%s\\\"\", version))\n\n\t\tif i < len(latest3)-1 {\n\t\t\tcontent.WriteString(\", \")\n\t\t}\n\t}\n\n\tcontent.WriteString(\"}},\\n\")\n\n\t// Latest 2 without v prefix\n\tcontent.WriteString(\"\\t{2, true, []string{\")\n\n\tfor i, version := range latest2 {\n\t\t// Strip v prefix for this test case\n\t\tverNoV := strings.TrimPrefix(version, \"v\")\n\t\tcontent.WriteString(fmt.Sprintf(\"\\\"%s\\\"\", verNoV))\n\n\t\tif i < len(latest2)-1 {\n\t\t\tcontent.WriteString(\", \")\n\t\t}\n\t}\n\n\tcontent.WriteString(\"}},\\n\")\n\n\t// Latest N without v prefix (all supported)\n\tcontent.WriteString(fmt.Sprintf(\"\\t{%d, true, []string{\\n\", supportedMajorMinorVersions))\n\n\tfor _, version := range latest10 {\n\t\tverNoV := strings.TrimPrefix(version, \"v\")\n\t\tcontent.WriteString(fmt.Sprintf(\"\\t\\t\\\"%s\\\",\\n\", verNoV))\n\t}\n\n\tcontent.WriteString(\"\\t}},\\n\")\n\n\t// Empty case\n\tcontent.WriteString(\"\\t{0, false, nil},\\n\")\n\tcontent.WriteString(\"}\\n\\n\")\n\n\t// Build capVerToTailscaleVer for test data\n\tcapVerToTailscaleVer := make(map[tailcfg.CapabilityVersion]string)\n\tsortedVersions := xmaps.Keys(versions)\n\tsort.Strings(sortedVersions)\n\n\tfor _, v := range sortedVersions {\n\t\tcapabilityVersion := versions[v]\n\t\tif _, ok := capVerToTailscaleVer[capabilityVersion]; !ok {\n\t\t\tcapVerToTailscaleVer[capabilityVersion] = v\n\t\t}\n\t}\n\n\t// Generate complete test struct for CapVerMinimumTailscaleVersion\n\tcontent.WriteString(\"var capVerMinimumTailscaleVersionTests = []struct {\\n\")\n\tcontent.WriteString(\"\\tinput    tailcfg.CapabilityVersion\\n\")\n\tcontent.WriteString(\"\\texpected string\\n\")\n\tcontent.WriteString(\"}{\\n\")\n\n\t// Add minimum supported version\n\tminVersionString := capVerToTailscaleVer[minSupportedCapVer]\n\tcontent.WriteString(fmt.Sprintf(\"\\t{%d, \\\"%s\\\"},\\n\", minSupportedCapVer, minVersionString))\n\n\t// Add a few more test cases\n\tcapsSorted := xmaps.Keys(capVerToTailscaleVer)\n\tslices.Sort(capsSorted)\n\n\ttestCount := 0\n\tfor _, capVer := range capsSorted {\n\t\tif testCount >= maxTestCases 
{\n\t\t\tbreak\n\t\t}\n\n\t\tif capVer != minSupportedCapVer { // Don't duplicate the min version test\n\t\t\tversion := capVerToTailscaleVer[capVer]\n\t\t\tcontent.WriteString(fmt.Sprintf(\"\\t{%d, \\\"%s\\\"},\\n\", capVer, version))\n\n\t\t\ttestCount++\n\t\t}\n\t}\n\n\t// Edge cases\n\tcontent.WriteString(\"\\t{9001, \\\"\\\"}, // Test case for a version higher than any in the map\\n\")\n\tcontent.WriteString(\"\\t{60, \\\"\\\"},   // Test case for a version lower than any in the map\\n\")\n\tcontent.WriteString(\"}\\n\")\n\n\t// Format the generated code\n\tformatted, err := format.Source([]byte(content.String()))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error formatting test data Go code: %w\", err)\n\t}\n\n\t// Write to file\n\terr = os.WriteFile(testFile, formatted, filePermissions)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error writing test data file: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc main() {\n\tctx := context.Background()\n\n\tversions, err := getCapabilityVersions(ctx)\n\tif err != nil {\n\t\tlog.Println(\"Error:\", err)\n\t\treturn\n\t}\n\n\t// Calculate the minimum supported capability version\n\tminSupportedCapVer := calculateMinSupportedCapabilityVersion(versions)\n\n\terr = writeCapabilityVersionsToFile(versions, minSupportedCapVer)\n\tif err != nil {\n\t\tlog.Println(\"Error writing to file:\", err)\n\t\treturn\n\t}\n\n\terr = writeTestDataFile(versions, minSupportedCapVer)\n\tif err != nil {\n\t\tlog.Println(\"Error writing test data file:\", err)\n\t\treturn\n\t}\n\n\tlog.Println(\"Capability versions written to\", outputFile)\n}\n"
  }
]