[
  {
    "path": ".envrc.template",
    "content": "# Example .envrc file for use with direnv.\n# Copy this file to .envrc and edit the values as required.\n# Do not check in your .envrc file to source control as it may contain secrets.\n\n# The following variables are required by the E2E test script: ./hack/e2e/test.sh.\nexport VEN_API_KEY=       # your Venafi Cloud API key with full permissions\nexport VEN_API_KEY_PULL=  # your Venafi Cloud API key with pull-only permissions\nexport VEN_ZONE=          # the Venafi Cloud zone to use for certificate requests\nexport VEN_VCP_REGION=    # the Venafi Cloud region to use (us or eu)\nexport VEN_API_HOST=      # the Venafi Cloud API host (usually api.venafi.cloud or api.venafi.eu)\nexport OCI_BASE=          # the base URL for the OCI registry where the Agent chart and image will be pushed\nexport CLOUDSDK_CORE_PROJECT= # the GCP project ID where a GKE cluster will be created.\nexport CLOUDSDK_COMPUTE_ZONE= # the GCP zone where a GKE cluster will be created. E.g. europe-west2-b\nexport CLUSTER_NAME=          # the name of the GKE cluster which will be created. E.g. cluster-1\n\n# The following variables are required for CyberArk / MachineHub integration tests.\nexport ARK_SUBDOMAIN=      # your CyberArk tenant subdomain e.g. tlskp-test\nexport ARK_USERNAME=       # your CyberArk username\nexport ARK_SECRET=         # your CyberArk password\n# OPTIONAL: the URL for the CyberArk Discovery API if not using the production environment\nexport ARK_DISCOVERY_API=https://platform-discovery.integration-cyberark.cloud/\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/bug_report.md",
    "content": "---\nname: Bug report\nabout: Issue for something that isn't working as expected\ntitle: ''\nlabels: ''\nassignees: ''\n\n---\n\n<Summary of the bug that you've encountered>\n\n**What happened?**\n\nWhat is the current bug behavior?\nGive all the context you can, provide relevant logs and/or screenshots.\n\n**What should had happened?**\n\nDescribe what you expected to happen.\n\n**Possible fixes**\n\nThis section is optional and should include possible solutions to explore and discuss further.\n"
  },
  {
    "path": ".github/actions/repo_access/action.yaml",
    "content": "name: 'Setup repo access'\ndescription: 'Setups authenticate to GitHub repos'\ninputs:\n  DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB:\n    required: true\n    description: \"DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB secret\"\noutputs: {}\nruns:\n  using: \"composite\"\n  steps:\n    - name: Configure jetstack/venafi-connection-lib repo pull access\n      shell: bash\n      run: |\n        mkdir ~/.ssh\n        chmod 700 ~/.ssh\n        \n        echo \"${{ inputs.DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB }}\" > ~/.ssh/venafi_connection_lib_id\n        chmod 600 ~/.ssh/venafi_connection_lib_id\n\n        cat <<EOT >> ~/.ssh/config          \n        Host venafi-connection-lib.github.com\n        HostName github.com\n        IdentityFile ~/.ssh/venafi_connection_lib_id\n        IdentitiesOnly yes\n        EOT\n        \n        cat <<EOT >> ~/.gitconfig\n        [url \"git@venafi-connection-lib.github.com:jetstack/venafi-connection-lib\"]\n          insteadOf = https://github.com/jetstack/venafi-connection-lib\n        EOT\n\n        echo \"GOPRIVATE=github.com/jetstack/venafi-connection-lib\" >> $GITHUB_ENV\n"
  },
  {
    "path": ".github/chainguard/make-self-upgrade.sts.yaml",
    "content": "# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.\n# Edit https://github.com/cert-manager/makefile-modules/blob/main/modules/repository-base/base/.github/chainguard/make-self-upgrade.sts.yaml instead.\n\nissuer: https://token.actions.githubusercontent.com\nsubject_pattern: ^repo:jetstack/jetstack-secure:ref:refs/heads/(main|master)$\n\npermissions:\n  contents: write\n  pull_requests: write\n  workflows: write\n"
  },
  {
    "path": ".github/renovate.json5",
    "content": "{\n  $schema: 'https://docs.renovatebot.com/renovate-schema.json',\n  extends: [\n    'github>cert-manager/makefile-modules:renovate-config.json5',\n  ],\n}\n"
  },
  {
    "path": ".github/workflows/govulncheck.yaml",
    "content": "# This file is MANUALLY maintained, but was originally based on the makefile-modules govulncheck workflow. See the original:\n# https://github.com/cert-manager/makefile-modules/blob/main/modules/go/base/.github/workflows/govulncheck.yaml\n\n# This file is separated from the upstream file so we can add additional auth for pulling\n# private dependencies. Govulncheck doesn't seem to be able to support skipping private\n# dependencies.\n\n# Run govulncheck at midnight every night on the main branch,\n# to alert us to recent vulnerabilities which affect the Go code in this\n# project.\nname: govulncheck\non:\n  workflow_dispatch: {}\n  schedule:\n    - cron: '0 0 * * *'\n\npermissions:\n  contents: read\n\njobs:\n  govulncheck:\n    runs-on: ubuntu-latest\n\n    if: github.repository == 'jetstack/jetstack-secure'\n\n    steps:\n      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0\n        # Adding `fetch-depth: 0` makes sure tags are also fetched. We need\n        # the tags so `git describe` returns a valid version.\n        # see https://github.com/actions/checkout/issues/701 for extra info about this option\n        with: { fetch-depth: 0 }\n\n      # NOTE: This step is the change from the upstream workflow.\n      # We need credentials to pull the private dependency.\n      - uses: ./.github/actions/repo_access\n        with:\n          DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB: ${{ secrets.DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB }}\n\n      - id: go-version\n        run: |\n          make print-go-version >> \"$GITHUB_OUTPUT\"\n\n      - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0\n        with:\n          go-version: ${{ steps.go-version.outputs.result }}\n\n      - run: make verify-govulncheck\n"
  },
  {
    "path": ".github/workflows/make-self-upgrade.yaml",
    "content": "# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.\n# Edit https://github.com/cert-manager/makefile-modules/blob/main/modules/repository-base/base/.github/workflows/make-self-upgrade.yaml instead.\n\nname: make-self-upgrade\nconcurrency: make-self-upgrade\non:\n  workflow_dispatch: {}\n  schedule:\n    - cron: '0 0 * * *'\n\npermissions:\n  contents: read\n\njobs:\n  self_upgrade:\n    runs-on: ubuntu-latest\n\n    if: github.repository == 'jetstack/jetstack-secure'\n\n    permissions:\n      id-token: write\n    \n    env:\n      SOURCE_BRANCH: \"${{ github.ref_name }}\"\n      SELF_UPGRADE_BRANCH: \"self-upgrade-${{ github.ref_name }}\"\n\n    steps:\n      - name: Fail if branch is not head of branch.\n        if: ${{ !startsWith(github.ref, 'refs/heads/') && env.SOURCE_BRANCH != '' && env.SELF_UPGRADE_BRANCH != '' }}\n        run: |\n          echo \"This workflow should not be run on a non-branch-head.\"\n          exit 1\n\n      - name: Octo STS Token Exchange\n        uses: octo-sts/action@f603d3be9d8dd9871a265776e625a27b00effe05 # v1.1.1\n        id: octo-sts\n        with:\n          scope: 'jetstack/jetstack-secure'\n          identity: make-self-upgrade\n\n      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2\n        # Adding `fetch-depth: 0` makes sure tags are also fetched. 
We need\n        # the tags so `git describe` returns a valid version.\n        # see https://github.com/actions/checkout/issues/701 for extra info about this option\n        with:\n          fetch-depth: 0\n          token: ${{ steps.octo-sts.outputs.token }}\n\n      - id: go-version\n        run: |\n          make print-go-version >> \"$GITHUB_OUTPUT\"\n\n      - uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0\n        with:\n          go-version: ${{ steps.go-version.outputs.result }}\n\n      - run: |\n          git checkout -B \"$SELF_UPGRADE_BRANCH\"\n\n      - run: |\n          make -j upgrade-klone\n          make -j generate\n\n      - id: is-up-to-date\n        shell: bash\n        run: |\n          git_status=$(git status -s)\n          is_up_to_date=\"true\"\n          if [ -n \"$git_status\" ]; then\n              is_up_to_date=\"false\"\n              echo \"The following changes will be committed:\"\n              echo \"$git_status\"\n          fi\n          echo \"result=$is_up_to_date\" >> \"$GITHUB_OUTPUT\"\n\n      - if: ${{ steps.is-up-to-date.outputs.result != 'true' }}\n        run: |\n          git config --global user.name \"cert-manager-bot\"\n          git config --global user.email \"cert-manager-bot@users.noreply.github.com\"\n          git add -A && git commit -m \"BOT: run 'make upgrade-klone' and 'make generate'\" --signoff\n          git push -f origin \"$SELF_UPGRADE_BRANCH\"\n\n      - if: ${{ steps.is-up-to-date.outputs.result != 'true' }}\n        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0\n        with:\n          github-token: ${{ steps.octo-sts.outputs.token }}\n          script: |\n            const { repo, owner } = context.repo;\n            const pulls = await github.rest.pulls.list({\n              owner: owner,\n              repo: repo,\n              head: owner + ':' + process.env.SELF_UPGRADE_BRANCH,\n              base: process.env.SOURCE_BRANCH,\n        
      state: 'open',\n            });\n            \n            if (pulls.data.length < 1) {\n              const result = await github.rest.pulls.create({\n                title: '[CI] Merge ' + process.env.SELF_UPGRADE_BRANCH + ' into ' + process.env.SOURCE_BRANCH,\n                owner: owner,\n                repo: repo,\n                head: process.env.SELF_UPGRADE_BRANCH,\n                base: process.env.SOURCE_BRANCH,\n                body: [\n                  'This PR is auto-generated to bump the Makefile modules.',\n                ].join('\\n'),\n              });\n              await github.rest.issues.addLabels({\n                owner,\n                repo,\n                issue_number: result.data.number,\n                labels: ['ok-to-test', 'skip-review', 'release-note-none', 'kind/cleanup']\n              });\n            }\n"
  },
  {
    "path": ".github/workflows/release.yml",
    "content": "name: release\non:\n  push:\n    tags:\n      - \"v*\"\n\nenv:\n  VERSION: ${{ github.ref_name }}\n\njobs:\n  build_and_push:\n    runs-on: ubuntu-latest\n\n    permissions:\n      contents: read # needed for checkout\n      id-token: write # needed for keyless signing & google auth\n\n    steps:\n      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1\n\n      - uses: ./.github/actions/repo_access\n        with:\n          DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB: ${{ secrets.DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB }}\n\n      - id: go-version\n        run: |\n          make print-go-version >> \"$GITHUB_OUTPUT\"\n\n      - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0\n        with:\n          registry: quay.io\n          username: ${{ secrets.QUAY_USERNAME }}\n          password: ${{ secrets.QUAY_PASSWORD }}\n\n      - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0\n        with:\n          go-version: ${{ steps.go-version.outputs.result }}\n\n      - id: release\n        run: make release ark-release ngts-release\n\n    outputs:\n      RELEASE_OCI_PREFLIGHT_IMAGE: ${{ steps.release.outputs.RELEASE_OCI_PREFLIGHT_IMAGE }}\n      RELEASE_OCI_PREFLIGHT_TAG: ${{ steps.release.outputs.RELEASE_OCI_PREFLIGHT_TAG }}\n      RELEASE_HELM_CHART_IMAGE: ${{ steps.release.outputs.RELEASE_HELM_CHART_IMAGE }}\n      RELEASE_HELM_CHART_VERSION: ${{ steps.release.outputs.RELEASE_HELM_CHART_VERSION }}\n      ARK_IMAGE: ${{ steps.release.outputs.ARK_IMAGE }}\n      ARK_IMAGE_TAG: ${{ steps.release.outputs.ARK_IMAGE_TAG }}\n      ARK_IMAGE_DIGEST: ${{ steps.release.outputs.ARK_IMAGE_DIGEST }}\n      ARK_CHART: ${{ steps.release.outputs.ARK_CHART }}\n      ARK_CHART_TAG: ${{ steps.release.outputs.ARK_CHART_TAG }}\n      ARK_CHART_DIGEST: ${{ steps.release.outputs.ARK_CHART_DIGEST }}\n      NGTS_IMAGE: ${{ steps.release.outputs.NGTS_IMAGE }}\n      NGTS_IMAGE_TAG: ${{ 
steps.release.outputs.NGTS_IMAGE_TAG }}\n      NGTS_IMAGE_DIGEST: ${{ steps.release.outputs.NGTS_IMAGE_DIGEST }}\n      NGTS_CHART: ${{ steps.release.outputs.NGTS_CHART }}\n      NGTS_CHART_TAG: ${{ steps.release.outputs.NGTS_CHART_TAG }}\n      NGTS_CHART_DIGEST: ${{ steps.release.outputs.NGTS_CHART_DIGEST }}\n\n  github_release:\n    runs-on: ubuntu-latest\n\n    needs: build_and_push\n\n    permissions:\n      contents: write # needed for creating a PR\n      pull-requests: write # needed for creating a PR\n\n    steps:\n      - run: |\n          touch .notes-file\n          echo \"OCI_PREFLIGHT_IMAGE: ${{ needs.build_and_push.outputs.RELEASE_OCI_PREFLIGHT_IMAGE }}\" >> .notes-file\n          echo \"OCI_PREFLIGHT_TAG: ${{ needs.build_and_push.outputs.RELEASE_OCI_PREFLIGHT_TAG }}\" >> .notes-file\n          echo \"HELM_CHART_IMAGE: ${{ needs.build_and_push.outputs.RELEASE_HELM_CHART_IMAGE }}\" >> .notes-file\n          echo \"HELM_CHART_VERSION: ${{ needs.build_and_push.outputs.RELEASE_HELM_CHART_VERSION }}\" >> .notes-file\n          echo \"ARK_IMAGE: ${{ needs.build_and_push.outputs.ARK_IMAGE }}\" >> .notes-file\n          echo \"ARK_IMAGE_TAG: ${{ needs.build_and_push.outputs.ARK_IMAGE_TAG }}\" >> .notes-file\n          echo \"ARK_IMAGE_DIGEST: ${{ needs.build_and_push.outputs.ARK_IMAGE_DIGEST }}\" >> .notes-file\n          echo \"ARK_CHART: ${{ needs.build_and_push.outputs.ARK_CHART }}\" >> .notes-file\n          echo \"ARK_CHART_TAG: ${{ needs.build_and_push.outputs.ARK_CHART_TAG }}\" >> .notes-file\n          echo \"ARK_CHART_DIGEST: ${{ needs.build_and_push.outputs.ARK_CHART_DIGEST }}\" >> .notes-file\n          echo \"NGTS_IMAGE: ${{ needs.build_and_push.outputs.NGTS_IMAGE }}\" >> .notes-file\n          echo \"NGTS_IMAGE_TAG: ${{ needs.build_and_push.outputs.NGTS_IMAGE_TAG }}\" >> .notes-file\n          echo \"NGTS_IMAGE_DIGEST: ${{ needs.build_and_push.outputs.NGTS_IMAGE_DIGEST }}\" >> .notes-file\n          echo \"NGTS_CHART: ${{ 
needs.build_and_push.outputs.NGTS_CHART }}\" >> .notes-file\n          echo \"NGTS_CHART_TAG: ${{ needs.build_and_push.outputs.NGTS_CHART_TAG }}\" >> .notes-file\n          echo \"NGTS_CHART_DIGEST: ${{ needs.build_and_push.outputs.NGTS_CHART_DIGEST }}\" >> .notes-file\n\n      - env:\n          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n        run: |\n          gh release create \"$VERSION\" \\\n            --repo=\"$GITHUB_REPOSITORY\" \\\n            --title=\"${VERSION}\" \\\n            --draft \\\n            --verify-tag \\\n            --notes-file .notes-file\n"
  },
  {
    "path": ".github/workflows/tests.yaml",
    "content": "name: tests\non:\n  push:\n    branches: [master]\n  pull_request: {}\njobs:\n  verify:\n    runs-on: ubuntu-latest\n    timeout-minutes: 15\n\n    steps:\n      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1\n        # Adding `fetch-depth: 0` makes sure tags are also fetched. We need\n        # the tags so `git describe` returns a valid version.\n        # see https://github.com/actions/checkout/issues/701 for extra info about this option\n        with: { fetch-depth: 0 }\n\n      - uses: ./.github/actions/repo_access\n        with:\n          DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB: ${{ secrets.DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB }}\n\n      - id: go-version\n        run: |\n          make print-go-version >> \"$GITHUB_OUTPUT\"\n\n      - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0\n        with:\n          go-version: ${{ steps.go-version.outputs.result }}\n\n      - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0\n        with:\n          path: _bin/downloaded\n          key: downloaded-${{ runner.os }}-${{ hashFiles('klone.yaml') }}-verify\n\n      - run: make -j verify\n\n  test:\n    runs-on: ubuntu-latest\n    timeout-minutes: 15\n\n    permissions:\n      contents: read # needed for checkout\n      id-token: write # needed for google auth\n\n    steps:\n      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1\n        # Adding `fetch-depth: 0` makes sure tags are also fetched. 
We need\n        # the tags so `git describe` returns a valid version.\n        # see https://github.com/actions/checkout/issues/701 for extra info about this option\n        with: { fetch-depth: 0 }\n\n      - uses: ./.github/actions/repo_access\n        with:\n          DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB: ${{ secrets.DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB }}\n\n      - id: go-version\n        run: |\n          make print-go-version >> \"$GITHUB_OUTPUT\"\n\n      - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0\n        with:\n          go-version: ${{ steps.go-version.outputs.result }}\n\n      - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0\n        with:\n          path: _bin/downloaded\n          key: downloaded-${{ runner.os }}-${{ hashFiles('klone.yaml') }}-test-unit\n\n      # NB: helm unit tests will be run by \"make verify\", so we don't run it here\n      - run: make -j test-unit\n        env:\n          # These environment variables are required to run the CyberArk client integration tests\n          ARK_DISCOVERY_API: https://platform-discovery.integration-cyberark.cloud/\n          ARK_SUBDOMAIN: ${{ secrets.ARK_SUBDOMAIN }}\n          ARK_USERNAME: ${{ secrets.ARK_USERNAME }}\n          ARK_SECRET: ${{ secrets.ARK_SECRET }}\n\n  ark-test-e2e:\n    # TEMPORARY: require an explicit label to test disco-agent until the test environment fixes a recurring issue\n    # where the e2e fails with a 400 error relating to \"conflicting tagging values\"\n    # The test is flaky, not broken and re-running eventually makes it pass - but that delays progress on\n    # other unrelated work.\n    if: contains(github.event.pull_request.labels.*.name, 'test-ark')\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1\n        # Adding `fetch-depth: 0` makes sure tags are also fetched. 
We need\n        # the tags so `git describe` returns a valid version.\n        # see https://github.com/actions/checkout/issues/701 for extra info about this option\n        with: { fetch-depth: 0 }\n\n      - uses: ./.github/actions/repo_access\n        with:\n          DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB: ${{ secrets.DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB }}\n\n      - id: go-version\n        run: |\n          make print-go-version >> \"$GITHUB_OUTPUT\"\n\n      - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0\n        with:\n          go-version: ${{ steps.go-version.outputs.result }}\n\n      - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0\n        with:\n          path: _bin/downloaded\n          key: downloaded-${{ runner.os }}-${{ hashFiles('klone.yaml') }}-test-unit\n\n      - run: make -j ark-test-e2e\n        env:\n          OCI_BASE: ${{ secrets.ARK_OCI_BASE }}\n          # These environment variables are required to connect to CyberArk Disco APIs\n          ARK_DISCOVERY_API: https://platform-discovery.integration-cyberark.cloud/\n          ARK_SUBDOMAIN: ${{ secrets.ARK_SUBDOMAIN }}\n          ARK_USERNAME: ${{ secrets.ARK_USERNAME }}\n          ARK_SECRET: ${{ secrets.ARK_SECRET }}\n\n  ngts-test-e2e:\n    # TEMPORARY: require an explicit label to test NGTS until we have a stable test environment\n    if: contains(github.event.pull_request.labels.*.name, 'test-ngts')\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1\n        # Adding `fetch-depth: 0` makes sure tags are also fetched. 
We need\n        # the tags so `git describe` returns a valid version.\n        # see https://github.com/actions/checkout/issues/701 for extra info about this option\n        with: { fetch-depth: 0 }\n\n      - uses: ./.github/actions/repo_access\n        with:\n          DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB: ${{ secrets.DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB }}\n\n      - id: go-version\n        run: |\n          make print-go-version >> \"$GITHUB_OUTPUT\"\n\n      - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0\n        with:\n          go-version: ${{ steps.go-version.outputs.result }}\n\n      - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0\n        with:\n          path: _bin/downloaded\n          key: downloaded-${{ runner.os }}-${{ hashFiles('klone.yaml') }}-test-unit\n\n      - run: make -j ngts-test-e2e\n        env:\n          OCI_BASE: ${{ secrets.NGTS_OCI_BASE }}\n          NGTS_CLIENT_ID: ${{ secrets.NGTS_CLIENT_ID }}\n          NGTS_PRIVATE_KEY: ${{ secrets.NGTS_PRIVATE_KEY }}\n          NGTS_TSG_ID: ${{ secrets.NGTS_TSG_ID }}\n\n  test-e2e:\n    if: contains(github.event.pull_request.labels.*.name, 'test-e2e')\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1\n        # Adding `fetch-depth: 0` makes sure tags are also fetched. 
We need\n        # the tags so `git describe` returns a valid version.\n        # see https://github.com/actions/checkout/issues/701 for extra info about this option\n        with: { fetch-depth: 0 }\n\n      - uses: ./.github/actions/repo_access\n        with:\n          DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB: ${{ secrets.DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB }}\n\n      - name: Authenticate to Google Cloud\n        uses: google-github-actions/auth@7c6bc770dae815cd3e89ee6cdf493a5fab2cc093 # v3.0.0\n        with:\n          credentials_json: '${{ secrets.GCP_SA_KEY }}'\n\n      - name: Set up gcloud\n        uses: google-github-actions/setup-gcloud@aa5489c8933f4cc7a4f7d45035b3b1440c9c10db # v3.0.1\n        with:\n          install_components: \"gke-gcloud-auth-plugin\"\n          project_id: machineidentitysecurity-jsci-e\n\n      - name: Configure Docker for Google Artifact Registry\n        run: gcloud auth configure-docker europe-west1-docker.pkg.dev\n\n      - id: go-version\n        run: |\n          make print-go-version >> \"$GITHUB_OUTPUT\"\n\n      - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0\n        with:\n          go-version: ${{ steps.go-version.outputs.result }}\n\n      - name: Generate timestamp for cluster name\n        id: timestamp # Give the step an ID to reference its output\n        run: |\n          # Generate a timestamp in the format YYMMDD-HHMMSS.\n          # Extracting from PR name would require sanitization due to GKE cluster naming constraints\n          TIMESTAMP=$(date +'%y%m%d-%H%M%S')\n          CLUSTER_NAME=\"test-secretless-${TIMESTAMP}\"\n          echo \"Generated cluster name: ${CLUSTER_NAME}\"\n          echo \"cluster_name=${CLUSTER_NAME}\" >> $GITHUB_OUTPUT\n\n      - run: |\n          make helm-plugins\n          make -j test-e2e-gke\n        # The VEN_API_KEY_PULL secret is set to my API key (Mladen) for glow.in.the.dark tenant.\n        env:\n          VEN_API_KEY: ${{ 
secrets.VEN_API_KEY_PULL }}\n          VEN_API_KEY_PULL: ${{ secrets.VEN_API_KEY_PULL }}\n          OCI_BASE: europe-west1-docker.pkg.dev/machineidentitysecurity-jsci-e/js-agent-ci-repo\n          VEN_API_HOST: api.venafi.cloud\n          VEN_ZONE: k8s-agent-CI\\Default\n          VEN_VCP_REGION: us\n          CLOUDSDK_CORE_PROJECT: machineidentitysecurity-jsci-e\n          CLOUDSDK_COMPUTE_ZONE: europe-west1-b\n          CLUSTER_NAME: ${{ steps.timestamp.outputs.cluster_name }}\n\n      - name: Delete GKE Cluster\n        # 'always()' - Run this step regardless of success or failure.\n        # '!contains(...)' - AND only run if the list of PR labels DOES NOT contain 'keep-e2e-cluster'.\n        # NOTE: You will have to delete the test cluster manually when finished with debugging or incur costs.\n        if: always() && !contains(github.event.pull_request.labels.*.name, 'keep-e2e-cluster')\n        run: |\n          echo \"Label 'keep-e2e-cluster' not found. Cleaning up GKE cluster ${{ steps.timestamp.outputs.cluster_name }}\"\n          gcloud container clusters delete ${{ steps.timestamp.outputs.cluster_name }} \\\n            --project=machineidentitysecurity-jsci-e \\\n            --zone=europe-west1-b \\\n            --quiet\n"
  },
  {
    "path": ".gitignore",
    "content": "/preflight\n/preflight.yaml\n/builds\n/bundles\n/output\ncredentials.json\n.terraform\nterraform.tfstate\nterraform.tfstate.backup\nbom.xml\npredicate.json\n*.pem\n*.pub\n*.tgz\n\n_bin\n.envrc\n"
  },
  {
    "path": ".golangci.yaml",
    "content": "version: \"2\"\nlinters:\n  default: none\n  exclusions:\n    generated: lax\n    presets: [comments, common-false-positives, legacy, std-error-handling]\n    rules:\n      - linters:\n          - errchkjson\n          - forbidigo\n          - gosec\n          - musttag\n          - nilerr\n          - unparam\n        text: .*\n    paths: [third_party, builtin$, examples$]\n    warn-unused: true\n  settings:\n    staticcheck:\n      checks: [\"all\", \"-ST1000\", \"-ST1001\", \"-ST1003\", \"-ST1005\", \"-ST1012\", \"-ST1016\", \"-ST1020\", \"-ST1021\", \"-ST1022\", \"-QF1001\", \"-QF1003\", \"-QF1008\"]\n  enable:\n    - asasalint\n    - asciicheck\n    - bidichk\n    - bodyclose\n    - canonicalheader\n    - contextcheck\n    - copyloopvar\n    - decorder\n    - dogsled\n    - dupword\n    - durationcheck\n    - errcheck\n    - errchkjson\n    - errname\n    - exhaustive\n    - exptostd\n    - forbidigo\n    - ginkgolinter\n    - gocheckcompilerdirectives\n    - gochecksumtype\n    - gocritic\n    - goheader\n    - goprintffuncname\n    - gosec\n    - gosmopolitan\n    - govet\n    - grouper\n    - importas\n    - ineffassign\n    - interfacebloat\n    - intrange\n    - loggercheck\n    - makezero\n    - mirror\n    - misspell\n    - modernize\n    - musttag\n    - nakedret\n    - nilerr\n    - nilnil\n    - noctx\n    - nosprintfhostport\n    - predeclared\n    - promlinter\n    - protogetter\n    - reassign\n    - sloglint\n    - staticcheck\n    - tagalign\n    - testableexamples\n    - unconvert\n    - unparam\n    - unused\n    - usestdlibvars\n    - usetesting\n    - wastedassign\nformatters:\n  enable: [gci, gofmt]\n  settings:\n    gci:\n      sections:\n        - standard # Standard section: captures all standard packages.\n        - default # Default section: contains all imports that could not be matched to another section type.\n        - localmodule # Local module section: contains all local packages. 
This section is not present unless explicitly enabled.\n        - blank # Blank section: contains all blank imports. This section is not present unless explicitly enabled.\n        - dot # Dot section: contains all dot imports. This section is not present unless explicitly enabled.\n      custom-order: true\n  exclusions:\n    generated: lax\n    paths: [third_party, builtin$, examples$]\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "# Contributing to Discovery Agent\n\nThank you for your interest in contributing! This document provides guidelines and instructions for contributing.\n\nNote that this repository holds two separate components:\n\n- disco-agent: For CyberArk DisCo\n- venafi-kubernetes-agent: For TLSPK / Certificate Manager SaaS\n\n## Table of Contents\n\n- [Getting Started](#getting-started)\n- [Development Environment](#development-environment)\n- [Making Changes](#making-changes)\n- [Testing](#testing)\n- [Submitting a Pull Request](#submitting-a-pull-request)\n- [Code Review Process](#code-review-process)\n- [Additional Resources](#additional-resources)\n\n### Prerequisites\n\nBefore you begin, ensure you have the following installed:\n\n- [Go](https://golang.org/doc/install) (version specified in `go.mod`)\n- [Make](https://www.gnu.org/software/make/)\n- [Git](https://git-scm.com/)\n- [Docker](https://docs.docker.com/get-docker/) (for building container images)\n\nTo check which Go version will be used:\n\n```bash\nmake which-go\n```\n\nIt's also possible to use a vendored version of Go, via `make vendor-go`.\n\n### Repository Tooling\n\nMost of the setup logic for provisioning tooling and for handling builds and testing\nis defined in Makefile logic.\n\nSpecifically, `the make/_shared` directory contains shared Makefile logic derived from\nthe cert-manager [makefile-modules](https://github.com/cert-manager/makefile-modules/) project.\n\n### Setting Up Your Development Environment\n\n1. **Fork the repository** on GitHub\n\n2. **Clone your fork:**\n\n   ```bash\n   git clone git@github.com:YOUR-USERNAME/jetstack-secure.git\n   cd jetstack-secure\n   ```\n\n3. **Add the upstream remote:**\n\n   ```bash\n   git remote add upstream git@github.com:jetstack/jetstack-secure.git\n   ```\n\n4. 
**Run initial verification:**\n\n   ```bash\n   make verify\n   ```\n\n   This ensures your environment is set up correctly.\n\n## Development Environment\n\n### Local Execution\n\nTo build and run the agent locally:\n\n```bash\ngo run main.go agent --agent-config-file ./path/to/agent/config/file.yaml -p 0h1m0s\n```\n\nExample configuration files are available:\n- [agent.yaml](./agent.yaml)\n- [examples/one-shot-secret.yaml](./examples/one-shot-secret.yaml)\n- [examples/cert-manager-agent.yaml](./examples/cert-manager-agent.yaml)\n\nYou can also run a local echo server to monitor agent requests:\n\n```bash\ngo run main.go echo\n```\n\n### Useful Make Targets\n\n- `make help` - Show all available make targets\n- `make verify` - Run all verification checks (linting, formatting, etc.)\n- `make test-unit` - Run unit tests\n- `make test-helm` - Run Helm chart tests\n- `make generate` - Generate code, documentation, and other artifacts\n- `make oci-build-preflight` - Build container image\n- `make clean` - Clean all temporary files\n\n## Making Changes\n\n### Creating a Branch\n\nAlways create a new branch for your changes:\n\n```bash\ngit checkout -b feature/your-feature-name\n```\n\nUse descriptive branch names:\n- `feature/` for new features\n- `fix/` for bug fixes\n- `docs/` for documentation changes\n- `refactor/` for refactoring\n\n### Code Style\n\nThis project follows standard Go conventions:\n\n- Run `make verify-golangci-lint` to check your code\n- Run `make fix-golangci-lint` to automatically fix some issues\n- Ensure all code is formatted with `gofmt`\n- Follow the [Effective Go](https://golang.org/doc/effective_go) guidelines\n- Most of the conventions are enforced by linters, and violations will prevent code being merged\n\n### Committing Changes\n\n1. **Stage your changes:**\n\n   ```bash\n   git add .\n   ```\n\n2. **Run verification before committing:**\n\n   ```bash\n   make verify\n   ```\n\n3. 
**Commit with a descriptive message:**\n\n   ```bash\n   git commit -m \"Brief description of your changes\"\n   ```\n\n   Write clear commit messages:\n   - Use the imperative mood (\"Add feature\" not \"Added feature\")\n   - Keep the first line under 72 characters\n   - Add additional context in the body if needed\n\n## Testing\n\n### Running Tests Locally\n\nBefore submitting a PR, ensure all tests pass:\n\n```bash\n# Run unit tests\nmake test-unit\n\n# Run Helm tests\nmake test-helm\n\n# Run all verification checks\nmake verify\n```\n\n### End-to-End Tests\n\nE2E tests run automatically in CI when you add specific labels to your PR:\n\n- Add the `test-e2e` label to trigger GKE-based E2E tests\n- Add the `keep-e2e-cluster` label if you need to keep the cluster for debugging (remember to delete it manually afterward to avoid costs)\n\nThe E2E test script is located at [hack/e2e/test.sh](./hack/e2e/test.sh).\n\n### Writing Tests\n\n- Add unit tests for all new functionality\n- Place tests in `*_test.go` files alongside the code they test\n- Use the [testify](https://github.com/stretchr/testify) library for assertions\n- Aim for meaningful test coverage, not just high percentages\n\n## Submitting a Pull Request\n\n1. **Push your branch to your fork:**\n\n   ```bash\n   git push origin feature/your-feature-name\n   ```\n\n2. **Create a Pull Request** on GitHub from your fork to the `master` branch of `jetstack/jetstack-secure`\n\n3. **Fill out the PR description** with:\n   - Clear description of the changes\n   - Related issue numbers (if applicable)\n   - Testing instructions\n   - Any breaking changes or special considerations\n\n4. 
**Ensure CI passes:**\n   - All tests must pass\n   - Code must pass verification / linting checks\n   - No merge conflicts\n\n## Code Review Process\n\n### For All Contributors\n\n- PRs require approval before merging\n- Keep PRs focused and reasonably sized\n- Update your branch if `master` has moved forward:\n\n  ```bash\n  git fetch upstream\n  git rebase upstream/master\n  git push --force-with-lease origin feature/your-feature-name\n  ```\n\n### For CyberArk Contributors\n\n**Contributors from inside CyberArk should reach out to the cert-manager team for reviews for PRs which are passing CI.**\n\nThe cert-manager team maintains this project and will provide code reviews and guidance for merging changes.\n\n## Additional Resources\n\n- [Project Documentation](https://docs.cyberark.com/mis-saas/vaas/k8s-components/c-tlspk-agent-overview/)\n- [Issue Tracker](https://github.com/jetstack/jetstack-secure/issues)\n- [Release Process](./RELEASE.md)\n- [cert-manager Community](https://cert-manager.io/docs/contributing/)\n\n## Getting Help\n\nIf you need help or have questions:\n\n1. Check existing [issues](https://github.com/jetstack/jetstack-secure/issues) and [documentation](https://docs.cyberark.com/mis-saas/vaas/k8s-components/c-tlspk-agent-overview/)\n2. Open a new issue with the `question` label\n3. For CyberArk contributors, reach out to the cert-manager team\n\n## License\n\nBy contributing, you agree that your contributions will be licensed under the license in the LICENSE file in the root directory of this repository.\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "LICENSES",
    "content": "This LICENSES file is generated by the `licenses` module in makefile-modules[0].\n\nThe licenses below the \"---\" are determined by the go-licenses tool[1].\n\nThe aim of this file is to collect the licenses of all dependencies, and provide\na single source of truth for licenses used by this project.\n\n## For Developers\n\nIf CI reports that this file is out of date, you should be careful to check that the\nnew licenses are acceptable for this project before running `make generate-go-licenses`\nto update this file.\n\nAcceptable licenses are those allowlisted by the CNCF[2].\n\nYou MUST NOT add any new dependencies whose licenses are not allowlisted by the CNCF,\nor which do not have an explicit license exception[3].\n\n## For Users\n\nIf this file was included in a release artifact, it is a snapshot of the licenses of all dependencies at the time of the release.\n\nYou can retrieve the actual license text by following these steps:\n\n1. Find the dependency name in this file\n2. Go to the source code repository of this project, and go to the tag corresponding to this release.\n3. Find the exact version of the dependency in the `go.mod` file\n4. 
Search for the dependency at the correct version in the [Go package index](https://pkg.go.dev/).\n\n## Links\n\n[0]: https://github.com/cert-manager/makefile-modules/\n[1]: https://github.com/google/go-licenses\n[2]: https://github.com/cncf/foundation/blob/db4179134ebe7fa00b140a050c19147db808b6fa/policies-guidance/allowed-third-party-license-policy.md#cncf-allowlist-license-policy\n[3]: https://github.com/cncf/foundation/blob/db4179134ebe7fa00b140a050c19147db808b6fa/license-exceptions/README.md\n\n---\n\ncel.dev/expr,Apache-2.0\ngithub.com/Khan/genqlient/graphql,MIT\ngithub.com/Venafi/vcert/v5,Apache-2.0\ngithub.com/antlr4-go/antlr/v4,BSD-3-Clause\ngithub.com/aymerick/douceur,MIT\ngithub.com/beorn7/perks/quantile,MIT\ngithub.com/blang/semver/v4,MIT\ngithub.com/cenkalti/backoff/v5,MIT\ngithub.com/cespare/xxhash/v2,MIT\ngithub.com/davecgh/go-spew/spew,ISC\ngithub.com/emicklei/go-restful/v3,MIT\ngithub.com/evanphx/json-patch/v5,BSD-3-Clause\ngithub.com/fatih/color,MIT\ngithub.com/fsnotify/fsnotify,BSD-3-Clause\ngithub.com/fxamacker/cbor/v2,MIT\ngithub.com/go-http-utils/headers,MIT\ngithub.com/go-logr/logr,Apache-2.0\ngithub.com/go-logr/zapr,Apache-2.0\ngithub.com/go-openapi/jsonpointer,Apache-2.0\ngithub.com/go-openapi/jsonreference,Apache-2.0\ngithub.com/go-openapi/swag,Apache-2.0\ngithub.com/go418/concurrentcache,Apache-2.0\ngithub.com/go418/concurrentcache/logger,Apache-2.0\ngithub.com/gogo/protobuf,BSD-3-Clause\ngithub.com/golang-jwt/jwt/v4,MIT\ngithub.com/golang-jwt/jwt/v5,MIT\ngithub.com/google/btree,Apache-2.0\ngithub.com/google/cel-go,Apache-2.0\ngithub.com/google/cel-go,BSD-3-Clause\ngithub.com/google/gnostic-models,Apache-2.0\ngithub.com/google/uuid,BSD-3-Clause\ngithub.com/gorilla/css/scanner,BSD-3-Clause\ngithub.com/gorilla/websocket,BSD-2-Clause\ngithub.com/hashicorp/errwrap,MPL-2.0\ngithub.com/hashicorp/go-multierror,MPL-2.0\ngithub.com/josharian/intern,MIT\ngithub.com/json-iterator/go,MIT\ngithub.com/lestrrat-go/blackmagic,MIT\ngithub.com/lestrrat-go/htt
pcc,MIT\ngithub.com/lestrrat-go/httprc/v3,MIT\ngithub.com/lestrrat-go/jwx/v3,MIT\ngithub.com/lestrrat-go/option/v2,MIT\ngithub.com/mailru/easyjson,MIT\ngithub.com/mattn/go-colorable,MIT\ngithub.com/mattn/go-isatty,MIT\ngithub.com/microcosm-cc/bluemonday,BSD-3-Clause\ngithub.com/modern-go/concurrent,Apache-2.0\ngithub.com/modern-go/reflect2,Apache-2.0\ngithub.com/munnerz/goautoneg,BSD-3-Clause\ngithub.com/pkg/errors,BSD-2-Clause\ngithub.com/pmezard/go-difflib/difflib,BSD-3-Clause\ngithub.com/pmylund/go-cache,MIT\ngithub.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil,BSD-3-Clause\ngithub.com/prometheus/client_golang/prometheus,Apache-2.0\ngithub.com/prometheus/client_model/go,Apache-2.0\ngithub.com/prometheus/common,Apache-2.0\ngithub.com/prometheus/procfs,Apache-2.0\ngithub.com/sosodev/duration,MIT\ngithub.com/spf13/cobra,Apache-2.0\ngithub.com/spf13/pflag,BSD-3-Clause\ngithub.com/stoewer/go-strcase,MIT\ngithub.com/stretchr/testify,MIT\ngithub.com/vektah/gqlparser/v2,MIT\ngithub.com/x448/float16,MIT\ngithub.com/youmark/pkcs8,MIT\ngo.opentelemetry.io/otel,Apache-2.0\ngo.opentelemetry.io/otel/trace,Apache-2.0\ngo.uber.org/multierr,MIT\ngo.uber.org/zap,MIT\ngo.yaml.in/yaml/v2,Apache-2.0\ngo.yaml.in/yaml/v3,MIT\ngolang.org/x/crypto,BSD-3-Clause\ngolang.org/x/exp,BSD-3-Clause\ngolang.org/x/net,BSD-3-Clause\ngolang.org/x/oauth2,BSD-3-Clause\ngolang.org/x/sync,BSD-3-Clause\ngolang.org/x/sys,BSD-3-Clause\ngolang.org/x/term,BSD-3-Clause\ngolang.org/x/text,BSD-3-Clause\ngolang.org/x/time/rate,BSD-3-Clause\ngomodules.xyz/jsonpatch/v2,Apache-2.0\ngoogle.golang.org/genproto/googleapis/api/expr/v1alpha1,Apache-2.0\ngoogle.golang.org/genproto/googleapis/rpc/status,Apache-2.0\ngoogle.golang.org/protobuf,BSD-3-Clause\ngopkg.in/evanphx/json-patch.v4,BSD-3-Clause\ngopkg.in/inf.v0,BSD-3-Clause\ngopkg.in/ini.v1,Apache-2.0\ngopkg.in/yaml.v2,Apache-2.0\ngopkg.in/yaml.v3,MIT\nk8s.io/api,Apache-2.0\nk8s.io/apiextensions-apiserver/pkg,Apache-2.0\nk8s.io/apimachinery/pkg
,Apache-2.0\nk8s.io/apimachinery/third_party/forked/golang,BSD-3-Clause\nk8s.io/apiserver/pkg,Apache-2.0\nk8s.io/client-go,Apache-2.0\nk8s.io/component-base,Apache-2.0\nk8s.io/klog/v2,Apache-2.0\nk8s.io/kube-openapi/pkg,Apache-2.0\nk8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json,BSD-3-Clause\nk8s.io/kube-openapi/pkg/internal/third_party/govalidator,MIT\nk8s.io/kube-openapi/pkg/validation/errors,Apache-2.0\nk8s.io/kube-openapi/pkg/validation/spec,Apache-2.0\nk8s.io/kube-openapi/pkg/validation/strfmt,Apache-2.0\nk8s.io/kube-openapi/pkg/validation/validate,Apache-2.0\nk8s.io/utils,Apache-2.0\nk8s.io/utils/internal/third_party/forked/golang,BSD-3-Clause\nsigs.k8s.io/controller-runtime/pkg,Apache-2.0\nsigs.k8s.io/json,Apache-2.0\nsigs.k8s.io/json,BSD-3-Clause\nsigs.k8s.io/randfill,Apache-2.0\nsigs.k8s.io/structured-merge-diff/v6,Apache-2.0\nsigs.k8s.io/yaml,MIT\nsigs.k8s.io/yaml,Apache-2.0\nsigs.k8s.io/yaml,BSD-3-Clause\n"
  },
  {
    "path": "Makefile",
    "content": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.\n# Edit https://github.com/cert-manager/makefile-modules/blob/main/modules/repository-base/base/Makefile instead.\n\n# NOTE FOR DEVELOPERS: \"How do the Makefiles work and how can I extend them?\"\n#\n# Shared Makefile logic lives in the make/_shared/ directory. The source of truth for these files\n# lies outside of this repository, eg. in the cert-manager/makefile-modules repository.\n#\n# Logic specific to this repository must be defined in the make/00_mod.mk and make/02_mod.mk files:\n#   - The make/00_mod.mk file is included first and contains variable definitions needed by\n#     the shared Makefile logic.\n#   - The make/02_mod.mk file is included later, it can make use of most of the shared targets\n#     defined in the make/_shared/ directory (all targets defined in 00_mod.mk and 01_mod.mk).\n#     This file should be used to define targets specific to this repository.\n\n##################################\n\n# Some modules build their dependencies from variables, we want these to be \n# evaluated at the last possible moment. 
For this we use second expansion to \n# re-evaluate the generate and verify targets a second time.\n#\n# See https://www.gnu.org/software/make/manual/html_node/Secondary-Expansion.html\n.SECONDEXPANSION:\n\n# For details on some of these \"prelude\" settings, see:\n# https://clarkgrubb.com/makefile-style-guide\nMAKEFLAGS += --warn-undefined-variables --no-builtin-rules\nSHELL := /usr/bin/env bash\n# The `--norc` option prevents \"PS1: unbound\" errors.\n# If Bash thinks it is being run with its standard input connected to a network\n# connection (such as via SSH or via Docker), it reads and executes commands\n# from ~/.bashrc, regardless of whether it thinks it is in interactive mode.\n# Bash does not set PS1 in non-interactive environments. But on Ubuntu 24.04 the\n# default /etc/bash.bashrc file assumes that PS1 is set.\n#\n# See https://www.gnu.org/software/bash/manual/bash.html#Invoked-by-remote-shell-daemon\n.SHELLFLAGS := --norc -uo pipefail -c\n.DEFAULT_GOAL := help\n.DELETE_ON_ERROR:\n.SUFFIXES:\nFORCE:\n\nnoop: # do nothing\n\n# Set empty value for MAKECMDGOALS to prevent the \"warning: undefined variable 'MAKECMDGOALS'\"\n# warning from happening when running make without arguments\nMAKECMDGOALS ?=\n\n##################################\n# Host OS and architecture setup #\n##################################\n\n# The reason we don't use \"go env GOOS\" or \"go env GOARCH\" is that the \"go\"\n# binary may not be available in the PATH yet when the Makefiles are\n# evaluated. 
HOST_OS and HOST_ARCH only support Linux, *BSD and macOS (M1\n# and Intel).\nhost_os := $(shell uname -s | tr A-Z a-z)\nhost_arch := $(shell uname -m)\nHOST_OS ?= $(host_os)\nHOST_ARCH ?= $(host_arch)\n\nifeq (x86_64, $(HOST_ARCH))\n\tHOST_ARCH = amd64\nelse ifeq (aarch64, $(HOST_ARCH))\n\t# linux reports the arm64 arch as aarch64\n\tHOST_ARCH = arm64\nendif\n\n##################################\n# Git and versioning information #\n##################################\n\ngit_version := $(shell git describe --tags --always --match='v*' --abbrev=14 --dirty)\nVERSION ?= $(git_version)\nIS_PRERELEASE := $(shell git describe --tags --always --match='v*' --abbrev=0 | grep -q '-' && echo true || echo false)\nGITCOMMIT := $(shell git rev-parse HEAD)\nGITEPOCH := $(shell git show -s --format=%ct HEAD)\n\n##################################\n# Global variables and dirs      #\n##################################\n\nbin_dir := _bin\n\n# The ARTIFACTS environment variable is set by the CI system to a directory\n# where artifacts should be placed. These artifacts are then uploaded to a\n# storage bucket by the CI system (https://docs.prow.k8s.io/docs/components/pod-utilities/).\n# An example of such an artifact is a jUnit XML file containing test results.\n# If the ARTIFACTS environment variable is not set, we default to a local\n# directory in the _bin directory.\nARTIFACTS ?= $(bin_dir)/artifacts\n\n$(bin_dir) $(ARTIFACTS) $(bin_dir)/scratch:\n\tmkdir -p $@\n\n.PHONY: clean\n## Clean all temporary files\n## @category [shared] Tools\nclean:\n\trm -rf $(bin_dir)\n\n##################################\n# Include all the Makefiles      #\n##################################\n\n-include make/00_mod.mk\n-include make/_shared/*/00_mod.mk\n-include make/_shared/*/01_mod.mk\n-include make/02_mod.mk\n-include make/_shared/*/02_mod.mk\n"
  },
  {
    "path": "OWNERS",
    "content": "approvers:\n- j-fuentes\n- wwwil\n- charlieegan3\n- akvilemar\n- james-w\n- tfadeyi\nreviewers:\n- j-fuentes\n- wwwil\n- charlieegan3\n"
  },
  {
    "path": "OWNERS_ALIASES",
    "content": "# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.\n# Edit https://github.com/cert-manager/makefile-modules/blob/main/modules/repository-base/base/OWNERS_ALIASES instead.\n\naliases:\n  cm-maintainers:\n    - munnerz\n    - joshvanl\n    - wallrj\n    - jakexks\n    - maelvls\n    - sgtcodfish\n    - inteon\n    - thatsmrtalbot\n    - erikgb\n    - hjoshi123\n"
  },
  {
    "path": "README.md",
    "content": "# Discovery Agent\n\n[![tests](https://github.com/jetstack/jetstack-secure/actions/workflows/tests.yaml/badge.svg?branch=master&event=push)](https://github.com/jetstack/jetstack-secure/actions/workflows/tests.yaml)\n[![Go Reference](https://pkg.go.dev/badge/github.com/jetstack/jetstack-secure.svg)](https://pkg.go.dev/github.com/jetstack/jetstack-secure)\n[![Go Report Card](https://goreportcard.com/badge/github.com/jetstack/jetstack-secure)](https://goreportcard.com/report/github.com/jetstack/jetstack-secure)\n\n\"The agent\" manages your machine identities across Cloud Native Kubernetes and OpenShift environments and builds a detailed view of the enterprise security posture.\n\n## Installation\n\nPlease [review the documentation](https://docs.cyberark.com/mis-saas/vaas/k8s-components/c-tlspk-agent-overview/) for the agent.\n\nDetailed installation instructions are available for a variety of methods.\n\n## Local Execution\n\nTo build and run a version from master:\n\n```bash\ngo run main.go agent --agent-config-file ./path/to/agent/config/file.yaml -p 0h1m0s\n```\n\nYou can configure the agent to perform one data gathering loop and output the data to a local file:\n\n```bash\ngo run . 
agent \\\n   --agent-config-file examples/one-shot-secret.yaml \\\n   --one-shot \\\n   --output-path output.json\n```\n\n> Some examples of agent configuration files:\n>\n> - [./agent.yaml](./agent.yaml).\n> - [./examples/one-shot-secret.yaml](./examples/one-shot-secret.yaml).\n> - [./examples/cert-manager-agent.yaml](./examples/cert-manager-agent.yaml).\n\nYou might also want to run a local echo server to monitor requests sent by the agent:\n\n```bash\ngo run main.go echo\n```\n\n## Metrics\n\nThe agent exposes its metrics through a Prometheus server, on port 8081.\n\nThe Prometheus server is disabled by default but can be enabled by passing the `--enable-metrics` flag to the agent binary.\n\nIf you deploy the agent using the venafi-kubernetes-agent Helm chart, the metrics server will be enabled by default, on port 8081.\n\nIf you use the Prometheus Operator, you can use `--set metrics.podmonitor.enabled=true` to deploy a `PodMonitor` resource,\nwhich will add the venafi-kubernetes-agent metrics to your Prometheus server.\n\nThe following metrics are collected:\n\n- Go collector: via the [default registry](https://github.com/prometheus/client_golang/blob/34e02e282dc4a3cb55ca6441b489ec182e654d59/prometheus/registry.go#L60-L63) in Prometheus `client_golang`.\n- Process collector: via the [default registry](https://github.com/prometheus/client_golang/blob/34e02e282dc4a3cb55ca6441b489ec182e654d59/prometheus/registry.go#L60-L63) in Prometheus `client_golang`.\n- Agent metrics: `data_readings_upload_size`: Data readings upload size (in bytes) sent by the in-cluster agent.\n\n## End to end testing\n\nAn end to end test script is available at [./hack/e2e/test.sh](./hack/e2e/test.sh). It is configured to run in CI\nin the tests.yaml GitHub Actions workflow. To run the script you will need to add the `test-e2e` label to the PR.\nThe script creates a cluster in GKE and cleans up after itself unless the `keep-e2e-cluster` label is set on the PR. 
Adding that\nlabel will leave the cluster running for further debugging but it will incur costs so manually delete the cluster when done.\n"
  },
  {
    "path": "RELEASE.md",
    "content": "# Release Process\n\n> [!NOTE]\n> Before starting a release let the docs team know that a release is about to be created so that documentation can be prepared in advance.\n> This is not necessary for pre-releases.\n\nThe release process is semi-automated.\n\n### Step 1: Git Tag and GitHub Release\n\n> [!NOTE]\n>\n> Upon pushing the tag, a GitHub Action will do the following:\n>\n> - Build and publish the container image: `quay.io/jetstack/venafi-agent`,\n> - Build and publish the Helm chart: `oci://quay.io/jetstack/charts/venafi-kubernetes-agent`,\n> - Build and publish the container image: `quay.io/jetstack/disco-agent`,\n> - Build and publish the Helm chart: `oci://quay.io/jetstack/charts/disco-agent`,\n> - Build and publish the container image: `quay.io/jetstack/discovery-agent`,\n> - Build and publish the Helm chart: `oci://quay.io/jetstack/charts/discovery-agent`,\n> - Create a draft GitHub release,\n\n1. Run govulncheck; it's the best indicator that a dependency needs to be upgraded.\n\n   ```bash\n   make verify-govulncheck\n   ```\n\n   Any failures should be treated extremely seriously and patched before release unless you can be absolutely\n   confident it's a false positive.\n\n2. Consider upgrading Go dependencies using `go-mod-upgrade`:\n\n   ```bash\n   go install github.com/oligot/go-mod-upgrade@latest\n   go-mod-upgrade\n   make generate\n   ```\n\n   Once complete, you'll need to create a PR to merge the changes.\n\n3. Open the [tests GitHub Actions workflow][tests-workflow]\n   and verify that it succeeds on the master branch.\n\n4. Create a tag for the new release:\n\n   ```sh\n   export VERSION=v1.1.0\n   git tag --annotate --message=\"Release ${VERSION}\" \"${VERSION}\"\n   git push origin \"${VERSION}\"\n   ```\n\n   This triggers a [release action](https://github.com/jetstack/jetstack-secure/actions/workflows/release.yml).\n\n5. Wait until the release action finishes.\n\n6. 
Navigate to the [GitHub Releases](https://github.com/jetstack/jetstack-secure/releases) page and select the draft release to edit.\n\n   1. Click on “Generate release notes” to automatically compile the changelog.\n   2. Review and refine the generated notes to ensure they’re clear and useful\n      for end users.\n   3. Remove any irrelevant entries, such as “update deps,” “update CI,” “update\n      docs,” or similar internal changes that do not impact user functionality.\n\n7. Publish the release.\n\n8. Inform the `#venafi-kubernetes-agent` channel on Slack that a new version of the Discovery Agent has been released!\n   Consider also messaging the DisCo team at CyberArk (ask in the cert-manager team Slack channel if you don't know who to message)\n\n9. Inform the docs team of the new release so they can update the\n   documentation at <https://docs.cyberark.com/>.\n\n[tests-workflow]: https://github.com/jetstack/jetstack-secure/actions/workflows/tests.yaml?query=branch%3Amaster\n\n## Release Artifact Information\n\nFor context, the new tag will create the following images:\n\n| Image                                                                | Automation                                                                                   |\n| -------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- |\n| `quay.io/jetstack/venafi-agent`                                      | Automatically built by the [release action](.github/workflows/release.yml) on Git tag pushes |\n| `quay.io/jetstack/disco-agent`                                       | Automatically built by the [release action](.github/workflows/release.yml) on Git tag pushes |\n| `quay.io/jetstack/discovery-agent`                                   | Automatically built by the [release action](.github/workflows/release.yml) on Git tag pushes |\n| `registry.venafi.cloud/venafi-agent/venafi-agent`       
             | Automatically mirrored by Harbor Replication rule                                            |\n| `private-registry.venafi.cloud/venafi-agent/venafi-agent`            | Automatically mirrored by Harbor Replication rule                                            |\n| `private-registry.venafi.eu/venafi-agent/venafi-agent`               | Automatically mirrored by Harbor Replication rule                                            |\n| `registry.ngts.paloaltonetworks.com/disco-agent/disco-agent`         | Automatically mirrored by Harbor Replication rule                                            |\n| `registry.ngts.paloaltonetworks.com/discovery-agent/discovery-agent` | Automatically mirrored by Harbor Replication rule                                            |\n\nand the following OCI Helm charts:\n\n| Helm Chart                                                           | Automation                                                                                   |\n| -------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- |\n| `oci://quay.io/jetstack/charts/venafi-kubernetes-agent`              | Automatically built by the [release action](.github/workflows/release.yml) on Git tag pushes |\n| `oci://quay.io/jetstack/charts/disco-agent`                          | Automatically built by the [release action](.github/workflows/release.yml) on Git tag pushes |\n| `oci://quay.io/jetstack/charts/discovery-agent`                      | Automatically built by the [release action](.github/workflows/release.yml) on Git tag pushes |\n| `oci://registry.venafi.cloud/charts/venafi-kubernetes-agent`         | Automatically mirrored by Harbor Replication rule                                            |\n| `oci://private-registry.venafi.cloud/charts/venafi-kubernetes-agent` | Automatically mirrored by Harbor Replication rule                                     
       |\n| `oci://private-registry.venafi.eu/charts/venafi-kubernetes-agent`    | Automatically mirrored by Harbor Replication rule                                            |\n| `oci://registry.ngts.paloaltonetworks.com/charts/disco-agent`        | Automatically mirrored by Harbor Replication rule                                            |\n| `oci://registry.ngts.paloaltonetworks.com/charts/discovery-agent`    | Automatically mirrored by Harbor Replication rule                                            |\n\n### Replication Flows\n\nTODO: These flows are helpful illustrations but describe a process whose source of truth is defined elsewhere. Instead, we should document the replication process where it's defined, in enterprise-builds.\n\nReplication flow for the venafi-kubernetes-agent Helm chart:\n\n```text\nv1.1.0 (Git tag in the jetstack-secure repo)\n └── oci://quay.io/jetstack/charts/venafi-kubernetes-agent --version 1.1.0 (GitHub Actions in the jetstack-secure repo)\n    └── oci://eu.gcr.io/jetstack-secure-enterprise/charts/venafi-kubernetes-agent (Enterprise Builds's GitHub Actions)\n        ├── oci://registry.venafi.cloud/charts/venafi-kubernetes-agent --version 1.1.0 (Harbor Replication)\n        └── oci://private-registry.venafi.cloud/charts/venafi-kubernetes-agent --version 1.1.0 (Harbor Replication)\n        └── oci://private-registry.venafi.eu/charts/venafi-kubernetes-agent --version 1.1.0 (Harbor Replication)\n```\n\nReplication flow for the venafi-kubernetes-agent container image:\n\n```text\nv1.1.0 (Git tag in the jetstack-secure repo)\n └── quay.io/jetstack/venafi-agent:v1.1.0 (GitHub Actions in the jetstack-secure repo)\n     └── eu.gcr.io/jetstack-secure-enterprise/venafi-agent:v1.1.0 (Enterprise Builds's GitHub Actions)\n         ├── registry.venafi.cloud/venafi-agent/venafi-agent:v1.1.0 (Harbor Replication)\n         ├── private-registry.venafi.cloud/venafi-agent/venafi-agent:v1.1.0 (Harbor Replication)\n         └── 
private-registry.venafi.eu/venafi-agent/venafi-agent:v1.1.0 (Harbor Replication)\n```\n\n[public-img-and-chart-replication.tf]: https://gitlab.com/venafi/vaas/delivery/harbor/-/blob/3d114f54092eb44a1deb0edc7c4e8a2d4f855aa2/public-registry/module/subsystems/tlspk/replication.tf\n[private-img-and-chart-replication.tf]: https://gitlab.com/venafi/vaas/delivery/harbor/-/blob/3d114f54092eb44a1deb0edc7c4e8a2d4f855aa2/private-registry/module/subsystems/tlspk/replication.tf\n[release_enterprise_builds.yaml]: https://github.com/jetstack/enterprise-builds/actions/workflows/release_enterprise_builds.yaml\n\n## Step 2: Testing\n\nWhen a release is complete, consider installing it into a cluster and testing it. TODO: provide guidance on doing those tests.\n"
  },
  {
    "path": "agent.yaml",
    "content": "server: \"https://platform.jetstack.io\"\norganization_id: \"my-organization\"\ncluster_id: \"my_cluster\"\nperiod: \"0h1m0s\"\ndata-gatherers:\n  - kind: \"dummy\"\n    name: \"dummy\"\n    config:\n      failed-attempts: 5\n  - kind: \"dummy\"\n    name: \"dummy-fail\"\n    config:\n      always-fail: true\nvenafi-cloud:\n  uploader_id: \"example-id\"\n  upload_path: \"/example/endpoint/path\"\n  "
  },
  {
    "path": "api/agent.go",
    "content": "package api\n\n// AgentMetadata is metadata about the agent.\ntype AgentMetadata struct {\n\tVersion string `json:\"version\"`\n\t// ClusterID is the name of the cluster or host where the agent is running.\n\t// It may send data for other clusters in its data readings.\n\tClusterID string `json:\"cluster_id\"`\n}\n"
  },
  {
    "path": "api/common.go",
    "content": "// Package api provides types for Preflight reports and some common helpers.\npackage api\n\nimport (\n\t\"encoding/json\"\n\t\"time\"\n)\n\n// TimeFormat defines the format used for timestamps across all this API.\nconst TimeFormat = time.RFC3339\n\n// Time is a wrapper around time.Time that overrides how it is marshaled into JSON\ntype Time struct {\n\ttime.Time\n}\n\n// String returns a string representation of the timestamp\nfunc (t Time) String() string {\n\treturn t.Format(TimeFormat)\n}\n\n// MarshalJSON marshals the timestamp with RFC3339 format\nfunc (t Time) MarshalJSON() ([]byte, error) {\n\tstr := t.String()\n\tjsonStr, err := json.Marshal(str)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn jsonStr, nil\n}\n"
  },
  {
    "path": "api/datareading.go",
    "content": "package api\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured\"\n\t\"k8s.io/apimachinery/pkg/version\"\n)\n\n// DataReadingsPost is the payload in the upload request.\ntype DataReadingsPost struct {\n\tAgentMetadata *AgentMetadata `json:\"agent_metadata\"`\n\t// DataGatherTime represents the time that the data readings were gathered\n\tDataGatherTime time.Time      `json:\"data_gather_time\"`\n\tDataReadings   []*DataReading `json:\"data_readings\"`\n}\n\n// DataReading is the output of a DataGatherer.\ntype DataReading struct {\n\t// ClusterID is optional as it can be inferred from the agent\n\t// token when using basic authentication.\n\tClusterID     string `json:\"cluster_id,omitempty\"`\n\tDataGatherer  string `json:\"data-gatherer\"`\n\tTimestamp     Time   `json:\"timestamp\"`\n\tData          any    `json:\"data\"`\n\tSchemaVersion string `json:\"schema_version\"`\n}\n\n// UnmarshalJSON implements the json.Unmarshaler interface for DataReading.\n// The function attempts to decode the Data field into known types in a prioritized order.\n// Empty data is considered an error, because there is no way to discriminate between data types.\n// TODO(wallrj): Add a discriminator field to DataReading to avoid this complex logic.\n// E.g. 
\"data_type\": \"discovery\"|\"dynamic\"\nfunc (o *DataReading) UnmarshalJSON(data []byte) error {\n\tvar tmp struct {\n\t\tClusterID     string          `json:\"cluster_id,omitempty\"`\n\t\tDataGatherer  string          `json:\"data-gatherer\"`\n\t\tTimestamp     Time            `json:\"timestamp\"`\n\t\tData          json.RawMessage `json:\"data\"`\n\t\tSchemaVersion string          `json:\"schema_version\"`\n\t}\n\n\t// Decode the top-level fields of DataReading\n\tif err := jsonUnmarshalStrict(data, &tmp); err != nil {\n\t\treturn fmt.Errorf(\"failed to parse DataReading: %s\", err)\n\t}\n\n\t// Assign top-level fields to the DataReading object\n\to.ClusterID = tmp.ClusterID\n\to.DataGatherer = tmp.DataGatherer\n\to.Timestamp = tmp.Timestamp\n\to.SchemaVersion = tmp.SchemaVersion\n\n\t// Return an error if data is empty\n\tif len(tmp.Data) == 0 || bytes.Equal(tmp.Data, []byte(\"null\")) || bytes.Equal(tmp.Data, []byte(\"{}\")) {\n\t\treturn fmt.Errorf(\"failed to parse DataReading.Data for gatherer %q: empty data\", o.DataGatherer)\n\t}\n\n\t// Define a list of decoding attempts with prioritized types\n\tdataTypes := []struct {\n\t\ttarget any\n\t\tassign func(any)\n\t}{\n\t\t{&OIDCDiscoveryData{}, func(v any) { o.Data = v.(*OIDCDiscoveryData) }},\n\t\t{&DiscoveryData{}, func(v any) { o.Data = v.(*DiscoveryData) }},\n\t\t{&DynamicData{}, func(v any) { o.Data = v.(*DynamicData) }},\n\t}\n\n\t// Attempt to decode the Data field into each type\n\tfor _, dataType := range dataTypes {\n\t\tif err := jsonUnmarshalStrict(tmp.Data, dataType.target); err == nil {\n\t\t\tdataType.assign(dataType.target)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// Return an error if no type matches\n\treturn fmt.Errorf(\"failed to parse DataReading.Data for gatherer %q: unknown type\", o.DataGatherer)\n}\n\n// jsonUnmarshalStrict unmarshals JSON data into the provided interface,\n// disallowing unknown fields to ensure strict adherence to the expected structure.\nfunc jsonUnmarshalStrict(data 
[]byte, v any) error {\n\tdecoder := json.NewDecoder(bytes.NewReader(data))\n\tdecoder.DisallowUnknownFields()\n\treturn decoder.Decode(v)\n}\n\n// GatheredResource wraps the raw k8s resource that is sent to the jetstack secure backend\ntype GatheredResource struct {\n\t// Resource is a reference to a k8s object that was found by the informer\n\t// should be of type unstructured.Unstructured, raw Object\n\tResource  any\n\tDeletedAt Time\n}\n\nfunc (v GatheredResource) MarshalJSON() ([]byte, error) {\n\tdateString := \"\"\n\tif !v.DeletedAt.IsZero() {\n\t\tdateString = v.DeletedAt.Format(TimeFormat)\n\t}\n\n\tdata := struct {\n\t\tResource  any    `json:\"resource\"`\n\t\tDeletedAt string `json:\"deleted_at,omitempty\"`\n\t}{\n\t\tResource:  v.Resource,\n\t\tDeletedAt: dateString,\n\t}\n\n\treturn json.Marshal(data)\n}\n\nfunc (v *GatheredResource) UnmarshalJSON(data []byte) error {\n\tvar tmpResource struct {\n\t\tResource  *unstructured.Unstructured `json:\"resource\"`\n\t\tDeletedAt Time                       `json:\"deleted_at\"`\n\t}\n\n\td := json.NewDecoder(bytes.NewReader(data))\n\td.DisallowUnknownFields()\n\n\tif err := d.Decode(&tmpResource); err != nil {\n\t\treturn err\n\t}\n\tv.Resource = tmpResource.Resource\n\tv.DeletedAt = tmpResource.DeletedAt\n\treturn nil\n}\n\n// DynamicData is the DataReading.Data returned by the k8sdynamic.DataGathererDynamic\n// gatherer\ntype DynamicData struct {\n\t// Items is a list of GatheredResource\n\tItems []*GatheredResource `json:\"items\"`\n}\n\n// DiscoveryData is the DataReading.Data returned by the k8sdiscovery.DataGathererDiscovery\n// gatherer\ntype DiscoveryData struct {\n\t// ClusterID is the unique ID of the Kubernetes cluster which this snapshot was taken from.\n\t// This is sourced from the kube-system namespace UID,\n\t// which is assumed to be stable for the lifetime of the cluster.\n\t// - https://github.com/kubernetes/kubernetes/issues/77487#issuecomment-489786023\n\tClusterID string 
`json:\"cluster_id\"`\n\t// ServerVersion is the version information of the k8s apiserver\n\t// See https://godoc.org/k8s.io/apimachinery/pkg/version#Info\n\tServerVersion *version.Info `json:\"server_version\"`\n}\n\n// OIDCDiscoveryData is the DataReading.Data returned by the oidc.OIDCDiscovery\n// gatherer\ntype OIDCDiscoveryData struct {\n\t// OIDCConfig contains OIDC configuration data from the API server's\n\t// `/.well-known/openid-configuration` endpoint\n\tOIDCConfig map[string]any `json:\"openid_configuration,omitempty\"`\n\t// OIDCConfigError contains any error encountered while fetching the OIDC configuration\n\tOIDCConfigError string `json:\"openid_configuration_error,omitempty\"`\n\n\t// JWKS contains JWKS data from the API server's `/openid/v1/jwks` endpoint\n\tJWKS map[string]any `json:\"jwks,omitempty\"`\n\t// JWKSError contains any error encountered while fetching the JWKS\n\tJWKSError string `json:\"jwks_error,omitempty\"`\n}\n"
  },
  {
    "path": "api/datareading_test.go",
    "content": "package api\n\nimport (\n\t\"encoding/json\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestJSONGatheredResourceDropsEmptyTime(t *testing.T) {\n\tvar resource GatheredResource\n\tbytes, err := json.Marshal(resource)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to marshal %s\", err)\n\t}\n\n\texpected := `{\"resource\":null}`\n\n\tif string(bytes) != expected {\n\t\tt.Fatalf(\"unexpected json \\ngot  %s\\nwant %s\", string(bytes), expected)\n\t}\n}\n\nfunc TestJSONGatheredResourceSetsTimeWhenPresent(t *testing.T) {\n\tvar resource GatheredResource\n\tresource.DeletedAt = Time{time.Date(2021, 3, 29, 0, 0, 0, 0, time.UTC)}\n\tbytes, err := json.Marshal(resource)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to marshal %s\", err)\n\t}\n\n\texpected := `{\"resource\":null,\"deleted_at\":\"2021-03-29T00:00:00Z\"}`\n\n\tif string(bytes) != expected {\n\t\tt.Fatalf(\"unexpected json \\ngot  %s\\nwant %s\", string(bytes), expected)\n\t}\n}\n\n// TestDataReading_UnmarshalJSON tests the UnmarshalJSON method of DataReading\n// with various scenarios including valid and invalid JSON inputs.\nfunc TestDataReading_UnmarshalJSON(t *testing.T) {\n\ttests := []struct {\n\t\tname         string\n\t\tinput        string\n\t\twantDataType any\n\t\texpectError  string\n\t}{\n\t\t{\n\t\t\tname: \"DiscoveryData type\",\n\t\t\tinput: `{\n\t\t\t\t\"cluster_id\": \"61b2db64-fd70-49a6-a257-08397b9b4bae\",\n\t\t\t\t\"data-gatherer\": \"discovery\",\n\t\t\t\t\"timestamp\": \"2024-06-01T12:00:00Z\",\n\t\t\t\t\"data\": {\n                    \"cluster_id\": \"60868ebf-6e47-4184-9bc0-20bb6824e210\",\n\t\t\t\t\t\"server_version\": {\n                        \"major\": \"1\",\n                        \"minor\": \"20\",\n                        \"gitVersion\": \"v1.20.0\"\n                    }\n                },\n\t\t\t\t\"schema_version\": \"v1\"\n\t\t\t}`,\n\t\t\twantDataType: &DiscoveryData{},\n\t\t},\n\t\t{\n\t\t\tname: \"DynamicData 
type\",\n\t\t\tinput: `{\n\t\t\t\t\"cluster_id\": \"69050b54-c61a-4384-95c3-35f890377a67\",\n\t\t\t\t\"data-gatherer\": \"dynamic\",\n\t\t\t\t\"timestamp\": \"2024-06-01T12:00:00Z\",\n\t\t\t\t\"data\": {\"items\": []},\n\t\t\t\t\"schema_version\": \"v1\"\n\t\t\t}`,\n\t\t\twantDataType: &DynamicData{},\n\t\t},\n\t\t{\n\t\t\tname: \"OIDCDiscoveryData type\",\n\t\t\tinput: `{\n\t\t\t\t\"cluster_id\": \"11111111-2222-3333-4444-555555555555\",\n\t\t\t\t\"data-gatherer\": \"oidc\",\n\t\t\t\t\"timestamp\": \"2024-06-01T12:00:00Z\",\n\t\t\t\t\"data\": {\n\t\t\t\t\t\"openid_configuration\": {\"issuer\": \"https://example.com\"},\n\t\t\t\t\t\"jwks\": {\"keys\": []}\n\t\t\t\t},\n\t\t\t\t\"schema_version\": \"v1\"\n\t\t\t}`,\n\t\t\twantDataType: &OIDCDiscoveryData{},\n\t\t},\n\t\t{\n\t\t\tname:        \"Invalid JSON\",\n\t\t\tinput:       `not a json`,\n\t\t\texpectError: \"failed to parse DataReading: invalid character 'o' in literal null (expecting 'u')\",\n\t\t},\n\t\t{\n\t\t\tname: \"Missing data field\",\n\t\t\tinput: `{\n\t\t\t\t\"cluster_id\": \"cc5a0429-8dc4-42c8-8e3a-eece9bca15c3\",\n\t\t\t\t\"data-gatherer\": \"missing-data-field\",\n\t\t\t\t\"timestamp\": \"2024-06-01T12:00:00Z\",\n\t\t\t\t\"schema_version\": \"v1\"\n\t\t\t}`,\n\t\t\texpectError: `failed to parse DataReading.Data for gatherer \"missing-data-field\": empty data`,\n\t\t},\n\t\t{\n\t\t\tname: \"Mismatched data type\",\n\t\t\tinput: `{\n\t\t\t\t\"cluster_id\": \"c272b13e-b19e-4782-833f-d55a305f3c9e\",\n\t\t\t\t\"data-gatherer\": \"unknown-data-type\",\n\t\t\t\t\"timestamp\": \"2024-06-01T12:00:00Z\",\n\t\t\t\t\"data\": \"this should be an object\",\n\t\t\t\t\"schema_version\": \"v1\"\n\t\t\t}`,\n\t\t\texpectError: `failed to parse DataReading.Data for gatherer \"unknown-data-type\": unknown type`,\n\t\t},\n\t\t{\n\t\t\tname: \"Empty data field\",\n\t\t\tinput: `{\n\t\t\t\t\"cluster_id\": \"07909675-113f-4b59-ba5e-529571a191e6\",\n\t\t\t\t\"data-gatherer\": \"empty-data\",\n\t\t\t\t\"timestamp\": 
\"2024-06-01T12:00:00Z\",\n\t\t\t\t\"data\": {},\n\t\t\t\t\"schema_version\": \"v1\"\n\t\t\t}`,\n\t\t\texpectError: `failed to parse DataReading.Data for gatherer \"empty-data\": empty data`,\n\t\t},\n\t\t{\n\t\t\tname: \"Additional field\",\n\t\t\tinput: `{\n\t\t\t\t\"cluster_id\": \"11df7332-4b32-4f5a-903b-0cbbef381850\",\n\t\t\t\t\"data-gatherer\": \"additional-field\",\n\t\t\t\t\"timestamp\": \"2024-06-01T12:00:00Z\",\n\t\t\t\t\"data\": {\n\t\t\t\t\t\"cluster_id\": \"60868ebf-6e47-4184-9bc0-20bb6824e210\"\n\t\t\t\t},\n\t\t\t\t\"extra_field\": \"should cause error\",\n\t\t\t\t\"schema_version\": \"v1\"\n\t\t\t}`,\n\t\t\texpectError: `failed to parse DataReading: json: unknown field \"extra_field\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"Additional data field\",\n\t\t\tinput: `{\n\t\t\t\t\"cluster_id\": \"ca44c338-987e-4d57-8320-63f538db4292\",\n\t\t\t\t\"data-gatherer\": \"additional-data-field\",\n\t\t\t\t\"timestamp\": \"2024-06-01T12:00:00Z\",\n\t\t\t\t\"data\": {\n\t\t\t\t\t\"cluster_id\": \"60868ebf-6e47-4184-9bc0-20bb6824e210\",\n\t\t\t\t\t\"server_version\": {\n\t\t\t\t\t\t\"major\": \"1\",\n\t\t\t\t\t\t\"minor\": \"20\",\n\t\t\t\t\t\t\"gitVersion\": \"v1.20.0\"\n  \t\t\t\t\t},\n\t\t\t\t\t\"extra_field\": \"should cause error\"\n\t\t\t\t},\n\t\t\t\t\"schema_version\": \"v1\"\n\t\t\t}`,\n\t\t\texpectError: `failed to parse DataReading.Data for gatherer \"additional-data-field\": unknown type`,\n\t\t},\n\t\t{\n\t\t\tname:        \"Empty JSON object\",\n\t\t\tinput:       `{}`,\n\t\t\texpectError: `failed to parse DataReading.Data for gatherer \"\": empty data`,\n\t\t},\n\t\t{\n\t\t\tname: \"Null data field\",\n\t\t\tinput: `{\n\t\t\t\t\"cluster_id\": \"36281cb3-7f3a-4efa-9879-7c988a9715b0\",\n\t\t\t\t\"data-gatherer\": \"null-data\",\n\t\t\t\t\"timestamp\": \"2024-06-01T12:00:00Z\",\n\t\t\t\t\"data\": null,\n\t\t\t\t\"schema_version\": \"v1\"\n\t\t\t}`,\n\t\t\texpectError: `failed to parse DataReading.Data for gatherer \"null-data\": empty 
data`,\n\t\t},\n\t\t{\n\t\t\tname: \"Empty string data field\",\n\t\t\tinput: `{\n\t\t\t\t\"cluster_id\": \"7b7aa8ee-58ac-4818-9b29-c0a76296ea1d\",\n\t\t\t\t\"data-gatherer\": \"empty-string-data\",\n\t\t\t\t\"timestamp\": \"2024-06-01T12:00:00Z\",\n\t\t\t\t\"data\": \"\",\n\t\t\t\t\"schema_version\": \"v1\"\n\t\t\t}`,\n\t\t\texpectError: `failed to parse DataReading.Data for gatherer \"empty-string-data\": unknown type`,\n\t\t},\n\t\t{\n\t\t\tname: \"Array instead of object in data field\",\n\t\t\tinput: `{\n\t\t\t\t\"cluster_id\": \"94d7757f-d084-4ccb-963b-f60fece0df2d\",\n\t\t\t\t\"data-gatherer\": \"array-data\",\n\t\t\t\t\"timestamp\": \"2024-06-01T12:00:00Z\",\n\t\t\t\t\"data\": [],\n\t\t\t\t\"schema_version\": \"v1\"\n\t\t\t}`,\n\t\t\texpectError: `failed to parse DataReading.Data for gatherer \"array-data\": unknown type`,\n\t\t},\n\t\t{\n\t\t\tname: \"Incorrect timestamp format\",\n\t\t\tinput: `{\n\t\t\t\t\"cluster_id\": \"d58f298d-b8c1-4d99-aa85-c27d9aec6f97\",\n\t\t\t\t\"data-gatherer\": \"bad-timestamp\",\n\t\t\t\t\"timestamp\": \"not-a-timestamp\",\n\t\t\t\t\"data\": {\n\t\t\t\t\t\"items\": []\n\t\t\t\t},\n\t\t\t\t\"schema_version\": \"v1\"\n\t\t\t}`,\n\t\t\texpectError: `failed to parse DataReading: parsing time \"not-a-timestamp\" as \"2006-01-02T15:04:05Z07:00\": cannot parse \"not-a-timestamp\" as \"2006\"`,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tvar dr DataReading\n\t\t\terr := dr.UnmarshalJSON([]byte(tt.input))\n\t\t\tif tt.expectError != \"\" {\n\t\t\t\tassert.EqualError(t, err, tt.expectError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.IsType(t, tt.wantDataType, dr.Data)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "cmd/agent.go",
    "content": "package cmd\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/spf13/cobra\"\n\n\t\"github.com/jetstack/preflight/pkg/agent\"\n\t\"github.com/jetstack/preflight/pkg/permissions\"\n)\n\nvar agentCmd = &cobra.Command{\n\tUse:   \"agent\",\n\tShort: \"start the preflight agent\",\n\tLong: `The agent will periodically gather data for the configured data\n\tgatherers and send it to a remote backend for evaluation`,\n\tRunE: agent.Run,\n}\n\nvar agentInfoCmd = &cobra.Command{\n\tUse:   \"info\",\n\tShort: \"print several internal parameters of the agent\",\n\tLong:  `Print several internal parameters of the agent, as the built-in OAuth2 client ID.`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tprintVersion(true)\n\t\tfmt.Println()\n\t\tprintOAuth2Config()\n\t},\n}\n\nvar agentRBACCmd = &cobra.Command{\n\tUse:   \"rbac\",\n\tShort: \"print the agent's minimal RBAC manifest\",\n\tLong:  `Print RBAC string by reading GVRs`,\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\n\t\tb, err := os.ReadFile(agent.Flags.ConfigFilePath)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to read config file: %s\", err)\n\t\t}\n\t\tcfg, err := agent.ParseConfig(b)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to parse config file: %s\", err)\n\t\t}\n\n\t\terr = agent.ValidateDataGatherers(cfg.DataGatherers)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to validate data gatherers: %s\", err)\n\t\t}\n\n\t\tout := permissions.GenerateFullManifest(cfg.DataGatherers)\n\t\tfmt.Print(out)\n\t\treturn nil\n\t},\n}\n\nfunc init() {\n\trootCmd.AddCommand(agentCmd)\n\tagentCmd.AddCommand(agentInfoCmd)\n\tagentCmd.AddCommand(agentRBACCmd)\n\tagent.InitAgentCmdFlags(agentCmd, &agent.Flags)\n}\n"
  },
  {
    "path": "cmd/agent_test.go",
    "content": "package cmd\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/require\"\n\n\tarktesting \"github.com/jetstack/preflight/internal/cyberark/testing\"\n)\n\n// TestOutputModes tests the different output modes of the agent command.\n// It does this by running the agent command in a subprocess with the\n// appropriate flags and configuration files.\n// It assumes that the test is being run from the \"cmd\" directory and that\n// the repository root is the parent directory of the current working directory.\nfunc TestOutputModes(t *testing.T) {\n\trepoRoot := findRepoRoot(t)\n\n\tt.Run(\"localfile\", func(t *testing.T) {\n\t\trunSubprocess(t, repoRoot, []string{\n\t\t\t\"--agent-config-file\", filepath.Join(repoRoot, \"examples/localfile/config.yaml\"),\n\t\t\t\"--input-path\", filepath.Join(repoRoot, \"examples/localfile/input.json\"),\n\t\t\t\"--output-path\", \"/dev/null\",\n\t\t})\n\t})\n\n\tt.Run(\"machinehub\", func(t *testing.T) {\n\t\tif strings.ToLower(os.Getenv(\"ARK_LIVE_TEST\")) != \"true\" {\n\t\t\tt.Skip(\"set ARK_LIVE_TEST=true to run this test against the live service\")\n\t\t\treturn\n\t\t}\n\t\tarktesting.SkipIfNoEnv(t)\n\n\t\tt.Log(\"This test runs against a live service and has been known to flake. 
If you see timeout issues it's possible that the test is flaking and it could be unrelated to your changes.\")\n\n\t\trunSubprocess(t, repoRoot, []string{\n\t\t\t\"--agent-config-file\", filepath.Join(repoRoot, \"examples/machinehub/config.yaml\"),\n\t\t\t\"--input-path\", filepath.Join(repoRoot, \"examples/machinehub/input.json\"),\n\t\t\t\"--machine-hub\",\n\t\t})\n\t})\n}\n\n// findRepoRoot returns the absolute path to the repository root.\n// It assumes that the test is being run from the \"cmd\" directory.\nfunc findRepoRoot(t *testing.T) string {\n\tcwd, err := os.Getwd()\n\trequire.NoError(t, err)\n\trepoRoot, err := filepath.Abs(filepath.Join(cwd, \"..\"))\n\trequire.NoError(t, err)\n\treturn repoRoot\n}\n\n// runSubprocess runs the current test in a subprocess with the given args.\n// It sets the GO_CHILD environment variable to indicate to the subprocess\n// that it should run the main function instead of the test function.\n// It captures and logs the stdout and stderr of the subprocess.\n// It fails the test if the subprocess exits with a non-zero status.\n// It uses a timeout to avoid hanging indefinitely.\nfunc runSubprocess(t *testing.T, repoRoot string, args []string) {\n\tif _, found := os.LookupEnv(\"GO_CHILD\"); found {\n\t\tos.Args = append([]string{\n\t\t\t\"preflight\",\n\t\t\t\"agent\",\n\t\t\t\"--log-level\", \"6\",\n\t\t\t\"--one-shot\",\n\t\t}, args...)\n\t\tExecute()\n\t\treturn\n\t}\n\tt.Log(\"Running child process\", os.Args[0], \"-test.run=^\"+t.Name()+\"$\")\n\tctx, cancel := context.WithTimeout(t.Context(), time.Second*10)\n\tdefer cancel()\n\tcmd := exec.CommandContext(ctx, os.Args[0], \"-test.run=^\"+t.Name()+\"$\")\n\tvar stdout, stderr bytes.Buffer\n\tcmd.Stdout = &stdout\n\tcmd.Stderr = &stderr\n\tcmd.Env = append(os.Environ(), \"GO_CHILD=true\")\n\terr := cmd.Run()\n\tt.Logf(\"STDOUT\\n%s\\n\", stdout.String())\n\tt.Logf(\"STDERR\\n%s\\n\", stderr.String())\n\trequire.NoError(t, err, fmt.Sprintf(\"Error: %v\\nSTDERR: %s\", err, 
stderr.String()))\n}\n"
  },
  {
    "path": "cmd/ark/main.go",
    "content": "package main\n\nimport \"github.com/jetstack/preflight/cmd\"\n\nfunc main() {\n\tcmd.Execute()\n}\n"
  },
  {
    "path": "cmd/echo.go",
    "content": "package cmd\n\nimport (\n\t\"github.com/spf13/cobra\"\n\n\t\"github.com/jetstack/preflight/pkg/echo\"\n)\n\nvar echoCmd = &cobra.Command{\n\tUse:   \"echo\",\n\tShort: \"starts an echo server to test the agent\",\n\tLong: `The agent sends data to a server. This echo server\ncan be used to act as the server part and echo the data received by the agent.`,\n\tRunE: echo.Echo,\n}\n\nfunc init() {\n\trootCmd.AddCommand(echoCmd)\n\techoCmd.PersistentFlags().StringVarP(\n\t\t&echo.EchoListen,\n\t\t\"listen\",\n\t\t\"l\",\n\t\t\":8080\",\n\t\t\"Address where to listen.\",\n\t)\n\n\techoCmd.PersistentFlags().BoolVarP(\n\t\t&echo.Compact,\n\t\t\"compact\",\n\t\t\"\",\n\t\tfalse,\n\t\t\"Prints compact output.\",\n\t)\n}\n"
  },
  {
    "path": "cmd/helpers.go",
    "content": "package cmd\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\n\t\"github.com/jetstack/preflight/pkg/client\"\n\t\"github.com/jetstack/preflight/pkg/version\"\n)\n\nfunc printVersion(verbose bool) {\n\tfmt.Println(\"Preflight version: \", version.PreflightVersion, runtime.GOOS+\"/\"+runtime.GOARCH)\n\tif verbose {\n\t\tfmt.Println(\"  Commit: \", version.Commit)\n\t\tfmt.Println(\"  Built:  \", version.BuildDate)\n\t\tfmt.Println(\"  Go:     \", runtime.Version())\n\t}\n}\n\nfunc printOAuth2Config() {\n\tfmt.Println(\"OAuth2: \")\n\tfmt.Println(\"  ClientID:         \", client.ClientID)\n\tfmt.Println(\"  AuthServerDomain: \", client.AuthServerDomain)\n}\n"
  },
  {
    "path": "cmd/root.go",
    "content": "package cmd\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com/spf13/cobra\"\n\t\"github.com/spf13/pflag\"\n\t\"k8s.io/klog/v2\"\n\n\t\"github.com/jetstack/preflight/pkg/logs\"\n)\n\n// rootCmd represents the base command when called without any subcommands\nvar rootCmd = &cobra.Command{\n\tUse:   \"preflight\",\n\tShort: \"Kubernetes cluster configuration checker 🚀\",\n\tLong: `Preflight is a tool to automatically perform Kubernetes cluster\nconfiguration checks using Open Policy Agent (OPA).\n\nPreflight checks are bundled into Packages`,\n\tPersistentPreRunE: func(cmd *cobra.Command, args []string) error {\n\t\treturn logs.Initialize()\n\t},\n\t// SilenceErrors and SilenceUsage prevents this command or any sub-command\n\t// from printing arbitrary text to stderr.\n\t// Why? To ensure that each line of output can be parsed as a single message\n\t// for consumption by logging agents such as fluentd.\n\t// Usage information is still available on stdout with the `-h` and `--help`\n\t// flags.\n\tSilenceErrors: true,\n\tSilenceUsage:  true,\n}\n\nfunc init() {\n\tfor _, command := range rootCmd.Commands() {\n\t\tsetFlagsFromEnv(\"PREFLIGHT_\", command.PersistentFlags())\n\t}\n}\n\n// Execute adds all child commands to the root command and sets flags appropriately.\n// This is called by main.main(). 
It only needs to happen once to the rootCmd.\n// If the root command or sub-command returns an error, the error message will\n// be logged and the process will exit with status 1.\nfunc Execute() {\n\tlogs.AddFlags(rootCmd.PersistentFlags())\n\tctx := klog.NewContext(context.Background(), klog.Background())\n\tvar exitCode int\n\tif err := rootCmd.ExecuteContext(ctx); err != nil {\n\t\texitCode = 1\n\t\tklog.ErrorS(err, \"Exiting due to error\", \"exit-code\", exitCode)\n\t}\n\tklog.FlushAndExit(klog.ExitFlushTimeout, exitCode)\n}\n\nfunc setFlagsFromEnv(prefix string, fs *pflag.FlagSet) {\n\tset := map[string]bool{}\n\tfs.Visit(func(f *pflag.Flag) {\n\t\tset[f.Name] = true\n\t})\n\tfs.VisitAll(func(f *pflag.Flag) {\n\t\t// ignore flags set from the commandline\n\t\tif set[f.Name] {\n\t\t\treturn\n\t\t}\n\t\t// remove trailing _ to reduce common errors with the prefix, i.e. people setting it to MY_PROG_\n\t\tcleanPrefix := strings.TrimSuffix(prefix, \"_\")\n\t\tname := fmt.Sprintf(\"%s_%s\", cleanPrefix, strings.ReplaceAll(strings.ToUpper(f.Name), \"-\", \"_\"))\n\t\tif e, ok := os.LookupEnv(name); ok {\n\t\t\t_ = f.Value.Set(e)\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "cmd/version.go",
    "content": "package cmd\n\nimport (\n\t\"github.com/spf13/cobra\"\n)\n\nvar verbose bool\n\nvar versionCmd = &cobra.Command{\n\tUse:   \"version\",\n\tShort: \"Display the version\",\n\tLong: `Display preflight version.\n`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tprintVersion(verbose)\n\t},\n}\n\nfunc init() {\n\trootCmd.AddCommand(versionCmd)\n\tversionCmd.PersistentFlags().BoolVar(\n\t\t&verbose,\n\t\t\"verbose\",\n\t\tfalse,\n\t\t\"If enabled, displays the additional information about this build.\",\n\t)\n}\n"
  },
  {
    "path": "deploy/charts/disco-agent/.helmignore",
    "content": "# Patterns to ignore when building packages.\n# This supports shell glob matching, relative path matching, and\n# negation (prefixed with !). Only one pattern per line.\n.DS_Store\n# Common VCS dirs\n.git/\n.gitignore\n.bzr/\n.bzrignore\n.hg/\n.hgignore\n.svn/\n# Common backup files\n*.swp\n*.bak\n*.tmp\n*.orig\n*~\n# Various IDEs\n.project\n.idea/\n*.tmproj\n.vscode/\n"
  },
  {
    "path": "deploy/charts/disco-agent/Chart.yaml",
    "content": "apiVersion: v2\nname: disco-agent\ndescription: |-\n  The disco-agent connects your Kubernetes or OpenShift cluster to CyberArk Discovery and Context.\n\nmaintainers:\n  - name: CyberArk\n    email: support@cyberark.com\n    url: https://cyberark.com\n\nsources:\n  - https://github.com/jetstack/jetstack-secure\n\n# These versions are meant to be overridden by `make helm-chart`. No `v` prefix\n# for the `version` because Helm doesn't support auto-determining the latest\n# version for OCI Helm charts that use a `v` prefix.\nversion: 0.0.0\nappVersion: \"v0.0.0\"\n"
  },
  {
    "path": "deploy/charts/disco-agent/README.md",
    "content": "# disco-agent\n\nThe Cyberark Discovery and Context Agent connects your Kubernetes or OpenShift\ncluster to the Discovery and Context service of the CyberArk Identity Security Platform.\n\n## Quick Start\n\n### Create a Namespace\n\nCreate a namespace for the agent:\n\n```sh\nexport NAMESPACE=cyberark\nkubectl create ns \"$NAMESPACE\" || true\n```\n\n### Add credentials to a Secret\n\nYou will require tenant details and credentials for the CyberArk Identity Security Platform.\nPut them in the following environment variables:\n\n```sh\nexport ARK_SUBDOMAIN=      # your CyberArk tenant subdomain e.g. tlskp-test\nexport ARK_USERNAME=       # your CyberArk username\nexport ARK_SECRET=         # your CyberArk password\n# OPTIONAL: the URL for the CyberArk Discovery API if not using the production environment\nexport ARK_DISCOVERY_API=https://platform-discovery.integration-cyberark.cloud/\n```\n\nCreate a Secret containing the tenant details and credentials:\n\n```sh\nkubectl create secret generic agent-credentials \\\n        --namespace \"$NAMESPACE\" \\\n        --from-literal=ARK_USERNAME=$ARK_USERNAME \\\n        --from-literal=ARK_SECRET=$ARK_SECRET \\\n        --from-literal=ARK_SUBDOMAIN=$ARK_SUBDOMAIN \\\n        --from-literal=ARK_DISCOVERY_API=$ARK_DISCOVERY_API\n```\n\nAlternatively, use the following Secret as a template:\n\n```yaml\n# agent-credentials.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n  name: agent-credentials\n  namespace: cyberark\ntype: Opaque\nstringData:\n  ARK_SUBDOMAIN: $ARK_SUBDOMAIN # your CyberArk tenant subdomain e.g. 
tlskp-test\n  ARK_SECRET: $ARK_SECRET       # your CyberArk password\n  ARK_USERNAME: $ARK_USERNAME   # your CyberArk username\n  # OPTIONAL: the URL for the CyberArk Discovery API if not using the production environment\n  # ARK_DISCOVERY_API: https://platform-discovery.integration-cyberark.cloud/\n```\n\n### Deploy the agent\n\nDeploy the agent:\n\n```sh\nhelm upgrade agent \"oci://${OCI_BASE}/charts/disco-agent\" \\\n     --install \\\n     --create-namespace \\\n     --namespace \"$NAMESPACE\" \\\n     --set acceptTerms=true \\\n     --set fullnameOverride=disco-agent\n```\n\n### Troubleshooting\n\nCheck the Pod and its events:\n```sh\nkubectl describe -n cyberark pods -l app.kubernetes.io/name=disco-agent\n```\n\nCheck the logs:\n```sh\nkubectl logs deployments/disco-agent --namespace \"${NAMESPACE}\" --follow\n```\n\n## Values\n\n<!-- AUTO-GENERATED -->\n\n#### **replicaCount** ~ `number`\n> Default value:\n> ```yaml\n> 1\n> ```\n\nThis will set the replicaset count more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/\n#### **acceptTerms** ~ `bool`\n> Default value:\n> ```yaml\n> false\n> ```\n\nMust be set to indicate that you have read and accepted the CyberArk Terms of Service. If false, the helm chart will fail to install and will print a message with instructions on how to accept the TOS.\n#### **imageRegistry** ~ `string`\n> Default value:\n> ```yaml\n> quay.io\n> ```\n\nThe container registry used for disco-agent images by default. This can include path prefixes (e.g. \"artifactory.example.com/docker\").\n\n#### **imageNamespace** ~ `string`\n> Default value:\n> ```yaml\n> jetstack\n> ```\n\nThe repository namespace used for disco-agent images by default.  \nExamples:  \n- jetstack  \n- custom-namespace\n\n#### **image.registry** ~ `string`\n\nDeprecated: per-component registry prefix.  \n  \nIf set, this value is *prepended* to the image repository that the chart would otherwise render. 
This applies both when `image.repository` is set and when the repository is computed from  \n`imageRegistry` + `imageNamespace` + `image.name`.  \n  \nThis can produce \"double registry\" style references such as  \n`legacy.example.io/quay.io/jetstack/...`. Prefer using the global  \n`imageRegistry`/`imageNamespace` values.\n\n#### **image.repository** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n\nFull repository override (takes precedence over `imageRegistry`, `imageNamespace`, and `image.name`).  \nExample: quay.io/jetstack/disco-agent\n\n#### **image.name** ~ `string`\n> Default value:\n> ```yaml\n> disco-agent\n> ```\n\nThe image name for the Discovery Agent.  \nThis is used (together with `imageRegistry` and `imageNamespace`) to construct the full image reference.\n\n#### **image.pullPolicy** ~ `string`\n> Default value:\n> ```yaml\n> IfNotPresent\n> ```\n\nThis sets the pull policy for images.\n#### **image.tag** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n\nOverride the image tag to deploy by setting this variable. If no value is set, the chart's appVersion is used.\n#### **image.digest** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n\nOverride the image digest to deploy by setting this variable. 
If set together with `image.tag`, the rendered image will include both tag and digest.\n#### **imagePullSecrets** ~ `array`\n> Default value:\n> ```yaml\n> []\n> ```\n\nThis is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/\n#### **nameOverride** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n\nThis is to override the chart name.\n#### **fullnameOverride** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n#### **serviceAccount.create** ~ `bool`\n> Default value:\n> ```yaml\n> true\n> ```\n\nSpecifies whether a service account should be created\n#### **serviceAccount.automount** ~ `bool`\n> Default value:\n> ```yaml\n> true\n> ```\n\nAutomatically mount a ServiceAccount's API credentials?\n#### **serviceAccount.annotations** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n\nAnnotations to add to the service account\n#### **serviceAccount.name** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n\nThe name of the service account to use.  \nIf not set and create is true, a name is generated using the fullname template\n#### **podAnnotations** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n\nThis is for setting Kubernetes Annotations to a Pod. For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/\n#### **podLabels** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n\nThis is for setting Kubernetes Labels to a Pod.  
\nFor more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/\n#### **podSecurityContext** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n#### **securityContext** ~ `object`\n> Default value:\n> ```yaml\n> allowPrivilegeEscalation: false\n> capabilities:\n>   drop:\n>     - ALL\n> readOnlyRootFilesystem: true\n> runAsNonRoot: true\n> seccompProfile:\n>   type: RuntimeDefault\n> ```\n\nAdd Container specific SecurityContext settings to the container. Takes precedence over `podSecurityContext` when set. See https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container\n\n#### **resources** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n#### **volumes** ~ `array`\n> Default value:\n> ```yaml\n> []\n> ```\n\nAdditional volumes on the output Deployment definition.\n#### **volumeMounts** ~ `array`\n> Default value:\n> ```yaml\n> []\n> ```\n\nAdditional volumeMounts on the output Deployment definition.\n#### **nodeSelector** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n#### **tolerations** ~ `array`\n> Default value:\n> ```yaml\n> []\n> ```\n#### **affinity** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n#### **http_proxy** ~ `string`\n\nConfigures the HTTP_PROXY environment variable where a HTTP proxy is required.\n\n#### **https_proxy** ~ `string`\n\nConfigures the HTTPS_PROXY environment variable where a HTTP proxy is required.\n\n#### **no_proxy** ~ `string`\n\nConfigures the NO_PROXY environment variable where a HTTP proxy is required, but certain domains should be excluded.\n\n#### **podDisruptionBudget** ~ `object`\n> Default value:\n> ```yaml\n> enabled: false\n> ```\n\nConfigure a PodDisruptionBudget for the agent's Deployment. 
If running with multiple replicas, consider setting podDisruptionBudget.enabled to true.\n\n#### **config.period** ~ `string`\n> Default value:\n> ```yaml\n> 12h0m0s\n> ```\n\nPush data every 12 hours unless changed.\n#### **config.excludeAnnotationKeysRegex** ~ `array`\n> Default value:\n> ```yaml\n> []\n> ```\n\nYou can configure the agent to exclude some annotations or labels from being pushed . All Kubernetes objects are affected. The objects are still pushed, but the specified annotations and labels are removed before being pushed.  \n  \nDots is the only character that needs to be escaped in the regex. Use either double quotes with escaped single quotes or unquoted strings for the regex to avoid YAML parsing issues with `\\.`.  \n  \nExample: excludeAnnotationKeysRegex: ['^kapp\\.k14s\\.io/original.*']\n#### **config.excludeLabelKeysRegex** ~ `array`\n> Default value:\n> ```yaml\n> []\n> ```\n#### **config.clusterName** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n\nA human readable name for the cluster where the agent is deployed (optional).  \n  \nThis cluster name will be associated with the data that the agent uploads to the Discovery and Context service. If empty (the default), the service account name will be used instead.\n#### **config.clusterDescription** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n\nA short description of the cluster where the agent is deployed (optional).  \n  \nThis description will be associated with the data that the agent uploads to the Discovery and Context service. The description may include contact information such as the email address of the cluster administrator, so that any problems and risks identified by the Discovery and Context service can be communicated to the people responsible for the affected secrets.\n#### **config.sendSecretValues** ~ `bool`\n> Default value:\n> ```yaml\n> true\n> ```\n\nEnable sending of Secret values to CyberArk in addition to metadata. 
Metadata is always sent, but the actual values of Secrets are not sent by default. When enabled, Secret data is encrypted using envelope encryption using a key managed by CyberArk, fetched from the Discovery and Context service.\n#### **authentication.secretName** ~ `string`\n> Default value:\n> ```yaml\n> agent-credentials\n> ```\n#### **extraArgs** ~ `array`\n> Default value:\n> ```yaml\n> []\n> ```\n\n```yaml\nextraArgs:\n- --logging-format=json\n- --log-level=6 # To enable HTTP request logging\n```\n#### **pprof.enabled** ~ `bool`\n> Default value:\n> ```yaml\n> false\n> ```\n\nEnable profiling with the pprof endpoint\n#### **metrics.enabled** ~ `bool`\n> Default value:\n> ```yaml\n> true\n> ```\n\nEnable the metrics server.  \nIf false, the metrics server will be disabled and the other metrics fields below will be ignored.\n#### **metrics.podmonitor.enabled** ~ `bool`\n> Default value:\n> ```yaml\n> false\n> ```\n\nCreate a PodMonitor to add the metrics to Prometheus, if you are using Prometheus Operator. See https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor\n#### **metrics.podmonitor.namespace** ~ `string`\n\nThe namespace that the pod monitor should live in.  \nDefaults to the disco-agent namespace.\n\n#### **metrics.podmonitor.prometheusInstance** ~ `string`\n> Default value:\n> ```yaml\n> default\n> ```\n\nSpecifies the `prometheus` label on the created PodMonitor. 
This is used when different Prometheus instances have label selectors matching different PodMonitors.\n#### **metrics.podmonitor.interval** ~ `string`\n> Default value:\n> ```yaml\n> 60s\n> ```\n\nThe interval to scrape metrics.\n#### **metrics.podmonitor.scrapeTimeout** ~ `string`\n> Default value:\n> ```yaml\n> 30s\n> ```\n\nThe timeout before a metrics scrape fails.\n#### **metrics.podmonitor.labels** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n\nAdditional labels to add to the PodMonitor.\n#### **metrics.podmonitor.annotations** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n\nAdditional annotations to add to the PodMonitor.\n#### **metrics.podmonitor.honorLabels** ~ `bool`\n> Default value:\n> ```yaml\n> false\n> ```\n\nKeep labels from scraped data, overriding server-side labels.\n#### **metrics.podmonitor.endpointAdditionalProperties** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n\nEndpointAdditionalProperties allows setting additional properties on the endpoint such as relabelings, metricRelabelings etc.  \n  \nFor example:\n\n```yaml\nendpointAdditionalProperties:\n relabelings:\n - action: replace\n   sourceLabels:\n   - __meta_kubernetes_pod_node_name\n   targetLabel: instance\n```\n\n<!-- /AUTO-GENERATED -->\n\n"
  },
  {
    "path": "deploy/charts/disco-agent/templates/NOTES.txt",
    "content": "CHART NAME: {{ .Chart.Name }}\nCHART VERSION: {{ .Chart.Version }}\nAPP VERSION: {{ .Chart.AppVersion }}\n\n- Check the application is running:\n> kubectl get pods -n {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}\n\n- Check the application logs for successful connection to the platform:\n> kubectl logs -n {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}\n\n{{ if .Values.config.sendSecretValues }}\nNB: sendSecretValues is set to \"true\". Encrypted secret data will be sent to the CyberArk Discovery and Context service\n{{ end }}\n"
  },
  {
    "path": "deploy/charts/disco-agent/templates/_helpers.tpl",
    "content": "{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"disco-agent.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" }}\n{{- end }}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"disco-agent.fullname\" -}}\n{{- if .Values.fullnameOverride }}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" }}\n{{- else }}\n{{- $name := default .Chart.Name .Values.nameOverride }}\n{{- if contains $name .Release.Name }}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" }}\n{{- else }}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" }}\n{{- end }}\n{{- end }}\n{{- end }}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"disco-agent.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" }}\n{{- end }}\n\n{{/*\nCommon labels\n*/}}\n{{- define \"disco-agent.labels\" -}}\nhelm.sh/chart: {{ include \"disco-agent.chart\" . }}\n{{ include \"disco-agent.selectorLabels\" . }}\n{{- if .Chart.AppVersion }}\napp.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n{{- end }}\napp.kubernetes.io/managed-by: {{ .Release.Service }}\n{{- end }}\n\n{{/*\nSelector labels\n*/}}\n{{- define \"disco-agent.selectorLabels\" -}}\napp.kubernetes.io/name: {{ include \"disco-agent.name\" . }}\napp.kubernetes.io/instance: {{ .Release.Name }}\n{{- end }}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"disco-agent.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create }}\n{{- default (include \"disco-agent.fullname\" .) 
.Values.serviceAccount.name }}\n{{- else }}\n{{- default \"default\" .Values.serviceAccount.name }}\n{{- end }}\n{{- end }}\n\n{{/*\nUtil function for generating an image reference based on the provided options.\nThis function is derived from similar functions used in the cert-manager GitHub organization\n*/}}\n{{- define \"disco-agent.image\" -}}\n{{- /*\nCalling convention:\n- (tuple <imageValues> <imageRegistry> <imageNamespace> <defaultReference>)\nWe intentionally pass imageRegistry/imageNamespace as explicit arguments rather than reading\nfrom `.Values` inside this helper, because `helm-tool lint` does not reliably track `.Values.*`\nusage through tuple/variable indirection.\n*/ -}}\n{{- if ne (len .) 4 -}}\n\t{{- fail (printf \"ERROR: template \\\"disco-agent.image\\\" expects (tuple <imageValues> <imageRegistry> <imageNamespace> <defaultReference>), got %d arguments\" (len .)) -}}\n{{- end -}}\n{{- $image := index . 0 -}}\n{{- $imageRegistry := index . 1 | default \"\" -}}\n{{- $imageNamespace := index . 2 | default \"\" -}}\n{{- $defaultReference := index . 
3 -}}\n{{- $repository := \"\" -}}\n{{- if $image.repository -}}\n\t{{- $repository = $image.repository -}}\n\t{{- /*\n\t\tBackwards compatibility: if image.registry is set, additionally prefix the repository with this registry.\n\t*/ -}}\n\t{{- if $image.registry -}}\n\t\t{{- $repository = printf \"%s/%s\" $image.registry $repository -}}\n\t{{- end -}}\n{{- else -}}\n\t{{- $name := required \"ERROR: image.name must be set when image.repository is empty\" $image.name -}}\n\t{{- $repository = $name -}}\n\t{{- if $imageNamespace -}}\n\t\t{{- $repository = printf \"%s/%s\" $imageNamespace $repository -}}\n\t{{- end -}}\n\t{{- if $imageRegistry -}}\n\t\t{{- $repository = printf \"%s/%s\" $imageRegistry $repository -}}\n\t{{- end -}}\n\t{{- /*\n\t\tBackwards compatibility: if image.registry is set, additionally prefix the repository with this registry.\n\t*/ -}}\n\t{{- if $image.registry -}}\n\t\t{{- $repository = printf \"%s/%s\" $image.registry $repository -}}\n\t{{- end -}}\n{{- end -}}\n{{- $repository -}}\n{{- if and $image.tag $image.digest -}}\n\t{{- printf \":%s@%s\" $image.tag $image.digest -}}\n{{- else if $image.tag -}}\n\t{{- printf \":%s\" $image.tag -}}\n{{- else if $image.digest -}}\n\t{{- printf \"@%s\" $image.digest -}}\n{{- else -}}\n\t{{- printf \"%s\" $defaultReference -}}\n{{- end -}}\n{{- end }}\n"
  },
  {
    "path": "deploy/charts/disco-agent/templates/configmap.yaml",
    "content": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ include \"disco-agent.fullname\" . }}-config\n  namespace: {{ .Release.Namespace }}\n  labels:\n    {{- include \"disco-agent.labels\" . | nindent 4 }}\ndata:\n  config.yaml: |-\n    cluster_name: {{ .Values.config.clusterName | quote }}\n    cluster_description: {{ .Values.config.clusterDescription | quote }}\n    period: {{ .Values.config.period | quote }}\n    {{- with .Values.config.excludeAnnotationKeysRegex }}\n    exclude-annotation-keys-regex:\n      {{- . | toYaml | nindent 6 }}\n    {{- end }}\n    {{- with .Values.config.excludeLabelKeysRegex }}\n    exclude-label-keys-regex:\n      {{- . | toYaml | nindent 6 }}\n    {{- end }}\n    data-gatherers:\n    - kind: oidc\n      name: ark/oidc\n    - kind: k8s-discovery\n      name: ark/discovery\n    - kind: k8s-dynamic\n      name: ark/secrets\n      config:\n        resource-type:\n          version: v1\n          resource: secrets\n        field-selectors:\n        - type!=kubernetes.io/dockercfg\n        - type!=kubernetes.io/dockerconfigjson\n        - type!=bootstrap.kubernetes.io/token\n        - type!=helm.sh/release.v1\n    - kind: k8s-dynamic\n      name: ark/serviceaccounts\n      config:\n        resource-type:\n          resource: serviceaccounts\n          version: v1\n    - kind: k8s-dynamic\n      name: ark/roles\n      config:\n        resource-type:\n          version: v1\n          group: rbac.authorization.k8s.io\n          resource: roles\n    - kind: k8s-dynamic\n      name: ark/clusterroles\n      config:\n        resource-type:\n          version: v1\n          group: rbac.authorization.k8s.io\n          resource: clusterroles\n    - kind: k8s-dynamic\n      name: ark/rolebindings\n      config:\n        resource-type:\n          version: v1\n          group: rbac.authorization.k8s.io\n          resource: rolebindings\n    - kind: k8s-dynamic\n      name: ark/clusterrolebindings\n      config:\n        
resource-type:\n          version: v1\n          group: rbac.authorization.k8s.io\n          resource: clusterrolebindings\n    - kind: k8s-dynamic\n      name: ark/jobs\n      config:\n        resource-type:\n          version: v1\n          group: batch\n          resource: jobs\n    - kind: k8s-dynamic\n      name: ark/cronjobs\n      config:\n        resource-type:\n          version: v1\n          group: batch\n          resource: cronjobs\n    - kind: k8s-dynamic\n      name: ark/deployments\n      config:\n        resource-type:\n          version: v1\n          group: apps\n          resource: deployments\n    - kind: k8s-dynamic\n      name: ark/statefulsets\n      config:\n        resource-type:\n          version: v1\n          group: apps\n          resource: statefulsets\n    - kind: k8s-dynamic\n      name: ark/daemonsets\n      config:\n        resource-type:\n          version: v1\n          group: apps\n          resource: daemonsets\n    - kind: k8s-dynamic\n      name: ark/pods\n      config:\n        resource-type:\n          version: v1\n          resource: pods\n    - kind: k8s-dynamic\n      name: ark/configmaps\n      config:\n        resource-type:\n          resource: configmaps\n          version: v1\n        label-selectors:\n        - conjur.org/name=conjur-connect-configmap\n    - kind: k8s-dynamic\n      name: ark/esoexternalsecrets\n      config:\n        resource-type:\n          group: external-secrets.io\n          version: v1\n          resource: externalsecrets\n    - kind: k8s-dynamic\n      name: ark/esosecretstores\n      config:\n        resource-type:\n          group: external-secrets.io\n          version: v1\n          resource: secretstores\n    - kind: k8s-dynamic\n      name: ark/esoclusterexternalsecrets\n      config:\n        resource-type:\n          group: external-secrets.io\n          version: v1\n          resource: clusterexternalsecrets\n    - kind: k8s-dynamic\n      name: ark/esoclustersecretstores\n      
config:\n        resource-type:\n          group: external-secrets.io\n          version: v1\n          resource: clustersecretstores\n"
  },
  {
    "path": "deploy/charts/disco-agent/templates/deployment.yaml",
    "content": "{{- if not .Values.acceptTerms }}\n  {{- fail \"\\n\\n=================================================================\\n                Terms & Conditions Notice\\n=================================================================\\n\\nBefore installing this application, you must review and accept\\nthe terms and conditions available at:\\nhttps://www.cyberark.com/contract-terms/\\n\\nTo proceed with installation, you must indicate acceptance by\\nsetting:\\n\\n  - In your values file: acceptTerms: true\\n  or\\n  - Via the Helm flag: --set acceptTerms=true\\n\\nBy continuing with the next command, you confirm that you have\\nreviewed and accepted these terms and conditions.\\n\\n=================================================================\\n\" }}\n{{- end }}\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: {{ include \"disco-agent.fullname\" . }}\n  labels:\n    {{- include \"disco-agent.labels\" . | nindent 4 }}\nspec:\n  replicas: {{ .Values.replicaCount }}\n  selector:\n    matchLabels:\n      {{- include \"disco-agent.selectorLabels\" . | nindent 6 }}\n  template:\n    metadata:\n      {{- with .Values.podAnnotations }}\n      annotations:\n        {{- toYaml . | nindent 8 }}\n      {{- end }}\n      labels:\n        {{- include \"disco-agent.labels\" . | nindent 8 }}\n        {{- with .Values.podLabels }}\n        {{- toYaml . | nindent 8 }}\n        {{- end }}\n    spec:\n      {{- with .Values.imagePullSecrets }}\n      imagePullSecrets:\n        {{- toYaml . | nindent 8 }}\n      {{- end }}\n      serviceAccountName: {{ include \"disco-agent.serviceAccountName\" . }}\n      {{- with .Values.podSecurityContext }}\n      securityContext:\n        {{- toYaml . | nindent 8 }}\n      {{- end }}\n      containers:\n        - name: agent\n          {{- with .Values.securityContext }}\n          securityContext:\n            {{- toYaml . 
| nindent 12 }}\n          {{- end }}\n          image: \"{{ template \"disco-agent.image\" (tuple .Values.image .Values.imageRegistry .Values.imageNamespace (printf \":%s\" .Chart.AppVersion)) }}\"\n          imagePullPolicy: {{ .Values.image.pullPolicy }}\n          env:\n          - name: POD_NAMESPACE\n            valueFrom:\n              fieldRef:\n                fieldPath: metadata.namespace\n          - name: POD_NAME\n            valueFrom:\n              fieldRef:\n                fieldPath: metadata.name\n          - name: POD_UID\n            valueFrom:\n              fieldRef:\n                fieldPath: metadata.uid\n          - name: POD_NODE\n            valueFrom:\n              fieldRef:\n                fieldPath: spec.nodeName\n          - name: ARK_USERNAME\n            valueFrom:\n              secretKeyRef:\n                name: {{ .Values.authentication.secretName }}\n                key: ARK_USERNAME\n          - name: ARK_SECRET\n            valueFrom:\n              secretKeyRef:\n                name: {{ .Values.authentication.secretName }}\n                key: ARK_SECRET\n          - name: ARK_SUBDOMAIN\n            valueFrom:\n              secretKeyRef:\n                name: {{ .Values.authentication.secretName }}\n                key: ARK_SUBDOMAIN\n          - name: ARK_DISCOVERY_API\n            valueFrom:\n              secretKeyRef:\n                name: {{ .Values.authentication.secretName }}\n                key: ARK_DISCOVERY_API\n                optional: true\n          - name: ARK_SEND_SECRET_VALUES\n            value: {{ .Values.config.sendSecretValues | default \"false\" | quote }}\n          {{- with .Values.http_proxy }}\n          - name: HTTP_PROXY\n            value: {{ . }}\n          {{- end }}\n          {{- with .Values.https_proxy }}\n          - name: HTTPS_PROXY\n            value: {{ . }}\n          {{- end }}\n          {{- with .Values.no_proxy }}\n          - name: NO_PROXY\n            value: {{ . 
}}\n          {{- end }}\n          args:\n            - \"agent\"\n            - \"-c\"\n            - \"/etc/disco-agent/config.yaml\"\n            - --machine-hub\n            - --logging-format=json\n            {{- if .Values.metrics.enabled }}\n            - --enable-metrics\n            {{- end }}\n            {{- if .Values.pprof.enabled }}\n            - --enable-pprof\n            {{- end }}\n            {{- range .Values.extraArgs }}\n            - {{ . | quote }}\n            {{- end }}\n          {{- with .Values.resources }}\n          resources:\n            {{- toYaml . | nindent 12 }}\n          {{- end }}\n          volumeMounts:\n            - name: config\n              mountPath: \"/etc/disco-agent\"\n              readOnly: true\n            {{- with .Values.volumeMounts }}\n            {{- toYaml . | nindent 12 }}\n            {{- end }}\n          ports:\n            - name: agent-api\n              containerPort: 8081\n      volumes:\n        - name: config\n          configMap:\n            name: {{ include \"disco-agent.fullname\" . }}-config\n            optional: false\n        {{- with .Values.volumes }}\n        {{- toYaml . | nindent 8 }}\n        {{- end }}\n      {{- with .Values.nodeSelector }}\n      nodeSelector:\n        {{- toYaml . | nindent 8 }}\n      {{- end }}\n      {{- with .Values.affinity }}\n      affinity:\n        {{- toYaml . | nindent 8 }}\n      {{- end }}\n      {{- with .Values.tolerations }}\n      tolerations:\n        {{- toYaml . | nindent 8 }}\n      {{- end }}\n"
  },
  {
    "path": "deploy/charts/disco-agent/templates/poddisruptionbudget.yaml",
    "content": "{{- if .Values.podDisruptionBudget.enabled }}\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: {{ include \"disco-agent.fullname\" . }}\n  namespace: {{ .Release.Namespace }}\n  labels:\n    {{- include \"disco-agent.labels\" . | nindent 4 }}\nspec:\n  selector:\n    matchLabels:\n      {{- include \"disco-agent.selectorLabels\" . | nindent 6 }}\n\n  {{- if not (or (hasKey .Values.podDisruptionBudget \"minAvailable\") (hasKey .Values.podDisruptionBudget \"maxUnavailable\")) }}\n  minAvailable: 1 # Default value because minAvailable and maxUnavailable are not set\n  {{- end }}\n  {{- if hasKey .Values.podDisruptionBudget \"minAvailable\" }}\n  minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}\n  {{- end }}\n  {{- if hasKey .Values.podDisruptionBudget \"maxUnavailable\" }}\n  maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }}\n  {{- end }}\n{{- end }}\n"
  },
  {
    "path": "deploy/charts/disco-agent/templates/podmonitor.yaml",
    "content": "{{- if and .Values.metrics.enabled .Values.metrics.podmonitor.enabled }}\napiVersion: monitoring.coreos.com/v1\nkind: PodMonitor\nmetadata:\n  name: {{ include \"disco-agent.fullname\" . }}\n{{- if .Values.metrics.podmonitor.namespace }}\n  namespace: {{ .Values.metrics.podmonitor.namespace }}\n{{- else }}\n  namespace: {{ .Release.Namespace | quote }}\n{{- end }}\n  labels:\n    {{- include \"disco-agent.labels\" . | nindent 4 }}\n    prometheus: {{ .Values.metrics.podmonitor.prometheusInstance }}\n    {{- with .Values.metrics.podmonitor.labels }}\n    {{- toYaml . | nindent 4 }}\n    {{- end }}\n{{- with .Values.metrics.podmonitor.annotations }}\n  annotations:\n    {{- toYaml . | nindent 4 }}\n{{- end }}\nspec:\n  jobLabel: {{ include \"disco-agent.fullname\" . }}\n  selector:\n    matchLabels:\n      {{- include \"disco-agent.selectorLabels\" . | nindent 6 }}\n{{- if .Values.metrics.podmonitor.namespace }}\n  namespaceSelector:\n    matchNames:\n      - {{ .Release.Namespace | quote }}\n{{- end }}\n  podMetricsEndpoints:\n    - port: agent-api\n      path: /metrics\n      interval: {{ .Values.metrics.podmonitor.interval }}\n      scrapeTimeout: {{ .Values.metrics.podmonitor.scrapeTimeout }}\n      honorLabels: {{ .Values.metrics.podmonitor.honorLabels }}\n      {{- with .Values.metrics.podmonitor.endpointAdditionalProperties }}\n      {{- toYaml . | nindent 4 }}\n      {{- end }}\n{{- end }}\n"
  },
  {
    "path": "deploy/charts/disco-agent/templates/rbac.yaml",
    "content": "---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ include \"disco-agent.fullname\" . }}-event-emitted\n  labels:\n    {{- include \"disco-agent.labels\" . | nindent 4 }}\nrules:\n  - apiGroups: [\"\"]\n    resources: [\"events\"]\n    verbs: [\"create\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ include \"disco-agent.fullname\" . }}-event-emitted\n  labels:\n    {{- include \"disco-agent.labels\" . | nindent 4 }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ include \"disco-agent.fullname\" . }}-event-emitted\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"disco-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ include \"disco-agent.fullname\" . }}-cluster-viewer\n  labels:\n    {{- include \"disco-agent.labels\" . | nindent 4 }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: view\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"disco-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: {{ include \"disco-agent.fullname\" . }}-secret-reader\n  labels:\n    {{- include \"disco-agent.labels\" . | nindent 4 }}\nrules:\n  - apiGroups: [\"\"]\n    resources: [\"secrets\"]\n    verbs: [\"get\", \"list\", \"watch\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ include \"disco-agent.fullname\" . }}-secret-reader\n  labels:\n    {{- include \"disco-agent.labels\" . | nindent 4 }}\nroleRef:\n  kind: ClusterRole\n  name: {{ include \"disco-agent.fullname\" . }}-secret-reader\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"disco-agent.serviceAccountName\" . 
}}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: {{ include \"disco-agent.fullname\" . }}-rbac-reader\n  labels:\n    {{- include \"disco-agent.labels\" . | nindent 4 }}\nrules:\n  - apiGroups: [\"rbac.authorization.k8s.io\"]\n    resources:\n    - roles\n    - clusterroles\n    - rolebindings\n    - clusterrolebindings\n    verbs: [\"get\", \"list\", \"watch\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ include \"disco-agent.fullname\" . }}-rbac-reader\n  labels:\n    {{- include \"disco-agent.labels\" . | nindent 4 }}\nroleRef:\n  kind: ClusterRole\n  name: {{ include \"disco-agent.fullname\" . }}-rbac-reader\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"disco-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ include \"disco-agent.fullname\" . }}-oidc-discovery\n  labels:\n    {{- include \"disco-agent.labels\" . | nindent 4 }}\nroleRef:\n  kind: ClusterRole\n  name: system:service-account-issuer-discovery\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"disco-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: {{ include \"disco-agent.fullname\" . }}-eso-reader\n  labels:\n    {{- include \"disco-agent.labels\" . | nindent 4 }}\nrules:\n  - apiGroups: [\"external-secrets.io\"]\n    resources:\n    - externalsecrets\n    - clusterexternalsecrets\n    - secretstores\n    - clustersecretstores\n    verbs: [\"get\", \"list\", \"watch\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ include \"disco-agent.fullname\" . 
}}-eso-reader\n  labels:\n    {{- include \"disco-agent.labels\" . | nindent 4 }}\nroleRef:\n  kind: ClusterRole\n  name: {{ include \"disco-agent.fullname\" . }}-eso-reader\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"disco-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n"
  },
  {
    "path": "deploy/charts/disco-agent/templates/serviceaccount.yaml",
    "content": "{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: {{ include \"disco-agent.serviceAccountName\" . }}\n  labels:\n    {{- include \"disco-agent.labels\" . | nindent 4 }}\n  {{- with .Values.serviceAccount.annotations }}\n  annotations:\n    {{- toYaml . | nindent 4 }}\n  {{- end }}\nautomountServiceAccountToken: {{ .Values.serviceAccount.automount }}\n{{- end }}\n"
  },
  {
    "path": "deploy/charts/disco-agent/tests/README.md",
    "content": "# `helm unittest`\n\nWe use `helm unittest` to test the YAML output of the Helm chart.\n\nIn order to update the snapshots, run the following command:\n\n```bash\nmake test-helm-snapshot\n```\n"
  },
  {
    "path": "deploy/charts/disco-agent/tests/__snapshot__/configmap_test.yaml.snap",
    "content": "custom-cluster-description:\n  1: |\n    apiVersion: v1\n    data:\n      config.yaml: |-\n        cluster_name: \"\"\n        cluster_description: \"A cloud hosted Kubernetes cluster hosting production workloads.\\n\\nteam: team-1\\nemail: team-1@example.com\\npurpose: Production workloads\\n\"\n        period: \"12h0m0s\"\n        data-gatherers:\n        - kind: oidc\n          name: ark/oidc\n        - kind: k8s-discovery\n          name: ark/discovery\n        - kind: k8s-dynamic\n          name: ark/secrets\n          config:\n            resource-type:\n              version: v1\n              resource: secrets\n            field-selectors:\n            - type!=kubernetes.io/dockercfg\n            - type!=kubernetes.io/dockerconfigjson\n            - type!=bootstrap.kubernetes.io/token\n            - type!=helm.sh/release.v1\n        - kind: k8s-dynamic\n          name: ark/serviceaccounts\n          config:\n            resource-type:\n              resource: serviceaccounts\n              version: v1\n        - kind: k8s-dynamic\n          name: ark/roles\n          config:\n            resource-type:\n              version: v1\n              group: rbac.authorization.k8s.io\n              resource: roles\n        - kind: k8s-dynamic\n          name: ark/clusterroles\n          config:\n            resource-type:\n              version: v1\n              group: rbac.authorization.k8s.io\n              resource: clusterroles\n        - kind: k8s-dynamic\n          name: ark/rolebindings\n          config:\n            resource-type:\n              version: v1\n              group: rbac.authorization.k8s.io\n              resource: rolebindings\n        - kind: k8s-dynamic\n          name: ark/clusterrolebindings\n          config:\n            resource-type:\n              version: v1\n              group: rbac.authorization.k8s.io\n              resource: clusterrolebindings\n        - kind: k8s-dynamic\n          name: ark/jobs\n          
config:\n            resource-type:\n              version: v1\n              group: batch\n              resource: jobs\n        - kind: k8s-dynamic\n          name: ark/cronjobs\n          config:\n            resource-type:\n              version: v1\n              group: batch\n              resource: cronjobs\n        - kind: k8s-dynamic\n          name: ark/deployments\n          config:\n            resource-type:\n              version: v1\n              group: apps\n              resource: deployments\n        - kind: k8s-dynamic\n          name: ark/statefulsets\n          config:\n            resource-type:\n              version: v1\n              group: apps\n              resource: statefulsets\n        - kind: k8s-dynamic\n          name: ark/daemonsets\n          config:\n            resource-type:\n              version: v1\n              group: apps\n              resource: daemonsets\n        - kind: k8s-dynamic\n          name: ark/pods\n          config:\n            resource-type:\n              version: v1\n              resource: pods\n        - kind: k8s-dynamic\n          name: ark/configmaps\n          config:\n            resource-type:\n              resource: configmaps\n              version: v1\n            label-selectors:\n            - conjur.org/name=conjur-connect-configmap\n        - kind: k8s-dynamic\n          name: ark/esoexternalsecrets\n          config:\n            resource-type:\n              group: external-secrets.io\n              version: v1\n              resource: externalsecrets\n        - kind: k8s-dynamic\n          name: ark/esosecretstores\n          config:\n            resource-type:\n              group: external-secrets.io\n              version: v1\n              resource: secretstores\n        - kind: k8s-dynamic\n          name: ark/esoclusterexternalsecrets\n          config:\n            resource-type:\n              group: external-secrets.io\n              version: v1\n              resource: 
clusterexternalsecrets\n        - kind: k8s-dynamic\n          name: ark/esoclustersecretstores\n          config:\n            resource-type:\n              group: external-secrets.io\n              version: v1\n              resource: clustersecretstores\n    kind: ConfigMap\n    metadata:\n      labels:\n        app.kubernetes.io/instance: test\n        app.kubernetes.io/managed-by: Helm\n        app.kubernetes.io/name: disco-agent\n        app.kubernetes.io/version: v0.0.0\n        helm.sh/chart: disco-agent-0.0.0\n      name: test-disco-agent-config\n      namespace: test-ns\ncustom-cluster-name:\n  1: |\n    apiVersion: v1\n    data:\n      config.yaml: |-\n        cluster_name: \"cluster-1 region-1 cloud-1 \"\n        cluster_description: \"\"\n        period: \"12h0m0s\"\n        data-gatherers:\n        - kind: oidc\n          name: ark/oidc\n        - kind: k8s-discovery\n          name: ark/discovery\n        - kind: k8s-dynamic\n          name: ark/secrets\n          config:\n            resource-type:\n              version: v1\n              resource: secrets\n            field-selectors:\n            - type!=kubernetes.io/dockercfg\n            - type!=kubernetes.io/dockerconfigjson\n            - type!=bootstrap.kubernetes.io/token\n            - type!=helm.sh/release.v1\n        - kind: k8s-dynamic\n          name: ark/serviceaccounts\n          config:\n            resource-type:\n              resource: serviceaccounts\n              version: v1\n        - kind: k8s-dynamic\n          name: ark/roles\n          config:\n            resource-type:\n              version: v1\n              group: rbac.authorization.k8s.io\n              resource: roles\n        - kind: k8s-dynamic\n          name: ark/clusterroles\n          config:\n            resource-type:\n              version: v1\n              group: rbac.authorization.k8s.io\n              resource: clusterroles\n        - kind: k8s-dynamic\n          name: ark/rolebindings\n          
config:\n            resource-type:\n              version: v1\n              group: rbac.authorization.k8s.io\n              resource: rolebindings\n        - kind: k8s-dynamic\n          name: ark/clusterrolebindings\n          config:\n            resource-type:\n              version: v1\n              group: rbac.authorization.k8s.io\n              resource: clusterrolebindings\n        - kind: k8s-dynamic\n          name: ark/jobs\n          config:\n            resource-type:\n              version: v1\n              group: batch\n              resource: jobs\n        - kind: k8s-dynamic\n          name: ark/cronjobs\n          config:\n            resource-type:\n              version: v1\n              group: batch\n              resource: cronjobs\n        - kind: k8s-dynamic\n          name: ark/deployments\n          config:\n            resource-type:\n              version: v1\n              group: apps\n              resource: deployments\n        - kind: k8s-dynamic\n          name: ark/statefulsets\n          config:\n            resource-type:\n              version: v1\n              group: apps\n              resource: statefulsets\n        - kind: k8s-dynamic\n          name: ark/daemonsets\n          config:\n            resource-type:\n              version: v1\n              group: apps\n              resource: daemonsets\n        - kind: k8s-dynamic\n          name: ark/pods\n          config:\n            resource-type:\n              version: v1\n              resource: pods\n        - kind: k8s-dynamic\n          name: ark/configmaps\n          config:\n            resource-type:\n              resource: configmaps\n              version: v1\n            label-selectors:\n            - conjur.org/name=conjur-connect-configmap\n        - kind: k8s-dynamic\n          name: ark/esoexternalsecrets\n          config:\n            resource-type:\n              group: external-secrets.io\n              version: v1\n              resource: 
externalsecrets\n        - kind: k8s-dynamic\n          name: ark/esosecretstores\n          config:\n            resource-type:\n              group: external-secrets.io\n              version: v1\n              resource: secretstores\n        - kind: k8s-dynamic\n          name: ark/esoclusterexternalsecrets\n          config:\n            resource-type:\n              group: external-secrets.io\n              version: v1\n              resource: clusterexternalsecrets\n        - kind: k8s-dynamic\n          name: ark/esoclustersecretstores\n          config:\n            resource-type:\n              group: external-secrets.io\n              version: v1\n              resource: clustersecretstores\n    kind: ConfigMap\n    metadata:\n      labels:\n        app.kubernetes.io/instance: test\n        app.kubernetes.io/managed-by: Helm\n        app.kubernetes.io/name: disco-agent\n        app.kubernetes.io/version: v0.0.0\n        helm.sh/chart: disco-agent-0.0.0\n      name: test-disco-agent-config\n      namespace: test-ns\ncustom-period:\n  1: |\n    apiVersion: v1\n    data:\n      config.yaml: |-\n        cluster_name: \"\"\n        cluster_description: \"\"\n        period: \"1m\"\n        data-gatherers:\n        - kind: oidc\n          name: ark/oidc\n        - kind: k8s-discovery\n          name: ark/discovery\n        - kind: k8s-dynamic\n          name: ark/secrets\n          config:\n            resource-type:\n              version: v1\n              resource: secrets\n            field-selectors:\n            - type!=kubernetes.io/dockercfg\n            - type!=kubernetes.io/dockerconfigjson\n            - type!=bootstrap.kubernetes.io/token\n            - type!=helm.sh/release.v1\n        - kind: k8s-dynamic\n          name: ark/serviceaccounts\n          config:\n            resource-type:\n              resource: serviceaccounts\n              version: v1\n        - kind: k8s-dynamic\n          name: ark/roles\n          config:\n            
resource-type:\n              version: v1\n              group: rbac.authorization.k8s.io\n              resource: roles\n        - kind: k8s-dynamic\n          name: ark/clusterroles\n          config:\n            resource-type:\n              version: v1\n              group: rbac.authorization.k8s.io\n              resource: clusterroles\n        - kind: k8s-dynamic\n          name: ark/rolebindings\n          config:\n            resource-type:\n              version: v1\n              group: rbac.authorization.k8s.io\n              resource: rolebindings\n        - kind: k8s-dynamic\n          name: ark/clusterrolebindings\n          config:\n            resource-type:\n              version: v1\n              group: rbac.authorization.k8s.io\n              resource: clusterrolebindings\n        - kind: k8s-dynamic\n          name: ark/jobs\n          config:\n            resource-type:\n              version: v1\n              group: batch\n              resource: jobs\n        - kind: k8s-dynamic\n          name: ark/cronjobs\n          config:\n            resource-type:\n              version: v1\n              group: batch\n              resource: cronjobs\n        - kind: k8s-dynamic\n          name: ark/deployments\n          config:\n            resource-type:\n              version: v1\n              group: apps\n              resource: deployments\n        - kind: k8s-dynamic\n          name: ark/statefulsets\n          config:\n            resource-type:\n              version: v1\n              group: apps\n              resource: statefulsets\n        - kind: k8s-dynamic\n          name: ark/daemonsets\n          config:\n            resource-type:\n              version: v1\n              group: apps\n              resource: daemonsets\n        - kind: k8s-dynamic\n          name: ark/pods\n          config:\n            resource-type:\n              version: v1\n              resource: pods\n        - kind: k8s-dynamic\n          name: 
ark/configmaps\n          config:\n            resource-type:\n              resource: configmaps\n              version: v1\n            label-selectors:\n            - conjur.org/name=conjur-connect-configmap\n        - kind: k8s-dynamic\n          name: ark/esoexternalsecrets\n          config:\n            resource-type:\n              group: external-secrets.io\n              version: v1\n              resource: externalsecrets\n        - kind: k8s-dynamic\n          name: ark/esosecretstores\n          config:\n            resource-type:\n              group: external-secrets.io\n              version: v1\n              resource: secretstores\n        - kind: k8s-dynamic\n          name: ark/esoclusterexternalsecrets\n          config:\n            resource-type:\n              group: external-secrets.io\n              version: v1\n              resource: clusterexternalsecrets\n        - kind: k8s-dynamic\n          name: ark/esoclustersecretstores\n          config:\n            resource-type:\n              group: external-secrets.io\n              version: v1\n              resource: clustersecretstores\n    kind: ConfigMap\n    metadata:\n      labels:\n        app.kubernetes.io/instance: test\n        app.kubernetes.io/managed-by: Helm\n        app.kubernetes.io/name: disco-agent\n        app.kubernetes.io/version: v0.0.0\n        helm.sh/chart: disco-agent-0.0.0\n      name: test-disco-agent-config\n      namespace: test-ns\ndefaults:\n  1: |\n    apiVersion: v1\n    data:\n      config.yaml: |-\n        cluster_name: \"\"\n        cluster_description: \"\"\n        period: \"12h0m0s\"\n        data-gatherers:\n        - kind: oidc\n          name: ark/oidc\n        - kind: k8s-discovery\n          name: ark/discovery\n        - kind: k8s-dynamic\n          name: ark/secrets\n          config:\n            resource-type:\n              version: v1\n              resource: secrets\n            field-selectors:\n            - 
type!=kubernetes.io/dockercfg\n            - type!=kubernetes.io/dockerconfigjson\n            - type!=bootstrap.kubernetes.io/token\n            - type!=helm.sh/release.v1\n        - kind: k8s-dynamic\n          name: ark/serviceaccounts\n          config:\n            resource-type:\n              resource: serviceaccounts\n              version: v1\n        - kind: k8s-dynamic\n          name: ark/roles\n          config:\n            resource-type:\n              version: v1\n              group: rbac.authorization.k8s.io\n              resource: roles\n        - kind: k8s-dynamic\n          name: ark/clusterroles\n          config:\n            resource-type:\n              version: v1\n              group: rbac.authorization.k8s.io\n              resource: clusterroles\n        - kind: k8s-dynamic\n          name: ark/rolebindings\n          config:\n            resource-type:\n              version: v1\n              group: rbac.authorization.k8s.io\n              resource: rolebindings\n        - kind: k8s-dynamic\n          name: ark/clusterrolebindings\n          config:\n            resource-type:\n              version: v1\n              group: rbac.authorization.k8s.io\n              resource: clusterrolebindings\n        - kind: k8s-dynamic\n          name: ark/jobs\n          config:\n            resource-type:\n              version: v1\n              group: batch\n              resource: jobs\n        - kind: k8s-dynamic\n          name: ark/cronjobs\n          config:\n            resource-type:\n              version: v1\n              group: batch\n              resource: cronjobs\n        - kind: k8s-dynamic\n          name: ark/deployments\n          config:\n            resource-type:\n              version: v1\n              group: apps\n              resource: deployments\n        - kind: k8s-dynamic\n          name: ark/statefulsets\n          config:\n            resource-type:\n              version: v1\n              group: apps\n       
       resource: statefulsets\n        - kind: k8s-dynamic\n          name: ark/daemonsets\n          config:\n            resource-type:\n              version: v1\n              group: apps\n              resource: daemonsets\n        - kind: k8s-dynamic\n          name: ark/pods\n          config:\n            resource-type:\n              version: v1\n              resource: pods\n        - kind: k8s-dynamic\n          name: ark/configmaps\n          config:\n            resource-type:\n              resource: configmaps\n              version: v1\n            label-selectors:\n            - conjur.org/name=conjur-connect-configmap\n        - kind: k8s-dynamic\n          name: ark/esoexternalsecrets\n          config:\n            resource-type:\n              group: external-secrets.io\n              version: v1\n              resource: externalsecrets\n        - kind: k8s-dynamic\n          name: ark/esosecretstores\n          config:\n            resource-type:\n              group: external-secrets.io\n              version: v1\n              resource: secretstores\n        - kind: k8s-dynamic\n          name: ark/esoclusterexternalsecrets\n          config:\n            resource-type:\n              group: external-secrets.io\n              version: v1\n              resource: clusterexternalsecrets\n        - kind: k8s-dynamic\n          name: ark/esoclustersecretstores\n          config:\n            resource-type:\n              group: external-secrets.io\n              version: v1\n              resource: clustersecretstores\n    kind: ConfigMap\n    metadata:\n      labels:\n        app.kubernetes.io/instance: test\n        app.kubernetes.io/managed-by: Helm\n        app.kubernetes.io/name: disco-agent\n        app.kubernetes.io/version: v0.0.0\n        helm.sh/chart: disco-agent-0.0.0\n      name: test-disco-agent-config\n      namespace: test-ns\n"
  },
  {
    "path": "deploy/charts/disco-agent/tests/configmap_test.yaml",
    "content": "suite: test the contents of the config.yaml\ntemplates:\n  - configmap.yaml\nrelease:\n  name: test\n  namespace: test-ns\ntests:\n  - it: defaults\n    asserts:\n      - matchSnapshot: {}\n\n  - it: custom-period\n    set:\n      config.period: 1m\n    asserts:\n      - matchSnapshot: {}\n\n  - it: custom-cluster-name\n    set:\n      config.clusterName: \"cluster-1 region-1 cloud-1 \"\n    asserts:\n      - matchSnapshot: {}\n\n  - it: custom-cluster-description\n    set:\n      config.clusterDescription: |\n        A cloud hosted Kubernetes cluster hosting production workloads.\n\n        team: team-1\n        email: team-1@example.com\n        purpose: Production workloads\n    asserts:\n      - matchSnapshot: {}\n"
  },
  {
    "path": "deploy/charts/disco-agent/values.linter.exceptions",
    "content": ""
  },
  {
    "path": "deploy/charts/disco-agent/values.schema.json",
    "content": "{\n  \"$defs\": {\n    \"helm-values\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"acceptTerms\": {\n          \"$ref\": \"#/$defs/helm-values.acceptTerms\"\n        },\n        \"affinity\": {\n          \"$ref\": \"#/$defs/helm-values.affinity\"\n        },\n        \"authentication\": {\n          \"$ref\": \"#/$defs/helm-values.authentication\"\n        },\n        \"config\": {\n          \"$ref\": \"#/$defs/helm-values.config\"\n        },\n        \"extraArgs\": {\n          \"$ref\": \"#/$defs/helm-values.extraArgs\"\n        },\n        \"fullnameOverride\": {\n          \"$ref\": \"#/$defs/helm-values.fullnameOverride\"\n        },\n        \"global\": {\n          \"$ref\": \"#/$defs/helm-values.global\"\n        },\n        \"http_proxy\": {\n          \"$ref\": \"#/$defs/helm-values.http_proxy\"\n        },\n        \"https_proxy\": {\n          \"$ref\": \"#/$defs/helm-values.https_proxy\"\n        },\n        \"image\": {\n          \"$ref\": \"#/$defs/helm-values.image\"\n        },\n        \"imageNamespace\": {\n          \"$ref\": \"#/$defs/helm-values.imageNamespace\"\n        },\n        \"imagePullSecrets\": {\n          \"$ref\": \"#/$defs/helm-values.imagePullSecrets\"\n        },\n        \"imageRegistry\": {\n          \"$ref\": \"#/$defs/helm-values.imageRegistry\"\n        },\n        \"metrics\": {\n          \"$ref\": \"#/$defs/helm-values.metrics\"\n        },\n        \"nameOverride\": {\n          \"$ref\": \"#/$defs/helm-values.nameOverride\"\n        },\n        \"no_proxy\": {\n          \"$ref\": \"#/$defs/helm-values.no_proxy\"\n        },\n        \"nodeSelector\": {\n          \"$ref\": \"#/$defs/helm-values.nodeSelector\"\n        },\n        \"podAnnotations\": {\n          \"$ref\": \"#/$defs/helm-values.podAnnotations\"\n        },\n        \"podDisruptionBudget\": {\n          \"$ref\": \"#/$defs/helm-values.podDisruptionBudget\"\n        },\n        \"podLabels\": {\n   
       \"$ref\": \"#/$defs/helm-values.podLabels\"\n        },\n        \"podSecurityContext\": {\n          \"$ref\": \"#/$defs/helm-values.podSecurityContext\"\n        },\n        \"pprof\": {\n          \"$ref\": \"#/$defs/helm-values.pprof\"\n        },\n        \"replicaCount\": {\n          \"$ref\": \"#/$defs/helm-values.replicaCount\"\n        },\n        \"resources\": {\n          \"$ref\": \"#/$defs/helm-values.resources\"\n        },\n        \"securityContext\": {\n          \"$ref\": \"#/$defs/helm-values.securityContext\"\n        },\n        \"serviceAccount\": {\n          \"$ref\": \"#/$defs/helm-values.serviceAccount\"\n        },\n        \"tolerations\": {\n          \"$ref\": \"#/$defs/helm-values.tolerations\"\n        },\n        \"volumeMounts\": {\n          \"$ref\": \"#/$defs/helm-values.volumeMounts\"\n        },\n        \"volumes\": {\n          \"$ref\": \"#/$defs/helm-values.volumes\"\n        }\n      },\n      \"type\": \"object\"\n    },\n    \"helm-values.acceptTerms\": {\n      \"default\": false,\n      \"description\": \"Must be set to indicate that you have read and accepted the CyberArk Terms of Service. 
If false, the helm chart will fail to install and will print a message with instructions on how to accept the TOS.\",\n      \"type\": \"boolean\"\n    },\n    \"helm-values.affinity\": {\n      \"default\": {},\n      \"type\": \"object\"\n    },\n    \"helm-values.authentication\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"secretName\": {\n          \"$ref\": \"#/$defs/helm-values.authentication.secretName\"\n        }\n      },\n      \"type\": \"object\"\n    },\n    \"helm-values.authentication.secretName\": {\n      \"default\": \"agent-credentials\",\n      \"type\": \"string\"\n    },\n    \"helm-values.config\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"clusterDescription\": {\n          \"$ref\": \"#/$defs/helm-values.config.clusterDescription\"\n        },\n        \"clusterName\": {\n          \"$ref\": \"#/$defs/helm-values.config.clusterName\"\n        },\n        \"excludeAnnotationKeysRegex\": {\n          \"$ref\": \"#/$defs/helm-values.config.excludeAnnotationKeysRegex\"\n        },\n        \"excludeLabelKeysRegex\": {\n          \"$ref\": \"#/$defs/helm-values.config.excludeLabelKeysRegex\"\n        },\n        \"period\": {\n          \"$ref\": \"#/$defs/helm-values.config.period\"\n        },\n        \"sendSecretValues\": {\n          \"$ref\": \"#/$defs/helm-values.config.sendSecretValues\"\n        }\n      },\n      \"type\": \"object\"\n    },\n    \"helm-values.config.clusterDescription\": {\n      \"default\": \"\",\n      \"description\": \"A short description of the cluster where the agent is deployed (optional).\\n\\nThis description will be associated with the data that the agent uploads to the Discovery and Context service. 
The description may include contact information such as the email address of the cluster administrator, so that any problems and risks identified by the Discovery and Context service can be communicated to the people responsible for the affected secrets.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.config.clusterName\": {\n      \"default\": \"\",\n      \"description\": \"A human readable name for the cluster where the agent is deployed (optional).\\n\\nThis cluster name will be associated with the data that the agent uploads to the Discovery and Context service. If empty (the default), the service account name will be used instead.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.config.excludeAnnotationKeysRegex\": {\n      \"default\": [],\n      \"description\": \"You can configure the agent to exclude some annotations or labels from being pushed. All Kubernetes objects are affected. The objects are still pushed, but the specified annotations and labels are removed before being pushed.\\n\\nThe dot is the only character that needs to be escaped in the regex. Use either double quotes with escaped single quotes or unquoted strings for the regex to avoid YAML parsing issues with `\\\\.`.\\n\\nExample: excludeAnnotationKeysRegex: ['^kapp\\\\.k14s\\\\.io/original.*']\",\n      \"items\": {},\n      \"type\": \"array\"\n    },\n    \"helm-values.config.excludeLabelKeysRegex\": {\n      \"default\": [],\n      \"items\": {},\n      \"type\": \"array\"\n    },\n    \"helm-values.config.period\": {\n      \"default\": \"12h0m0s\",\n      \"description\": \"Push data every 12 hours unless changed.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.config.sendSecretValues\": {\n      \"default\": true,\n      \"description\": \"Enable sending of Secret values to CyberArk in addition to metadata. Metadata is always sent, but the actual values of Secrets are not sent by default. 
When enabled, Secret data is encrypted using envelope encryption using a key managed by CyberArk, fetched from the Discovery and Context service.\",\n      \"type\": \"boolean\"\n    },\n    \"helm-values.extraArgs\": {\n      \"default\": [],\n      \"description\": \"extraArgs:\\n- --logging-format=json\\n- --log-level=6 # To enable HTTP request logging\",\n      \"items\": {},\n      \"type\": \"array\"\n    },\n    \"helm-values.fullnameOverride\": {\n      \"default\": \"\",\n      \"type\": \"string\"\n    },\n    \"helm-values.global\": {\n      \"description\": \"Global values shared across all (sub)charts\"\n    },\n    \"helm-values.http_proxy\": {\n      \"description\": \"Configures the HTTP_PROXY environment variable where a HTTP proxy is required.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.https_proxy\": {\n      \"description\": \"Configures the HTTPS_PROXY environment variable where a HTTP proxy is required.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.image\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"digest\": {\n          \"$ref\": \"#/$defs/helm-values.image.digest\"\n        },\n        \"name\": {\n          \"$ref\": \"#/$defs/helm-values.image.name\"\n        },\n        \"pullPolicy\": {\n          \"$ref\": \"#/$defs/helm-values.image.pullPolicy\"\n        },\n        \"registry\": {\n          \"$ref\": \"#/$defs/helm-values.image.registry\"\n        },\n        \"repository\": {\n          \"$ref\": \"#/$defs/helm-values.image.repository\"\n        },\n        \"tag\": {\n          \"$ref\": \"#/$defs/helm-values.image.tag\"\n        }\n      },\n      \"type\": \"object\"\n    },\n    \"helm-values.image.digest\": {\n      \"default\": \"\",\n      \"description\": \"Override the image digest to deploy by setting this variable. 
If set together with `image.tag`, the rendered image will include both tag and digest.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.image.name\": {\n      \"default\": \"disco-agent\",\n      \"description\": \"The image name for the Discovery Agent.\\nThis is used (together with `imageRegistry` and `imageNamespace`) to construct the full image reference.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.image.pullPolicy\": {\n      \"default\": \"IfNotPresent\",\n      \"description\": \"This sets the pull policy for images.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.image.registry\": {\n      \"description\": \"Deprecated: per-component registry prefix.\\n\\nIf set, this value is *prepended* to the image repository that the chart would otherwise render. This applies both when `image.repository` is set and when the repository is computed from\\n`imageRegistry` + `imageNamespace` + `image.name`.\\n\\nThis can produce \\\"double registry\\\" style references such as\\n`legacy.example.io/quay.io/jetstack/...`. Prefer using the global\\n`imageRegistry`/`imageNamespace` values.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.image.repository\": {\n      \"default\": \"\",\n      \"description\": \"Full repository override (takes precedence over `imageRegistry`, `imageNamespace`, and `image.name`).\\nExample: quay.io/jetstack/disco-agent\",\n      \"type\": \"string\"\n    },\n    \"helm-values.image.tag\": {\n      \"default\": \"\",\n      \"description\": \"Override the image tag to deploy by setting this variable. 
If no value is set, the chart's appVersion is used.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.imageNamespace\": {\n      \"default\": \"jetstack\",\n      \"description\": \"The repository namespace used for disco-agent images by default.\\nExamples:\\n- jetstack\\n- custom-namespace\",\n      \"type\": \"string\"\n    },\n    \"helm-values.imagePullSecrets\": {\n      \"default\": [],\n      \"description\": \"This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/\",\n      \"items\": {},\n      \"type\": \"array\"\n    },\n    \"helm-values.imageRegistry\": {\n      \"default\": \"quay.io\",\n      \"description\": \"The container registry used for disco-agent images by default. This can include path prefixes (e.g. \\\"artifactory.example.com/docker\\\").\",\n      \"type\": \"string\"\n    },\n    \"helm-values.metrics\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"enabled\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.enabled\"\n        },\n        \"podmonitor\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor\"\n        }\n      },\n      \"type\": \"object\"\n    },\n    \"helm-values.metrics.enabled\": {\n      \"default\": true,\n      \"description\": \"Enable the metrics server.\\nIf false, the metrics server will be disabled and the other metrics fields below will be ignored.\",\n      \"type\": \"boolean\"\n    },\n    \"helm-values.metrics.podmonitor\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"annotations\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor.annotations\"\n        },\n        \"enabled\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor.enabled\"\n        },\n        \"endpointAdditionalProperties\": {\n          \"$ref\": 
\"#/$defs/helm-values.metrics.podmonitor.endpointAdditionalProperties\"\n        },\n        \"honorLabels\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor.honorLabels\"\n        },\n        \"interval\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor.interval\"\n        },\n        \"labels\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor.labels\"\n        },\n        \"namespace\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor.namespace\"\n        },\n        \"prometheusInstance\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor.prometheusInstance\"\n        },\n        \"scrapeTimeout\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor.scrapeTimeout\"\n        }\n      },\n      \"type\": \"object\"\n    },\n    \"helm-values.metrics.podmonitor.annotations\": {\n      \"default\": {},\n      \"description\": \"Additional annotations to add to the PodMonitor.\",\n      \"type\": \"object\"\n    },\n    \"helm-values.metrics.podmonitor.enabled\": {\n      \"default\": false,\n      \"description\": \"Create a PodMonitor to add the metrics to Prometheus, if you are using Prometheus Operator. 
See https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor\",\n      \"type\": \"boolean\"\n    },\n    \"helm-values.metrics.podmonitor.endpointAdditionalProperties\": {\n      \"default\": {},\n      \"description\": \"EndpointAdditionalProperties allows setting additional properties on the endpoint such as relabelings, metricRelabelings etc.\\n\\nFor example:\\nendpointAdditionalProperties:\\n relabelings:\\n - action: replace\\n   sourceLabels:\\n   - __meta_kubernetes_pod_node_name\\n   targetLabel: instance\",\n      \"type\": \"object\"\n    },\n    \"helm-values.metrics.podmonitor.honorLabels\": {\n      \"default\": false,\n      \"description\": \"Keep labels from scraped data, overriding server-side labels.\",\n      \"type\": \"boolean\"\n    },\n    \"helm-values.metrics.podmonitor.interval\": {\n      \"default\": \"60s\",\n      \"description\": \"The interval to scrape metrics.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.metrics.podmonitor.labels\": {\n      \"default\": {},\n      \"description\": \"Additional labels to add to the PodMonitor.\",\n      \"type\": \"object\"\n    },\n    \"helm-values.metrics.podmonitor.namespace\": {\n      \"description\": \"The namespace that the pod monitor should live in.\\nDefaults to the disco-agent namespace.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.metrics.podmonitor.prometheusInstance\": {\n      \"default\": \"default\",\n      \"description\": \"Specifies the `prometheus` label on the created PodMonitor. 
This is used when different Prometheus instances have label selectors matching different PodMonitors.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.metrics.podmonitor.scrapeTimeout\": {\n      \"default\": \"30s\",\n      \"description\": \"The timeout before a metrics scrape fails.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.nameOverride\": {\n      \"default\": \"\",\n      \"description\": \"This is to override the chart name.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.no_proxy\": {\n      \"description\": \"Configures the NO_PROXY environment variable where a HTTP proxy is required, but certain domains should be excluded.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.nodeSelector\": {\n      \"default\": {},\n      \"type\": \"object\"\n    },\n    \"helm-values.podAnnotations\": {\n      \"default\": {},\n      \"description\": \"This is for setting Kubernetes Annotations to a Pod. For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/\",\n      \"type\": \"object\"\n    },\n    \"helm-values.podDisruptionBudget\": {\n      \"default\": {\n        \"enabled\": false\n      },\n      \"description\": \"Configure a PodDisruptionBudget for the agent's Deployment. 
If running with multiple replicas, consider setting podDisruptionBudget.enabled to true.\",\n      \"type\": \"object\"\n    },\n    \"helm-values.podLabels\": {\n      \"default\": {},\n      \"description\": \"This is for setting Kubernetes Labels to a Pod.\\nFor more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/\",\n      \"type\": \"object\"\n    },\n    \"helm-values.podSecurityContext\": {\n      \"default\": {},\n      \"type\": \"object\"\n    },\n    \"helm-values.pprof\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"enabled\": {\n          \"$ref\": \"#/$defs/helm-values.pprof.enabled\"\n        }\n      },\n      \"type\": \"object\"\n    },\n    \"helm-values.pprof.enabled\": {\n      \"default\": false,\n      \"description\": \"Enable profiling with the pprof endpoint\",\n      \"type\": \"boolean\"\n    },\n    \"helm-values.replicaCount\": {\n      \"default\": 1,\n      \"description\": \"This will set the replicaset count more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/\",\n      \"type\": \"number\"\n    },\n    \"helm-values.resources\": {\n      \"default\": {},\n      \"type\": \"object\"\n    },\n    \"helm-values.securityContext\": {\n      \"default\": {\n        \"allowPrivilegeEscalation\": false,\n        \"capabilities\": {\n          \"drop\": [\n            \"ALL\"\n          ]\n        },\n        \"readOnlyRootFilesystem\": true,\n        \"runAsNonRoot\": true,\n        \"seccompProfile\": {\n          \"type\": \"RuntimeDefault\"\n        }\n      },\n      \"description\": \"Add Container specific SecurityContext settings to the container. Takes precedence over `podSecurityContext` when set. 
See https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container\",\n      \"type\": \"object\"\n    },\n    \"helm-values.serviceAccount\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"annotations\": {\n          \"$ref\": \"#/$defs/helm-values.serviceAccount.annotations\"\n        },\n        \"automount\": {\n          \"$ref\": \"#/$defs/helm-values.serviceAccount.automount\"\n        },\n        \"create\": {\n          \"$ref\": \"#/$defs/helm-values.serviceAccount.create\"\n        },\n        \"name\": {\n          \"$ref\": \"#/$defs/helm-values.serviceAccount.name\"\n        }\n      },\n      \"type\": \"object\"\n    },\n    \"helm-values.serviceAccount.annotations\": {\n      \"default\": {},\n      \"description\": \"Annotations to add to the service account\",\n      \"type\": \"object\"\n    },\n    \"helm-values.serviceAccount.automount\": {\n      \"default\": true,\n      \"description\": \"Automatically mount a ServiceAccount's API credentials?\",\n      \"type\": \"boolean\"\n    },\n    \"helm-values.serviceAccount.create\": {\n      \"default\": true,\n      \"description\": \"Specifies whether a service account should be created\",\n      \"type\": \"boolean\"\n    },\n    \"helm-values.serviceAccount.name\": {\n      \"default\": \"\",\n      \"description\": \"The name of the service account to use.\\nIf not set and create is true, a name is generated using the fullname template\",\n      \"type\": \"string\"\n    },\n    \"helm-values.tolerations\": {\n      \"default\": [],\n      \"items\": {},\n      \"type\": \"array\"\n    },\n    \"helm-values.volumeMounts\": {\n      \"default\": [],\n      \"description\": \"Additional volumeMounts on the output Deployment definition.\",\n      \"items\": {},\n      \"type\": \"array\"\n    },\n    \"helm-values.volumes\": {\n      \"default\": [],\n      \"description\": \"Additional volumes on the output Deployment 
definition.\",\n      \"items\": {},\n      \"type\": \"array\"\n    }\n  },\n  \"$ref\": \"#/$defs/helm-values\",\n  \"$schema\": \"http://json-schema.org/draft-07/schema#\"\n}\n"
  },
  {
    "path": "deploy/charts/disco-agent/values.yaml",
    "content": "# Default values for disco-agent.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n# This will set the replicaset count more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/\nreplicaCount: 1\n\n# Must be set to indicate that you have read and accepted the CyberArk Terms of Service. If false, the helm chart will fail to install and will print a message with instructions on how to accept the TOS.\nacceptTerms: false\n\n# The container registry used for disco-agent images by default.\n# This can include path prefixes (e.g. \"artifactory.example.com/docker\").\n# +docs:property\nimageRegistry: \"quay.io\"\n\n# The repository namespace used for disco-agent images by default.\n# Examples:\n# - jetstack\n# - custom-namespace\n# +docs:property\nimageNamespace: \"jetstack\"\n\n# This sets the container image more information can be found here: https://kubernetes.io/docs/concepts/containers/images/\nimage:\n  # Deprecated: per-component registry prefix.\n  #\n  # If set, this value is *prepended* to the image repository that the chart would otherwise render.\n  # This applies both when `image.repository` is set and when the repository is computed from\n  # `imageRegistry` + `imageNamespace` + `image.name`.\n  #\n  # This can produce \"double registry\" style references such as\n  # `legacy.example.io/quay.io/jetstack/...`. 
Prefer using the global\n  # `imageRegistry`/`imageNamespace` values.\n  # +docs:property\n  # registry: quay.io\n\n  # Full repository override (takes precedence over `imageRegistry`, `imageNamespace`,\n  # and `image.name`).\n  # Example: quay.io/jetstack/disco-agent\n  # +docs:property\n  repository: \"\"\n\n  # The image name for the Discovery Agent.\n  # This is used (together with `imageRegistry` and `imageNamespace`) to construct the full\n  # image reference.\n  # +docs:property\n  name: disco-agent\n\n  # This sets the pull policy for images.\n  pullPolicy: IfNotPresent\n\n  # Override the image tag to deploy by setting this variable.\n  # If no value is set, the chart's appVersion is used.\n  tag: \"\"\n\n  # Override the image digest to deploy by setting this variable.\n  # If set together with `image.tag`, the rendered image will include both tag and digest.\n  digest: \"\"\n\n# This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/\nimagePullSecrets: []\n# This is to override the chart name.\nnameOverride: \"\"\nfullnameOverride: \"\"\n\n# This section builds out the service account more information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/\nserviceAccount:\n  # Specifies whether a service account should be created\n  create: true\n  # Automatically mount a ServiceAccount's API credentials?\n  automount: true\n  # Annotations to add to the service account\n  annotations: {}\n  # The name of the service account to use.\n  # If not set and create is true, a name is generated using the fullname template\n  name: \"\"\n\n# This is for setting Kubernetes Annotations to a Pod.\n# For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/\npodAnnotations: {}\n# This is for setting Kubernetes Labels to a Pod.\n# For more information 
checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/\npodLabels: {}\n\npodSecurityContext: {}\n  # fsGroup: 2000\n\n# Add Container specific SecurityContext settings to the container. Takes\n# precedence over `podSecurityContext` when set. See\n# https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container\n# +docs:property\nsecurityContext:\n  capabilities:\n    drop:\n      - ALL\n  readOnlyRootFilesystem: true\n  runAsNonRoot: true\n  allowPrivilegeEscalation: false\n  seccompProfile: { type: RuntimeDefault }\n\nresources: {}\n  # We usually recommend not to specify default resources and to leave this as a conscious\n  # choice for the user. This also increases chances charts run on environments with little\n  # resources, such as Minikube. If you do want to specify resources, uncomment the following\n  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.\n  # limits:\n  #   cpu: 100m\n  #   memory: 128Mi\n  # requests:\n  #   cpu: 100m\n  #   memory: 128Mi\n\n# Additional volumes on the output Deployment definition.\nvolumes: []\n# - name: foo\n#   secret:\n#     secretName: mysecret\n#     optional: false\n\n# Additional volumeMounts on the output Deployment definition.\nvolumeMounts: []\n# - name: foo\n#   mountPath: \"/etc/foo\"\n#   readOnly: true\n\nnodeSelector: {}\n\ntolerations: []\n\naffinity: {}\n\n# Configures the HTTP_PROXY environment variable where a HTTP proxy is required.\n# +docs:property\n# http_proxy: \"http://proxy:8080\"\n\n# Configures the HTTPS_PROXY environment variable where a HTTP proxy is required.\n# +docs:property\n# https_proxy: \"https://proxy:8080\"\n\n# Configures the NO_PROXY environment variable where a HTTP proxy is required,\n# but certain domains should be excluded.\n# +docs:property\n# no_proxy: 127.0.0.1,localhost\n\n# Configure a PodDisruptionBudget for the agent's Deployment. 
If running with multiple\n# replicas, consider setting podDisruptionBudget.enabled to true.\n# +docs:property\npodDisruptionBudget:\n  # Enable or disable the PodDisruptionBudget resource, which helps prevent downtime\n  # during voluntary disruptions such as during a Node upgrade.\n  enabled: false\n\n  # Configure the minimum available pods for disruptions. Can either be set to\n  # an integer (e.g. 1) or a percentage value (e.g. 25%).\n  # Cannot be used if `maxUnavailable` is set.\n  # +docs:property\n  # minAvailable: 1\n\n  # Configure the maximum unavailable pods for disruptions. Can either be set to\n  # an integer (e.g. 1) or a percentage value (e.g. 25%).\n  # Cannot be used if `minAvailable` is set.\n  # +docs:property\n  # maxUnavailable: 1\n\n# Configuration for the agent\nconfig:\n  # Push data every 12 hours unless changed.\n  period: \"12h0m0s\"\n\n  # You can configure the agent to exclude some annotations or\n  # labels from being pushed. All Kubernetes objects\n  # are affected. The objects are still pushed, but the specified annotations\n  # and labels are removed before being pushed.\n  #\n  # The dot is the only character that needs to be escaped in the regex. Use either\n  # double quotes with escaped single quotes or unquoted strings for the regex\n  # to avoid YAML parsing issues with `\.`.\n  #\n  # Example: excludeAnnotationKeysRegex: ['^kapp\.k14s\.io/original.*']\n  excludeAnnotationKeysRegex: []\n  excludeLabelKeysRegex: []\n\n  # A human readable name for the cluster where the agent is deployed (optional).\n  #\n  # This cluster name will be associated with the data that the agent uploads to\n  # the Discovery and Context service. If empty (the default), the service\n  # account name will be used instead.\n  clusterName: \"\"\n\n  # A short description of the cluster where the agent is deployed (optional).\n  #\n  # This description will be associated with the data that the agent uploads to\n  # the Discovery and Context service. 
The description may include contact\n  # information such as the email address of the cluster administrator, so that\n  # any problems and risks identified by the Discovery and Context service can\n  # be communicated to the people responsible for the affected secrets.\n  clusterDescription: \"\"\n\n  # Enable sending of Secret values to CyberArk in addition to metadata.\n  # Metadata is always sent; the actual values of Secrets are only sent when this is enabled.\n  # When enabled, Secret data is encrypted using envelope encryption with\n  # a key managed by CyberArk, fetched from the Discovery and Context service.\n  sendSecretValues: true\n\nauthentication:\n  secretName: agent-credentials\n\n#  extraArgs:\n#  - --logging-format=json\n#  - --log-level=6 # To enable HTTP request logging\nextraArgs: []\n\npprof:\n  # Enable profiling with the pprof endpoint\n  enabled: false\n\nmetrics:\n  # Enable the metrics server.\n  # If false, the metrics server will be disabled and the other metrics fields below will be ignored.\n  enabled: true\n  podmonitor:\n    # Create a PodMonitor to add the metrics to Prometheus, if you are using Prometheus Operator.\n    # See https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor\n    enabled: false\n\n    # The namespace that the pod monitor should live in.\n    # Defaults to the disco-agent namespace.\n    # +docs:property\n    # namespace: cyberark\n\n    # Specifies the `prometheus` label on the created PodMonitor.\n    # This is used when different Prometheus instances have label selectors\n    # matching different PodMonitors.\n    prometheusInstance: default\n\n    # The interval to scrape metrics.\n    interval: 60s\n\n    # The timeout before a metrics scrape fails.\n    scrapeTimeout: 30s\n\n    # Additional labels to add to the PodMonitor.\n    labels: {}\n\n    # Additional annotations to add to the PodMonitor.\n    annotations: {}\n\n    # Keep labels from scraped data, overriding server-side 
labels.\n    honorLabels: false\n\n    # EndpointAdditionalProperties allows setting additional properties on the endpoint such as relabelings, metricRelabelings etc.\n    #\n    # For example:\n    #  endpointAdditionalProperties:\n    #   relabelings:\n    #   - action: replace\n    #     sourceLabels:\n    #     - __meta_kubernetes_pod_node_name\n    #     targetLabel: instance\n    endpointAdditionalProperties: {}\n\n"
  },
  {
    "path": "deploy/charts/discovery-agent/.helmignore",
    "content": "# Patterns to ignore when building packages.\n# This supports shell glob matching, relative path matching, and\n# negation (prefixed with !). Only one pattern per line.\n.DS_Store\n# Common VCS dirs\n.git/\n.gitignore\n.bzr/\n.bzrignore\n.hg/\n.hgignore\n.svn/\n# Common backup files\n*.swp\n*.bak\n*.tmp\n*.orig\n*~\n# Various IDEs\n.project\n.idea/\n*.tmproj\n.vscode/\n"
  },
  {
    "path": "deploy/charts/discovery-agent/Chart.yaml",
    "content": "apiVersion: v2\nname: discovery-agent\ndescription: |-\n  The discovery-agent connects your Kubernetes or Openshift cluster to NGTS for discovery and monitoring.\n\nmaintainers:\n  - name: Palo Alto Networks\n    url: https://www.paloaltonetworks.com\n\nsources:\n  - https://github.com/jetstack/jetstack-secure\n\n# These versions are meant to be overridden by `make helm-chart`. No `v` prefix\n# for the `version` because Helm doesn't support auto-determining the latest\n# version for OCI Helm charts that use a `v` prefix.\nversion: 0.0.0\nappVersion: \"v0.0.0\"\n"
  },
  {
    "path": "deploy/charts/discovery-agent/README.md",
    "content": "# discovery-agent\n\nThe Discovery Agent connects your Kubernetes or OpenShift cluster to Palo Alto NGTS.\n\n## Values\n\n<!-- AUTO-GENERATED -->\n\n#### **config.tsgID** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n\nRequired: The TSG (Tenant Service Group) ID to use when connecting to SCM. NB: TSG IDs are numeric, but must be treated as strings to avoid issues with YAML data types. With the Helm CLI use `--set-string`; with YAML always pass TSG IDs in double quotes.\n\n\n#### **config.clusterName** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n\nRequired: A human readable name for the cluster into which the agent is being deployed.  \n  \nThis cluster name will be associated with the data that the agent uploads to the backend.\n\n#### **config.clusterDescription** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n\nA short description of the cluster where the agent is deployed (optional).  \n  \nThis description will be associated with the data that the agent uploads to the backend.\n\n#### **config.claimableCerts** ~ `bool`\n> Default value:\n> ```yaml\n> false\n> ```\n\nWhether discovered certs can be claimed by other tenants (optional). true = certs are left unassigned, available for any tenant to claim. false (default) = certs are owned by this cluster's tenant.\n#### **config.period** ~ `string`\n> Default value:\n> ```yaml\n> 0h1m0s\n> ```\n\nHow often to push data to the remote server\n\n#### **config.excludeAnnotationKeysRegex** ~ `array`\n> Default value:\n> ```yaml\n> []\n> ```\n\nYou can configure the agent to exclude some annotations or labels from being pushed. All Kubernetes objects are affected. The objects are still pushed, but the specified annotations and labels are removed before being pushed.  \n  \nDots is the only character that needs to be escaped in the regex. Use either double quotes with escaped single quotes or unquoted strings for the regex to avoid YAML parsing issues with `\\.`.  
\n  \nExample: excludeAnnotationKeysRegex: ['^kapp\\.k14s\\.io/original.*']\n#### **config.excludeLabelKeysRegex** ~ `array`\n> Default value:\n> ```yaml\n> []\n> ```\n#### **config.clientID** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n\nDeprecated: Client ID for the configured service account. The client ID should be provided in the \"clientID\" field of the authentication secret (see config.secretName). This field is provided for compatibility for users migrating from the \"venafi-kubernetes-agent\" chart.\n\n#### **config.secretName** ~ `string`\n> Default value:\n> ```yaml\n> discovery-agent-credentials\n> ```\n\nThe name of the Secret containing the NGTS built-in service account credentials.  \nThe Secret must contain the following key:  \n- privatekey.pem: PEM-encoded private key for the service account  \nThe Secret should also contain the following key:  \n- clientID:       Service account client ID (config.clientID must be set if not present)\n\n#### **replicaCount** ~ `number`\n> Default value:\n> ```yaml\n> 1\n> ```\n\nThis will set the replicaset count more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/\n#### **imageRegistry** ~ `string`\n> Default value:\n> ```yaml\n> quay.io\n> ```\n\nThe container registry used for discovery-agent images by default. This can include path prefixes (e.g. \"artifactory.example.com/docker\").\n\n#### **imageNamespace** ~ `string`\n> Default value:\n> ```yaml\n> jetstack\n> ```\n\nThe repository namespace used for discovery-agent images by default.  \nExamples:  \n- jetstack  \n- custom-namespace\n\n#### **image.repository** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n\nFull repository override (takes precedence over `imageRegistry`, `imageNamespace`, and `image.name`).  \nExample: quay.io/jetstack/discovery-agent\n\n#### **image.name** ~ `string`\n> Default value:\n> ```yaml\n> discovery-agent\n> ```\n\nThe image name for the Discovery Agent.  
\nThis is used (together with `imageRegistry` and `imageNamespace`) to construct the full image reference.\n\n#### **image.pullPolicy** ~ `string`\n> Default value:\n> ```yaml\n> IfNotPresent\n> ```\n\nThis sets the pull policy for images.\n#### **image.tag** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n\nOverride the image tag to deploy by setting this variable. If no value is set, the chart's appVersion is used.\n#### **image.digest** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n\nOverride the image digest to deploy by setting this variable. If set together with `image.tag`, the rendered image will include both tag and digest.\n#### **imagePullSecrets** ~ `array`\n> Default value:\n> ```yaml\n> []\n> ```\n\nThis is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/\n#### **nameOverride** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n\nThis is to override the chart name.\n#### **fullnameOverride** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n#### **serviceAccount.create** ~ `bool`\n> Default value:\n> ```yaml\n> true\n> ```\n\nSpecifies whether a service account should be created\n#### **serviceAccount.automount** ~ `bool`\n> Default value:\n> ```yaml\n> true\n> ```\n\nAutomatically mount a ServiceAccount's API credentials?\n#### **serviceAccount.annotations** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n\nAnnotations to add to the service account\n#### **serviceAccount.name** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n\nThe name of the service account to use.  \nIf not set and create is true, a name is generated using the fullname template\n#### **podAnnotations** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n\nThis is for setting Kubernetes Annotations to a Pod. 
For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/\n#### **podLabels** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n\nThis is for setting Kubernetes Labels to a Pod.  \nFor more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/\n#### **podSecurityContext** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n#### **securityContext** ~ `object`\n> Default value:\n> ```yaml\n> allowPrivilegeEscalation: false\n> capabilities:\n>   drop:\n>     - ALL\n> readOnlyRootFilesystem: true\n> runAsNonRoot: true\n> seccompProfile:\n>   type: RuntimeDefault\n> ```\n\nAdd Container specific SecurityContext settings to the container. Takes precedence over `podSecurityContext` when set. See https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container\n\n#### **resources** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n#### **volumes** ~ `array`\n> Default value:\n> ```yaml\n> []\n> ```\n\nAdditional volumes on the output Deployment definition.\n#### **volumeMounts** ~ `array`\n> Default value:\n> ```yaml\n> []\n> ```\n\nAdditional volumeMounts on the output Deployment definition.\n#### **nodeSelector** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n#### **tolerations** ~ `array`\n> Default value:\n> ```yaml\n> []\n> ```\n#### **affinity** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n#### **http_proxy** ~ `string`\n\nConfigures the HTTP_PROXY environment variable where a HTTP proxy is required.\n\n#### **https_proxy** ~ `string`\n\nConfigures the HTTPS_PROXY environment variable where a HTTP proxy is required.\n\n#### **no_proxy** ~ `string`\n\nConfigures the NO_PROXY environment variable where a HTTP proxy is required, but certain domains should be excluded.\n\n#### **podDisruptionBudget** ~ `object`\n> Default value:\n> ```yaml\n> enabled: false\n> ```\n\nConfigure a PodDisruptionBudget for the 
agent's Deployment. If running with multiple replicas, consider setting podDisruptionBudget.enabled to true.\n\n#### **extraArgs** ~ `array`\n> Default value:\n> ```yaml\n> []\n> ```\n\n```yaml\nextraArgs:\n- --logging-format=json\n- --log-level=6 # To enable HTTP request logging\n```\n#### **pprof.enabled** ~ `bool`\n> Default value:\n> ```yaml\n> false\n> ```\n\nEnable profiling with the pprof endpoint\n#### **metrics.enabled** ~ `bool`\n> Default value:\n> ```yaml\n> true\n> ```\n\nEnable the metrics server.  \nIf false, the metrics server will be disabled and the other metrics fields below will be ignored.\n#### **metrics.podmonitor.enabled** ~ `bool`\n> Default value:\n> ```yaml\n> false\n> ```\n\nCreate a PodMonitor to add the metrics to Prometheus, if you are using Prometheus Operator. See https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor\n#### **metrics.podmonitor.namespace** ~ `string`\n\nThe namespace that the pod monitor should live in.  \nDefaults to the discovery-agent namespace.\n\n#### **metrics.podmonitor.prometheusInstance** ~ `string`\n> Default value:\n> ```yaml\n> default\n> ```\n\nSpecifies the `prometheus` label on the created PodMonitor. 
This is used when different Prometheus instances have label selectors matching different PodMonitors.\n#### **metrics.podmonitor.interval** ~ `string`\n> Default value:\n> ```yaml\n> 60s\n> ```\n\nThe interval to scrape metrics.\n#### **metrics.podmonitor.scrapeTimeout** ~ `string`\n> Default value:\n> ```yaml\n> 30s\n> ```\n\nThe timeout before a metrics scrape fails.\n#### **metrics.podmonitor.labels** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n\nAdditional labels to add to the PodMonitor.\n#### **metrics.podmonitor.annotations** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n\nAdditional annotations to add to the PodMonitor.\n#### **metrics.podmonitor.honorLabels** ~ `bool`\n> Default value:\n> ```yaml\n> false\n> ```\n\nKeep labels from scraped data, overriding server-side labels.\n#### **metrics.podmonitor.endpointAdditionalProperties** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n\nEndpointAdditionalProperties allows setting additional properties on the endpoint such as relabelings, metricRelabelings etc.  \n  \nFor example:\n\n```yaml\nendpointAdditionalProperties:\n relabelings:\n - action: replace\n   sourceLabels:\n   - __meta_kubernetes_pod_node_name\n   targetLabel: instance\n```\n\n<!-- /AUTO-GENERATED -->\n"
  },
  {
    "path": "deploy/charts/discovery-agent/templates/NOTES.txt",
    "content": "CHART NAME: {{ .Chart.Name }}\nCHART VERSION: {{ .Chart.Version }}\nAPP VERSION: {{ .Chart.AppVersion }}\n\n- Check the application is running:\n> kubectl get pods -n {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}\n\n- Check the application logs for successful connection to NGTS:\n> kubectl logs -n {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}\n"
  },
  {
    "path": "deploy/charts/discovery-agent/templates/_helpers.tpl",
    "content": "{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"discovery-agent.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" }}\n{{- end }}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"discovery-agent.fullname\" -}}\n{{- if .Values.fullnameOverride }}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" }}\n{{- else }}\n{{- $name := default .Chart.Name .Values.nameOverride }}\n{{- if contains $name .Release.Name }}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" }}\n{{- else }}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" }}\n{{- end }}\n{{- end }}\n{{- end }}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"discovery-agent.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" }}\n{{- end }}\n\n{{/*\nCommon labels\n*/}}\n{{- define \"discovery-agent.labels\" -}}\nhelm.sh/chart: {{ include \"discovery-agent.chart\" . }}\n{{ include \"discovery-agent.selectorLabels\" . }}\n{{- if .Chart.AppVersion }}\napp.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n{{- end }}\napp.kubernetes.io/managed-by: {{ .Release.Service }}\n{{- end }}\n\n{{/*\nSelector labels\n*/}}\n{{- define \"discovery-agent.selectorLabels\" -}}\napp.kubernetes.io/name: {{ include \"discovery-agent.name\" . }}\napp.kubernetes.io/instance: {{ .Release.Name }}\n{{- end }}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"discovery-agent.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create }}\n{{- default (include \"discovery-agent.fullname\" .) 
.Values.serviceAccount.name }}\n{{- else }}\n{{- default \"default\" .Values.serviceAccount.name }}\n{{- end }}\n{{- end }}\n\n{{/*\nUtil function for generating an image reference based on the provided options.\nThis function is derived from similar functions used in the cert-manager GitHub organization\n*/}}\n{{- define \"discovery-agent.image\" -}}\n{{- /*\nCalling convention:\n- (tuple <imageValues> <imageRegistry> <imageNamespace> <defaultReference>)\nWe intentionally pass imageRegistry/imageNamespace as explicit arguments rather than reading\nfrom `.Values` inside this helper, because `helm-tool lint` does not reliably track `.Values.*`\nusage through tuple/variable indirection.\n*/ -}}\n{{- if ne (len .) 4 -}}\n\t{{- fail (printf \"ERROR: template \\\"discovery-agent.image\\\" expects (tuple <imageValues> <imageRegistry> <imageNamespace> <defaultReference>), got %d arguments\" (len .)) -}}\n{{- end -}}\n{{- $image := index . 0 -}}\n{{- $imageRegistry := index . 1 | default \"\" -}}\n{{- $imageNamespace := index . 2 | default \"\" -}}\n{{- $defaultReference := index . 3 -}}\n{{- $repository := \"\" -}}\n{{- if $image.repository -}}\n\t{{- $repository = $image.repository -}}\n{{- else -}}\n\t{{- $name := required \"ERROR: image.name must be set when image.repository is empty\" $image.name -}}\n\t{{- $repository = $name -}}\n\t{{- if $imageNamespace -}}\n\t\t{{- $repository = printf \"%s/%s\" $imageNamespace $repository -}}\n\t{{- end -}}\n\t{{- if $imageRegistry -}}\n\t\t{{- $repository = printf \"%s/%s\" $imageRegistry $repository -}}\n\t{{- end -}}\n{{- end -}}\n{{- $repository -}}\n{{- if and $image.tag $image.digest -}}\n\t{{- printf \":%s@%s\" $image.tag $image.digest -}}\n{{- else if $image.tag -}}\n\t{{- printf \":%s\" $image.tag -}}\n{{- else if $image.digest -}}\n\t{{- printf \"@%s\" $image.digest -}}\n{{- else -}}\n\t{{- printf \"%s\" $defaultReference -}}\n{{- end -}}\n{{- end }}\n"
  },
  {
    "path": "deploy/charts/discovery-agent/templates/configmap.yaml",
    "content": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ include \"discovery-agent.fullname\" . }}-config\n  namespace: {{ .Release.Namespace }}\n  labels:\n    {{- include \"discovery-agent.labels\" . | nindent 4 }}\ndata:\n  config.yaml: |-\n    cluster_name: {{ required \"config.clusterName is required\" .Values.config.clusterName | quote }}\n    cluster_description: {{ .Values.config.clusterDescription | quote }}\n    {{- if .Values.config.claimableCerts }}\n    claimable_certs: true\n    {{- end }}\n    period: {{ .Values.config.period | quote }}\n    {{- with .Values.config.excludeAnnotationKeysRegex }}\n    exclude-annotation-keys-regex:\n      {{- . | toYaml | nindent 6 }}\n    {{- end }}\n    {{- with .Values.config.excludeLabelKeysRegex }}\n    exclude-label-keys-regex:\n      {{- . | toYaml | nindent 6 }}\n    {{- end }}\n    data-gatherers:\n    - kind: k8s-discovery\n      name: k8s/discovery\n    - kind: k8s-dynamic\n      name: k8s/secrets\n      config:\n        resource-type:\n          version: v1\n          resource: secrets\n        field-selectors:\n        - type!=kubernetes.io/dockercfg\n        - type!=kubernetes.io/dockerconfigjson\n        - type!=bootstrap.kubernetes.io/token\n        - type!=helm.sh/release.v1\n    - kind: k8s-dynamic\n      name: k8s/jobs\n      config:\n        resource-type:\n          version: v1\n          group: batch\n          resource: jobs\n    - kind: k8s-dynamic\n      name: k8s/cronjobs\n      config:\n        resource-type:\n          version: v1\n          group: batch\n          resource: cronjobs\n    - kind: k8s-dynamic\n      name: k8s/deployments\n      config:\n        resource-type:\n          version: v1\n          group: apps\n          resource: deployments\n    - kind: k8s-dynamic\n      name: k8s/statefulsets\n      config:\n        resource-type:\n          version: v1\n          group: apps\n          resource: statefulsets\n    - kind: k8s-dynamic\n      name: k8s/daemonsets\n  
    config:\n        resource-type:\n          version: v1\n          group: apps\n          resource: daemonsets\n    - kind: k8s-dynamic\n      name: k8s/pods\n      config:\n        resource-type:\n          version: v1\n          resource: pods\n"
  },
  {
    "path": "deploy/charts/discovery-agent/templates/deployment.yaml",
    "content": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: {{ include \"discovery-agent.fullname\" . }}\n  labels:\n    {{- include \"discovery-agent.labels\" . | nindent 4 }}\nspec:\n  replicas: {{ .Values.replicaCount }}\n  selector:\n    matchLabels:\n      {{- include \"discovery-agent.selectorLabels\" . | nindent 6 }}\n  template:\n    metadata:\n      {{- with .Values.podAnnotations }}\n      annotations:\n        {{- toYaml . | nindent 8 }}\n      {{- end }}\n      labels:\n        {{- include \"discovery-agent.labels\" . | nindent 8 }}\n        {{- with .Values.podLabels }}\n        {{- toYaml . | nindent 8 }}\n        {{- end }}\n    spec:\n      {{- with .Values.imagePullSecrets }}\n      imagePullSecrets:\n        {{- toYaml . | nindent 8 }}\n      {{- end }}\n      serviceAccountName: {{ include \"discovery-agent.serviceAccountName\" . }}\n      {{- with .Values.podSecurityContext }}\n      securityContext:\n        {{- toYaml . | nindent 8 }}\n      {{- end }}\n      containers:\n        - name: agent\n          {{- with .Values.securityContext }}\n          securityContext:\n            {{- toYaml . 
| nindent 12 }}\n          {{- end }}\n          image: \"{{ template \"discovery-agent.image\" (tuple .Values.image .Values.imageRegistry .Values.imageNamespace (printf \":%s\" .Chart.AppVersion)) }}\"\n          imagePullPolicy: {{ .Values.image.pullPolicy }}\n          env:\n          - name: POD_NAMESPACE\n            valueFrom:\n              fieldRef:\n                fieldPath: metadata.namespace\n          - name: POD_NAME\n            valueFrom:\n              fieldRef:\n                fieldPath: metadata.name\n          - name: POD_UID\n            valueFrom:\n              fieldRef:\n                fieldPath: metadata.uid\n          - name: POD_NODE\n            valueFrom:\n              fieldRef:\n                fieldPath: spec.nodeName\n          {{- with .Values.http_proxy }}\n          - name: HTTP_PROXY\n            value: {{ . }}\n          {{- end }}\n          {{- with .Values.https_proxy }}\n          - name: HTTPS_PROXY\n            value: {{ . }}\n          {{- end }}\n          {{- with .Values.no_proxy }}\n          - name: NO_PROXY\n            value: {{ . }}\n          {{- end }}\n          args:\n            - \"agent\"\n            - \"-c\"\n            - \"/etc/discovery-agent/config.yaml\"\n            - --ngts\n            - --tsg-id\n            - {{ required \"config.tsgID is required\" .Values.config.tsgID | toString | quote }}\n            {{- with .Values.config.serverURL }}\n            - --ngts-server-url\n            - {{ . 
| quote }}\n            {{- end }}\n            {{- if or .Values.config.clientID .Values.config.clientId }}\n            - --client-id\n            - {{ .Values.config.clientID | default .Values.config.clientId }}\n            {{- end }}\n            - --private-key-path\n            - /etc/discovery-agent/credentials/privatekey.pem\n            - --logging-format=json\n            {{- if .Values.metrics.enabled }}\n            - --enable-metrics\n            {{- end }}\n            {{- if .Values.pprof.enabled }}\n            - --enable-pprof\n            {{- end }}\n            {{- range .Values.extraArgs }}\n            - {{ . | quote }}\n            {{- end }}\n          {{- with .Values.resources }}\n          resources:\n            {{- toYaml . | nindent 12 }}\n          {{- end }}\n          volumeMounts:\n            - name: config\n              mountPath: \"/etc/discovery-agent\"\n              readOnly: true\n            - name: credentials\n              mountPath: \"/etc/discovery-agent/credentials\"\n              readOnly: true\n            {{- with .Values.volumeMounts }}\n            {{- toYaml . | nindent 12 }}\n            {{- end }}\n          ports:\n            - name: agent-api\n              containerPort: 8081\n      volumes:\n        - name: config\n          configMap:\n            name: {{ include \"discovery-agent.fullname\" . }}-config\n            optional: false\n        - name: credentials\n          secret:\n            secretName: {{ .Values.config.secretName }}\n            optional: false\n        {{- with .Values.volumes }}\n        {{- toYaml . | nindent 8 }}\n        {{- end }}\n      {{- with .Values.nodeSelector }}\n      nodeSelector:\n        {{- toYaml . | nindent 8 }}\n      {{- end }}\n      {{- with .Values.affinity }}\n      affinity:\n        {{- toYaml . | nindent 8 }}\n      {{- end }}\n      {{- with .Values.tolerations }}\n      tolerations:\n        {{- toYaml . | nindent 8 }}\n      {{- end }}\n"
  },
  {
    "path": "deploy/charts/discovery-agent/templates/poddisruptionbudget.yaml",
    "content": "{{- if .Values.podDisruptionBudget.enabled }}\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: {{ include \"discovery-agent.fullname\" . }}\n  namespace: {{ .Release.Namespace }}\n  labels:\n    {{- include \"discovery-agent.labels\" . | nindent 4 }}\nspec:\n  selector:\n    matchLabels:\n      {{- include \"discovery-agent.selectorLabels\" . | nindent 6 }}\n\n  {{- if not (or (hasKey .Values.podDisruptionBudget \"minAvailable\") (hasKey .Values.podDisruptionBudget \"maxUnavailable\")) }}\n  minAvailable: 1 # Default value because minAvailable and maxUnavailable are not set\n  {{- end }}\n  {{- if hasKey .Values.podDisruptionBudget \"minAvailable\" }}\n  minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}\n  {{- end }}\n  {{- if hasKey .Values.podDisruptionBudget \"maxUnavailable\" }}\n  maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }}\n  {{- end }}\n{{- end }}\n"
  },
  {
    "path": "deploy/charts/discovery-agent/templates/podmonitor.yaml",
    "content": "{{- if and .Values.metrics.enabled .Values.metrics.podmonitor.enabled }}\napiVersion: monitoring.coreos.com/v1\nkind: PodMonitor\nmetadata:\n  name: {{ include \"discovery-agent.fullname\" . }}\n{{- if .Values.metrics.podmonitor.namespace }}\n  namespace: {{ .Values.metrics.podmonitor.namespace }}\n{{- else }}\n  namespace: {{ .Release.Namespace | quote }}\n{{- end }}\n  labels:\n    {{- include \"discovery-agent.labels\" . | nindent 4 }}\n    prometheus: {{ .Values.metrics.podmonitor.prometheusInstance }}\n    {{- with .Values.metrics.podmonitor.labels }}\n    {{- toYaml . | nindent 4 }}\n    {{- end }}\n{{- with .Values.metrics.podmonitor.annotations }}\n  annotations:\n    {{- toYaml . | nindent 4 }}\n{{- end }}\nspec:\n  jobLabel: {{ include \"discovery-agent.fullname\" . }}\n  selector:\n    matchLabels:\n      {{- include \"discovery-agent.selectorLabels\" . | nindent 6 }}\n{{- if .Values.metrics.podmonitor.namespace }}\n  namespaceSelector:\n    matchNames:\n      - {{ .Release.Namespace | quote }}\n{{- end }}\n  podMetricsEndpoints:\n    - port: agent-api\n      path: /metrics\n      interval: {{ .Values.metrics.podmonitor.interval }}\n      scrapeTimeout: {{ .Values.metrics.podmonitor.scrapeTimeout }}\n      honorLabels: {{ .Values.metrics.podmonitor.honorLabels }}\n      {{- with .Values.metrics.podmonitor.endpointAdditionalProperties }}\n      {{- toYaml . | nindent 6 }}\n      {{- end }}\n{{- end }}\n"
  },
  {
    "path": "deploy/charts/discovery-agent/templates/rbac.yaml",
    "content": "---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ include \"discovery-agent.fullname\" . }}-event-emitted\n  labels:\n    {{- include \"discovery-agent.labels\" . | nindent 4 }}\nrules:\n  - apiGroups: [\"\"]\n    resources: [\"events\"]\n    verbs: [\"create\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ include \"discovery-agent.fullname\" . }}-event-emitted\n  labels:\n    {{- include \"discovery-agent.labels\" . | nindent 4 }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ include \"discovery-agent.fullname\" . }}-event-emitted\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"discovery-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ include \"discovery-agent.fullname\" . }}-cluster-viewer\n  labels:\n    {{- include \"discovery-agent.labels\" . | nindent 4 }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: view\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"discovery-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: {{ include \"discovery-agent.fullname\" . }}-secret-reader\n  labels:\n    {{- include \"discovery-agent.labels\" . | nindent 4 }}\nrules:\n  - apiGroups: [\"\"]\n    resources: [\"secrets\"]\n    verbs: [\"get\", \"list\", \"watch\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ include \"discovery-agent.fullname\" . }}-secret-reader\n  labels:\n    {{- include \"discovery-agent.labels\" . | nindent 4 }}\nroleRef:\n  kind: ClusterRole\n  name: {{ include \"discovery-agent.fullname\" . 
}}-secret-reader\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"discovery-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: {{ include \"discovery-agent.fullname\" . }}-rbac-reader\n  labels:\n    {{- include \"discovery-agent.labels\" . | nindent 4 }}\nrules:\n  - apiGroups: [\"rbac.authorization.k8s.io\"]\n    resources:\n    - roles\n    - clusterroles\n    - rolebindings\n    - clusterrolebindings\n    verbs: [\"get\", \"list\", \"watch\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ include \"discovery-agent.fullname\" . }}-rbac-reader\n  labels:\n    {{- include \"discovery-agent.labels\" . | nindent 4 }}\nroleRef:\n  kind: ClusterRole\n  name: {{ include \"discovery-agent.fullname\" . }}-rbac-reader\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"discovery-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ include \"discovery-agent.fullname\" . }}-oidc-discovery\n  labels:\n    {{- include \"discovery-agent.labels\" . | nindent 4 }}\nroleRef:\n  kind: ClusterRole\n  name: system:service-account-issuer-discovery\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"discovery-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n"
  },
  {
    "path": "deploy/charts/discovery-agent/templates/serviceaccount.yaml",
    "content": "{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: {{ include \"discovery-agent.serviceAccountName\" . }}\n  labels:\n    {{- include \"discovery-agent.labels\" . | nindent 4 }}\n  {{- with .Values.serviceAccount.annotations }}\n  annotations:\n    {{- toYaml . | nindent 4 }}\n  {{- end }}\nautomountServiceAccountToken: {{ .Values.serviceAccount.automount }}\n{{- end }}\n"
  },
  {
    "path": "deploy/charts/discovery-agent/tests/configmap_test.yaml",
    "content": "suite: test configmap\ntemplates:\n  - configmap.yaml\n\ntests:\n  # Test basic ConfigMap rendering\n  - it: should create ConfigMap with required values\n    set:\n      config.clusterName: my-test-cluster\n      config.tsgID: \"123456\"\n    asserts:\n      - isKind:\n          of: ConfigMap\n      - equal:\n          path: metadata.name\n          value: RELEASE-NAME-discovery-agent-config\n      - matchRegex:\n          path: data[\"config.yaml\"]\n          pattern: 'cluster_name: \"my-test-cluster\"'\n\n  # Test cluster description\n  - it: should include cluster description when set\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      config.clusterDescription: \"This is a test cluster\"\n    asserts:\n      - matchRegex:\n          path: data[\"config.yaml\"]\n          pattern: 'cluster_description: \"This is a test cluster\"'\n\n  # Test period configuration\n  - it: should set custom period\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      config.period: \"0h5m0s\"\n    asserts:\n      - matchRegex:\n          path: data[\"config.yaml\"]\n          pattern: 'period: \"0h5m0s\"'\n\n  # Test exclude annotation keys regex\n  - it: should include excludeAnnotationKeysRegex when set\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      config.excludeAnnotationKeysRegex:\n        - \"^kapp\\\\.k14s\\\\.io/original.*\"\n        - \"^kubectl\\\\.kubernetes\\\\.io/.*\"\n    asserts:\n      - matchRegex:\n          path: data[\"config.yaml\"]\n          pattern: 'exclude-annotation-keys-regex:'\n      - matchRegex:\n          path: data[\"config.yaml\"]\n          pattern: '\\^kapp\\\\\\.k14s\\\\\\.io/original\\.\\*'\n\n  # Test exclude label keys regex\n  - it: should include excludeLabelKeysRegex when set\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      config.excludeLabelKeysRegex:\n        - 
\"^helm\\\\.sh/.*\"\n    asserts:\n      - matchRegex:\n          path: data[\"config.yaml\"]\n          pattern: 'exclude-label-keys-regex:'\n      - matchRegex:\n          path: data[\"config.yaml\"]\n          pattern: '\\^helm\\\\\\.sh/\\.\\*'\n\n  # Test data-gatherers are present\n  - it: should include all data-gatherers\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n    asserts:\n      - matchRegex:\n          path: data[\"config.yaml\"]\n          pattern: 'kind: k8s-discovery'\n      - matchRegex:\n          path: data[\"config.yaml\"]\n          pattern: 'name: k8s/secrets'\n      - matchRegex:\n          path: data[\"config.yaml\"]\n          pattern: 'name: k8s/jobs'\n      - matchRegex:\n          path: data[\"config.yaml\"]\n          pattern: 'name: k8s/deployments'\n"
  },
  {
    "path": "deploy/charts/discovery-agent/tests/deployment_test.yaml",
    "content": "suite: test deployment\ntemplates:\n  - deployment.yaml\n\ntests:\n  # Test that tsgID is rendered correctly as a string\n  - it: tsgID is rendered as string in deployment args\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"987654321\"\n    template: deployment.yaml\n    asserts:\n      - isKind:\n          of: Deployment\n      - contains:\n          path: spec.template.spec.containers[0].args\n          content: --tsg-id\n      - contains:\n          path: spec.template.spec.containers[0].args\n          content: \"987654321\"\n\n  # Test that tsgID preserves leading zeros (only possible with string type)\n  # NB: TSG IDs are defined to start with \"1\", but this test is defence in depth\n  - it: tsgID preserves leading zeros when provided as string\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"0001234\"\n    template: deployment.yaml\n    asserts:\n      - isKind:\n          of: Deployment\n      - contains:\n          path: spec.template.spec.containers[0].args\n          content: --tsg-id\n      - contains:\n          path: spec.template.spec.containers[0].args\n          content: \"0001234\"\n\n  # Test basic deployment rendering with all required values\n  - it: deployment templates correctly with required values\n    set:\n      config.clusterName: my-test-cluster\n      config.tsgID: \"123456\"\n    template: deployment.yaml\n    asserts:\n      - isKind:\n          of: Deployment\n      - matchRegex:\n          path: metadata.name\n          pattern: ^.*-discovery-agent$\n\n  # Test replica count\n  - it: should set replica count correctly\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      replicaCount: 3\n    asserts:\n      - equal:\n          path: spec.replicas\n          value: 3\n\n  # Test security contexts\n  - it: should apply pod security context\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      
podSecurityContext:\n        fsGroup: 2000\n    asserts:\n      - equal:\n          path: spec.template.spec.securityContext.fsGroup\n          value: 2000\n\n  - it: should apply container security context defaults\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n    asserts:\n      - equal:\n          path: spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem\n          value: true\n      - equal:\n          path: spec.template.spec.containers[0].securityContext.runAsNonRoot\n          value: true\n      - equal:\n          path: spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation\n          value: false\n\n  # Test resources\n  - it: should set resources when specified\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      resources:\n        limits:\n          cpu: 200m\n          memory: 256Mi\n        requests:\n          cpu: 100m\n          memory: 128Mi\n    asserts:\n      - equal:\n          path: spec.template.spec.containers[0].resources.limits.cpu\n          value: 200m\n      - equal:\n          path: spec.template.spec.containers[0].resources.requests.memory\n          value: 128Mi\n\n  # Test environment variables\n  - it: should set HTTP_PROXY environment variable\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      http_proxy: \"http://proxy:8080\"\n    asserts:\n      - contains:\n          path: spec.template.spec.containers[0].env\n          content:\n            name: HTTP_PROXY\n            value: \"http://proxy:8080\"\n\n  - it: should set HTTPS_PROXY environment variable\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      https_proxy: \"https://proxy:8443\"\n    asserts:\n      - contains:\n          path: spec.template.spec.containers[0].env\n          content:\n            name: HTTPS_PROXY\n            value: \"https://proxy:8443\"\n\n  - it: should set 
NO_PROXY environment variable\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      no_proxy: \"127.0.0.1,localhost\"\n    asserts:\n      - contains:\n          path: spec.template.spec.containers[0].env\n          content:\n            name: NO_PROXY\n            value: \"127.0.0.1,localhost\"\n\n  # Test command line arguments\n  - it: should include metrics flag when enabled\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      metrics.enabled: true\n    asserts:\n      - contains:\n          path: spec.template.spec.containers[0].args\n          content: --enable-metrics\n\n  - it: should include pprof flag when enabled\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      pprof.enabled: true\n    asserts:\n      - contains:\n          path: spec.template.spec.containers[0].args\n          content: --enable-pprof\n\n  - it: should include custom server URL when set\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      config.serverURL: \"https://custom.example.com\"\n    asserts:\n      - contains:\n          path: spec.template.spec.containers[0].args\n          content: --ngts-server-url\n      - contains:\n          path: spec.template.spec.containers[0].args\n          content: \"https://custom.example.com\"\n\n  - it: should include client ID when set\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      config.clientID: \"test-client-id\"\n    asserts:\n      - contains:\n          path: spec.template.spec.containers[0].args\n          content: --client-id\n      - contains:\n          path: spec.template.spec.containers[0].args\n          content: test-client-id\n\n  - it: should include client ID when clientId is set (lowercase d)\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      config.clientId: \"test-client-id-lowercase\"\n    asserts:\n   
   - contains:\n          path: spec.template.spec.containers[0].args\n          content: --client-id\n      - contains:\n          path: spec.template.spec.containers[0].args\n          content: test-client-id-lowercase\n\n  - it: should prefer clientID over clientId when both are set\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      config.clientID: \"uppercase-takes-precedence\"\n      config.clientId: \"lowercase-ignored\"\n    asserts:\n      - contains:\n          path: spec.template.spec.containers[0].args\n          content: --client-id\n      - contains:\n          path: spec.template.spec.containers[0].args\n          content: uppercase-takes-precedence\n\n  - it: should include extra args\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      extraArgs:\n        - --log-level=6\n        - --custom-flag=value\n    asserts:\n      - contains:\n          path: spec.template.spec.containers[0].args\n          content: \"--log-level=6\"\n      - contains:\n          path: spec.template.spec.containers[0].args\n          content: \"--custom-flag=value\"\n\n  # Test volumes and volume mounts\n  - it: should mount config and credentials volumes\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n    asserts:\n      - contains:\n          path: spec.template.spec.containers[0].volumeMounts\n          content:\n            name: config\n            mountPath: \"/etc/discovery-agent\"\n            readOnly: true\n      - contains:\n          path: spec.template.spec.containers[0].volumeMounts\n          content:\n            name: credentials\n            mountPath: \"/etc/discovery-agent/credentials\"\n            readOnly: true\n      - contains:\n          path: spec.template.spec.volumes\n          content:\n            name: config\n            configMap:\n              name: RELEASE-NAME-discovery-agent-config\n              optional: false\n\n  - it: 
should use custom secret name\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      config.secretName: custom-secret\n    asserts:\n      - contains:\n          path: spec.template.spec.volumes\n          content:\n            name: credentials\n            secret:\n              secretName: custom-secret\n              optional: false\n\n  # Test pod annotations and labels\n  - it: should apply pod annotations\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      podAnnotations:\n        annotation-key: annotation-value\n    asserts:\n      - equal:\n          path: spec.template.metadata.annotations.annotation-key\n          value: annotation-value\n\n  - it: should apply pod labels\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      podLabels:\n        custom-label: label-value\n    asserts:\n      - equal:\n          path: spec.template.metadata.labels.custom-label\n          value: label-value\n\n  # Test node selector, tolerations, and affinity\n  - it: should apply node selector\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      nodeSelector:\n        disktype: ssd\n    asserts:\n      - equal:\n          path: spec.template.spec.nodeSelector.disktype\n          value: ssd\n\n  - it: should apply tolerations\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      tolerations:\n        - key: \"key1\"\n          operator: \"Equal\"\n          value: \"value1\"\n          effect: \"NoSchedule\"\n    asserts:\n      - contains:\n          path: spec.template.spec.tolerations\n          content:\n            key: \"key1\"\n            operator: \"Equal\"\n            value: \"value1\"\n            effect: \"NoSchedule\"\n\n  - it: should apply affinity\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      affinity:\n        nodeAffinity:\n          
requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                  - key: kubernetes.io/hostname\n                    operator: In\n                    values:\n                      - node1\n    asserts:\n      - isNotEmpty:\n          path: spec.template.spec.affinity.nodeAffinity\n\n  # Test image pull secrets\n  - it: should apply image pull secrets\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      imagePullSecrets:\n        - name: my-secret\n    asserts:\n      - contains:\n          path: spec.template.spec.imagePullSecrets\n          content:\n            name: my-secret\n"
  },
  {
    "path": "deploy/charts/discovery-agent/tests/poddisruptionbudget_test.yaml",
    "content": "suite: test poddisruptionbudget\ntemplates:\n  - poddisruptionbudget.yaml\n\ntests:\n  # Test PodDisruptionBudget is not created by default\n  - it: should not create PodDisruptionBudget when disabled\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      podDisruptionBudget.enabled: false\n    asserts:\n      - hasDocuments:\n          count: 0\n\n  # Test PodDisruptionBudget is created when enabled\n  - it: should create PodDisruptionBudget when enabled\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      podDisruptionBudget.enabled: true\n    asserts:\n      - isKind:\n          of: PodDisruptionBudget\n      - equal:\n          path: metadata.name\n          value: RELEASE-NAME-discovery-agent\n\n  # Test default minAvailable when neither minAvailable nor maxUnavailable is set\n  - it: should set default minAvailable when no disruption values are set\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      podDisruptionBudget.enabled: true\n    asserts:\n      - equal:\n          path: spec.minAvailable\n          value: 1\n      - isNull:\n          path: spec.maxUnavailable\n\n  # Test custom minAvailable\n  - it: should set custom minAvailable\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      podDisruptionBudget.enabled: true\n      podDisruptionBudget.minAvailable: 2\n    asserts:\n      - equal:\n          path: spec.minAvailable\n          value: 2\n      - isNull:\n          path: spec.maxUnavailable\n\n  # Test minAvailable as percentage\n  - it: should set minAvailable as percentage\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      podDisruptionBudget.enabled: true\n      podDisruptionBudget.minAvailable: \"50%\"\n    asserts:\n      - equal:\n          path: spec.minAvailable\n          value: \"50%\"\n\n  # Test custom maxUnavailable\n  - it: should set 
custom maxUnavailable\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      podDisruptionBudget.enabled: true\n      podDisruptionBudget.maxUnavailable: 1\n    asserts:\n      - equal:\n          path: spec.maxUnavailable\n          value: 1\n      - isNull:\n          path: spec.minAvailable\n\n  # Test maxUnavailable as percentage\n  - it: should set maxUnavailable as percentage\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      podDisruptionBudget.enabled: true\n      podDisruptionBudget.maxUnavailable: \"25%\"\n    asserts:\n      - equal:\n          path: spec.maxUnavailable\n          value: \"25%\"\n\n  # Test selector labels\n  - it: should use correct selector labels\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      podDisruptionBudget.enabled: true\n    asserts:\n      - isNotEmpty:\n          path: spec.selector.matchLabels\n"
  },
  {
    "path": "deploy/charts/discovery-agent/tests/podmonitor_test.yaml",
    "content": "suite: test podmonitor\ntemplates:\n  - podmonitor.yaml\n\ntests:\n  # Test PodMonitor is not created by default\n  - it: should not create PodMonitor when metrics.podmonitor.enabled is false\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      metrics.enabled: true\n      metrics.podmonitor.enabled: false\n    asserts:\n      - hasDocuments:\n          count: 0\n\n  # Test PodMonitor is not created when metrics are disabled\n  - it: should not create PodMonitor when metrics.enabled is false\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      metrics.enabled: false\n      metrics.podmonitor.enabled: true\n    asserts:\n      - hasDocuments:\n          count: 0\n\n  # Test PodMonitor is created when both metrics and podmonitor are enabled\n  - it: should create PodMonitor when both metrics and podmonitor are enabled\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      metrics.enabled: true\n      metrics.podmonitor.enabled: true\n    asserts:\n      - isKind:\n          of: PodMonitor\n      - equal:\n          path: metadata.name\n          value: RELEASE-NAME-discovery-agent\n\n  # Test PodMonitor namespace defaults to Release namespace\n  - it: should use Release namespace by default\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      metrics.enabled: true\n      metrics.podmonitor.enabled: true\n    release:\n      namespace: my-namespace\n    asserts:\n      - equal:\n          path: metadata.namespace\n          value: my-namespace\n\n  # Test custom PodMonitor namespace\n  - it: should use custom namespace when specified\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      metrics.enabled: true\n      metrics.podmonitor.enabled: true\n      metrics.podmonitor.namespace: monitoring\n    release:\n      namespace: default\n    asserts:\n      - equal:\n          
path: metadata.namespace\n          value: monitoring\n      - contains:\n          path: spec.namespaceSelector.matchNames\n          content: default\n\n  # Test prometheus instance label\n  - it: should set prometheus instance label\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      metrics.enabled: true\n      metrics.podmonitor.enabled: true\n      metrics.podmonitor.prometheusInstance: custom-prometheus\n    asserts:\n      - equal:\n          path: metadata.labels.prometheus\n          value: custom-prometheus\n\n  # Test custom labels\n  - it: should apply custom labels\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      metrics.enabled: true\n      metrics.podmonitor.enabled: true\n      metrics.podmonitor.labels:\n        custom-label: custom-value\n        another-label: another-value\n    asserts:\n      - equal:\n          path: metadata.labels.custom-label\n          value: custom-value\n      - equal:\n          path: metadata.labels.another-label\n          value: another-value\n\n  # Test custom annotations\n  - it: should apply custom annotations\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      metrics.enabled: true\n      metrics.podmonitor.enabled: true\n      metrics.podmonitor.annotations:\n        custom-annotation: custom-value\n    asserts:\n      - equal:\n          path: metadata.annotations.custom-annotation\n          value: custom-value\n\n  # Test scrape configuration\n  - it: should configure scrape interval and timeout\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      metrics.enabled: true\n      metrics.podmonitor.enabled: true\n      metrics.podmonitor.interval: 30s\n      metrics.podmonitor.scrapeTimeout: 15s\n    asserts:\n      - equal:\n          path: spec.podMetricsEndpoints[0].interval\n          value: 30s\n      - equal:\n          path: 
spec.podMetricsEndpoints[0].scrapeTimeout\n          value: 15s\n\n  # Test honorLabels setting\n  - it: should set honorLabels correctly\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      metrics.enabled: true\n      metrics.podmonitor.enabled: true\n      metrics.podmonitor.honorLabels: true\n    asserts:\n      - equal:\n          path: spec.podMetricsEndpoints[0].honorLabels\n          value: true\n\n  # Test metrics endpoint configuration\n  - it: should configure metrics endpoint correctly\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      metrics.enabled: true\n      metrics.podmonitor.enabled: true\n    asserts:\n      - equal:\n          path: spec.podMetricsEndpoints[0].port\n          value: agent-api\n      - equal:\n          path: spec.podMetricsEndpoints[0].path\n          value: /metrics\n"
  },
  {
    "path": "deploy/charts/discovery-agent/tests/rbac_test.yaml",
    "content": "suite: test rbac\ntemplates:\n  - rbac.yaml\n\ntests:\n  # Test that all RBAC resources are created\n  - it: should create all RBAC resources\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n    asserts:\n      - hasDocuments:\n          count: 8\n\n  # Test Role for event emission\n  - it: should create Role for event emission\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n    documentIndex: 0\n    asserts:\n      - isKind:\n          of: Role\n      - equal:\n          path: metadata.name\n          value: RELEASE-NAME-discovery-agent-event-emitted\n      - contains:\n          path: rules\n          content:\n            apiGroups: [\"\"]\n            resources: [\"events\"]\n            verbs: [\"create\"]\n\n  # Test RoleBinding for event emission\n  - it: should create RoleBinding for event emission\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n    documentIndex: 1\n    asserts:\n      - isKind:\n          of: RoleBinding\n      - equal:\n          path: metadata.name\n          value: RELEASE-NAME-discovery-agent-event-emitted\n      - equal:\n          path: roleRef.kind\n          value: Role\n      - equal:\n          path: roleRef.name\n          value: RELEASE-NAME-discovery-agent-event-emitted\n      - contains:\n          path: subjects\n          content:\n            kind: ServiceAccount\n            name: RELEASE-NAME-discovery-agent\n            namespace: NAMESPACE\n\n  # Test ClusterRoleBinding for cluster viewer\n  - it: should create ClusterRoleBinding for cluster viewer\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n    documentIndex: 2\n    asserts:\n      - isKind:\n          of: ClusterRoleBinding\n      - equal:\n          path: metadata.name\n          value: RELEASE-NAME-discovery-agent-cluster-viewer\n      - equal:\n          path: roleRef.kind\n          value: ClusterRole\n    
  - equal:\n          path: roleRef.name\n          value: view\n      - contains:\n          path: subjects\n          content:\n            kind: ServiceAccount\n            name: RELEASE-NAME-discovery-agent\n            namespace: NAMESPACE\n\n  # Test ClusterRole for secret reader\n  - it: should create ClusterRole for secret reader\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n    documentIndex: 3\n    asserts:\n      - isKind:\n          of: ClusterRole\n      - equal:\n          path: metadata.name\n          value: RELEASE-NAME-discovery-agent-secret-reader\n      - contains:\n          path: rules\n          content:\n            apiGroups: [\"\"]\n            resources: [\"secrets\"]\n            verbs: [\"get\", \"list\", \"watch\"]\n\n  # Test ClusterRoleBinding for secret reader\n  - it: should create ClusterRoleBinding for secret reader\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n    documentIndex: 4\n    asserts:\n      - isKind:\n          of: ClusterRoleBinding\n      - equal:\n          path: metadata.name\n          value: RELEASE-NAME-discovery-agent-secret-reader\n      - equal:\n          path: roleRef.kind\n          value: ClusterRole\n      - equal:\n          path: roleRef.name\n          value: RELEASE-NAME-discovery-agent-secret-reader\n\n  # Test ClusterRole for RBAC reader\n  - it: should create ClusterRole for RBAC reader\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n    documentIndex: 5\n    asserts:\n      - isKind:\n          of: ClusterRole\n      - equal:\n          path: metadata.name\n          value: RELEASE-NAME-discovery-agent-rbac-reader\n      - contains:\n          path: rules[0].resources\n          content: roles\n      - contains:\n          path: rules[0].resources\n          content: clusterroles\n      - contains:\n          path: rules[0].resources\n          content: rolebindings\n      - contains:\n   
       path: rules[0].resources\n          content: clusterrolebindings\n\n  # Test ClusterRoleBinding for RBAC reader\n  - it: should create ClusterRoleBinding for RBAC reader\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n    documentIndex: 6\n    asserts:\n      - isKind:\n          of: ClusterRoleBinding\n      - equal:\n          path: metadata.name\n          value: RELEASE-NAME-discovery-agent-rbac-reader\n      - equal:\n          path: roleRef.kind\n          value: ClusterRole\n      - equal:\n          path: roleRef.name\n          value: RELEASE-NAME-discovery-agent-rbac-reader\n\n  # Test ClusterRoleBinding for OIDC discovery\n  - it: should create ClusterRoleBinding for OIDC discovery\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n    documentIndex: 7\n    asserts:\n      - isKind:\n          of: ClusterRoleBinding\n      - equal:\n          path: metadata.name\n          value: RELEASE-NAME-discovery-agent-oidc-discovery\n      - equal:\n          path: roleRef.kind\n          value: ClusterRole\n      - equal:\n          path: roleRef.name\n          value: system:service-account-issuer-discovery\n"
  },
  {
    "path": "deploy/charts/discovery-agent/tests/serviceaccount_test.yaml",
    "content": "suite: test serviceaccount\ntemplates:\n  - serviceaccount.yaml\n\ntests:\n  # Test ServiceAccount is created by default\n  - it: should create ServiceAccount when serviceAccount.create is true\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      serviceAccount.create: true\n    asserts:\n      - isKind:\n          of: ServiceAccount\n      - equal:\n          path: metadata.name\n          value: RELEASE-NAME-discovery-agent\n\n  # Test ServiceAccount is not created when disabled\n  - it: should not create ServiceAccount when serviceAccount.create is false\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      serviceAccount.create: false\n    asserts:\n      - hasDocuments:\n          count: 0\n\n  # Test custom ServiceAccount name\n  - it: should use custom name when serviceAccount.name is set\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      serviceAccount.create: true\n      serviceAccount.name: custom-sa-name\n    asserts:\n      - equal:\n          path: metadata.name\n          value: custom-sa-name\n\n  # Test automountServiceAccountToken setting\n  - it: should set automountServiceAccountToken correctly\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      serviceAccount.create: true\n      serviceAccount.automount: false\n    asserts:\n      - equal:\n          path: automountServiceAccountToken\n          value: false\n\n  - it: should enable automountServiceAccountToken by default\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      serviceAccount.create: true\n    asserts:\n      - equal:\n          path: automountServiceAccountToken\n          value: true\n\n  # Test ServiceAccount annotations\n  - it: should apply annotations to ServiceAccount\n    set:\n      config.clusterName: test-cluster\n      config.tsgID: \"123456\"\n      serviceAccount.create: 
true\n      serviceAccount.annotations:\n        eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/my-role\n        custom-annotation: custom-value\n    asserts:\n      - equal:\n          path: metadata.annotations[\"eks.amazonaws.com/role-arn\"]\n          value: arn:aws:iam::123456789012:role/my-role\n      - equal:\n          path: metadata.annotations.custom-annotation\n          value: custom-value\n"
  },
  {
    "path": "deploy/charts/discovery-agent/values.linter.exceptions",
    "content": ""
  },
  {
    "path": "deploy/charts/discovery-agent/values.schema.json",
    "content": "{\n  \"$defs\": {\n    \"helm-values\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"affinity\": {\n          \"$ref\": \"#/$defs/helm-values.affinity\"\n        },\n        \"config\": {\n          \"$ref\": \"#/$defs/helm-values.config\"\n        },\n        \"extraArgs\": {\n          \"$ref\": \"#/$defs/helm-values.extraArgs\"\n        },\n        \"fullnameOverride\": {\n          \"$ref\": \"#/$defs/helm-values.fullnameOverride\"\n        },\n        \"global\": {\n          \"$ref\": \"#/$defs/helm-values.global\"\n        },\n        \"http_proxy\": {\n          \"$ref\": \"#/$defs/helm-values.http_proxy\"\n        },\n        \"https_proxy\": {\n          \"$ref\": \"#/$defs/helm-values.https_proxy\"\n        },\n        \"image\": {\n          \"$ref\": \"#/$defs/helm-values.image\"\n        },\n        \"imageNamespace\": {\n          \"$ref\": \"#/$defs/helm-values.imageNamespace\"\n        },\n        \"imagePullSecrets\": {\n          \"$ref\": \"#/$defs/helm-values.imagePullSecrets\"\n        },\n        \"imageRegistry\": {\n          \"$ref\": \"#/$defs/helm-values.imageRegistry\"\n        },\n        \"metrics\": {\n          \"$ref\": \"#/$defs/helm-values.metrics\"\n        },\n        \"nameOverride\": {\n          \"$ref\": \"#/$defs/helm-values.nameOverride\"\n        },\n        \"no_proxy\": {\n          \"$ref\": \"#/$defs/helm-values.no_proxy\"\n        },\n        \"nodeSelector\": {\n          \"$ref\": \"#/$defs/helm-values.nodeSelector\"\n        },\n        \"podAnnotations\": {\n          \"$ref\": \"#/$defs/helm-values.podAnnotations\"\n        },\n        \"podDisruptionBudget\": {\n          \"$ref\": \"#/$defs/helm-values.podDisruptionBudget\"\n        },\n        \"podLabels\": {\n          \"$ref\": \"#/$defs/helm-values.podLabels\"\n        },\n        \"podSecurityContext\": {\n          \"$ref\": \"#/$defs/helm-values.podSecurityContext\"\n        },\n        \"pprof\": {\n   
       \"$ref\": \"#/$defs/helm-values.pprof\"\n        },\n        \"replicaCount\": {\n          \"$ref\": \"#/$defs/helm-values.replicaCount\"\n        },\n        \"resources\": {\n          \"$ref\": \"#/$defs/helm-values.resources\"\n        },\n        \"securityContext\": {\n          \"$ref\": \"#/$defs/helm-values.securityContext\"\n        },\n        \"serviceAccount\": {\n          \"$ref\": \"#/$defs/helm-values.serviceAccount\"\n        },\n        \"tolerations\": {\n          \"$ref\": \"#/$defs/helm-values.tolerations\"\n        },\n        \"volumeMounts\": {\n          \"$ref\": \"#/$defs/helm-values.volumeMounts\"\n        },\n        \"volumes\": {\n          \"$ref\": \"#/$defs/helm-values.volumes\"\n        }\n      },\n      \"type\": \"object\"\n    },\n    \"helm-values.affinity\": {\n      \"default\": {},\n      \"type\": \"object\"\n    },\n    \"helm-values.config\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"claimableCerts\": {\n          \"$ref\": \"#/$defs/helm-values.config.claimableCerts\"\n        },\n        \"clientID\": {\n          \"$ref\": \"#/$defs/helm-values.config.clientID\"\n        },\n        \"clientId\": {\n          \"$ref\": \"#/$defs/helm-values.config.clientId\"\n        },\n        \"clusterDescription\": {\n          \"$ref\": \"#/$defs/helm-values.config.clusterDescription\"\n        },\n        \"clusterName\": {\n          \"$ref\": \"#/$defs/helm-values.config.clusterName\"\n        },\n        \"excludeAnnotationKeysRegex\": {\n          \"$ref\": \"#/$defs/helm-values.config.excludeAnnotationKeysRegex\"\n        },\n        \"excludeLabelKeysRegex\": {\n          \"$ref\": \"#/$defs/helm-values.config.excludeLabelKeysRegex\"\n        },\n        \"period\": {\n          \"$ref\": \"#/$defs/helm-values.config.period\"\n        },\n        \"secretName\": {\n          \"$ref\": \"#/$defs/helm-values.config.secretName\"\n        },\n        \"serverURL\": {\n          
\"$ref\": \"#/$defs/helm-values.config.serverURL\"\n        },\n        \"tsgID\": {\n          \"$ref\": \"#/$defs/helm-values.config.tsgID\"\n        }\n      },\n      \"type\": \"object\"\n    },\n    \"helm-values.config.claimableCerts\": {\n      \"default\": false,\n      \"description\": \"Whether discovered certs can be claimed by other tenants (optional). true = certs are left unassigned, available for any tenant to claim. false (default) = certs are owned by this cluster's tenant.\",\n      \"type\": \"boolean\"\n    },\n    \"helm-values.config.clientID\": {\n      \"default\": \"\",\n      \"description\": \"Deprecated: Client ID for the configured service account. The client ID should be provided in the \\\"clientID\\\" field of the authentication secret (see config.secretName). This field is provided for compatibility for users migrating from the \\\"venafi-kubernetes-agent\\\" chart.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.config.clientId\": {\n      \"default\": \"\",\n      \"description\": \"Deprecated: Client ID for the configured service account (alternative to clientID). The client ID should be provided in the \\\"clientID\\\" field of the authentication secret (see config.secretName). This field is provided for compatibility for users migrating from the \\\"venafi-kubernetes-agent\\\" chart. 
If both clientID and clientId are set, clientID takes precedence.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.config.clusterDescription\": {\n      \"default\": \"\",\n      \"description\": \"A short description of the cluster where the agent is deployed (optional).\\n\\nThis description will be associated with the data that the agent uploads to the backend.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.config.clusterName\": {\n      \"default\": \"\",\n      \"description\": \"Required: A human readable name for the cluster into which the agent is being deployed.\\n\\nThis cluster name will be associated with the data that the agent uploads to the backend.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.config.excludeAnnotationKeysRegex\": {\n      \"default\": [],\n      \"description\": \"You can configure the agent to exclude some annotations or labels from being pushed. All Kubernetes objects are affected. The objects are still pushed, but the specified annotations and labels are removed before being pushed.\\n\\nThe dot is the only character that needs to be escaped in the regex. 
Use either double quotes with escaped single quotes or unquoted strings for the regex to avoid YAML parsing issues with `\\\\.`.\\n\\nExample: excludeAnnotationKeysRegex: ['^kapp\\\\.k14s\\\\.io/original.*']\",\n      \"items\": {},\n      \"type\": \"array\"\n    },\n    \"helm-values.config.excludeLabelKeysRegex\": {\n      \"default\": [],\n      \"items\": {},\n      \"type\": \"array\"\n    },\n    \"helm-values.config.period\": {\n      \"default\": \"0h1m0s\",\n      \"description\": \"How often to push data to the remote server\",\n      \"type\": \"string\"\n    },\n    \"helm-values.config.secretName\": {\n      \"default\": \"discovery-agent-credentials\",\n      \"description\": \"The name of the Secret containing the NGTS built-in service account credentials.\\nThe Secret must contain the following key:\\n- privatekey.pem: PEM-encoded private key for the service account\\nThe Secret should also contain the following key:\\n- clientID:       Service account client ID (config.clientID must be set if not present)\",\n      \"type\": \"string\"\n    },\n    \"helm-values.config.serverURL\": {\n      \"default\": \"\",\n      \"description\": \"Explicit SCM server URL (optional).\\nIf not set, a production SCM server URL will be created based on the TSG ID. This value is intended for development purposes only and should not be set in production.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.config.tsgID\": {\n      \"default\": \"\",\n      \"description\": \"Required: The TSG (Tenant Service Group) ID to use when connecting to SCM. NB: TSG IDs are numeric, but must be treated as strings to avoid issues with YAML data types. 
With the Helm CLI use `--set-string`; with YAML always pass TSG IDs in double quotes.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.extraArgs\": {\n      \"default\": [],\n      \"description\": \"extraArgs:\\n- --logging-format=json\\n- --log-level=6 # To enable HTTP request logging\",\n      \"items\": {},\n      \"type\": \"array\"\n    },\n    \"helm-values.fullnameOverride\": {\n      \"default\": \"\",\n      \"type\": \"string\"\n    },\n    \"helm-values.global\": {\n      \"description\": \"Global values shared across all (sub)charts\"\n    },\n    \"helm-values.http_proxy\": {\n      \"description\": \"Configures the HTTP_PROXY environment variable where a HTTP proxy is required.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.https_proxy\": {\n      \"description\": \"Configures the HTTPS_PROXY environment variable where a HTTP proxy is required.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.image\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"digest\": {\n          \"$ref\": \"#/$defs/helm-values.image.digest\"\n        },\n        \"name\": {\n          \"$ref\": \"#/$defs/helm-values.image.name\"\n        },\n        \"pullPolicy\": {\n          \"$ref\": \"#/$defs/helm-values.image.pullPolicy\"\n        },\n        \"repository\": {\n          \"$ref\": \"#/$defs/helm-values.image.repository\"\n        },\n        \"tag\": {\n          \"$ref\": \"#/$defs/helm-values.image.tag\"\n        }\n      },\n      \"type\": \"object\"\n    },\n    \"helm-values.image.digest\": {\n      \"default\": \"\",\n      \"description\": \"Override the image digest to deploy by setting this variable. 
If set together with `image.tag`, the rendered image will include both tag and digest.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.image.name\": {\n      \"default\": \"discovery-agent\",\n      \"description\": \"The image name for the Discovery Agent.\\nThis is used (together with `imageRegistry` and `imageNamespace`) to construct the full image reference.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.image.pullPolicy\": {\n      \"default\": \"IfNotPresent\",\n      \"description\": \"This sets the pull policy for images.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.image.repository\": {\n      \"default\": \"\",\n      \"description\": \"Full repository override (takes precedence over `imageRegistry`, `imageNamespace`, and `image.name`).\\nExample: quay.io/jetstack/discovery-agent\",\n      \"type\": \"string\"\n    },\n    \"helm-values.image.tag\": {\n      \"default\": \"\",\n      \"description\": \"Override the image tag to deploy by setting this variable. If no value is set, the chart's appVersion is used.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.imageNamespace\": {\n      \"default\": \"jetstack\",\n      \"description\": \"The repository namespace used for discovery-agent images by default.\\nExamples:\\n- jetstack\\n- custom-namespace\",\n      \"type\": \"string\"\n    },\n    \"helm-values.imagePullSecrets\": {\n      \"default\": [],\n      \"description\": \"This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/\",\n      \"items\": {},\n      \"type\": \"array\"\n    },\n    \"helm-values.imageRegistry\": {\n      \"default\": \"quay.io\",\n      \"description\": \"The container registry used for discovery-agent images by default. This can include path prefixes (e.g. 
\\\"artifactory.example.com/docker\\\").\",\n      \"type\": \"string\"\n    },\n    \"helm-values.metrics\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"enabled\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.enabled\"\n        },\n        \"podmonitor\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor\"\n        }\n      },\n      \"type\": \"object\"\n    },\n    \"helm-values.metrics.enabled\": {\n      \"default\": true,\n      \"description\": \"Enable the metrics server.\\nIf false, the metrics server will be disabled and the other metrics fields below will be ignored.\",\n      \"type\": \"boolean\"\n    },\n    \"helm-values.metrics.podmonitor\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"annotations\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor.annotations\"\n        },\n        \"enabled\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor.enabled\"\n        },\n        \"endpointAdditionalProperties\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor.endpointAdditionalProperties\"\n        },\n        \"honorLabels\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor.honorLabels\"\n        },\n        \"interval\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor.interval\"\n        },\n        \"labels\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor.labels\"\n        },\n        \"namespace\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor.namespace\"\n        },\n        \"prometheusInstance\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor.prometheusInstance\"\n        },\n        \"scrapeTimeout\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor.scrapeTimeout\"\n        }\n      },\n      \"type\": \"object\"\n    },\n    \"helm-values.metrics.podmonitor.annotations\": {\n      \"default\": {},\n      
\"description\": \"Additional annotations to add to the PodMonitor.\",\n      \"type\": \"object\"\n    },\n    \"helm-values.metrics.podmonitor.enabled\": {\n      \"default\": false,\n      \"description\": \"Create a PodMonitor to add the metrics to Prometheus, if you are using Prometheus Operator. See https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor\",\n      \"type\": \"boolean\"\n    },\n    \"helm-values.metrics.podmonitor.endpointAdditionalProperties\": {\n      \"default\": {},\n      \"description\": \"EndpointAdditionalProperties allows setting additional properties on the endpoint such as relabelings, metricRelabelings etc.\\n\\nFor example:\\nendpointAdditionalProperties:\\n relabelings:\\n - action: replace\\n   sourceLabels:\\n   - __meta_kubernetes_pod_node_name\\n   targetLabel: instance\",\n      \"type\": \"object\"\n    },\n    \"helm-values.metrics.podmonitor.honorLabels\": {\n      \"default\": false,\n      \"description\": \"Keep labels from scraped data, overriding server-side labels.\",\n      \"type\": \"boolean\"\n    },\n    \"helm-values.metrics.podmonitor.interval\": {\n      \"default\": \"60s\",\n      \"description\": \"The interval to scrape metrics.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.metrics.podmonitor.labels\": {\n      \"default\": {},\n      \"description\": \"Additional labels to add to the PodMonitor.\",\n      \"type\": \"object\"\n    },\n    \"helm-values.metrics.podmonitor.namespace\": {\n      \"description\": \"The namespace that the pod monitor should live in.\\nDefaults to the discovery-agent namespace.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.metrics.podmonitor.prometheusInstance\": {\n      \"default\": \"default\",\n      \"description\": \"Specifies the `prometheus` label on the created PodMonitor. 
This is used when different Prometheus instances have label selectors matching different PodMonitors.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.metrics.podmonitor.scrapeTimeout\": {\n      \"default\": \"30s\",\n      \"description\": \"The timeout before a metrics scrape fails.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.nameOverride\": {\n      \"default\": \"\",\n      \"description\": \"This is to override the chart name.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.no_proxy\": {\n      \"description\": \"Configures the NO_PROXY environment variable where a HTTP proxy is required, but certain domains should be excluded.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.nodeSelector\": {\n      \"default\": {},\n      \"type\": \"object\"\n    },\n    \"helm-values.podAnnotations\": {\n      \"default\": {},\n      \"description\": \"This is for setting Kubernetes Annotations to a Pod. For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/\",\n      \"type\": \"object\"\n    },\n    \"helm-values.podDisruptionBudget\": {\n      \"default\": {\n        \"enabled\": false\n      },\n      \"description\": \"Configure a PodDisruptionBudget for the agent's Deployment. 
If running with multiple replicas, consider setting podDisruptionBudget.enabled to true.\",\n      \"type\": \"object\"\n    },\n    \"helm-values.podLabels\": {\n      \"default\": {},\n      \"description\": \"This is for setting Kubernetes Labels to a Pod.\\nFor more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/\",\n      \"type\": \"object\"\n    },\n    \"helm-values.podSecurityContext\": {\n      \"default\": {},\n      \"type\": \"object\"\n    },\n    \"helm-values.pprof\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"enabled\": {\n          \"$ref\": \"#/$defs/helm-values.pprof.enabled\"\n        }\n      },\n      \"type\": \"object\"\n    },\n    \"helm-values.pprof.enabled\": {\n      \"default\": false,\n      \"description\": \"Enable profiling with the pprof endpoint\",\n      \"type\": \"boolean\"\n    },\n    \"helm-values.replicaCount\": {\n      \"default\": 1,\n      \"description\": \"This will set the replicaset count more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/\",\n      \"type\": \"number\"\n    },\n    \"helm-values.resources\": {\n      \"default\": {},\n      \"type\": \"object\"\n    },\n    \"helm-values.securityContext\": {\n      \"default\": {\n        \"allowPrivilegeEscalation\": false,\n        \"capabilities\": {\n          \"drop\": [\n            \"ALL\"\n          ]\n        },\n        \"readOnlyRootFilesystem\": true,\n        \"runAsNonRoot\": true,\n        \"seccompProfile\": {\n          \"type\": \"RuntimeDefault\"\n        }\n      },\n      \"description\": \"Add Container specific SecurityContext settings to the container. Takes precedence over `podSecurityContext` when set. 
See https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container\",\n      \"type\": \"object\"\n    },\n    \"helm-values.serviceAccount\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"annotations\": {\n          \"$ref\": \"#/$defs/helm-values.serviceAccount.annotations\"\n        },\n        \"automount\": {\n          \"$ref\": \"#/$defs/helm-values.serviceAccount.automount\"\n        },\n        \"create\": {\n          \"$ref\": \"#/$defs/helm-values.serviceAccount.create\"\n        },\n        \"name\": {\n          \"$ref\": \"#/$defs/helm-values.serviceAccount.name\"\n        }\n      },\n      \"type\": \"object\"\n    },\n    \"helm-values.serviceAccount.annotations\": {\n      \"default\": {},\n      \"description\": \"Annotations to add to the service account\",\n      \"type\": \"object\"\n    },\n    \"helm-values.serviceAccount.automount\": {\n      \"default\": true,\n      \"description\": \"Automatically mount a ServiceAccount's API credentials?\",\n      \"type\": \"boolean\"\n    },\n    \"helm-values.serviceAccount.create\": {\n      \"default\": true,\n      \"description\": \"Specifies whether a service account should be created\",\n      \"type\": \"boolean\"\n    },\n    \"helm-values.serviceAccount.name\": {\n      \"default\": \"\",\n      \"description\": \"The name of the service account to use.\\nIf not set and create is true, a name is generated using the fullname template\",\n      \"type\": \"string\"\n    },\n    \"helm-values.tolerations\": {\n      \"default\": [],\n      \"items\": {},\n      \"type\": \"array\"\n    },\n    \"helm-values.volumeMounts\": {\n      \"default\": [],\n      \"description\": \"Additional volumeMounts on the output Deployment definition.\",\n      \"items\": {},\n      \"type\": \"array\"\n    },\n    \"helm-values.volumes\": {\n      \"default\": [],\n      \"description\": \"Additional volumes on the output Deployment 
definition.\",\n      \"items\": {},\n      \"type\": \"array\"\n    }\n  },\n  \"$ref\": \"#/$defs/helm-values\",\n  \"$schema\": \"http://json-schema.org/draft-07/schema#\"\n}\n"
  },
  {
    "path": "deploy/charts/discovery-agent/values.yaml",
    "content": "# Configuration for the Discovery Agent\nconfig:\n  # Required: The TSG (Tenant Service Group) ID to use when connecting to SCM.\n  # NB: TSG IDs are numeric, but must be treated as strings to avoid issues with YAML data types.\n  # With the Helm CLI use `--set-string`; with YAML always pass TSG IDs in double quotes.\n  # +docs:property\n  # +docs:type=string\n  tsgID: \"\"\n\n  # Required: A human readable name for the cluster into which the agent is being deployed.\n  #\n  # This cluster name will be associated with the data that the agent uploads to the backend.\n  # +docs:property\n  clusterName: \"\"\n\n  # A short description of the cluster where the agent is deployed (optional).\n  #\n  # This description will be associated with the data that the agent uploads to the backend.\n  # +docs:property\n  clusterDescription: \"\"\n\n  # Whether discovered certs can be claimed by other tenants (optional).\n  # true = certs are left unassigned, available for any tenant to claim.\n  # false (default) = certs are owned by this cluster's tenant.\n  claimableCerts: false\n\n  # How often to push data to the remote server\n  # +docs:property\n  period: \"0h1m0s\"\n\n  # You can configure the agent to exclude some annotations or\n  # labels from being pushed. All Kubernetes objects\n  # are affected. The objects are still pushed, but the specified annotations\n  # and labels are removed before being pushed.\n  #\n  # The dot is the only character that needs to be escaped in the regex. 
Use either\n  # double quotes with escaped single quotes or unquoted strings for the regex\n  # to avoid YAML parsing issues with `\\.`.\n  #\n  # Example: excludeAnnotationKeysRegex: ['^kapp\\.k14s\\.io/original.*']\n  excludeAnnotationKeysRegex: []\n  excludeLabelKeysRegex: []\n\n  # Deprecated: Client ID for the configured service account.\n  # The client ID should be provided in the \"clientID\" field of the authentication secret (see config.secretName).\n  # This field is provided for compatibility for users migrating from the \"venafi-kubernetes-agent\" chart.\n  # +docs:property\n  clientID: \"\"\n\n  # Deprecated: Client ID for the configured service account (alternative to clientID).\n  # The client ID should be provided in the \"clientID\" field of the authentication secret (see config.secretName).\n  # This field is provided for compatibility for users migrating from the \"venafi-kubernetes-agent\" chart.\n  # If both clientID and clientId are set, clientID takes precedence.\n  # +docs:hidden\n  clientId: \"\"\n\n  # The name of the Secret containing the NGTS built-in service account credentials.\n  # The Secret must contain the following key:\n  # - privatekey.pem: PEM-encoded private key for the service account\n  # The Secret should also contain the following key:\n  # - clientID:       Service account client ID (config.clientID must be set if not present)\n  # +docs:property\n  secretName: discovery-agent-credentials\n\n  # Explicit SCM server URL (optional).\n  # If not set, a production SCM server URL will be created based on the TSG ID.\n  # This value is intended for development purposes only and should not be set in production.\n  # +docs:hidden\n  serverURL: \"\"\n\n# This will set the replicaset count more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/\nreplicaCount: 1\n\n# The container registry used for discovery-agent images by default.\n# This can include path prefixes (e.g. 
\"artifactory.example.com/docker\").\n# +docs:property\nimageRegistry: \"quay.io\"\n\n# The repository namespace used for discovery-agent images by default.\n# Examples:\n# - jetstack\n# - custom-namespace\n# +docs:property\nimageNamespace: \"jetstack\"\n\n# This sets the container image more information can be found here: https://kubernetes.io/docs/concepts/containers/images/\nimage:\n  # Full repository override (takes precedence over `imageRegistry`, `imageNamespace`,\n  # and `image.name`).\n  # Example: quay.io/jetstack/discovery-agent\n  # +docs:property\n  repository: \"\"\n\n  # The image name for the Discovery Agent.\n  # This is used (together with `imageRegistry` and `imageNamespace`) to construct the full\n  # image reference.\n  # +docs:property\n  name: discovery-agent\n\n  # This sets the pull policy for images.\n  pullPolicy: IfNotPresent\n\n  # Override the image tag to deploy by setting this variable.\n  # If no value is set, the chart's appVersion is used.\n  tag: \"\"\n\n  # Override the image digest to deploy by setting this variable.\n  # If set together with `image.tag`, the rendered image will include both tag and digest.\n  digest: \"\"\n\n# This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/\nimagePullSecrets: []\n# This is to override the chart name.\nnameOverride: \"\"\nfullnameOverride: \"\"\n\n# This section builds out the service account more information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/\nserviceAccount:\n  # Specifies whether a service account should be created\n  create: true\n  # Automatically mount a ServiceAccount's API credentials?\n  automount: true\n  # Annotations to add to the service account\n  annotations: {}\n  # The name of the service account to use.\n  # If not set and create is true, a name is generated using the fullname template\n 
 name: \"\"\n\n# This is for setting Kubernetes Annotations to a Pod.\n# For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/\npodAnnotations: {}\n# This is for setting Kubernetes Labels to a Pod.\n# For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/\npodLabels: {}\n\npodSecurityContext: {}\n  # fsGroup: 2000\n\n# Add Container specific SecurityContext settings to the container. Takes\n# precedence over `podSecurityContext` when set. See\n# https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container\n# +docs:property\nsecurityContext:\n  capabilities:\n    drop:\n      - ALL\n  readOnlyRootFilesystem: true\n  runAsNonRoot: true\n  allowPrivilegeEscalation: false\n  seccompProfile: { type: RuntimeDefault }\n\nresources: {}\n  # We usually recommend not to specify default resources and to leave this as a conscious\n  # choice for the user. This also increases chances charts run on environments with little\n  # resources, such as Minikube. 
If you do want to specify resources, uncomment the following\n  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.\n  # limits:\n  #   cpu: 100m\n  #   memory: 128Mi\n  # requests:\n  #   cpu: 100m\n  #   memory: 128Mi\n\n# Additional volumes on the output Deployment definition.\nvolumes: []\n# - name: foo\n#   secret:\n#     secretName: mysecret\n#     optional: false\n\n# Additional volumeMounts on the output Deployment definition.\nvolumeMounts: []\n# - name: foo\n#   mountPath: \"/etc/foo\"\n#   readOnly: true\n\nnodeSelector: {}\n\ntolerations: []\n\naffinity: {}\n\n# Configures the HTTP_PROXY environment variable where a HTTP proxy is required.\n# +docs:property\n# http_proxy: \"http://proxy:8080\"\n\n# Configures the HTTPS_PROXY environment variable where a HTTP proxy is required.\n# +docs:property\n# https_proxy: \"https://proxy:8080\"\n\n# Configures the NO_PROXY environment variable where a HTTP proxy is required,\n# but certain domains should be excluded.\n# +docs:property\n# no_proxy: 127.0.0.1,localhost\n\n# Configure a PodDisruptionBudget for the agent's Deployment. If running with multiple\n# replicas, consider setting podDisruptionBudget.enabled to true.\n# +docs:property\npodDisruptionBudget:\n  # Enable or disable the PodDisruptionBudget resource, which helps prevent downtime\n  # during voluntary disruptions such as during a Node upgrade.\n  enabled: false\n\n  # Configure the minimum available pods for disruptions. Can either be set to\n  # an integer (e.g. 1) or a percentage value (e.g. 25%).\n  # Cannot be used if `maxUnavailable` is set.\n  # +docs:property\n  # minAvailable: 1\n\n  # Configure the maximum unavailable pods for disruptions. Can either be set to\n  # an integer (e.g. 1) or a percentage value (e.g. 
25%).\n  # Cannot be used if `minAvailable` is set.\n  # +docs:property\n  # maxUnavailable: 1\n\n#  extraArgs:\n#  - --logging-format=json\n#  - --log-level=6 # To enable HTTP request logging\nextraArgs: []\n\npprof:\n  # Enable profiling with the pprof endpoint\n  enabled: false\n\nmetrics:\n  # Enable the metrics server.\n  # If false, the metrics server will be disabled and the other metrics fields below will be ignored.\n  enabled: true\n  podmonitor:\n    # Create a PodMonitor to add the metrics to Prometheus, if you are using Prometheus Operator.\n    # See https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor\n    enabled: false\n\n    # The namespace that the pod monitor should live in.\n    # Defaults to the discovery-agent namespace.\n    # +docs:property\n    # namespace: ngts\n\n    # Specifies the `prometheus` label on the created PodMonitor.\n    # This is used when different Prometheus instances have label selectors\n    # matching different PodMonitors.\n    prometheusInstance: default\n\n    # The interval to scrape metrics.\n    interval: 60s\n\n    # The timeout before a metrics scrape fails.\n    scrapeTimeout: 30s\n\n    # Additional labels to add to the PodMonitor.\n    labels: {}\n\n    # Additional annotations to add to the PodMonitor.\n    annotations: {}\n\n    # Keep labels from scraped data, overriding server-side labels.\n    honorLabels: false\n\n    # EndpointAdditionalProperties allows setting additional properties on the endpoint such as relabelings, metricRelabelings etc.\n    #\n    # For example:\n    #  endpointAdditionalProperties:\n    #   relabelings:\n    #   - action: replace\n    #     sourceLabels:\n    #     - __meta_kubernetes_pod_node_name\n    #     targetLabel: instance\n    endpointAdditionalProperties: {}\n"
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/.helmignore",
    "content": "# Patterns to ignore when building packages.\n# This supports shell glob matching, relative path matching, and\n# negation (prefixed with !). Only one pattern per line.\n.DS_Store\n# Common VCS dirs\n.git/\n.gitignore\n.bzr/\n.bzrignore\n.hg/\n.hgignore\n.svn/\n# Common backup files\n*.swp\n*.bak\n*.tmp\n*.orig\n*~\n# Various IDEs\n.project\n.idea/\n*.tmproj\n.vscode/\n"
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/Chart.yaml",
    "content": "apiVersion: v2\nname: venafi-kubernetes-agent\ntype: application\n\ndescription: |-\n  The Discovery Agent connects your Kubernetes or OpenShift cluster to the CyberArk Certificate Manager.\n\nmaintainers:\n  - name: CyberArk\n    email: mis.support@cyberark.com\n    url: https://www.cyberark.com\n\nsources:\n  - https://github.com/jetstack/jetstack-secure\n\n# These versions are meant to be overridden by `make helm-chart`. No `v` prefix\n# for the `version` because Helm doesn't support auto-determining the latest\n# version for OCI Helm charts that use a `v` prefix.\nversion: 0.0.0\nappVersion: \"v0.0.0\"\n"
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/README.md",
    "content": "# venafi-kubernetes-agent\n\nThe Discovery Agent connects your Kubernetes or OpenShift cluster to the CyberArk Certificate Manager (formerly Venafi Control Plane).\nYou will require a CyberArk Certificate Manager account to connect your cluster.\nIf you do not have one, you can sign up for a free trial now at:\n\n- https://www.cyberark.com/try-buy/certificate-manager-saas-trial/\n\n> 📖 Read the [Discovery Agent documentation](https://docs.cyberark.com/mis-saas/vaas/k8s-components/c-tlspk-agent-overview/),\n> to learn how install and configure this Helm chart.\n\n## Values\n\n<!-- AUTO-GENERATED -->\n\n#### **metrics.enabled** ~ `bool`\n> Default value:\n> ```yaml\n> true\n> ```\n\nEnable the metrics server.  \nIf false, the metrics server will be disabled and the other metrics fields below will be ignored.\n#### **metrics.podmonitor.enabled** ~ `bool`\n> Default value:\n> ```yaml\n> false\n> ```\n\nCreate a PodMonitor to add the metrics to Prometheus, if you are using Prometheus Operator. See https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor\n#### **metrics.podmonitor.namespace** ~ `string`\n\nThe namespace that the pod monitor should live in. Defaults to the venafi-kubernetes-agent namespace.\n\n#### **metrics.podmonitor.prometheusInstance** ~ `string`\n> Default value:\n> ```yaml\n> default\n> ```\n\nSpecifies the `prometheus` label on the created PodMonitor. 
This is used when different Prometheus instances have label selectors matching different PodMonitors.\n#### **metrics.podmonitor.interval** ~ `string`\n> Default value:\n> ```yaml\n> 60s\n> ```\n\nThe interval to scrape metrics.\n#### **metrics.podmonitor.scrapeTimeout** ~ `string`\n> Default value:\n> ```yaml\n> 30s\n> ```\n\nThe timeout before a metrics scrape fails.\n#### **metrics.podmonitor.labels** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n\nAdditional labels to add to the PodMonitor.\n#### **metrics.podmonitor.annotations** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n\nAdditional annotations to add to the PodMonitor.\n#### **metrics.podmonitor.honorLabels** ~ `bool`\n> Default value:\n> ```yaml\n> false\n> ```\n\nKeep labels from scraped data, overriding server-side labels.\n#### **metrics.podmonitor.endpointAdditionalProperties** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n\nEndpointAdditionalProperties allows setting additional properties on the endpoint such as relabelings, metricRelabelings etc.  \n  \nFor example:\n\n```yaml\nendpointAdditionalProperties:\n relabelings:\n - action: replace\n   sourceLabels:\n   - __meta_kubernetes_pod_node_name\n   targetLabel: instance\n```\n#### **replicaCount** ~ `number`\n> Default value:\n> ```yaml\n> 1\n> ```\n\ndefault replicas, do not scale up\n#### **imageRegistry** ~ `string`\n> Default value:\n> ```yaml\n> registry.venafi.cloud\n> ```\n\nThe container registry used for venafi-kubernetes-agent images by default. This can include path prefixes (e.g. \"artifactory.example.com/docker\").\n\n#### **imageNamespace** ~ `string`\n> Default value:\n> ```yaml\n> venafi-agent\n> ```\n\nThe repository namespace used for venafi-kubernetes-agent images by default.  \nExamples:  \n- venafi-agent  \n- custom-namespace\n\n#### **image.registry** ~ `string`\n\nDeprecated: per-component registry prefix.  
\n  \nIf set, this value is *prepended* to the image repository that the chart would otherwise render. This applies both when `image.repository` is set and when the repository is computed from  \n`imageRegistry` + `imageNamespace` + `image.name`.  \n  \nThis can produce \"double registry\" style references such as  \n`legacy.example.io/registry.venafi.cloud/venafi-agent/...`. Prefer using the global  \n`imageRegistry`/`imageNamespace` values.\n\n#### **image.repository** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n\nFull repository override (takes precedence over `imageRegistry`, `imageNamespace`, and `image.name`). Example: registry.venafi.cloud/venafi-agent/venafi-agent\n\n#### **image.name** ~ `string`\n> Default value:\n> ```yaml\n> venafi-agent\n> ```\n\nThe image name for the Discovery Agent.  \nThis is used (together with `imageRegistry` and `imageNamespace`) to construct the full image reference.\n\n#### **image.pullPolicy** ~ `string`\n> Default value:\n> ```yaml\n> IfNotPresent\n> ```\n\nKubernetes imagePullPolicy on Deployment.\n#### **image.tag** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n\nOverride the image tag to deploy by setting this variable. If no value is set, the chart's appVersion is used.\n#### **image.digest** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n\nOverride the image digest to deploy by setting this variable. If set together with `image.tag`, the rendered image will include both tag and digest.\n#### **imagePullSecrets** ~ `array`\n> Default value:\n> ```yaml\n> []\n> ```\n\nSpecify image pull credentials if using a private registry. 
Example:  \n - name: my-pull-secret\n#### **nameOverride** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n\nHelm default setting to override release name, usually leave blank.\n#### **fullnameOverride** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n\nHelm default setting, use this to shorten the full install name.\n#### **serviceAccount.create** ~ `bool`\n> Default value:\n> ```yaml\n> true\n> ```\n\nSpecifies whether a service account should be created.\n#### **serviceAccount.annotations** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n\nAnnotations YAML to add to the service account.\n#### **serviceAccount.name** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n\nThe name of the service account to use. If blank and `serviceAccount.create` is true, a name is generated using the fullname template of the release.\n#### **podAnnotations** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n\nAdditional YAML annotations to add the the pod.\n#### **podSecurityContext** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n\nOptional Pod (all containers) `SecurityContext` options, see https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod.  
\n  \nExample:  \n  \n podSecurityContext\n\n```yaml\nrunAsUser: 1000\nrunAsGroup: 3000\nfsGroup: 2000\n```\n#### **http_proxy** ~ `string`\n\nConfigures the HTTP_PROXY environment variable where a HTTP proxy is required.\n\n#### **https_proxy** ~ `string`\n\nConfigures the HTTPS_PROXY environment variable where a HTTP proxy is required.\n\n#### **no_proxy** ~ `string`\n\nConfigures the NO_PROXY environment variable where a HTTP proxy is required, but certain domains should be excluded.\n\n#### **securityContext** ~ `object`\n> Default value:\n> ```yaml\n> allowPrivilegeEscalation: false\n> capabilities:\n>   drop:\n>     - ALL\n> readOnlyRootFilesystem: true\n> runAsNonRoot: true\n> seccompProfile:\n>   type: RuntimeDefault\n> ```\n\nAdd Container specific SecurityContext settings to the container. Takes precedence over `podSecurityContext` when set. See https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container\n\n#### **resources** ~ `object`\n> Default value:\n> ```yaml\n> limits:\n>   memory: 500Mi\n> requests:\n>   cpu: 200m\n>   memory: 200Mi\n> ```\n\nSet resource requests and limits for the pod.  
\n  \nRead [Venafi Kubernetes components deployment best practices](https://docs.cyberark.com/mis-saas/vaas/k8s-components/c-k8s-components-best-practice/#scaling) to learn how to choose suitable CPU and memory resource requests and limits.\n\n#### **nodeSelector** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n\nEmbed YAML for nodeSelector settings, see  \nhttps://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/\n#### **tolerations** ~ `array`\n> Default value:\n> ```yaml\n> []\n> ```\n\nEmbed YAML for toleration settings, see  \nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/\n#### **affinity** ~ `object`\n> Default value:\n> ```yaml\n> {}\n> ```\n\nEmbed YAML for Node affinity settings, see  \nhttps://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/.\n#### **command** ~ `array`\n> Default value:\n> ```yaml\n> []\n> ```\n\nSpecify the command to run overriding default binary.\n#### **extraArgs** ~ `array`\n> Default value:\n> ```yaml\n> []\n> ```\n\nSpecify additional arguments to pass to the agent binary. For example, to enable JSON logging use `--logging-format`, or to increase the logging verbosity use `--log-level`.  \nThe log levels are: 0=Info, 1=Debug, 2=Trace.  \nUse 6-9 for increasingly verbose HTTP request logging.  \nThe default log level is 0.  \n  \nExample:\n\n```yaml\nextraArgs:\n- --logging-format=json\n- --log-level=6 # To enable HTTP request logging\n```\n#### **volumes** ~ `array`\n> Default value:\n> ```yaml\n> []\n> ```\n\nAdditional volumes to add to the Discovery Agent container. This is useful for mounting a custom CA bundle. 
For example:\n\n```yaml\nvolumes:\n  - name: cabundle\n    configMap:\n      name: cabundle\n      optional: false\n      defaultMode: 0644\n```\n\nIn order to create the ConfigMap, you can use the following command:  \n  \n    kubectl create configmap cabundle \\\n      --from-file=cabundle=./your/custom/ca/bundle.pem\n#### **volumeMounts** ~ `array`\n> Default value:\n> ```yaml\n> []\n> ```\n\nAdditional volume mounts to add to the Discovery Agent container. This is useful for mounting a custom CA bundle. Any PEM certificate mounted under /etc/ssl/certs will be loaded by the Discovery Agent. For example:\n\n```yaml\nvolumeMounts:\n  - name: cabundle\n    mountPath: /etc/ssl/certs/cabundle\n    subPath: cabundle\n    readOnly: true\n```\n#### **authentication.secretName** ~ `string`\n> Default value:\n> ```yaml\n> agent-credentials\n> ```\n\nName of the secret containing the private key\n#### **authentication.secretKey** ~ `string`\n> Default value:\n> ```yaml\n> privatekey.pem\n> ```\n\nKey name in the referenced secret\n### Venafi Connection\n\n\nConfigure VenafiConnection authentication\n#### **authentication.venafiConnection.enabled** ~ `bool`\n> Default value:\n> ```yaml\n> false\n> ```\n\nWhen set to true, the Discovery Agent will authenticate to CyberArk Certificate Manager using the configuration in a VenafiConnection resource. Use `venafiConnection.enabled=true` for [secretless authentication](https://docs.cyberark.com/mis-saas/vaas/k8s-components/t-install-tlspk-agent/). When set to true, the `authentication.secret` values will be ignored and the 
Secret with `authentication.secretName` will _not_ be mounted into the  \nDiscovery Agent Pod.\n#### **authentication.venafiConnection.name** ~ `string`\n> Default value:\n> ```yaml\n> venafi-components\n> ```\n\nThe name of a VenafiConnection resource which contains the configuration for authenticating to Venafi.\n#### **authentication.venafiConnection.namespace** ~ `string`\n> Default value:\n> ```yaml\n> venafi\n> ```\n\nThe namespace of a VenafiConnection resource which contains the configuration for authenticating to Venafi.\n#### **config.server** ~ `string`\n> Default value:\n> ```yaml\n> https://api.venafi.cloud/\n> ```\n\nAPI URL of the CyberArk Certificate Manager API. For EU tenants, set this value to https://api.venafi.eu/. If you are using the VenafiConnection authentication method, you must set the API URL using the field `spec.vcp.url` on the  \nVenafiConnection resource instead.\n#### **config.clientId** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n\nThe client-id to be used for authenticating with the Venafi Control Plane. Only useful when using a Key Pair Service Account in the Venafi Control Plane. 
You can obtain the client ID by creating a Key Pair Service  \nAccount in the CyberArk Certificate Manager.\n#### **config.period** ~ `string`\n> Default value:\n> ```yaml\n> 0h1m0s\n> ```\n\nSend data back to the platform every minute unless changed.\n#### **config.clusterName** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n\nName for the cluster resource if it needs to be created in Venafi Control  \nPlane.\n#### **config.clusterDescription** ~ `string`\n> Default value:\n> ```yaml\n> \"\"\n> ```\n\nDescription for the cluster resource if it needs to be created in Venafi  \nControl Plane.\n#### **config.ignoredSecretTypes[0]** ~ `string`\n> Default value:\n> ```yaml\n> kubernetes.io/service-account-token\n> ```\n#### **config.ignoredSecretTypes[1]** ~ `string`\n> Default value:\n> ```yaml\n> kubernetes.io/dockercfg\n> ```\n#### **config.ignoredSecretTypes[2]** ~ `string`\n> Default value:\n> ```yaml\n> kubernetes.io/dockerconfigjson\n> ```\n#### **config.ignoredSecretTypes[3]** ~ `string`\n> Default value:\n> ```yaml\n> kubernetes.io/basic-auth\n> ```\n#### **config.ignoredSecretTypes[4]** ~ `string`\n> Default value:\n> ```yaml\n> kubernetes.io/ssh-auth\n> ```\n#### **config.ignoredSecretTypes[5]** ~ `string`\n> Default value:\n> ```yaml\n> bootstrap.kubernetes.io/token\n> ```\n#### **config.ignoredSecretTypes[6]** ~ `string`\n> Default value:\n> ```yaml\n> helm.sh/release.v1\n> ```\n#### **config.excludeAnnotationKeysRegex** ~ `array`\n> Default value:\n> ```yaml\n> []\n> ```\n\nYou can configure Discovery Agent to exclude some annotations or labels from being pushed to the CyberArk Certificate Manager. All Kubernetes objects are affected. The objects are still pushed, but the specified annotations and labels are removed before being sent to the CyberArk Certificate Manager.  \n  \nThe dot is the only character that needs to be escaped in the regex. 
Use either double quotes with escaped single quotes or unquoted strings for the regex to avoid YAML parsing issues with `\\.`.  \n  \nExample: excludeAnnotationKeysRegex: ['^kapp\\.k14s\\.io/original.*']\n#### **config.excludeLabelKeysRegex** ~ `array`\n> Default value:\n> ```yaml\n> []\n> ```\n#### **config.configmap.name** ~ `unknown`\n> Default value:\n> ```yaml\n> null\n> ```\n#### **config.configmap.key** ~ `unknown`\n> Default value:\n> ```yaml\n> null\n> ```\n#### **podDisruptionBudget.enabled** ~ `bool`\n> Default value:\n> ```yaml\n> false\n> ```\n\nEnable or disable the PodDisruptionBudget resource, which helps prevent downtime during voluntary disruptions such as during a Node upgrade.\n#### **podDisruptionBudget.minAvailable** ~ `number`\n\nConfigure the minimum available pods for disruptions. Can either be set to an integer (e.g. 1) or a percentage value (e.g. 25%).  \nCannot be used if `maxUnavailable` is set.\n\n#### **podDisruptionBudget.maxUnavailable** ~ `number`\n\nConfigure the maximum unavailable pods for disruptions. Can either be set to an integer (e.g. 1) or a percentage value (e.g. 25%).  \nCannot be used if `minAvailable` is set.\n\n### CRDs\n\n\nThe CRDs installed by this chart are annotated with \"helm.sh/resource-policy: keep\", this prevents them from being accidentally removed by Helm when this chart is deleted. After deleting the installed chart, the user still has to manually remove the remaining CRDs.\n#### **crds.forceRemoveValidationAnnotations** ~ `bool`\n> Default value:\n> ```yaml\n> false\n> ```\n\nThe 'x-kubernetes-validations' annotation is not supported in Kubernetes 1.22 and below. This annotation is used by CEL, which is a feature introduced in Kubernetes 1.25 that improves how validation is performed. 
This option allows you to force the 'x-kubernetes-validations' annotation to be excluded, even on Kubernetes 1.25+ clusters.\n#### **crds.keep** ~ `bool`\n> Default value:\n> ```yaml\n> false\n> ```\n\nThis option makes it so that the \"helm.sh/resource-policy\": keep annotation is added to the CRD. This will prevent Helm from uninstalling the CRD when the Helm release is uninstalled.\n#### **crds.venafiConnection.include** ~ `bool`\n> Default value:\n> ```yaml\n> false\n> ```\n\nWhen set to false, the rendered output does not contain the VenafiConnection CRDs and RBAC. This is useful for when the Venafi Connection resources are already installed separately.\n\n<!-- /AUTO-GENERATED -->\n"
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/crd_bases/crd.footer.yaml",
    "content": "{{ end }}\n{{ end }}\n"
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/crd_bases/crd.header-without-validations.yaml",
    "content": "{{/* DO NOT EDIT. Use 'make generate-crds-venconn' to regenerate. */}}\n{{- if .Values.crds.venafiConnection.include }}\n{{- if (or (semverCompare \"<1.25\" .Capabilities.KubeVersion.GitVersion) .Values.crds.forceRemoveValidationAnnotations) }}\napiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n  name: \"venaficonnections.jetstack.io\"\n  {{- if .Values.crds.keep }}\n  annotations:\n    # This annotation prevents the CRD from being pruned by Helm when this chart\n    # is deleted.\n    helm.sh/resource-policy: keep\n  {{- end }}\n  labels:\n  {{- include \"venafi-connection.labels\" . | nindent 4 }}\n"
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/crd_bases/crd.header.yaml",
    "content": "{{/* DO NOT EDIT. Use 'make generate-crds-venconn' to regenerate. */}}\n{{- if .Values.crds.venafiConnection.include }}\n{{- if not (or (semverCompare \"<1.25\" .Capabilities.KubeVersion.GitVersion) .Values.crds.forceRemoveValidationAnnotations) }}\napiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n  name: \"venaficonnections.jetstack.io\"\n  {{- if .Values.crds.keep }}\n  annotations:\n    # This annotation prevents the CRD from being pruned by Helm when this chart\n    # is deleted.\n    helm.sh/resource-policy: keep\n  {{- end }}\n  labels:\n  {{- include \"venafi-connection.labels\" . | nindent 4 }}\n"
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/crd_bases/jetstack.io_venaficonnections.yaml",
    "content": "# DO NOT EDIT: Use 'make generate-crds-venconn' to regenerate.\n---\napiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n  annotations:\n    controller-gen.kubebuilder.io/version: v0.19.0\n  name: venaficonnections.jetstack.io\nspec:\n  group: jetstack.io\n  names:\n    kind: VenafiConnection\n    listKind: VenafiConnectionList\n    plural: venaficonnections\n    shortNames:\n    - vc\n    singular: venaficonnection\n  scope: Namespaced\n  versions:\n  - name: v1alpha1\n    schema:\n      openAPIV3Schema:\n        description: VenafiConnection is the Schema for the VenafiConnection API\n        properties:\n          apiVersion:\n            description: |-\n              APIVersion defines the versioned schema of this representation of an object.\n              Servers should convert recognized schemas to the latest internal value, and\n              may reject unrecognized values.\n              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n            type: string\n          kind:\n            description: |-\n              Kind is a string value representing the REST resource this object represents.\n              Servers may infer this from the endpoint the client submits requests to.\n              Cannot be updated.\n              In CamelCase.\n              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n            type: string\n          metadata:\n            type: object\n          spec:\n            properties:\n              allowReferencesFrom:\n                description: |-\n                  A namespace selector that specifies what namespaces this VenafiConnection\n                  is allowed to be used from.\n                  If not set/ null, the VenafiConnection can only be used within its namespace.\n                  An empty selector ({}) matches all namespaces.\n              
    If set to a non-empty selector, the VenafiConnection can only be used from\n                  namespaces that match the selector. This possibly excludes the namespace\n                  the VenafiConnection is in.\n                properties:\n                  matchExpressions:\n                    description: matchExpressions is a list of label selector requirements.\n                      The requirements are ANDed.\n                    items:\n                      description: |-\n                        A label selector requirement is a selector that contains values, a key, and an operator that\n                        relates the key and values.\n                      properties:\n                        key:\n                          description: key is the label key that the selector applies\n                            to.\n                          type: string\n                        operator:\n                          description: |-\n                            operator represents a key's relationship to a set of values.\n                            Valid operators are In, NotIn, Exists and DoesNotExist.\n                          type: string\n                        values:\n                          description: |-\n                            values is an array of string values. If the operator is In or NotIn,\n                            the values array must be non-empty. If the operator is Exists or DoesNotExist,\n                            the values array must be empty. 
This array is replaced during a strategic\n                            merge patch.\n                          items:\n                            type: string\n                          type: array\n                          x-kubernetes-list-type: atomic\n                      required:\n                      - key\n                      - operator\n                      type: object\n                    type: array\n                    x-kubernetes-list-type: atomic\n                  matchLabels:\n                    additionalProperties:\n                      type: string\n                    description: |-\n                      matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\n                      map is equivalent to an element of matchExpressions, whose key field is \"key\", the\n                      operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.\n                    type: object\n                type: object\n                x-kubernetes-map-type: atomic\n              firefly:\n                properties:\n                  accessToken:\n                    description: |-\n                      The list of steps to retrieve the Access Token that will be used to connect\n                      to Firefly.\n                    items:\n                      properties:\n                        hashicorpVaultLDAP:\n                          description: |-\n                            HashicorpVaultLDAP is a SecretSource step that requires a Vault token in\n                            the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It\n                            then fetches the requested secrets from Vault for use in the next step.\n                          properties:\n                            ldapPath:\n                              description: |-\n                                The full HTTP path to the secret in Vault. 
Example:\n                                /v1/ldap/static-cred/:role_name\n                                or\n                                /v1/ldap/creds/:role_name\n                              type: string\n                            url:\n                              description: The URL to connect to your HashiCorp Vault\n                                instance.\n                              type: string\n                          required:\n                          - ldapPath\n                          type: object\n                        hashicorpVaultOAuth:\n                          description: |-\n                            HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource\n                            step to provide an OAuth token, which this step uses to authenticate to\n                            Vault. The output of this step is a Vault token. This step allows you to use\n                            the step `HashicorpVaultSecret` afterwards.\n                          properties:\n                            authInputType:\n                              description: |-\n                                AuthInputType is the authentication method to be used to authenticate\n                                with HashiCorp Vault. The only supported value is \"OIDC\".\n                              enum:\n                              - OIDC\n                              type: string\n                            authPath:\n                              description: |-\n                                The login URL used for obtaining the Vault token. 
Example:\n                                /v1/auth/oidc/login\n                              type: string\n                            clientId:\n                              description: 'Deprecated: This field does nothing and\n                                will be removed in the future.'\n                              type: string\n                            role:\n                              description: |-\n                                The role defined in Vault that we want to use when authenticating to\n                                Vault.\n                              type: string\n                            url:\n                              description: The URL to connect to your HashiCorp Vault\n                                instance.\n                              type: string\n                          required:\n                          - authInputType\n                          - authPath\n                          - role\n                          type: object\n                        hashicorpVaultSecret:\n                          description: |-\n                            HashicorpVaultSecret is a SecretSource step that requires a Vault token in\n                            the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. 
It\n                            then fetches the requested secrets from Vault for use in the next step.\n                          properties:\n                            fields:\n                              description: |-\n                                The fields are Vault keys pointing to the secrets passed to the next\n                                SecretSource step.\n\n                                Example 1 (TPP, username and password): imagining that you have stored\n                                the username and password for TPP under the keys \"username\" and\n                                \"password\", you will want to set this field to `[\"username\",\n                                \"password\"]`. The username is expected to be given first, the password\n                                second.\n                              items:\n                                type: string\n                              type: array\n                            secretPath:\n                              description: |-\n                                The full HTTP path to the secret in Vault. Example:\n                                /v1/secret/data/application-team-a/tpp-username-password\n                              type: string\n                            url:\n                              description: The URL to connect to your HashiCorp Vault\n                                instance.\n                              type: string\n                          required:\n                          - fields\n                          - secretPath\n                          type: object\n                        secret:\n                          description: |-\n                            Secret is a SecretSource step meant to be the first step. 
It retrieves secret
                            values from a Kubernetes Secret, and passes them to the next step.
                          properties:
                            fields:
                              description: |-
                                The names of the fields we want to extract from the Kubernetes secret.
                                These fields are passed to the next step in the chain.
                              items:
                                type: string
                              type: array
                            name:
                              description: The name of the Kubernetes secret.
                              type: string
                          required:
                          - fields
                          - name
                          type: object
                        serviceAccountToken:
                          description: |-
                            ServiceAccountToken is a SecretSource step meant to be the first step. It
                            uses the Kubernetes TokenRequest API to retrieve a token for a given service
                            account, and passes it to the next step.
                          properties:
                            audiences:
                              description: |-
                                Audiences are the intended audiences of the token. A recipient of a
                                token must identify themself with an identifier in the list of
                                audiences of the token, and otherwise should reject the token. 
A\n                                token issued for multiple audiences may be used to authenticate\n                                against any of the audiences listed but implies a high degree of\n                                trust between the target audiences.\n                              items:\n                                type: string\n                              type: array\n                            expirationSeconds:\n                              description: |-\n                                ExpirationSeconds is the requested duration of validity of the request. The\n                                token issuer may return a token with a different validity duration so a\n                                client needs to check the 'expiration' field in a response.\n                              format: int64\n                              type: integer\n                            name:\n                              description: The name of the Kubernetes service account.\n                              type: string\n                          required:\n                          - audiences\n                          - name\n                          type: object\n                        tppOAuth:\n                          description: |-\n                            TPPOAuth is a SecretSource step that authenticates to a TPP server. This\n                            step is meant to be the last step and requires a prior step that depends\n                            on the `authInputType`.\n                          properties:\n                            authInputType:\n                              description: |-\n                                AuthInputType is the authentication method to be used to authenticate\n                                with TPP. 
The supported values are \"UsernamePassword\" and \"JWT\".\n                              enum:\n                              - UsernamePassword\n                              - JWT\n                              type: string\n                            clientId:\n                              description: ClientID is the clientId used to authenticate\n                                with TPP.\n                              type: string\n                            url:\n                              description: |-\n                                The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs\n                                https://tpp.example.com and https://tpp.example.com/vedsdk are\n                                equivalent. The ending `/vedsdk` is optional and is stripped out\n                                by our client.\n                                If not set, defaults to the URL defined at the top-level of the\n                                TPP configuration.\n                              type: string\n                          required:\n                          - authInputType\n                          type: object\n                        vcpOAuth:\n                          description: |-\n                            VCPOAuth is a SecretSource step that authenticates to the\n                            Certificate Manager, SaaS. 
This step is meant to be the last step and requires a prior step\n                            that outputs a JWT token.\n                          properties:\n                            tenantID:\n                              description: TenantID is the tenant ID used to authenticate\n                                with Certificate Manager, SaaS.\n                              type: string\n                          type: object\n                      type: object\n                      x-kubernetes-validations:\n                      - message: must have exactly one field set\n                        rule: '((has(self.secret) ? 1 : 0) + (has(self.serviceAccountToken)\n                          ? 1 : 0) + (has(self.hashicorpVaultOAuth) ? 1 : 0) + (has(self.hashicorpVaultSecret)\n                          ? 1 : 0) + (has(self.hashicorpVaultLDAP) ? 1 : 0) + (has(self.tppOAuth)\n                          ? 1 : 0) + (has(self.vcpOAuth) ? 1 : 0)) == 1'\n                    maxItems: 50\n                    type: array\n                    x-kubernetes-list-type: atomic\n                  url:\n                    description: The URL to connect to the Workload Identity Manager\n                      instance.\n                    type: string\n                required:\n                - url\n                type: object\n              tpp:\n                properties:\n                  accessToken:\n                    description: The list of steps to retrieve a TPP access token.\n                    items:\n                      properties:\n                        hashicorpVaultLDAP:\n                          description: |-\n                            HashicorpVaultLDAP is a SecretSource step that requires a Vault token in\n                            the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. 
It\n                            then fetches the requested secrets from Vault for use in the next step.\n                          properties:\n                            ldapPath:\n                              description: |-\n                                The full HTTP path to the secret in Vault. Example:\n                                /v1/ldap/static-cred/:role_name\n                                or\n                                /v1/ldap/creds/:role_name\n                              type: string\n                            url:\n                              description: The URL to connect to your HashiCorp Vault\n                                instance.\n                              type: string\n                          required:\n                          - ldapPath\n                          type: object\n                        hashicorpVaultOAuth:\n                          description: |-\n                            HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource\n                            step to provide an OAuth token, which this step uses to authenticate to\n                            Vault. The output of this step is a Vault token. This step allows you to use\n                            the step `HashicorpVaultSecret` afterwards.\n                          properties:\n                            authInputType:\n                              description: |-\n                                AuthInputType is the authentication method to be used to authenticate\n                                with HashiCorp Vault. The only supported value is \"OIDC\".\n                              enum:\n                              - OIDC\n                              type: string\n                            authPath:\n                              description: |-\n                                The login URL used for obtaining the Vault token. 
Example:\n                                /v1/auth/oidc/login\n                              type: string\n                            clientId:\n                              description: 'Deprecated: This field does nothing and\n                                will be removed in the future.'\n                              type: string\n                            role:\n                              description: |-\n                                The role defined in Vault that we want to use when authenticating to\n                                Vault.\n                              type: string\n                            url:\n                              description: The URL to connect to your HashiCorp Vault\n                                instance.\n                              type: string\n                          required:\n                          - authInputType\n                          - authPath\n                          - role\n                          type: object\n                        hashicorpVaultSecret:\n                          description: |-\n                            HashicorpVaultSecret is a SecretSource step that requires a Vault token in\n                            the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. 
It\n                            then fetches the requested secrets from Vault for use in the next step.\n                          properties:\n                            fields:\n                              description: |-\n                                The fields are Vault keys pointing to the secrets passed to the next\n                                SecretSource step.\n\n                                Example 1 (TPP, username and password): imagining that you have stored\n                                the username and password for TPP under the keys \"username\" and\n                                \"password\", you will want to set this field to `[\"username\",\n                                \"password\"]`. The username is expected to be given first, the password\n                                second.\n                              items:\n                                type: string\n                              type: array\n                            secretPath:\n                              description: |-\n                                The full HTTP path to the secret in Vault. Example:\n                                /v1/secret/data/application-team-a/tpp-username-password\n                              type: string\n                            url:\n                              description: The URL to connect to your HashiCorp Vault\n                                instance.\n                              type: string\n                          required:\n                          - fields\n                          - secretPath\n                          type: object\n                        secret:\n                          description: |-\n                            Secret is a SecretSource step meant to be the first step. 
It retrieves secret
                            values from a Kubernetes Secret, and passes them to the next step.
                          properties:
                            fields:
                              description: |-
                                The names of the fields we want to extract from the Kubernetes secret.
                                These fields are passed to the next step in the chain.
                              items:
                                type: string
                              type: array
                            name:
                              description: The name of the Kubernetes secret.
                              type: string
                          required:
                          - fields
                          - name
                          type: object
                        serviceAccountToken:
                          description: |-
                            ServiceAccountToken is a SecretSource step meant to be the first step. It
                            uses the Kubernetes TokenRequest API to retrieve a token for a given service
                            account, and passes it to the next step.
                          properties:
                            audiences:
                              description: |-
                                Audiences are the intended audiences of the token. A recipient of a
                                token must identify themself with an identifier in the list of
                                audiences of the token, and otherwise should reject the token. 
A\n                                token issued for multiple audiences may be used to authenticate\n                                against any of the audiences listed but implies a high degree of\n                                trust between the target audiences.\n                              items:\n                                type: string\n                              type: array\n                            expirationSeconds:\n                              description: |-\n                                ExpirationSeconds is the requested duration of validity of the request. The\n                                token issuer may return a token with a different validity duration so a\n                                client needs to check the 'expiration' field in a response.\n                              format: int64\n                              type: integer\n                            name:\n                              description: The name of the Kubernetes service account.\n                              type: string\n                          required:\n                          - audiences\n                          - name\n                          type: object\n                        tppOAuth:\n                          description: |-\n                            TPPOAuth is a SecretSource step that authenticates to a TPP server. This\n                            step is meant to be the last step and requires a prior step that depends\n                            on the `authInputType`.\n                          properties:\n                            authInputType:\n                              description: |-\n                                AuthInputType is the authentication method to be used to authenticate\n                                with TPP. 
The supported values are \"UsernamePassword\" and \"JWT\".\n                              enum:\n                              - UsernamePassword\n                              - JWT\n                              type: string\n                            clientId:\n                              description: ClientID is the clientId used to authenticate\n                                with TPP.\n                              type: string\n                            url:\n                              description: |-\n                                The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs\n                                https://tpp.example.com and https://tpp.example.com/vedsdk are\n                                equivalent. The ending `/vedsdk` is optional and is stripped out\n                                by our client.\n                                If not set, defaults to the URL defined at the top-level of the\n                                TPP configuration.\n                              type: string\n                          required:\n                          - authInputType\n                          type: object\n                        vcpOAuth:\n                          description: |-\n                            VCPOAuth is a SecretSource step that authenticates to the\n                            Certificate Manager, SaaS. 
This step is meant to be the last step and requires a prior step\n                            that outputs a JWT token.\n                          properties:\n                            tenantID:\n                              description: TenantID is the tenant ID used to authenticate\n                                with Certificate Manager, SaaS.\n                              type: string\n                          type: object\n                      type: object\n                      x-kubernetes-validations:\n                      - message: must have exactly one field set\n                        rule: '((has(self.secret) ? 1 : 0) + (has(self.serviceAccountToken)\n                          ? 1 : 0) + (has(self.hashicorpVaultOAuth) ? 1 : 0) + (has(self.hashicorpVaultSecret)\n                          ? 1 : 0) + (has(self.hashicorpVaultLDAP) ? 1 : 0) + (has(self.tppOAuth)\n                          ? 1 : 0) + (has(self.vcpOAuth) ? 1 : 0)) == 1'\n                    maxItems: 50\n                    type: array\n                    x-kubernetes-list-type: atomic\n                  url:\n                    description: |-\n                      The URL to connect to the Certificate Manager Self-Hosted instance. The two URLs\n                      https://tpp.example.com and https://tpp.example.com/vedsdk are\n                      equivalent. 
The ending `/vedsdk` is optional and is stripped out by\n                      venafi-connection-lib.\n                    type: string\n                required:\n                - url\n                type: object\n              vaas:\n                description: 'Deprecated: The ''vaas'' field is deprecated use the\n                  field called ''vcp'' instead.'\n                properties:\n                  accessToken:\n                    description: |-\n                      The list of steps to retrieve the Access Token that will be used to connect\n                      to Certificate Manager, SaaS.\n                    items:\n                      properties:\n                        hashicorpVaultLDAP:\n                          description: |-\n                            HashicorpVaultLDAP is a SecretSource step that requires a Vault token in\n                            the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It\n                            then fetches the requested secrets from Vault for use in the next step.\n                          properties:\n                            ldapPath:\n                              description: |-\n                                The full HTTP path to the secret in Vault. 
Example:\n                                /v1/ldap/static-cred/:role_name\n                                or\n                                /v1/ldap/creds/:role_name\n                              type: string\n                            url:\n                              description: The URL to connect to your HashiCorp Vault\n                                instance.\n                              type: string\n                          required:\n                          - ldapPath\n                          type: object\n                        hashicorpVaultOAuth:\n                          description: |-\n                            HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource\n                            step to provide an OAuth token, which this step uses to authenticate to\n                            Vault. The output of this step is a Vault token. This step allows you to use\n                            the step `HashicorpVaultSecret` afterwards.\n                          properties:\n                            authInputType:\n                              description: |-\n                                AuthInputType is the authentication method to be used to authenticate\n                                with HashiCorp Vault. The only supported value is \"OIDC\".\n                              enum:\n                              - OIDC\n                              type: string\n                            authPath:\n                              description: |-\n                                The login URL used for obtaining the Vault token. 
Example:\n                                /v1/auth/oidc/login\n                              type: string\n                            clientId:\n                              description: 'Deprecated: This field does nothing and\n                                will be removed in the future.'\n                              type: string\n                            role:\n                              description: |-\n                                The role defined in Vault that we want to use when authenticating to\n                                Vault.\n                              type: string\n                            url:\n                              description: The URL to connect to your HashiCorp Vault\n                                instance.\n                              type: string\n                          required:\n                          - authInputType\n                          - authPath\n                          - role\n                          type: object\n                        hashicorpVaultSecret:\n                          description: |-\n                            HashicorpVaultSecret is a SecretSource step that requires a Vault token in\n                            the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. 
It\n                            then fetches the requested secrets from Vault for use in the next step.\n                          properties:\n                            fields:\n                              description: |-\n                                The fields are Vault keys pointing to the secrets passed to the next\n                                SecretSource step.\n\n                                Example 1 (TPP, username and password): imagining that you have stored\n                                the username and password for TPP under the keys \"username\" and\n                                \"password\", you will want to set this field to `[\"username\",\n                                \"password\"]`. The username is expected to be given first, the password\n                                second.\n                              items:\n                                type: string\n                              type: array\n                            secretPath:\n                              description: |-\n                                The full HTTP path to the secret in Vault. Example:\n                                /v1/secret/data/application-team-a/tpp-username-password\n                              type: string\n                            url:\n                              description: The URL to connect to your HashiCorp Vault\n                                instance.\n                              type: string\n                          required:\n                          - fields\n                          - secretPath\n                          type: object\n                        secret:\n                          description: |-\n                            Secret is a SecretSource step meant to be the first step. 
It retrieves secret
                            values from a Kubernetes Secret, and passes them to the next step.
                          properties:
                            fields:
                              description: |-
                                The names of the fields we want to extract from the Kubernetes secret.
                                These fields are passed to the next step in the chain.
                              items:
                                type: string
                              type: array
                            name:
                              description: The name of the Kubernetes secret.
                              type: string
                          required:
                          - fields
                          - name
                          type: object
                        serviceAccountToken:
                          description: |-
                            ServiceAccountToken is a SecretSource step meant to be the first step. It
                            uses the Kubernetes TokenRequest API to retrieve a token for a given service
                            account, and passes it to the next step.
                          properties:
                            audiences:
                              description: |-
                                Audiences are the intended audiences of the token. A recipient of a
                                token must identify themself with an identifier in the list of
                                audiences of the token, and otherwise should reject the token. 
A\n                                token issued for multiple audiences may be used to authenticate\n                                against any of the audiences listed but implies a high degree of\n                                trust between the target audiences.\n                              items:\n                                type: string\n                              type: array\n                            expirationSeconds:\n                              description: |-\n                                ExpirationSeconds is the requested duration of validity of the request. The\n                                token issuer may return a token with a different validity duration so a\n                                client needs to check the 'expiration' field in a response.\n                              format: int64\n                              type: integer\n                            name:\n                              description: The name of the Kubernetes service account.\n                              type: string\n                          required:\n                          - audiences\n                          - name\n                          type: object\n                        tppOAuth:\n                          description: |-\n                            TPPOAuth is a SecretSource step that authenticates to a TPP server. This\n                            step is meant to be the last step and requires a prior step that depends\n                            on the `authInputType`.\n                          properties:\n                            authInputType:\n                              description: |-\n                                AuthInputType is the authentication method to be used to authenticate\n                                with TPP. 
The supported values are \"UsernamePassword\" and \"JWT\".\n                              enum:\n                              - UsernamePassword\n                              - JWT\n                              type: string\n                            clientId:\n                              description: ClientID is the clientId used to authenticate\n                                with TPP.\n                              type: string\n                            url:\n                              description: |-\n                                The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs\n                                https://tpp.example.com and https://tpp.example.com/vedsdk are\n                                equivalent. The ending `/vedsdk` is optional and is stripped out\n                                by our client.\n                                If not set, defaults to the URL defined at the top-level of the\n                                TPP configuration.\n                              type: string\n                          required:\n                          - authInputType\n                          type: object\n                        vcpOAuth:\n                          description: |-\n                            VCPOAuth is a SecretSource step that authenticates to the\n                            Certificate Manager, SaaS. 
This step is meant to be the last step and requires a prior step\n                            that outputs a JWT token.\n                          properties:\n                            tenantID:\n                              description: TenantID is the tenant ID used to authenticate\n                                with Certificate Manager, SaaS.\n                              type: string\n                          type: object\n                      type: object\n                      x-kubernetes-validations:\n                      - message: must have exactly one field set\n                        rule: '((has(self.secret) ? 1 : 0) + (has(self.serviceAccountToken)\n                          ? 1 : 0) + (has(self.hashicorpVaultOAuth) ? 1 : 0) + (has(self.hashicorpVaultSecret)\n                          ? 1 : 0) + (has(self.hashicorpVaultLDAP) ? 1 : 0) + (has(self.tppOAuth)\n                          ? 1 : 0) + (has(self.vcpOAuth) ? 1 : 0)) == 1'\n                    maxItems: 50\n                    type: array\n                    x-kubernetes-list-type: atomic\n                  apiKey:\n                    description: |-\n                      The list of steps to retrieve the API key that will be used to connect to\n                      Certificate Manager, SaaS.\n                    items:\n                      properties:\n                        hashicorpVaultLDAP:\n                          description: |-\n                            HashicorpVaultLDAP is a SecretSource step that requires a Vault token in\n                            the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It\n                            then fetches the requested secrets from Vault for use in the next step.\n                          properties:\n                            ldapPath:\n                              description: |-\n                                The full HTTP path to the secret in Vault. 
Example:\n                                /v1/ldap/static-cred/:role_name\n                                or\n                                /v1/ldap/creds/:role_name\n                              type: string\n                            url:\n                              description: The URL to connect to your HashiCorp Vault\n                                instance.\n                              type: string\n                          required:\n                          - ldapPath\n                          type: object\n                        hashicorpVaultOAuth:\n                          description: |-\n                            HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource\n                            step to provide an OAuth token, which this step uses to authenticate to\n                            Vault. The output of this step is a Vault token. This step allows you to use\n                            the step `HashicorpVaultSecret` afterwards.\n                          properties:\n                            authInputType:\n                              description: |-\n                                AuthInputType is the authentication method to be used to authenticate\n                                with HashiCorp Vault. The only supported value is \"OIDC\".\n                              enum:\n                              - OIDC\n                              type: string\n                            authPath:\n                              description: |-\n                                The login URL used for obtaining the Vault token. 
Example:\n                                /v1/auth/oidc/login\n                              type: string\n                            clientId:\n                              description: 'Deprecated: This field does nothing and\n                                will be removed in the future.'\n                              type: string\n                            role:\n                              description: |-\n                                The role defined in Vault that we want to use when authenticating to\n                                Vault.\n                              type: string\n                            url:\n                              description: The URL to connect to your HashiCorp Vault\n                                instance.\n                              type: string\n                          required:\n                          - authInputType\n                          - authPath\n                          - role\n                          type: object\n                        hashicorpVaultSecret:\n                          description: |-\n                            HashicorpVaultSecret is a SecretSource step that requires a Vault token in\n                            the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. 
It\n                            then fetches the requested secrets from Vault for use in the next step.\n                          properties:\n                            fields:\n                              description: |-\n                                The fields are Vault keys pointing to the secrets passed to the next\n                                SecretSource step.\n\n                                Example 1 (TPP, username and password): imagining that you have stored\n                                the username and password for TPP under the keys \"username\" and\n                                \"password\", you will want to set this field to `[\"username\",\n                                \"password\"]`. The username is expected to be given first, the password\n                                second.\n                              items:\n                                type: string\n                              type: array\n                            secretPath:\n                              description: |-\n                                The full HTTP path to the secret in Vault. Example:\n                                /v1/secret/data/application-team-a/tpp-username-password\n                              type: string\n                            url:\n                              description: The URL to connect to your HashiCorp Vault\n                                instance.\n                              type: string\n                          required:\n                          - fields\n                          - secretPath\n                          type: object\n                        secret:\n                          description: |-\n                            Secret is a SecretSource step meant to be the first step. 
It retrieves secret\n                            values from a Kubernetes Secret, and passes them to the next step.\n                          properties:\n                            fields:\n                              description: |-\n                                The names of the fields we want to extract from the Kubernetes secret.\n                                These fields are passed to the next step in the chain.\n                              items:\n                                type: string\n                              type: array\n                            name:\n                              description: The name of the Kubernetes secret.\n                              type: string\n                          required:\n                          - fields\n                          - name\n                          type: object\n                        serviceAccountToken:\n                          description: |-\n                            ServiceAccountToken is a SecretSource step meant to be the first step. It\n                            uses the Kubernetes TokenRequest API to retrieve a token for a given service\n                            account, and passes it to the next step.\n                          properties:\n                            audiences:\n                              description: |-\n                                Audiences are the intendend audiences of the token. A recipient of a\n                                token must identify themself with an identifier in the list of\n                                audiences of the token, and otherwise should reject the token. 
A\n                                token issued for multiple audiences may be used to authenticate\n                                against any of the audiences listed but implies a high degree of\n                                trust between the target audiences.\n                              items:\n                                type: string\n                              type: array\n                            expirationSeconds:\n                              description: |-\n                                ExpirationSeconds is the requested duration of validity of the request. The\n                                token issuer may return a token with a different validity duration so a\n                                client needs to check the 'expiration' field in a response.\n                              format: int64\n                              type: integer\n                            name:\n                              description: The name of the Kubernetes service account.\n                              type: string\n                          required:\n                          - audiences\n                          - name\n                          type: object\n                        tppOAuth:\n                          description: |-\n                            TPPOAuth is a SecretSource step that authenticates to a TPP server. This\n                            step is meant to be the last step and requires a prior step that depends\n                            on the `authInputType`.\n                          properties:\n                            authInputType:\n                              description: |-\n                                AuthInputType is the authentication method to be used to authenticate\n                                with TPP. 
The supported values are \"UsernamePassword\" and \"JWT\".\n                              enum:\n                              - UsernamePassword\n                              - JWT\n                              type: string\n                            clientId:\n                              description: ClientID is the clientId used to authenticate\n                                with TPP.\n                              type: string\n                            url:\n                              description: |-\n                                The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs\n                                https://tpp.example.com and https://tpp.example.com/vedsdk are\n                                equivalent. The ending `/vedsdk` is optional and is stripped out\n                                by our client.\n                                If not set, defaults to the URL defined at the top-level of the\n                                TPP configuration.\n                              type: string\n                          required:\n                          - authInputType\n                          type: object\n                        vcpOAuth:\n                          description: |-\n                            VCPOAuth is a SecretSource step that authenticates to the\n                            Certificate Manager, SaaS. 
This step is meant to be the last step and requires a prior step\n                            that outputs a JWT token.\n                          properties:\n                            tenantID:\n                              description: TenantID is the tenant ID used to authenticate\n                                with Certificate Manager, SaaS.\n                              type: string\n                          type: object\n                      type: object\n                      x-kubernetes-validations:\n                      - message: must have exactly one field set\n                        rule: '((has(self.secret) ? 1 : 0) + (has(self.serviceAccountToken)\n                          ? 1 : 0) + (has(self.hashicorpVaultOAuth) ? 1 : 0) + (has(self.hashicorpVaultSecret)\n                          ? 1 : 0) + (has(self.hashicorpVaultLDAP) ? 1 : 0) + (has(self.tppOAuth)\n                          ? 1 : 0) + (has(self.vcpOAuth) ? 1 : 0)) == 1'\n                    maxItems: 50\n                    type: array\n                    x-kubernetes-list-type: atomic\n                  url:\n                    description: |-\n                      The URL to connect to the Certificate Manager, SaaS instance. If not set, the default\n                      value https://api.venafi.cloud is used.\n                    type: string\n                type: object\n                x-kubernetes-validations:\n                - message: 'must have exactly ONE of the following fields set: apiKey\n                    or accessToken'\n                  rule: '(has(self.apiKey) ? 1 : 0) + (has(self.accessToken) ? 
1 :\n                    0) == 1'\n              vcp:\n                properties:\n                  accessToken:\n                    description: |-\n                      The list of steps to retrieve the Access Token that will be used to connect\n                      to Certificate Manager, SaaS.\n                    items:\n                      properties:\n                        hashicorpVaultLDAP:\n                          description: |-\n                            HashicorpVaultLDAP is a SecretSource step that requires a Vault token in\n                            the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It\n                            then fetches the requested secrets from Vault for use in the next step.\n                          properties:\n                            ldapPath:\n                              description: |-\n                                The full HTTP path to the secret in Vault. Example:\n                                /v1/ldap/static-cred/:role_name\n                                or\n                                /v1/ldap/creds/:role_name\n                              type: string\n                            url:\n                              description: The URL to connect to your HashiCorp Vault\n                                instance.\n                              type: string\n                          required:\n                          - ldapPath\n                          type: object\n                        hashicorpVaultOAuth:\n                          description: |-\n                            HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource\n                            step to provide an OAuth token, which this step uses to authenticate to\n                            Vault. The output of this step is a Vault token. 
This step allows you to use\n                            the step `HashicorpVaultSecret` afterwards.\n                          properties:\n                            authInputType:\n                              description: |-\n                                AuthInputType is the authentication method to be used to authenticate\n                                with HashiCorp Vault. The only supported value is \"OIDC\".\n                              enum:\n                              - OIDC\n                              type: string\n                            authPath:\n                              description: |-\n                                The login URL used for obtaining the Vault token. Example:\n                                /v1/auth/oidc/login\n                              type: string\n                            clientId:\n                              description: 'Deprecated: This field does nothing and\n                                will be removed in the future.'\n                              type: string\n                            role:\n                              description: |-\n                                The role defined in Vault that we want to use when authenticating to\n                                Vault.\n                              type: string\n                            url:\n                              description: The URL to connect to your HashiCorp Vault\n                                instance.\n                              type: string\n                          required:\n                          - authInputType\n                          - authPath\n                          - role\n                          type: object\n                        hashicorpVaultSecret:\n                          description: |-\n                            HashicorpVaultSecret is a SecretSource step that requires a Vault token in\n                            the previous step, either using a step 
`HashicorpVaultOAuth` or `Secret`. It\n                            then fetches the requested secrets from Vault for use in the next step.\n                          properties:\n                            fields:\n                              description: |-\n                                The fields are Vault keys pointing to the secrets passed to the next\n                                SecretSource step.\n\n                                Example 1 (TPP, username and password): imagining that you have stored\n                                the username and password for TPP under the keys \"username\" and\n                                \"password\", you will want to set this field to `[\"username\",\n                                \"password\"]`. The username is expected to be given first, the password\n                                second.\n                              items:\n                                type: string\n                              type: array\n                            secretPath:\n                              description: |-\n                                The full HTTP path to the secret in Vault. Example:\n                                /v1/secret/data/application-team-a/tpp-username-password\n                              type: string\n                            url:\n                              description: The URL to connect to your HashiCorp Vault\n                                instance.\n                              type: string\n                          required:\n                          - fields\n                          - secretPath\n                          type: object\n                        secret:\n                          description: |-\n                            Secret is a SecretSource step meant to be the first step. 
It retrieves secret\n                            values from a Kubernetes Secret, and passes them to the next step.\n                          properties:\n                            fields:\n                              description: |-\n                                The names of the fields we want to extract from the Kubernetes secret.\n                                These fields are passed to the next step in the chain.\n                              items:\n                                type: string\n                              type: array\n                            name:\n                              description: The name of the Kubernetes secret.\n                              type: string\n                          required:\n                          - fields\n                          - name\n                          type: object\n                        serviceAccountToken:\n                          description: |-\n                            ServiceAccountToken is a SecretSource step meant to be the first step. It\n                            uses the Kubernetes TokenRequest API to retrieve a token for a given service\n                            account, and passes it to the next step.\n                          properties:\n                            audiences:\n                              description: |-\n                                Audiences are the intendend audiences of the token. A recipient of a\n                                token must identify themself with an identifier in the list of\n                                audiences of the token, and otherwise should reject the token. 
A\n                                token issued for multiple audiences may be used to authenticate\n                                against any of the audiences listed but implies a high degree of\n                                trust between the target audiences.\n                              items:\n                                type: string\n                              type: array\n                            expirationSeconds:\n                              description: |-\n                                ExpirationSeconds is the requested duration of validity of the request. The\n                                token issuer may return a token with a different validity duration so a\n                                client needs to check the 'expiration' field in a response.\n                              format: int64\n                              type: integer\n                            name:\n                              description: The name of the Kubernetes service account.\n                              type: string\n                          required:\n                          - audiences\n                          - name\n                          type: object\n                        tppOAuth:\n                          description: |-\n                            TPPOAuth is a SecretSource step that authenticates to a TPP server. This\n                            step is meant to be the last step and requires a prior step that depends\n                            on the `authInputType`.\n                          properties:\n                            authInputType:\n                              description: |-\n                                AuthInputType is the authentication method to be used to authenticate\n                                with TPP. 
The supported values are \"UsernamePassword\" and \"JWT\".\n                              enum:\n                              - UsernamePassword\n                              - JWT\n                              type: string\n                            clientId:\n                              description: ClientID is the clientId used to authenticate\n                                with TPP.\n                              type: string\n                            url:\n                              description: |-\n                                The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs\n                                https://tpp.example.com and https://tpp.example.com/vedsdk are\n                                equivalent. The ending `/vedsdk` is optional and is stripped out\n                                by our client.\n                                If not set, defaults to the URL defined at the top-level of the\n                                TPP configuration.\n                              type: string\n                          required:\n                          - authInputType\n                          type: object\n                        vcpOAuth:\n                          description: |-\n                            VCPOAuth is a SecretSource step that authenticates to the\n                            Certificate Manager, SaaS. 
This step is meant to be the last step and requires a prior step\n                            that outputs a JWT token.\n                          properties:\n                            tenantID:\n                              description: TenantID is the tenant ID used to authenticate\n                                with Certificate Manager, SaaS.\n                              type: string\n                          type: object\n                      type: object\n                      x-kubernetes-validations:\n                      - message: must have exactly one field set\n                        rule: '((has(self.secret) ? 1 : 0) + (has(self.serviceAccountToken)\n                          ? 1 : 0) + (has(self.hashicorpVaultOAuth) ? 1 : 0) + (has(self.hashicorpVaultSecret)\n                          ? 1 : 0) + (has(self.hashicorpVaultLDAP) ? 1 : 0) + (has(self.tppOAuth)\n                          ? 1 : 0) + (has(self.vcpOAuth) ? 1 : 0)) == 1'\n                    maxItems: 50\n                    type: array\n                    x-kubernetes-list-type: atomic\n                  apiKey:\n                    description: |-\n                      The list of steps to retrieve the API key that will be used to connect to\n                      Certificate Manager, SaaS.\n                    items:\n                      properties:\n                        hashicorpVaultLDAP:\n                          description: |-\n                            HashicorpVaultLDAP is a SecretSource step that requires a Vault token in\n                            the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It\n                            then fetches the requested secrets from Vault for use in the next step.\n                          properties:\n                            ldapPath:\n                              description: |-\n                                The full HTTP path to the secret in Vault. 
Example:\n                                /v1/ldap/static-cred/:role_name\n                                or\n                                /v1/ldap/creds/:role_name\n                              type: string\n                            url:\n                              description: The URL to connect to your HashiCorp Vault\n                                instance.\n                              type: string\n                          required:\n                          - ldapPath\n                          type: object\n                        hashicorpVaultOAuth:\n                          description: |-\n                            HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource\n                            step to provide an OAuth token, which this step uses to authenticate to\n                            Vault. The output of this step is a Vault token. This step allows you to use\n                            the step `HashicorpVaultSecret` afterwards.\n                          properties:\n                            authInputType:\n                              description: |-\n                                AuthInputType is the authentication method to be used to authenticate\n                                with HashiCorp Vault. The only supported value is \"OIDC\".\n                              enum:\n                              - OIDC\n                              type: string\n                            authPath:\n                              description: |-\n                                The login URL used for obtaining the Vault token. 
Example:\n                                /v1/auth/oidc/login\n                              type: string\n                            clientId:\n                              description: 'Deprecated: This field does nothing and\n                                will be removed in the future.'\n                              type: string\n                            role:\n                              description: |-\n                                The role defined in Vault that we want to use when authenticating to\n                                Vault.\n                              type: string\n                            url:\n                              description: The URL to connect to your HashiCorp Vault\n                                instance.\n                              type: string\n                          required:\n                          - authInputType\n                          - authPath\n                          - role\n                          type: object\n                        hashicorpVaultSecret:\n                          description: |-\n                            HashicorpVaultSecret is a SecretSource step that requires a Vault token in\n                            the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. 
It\n                            then fetches the requested secrets from Vault for use in the next step.\n                          properties:\n                            fields:\n                              description: |-\n                                The fields are Vault keys pointing to the secrets passed to the next\n                                SecretSource step.\n\n                                Example 1 (TPP, username and password): imagining that you have stored\n                                the username and password for TPP under the keys \"username\" and\n                                \"password\", you will want to set this field to `[\"username\",\n                                \"password\"]`. The username is expected to be given first, the password\n                                second.\n                              items:\n                                type: string\n                              type: array\n                            secretPath:\n                              description: |-\n                                The full HTTP path to the secret in Vault. Example:\n                                /v1/secret/data/application-team-a/tpp-username-password\n                              type: string\n                            url:\n                              description: The URL to connect to your HashiCorp Vault\n                                instance.\n                              type: string\n                          required:\n                          - fields\n                          - secretPath\n                          type: object\n                        secret:\n                          description: |-\n                            Secret is a SecretSource step meant to be the first step. 
It retrieves secret\n                            values from a Kubernetes Secret, and passes them to the next step.\n                          properties:\n                            fields:\n                              description: |-\n                                The names of the fields we want to extract from the Kubernetes secret.\n                                These fields are passed to the next step in the chain.\n                              items:\n                                type: string\n                              type: array\n                            name:\n                              description: The name of the Kubernetes secret.\n                              type: string\n                          required:\n                          - fields\n                          - name\n                          type: object\n                        serviceAccountToken:\n                          description: |-\n                            ServiceAccountToken is a SecretSource step meant to be the first step. It\n                            uses the Kubernetes TokenRequest API to retrieve a token for a given service\n                            account, and passes it to the next step.\n                          properties:\n                            audiences:\n                              description: |-\n                                Audiences are the intendend audiences of the token. A recipient of a\n                                token must identify themself with an identifier in the list of\n                                audiences of the token, and otherwise should reject the token. 
A\n                                token issued for multiple audiences may be used to authenticate\n                                against any of the audiences listed but implies a high degree of\n                                trust between the target audiences.\n                              items:\n                                type: string\n                              type: array\n                            expirationSeconds:\n                              description: |-\n                                ExpirationSeconds is the requested duration of validity of the request. The\n                                token issuer may return a token with a different validity duration so a\n                                client needs to check the 'expiration' field in a response.\n                              format: int64\n                              type: integer\n                            name:\n                              description: The name of the Kubernetes service account.\n                              type: string\n                          required:\n                          - audiences\n                          - name\n                          type: object\n                        tppOAuth:\n                          description: |-\n                            TPPOAuth is a SecretSource step that authenticates to a TPP server. This\n                            step is meant to be the last step and requires a prior step that depends\n                            on the `authInputType`.\n                          properties:\n                            authInputType:\n                              description: |-\n                                AuthInputType is the authentication method to be used to authenticate\n                                with TPP. 
The supported values are \"UsernamePassword\" and \"JWT\".\n                              enum:\n                              - UsernamePassword\n                              - JWT\n                              type: string\n                            clientId:\n                              description: ClientID is the clientId used to authenticate\n                                with TPP.\n                              type: string\n                            url:\n                              description: |-\n                                The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs\n                                https://tpp.example.com and https://tpp.example.com/vedsdk are\n                                equivalent. The ending `/vedsdk` is optional and is stripped out\n                                by our client.\n                                If not set, defaults to the URL defined at the top-level of the\n                                TPP configuration.\n                              type: string\n                          required:\n                          - authInputType\n                          type: object\n                        vcpOAuth:\n                          description: |-\n                            VCPOAuth is a SecretSource step that authenticates to the\n                            Certificate Manager, SaaS. 
This step is meant to be the last step and requires a prior step\n                            that outputs a JWT token.\n                          properties:\n                            tenantID:\n                              description: TenantID is the tenant ID used to authenticate\n                                with Certificate Manager, SaaS.\n                              type: string\n                          type: object\n                      type: object\n                      x-kubernetes-validations:\n                      - message: must have exactly one field set\n                        rule: '((has(self.secret) ? 1 : 0) + (has(self.serviceAccountToken)\n                          ? 1 : 0) + (has(self.hashicorpVaultOAuth) ? 1 : 0) + (has(self.hashicorpVaultSecret)\n                          ? 1 : 0) + (has(self.hashicorpVaultLDAP) ? 1 : 0) + (has(self.tppOAuth)\n                          ? 1 : 0) + (has(self.vcpOAuth) ? 1 : 0)) == 1'\n                    maxItems: 50\n                    type: array\n                    x-kubernetes-list-type: atomic\n                  url:\n                    description: |-\n                      The URL to connect to the Certificate Manager, SaaS instance. If not set, the default\n                      value https://api.venafi.cloud is used.\n                    type: string\n                type: object\n                x-kubernetes-validations:\n                - message: 'must have exactly ONE of the following fields set: apiKey\n                    or accessToken'\n                  rule: '(has(self.apiKey) ? 1 : 0) + (has(self.accessToken) ? 1 :\n                    0) == 1'\n            type: object\n            x-kubernetes-validations:\n            - message: 'must have exactly ONE of the following fields set: tpp or\n                vcp'\n              rule: '(has(self.tpp) ? 1 : 0) + (has(self.vaas) ? 1 : 0) + (has(self.vcp)\n                ? 1 : 0) + (has(self.firefly) ? 
1 : 0) == 1'\n          status:\n            properties:\n              conditions:\n                description: List of status conditions to indicate the status of a\n                  VenafiConnection.\n                items:\n                  description: ConnectionCondition contains condition information\n                    for a VenafiConnection.\n                  properties:\n                    lastTransitionTime:\n                      description: |-\n                        LastTransitionTime is the timestamp corresponding to the last status\n                        change of this condition.\n                      format: date-time\n                      type: string\n                    lastUpdateTime:\n                      description: lastUpdateTime is the time of the last update to\n                        this condition\n                      format: date-time\n                      type: string\n                    message:\n                      description: |-\n                        Message is a human readable description of the details of the last\n                        transition, complementing reason.\n                      type: string\n                    observedGeneration:\n                      description: |-\n                        If set, this represents the .metadata.generation that the condition was\n                        set based upon.\n                        For instance, if .metadata.generation is currently 12, but the\n                        .status.condition[x].observedGeneration is 9, the condition is out of date\n                        with respect to the current state of the Issuer.\n                      format: int64\n                      type: integer\n                    reason:\n                      description: |-\n                        Reason is a brief machine readable explanation for the condition's last\n                        transition.\n                      type: string\n                    
status:\n                      description: Status of the condition, one of (`True`, `False`,\n                        `Unknown`).\n                      type: string\n                    tokenValidUntil:\n                      description: |-\n                        The ValidUntil time of the token used to authenticate with the\n                        Certificate Manager, SaaS.\n                      format: date-time\n                      type: string\n                    type:\n                      description: |-\n                        Type of the condition, should be a combination of the unique name of the\n                        operator and the type of condition.\n                        eg. `VenafiEnhancedIssuerReady`\n                      type: string\n                  required:\n                  - status\n                  - type\n                  type: object\n                type: array\n                x-kubernetes-list-map-keys:\n                - type\n                x-kubernetes-list-type: map\n            type: object\n        required:\n        - metadata\n        - spec\n        type: object\n    served: true\n    storage: true\n    subresources:\n      status: {}\n"
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/templates/NOTES.txt",
    "content": "{{- if .Values.config.configmap.name }}\nYou are using a custom configuration in the following ConfigMap: {{ .Values.config.configmap.name | quote }}.\n\nDEPRECATION: The `cluster_id` configuration field is deprecated.\nIf your configuration contains `cluster_id`, it will continue to work as a\nfallback, but please migrate to `cluster_name` to avoid ambiguity.\n{{- end }}\n\n{{- if .Values.authentication.venafiConnection.enabled }}\n- Check the VenafiConnection exists: \"{{ .Values.authentication.venafiConnection.namespace }}/{{ .Values.authentication.venafiConnection.name }}\"\n> kubectl get VenafiConnection -n {{ .Values.authentication.venafiConnection.namespace }} {{ .Values.authentication.venafiConnection.name }}\n{{- else }}\n- Check the credentials Secret exists: \"{{ .Values.authentication.secretName }}\"\n> kubectl get secret -n {{ .Release.Namespace }} {{ .Values.authentication.secretName }}\n{{- end }}\n- Check the application is running:\n> kubectl get pods -n {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}\n\n- Check the application logs for successful connection to the platform:\n> kubectl logs -n {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}\n"
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/templates/_helpers.tpl",
    "content": "{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"venafi-kubernetes-agent.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" }}\n{{- end }}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"venafi-kubernetes-agent.fullname\" -}}\n{{- if .Values.fullnameOverride }}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" }}\n{{- else }}\n{{- $name := default .Chart.Name .Values.nameOverride }}\n{{- if contains $name .Release.Name }}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" }}\n{{- else }}\n{{- printf \"%s-%s\" $name .Release.Name | trunc 63 | trimSuffix \"-\" }}\n{{- end }}\n{{- end }}\n{{- end }}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"venafi-kubernetes-agent.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" }}\n{{- end }}\n\n{{/*\nCommon labels\n*/}}\n{{- define \"venafi-kubernetes-agent.labels\" -}}\nhelm.sh/chart: {{ include \"venafi-kubernetes-agent.chart\" . }}\n{{ include \"venafi-kubernetes-agent.selectorLabels\" . }}\n{{- if .Chart.AppVersion }}\napp.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n{{- end }}\napp.kubernetes.io/managed-by: {{ .Release.Service }}\n{{- end }}\n\n{{/*\nSelector labels\n*/}}\n{{- define \"venafi-kubernetes-agent.selectorLabels\" -}}\napp.kubernetes.io/name: {{ include \"venafi-kubernetes-agent.name\" . }}\napp.kubernetes.io/instance: {{ .Release.Name }}\n{{- end }}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"venafi-kubernetes-agent.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create }}\n{{- default (include \"venafi-kubernetes-agent.fullname\" .) 
.Values.serviceAccount.name }}\n{{- else }}\n{{- default \"default\" .Values.serviceAccount.name }}\n{{- end }}\n{{- end }}\n\n{{/*\nUtil function for generating an image reference based on the provided options.\nThis function is derived from similar functions used in the cert-manager GitHub organization\n*/}}\n{{- define \"venafi-kubernetes-agent.image\" -}}\n{{- /*\nCalling convention:\n- (tuple <imageValues> <imageRegistry> <imageNamespace> <defaultReference>)\nWe intentionally pass imageRegistry/imageNamespace as explicit arguments rather than reading\nfrom `.Values` inside this helper, because `helm-tool lint` does not reliably track `.Values.*`\nusage through tuple/variable indirection.\n*/ -}}\n{{- if ne (len .) 4 -}}\n\t{{- fail (printf \"ERROR: template \\\"venafi-kubernetes-agent.image\\\" expects (tuple <imageValues> <imageRegistry> <imageNamespace> <defaultReference>), got %d arguments\" (len .)) -}}\n{{- end -}}\n{{- $image := index . 0 -}}\n{{- $imageRegistry := index . 1 | default \"\" -}}\n{{- $imageNamespace := index . 2 | default \"\" -}}\n{{- $defaultReference := index . 
3 -}}\n{{- $repository := \"\" -}}\n{{- if $image.repository -}}\n\t{{- $repository = $image.repository -}}\n\t{{- /*\n\t\tBackwards compatibility: if image.registry is set, additionally prefix the repository with this registry.\n\t*/ -}}\n\t{{- if $image.registry -}}\n\t\t{{- $repository = printf \"%s/%s\" $image.registry $repository -}}\n\t{{- end -}}\n{{- else -}}\n\t{{- $name := required \"ERROR: image.name must be set when image.repository is empty\" $image.name -}}\n\t{{- $repository = $name -}}\n\t{{- if $imageNamespace -}}\n\t\t{{- $repository = printf \"%s/%s\" $imageNamespace $repository -}}\n\t{{- end -}}\n\t{{- if $imageRegistry -}}\n\t\t{{- $repository = printf \"%s/%s\" $imageRegistry $repository -}}\n\t{{- end -}}\n\t{{- /*\n\t\tBackwards compatibility: if image.registry is set, additionally prefix the repository with this registry.\n\t*/ -}}\n\t{{- if $image.registry -}}\n\t\t{{- $repository = printf \"%s/%s\" $image.registry $repository -}}\n\t{{- end -}}\n{{- end -}}\n{{- $repository -}}\n{{- if and $image.tag $image.digest -}}\n\t{{- printf \":%s@%s\" $image.tag $image.digest -}}\n{{- else if $image.tag -}}\n\t{{- printf \":%s\" $image.tag -}}\n{{- else if $image.digest -}}\n\t{{- printf \"@%s\" $image.digest -}}\n{{- else -}}\n\t{{- printf \"%s\" $defaultReference -}}\n{{- end -}}\n{{- end }}\n"
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/templates/_venafi-connection.tpl",
    "content": "{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"venafi-connection.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" }}\n{{- end }}\n\n{{/*\nCommon labels\n*/}}\n{{- define \"venafi-connection.labels\" -}}\nhelm.sh/chart: {{ include \"venafi-connection.chart\" . }}\n{{ include \"venafi-connection.selectorLabels\" . }}\n{{- if .Chart.AppVersion }}\napp.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n{{- end }}\napp.kubernetes.io/managed-by: {{ .Release.Service }}\n{{- end }}\n\n{{/*\nSelector labels\n*/}}\n{{- define \"venafi-connection.selectorLabels\" -}}\napp.kubernetes.io/name: \"venafi-connection\"\napp.kubernetes.io/instance: {{ .Release.Name }}\n{{- end }}\n"
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/templates/configmap.yaml",
    "content": "{{ if not .Values.config.configmap.name }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: agent-config\n  namespace: {{ .Release.Namespace }}\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\ndata:\n  config.yaml: |-\n    cluster_name: {{ .Values.config.clusterName | quote }}\n    cluster_description: {{ .Values.config.clusterDescription | quote }}\n    server: {{ .Values.config.server | quote }}\n    period: {{ .Values.config.period | quote }}\n    {{- with .Values.config.excludeAnnotationKeysRegex }}\n    exclude-annotation-keys-regex:\n      {{- . | toYaml | nindent 6 }}\n    {{- end }}\n    {{- with .Values.config.excludeLabelKeysRegex }}\n    exclude-label-keys-regex:\n      {{- . | toYaml | nindent 6 }}\n    {{- end }}\n    venafi-cloud:\n      uploader_id: \"no\"\n      upload_path: \"/v1/tlspk/upload/clusterdata\"\n    data-gatherers:\n    # gather k8s apiserver version information\n    - kind: \"k8s-discovery\"\n      name: \"k8s-discovery\"\n    # pods data is used in the pods and application_versions packages\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/pods\"\n      config:\n        resource-type:\n          resource: pods\n          version: v1\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/namespaces\"\n      config:\n        resource-type:\n          resource: namespaces\n          version: v1\n    # gather services for pod readiness probe rules\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/services\"\n      config:\n        resource-type:\n          resource: services\n          version: v1\n    # gather higher level resources to ensure data to determine ownership is present\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/deployments\"\n      config:\n        resource-type:\n          version: v1\n          resource: deployments\n          group: apps\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/statefulsets\"\n      config:\n        resource-type:\n          version: v1\n          
resource: statefulsets\n          group: apps\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/daemonsets\"\n      config:\n        resource-type:\n          version: v1\n          resource: daemonsets\n          group: apps\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/jobs\"\n      config:\n        resource-type:\n          version: v1\n          resource: jobs\n          group: batch\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/cronjobs\"\n      config:\n        resource-type:\n          version: v1\n          resource: cronjobs\n          group: batch\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/ingresses\"\n      config:\n        resource-type:\n          group: networking.k8s.io\n          version: v1\n          resource: ingresses\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/secrets\"\n      config:\n        resource-type:\n          version: v1\n          resource: secrets\n        {{- with .Values.config.ignoredSecretTypes }}\n        field-selectors:\n        {{- range . }}\n        - type!={{ . 
}}\n        {{- end }}\n        {{- end }}\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/certificates\"\n      config:\n        resource-type:\n          group: cert-manager.io\n          version: v1\n          resource: certificates\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/certificaterequests\"\n      config:\n        resource-type:\n          group: cert-manager.io\n          version: v1\n          resource: certificaterequests\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/issuers\"\n      config:\n        resource-type:\n          group: cert-manager.io\n          version: v1\n          resource: issuers\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/clusterissuers\"\n      config:\n        resource-type:\n          group: cert-manager.io\n          version: v1\n          resource: clusterissuers\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/googlecasissuers\"\n      config:\n        resource-type:\n          group: cas-issuer.jetstack.io\n          version: v1beta1\n          resource: googlecasissuers\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/googlecasclusterissuers\"\n      config:\n        resource-type:\n          group: cas-issuer.jetstack.io\n          version: v1beta1\n          resource: googlecasclusterissuers\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/awspcaissuer\"\n      config:\n        resource-type:\n          group: awspca.cert-manager.io\n          version: v1beta1\n          resource: awspcaissuers\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/awspcaclusterissuers\"\n      config:\n        resource-type:\n          group: awspca.cert-manager.io\n          version: v1beta1\n          resource: awspcaclusterissuers\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/mutatingwebhookconfigurations\"\n      config:\n        resource-type:\n          group: admissionregistration.k8s.io\n          version: v1\n          resource: mutatingwebhookconfigurations\n    - kind: \"k8s-dynamic\"\n      name: 
\"k8s/validatingwebhookconfigurations\"\n      config:\n        resource-type:\n          group: admissionregistration.k8s.io\n          version: v1\n          resource: validatingwebhookconfigurations\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/gateways\"\n      config:\n        resource-type:\n          group: networking.istio.io\n          version: v1alpha3\n          resource: gateways\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/virtualservices\"\n      config:\n        resource-type:\n          group: networking.istio.io\n          version: v1alpha3\n          resource: virtualservices\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/routes\"\n      config:\n        resource-type:\n          version: v1\n          group: route.openshift.io\n          resource: routes\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/venaficonnections\"\n      config:\n        resource-type:\n          group: jetstack.io\n          version: v1alpha1\n          resource: venaficonnections\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/venaficlusterissuers\"\n      config:\n        resource-type:\n          group: jetstack.io\n          version: v1alpha1\n          resource: venaficlusterissuers\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/venafiissuers\"\n      config:\n        resource-type:\n          group: jetstack.io\n          version: v1alpha1\n          resource: venafiissuers\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/fireflyissuers\"\n      config:\n        resource-type:\n          group: firefly.venafi.com\n          version: v1\n          resource: issuers\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/stepissuers\"\n      config:\n        resource-type:\n          group: certmanager.step.sm\n          version: v1beta1\n          resource: stepissuers\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/stepclusterissuers\"\n      config:\n        resource-type:\n          group: certmanager.step.sm\n          version: v1beta1\n          resource: 
stepclusterissuers\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/originissuers\"\n      config:\n        resource-type:\n          group: cert-manager.k8s.cloudflare.com\n          version: v1\n          resource: originissuers\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/clusteroriginissuers\"\n      config:\n        resource-type:\n          group: cert-manager.k8s.cloudflare.com\n          version: v1\n          resource: clusteroriginissuers\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/freeipaissuers\"\n      config:\n        resource-type:\n          group: certmanager.freeipa.org\n          version: v1beta1\n          resource: issuers\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/freeipaclusterissuers\"\n      config:\n        resource-type:\n          group: certmanager.freeipa.org\n          version: v1beta1\n          resource: clusterissuers\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/ejbcaissuers\"\n      config:\n        resource-type:\n          group: ejbca-issuer.keyfactor.com\n          version: v1alpha1\n          resource: issuers\n    - kind: \"k8s-dynamic\"\n      name: \"k8s/ejbcaclusterissuers\"\n      config:\n        resource-type:\n          group: ejbca-issuer.keyfactor.com\n          version: v1alpha1\n          resource: clusterissuers\n{{- end }}\n"
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/templates/deployment.yaml",
    "content": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}\n  namespace: {{ .Release.Namespace }}\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nspec:\n  replicas: {{ .Values.replicaCount }}\n  selector:\n    matchLabels:\n      {{- include \"venafi-kubernetes-agent.selectorLabels\" . | nindent 6 }}\n  template:\n    metadata:\n      {{- with .Values.podAnnotations }}\n      annotations:\n        {{- toYaml . | nindent 8 }}\n      {{- end }}\n      labels:\n        {{- include \"venafi-kubernetes-agent.selectorLabels\" . | nindent 8 }}\n    spec:\n      {{- with .Values.imagePullSecrets }}\n      imagePullSecrets:\n        {{- toYaml . | nindent 8 }}\n      {{- end }}\n      serviceAccountName: {{ include \"venafi-kubernetes-agent.serviceAccountName\" . }}\n      securityContext:\n        {{- toYaml .Values.podSecurityContext | nindent 8 }}\n      containers:\n        - name: {{ .Chart.Name }}\n          securityContext:\n            {{- toYaml .Values.securityContext | nindent 12 }}\n          image: \"{{ template \"venafi-kubernetes-agent.image\" (tuple .Values.image .Values.imageRegistry .Values.imageNamespace (printf \":%s\" .Chart.AppVersion)) }}\"\n          imagePullPolicy: {{ .Values.image.pullPolicy }}\n          env:\n          - name: POD_NAMESPACE\n            valueFrom:\n              fieldRef:\n                fieldPath: metadata.namespace\n          - name: POD_NAME\n            valueFrom:\n              fieldRef:\n                fieldPath: metadata.name\n          - name: POD_UID\n            valueFrom:\n              fieldRef:\n                fieldPath: metadata.uid\n          - name: POD_NODE\n            valueFrom:\n              fieldRef:\n                fieldPath: spec.nodeName\n          {{- with .Values.http_proxy }}\n          - name: HTTP_PROXY\n            value: {{ . 
}}\n          {{- end }}\n          {{- with .Values.https_proxy }}\n          - name: HTTPS_PROXY\n            value: {{ . }}\n          {{- end }}\n          {{- with .Values.no_proxy }}\n          - name: NO_PROXY\n            value: {{ . }}\n          {{- end }}\n          {{- if not (empty .Values.command) }}\n          command:\n          {{- range .Values.command }}\n            - {{ . | quote }}\n          {{- end }}\n          {{- end }}\n          args:\n            - \"agent\"\n            - \"-c\"\n            - \"/etc/venafi/agent/config/{{ default \"config.yaml\" .Values.config.configmap.key }}\"\n            {{- if .Values.authentication.venafiConnection.enabled }}\n            - --venafi-connection\n            - {{ .Values.authentication.venafiConnection.name | quote }}\n            - --venafi-connection-namespace\n            - {{ .Values.authentication.venafiConnection.namespace | quote }}\n            {{- else }}\n            - \"--client-id\"\n            - {{ .Values.config.clientId | quote }}\n            - \"--private-key-path\"\n            - \"/etc/venafi/agent/key/{{ .Values.authentication.secretKey }}\"\n            {{- end }}\n            - --venafi-cloud\n            {{- if .Values.metrics.enabled }}\n            - --enable-metrics\n            {{- end }}\n            {{- range .Values.extraArgs }}\n            - {{ . | quote }}\n            {{- end }}\n          resources:\n            {{- toYaml .Values.resources | nindent 12 }}\n          volumeMounts:\n            - name: config\n              mountPath: \"/etc/venafi/agent/config\"\n              readOnly: true\n            {{- if not .Values.authentication.venafiConnection.enabled }}\n            - name: credentials\n              mountPath: \"/etc/venafi/agent/key\"\n              readOnly: true\n            {{- end }}\n            {{- with .Values.volumeMounts }}\n            {{- toYaml . 
| nindent 12 }}\n            {{- end }}\n          {{- if .Values.metrics.enabled }}\n          ports:\n            - containerPort: 8081\n              name: http-metrics\n          {{- end }}\n          livenessProbe:\n            httpGet:\n              path: /healthz\n              port: 8081\n            initialDelaySeconds: 15\n            periodSeconds: 20\n          readinessProbe:\n            httpGet:\n              path: /readyz\n              port: 8081\n            initialDelaySeconds: 5\n            periodSeconds: 10\n      {{- with .Values.nodeSelector }}\n      nodeSelector:\n        {{- toYaml . | nindent 8 }}\n      {{- end }}\n      {{- with .Values.affinity }}\n      affinity:\n        {{- toYaml . | nindent 8 }}\n      {{- end }}\n      {{- with .Values.tolerations }}\n      tolerations:\n        {{- toYaml . | nindent 8 }}\n      {{- end }}\n      volumes:\n        - name: config\n          configMap:\n            name: {{ default \"agent-config\" .Values.config.configmap.name }}\n            optional: false\n        {{- if not .Values.authentication.venafiConnection.enabled }}\n        - name: credentials\n          secret:\n            secretName: {{ .Values.authentication.secretName }}\n            optional: false\n        {{- end }}\n        {{- with .Values.volumes }}\n        {{- toYaml . | nindent 8 }}\n        {{- end }}\n"
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/templates/poddisruptionbudget.yaml",
    "content": "{{- if .Values.podDisruptionBudget.enabled }}\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}\n  namespace: {{ .Release.Namespace }}\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nspec:\n  selector:\n    matchLabels:\n      {{- include \"venafi-kubernetes-agent.selectorLabels\" . | nindent 6 }}\n\n  {{- if not (or (hasKey .Values.podDisruptionBudget \"minAvailable\") (hasKey .Values.podDisruptionBudget \"maxUnavailable\")) }}\n  minAvailable: 1 # Default value because minAvailable and maxUnavailable are not set\n  {{- end }}\n  {{- if hasKey .Values.podDisruptionBudget \"minAvailable\" }}\n  minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}\n  {{- end }}\n  {{- if hasKey .Values.podDisruptionBudget \"maxUnavailable\" }}\n  maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }}\n  {{- end }}\n{{- end }}\n"
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/templates/podmonitor.yaml",
    "content": "{{- if and .Values.metrics.enabled .Values.metrics.podmonitor.enabled }}\napiVersion: monitoring.coreos.com/v1\nkind: PodMonitor\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}\n{{- if .Values.metrics.podmonitor.namespace }}\n  namespace: {{ .Values.metrics.podmonitor.namespace }}\n{{- else }}\n  namespace: {{ .Release.Namespace | quote }}\n{{- end }}\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\n    prometheus: {{ .Values.metrics.podmonitor.prometheusInstance }}\n    {{- with .Values.metrics.podmonitor.labels }}\n    {{- toYaml . | nindent 4 }}\n    {{- end }}\n{{- with .Values.metrics.podmonitor.annotations }}\n  annotations:\n    {{- toYaml . | nindent 4 }}\n{{- end }}\nspec:\n  jobLabel: {{ include \"venafi-kubernetes-agent.fullname\" . }}\n  selector:\n    matchLabels:\n      {{- include \"venafi-kubernetes-agent.selectorLabels\" . | nindent 6 }}\n{{- if .Values.metrics.podmonitor.namespace }}\n  namespaceSelector:\n    matchNames:\n      - {{ .Release.Namespace | quote }}\n{{- end }}\n  podMetricsEndpoints:\n    - port: http-metrics\n      path: /metrics\n      interval: {{ .Values.metrics.podmonitor.interval }}\n      scrapeTimeout: {{ .Values.metrics.podmonitor.scrapeTimeout }}\n      honorLabels: {{ .Values.metrics.podmonitor.honorLabels }}\n      {{- with .Values.metrics.podmonitor.endpointAdditionalProperties }}\n      {{- toYaml . | nindent 4 }}\n      {{- end }}\n{{- end }}\n"
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/templates/rbac.yaml",
    "content": "---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-event-emitted\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nrules:\n  - apiGroups: [\"\"]\n    resources: [\"events\"]\n    verbs: [\"create\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-event-emitted\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-event-emitted\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"venafi-kubernetes-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-cluster-viewer\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: view\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"venafi-kubernetes-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-node-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nrules:\n  - apiGroups: [\"\"]\n    resources: [\"nodes\"]\n    verbs: [\"get\", \"list\", \"watch\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-node-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nroleRef:\n  kind: ClusterRole\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . 
}}-node-reader\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"venafi-kubernetes-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-secret-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nrules:\n  - apiGroups: [\"\"]\n    resources: [\"secrets\"]\n    verbs: [\"get\", \"list\", \"watch\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-secret-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nroleRef:\n  kind: ClusterRole\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-secret-reader\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"venafi-kubernetes-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-cert-manager-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nrules:\n  - apiGroups: [\"cert-manager.io\"]\n    resources:\n      - certificates\n      - certificaterequests\n      - issuers\n      - clusterissuers\n    verbs: [\"get\", \"list\", \"watch\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-cert-manager-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nroleRef:\n  kind: ClusterRole\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . 
}}-cert-manager-reader\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"venafi-kubernetes-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-googlecas-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nrules:\n  - apiGroups: [\"cas-issuer.jetstack.io\"]\n    resources:\n      - googlecasissuers\n      - googlecasclusterissuers\n    verbs: [\"get\", \"list\", \"watch\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-googlecas-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nroleRef:\n  kind: ClusterRole\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-googlecas-reader\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"venafi-kubernetes-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-awspca-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nrules:\n  - apiGroups: [\"awspca.cert-manager.io\"]\n    resources:\n      - awspcaissuers\n      - awspcaclusterissuers\n    verbs: [\"get\", \"list\", \"watch\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-awspca-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nroleRef:\n  kind: ClusterRole\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . 
}}-awspca-reader\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"venafi-kubernetes-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-webhook-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nrules:\n  - apiGroups: [\"admissionregistration.k8s.io\"]\n    resources:\n      - validatingwebhookconfigurations\n      - mutatingwebhookconfigurations\n    verbs: [\"get\", \"list\", \"watch\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-webhook-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nroleRef:\n  kind: ClusterRole\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-webhook-reader\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"venafi-kubernetes-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-openshift-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nrules:\n  - apiGroups: [\"route.openshift.io\"]\n    resources:\n      - routes\n    verbs: [\"get\", \"list\", \"watch\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-openshift-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nroleRef:\n  kind: ClusterRole\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . 
}}-openshift-reader\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"venafi-kubernetes-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-istio-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nrules:\n  - apiGroups: [\"networking.istio.io\"]\n    resources:\n      - virtualservices\n      - gateways\n    verbs: [\"get\", \"list\", \"watch\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-istio-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nroleRef:\n  kind: ClusterRole\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-istio-reader\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"venafi-kubernetes-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-venafi-connection-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nrules:\n  - apiGroups: [\"jetstack.io\"]\n    resources:\n      - venaficonnections\n    verbs: [\"get\", \"list\", \"watch\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-venafi-connection-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nroleRef:\n  kind: ClusterRole\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . 
}}-venafi-connection-reader\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"venafi-kubernetes-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-venafi-enhanced-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nrules:\n  - apiGroups: [\"jetstack.io\"]\n    resources:\n      - venafiissuers\n      - venaficlusterissuers\n    verbs: [\"get\", \"list\", \"watch\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-venafi-enhanced-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nroleRef:\n  kind: ClusterRole\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-venafi-enhanced-reader\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"venafi-kubernetes-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-firefly-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nrules:\n  - apiGroups: [\"firefly.venafi.com\"]\n    resources:\n      - issuers\n    verbs: [\"get\", \"list\", \"watch\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-firefly-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nroleRef:\n  kind: ClusterRole\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . 
}}-firefly-reader\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"venafi-kubernetes-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-step-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nrules:\n  - apiGroups: [\"certmanager.step.sm\"]\n    resources:\n      - stepissuers\n      - stepclusterissuers\n    verbs: [\"get\", \"list\", \"watch\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-step-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nroleRef:\n  kind: ClusterRole\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-step-reader\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"venafi-kubernetes-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-cloudflare-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nrules:\n  - apiGroups: [\"cert-manager.k8s.cloudflare.com\"]\n    resources:\n      - originissuers\n      - clusteroriginissuers\n    verbs: [\"get\", \"list\", \"watch\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-cloudflare-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nroleRef:\n  kind: ClusterRole\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . 
}}-cloudflare-reader\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"venafi-kubernetes-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-freeipa-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nrules:\n  - apiGroups: [\"certmanager.freeipa.org\"]\n    resources:\n      - issuers\n      - clusterissuers\n    verbs: [\"get\", \"list\", \"watch\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-freeipa-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nroleRef:\n  kind: ClusterRole\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-freeipa-reader\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"venafi-kubernetes-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-keyfactor-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nrules:\n  - apiGroups: [\"ejbca-issuer.keyfactor.com\"]\n    resources:\n      - issuers\n      - clusterissuers\n    verbs: [\"get\", \"list\", \"watch\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . }}-keyfactor-reader\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nroleRef:\n  kind: ClusterRole\n  name: {{ include \"venafi-kubernetes-agent.fullname\" . 
}}-keyfactor-reader\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: {{ include \"venafi-kubernetes-agent.serviceAccountName\" . }}\n    namespace: {{ .Release.Namespace }}\n\n"
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/templates/serviceaccount.yaml",
    "content": "{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: {{ include \"venafi-kubernetes-agent.serviceAccountName\" . }}\n  namespace: {{ .Release.Namespace }}\n  labels:\n    {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\n  {{- with .Values.serviceAccount.annotations }}\n  annotations:\n    {{- toYaml . | nindent 4 }}\n  {{- end }}\n{{- end }}\n"
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/templates/venafi-connection-crd.without-validations.yaml",
    "content": "{{/* DO NOT EDIT. Use 'make generate-crds-venconn' to regenerate. */}}\n{{- if .Values.crds.venafiConnection.include }}\n{{- if (or (semverCompare \"<1.25\" .Capabilities.KubeVersion.GitVersion) .Values.crds.forceRemoveValidationAnnotations) }}\napiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n  name: \"venaficonnections.jetstack.io\"\n  {{- if .Values.crds.keep }}\n  annotations:\n    # This annotation prevents the CRD from being pruned by Helm when this chart\n    # is deleted.\n    helm.sh/resource-policy: keep\n  {{- end }}\n  labels:\n  {{- include \"venafi-connection.labels\" . | nindent 4 }}\nspec:\n  group: jetstack.io\n  names:\n    kind: VenafiConnection\n    listKind: VenafiConnectionList\n    plural: venaficonnections\n    shortNames:\n      - vc\n    singular: venaficonnection\n  scope: Namespaced\n  versions:\n    - name: v1alpha1\n      schema:\n        openAPIV3Schema:\n          description: VenafiConnection is the Schema for the VenafiConnection API\n          properties:\n            apiVersion:\n              description: |-\n                APIVersion defines the versioned schema of this representation of an object.\n                Servers should convert recognized schemas to the latest internal value, and\n                may reject unrecognized values.\n                More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n              type: string\n            kind:\n              description: |-\n                Kind is a string value representing the REST resource this object represents.\n                Servers may infer this from the endpoint the client submits requests to.\n                Cannot be updated.\n                In CamelCase.\n                More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n              type: string\n            metadata:\n              type: 
object\n            spec:\n              properties:\n                allowReferencesFrom:\n                  description: |-\n                    A namespace selector that specifies what namespaces this VenafiConnection\n                    is allowed to be used from.\n                    If not set/ null, the VenafiConnection can only be used within its namespace.\n                    An empty selector ({}) matches all namespaces.\n                    If set to a non-empty selector, the VenafiConnection can only be used from\n                    namespaces that match the selector. This possibly excludes the namespace\n                    the VenafiConnection is in.\n                  properties:\n                    matchExpressions:\n                      description: matchExpressions is a list of label selector requirements. The requirements are ANDed.\n                      items:\n                        description: |-\n                          A label selector requirement is a selector that contains values, a key, and an operator that\n                          relates the key and values.\n                        properties:\n                          key:\n                            description: key is the label key that the selector applies to.\n                            type: string\n                          operator:\n                            description: |-\n                              operator represents a key's relationship to a set of values.\n                              Valid operators are In, NotIn, Exists and DoesNotExist.\n                            type: string\n                          values:\n                            description: |-\n                              values is an array of string values. If the operator is In or NotIn,\n                              the values array must be non-empty. If the operator is Exists or DoesNotExist,\n                              the values array must be empty. 
This array is replaced during a strategic\n                              merge patch.\n                            items:\n                              type: string\n                            type: array\n                            x-kubernetes-list-type: atomic\n                        required:\n                          - key\n                          - operator\n                        type: object\n                      type: array\n                      x-kubernetes-list-type: atomic\n                    matchLabels:\n                      additionalProperties:\n                        type: string\n                      description: |-\n                        matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\n                        map is equivalent to an element of matchExpressions, whose key field is \"key\", the\n                        operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.\n                      type: object\n                  type: object\n                  x-kubernetes-map-type: atomic\n                firefly:\n                  properties:\n                    accessToken:\n                      description: |-\n                        The list of steps to retrieve the Access Token that will be used to connect\n                        to Firefly.\n                      items:\n                        properties:\n                          hashicorpVaultLDAP:\n                            description: |-\n                              HashicorpVaultLDAP is a SecretSource step that requires a Vault token in\n                              the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. 
It\n                              then fetches the requested secrets from Vault for use in the next step.\n                            properties:\n                              ldapPath:\n                                description: |-\n                                  The full HTTP path to the secret in Vault. Example:\n                                  /v1/ldap/static-cred/:role_name\n                                  or\n                                  /v1/ldap/creds/:role_name\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - ldapPath\n                            type: object\n                          hashicorpVaultOAuth:\n                            description: |-\n                              HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource\n                              step to provide an OAuth token, which this step uses to authenticate to\n                              Vault. The output of this step is a Vault token. This step allows you to use\n                              the step `HashicorpVaultSecret` afterwards.\n                            properties:\n                              authInputType:\n                                description: |-\n                                  AuthInputType is the authentication method to be used to authenticate\n                                  with HashiCorp Vault. The only supported value is \"OIDC\".\n                                enum:\n                                  - OIDC\n                                type: string\n                              authPath:\n                                description: |-\n                                  The login URL used for obtaining the Vault token. 
Example:\n                                  /v1/auth/oidc/login\n                                type: string\n                              clientId:\n                                description: 'Deprecated: This field does nothing and will be removed in the future.'\n                                type: string\n                              role:\n                                description: |-\n                                  The role defined in Vault that we want to use when authenticating to\n                                  Vault.\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - authInputType\n                              - authPath\n                              - role\n                            type: object\n                          hashicorpVaultSecret:\n                            description: |-\n                              HashicorpVaultSecret is a SecretSource step that requires a Vault token in\n                              the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. 
It\n                              then fetches the requested secrets from Vault for use in the next step.\n                            properties:\n                              fields:\n                                description: |-\n                                  The fields are Vault keys pointing to the secrets passed to the next\n                                  SecretSource step.\n\n                                  Example 1 (TPP, username and password): imagining that you have stored\n                                  the username and password for TPP under the keys \"username\" and\n                                  \"password\", you will want to set this field to `[\"username\",\n                                  \"password\"]`. The username is expected to be given first, the password\n                                  second.\n                                items:\n                                  type: string\n                                type: array\n                              secretPath:\n                                description: |-\n                                  The full HTTP path to the secret in Vault. Example:\n                                  /v1/secret/data/application-team-a/tpp-username-password\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - fields\n                              - secretPath\n                            type: object\n                          secret:\n                            description: |-\n                              Secret is a SecretSource step meant to be the first step. 
It retrieves secret\n                              values from a Kubernetes Secret, and passes them to the next step.\n                            properties:\n                              fields:\n                                description: |-\n                                  The names of the fields we want to extract from the Kubernetes secret.\n                                  These fields are passed to the next step in the chain.\n                                items:\n                                  type: string\n                                type: array\n                              name:\n                                description: The name of the Kubernetes secret.\n                                type: string\n                            required:\n                              - fields\n                              - name\n                            type: object\n                          serviceAccountToken:\n                            description: |-\n                              ServiceAccountToken is a SecretSource step meant to be the first step. It\n                              uses the Kubernetes TokenRequest API to retrieve a token for a given service\n                              account, and passes it to the next step.\n                            properties:\n                              audiences:\n                                description: |-\n                                  Audiences are the intendend audiences of the token. A recipient of a\n                                  token must identify themself with an identifier in the list of\n                                  audiences of the token, and otherwise should reject the token. 
A\n                                  token issued for multiple audiences may be used to authenticate\n                                  against any of the audiences listed but implies a high degree of\n                                  trust between the target audiences.\n                                items:\n                                  type: string\n                                type: array\n                              expirationSeconds:\n                                description: |-\n                                  ExpirationSeconds is the requested duration of validity of the request. The\n                                  token issuer may return a token with a different validity duration so a\n                                  client needs to check the 'expiration' field in a response.\n                                format: int64\n                                type: integer\n                              name:\n                                description: The name of the Kubernetes service account.\n                                type: string\n                            required:\n                              - audiences\n                              - name\n                            type: object\n                          tppOAuth:\n                            description: |-\n                              TPPOAuth is a SecretSource step that authenticates to a TPP server. This\n                              step is meant to be the last step and requires a prior step that depends\n                              on the `authInputType`.\n                            properties:\n                              authInputType:\n                                description: |-\n                                  AuthInputType is the authentication method to be used to authenticate\n                                  with TPP. 
The supported values are \"UsernamePassword\" and \"JWT\".\n                                enum:\n                                  - UsernamePassword\n                                  - JWT\n                                type: string\n                              clientId:\n                                description: ClientID is the clientId used to authenticate with TPP.\n                                type: string\n                              url:\n                                description: |-\n                                  The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs\n                                  https://tpp.example.com and https://tpp.example.com/vedsdk are\n                                  equivalent. The ending `/vedsdk` is optional and is stripped out\n                                  by our client.\n                                  If not set, defaults to the URL defined at the top-level of the\n                                  TPP configuration.\n                                type: string\n                            required:\n                              - authInputType\n                            type: object\n                          vcpOAuth:\n                            description: |-\n                              VCPOAuth is a SecretSource step that authenticates to the\n                              Certificate Manager, SaaS. 
This step is meant to be the last step and requires a prior step\n                              that outputs a JWT token.\n                            properties:\n                              tenantID:\n                                description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS.\n                                type: string\n                            type: object\n                        type: object\n                      maxItems: 50\n                      type: array\n                      x-kubernetes-list-type: atomic\n                    url:\n                      description: The URL to connect to the Workload Identity Manager instance.\n                      type: string\n                  required:\n                    - url\n                  type: object\n                tpp:\n                  properties:\n                    accessToken:\n                      description: The list of steps to retrieve a TPP access token.\n                      items:\n                        properties:\n                          hashicorpVaultLDAP:\n                            description: |-\n                              HashicorpVaultLDAP is a SecretSource step that requires a Vault token in\n                              the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It\n                              then fetches the requested secrets from Vault for use in the next step.\n                            properties:\n                              ldapPath:\n                                description: |-\n                                  The full HTTP path to the secret in Vault. 
Example:\n                                  /v1/ldap/static-cred/:role_name\n                                  or\n                                  /v1/ldap/creds/:role_name\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - ldapPath\n                            type: object\n                          hashicorpVaultOAuth:\n                            description: |-\n                              HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource\n                              step to provide an OAuth token, which this step uses to authenticate to\n                              Vault. The output of this step is a Vault token. This step allows you to use\n                              the step `HashicorpVaultSecret` afterwards.\n                            properties:\n                              authInputType:\n                                description: |-\n                                  AuthInputType is the authentication method to be used to authenticate\n                                  with HashiCorp Vault. The only supported value is \"OIDC\".\n                                enum:\n                                  - OIDC\n                                type: string\n                              authPath:\n                                description: |-\n                                  The login URL used for obtaining the Vault token. 
Example:\n                                  /v1/auth/oidc/login\n                                type: string\n                              clientId:\n                                description: 'Deprecated: This field does nothing and will be removed in the future.'\n                                type: string\n                              role:\n                                description: |-\n                                  The role defined in Vault that we want to use when authenticating to\n                                  Vault.\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - authInputType\n                              - authPath\n                              - role\n                            type: object\n                          hashicorpVaultSecret:\n                            description: |-\n                              HashicorpVaultSecret is a SecretSource step that requires a Vault token in\n                              the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. 
It\n                              then fetches the requested secrets from Vault for use in the next step.\n                            properties:\n                              fields:\n                                description: |-\n                                  The fields are Vault keys pointing to the secrets passed to the next\n                                  SecretSource step.\n\n                                  Example 1 (TPP, username and password): imagining that you have stored\n                                  the username and password for TPP under the keys \"username\" and\n                                  \"password\", you will want to set this field to `[\"username\",\n                                  \"password\"]`. The username is expected to be given first, the password\n                                  second.\n                                items:\n                                  type: string\n                                type: array\n                              secretPath:\n                                description: |-\n                                  The full HTTP path to the secret in Vault. Example:\n                                  /v1/secret/data/application-team-a/tpp-username-password\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - fields\n                              - secretPath\n                            type: object\n                          secret:\n                            description: |-\n                              Secret is a SecretSource step meant to be the first step. 
It retrieves secret\n                              values from a Kubernetes Secret, and passes them to the next step.\n                            properties:\n                              fields:\n                                description: |-\n                                  The names of the fields we want to extract from the Kubernetes secret.\n                                  These fields are passed to the next step in the chain.\n                                items:\n                                  type: string\n                                type: array\n                              name:\n                                description: The name of the Kubernetes secret.\n                                type: string\n                            required:\n                              - fields\n                              - name\n                            type: object\n                          serviceAccountToken:\n                            description: |-\n                              ServiceAccountToken is a SecretSource step meant to be the first step. It\n                              uses the Kubernetes TokenRequest API to retrieve a token for a given service\n                              account, and passes it to the next step.\n                            properties:\n                              audiences:\n                                description: |-\n                                  Audiences are the intended audiences of the token. A recipient of a\n                                  token must identify themselves with an identifier in the list of\n                                  audiences of the token, and otherwise should reject the token. 
A\n                                  token issued for multiple audiences may be used to authenticate\n                                  against any of the audiences listed but implies a high degree of\n                                  trust between the target audiences.\n                                items:\n                                  type: string\n                                type: array\n                              expirationSeconds:\n                                description: |-\n                                  ExpirationSeconds is the requested duration of validity of the request. The\n                                  token issuer may return a token with a different validity duration so a\n                                  client needs to check the 'expiration' field in a response.\n                                format: int64\n                                type: integer\n                              name:\n                                description: The name of the Kubernetes service account.\n                                type: string\n                            required:\n                              - audiences\n                              - name\n                            type: object\n                          tppOAuth:\n                            description: |-\n                              TPPOAuth is a SecretSource step that authenticates to a TPP server. This\n                              step is meant to be the last step and requires a prior step that depends\n                              on the `authInputType`.\n                            properties:\n                              authInputType:\n                                description: |-\n                                  AuthInputType is the authentication method to be used to authenticate\n                                  with TPP. 
The supported values are \"UsernamePassword\" and \"JWT\".\n                                enum:\n                                  - UsernamePassword\n                                  - JWT\n                                type: string\n                              clientId:\n                                description: ClientID is the clientId used to authenticate with TPP.\n                                type: string\n                              url:\n                                description: |-\n                                  The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs\n                                  https://tpp.example.com and https://tpp.example.com/vedsdk are\n                                  equivalent. The ending `/vedsdk` is optional and is stripped out\n                                  by our client.\n                                  If not set, defaults to the URL defined at the top-level of the\n                                  TPP configuration.\n                                type: string\n                            required:\n                              - authInputType\n                            type: object\n                          vcpOAuth:\n                            description: |-\n                              VCPOAuth is a SecretSource step that authenticates to the\n                              Certificate Manager, SaaS. 
This step is meant to be the last step and requires a prior step\n                              that outputs a JWT token.\n                            properties:\n                              tenantID:\n                                description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS.\n                                type: string\n                            type: object\n                        type: object\n                      maxItems: 50\n                      type: array\n                      x-kubernetes-list-type: atomic\n                    url:\n                      description: |-\n                        The URL to connect to the Certificate Manager Self-Hosted instance. The two URLs\n                        https://tpp.example.com and https://tpp.example.com/vedsdk are\n                        equivalent. The ending `/vedsdk` is optional and is stripped out by\n                        venafi-connection-lib.\n                      type: string\n                  required:\n                    - url\n                  type: object\n                vaas:\n                  description: 'Deprecated: The ''vaas'' field is deprecated use the field called ''vcp'' instead.'\n                  properties:\n                    accessToken:\n                      description: |-\n                        The list of steps to retrieve the Access Token that will be used to connect\n                        to Certificate Manager, SaaS.\n                      items:\n                        properties:\n                          hashicorpVaultLDAP:\n                            description: |-\n                              HashicorpVaultLDAP is a SecretSource step that requires a Vault token in\n                              the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. 
It\n                              then fetches the requested secrets from Vault for use in the next step.\n                            properties:\n                              ldapPath:\n                                description: |-\n                                  The full HTTP path to the secret in Vault. Example:\n                                  /v1/ldap/static-cred/:role_name\n                                  or\n                                  /v1/ldap/creds/:role_name\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - ldapPath\n                            type: object\n                          hashicorpVaultOAuth:\n                            description: |-\n                              HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource\n                              step to provide an OAuth token, which this step uses to authenticate to\n                              Vault. The output of this step is a Vault token. This step allows you to use\n                              the step `HashicorpVaultSecret` afterwards.\n                            properties:\n                              authInputType:\n                                description: |-\n                                  AuthInputType is the authentication method to be used to authenticate\n                                  with HashiCorp Vault. The only supported value is \"OIDC\".\n                                enum:\n                                  - OIDC\n                                type: string\n                              authPath:\n                                description: |-\n                                  The login URL used for obtaining the Vault token. 
Example:\n                                  /v1/auth/oidc/login\n                                type: string\n                              clientId:\n                                description: 'Deprecated: This field does nothing and will be removed in the future.'\n                                type: string\n                              role:\n                                description: |-\n                                  The role defined in Vault that we want to use when authenticating to\n                                  Vault.\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - authInputType\n                              - authPath\n                              - role\n                            type: object\n                          hashicorpVaultSecret:\n                            description: |-\n                              HashicorpVaultSecret is a SecretSource step that requires a Vault token in\n                              the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. 
It\n                              then fetches the requested secrets from Vault for use in the next step.\n                            properties:\n                              fields:\n                                description: |-\n                                  The fields are Vault keys pointing to the secrets passed to the next\n                                  SecretSource step.\n\n                                  Example 1 (TPP, username and password): imagining that you have stored\n                                  the username and password for TPP under the keys \"username\" and\n                                  \"password\", you will want to set this field to `[\"username\",\n                                  \"password\"]`. The username is expected to be given first, the password\n                                  second.\n                                items:\n                                  type: string\n                                type: array\n                              secretPath:\n                                description: |-\n                                  The full HTTP path to the secret in Vault. Example:\n                                  /v1/secret/data/application-team-a/tpp-username-password\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - fields\n                              - secretPath\n                            type: object\n                          secret:\n                            description: |-\n                              Secret is a SecretSource step meant to be the first step. 
It retrieves secret\n                              values from a Kubernetes Secret, and passes them to the next step.\n                            properties:\n                              fields:\n                                description: |-\n                                  The names of the fields we want to extract from the Kubernetes secret.\n                                  These fields are passed to the next step in the chain.\n                                items:\n                                  type: string\n                                type: array\n                              name:\n                                description: The name of the Kubernetes secret.\n                                type: string\n                            required:\n                              - fields\n                              - name\n                            type: object\n                          serviceAccountToken:\n                            description: |-\n                              ServiceAccountToken is a SecretSource step meant to be the first step. It\n                              uses the Kubernetes TokenRequest API to retrieve a token for a given service\n                              account, and passes it to the next step.\n                            properties:\n                              audiences:\n                                description: |-\n                                  Audiences are the intended audiences of the token. A recipient of a\n                                  token must identify themselves with an identifier in the list of\n                                  audiences of the token, and otherwise should reject the token. 
A\n                                  token issued for multiple audiences may be used to authenticate\n                                  against any of the audiences listed but implies a high degree of\n                                  trust between the target audiences.\n                                items:\n                                  type: string\n                                type: array\n                              expirationSeconds:\n                                description: |-\n                                  ExpirationSeconds is the requested duration of validity of the request. The\n                                  token issuer may return a token with a different validity duration so a\n                                  client needs to check the 'expiration' field in a response.\n                                format: int64\n                                type: integer\n                              name:\n                                description: The name of the Kubernetes service account.\n                                type: string\n                            required:\n                              - audiences\n                              - name\n                            type: object\n                          tppOAuth:\n                            description: |-\n                              TPPOAuth is a SecretSource step that authenticates to a TPP server. This\n                              step is meant to be the last step and requires a prior step that depends\n                              on the `authInputType`.\n                            properties:\n                              authInputType:\n                                description: |-\n                                  AuthInputType is the authentication method to be used to authenticate\n                                  with TPP. 
The supported values are \"UsernamePassword\" and \"JWT\".\n                                enum:\n                                  - UsernamePassword\n                                  - JWT\n                                type: string\n                              clientId:\n                                description: ClientID is the clientId used to authenticate with TPP.\n                                type: string\n                              url:\n                                description: |-\n                                  The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs\n                                  https://tpp.example.com and https://tpp.example.com/vedsdk are\n                                  equivalent. The ending `/vedsdk` is optional and is stripped out\n                                  by our client.\n                                  If not set, defaults to the URL defined at the top-level of the\n                                  TPP configuration.\n                                type: string\n                            required:\n                              - authInputType\n                            type: object\n                          vcpOAuth:\n                            description: |-\n                              VCPOAuth is a SecretSource step that authenticates to the\n                              Certificate Manager, SaaS. 
This step is meant to be the last step and requires a prior step\n                              that outputs a JWT token.\n                            properties:\n                              tenantID:\n                                description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS.\n                                type: string\n                            type: object\n                        type: object\n                      maxItems: 50\n                      type: array\n                      x-kubernetes-list-type: atomic\n                    apiKey:\n                      description: |-\n                        The list of steps to retrieve the API key that will be used to connect to\n                        Certificate Manager, SaaS.\n                      items:\n                        properties:\n                          hashicorpVaultLDAP:\n                            description: |-\n                              HashicorpVaultLDAP is a SecretSource step that requires a Vault token in\n                              the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It\n                              then fetches the requested secrets from Vault for use in the next step.\n                            properties:\n                              ldapPath:\n                                description: |-\n                                  The full HTTP path to the secret in Vault. 
Example:\n                                  /v1/ldap/static-cred/:role_name\n                                  or\n                                  /v1/ldap/creds/:role_name\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - ldapPath\n                            type: object\n                          hashicorpVaultOAuth:\n                            description: |-\n                              HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource\n                              step to provide an OAuth token, which this step uses to authenticate to\n                              Vault. The output of this step is a Vault token. This step allows you to use\n                              the step `HashicorpVaultSecret` afterwards.\n                            properties:\n                              authInputType:\n                                description: |-\n                                  AuthInputType is the authentication method to be used to authenticate\n                                  with HashiCorp Vault. The only supported value is \"OIDC\".\n                                enum:\n                                  - OIDC\n                                type: string\n                              authPath:\n                                description: |-\n                                  The login URL used for obtaining the Vault token. 
Example:\n                                  /v1/auth/oidc/login\n                                type: string\n                              clientId:\n                                description: 'Deprecated: This field does nothing and will be removed in the future.'\n                                type: string\n                              role:\n                                description: |-\n                                  The role defined in Vault that we want to use when authenticating to\n                                  Vault.\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - authInputType\n                              - authPath\n                              - role\n                            type: object\n                          hashicorpVaultSecret:\n                            description: |-\n                              HashicorpVaultSecret is a SecretSource step that requires a Vault token in\n                              the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. 
It\n                              then fetches the requested secrets from Vault for use in the next step.\n                            properties:\n                              fields:\n                                description: |-\n                                  The fields are Vault keys pointing to the secrets passed to the next\n                                  SecretSource step.\n\n                                  Example 1 (TPP, username and password): imagining that you have stored\n                                  the username and password for TPP under the keys \"username\" and\n                                  \"password\", you will want to set this field to `[\"username\",\n                                  \"password\"]`. The username is expected to be given first, the password\n                                  second.\n                                items:\n                                  type: string\n                                type: array\n                              secretPath:\n                                description: |-\n                                  The full HTTP path to the secret in Vault. Example:\n                                  /v1/secret/data/application-team-a/tpp-username-password\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - fields\n                              - secretPath\n                            type: object\n                          secret:\n                            description: |-\n                              Secret is a SecretSource step meant to be the first step. 
It retrieves secret\n                              values from a Kubernetes Secret, and passes them to the next step.\n                            properties:\n                              fields:\n                                description: |-\n                                  The names of the fields we want to extract from the Kubernetes secret.\n                                  These fields are passed to the next step in the chain.\n                                items:\n                                  type: string\n                                type: array\n                              name:\n                                description: The name of the Kubernetes secret.\n                                type: string\n                            required:\n                              - fields\n                              - name\n                            type: object\n                          serviceAccountToken:\n                            description: |-\n                              ServiceAccountToken is a SecretSource step meant to be the first step. It\n                              uses the Kubernetes TokenRequest API to retrieve a token for a given service\n                              account, and passes it to the next step.\n                            properties:\n                              audiences:\n                                description: |-\n                                  Audiences are the intended audiences of the token. A recipient of a\n                                  token must identify themselves with an identifier in the list of\n                                  audiences of the token, and otherwise should reject the token. 
A\n                                  token issued for multiple audiences may be used to authenticate\n                                  against any of the audiences listed but implies a high degree of\n                                  trust between the target audiences.\n                                items:\n                                  type: string\n                                type: array\n                              expirationSeconds:\n                                description: |-\n                                  ExpirationSeconds is the requested duration of validity of the request. The\n                                  token issuer may return a token with a different validity duration so a\n                                  client needs to check the 'expiration' field in a response.\n                                format: int64\n                                type: integer\n                              name:\n                                description: The name of the Kubernetes service account.\n                                type: string\n                            required:\n                              - audiences\n                              - name\n                            type: object\n                          tppOAuth:\n                            description: |-\n                              TPPOAuth is a SecretSource step that authenticates to a TPP server. This\n                              step is meant to be the last step and requires a prior step that depends\n                              on the `authInputType`.\n                            properties:\n                              authInputType:\n                                description: |-\n                                  AuthInputType is the authentication method to be used to authenticate\n                                  with TPP. 
The supported values are \"UsernamePassword\" and \"JWT\".\n                                enum:\n                                  - UsernamePassword\n                                  - JWT\n                                type: string\n                              clientId:\n                                description: ClientID is the clientId used to authenticate with TPP.\n                                type: string\n                              url:\n                                description: |-\n                                  The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs\n                                  https://tpp.example.com and https://tpp.example.com/vedsdk are\n                                  equivalent. The ending `/vedsdk` is optional and is stripped out\n                                  by our client.\n                                  If not set, defaults to the URL defined at the top-level of the\n                                  TPP configuration.\n                                type: string\n                            required:\n                              - authInputType\n                            type: object\n                          vcpOAuth:\n                            description: |-\n                              VCPOAuth is a SecretSource step that authenticates to the\n                              Certificate Manager, SaaS. 
This step is meant to be the last step and requires a prior step\n                              that outputs a JWT token.\n                            properties:\n                              tenantID:\n                                description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS.\n                                type: string\n                            type: object\n                        type: object\n                      maxItems: 50\n                      type: array\n                      x-kubernetes-list-type: atomic\n                    url:\n                      description: |-\n                        The URL to connect to the Certificate Manager, SaaS instance. If not set, the default\n                        value https://api.venafi.cloud is used.\n                      type: string\n                  type: object\n                vcp:\n                  properties:\n                    accessToken:\n                      description: |-\n                        The list of steps to retrieve the Access Token that will be used to connect\n                        to Certificate Manager, SaaS.\n                      items:\n                        properties:\n                          hashicorpVaultLDAP:\n                            description: |-\n                              HashicorpVaultLDAP is a SecretSource step that requires a Vault token in\n                              the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It\n                              then fetches the requested secrets from Vault for use in the next step.\n                            properties:\n                              ldapPath:\n                                description: |-\n                                  The full HTTP path to the secret in Vault. 
Example:\n                                  /v1/ldap/static-cred/:role_name\n                                  or\n                                  /v1/ldap/creds/:role_name\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - ldapPath\n                            type: object\n                          hashicorpVaultOAuth:\n                            description: |-\n                              HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource\n                              step to provide an OAuth token, which this step uses to authenticate to\n                              Vault. The output of this step is a Vault token. This step allows you to use\n                              the step `HashicorpVaultSecret` afterwards.\n                            properties:\n                              authInputType:\n                                description: |-\n                                  AuthInputType is the authentication method to be used to authenticate\n                                  with HashiCorp Vault. The only supported value is \"OIDC\".\n                                enum:\n                                  - OIDC\n                                type: string\n                              authPath:\n                                description: |-\n                                  The login URL used for obtaining the Vault token. 
Example:\n                                  /v1/auth/oidc/login\n                                type: string\n                              clientId:\n                                description: 'Deprecated: This field does nothing and will be removed in the future.'\n                                type: string\n                              role:\n                                description: |-\n                                  The role defined in Vault that we want to use when authenticating to\n                                  Vault.\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - authInputType\n                              - authPath\n                              - role\n                            type: object\n                          hashicorpVaultSecret:\n                            description: |-\n                              HashicorpVaultSecret is a SecretSource step that requires a Vault token in\n                              the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. 
It\n                              then fetches the requested secrets from Vault for use in the next step.\n                            properties:\n                              fields:\n                                description: |-\n                                  The fields are Vault keys pointing to the secrets passed to the next\n                                  SecretSource step.\n\n                                  Example 1 (TPP, username and password): imagining that you have stored\n                                  the username and password for TPP under the keys \"username\" and\n                                  \"password\", you will want to set this field to `[\"username\",\n                                  \"password\"]`. The username is expected to be given first, the password\n                                  second.\n                                items:\n                                  type: string\n                                type: array\n                              secretPath:\n                                description: |-\n                                  The full HTTP path to the secret in Vault. Example:\n                                  /v1/secret/data/application-team-a/tpp-username-password\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - fields\n                              - secretPath\n                            type: object\n                          secret:\n                            description: |-\n                              Secret is a SecretSource step meant to be the first step. 
It retrieves secret\n                              values from a Kubernetes Secret, and passes them to the next step.\n                            properties:\n                              fields:\n                                description: |-\n                                  The names of the fields we want to extract from the Kubernetes secret.\n                                  These fields are passed to the next step in the chain.\n                                items:\n                                  type: string\n                                type: array\n                              name:\n                                description: The name of the Kubernetes secret.\n                                type: string\n                            required:\n                              - fields\n                              - name\n                            type: object\n                          serviceAccountToken:\n                            description: |-\n                              ServiceAccountToken is a SecretSource step meant to be the first step. It\n                              uses the Kubernetes TokenRequest API to retrieve a token for a given service\n                              account, and passes it to the next step.\n                            properties:\n                              audiences:\n                                description: |-\n                                  Audiences are the intendend audiences of the token. A recipient of a\n                                  token must identify themself with an identifier in the list of\n                                  audiences of the token, and otherwise should reject the token. 
A\n                                  token issued for multiple audiences may be used to authenticate\n                                  against any of the audiences listed but implies a high degree of\n                                  trust between the target audiences.\n                                items:\n                                  type: string\n                                type: array\n                              expirationSeconds:\n                                description: |-\n                                  ExpirationSeconds is the requested duration of validity of the request. The\n                                  token issuer may return a token with a different validity duration so a\n                                  client needs to check the 'expiration' field in a response.\n                                format: int64\n                                type: integer\n                              name:\n                                description: The name of the Kubernetes service account.\n                                type: string\n                            required:\n                              - audiences\n                              - name\n                            type: object\n                          tppOAuth:\n                            description: |-\n                              TPPOAuth is a SecretSource step that authenticates to a TPP server. This\n                              step is meant to be the last step and requires a prior step that depends\n                              on the `authInputType`.\n                            properties:\n                              authInputType:\n                                description: |-\n                                  AuthInputType is the authentication method to be used to authenticate\n                                  with TPP. 
The supported values are \"UsernamePassword\" and \"JWT\".\n                                enum:\n                                  - UsernamePassword\n                                  - JWT\n                                type: string\n                              clientId:\n                                description: ClientID is the clientId used to authenticate with TPP.\n                                type: string\n                              url:\n                                description: |-\n                                  The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs\n                                  https://tpp.example.com and https://tpp.example.com/vedsdk are\n                                  equivalent. The ending `/vedsdk` is optional and is stripped out\n                                  by our client.\n                                  If not set, defaults to the URL defined at the top-level of the\n                                  TPP configuration.\n                                type: string\n                            required:\n                              - authInputType\n                            type: object\n                          vcpOAuth:\n                            description: |-\n                              VCPOAuth is a SecretSource step that authenticates to the\n                              Certificate Manager, SaaS. 
This step is meant to be the last step and requires a prior step\n                              that outputs a JWT token.\n                            properties:\n                              tenantID:\n                                description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS.\n                                type: string\n                            type: object\n                        type: object\n                      maxItems: 50\n                      type: array\n                      x-kubernetes-list-type: atomic\n                    apiKey:\n                      description: |-\n                        The list of steps to retrieve the API key that will be used to connect to\n                        Certificate Manager, SaaS.\n                      items:\n                        properties:\n                          hashicorpVaultLDAP:\n                            description: |-\n                              HashicorpVaultLDAP is a SecretSource step that requires a Vault token in\n                              the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It\n                              then fetches the requested secrets from Vault for use in the next step.\n                            properties:\n                              ldapPath:\n                                description: |-\n                                  The full HTTP path to the secret in Vault. 
Example:\n                                  /v1/ldap/static-cred/:role_name\n                                  or\n                                  /v1/ldap/creds/:role_name\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - ldapPath\n                            type: object\n                          hashicorpVaultOAuth:\n                            description: |-\n                              HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource\n                              step to provide an OAuth token, which this step uses to authenticate to\n                              Vault. The output of this step is a Vault token. This step allows you to use\n                              the step `HashicorpVaultSecret` afterwards.\n                            properties:\n                              authInputType:\n                                description: |-\n                                  AuthInputType is the authentication method to be used to authenticate\n                                  with HashiCorp Vault. The only supported value is \"OIDC\".\n                                enum:\n                                  - OIDC\n                                type: string\n                              authPath:\n                                description: |-\n                                  The login URL used for obtaining the Vault token. 
Example:\n                                  /v1/auth/oidc/login\n                                type: string\n                              clientId:\n                                description: 'Deprecated: This field does nothing and will be removed in the future.'\n                                type: string\n                              role:\n                                description: |-\n                                  The role defined in Vault that we want to use when authenticating to\n                                  Vault.\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - authInputType\n                              - authPath\n                              - role\n                            type: object\n                          hashicorpVaultSecret:\n                            description: |-\n                              HashicorpVaultSecret is a SecretSource step that requires a Vault token in\n                              the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. 
It\n                              then fetches the requested secrets from Vault for use in the next step.\n                            properties:\n                              fields:\n                                description: |-\n                                  The fields are Vault keys pointing to the secrets passed to the next\n                                  SecretSource step.\n\n                                  Example 1 (TPP, username and password): imagining that you have stored\n                                  the username and password for TPP under the keys \"username\" and\n                                  \"password\", you will want to set this field to `[\"username\",\n                                  \"password\"]`. The username is expected to be given first, the password\n                                  second.\n                                items:\n                                  type: string\n                                type: array\n                              secretPath:\n                                description: |-\n                                  The full HTTP path to the secret in Vault. Example:\n                                  /v1/secret/data/application-team-a/tpp-username-password\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - fields\n                              - secretPath\n                            type: object\n                          secret:\n                            description: |-\n                              Secret is a SecretSource step meant to be the first step. 
It retrieves secret\n                              values from a Kubernetes Secret, and passes them to the next step.\n                            properties:\n                              fields:\n                                description: |-\n                                  The names of the fields we want to extract from the Kubernetes secret.\n                                  These fields are passed to the next step in the chain.\n                                items:\n                                  type: string\n                                type: array\n                              name:\n                                description: The name of the Kubernetes secret.\n                                type: string\n                            required:\n                              - fields\n                              - name\n                            type: object\n                          serviceAccountToken:\n                            description: |-\n                              ServiceAccountToken is a SecretSource step meant to be the first step. It\n                              uses the Kubernetes TokenRequest API to retrieve a token for a given service\n                              account, and passes it to the next step.\n                            properties:\n                              audiences:\n                                description: |-\n                                  Audiences are the intendend audiences of the token. A recipient of a\n                                  token must identify themself with an identifier in the list of\n                                  audiences of the token, and otherwise should reject the token. 
A\n                                  token issued for multiple audiences may be used to authenticate\n                                  against any of the audiences listed but implies a high degree of\n                                  trust between the target audiences.\n                                items:\n                                  type: string\n                                type: array\n                              expirationSeconds:\n                                description: |-\n                                  ExpirationSeconds is the requested duration of validity of the request. The\n                                  token issuer may return a token with a different validity duration so a\n                                  client needs to check the 'expiration' field in a response.\n                                format: int64\n                                type: integer\n                              name:\n                                description: The name of the Kubernetes service account.\n                                type: string\n                            required:\n                              - audiences\n                              - name\n                            type: object\n                          tppOAuth:\n                            description: |-\n                              TPPOAuth is a SecretSource step that authenticates to a TPP server. This\n                              step is meant to be the last step and requires a prior step that depends\n                              on the `authInputType`.\n                            properties:\n                              authInputType:\n                                description: |-\n                                  AuthInputType is the authentication method to be used to authenticate\n                                  with TPP. 
The supported values are \"UsernamePassword\" and \"JWT\".\n                                enum:\n                                  - UsernamePassword\n                                  - JWT\n                                type: string\n                              clientId:\n                                description: ClientID is the clientId used to authenticate with TPP.\n                                type: string\n                              url:\n                                description: |-\n                                  The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs\n                                  https://tpp.example.com and https://tpp.example.com/vedsdk are\n                                  equivalent. The ending `/vedsdk` is optional and is stripped out\n                                  by our client.\n                                  If not set, defaults to the URL defined at the top-level of the\n                                  TPP configuration.\n                                type: string\n                            required:\n                              - authInputType\n                            type: object\n                          vcpOAuth:\n                            description: |-\n                              VCPOAuth is a SecretSource step that authenticates to the\n                              Certificate Manager, SaaS. 
This step is meant to be the last step and requires a prior step\n                              that outputs a JWT token.\n                            properties:\n                              tenantID:\n                                description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS.\n                                type: string\n                            type: object\n                        type: object\n                      maxItems: 50\n                      type: array\n                      x-kubernetes-list-type: atomic\n                    url:\n                      description: |-\n                        The URL to connect to the Certificate Manager, SaaS instance. If not set, the default\n                        value https://api.venafi.cloud is used.\n                      type: string\n                  type: object\n              type: object\n            status:\n              properties:\n                conditions:\n                  description: List of status conditions to indicate the status of a VenafiConnection.\n                  items:\n                    description: ConnectionCondition contains condition information for a VenafiConnection.\n                    properties:\n                      lastTransitionTime:\n                        description: |-\n                          LastTransitionTime is the timestamp corresponding to the last status\n                          change of this condition.\n                        format: date-time\n                        type: string\n                      lastUpdateTime:\n                        description: lastUpdateTime is the time of the last update to this condition\n                        format: date-time\n                        type: string\n                      message:\n                        description: |-\n                          Message is a human readable description of the details of the last\n                          
transition, complementing reason.\n                        type: string\n                      observedGeneration:\n                        description: |-\n                          If set, this represents the .metadata.generation that the condition was\n                          set based upon.\n                          For instance, if .metadata.generation is currently 12, but the\n                          .status.condition[x].observedGeneration is 9, the condition is out of date\n                          with respect to the current state of the Issuer.\n                        format: int64\n                        type: integer\n                      reason:\n                        description: |-\n                          Reason is a brief machine readable explanation for the condition's last\n                          transition.\n                        type: string\n                      status:\n                        description: Status of the condition, one of (`True`, `False`, `Unknown`).\n                        type: string\n                      tokenValidUntil:\n                        description: |-\n                          The ValidUntil time of the token used to authenticate with the\n                          Certificate Manager, SaaS.\n                        format: date-time\n                        type: string\n                      type:\n                        description: |-\n                          Type of the condition, should be a combination of the unique name of the\n                          operator and the type of condition.\n                          eg. 
`VenafiEnhancedIssuerReady`\n                        type: string\n                    required:\n                      - status\n                      - type\n                    type: object\n                  type: array\n                  x-kubernetes-list-map-keys:\n                    - type\n                  x-kubernetes-list-type: map\n              type: object\n          required:\n            - metadata\n            - spec\n          type: object\n      served: true\n      storage: true\n      subresources:\n        status: {}\n{{ end }}\n{{ end }}\n"
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/templates/venafi-connection-crd.yaml",
    "content": "{{/* DO NOT EDIT. Use 'make generate-crds-venconn' to regenerate. */}}\n{{- if .Values.crds.venafiConnection.include }}\n{{- if not (or (semverCompare \"<1.25\" .Capabilities.KubeVersion.GitVersion) .Values.crds.forceRemoveValidationAnnotations) }}\napiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n  name: \"venaficonnections.jetstack.io\"\n  {{- if .Values.crds.keep }}\n  annotations:\n    # This annotation prevents the CRD from being pruned by Helm when this chart\n    # is deleted.\n    helm.sh/resource-policy: keep\n  {{- end }}\n  labels:\n  {{- include \"venafi-connection.labels\" . | nindent 4 }}\nspec:\n  group: jetstack.io\n  names:\n    kind: VenafiConnection\n    listKind: VenafiConnectionList\n    plural: venaficonnections\n    shortNames:\n      - vc\n    singular: venaficonnection\n  scope: Namespaced\n  versions:\n    - name: v1alpha1\n      schema:\n        openAPIV3Schema:\n          description: VenafiConnection is the Schema for the VenafiConnection API\n          properties:\n            apiVersion:\n              description: |-\n                APIVersion defines the versioned schema of this representation of an object.\n                Servers should convert recognized schemas to the latest internal value, and\n                may reject unrecognized values.\n                More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n              type: string\n            kind:\n              description: |-\n                Kind is a string value representing the REST resource this object represents.\n                Servers may infer this from the endpoint the client submits requests to.\n                Cannot be updated.\n                In CamelCase.\n                More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n              type: string\n            metadata:\n              type: 
object\n            spec:\n              properties:\n                allowReferencesFrom:\n                  description: |-\n                    A namespace selector that specifies what namespaces this VenafiConnection\n                    is allowed to be used from.\n                    If not set/ null, the VenafiConnection can only be used within its namespace.\n                    An empty selector ({}) matches all namespaces.\n                    If set to a non-empty selector, the VenafiConnection can only be used from\n                    namespaces that match the selector. This possibly excludes the namespace\n                    the VenafiConnection is in.\n                  properties:\n                    matchExpressions:\n                      description: matchExpressions is a list of label selector requirements. The requirements are ANDed.\n                      items:\n                        description: |-\n                          A label selector requirement is a selector that contains values, a key, and an operator that\n                          relates the key and values.\n                        properties:\n                          key:\n                            description: key is the label key that the selector applies to.\n                            type: string\n                          operator:\n                            description: |-\n                              operator represents a key's relationship to a set of values.\n                              Valid operators are In, NotIn, Exists and DoesNotExist.\n                            type: string\n                          values:\n                            description: |-\n                              values is an array of string values. If the operator is In or NotIn,\n                              the values array must be non-empty. If the operator is Exists or DoesNotExist,\n                              the values array must be empty. 
This array is replaced during a strategic\n                              merge patch.\n                            items:\n                              type: string\n                            type: array\n                            x-kubernetes-list-type: atomic\n                        required:\n                          - key\n                          - operator\n                        type: object\n                      type: array\n                      x-kubernetes-list-type: atomic\n                    matchLabels:\n                      additionalProperties:\n                        type: string\n                      description: |-\n                        matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\n                        map is equivalent to an element of matchExpressions, whose key field is \"key\", the\n                        operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.\n                      type: object\n                  type: object\n                  x-kubernetes-map-type: atomic\n                firefly:\n                  properties:\n                    accessToken:\n                      description: |-\n                        The list of steps to retrieve the Access Token that will be used to connect\n                        to Firefly.\n                      items:\n                        properties:\n                          hashicorpVaultLDAP:\n                            description: |-\n                              HashicorpVaultLDAP is a SecretSource step that requires a Vault token in\n                              the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. 
It\n                              then fetches the requested secrets from Vault for use in the next step.\n                            properties:\n                              ldapPath:\n                                description: |-\n                                  The full HTTP path to the secret in Vault. Example:\n                                  /v1/ldap/static-cred/:role_name\n                                  or\n                                  /v1/ldap/creds/:role_name\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - ldapPath\n                            type: object\n                          hashicorpVaultOAuth:\n                            description: |-\n                              HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource\n                              step to provide an OAuth token, which this step uses to authenticate to\n                              Vault. The output of this step is a Vault token. This step allows you to use\n                              the step `HashicorpVaultSecret` afterwards.\n                            properties:\n                              authInputType:\n                                description: |-\n                                  AuthInputType is the authentication method to be used to authenticate\n                                  with HashiCorp Vault. The only supported value is \"OIDC\".\n                                enum:\n                                  - OIDC\n                                type: string\n                              authPath:\n                                description: |-\n                                  The login URL used for obtaining the Vault token. 
Example:\n                                  /v1/auth/oidc/login\n                                type: string\n                              clientId:\n                                description: 'Deprecated: This field does nothing and will be removed in the future.'\n                                type: string\n                              role:\n                                description: |-\n                                  The role defined in Vault that we want to use when authenticating to\n                                  Vault.\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - authInputType\n                              - authPath\n                              - role\n                            type: object\n                          hashicorpVaultSecret:\n                            description: |-\n                              HashicorpVaultSecret is a SecretSource step that requires a Vault token in\n                              the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. 
It\n                              then fetches the requested secrets from Vault for use in the next step.\n                            properties:\n                              fields:\n                                description: |-\n                                  The fields are Vault keys pointing to the secrets passed to the next\n                                  SecretSource step.\n\n                                  Example 1 (TPP, username and password): imagining that you have stored\n                                  the username and password for TPP under the keys \"username\" and\n                                  \"password\", you will want to set this field to `[\"username\",\n                                  \"password\"]`. The username is expected to be given first, the password\n                                  second.\n                                items:\n                                  type: string\n                                type: array\n                              secretPath:\n                                description: |-\n                                  The full HTTP path to the secret in Vault. Example:\n                                  /v1/secret/data/application-team-a/tpp-username-password\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - fields\n                              - secretPath\n                            type: object\n                          secret:\n                            description: |-\n                              Secret is a SecretSource step meant to be the first step. 
It retrieves secret\n                              values from a Kubernetes Secret, and passes them to the next step.\n                            properties:\n                              fields:\n                                description: |-\n                                  The names of the fields we want to extract from the Kubernetes secret.\n                                  These fields are passed to the next step in the chain.\n                                items:\n                                  type: string\n                                type: array\n                              name:\n                                description: The name of the Kubernetes secret.\n                                type: string\n                            required:\n                              - fields\n                              - name\n                            type: object\n                          serviceAccountToken:\n                            description: |-\n                              ServiceAccountToken is a SecretSource step meant to be the first step. It\n                              uses the Kubernetes TokenRequest API to retrieve a token for a given service\n                              account, and passes it to the next step.\n                            properties:\n                              audiences:\n                                description: |-\n                                  Audiences are the intendend audiences of the token. A recipient of a\n                                  token must identify themself with an identifier in the list of\n                                  audiences of the token, and otherwise should reject the token. 
A\n                                  token issued for multiple audiences may be used to authenticate\n                                  against any of the audiences listed but implies a high degree of\n                                  trust between the target audiences.\n                                items:\n                                  type: string\n                                type: array\n                              expirationSeconds:\n                                description: |-\n                                  ExpirationSeconds is the requested duration of validity of the request. The\n                                  token issuer may return a token with a different validity duration so a\n                                  client needs to check the 'expiration' field in a response.\n                                format: int64\n                                type: integer\n                              name:\n                                description: The name of the Kubernetes service account.\n                                type: string\n                            required:\n                              - audiences\n                              - name\n                            type: object\n                          tppOAuth:\n                            description: |-\n                              TPPOAuth is a SecretSource step that authenticates to a TPP server. This\n                              step is meant to be the last step and requires a prior step that depends\n                              on the `authInputType`.\n                            properties:\n                              authInputType:\n                                description: |-\n                                  AuthInputType is the authentication method to be used to authenticate\n                                  with TPP. 
The supported values are \"UsernamePassword\" and \"JWT\".\n                                enum:\n                                  - UsernamePassword\n                                  - JWT\n                                type: string\n                              clientId:\n                                description: ClientID is the clientId used to authenticate with TPP.\n                                type: string\n                              url:\n                                description: |-\n                                  The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs\n                                  https://tpp.example.com and https://tpp.example.com/vedsdk are\n                                  equivalent. The ending `/vedsdk` is optional and is stripped out\n                                  by our client.\n                                  If not set, defaults to the URL defined at the top-level of the\n                                  TPP configuration.\n                                type: string\n                            required:\n                              - authInputType\n                            type: object\n                          vcpOAuth:\n                            description: |-\n                              VCPOAuth is a SecretSource step that authenticates to the\n                              Certificate Manager, SaaS. 
This step is meant to be the last step and requires a prior step\n                              that outputs a JWT token.\n                            properties:\n                              tenantID:\n                                description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS.\n                                type: string\n                            type: object\n                        type: object\n                        x-kubernetes-validations:\n                          - message: must have exactly one field set\n                            rule: '((has(self.secret) ? 1 : 0) + (has(self.serviceAccountToken) ? 1 : 0) + (has(self.hashicorpVaultOAuth) ? 1 : 0) + (has(self.hashicorpVaultSecret) ? 1 : 0) + (has(self.hashicorpVaultLDAP) ? 1 : 0) + (has(self.tppOAuth) ? 1 : 0) + (has(self.vcpOAuth) ? 1 : 0)) == 1'\n                      maxItems: 50\n                      type: array\n                      x-kubernetes-list-type: atomic\n                    url:\n                      description: The URL to connect to the Workload Identity Manager instance.\n                      type: string\n                  required:\n                    - url\n                  type: object\n                tpp:\n                  properties:\n                    accessToken:\n                      description: The list of steps to retrieve a TPP access token.\n                      items:\n                        properties:\n                          hashicorpVaultLDAP:\n                            description: |-\n                              HashicorpVaultLDAP is a SecretSource step that requires a Vault token in\n                              the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. 
It\n                              then fetches the requested secrets from Vault for use in the next step.\n                            properties:\n                              ldapPath:\n                                description: |-\n                                  The full HTTP path to the secret in Vault. Example:\n                                  /v1/ldap/static-cred/:role_name\n                                  or\n                                  /v1/ldap/creds/:role_name\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - ldapPath\n                            type: object\n                          hashicorpVaultOAuth:\n                            description: |-\n                              HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource\n                              step to provide an OAuth token, which this step uses to authenticate to\n                              Vault. The output of this step is a Vault token. This step allows you to use\n                              the step `HashicorpVaultSecret` afterwards.\n                            properties:\n                              authInputType:\n                                description: |-\n                                  AuthInputType is the authentication method to be used to authenticate\n                                  with HashiCorp Vault. The only supported value is \"OIDC\".\n                                enum:\n                                  - OIDC\n                                type: string\n                              authPath:\n                                description: |-\n                                  The login URL used for obtaining the Vault token. 
Example:\n                                  /v1/auth/oidc/login\n                                type: string\n                              clientId:\n                                description: 'Deprecated: This field does nothing and will be removed in the future.'\n                                type: string\n                              role:\n                                description: |-\n                                  The role defined in Vault that we want to use when authenticating to\n                                  Vault.\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - authInputType\n                              - authPath\n                              - role\n                            type: object\n                          hashicorpVaultSecret:\n                            description: |-\n                              HashicorpVaultSecret is a SecretSource step that requires a Vault token in\n                              the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. 
It\n                              then fetches the requested secrets from Vault for use in the next step.\n                            properties:\n                              fields:\n                                description: |-\n                                  The fields are Vault keys pointing to the secrets passed to the next\n                                  SecretSource step.\n\n                                  Example 1 (TPP, username and password): imagining that you have stored\n                                  the username and password for TPP under the keys \"username\" and\n                                  \"password\", you will want to set this field to `[\"username\",\n                                  \"password\"]`. The username is expected to be given first, the password\n                                  second.\n                                items:\n                                  type: string\n                                type: array\n                              secretPath:\n                                description: |-\n                                  The full HTTP path to the secret in Vault. Example:\n                                  /v1/secret/data/application-team-a/tpp-username-password\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - fields\n                              - secretPath\n                            type: object\n                          secret:\n                            description: |-\n                              Secret is a SecretSource step meant to be the first step. 
It retrieves secret\n                              values from a Kubernetes Secret, and passes them to the next step.\n                            properties:\n                              fields:\n                                description: |-\n                                  The names of the fields we want to extract from the Kubernetes secret.\n                                  These fields are passed to the next step in the chain.\n                                items:\n                                  type: string\n                                type: array\n                              name:\n                                description: The name of the Kubernetes secret.\n                                type: string\n                            required:\n                              - fields\n                              - name\n                            type: object\n                          serviceAccountToken:\n                            description: |-\n                              ServiceAccountToken is a SecretSource step meant to be the first step. It\n                              uses the Kubernetes TokenRequest API to retrieve a token for a given service\n                              account, and passes it to the next step.\n                            properties:\n                              audiences:\n                                description: |-\n                                  Audiences are the intendend audiences of the token. A recipient of a\n                                  token must identify themself with an identifier in the list of\n                                  audiences of the token, and otherwise should reject the token. 
A\n                                  token issued for multiple audiences may be used to authenticate\n                                  against any of the audiences listed but implies a high degree of\n                                  trust between the target audiences.\n                                items:\n                                  type: string\n                                type: array\n                              expirationSeconds:\n                                description: |-\n                                  ExpirationSeconds is the requested duration of validity of the request. The\n                                  token issuer may return a token with a different validity duration so a\n                                  client needs to check the 'expiration' field in a response.\n                                format: int64\n                                type: integer\n                              name:\n                                description: The name of the Kubernetes service account.\n                                type: string\n                            required:\n                              - audiences\n                              - name\n                            type: object\n                          tppOAuth:\n                            description: |-\n                              TPPOAuth is a SecretSource step that authenticates to a TPP server. This\n                              step is meant to be the last step and requires a prior step that depends\n                              on the `authInputType`.\n                            properties:\n                              authInputType:\n                                description: |-\n                                  AuthInputType is the authentication method to be used to authenticate\n                                  with TPP. 
The supported values are \"UsernamePassword\" and \"JWT\".\n                                enum:\n                                  - UsernamePassword\n                                  - JWT\n                                type: string\n                              clientId:\n                                description: ClientID is the clientId used to authenticate with TPP.\n                                type: string\n                              url:\n                                description: |-\n                                  The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs\n                                  https://tpp.example.com and https://tpp.example.com/vedsdk are\n                                  equivalent. The ending `/vedsdk` is optional and is stripped out\n                                  by our client.\n                                  If not set, defaults to the URL defined at the top-level of the\n                                  TPP configuration.\n                                type: string\n                            required:\n                              - authInputType\n                            type: object\n                          vcpOAuth:\n                            description: |-\n                              VCPOAuth is a SecretSource step that authenticates to the\n                              Certificate Manager, SaaS. 
This step is meant to be the last step and requires a prior step\n                              that outputs a JWT token.\n                            properties:\n                              tenantID:\n                                description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS.\n                                type: string\n                            type: object\n                        type: object\n                        x-kubernetes-validations:\n                          - message: must have exactly one field set\n                            rule: '((has(self.secret) ? 1 : 0) + (has(self.serviceAccountToken) ? 1 : 0) + (has(self.hashicorpVaultOAuth) ? 1 : 0) + (has(self.hashicorpVaultSecret) ? 1 : 0) + (has(self.hashicorpVaultLDAP) ? 1 : 0) + (has(self.tppOAuth) ? 1 : 0) + (has(self.vcpOAuth) ? 1 : 0)) == 1'\n                      maxItems: 50\n                      type: array\n                      x-kubernetes-list-type: atomic\n                    url:\n                      description: |-\n                        The URL to connect to the Certificate Manager Self-Hosted instance. The two URLs\n                        https://tpp.example.com and https://tpp.example.com/vedsdk are\n                        equivalent. 
The ending `/vedsdk` is optional and is stripped out by\n                        venafi-connection-lib.\n                      type: string\n                  required:\n                    - url\n                  type: object\n                vaas:\n                  description: 'Deprecated: The ''vaas'' field is deprecated use the field called ''vcp'' instead.'\n                  properties:\n                    accessToken:\n                      description: |-\n                        The list of steps to retrieve the Access Token that will be used to connect\n                        to Certificate Manager, SaaS.\n                      items:\n                        properties:\n                          hashicorpVaultLDAP:\n                            description: |-\n                              HashicorpVaultLDAP is a SecretSource step that requires a Vault token in\n                              the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It\n                              then fetches the requested secrets from Vault for use in the next step.\n                            properties:\n                              ldapPath:\n                                description: |-\n                                  The full HTTP path to the secret in Vault. 
Example:\n                                  /v1/ldap/static-cred/:role_name\n                                  or\n                                  /v1/ldap/creds/:role_name\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - ldapPath\n                            type: object\n                          hashicorpVaultOAuth:\n                            description: |-\n                              HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource\n                              step to provide an OAuth token, which this step uses to authenticate to\n                              Vault. The output of this step is a Vault token. This step allows you to use\n                              the step `HashicorpVaultSecret` afterwards.\n                            properties:\n                              authInputType:\n                                description: |-\n                                  AuthInputType is the authentication method to be used to authenticate\n                                  with HashiCorp Vault. The only supported value is \"OIDC\".\n                                enum:\n                                  - OIDC\n                                type: string\n                              authPath:\n                                description: |-\n                                  The login URL used for obtaining the Vault token. 
Example:\n                                  /v1/auth/oidc/login\n                                type: string\n                              clientId:\n                                description: 'Deprecated: This field does nothing and will be removed in the future.'\n                                type: string\n                              role:\n                                description: |-\n                                  The role defined in Vault that we want to use when authenticating to\n                                  Vault.\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - authInputType\n                              - authPath\n                              - role\n                            type: object\n                          hashicorpVaultSecret:\n                            description: |-\n                              HashicorpVaultSecret is a SecretSource step that requires a Vault token in\n                              the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. 
It\n                              then fetches the requested secrets from Vault for use in the next step.\n                            properties:\n                              fields:\n                                description: |-\n                                  The fields are Vault keys pointing to the secrets passed to the next\n                                  SecretSource step.\n\n                                  Example 1 (TPP, username and password): imagining that you have stored\n                                  the username and password for TPP under the keys \"username\" and\n                                  \"password\", you will want to set this field to `[\"username\",\n                                  \"password\"]`. The username is expected to be given first, the password\n                                  second.\n                                items:\n                                  type: string\n                                type: array\n                              secretPath:\n                                description: |-\n                                  The full HTTP path to the secret in Vault. Example:\n                                  /v1/secret/data/application-team-a/tpp-username-password\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - fields\n                              - secretPath\n                            type: object\n                          secret:\n                            description: |-\n                              Secret is a SecretSource step meant to be the first step. 
It retrieves secret\n                              values from a Kubernetes Secret, and passes them to the next step.\n                            properties:\n                              fields:\n                                description: |-\n                                  The names of the fields we want to extract from the Kubernetes secret.\n                                  These fields are passed to the next step in the chain.\n                                items:\n                                  type: string\n                                type: array\n                              name:\n                                description: The name of the Kubernetes secret.\n                                type: string\n                            required:\n                              - fields\n                              - name\n                            type: object\n                          serviceAccountToken:\n                            description: |-\n                              ServiceAccountToken is a SecretSource step meant to be the first step. It\n                              uses the Kubernetes TokenRequest API to retrieve a token for a given service\n                              account, and passes it to the next step.\n                            properties:\n                              audiences:\n                                description: |-\n                                  Audiences are the intendend audiences of the token. A recipient of a\n                                  token must identify themself with an identifier in the list of\n                                  audiences of the token, and otherwise should reject the token. 
A\n                                  token issued for multiple audiences may be used to authenticate\n                                  against any of the audiences listed but implies a high degree of\n                                  trust between the target audiences.\n                                items:\n                                  type: string\n                                type: array\n                              expirationSeconds:\n                                description: |-\n                                  ExpirationSeconds is the requested duration of validity of the request. The\n                                  token issuer may return a token with a different validity duration so a\n                                  client needs to check the 'expiration' field in a response.\n                                format: int64\n                                type: integer\n                              name:\n                                description: The name of the Kubernetes service account.\n                                type: string\n                            required:\n                              - audiences\n                              - name\n                            type: object\n                          tppOAuth:\n                            description: |-\n                              TPPOAuth is a SecretSource step that authenticates to a TPP server. This\n                              step is meant to be the last step and requires a prior step that depends\n                              on the `authInputType`.\n                            properties:\n                              authInputType:\n                                description: |-\n                                  AuthInputType is the authentication method to be used to authenticate\n                                  with TPP. 
The supported values are \"UsernamePassword\" and \"JWT\".\n                                enum:\n                                  - UsernamePassword\n                                  - JWT\n                                type: string\n                              clientId:\n                                description: ClientID is the clientId used to authenticate with TPP.\n                                type: string\n                              url:\n                                description: |-\n                                  The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs\n                                  https://tpp.example.com and https://tpp.example.com/vedsdk are\n                                  equivalent. The ending `/vedsdk` is optional and is stripped out\n                                  by our client.\n                                  If not set, defaults to the URL defined at the top-level of the\n                                  TPP configuration.\n                                type: string\n                            required:\n                              - authInputType\n                            type: object\n                          vcpOAuth:\n                            description: |-\n                              VCPOAuth is a SecretSource step that authenticates to the\n                              Certificate Manager, SaaS. 
This step is meant to be the last step and requires a prior step\n                              that outputs a JWT token.\n                            properties:\n                              tenantID:\n                                description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS.\n                                type: string\n                            type: object\n                        type: object\n                        x-kubernetes-validations:\n                          - message: must have exactly one field set\n                            rule: '((has(self.secret) ? 1 : 0) + (has(self.serviceAccountToken) ? 1 : 0) + (has(self.hashicorpVaultOAuth) ? 1 : 0) + (has(self.hashicorpVaultSecret) ? 1 : 0) + (has(self.hashicorpVaultLDAP) ? 1 : 0) + (has(self.tppOAuth) ? 1 : 0) + (has(self.vcpOAuth) ? 1 : 0)) == 1'\n                      maxItems: 50\n                      type: array\n                      x-kubernetes-list-type: atomic\n                    apiKey:\n                      description: |-\n                        The list of steps to retrieve the API key that will be used to connect to\n                        Certificate Manager, SaaS.\n                      items:\n                        properties:\n                          hashicorpVaultLDAP:\n                            description: |-\n                              HashicorpVaultLDAP is a SecretSource step that requires a Vault token in\n                              the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It\n                              then fetches the requested secrets from Vault for use in the next step.\n                            properties:\n                              ldapPath:\n                                description: |-\n                                  The full HTTP path to the secret in Vault. 
Example:\n                                  /v1/ldap/static-cred/:role_name\n                                  or\n                                  /v1/ldap/creds/:role_name\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - ldapPath\n                            type: object\n                          hashicorpVaultOAuth:\n                            description: |-\n                              HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource\n                              step to provide an OAuth token, which this step uses to authenticate to\n                              Vault. The output of this step is a Vault token. This step allows you to use\n                              the step `HashicorpVaultSecret` afterwards.\n                            properties:\n                              authInputType:\n                                description: |-\n                                  AuthInputType is the authentication method to be used to authenticate\n                                  with HashiCorp Vault. The only supported value is \"OIDC\".\n                                enum:\n                                  - OIDC\n                                type: string\n                              authPath:\n                                description: |-\n                                  The login URL used for obtaining the Vault token. 
Example:\n                                  /v1/auth/oidc/login\n                                type: string\n                              clientId:\n                                description: 'Deprecated: This field does nothing and will be removed in the future.'\n                                type: string\n                              role:\n                                description: |-\n                                  The role defined in Vault that we want to use when authenticating to\n                                  Vault.\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - authInputType\n                              - authPath\n                              - role\n                            type: object\n                          hashicorpVaultSecret:\n                            description: |-\n                              HashicorpVaultSecret is a SecretSource step that requires a Vault token in\n                              the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. 
It\n                              then fetches the requested secrets from Vault for use in the next step.\n                            properties:\n                              fields:\n                                description: |-\n                                  The fields are Vault keys pointing to the secrets passed to the next\n                                  SecretSource step.\n\n                                  Example 1 (TPP, username and password): imagining that you have stored\n                                  the username and password for TPP under the keys \"username\" and\n                                  \"password\", you will want to set this field to `[\"username\",\n                                  \"password\"]`. The username is expected to be given first, the password\n                                  second.\n                                items:\n                                  type: string\n                                type: array\n                              secretPath:\n                                description: |-\n                                  The full HTTP path to the secret in Vault. Example:\n                                  /v1/secret/data/application-team-a/tpp-username-password\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - fields\n                              - secretPath\n                            type: object\n                          secret:\n                            description: |-\n                              Secret is a SecretSource step meant to be the first step. 
It retrieves secret\n                              values from a Kubernetes Secret, and passes them to the next step.\n                            properties:\n                              fields:\n                                description: |-\n                                  The names of the fields we want to extract from the Kubernetes secret.\n                                  These fields are passed to the next step in the chain.\n                                items:\n                                  type: string\n                                type: array\n                              name:\n                                description: The name of the Kubernetes secret.\n                                type: string\n                            required:\n                              - fields\n                              - name\n                            type: object\n                          serviceAccountToken:\n                            description: |-\n                              ServiceAccountToken is a SecretSource step meant to be the first step. It\n                              uses the Kubernetes TokenRequest API to retrieve a token for a given service\n                              account, and passes it to the next step.\n                            properties:\n                              audiences:\n                                description: |-\n                                  Audiences are the intendend audiences of the token. A recipient of a\n                                  token must identify themself with an identifier in the list of\n                                  audiences of the token, and otherwise should reject the token. 
A\n                                  token issued for multiple audiences may be used to authenticate\n                                  against any of the audiences listed but implies a high degree of\n                                  trust between the target audiences.\n                                items:\n                                  type: string\n                                type: array\n                              expirationSeconds:\n                                description: |-\n                                  ExpirationSeconds is the requested duration of validity of the request. The\n                                  token issuer may return a token with a different validity duration so a\n                                  client needs to check the 'expiration' field in a response.\n                                format: int64\n                                type: integer\n                              name:\n                                description: The name of the Kubernetes service account.\n                                type: string\n                            required:\n                              - audiences\n                              - name\n                            type: object\n                          tppOAuth:\n                            description: |-\n                              TPPOAuth is a SecretSource step that authenticates to a TPP server. This\n                              step is meant to be the last step and requires a prior step that depends\n                              on the `authInputType`.\n                            properties:\n                              authInputType:\n                                description: |-\n                                  AuthInputType is the authentication method to be used to authenticate\n                                  with TPP. 
The supported values are \"UsernamePassword\" and \"JWT\".\n                                enum:\n                                  - UsernamePassword\n                                  - JWT\n                                type: string\n                              clientId:\n                                description: ClientID is the clientId used to authenticate with TPP.\n                                type: string\n                              url:\n                                description: |-\n                                  The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs\n                                  https://tpp.example.com and https://tpp.example.com/vedsdk are\n                                  equivalent. The ending `/vedsdk` is optional and is stripped out\n                                  by our client.\n                                  If not set, defaults to the URL defined at the top-level of the\n                                  TPP configuration.\n                                type: string\n                            required:\n                              - authInputType\n                            type: object\n                          vcpOAuth:\n                            description: |-\n                              VCPOAuth is a SecretSource step that authenticates to the\n                              Certificate Manager, SaaS. 
This step is meant to be the last step and requires a prior step\n                              that outputs a JWT token.\n                            properties:\n                              tenantID:\n                                description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS.\n                                type: string\n                            type: object\n                        type: object\n                        x-kubernetes-validations:\n                          - message: must have exactly one field set\n                            rule: '((has(self.secret) ? 1 : 0) + (has(self.serviceAccountToken) ? 1 : 0) + (has(self.hashicorpVaultOAuth) ? 1 : 0) + (has(self.hashicorpVaultSecret) ? 1 : 0) + (has(self.hashicorpVaultLDAP) ? 1 : 0) + (has(self.tppOAuth) ? 1 : 0) + (has(self.vcpOAuth) ? 1 : 0)) == 1'\n                      maxItems: 50\n                      type: array\n                      x-kubernetes-list-type: atomic\n                    url:\n                      description: |-\n                        The URL to connect to the Certificate Manager, SaaS instance. If not set, the default\n                        value https://api.venafi.cloud is used.\n                      type: string\n                  type: object\n                  x-kubernetes-validations:\n                    - message: 'must have exactly ONE of the following fields set: apiKey or accessToken'\n                      rule: '(has(self.apiKey) ? 1 : 0) + (has(self.accessToken) ? 
1 : 0) == 1'\n                vcp:\n                  properties:\n                    accessToken:\n                      description: |-\n                        The list of steps to retrieve the Access Token that will be used to connect\n                        to Certificate Manager, SaaS.\n                      items:\n                        properties:\n                          hashicorpVaultLDAP:\n                            description: |-\n                              HashicorpVaultLDAP is a SecretSource step that requires a Vault token in\n                              the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It\n                              then fetches the requested secrets from Vault for use in the next step.\n                            properties:\n                              ldapPath:\n                                description: |-\n                                  The full HTTP path to the secret in Vault. Example:\n                                  /v1/ldap/static-cred/:role_name\n                                  or\n                                  /v1/ldap/creds/:role_name\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - ldapPath\n                            type: object\n                          hashicorpVaultOAuth:\n                            description: |-\n                              HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource\n                              step to provide an OAuth token, which this step uses to authenticate to\n                              Vault. The output of this step is a Vault token. 
This step allows you to use\n                              the step `HashicorpVaultSecret` afterwards.\n                            properties:\n                              authInputType:\n                                description: |-\n                                  AuthInputType is the authentication method to be used to authenticate\n                                  with HashiCorp Vault. The only supported value is \"OIDC\".\n                                enum:\n                                  - OIDC\n                                type: string\n                              authPath:\n                                description: |-\n                                  The login URL used for obtaining the Vault token. Example:\n                                  /v1/auth/oidc/login\n                                type: string\n                              clientId:\n                                description: 'Deprecated: This field does nothing and will be removed in the future.'\n                                type: string\n                              role:\n                                description: |-\n                                  The role defined in Vault that we want to use when authenticating to\n                                  Vault.\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - authInputType\n                              - authPath\n                              - role\n                            type: object\n                          hashicorpVaultSecret:\n                            description: |-\n                              HashicorpVaultSecret is a SecretSource step that requires a Vault token in\n                              the previous step, either using a step 
`HashicorpVaultOAuth` or `Secret`. It\n                              then fetches the requested secrets from Vault for use in the next step.\n                            properties:\n                              fields:\n                                description: |-\n                                  The fields are Vault keys pointing to the secrets passed to the next\n                                  SecretSource step.\n\n                                  Example 1 (TPP, username and password): imagining that you have stored\n                                  the username and password for TPP under the keys \"username\" and\n                                  \"password\", you will want to set this field to `[\"username\",\n                                  \"password\"]`. The username is expected to be given first, the password\n                                  second.\n                                items:\n                                  type: string\n                                type: array\n                              secretPath:\n                                description: |-\n                                  The full HTTP path to the secret in Vault. Example:\n                                  /v1/secret/data/application-team-a/tpp-username-password\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - fields\n                              - secretPath\n                            type: object\n                          secret:\n                            description: |-\n                              Secret is a SecretSource step meant to be the first step. 
It retrieves secret\n                              values from a Kubernetes Secret, and passes them to the next step.\n                            properties:\n                              fields:\n                                description: |-\n                                  The names of the fields we want to extract from the Kubernetes secret.\n                                  These fields are passed to the next step in the chain.\n                                items:\n                                  type: string\n                                type: array\n                              name:\n                                description: The name of the Kubernetes secret.\n                                type: string\n                            required:\n                              - fields\n                              - name\n                            type: object\n                          serviceAccountToken:\n                            description: |-\n                              ServiceAccountToken is a SecretSource step meant to be the first step. It\n                              uses the Kubernetes TokenRequest API to retrieve a token for a given service\n                              account, and passes it to the next step.\n                            properties:\n                              audiences:\n                                description: |-\n                                  Audiences are the intendend audiences of the token. A recipient of a\n                                  token must identify themself with an identifier in the list of\n                                  audiences of the token, and otherwise should reject the token. 
A\n                                  token issued for multiple audiences may be used to authenticate\n                                  against any of the audiences listed but implies a high degree of\n                                  trust between the target audiences.\n                                items:\n                                  type: string\n                                type: array\n                              expirationSeconds:\n                                description: |-\n                                  ExpirationSeconds is the requested duration of validity of the request. The\n                                  token issuer may return a token with a different validity duration so a\n                                  client needs to check the 'expiration' field in a response.\n                                format: int64\n                                type: integer\n                              name:\n                                description: The name of the Kubernetes service account.\n                                type: string\n                            required:\n                              - audiences\n                              - name\n                            type: object\n                          tppOAuth:\n                            description: |-\n                              TPPOAuth is a SecretSource step that authenticates to a TPP server. This\n                              step is meant to be the last step and requires a prior step that depends\n                              on the `authInputType`.\n                            properties:\n                              authInputType:\n                                description: |-\n                                  AuthInputType is the authentication method to be used to authenticate\n                                  with TPP. 
The supported values are \"UsernamePassword\" and \"JWT\".\n                                enum:\n                                  - UsernamePassword\n                                  - JWT\n                                type: string\n                              clientId:\n                                description: ClientID is the clientId used to authenticate with TPP.\n                                type: string\n                              url:\n                                description: |-\n                                  The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs\n                                  https://tpp.example.com and https://tpp.example.com/vedsdk are\n                                  equivalent. The ending `/vedsdk` is optional and is stripped out\n                                  by our client.\n                                  If not set, defaults to the URL defined at the top-level of the\n                                  TPP configuration.\n                                type: string\n                            required:\n                              - authInputType\n                            type: object\n                          vcpOAuth:\n                            description: |-\n                              VCPOAuth is a SecretSource step that authenticates to the\n                              Certificate Manager, SaaS. 
This step is meant to be the last step and requires a prior step\n                              that outputs a JWT token.\n                            properties:\n                              tenantID:\n                                description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS.\n                                type: string\n                            type: object\n                        type: object\n                        x-kubernetes-validations:\n                          - message: must have exactly one field set\n                            rule: '((has(self.secret) ? 1 : 0) + (has(self.serviceAccountToken) ? 1 : 0) + (has(self.hashicorpVaultOAuth) ? 1 : 0) + (has(self.hashicorpVaultSecret) ? 1 : 0) + (has(self.hashicorpVaultLDAP) ? 1 : 0) + (has(self.tppOAuth) ? 1 : 0) + (has(self.vcpOAuth) ? 1 : 0)) == 1'\n                      maxItems: 50\n                      type: array\n                      x-kubernetes-list-type: atomic\n                    apiKey:\n                      description: |-\n                        The list of steps to retrieve the API key that will be used to connect to\n                        Certificate Manager, SaaS.\n                      items:\n                        properties:\n                          hashicorpVaultLDAP:\n                            description: |-\n                              HashicorpVaultLDAP is a SecretSource step that requires a Vault token in\n                              the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It\n                              then fetches the requested secrets from Vault for use in the next step.\n                            properties:\n                              ldapPath:\n                                description: |-\n                                  The full HTTP path to the secret in Vault. 
Example:\n                                  /v1/ldap/static-cred/:role_name\n                                  or\n                                  /v1/ldap/creds/:role_name\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - ldapPath\n                            type: object\n                          hashicorpVaultOAuth:\n                            description: |-\n                              HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource\n                              step to provide an OAuth token, which this step uses to authenticate to\n                              Vault. The output of this step is a Vault token. This step allows you to use\n                              the step `HashicorpVaultSecret` afterwards.\n                            properties:\n                              authInputType:\n                                description: |-\n                                  AuthInputType is the authentication method to be used to authenticate\n                                  with HashiCorp Vault. The only supported value is \"OIDC\".\n                                enum:\n                                  - OIDC\n                                type: string\n                              authPath:\n                                description: |-\n                                  The login URL used for obtaining the Vault token. 
Example:\n                                  /v1/auth/oidc/login\n                                type: string\n                              clientId:\n                                description: 'Deprecated: This field does nothing and will be removed in the future.'\n                                type: string\n                              role:\n                                description: |-\n                                  The role defined in Vault that we want to use when authenticating to\n                                  Vault.\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - authInputType\n                              - authPath\n                              - role\n                            type: object\n                          hashicorpVaultSecret:\n                            description: |-\n                              HashicorpVaultSecret is a SecretSource step that requires a Vault token in\n                              the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. 
It\n                              then fetches the requested secrets from Vault for use in the next step.\n                            properties:\n                              fields:\n                                description: |-\n                                  The fields are Vault keys pointing to the secrets passed to the next\n                                  SecretSource step.\n\n                                  Example 1 (TPP, username and password): imagining that you have stored\n                                  the username and password for TPP under the keys \"username\" and\n                                  \"password\", you will want to set this field to `[\"username\",\n                                  \"password\"]`. The username is expected to be given first, the password\n                                  second.\n                                items:\n                                  type: string\n                                type: array\n                              secretPath:\n                                description: |-\n                                  The full HTTP path to the secret in Vault. Example:\n                                  /v1/secret/data/application-team-a/tpp-username-password\n                                type: string\n                              url:\n                                description: The URL to connect to your HashiCorp Vault instance.\n                                type: string\n                            required:\n                              - fields\n                              - secretPath\n                            type: object\n                          secret:\n                            description: |-\n                              Secret is a SecretSource step meant to be the first step. 
It retrieves secret\n                              values from a Kubernetes Secret, and passes them to the next step.\n                            properties:\n                              fields:\n                                description: |-\n                                  The names of the fields we want to extract from the Kubernetes secret.\n                                  These fields are passed to the next step in the chain.\n                                items:\n                                  type: string\n                                type: array\n                              name:\n                                description: The name of the Kubernetes secret.\n                                type: string\n                            required:\n                              - fields\n                              - name\n                            type: object\n                          serviceAccountToken:\n                            description: |-\n                              ServiceAccountToken is a SecretSource step meant to be the first step. It\n                              uses the Kubernetes TokenRequest API to retrieve a token for a given service\n                              account, and passes it to the next step.\n                            properties:\n                              audiences:\n                                description: |-\n                                  Audiences are the intendend audiences of the token. A recipient of a\n                                  token must identify themself with an identifier in the list of\n                                  audiences of the token, and otherwise should reject the token. 
A\n                                  token issued for multiple audiences may be used to authenticate\n                                  against any of the audiences listed but implies a high degree of\n                                  trust between the target audiences.\n                                items:\n                                  type: string\n                                type: array\n                              expirationSeconds:\n                                description: |-\n                                  ExpirationSeconds is the requested duration of validity of the request. The\n                                  token issuer may return a token with a different validity duration so a\n                                  client needs to check the 'expiration' field in a response.\n                                format: int64\n                                type: integer\n                              name:\n                                description: The name of the Kubernetes service account.\n                                type: string\n                            required:\n                              - audiences\n                              - name\n                            type: object\n                          tppOAuth:\n                            description: |-\n                              TPPOAuth is a SecretSource step that authenticates to a TPP server. This\n                              step is meant to be the last step and requires a prior step that depends\n                              on the `authInputType`.\n                            properties:\n                              authInputType:\n                                description: |-\n                                  AuthInputType is the authentication method to be used to authenticate\n                                  with TPP. 
The supported values are \"UsernamePassword\" and \"JWT\".\n                                enum:\n                                  - UsernamePassword\n                                  - JWT\n                                type: string\n                              clientId:\n                                description: ClientID is the clientId used to authenticate with TPP.\n                                type: string\n                              url:\n                                description: |-\n                                  The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs\n                                  https://tpp.example.com and https://tpp.example.com/vedsdk are\n                                  equivalent. The ending `/vedsdk` is optional and is stripped out\n                                  by our client.\n                                  If not set, defaults to the URL defined at the top-level of the\n                                  TPP configuration.\n                                type: string\n                            required:\n                              - authInputType\n                            type: object\n                          vcpOAuth:\n                            description: |-\n                              VCPOAuth is a SecretSource step that authenticates to the\n                              Certificate Manager, SaaS. 
This step is meant to be the last step and requires a prior step\n                              that outputs a JWT token.\n                            properties:\n                              tenantID:\n                                description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS.\n                                type: string\n                            type: object\n                        type: object\n                        x-kubernetes-validations:\n                          - message: must have exactly one field set\n                            rule: '((has(self.secret) ? 1 : 0) + (has(self.serviceAccountToken) ? 1 : 0) + (has(self.hashicorpVaultOAuth) ? 1 : 0) + (has(self.hashicorpVaultSecret) ? 1 : 0) + (has(self.hashicorpVaultLDAP) ? 1 : 0) + (has(self.tppOAuth) ? 1 : 0) + (has(self.vcpOAuth) ? 1 : 0)) == 1'\n                      maxItems: 50\n                      type: array\n                      x-kubernetes-list-type: atomic\n                    url:\n                      description: |-\n                        The URL to connect to the Certificate Manager, SaaS instance. If not set, the default\n                        value https://api.venafi.cloud is used.\n                      type: string\n                  type: object\n                  x-kubernetes-validations:\n                    - message: 'must have exactly ONE of the following fields set: apiKey or accessToken'\n                      rule: '(has(self.apiKey) ? 1 : 0) + (has(self.accessToken) ? 1 : 0) == 1'\n              type: object\n              x-kubernetes-validations:\n                - message: 'must have exactly ONE of the following fields set: tpp or vcp'\n                  rule: '(has(self.tpp) ? 1 : 0) + (has(self.vaas) ? 1 : 0) + (has(self.vcp) ? 1 : 0) + (has(self.firefly) ? 
1 : 0) == 1'\n            status:\n              properties:\n                conditions:\n                  description: List of status conditions to indicate the status of a VenafiConnection.\n                  items:\n                    description: ConnectionCondition contains condition information for a VenafiConnection.\n                    properties:\n                      lastTransitionTime:\n                        description: |-\n                          LastTransitionTime is the timestamp corresponding to the last status\n                          change of this condition.\n                        format: date-time\n                        type: string\n                      lastUpdateTime:\n                        description: lastUpdateTime is the time of the last update to this condition\n                        format: date-time\n                        type: string\n                      message:\n                        description: |-\n                          Message is a human readable description of the details of the last\n                          transition, complementing reason.\n                        type: string\n                      observedGeneration:\n                        description: |-\n                          If set, this represents the .metadata.generation that the condition was\n                          set based upon.\n                          For instance, if .metadata.generation is currently 12, but the\n                          .status.condition[x].observedGeneration is 9, the condition is out of date\n                          with respect to the current state of the Issuer.\n                        format: int64\n                        type: integer\n                      reason:\n                        description: |-\n                          Reason is a brief machine readable explanation for the condition's last\n                          transition.\n                        type: string\n               
       status:\n                        description: Status of the condition, one of (`True`, `False`, `Unknown`).\n                        type: string\n                      tokenValidUntil:\n                        description: |-\n                          The ValidUntil time of the token used to authenticate with the\n                          Certificate Manager, SaaS.\n                        format: date-time\n                        type: string\n                      type:\n                        description: |-\n                          Type of the condition, should be a combination of the unique name of the\n                          operator and the type of condition.\n                          eg. `VenafiEnhancedIssuerReady`\n                        type: string\n                    required:\n                      - status\n                      - type\n                    type: object\n                  type: array\n                  x-kubernetes-list-map-keys:\n                    - type\n                  x-kubernetes-list-type: map\n              type: object\n          required:\n            - metadata\n            - spec\n          type: object\n      served: true\n      storage: true\n      subresources:\n        status: {}\n{{ end }}\n{{ end }}\n"
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/templates/venafi-connection-rbac.yaml",
    "content": "{{- if .Values.crds.venafiConnection.include }}\n# The 'venafi-connection' service account is used by multiple\n# controllers. When configuring which resources a VenafiConnection\n# can access, the RBAC rules you create manually must point to this SA.\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: venafi-connection\n  namespace: {{ $.Release.Namespace | quote }}\n  labels:\n  {{- include \"venafi-connection.labels\" $ | nindent 4 }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: venafi-connection-role\n  labels:\n  {{- include \"venafi-connection.labels\" $ | nindent 4 }}\nrules:\n- apiGroups: [ \"\" ]\n  resources: [ \"namespaces\" ]\n  verbs: [ \"get\", \"list\", \"watch\" ]\n\n- apiGroups: [ \"jetstack.io\" ]\n  resources: [ \"venaficonnections\" ]\n  verbs: [ \"get\", \"list\", \"watch\" ]\n\n- apiGroups: [ \"jetstack.io\" ]\n  resources: [ \"venaficonnections/status\" ]\n  verbs: [ \"get\", \"patch\" ]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: venafi-connection-rolebinding\n  labels:\n  {{- include \"venafi-connection.labels\" $ | nindent 4 }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: venafi-connection-role\nsubjects:\n- kind: ServiceAccount\n  name: venafi-connection\n  namespace: {{ $.Release.Namespace | quote }}\n{{- end }}\n"
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/templates/venafi-rbac.yaml",
    "content": "{{- if .Values.authentication.venafiConnection.enabled }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: venafi-kubernetes-agent-impersonate-role\n  namespace: {{ $.Release.Namespace | quote }}\n  labels:\n  {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nrules:\n- apiGroups: [ \"\" ]\n  resources: [ \"serviceaccounts\" ]\n  verbs: [ \"impersonate\" ]\n  resourceNames: [ \"venafi-connection\" ]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: venafi-kubernetes-agent-impersonate-rolebinding\n  namespace: {{ $.Release.Namespace | quote }}\n  labels:\n  {{- include \"venafi-kubernetes-agent.labels\" . | nindent 4 }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: venafi-kubernetes-agent-impersonate-role\nsubjects:\n- kind: ServiceAccount\n  name: {{ include \"venafi-kubernetes-agent.serviceAccountName\" . }}\n  namespace: {{ $.Release.Namespace | quote }}\n{{- end }}\n"
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/tests/__snapshot__/configmap_test.yaml.snap",
    "content": "custom-cluster-description:\n  1: |\n    raw: |\n      - Check the credentials Secret exists: \"agent-credentials\"\n      > kubectl get secret -n test-ns agent-credentials\n      - Check the application is running:\n      > kubectl get pods -n test-ns -l app.kubernetes.io/instance=test\n\n      - Check the application logs for successful connection to the platform:\n      > kubectl logs -n test-ns -l app.kubernetes.io/instance=test\n  2: |\n    apiVersion: v1\n    data:\n      config.yaml: |-\n        cluster_name: \"\"\n        cluster_description: \"A cloud hosted Kubernetes cluster hosting production workloads.\\n\\nteam: team-1\\nemail: team-1@example.com\\npurpose: Production workloads\\n\"\n        server: \"https://api.venafi.cloud/\"\n        period: \"0h1m0s\"\n        venafi-cloud:\n          uploader_id: \"no\"\n          upload_path: \"/v1/tlspk/upload/clusterdata\"\n        data-gatherers:\n        # gather k8s apiserver version information\n        - kind: \"k8s-discovery\"\n          name: \"k8s-discovery\"\n        # pods data is used in the pods and application_versions packages\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/pods\"\n          config:\n            resource-type:\n              resource: pods\n              version: v1\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/namespaces\"\n          config:\n            resource-type:\n              resource: namespaces\n              version: v1\n        # gather services for pod readiness probe rules\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/services\"\n          config:\n            resource-type:\n              resource: services\n              version: v1\n        # gather higher level resources to ensure data to determine ownership is present\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/deployments\"\n          config:\n            resource-type:\n              version: v1\n              resource: deployments\n              
group: apps\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/statefulsets\"\n          config:\n            resource-type:\n              version: v1\n              resource: statefulsets\n              group: apps\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/daemonsets\"\n          config:\n            resource-type:\n              version: v1\n              resource: daemonsets\n              group: apps\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/jobs\"\n          config:\n            resource-type:\n              version: v1\n              resource: jobs\n              group: batch\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/cronjobs\"\n          config:\n            resource-type:\n              version: v1\n              resource: cronjobs\n              group: batch\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/ingresses\"\n          config:\n            resource-type:\n              group: networking.k8s.io\n              version: v1\n              resource: ingresses\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/secrets\"\n          config:\n            resource-type:\n              version: v1\n              resource: secrets\n            field-selectors:\n            - type!=kubernetes.io/service-account-token\n            - type!=kubernetes.io/dockercfg\n            - type!=kubernetes.io/dockerconfigjson\n            - type!=kubernetes.io/basic-auth\n            - type!=kubernetes.io/ssh-auth\n            - type!=bootstrap.kubernetes.io/token\n            - type!=helm.sh/release.v1\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/certificates\"\n          config:\n            resource-type:\n              group: cert-manager.io\n              version: v1\n              resource: certificates\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/certificaterequests\"\n          config:\n            resource-type:\n              group: cert-manager.io\n              version: v1\n       
       resource: certificaterequests\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/issuers\"\n          config:\n            resource-type:\n              group: cert-manager.io\n              version: v1\n              resource: issuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/clusterissuers\"\n          config:\n            resource-type:\n              group: cert-manager.io\n              version: v1\n              resource: clusterissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/googlecasissuers\"\n          config:\n            resource-type:\n              group: cas-issuer.jetstack.io\n              version: v1beta1\n              resource: googlecasissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/googlecasclusterissuers\"\n          config:\n            resource-type:\n              group: cas-issuer.jetstack.io\n              version: v1beta1\n              resource: googlecasclusterissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/awspcaissuer\"\n          config:\n            resource-type:\n              group: awspca.cert-manager.io\n              version: v1beta1\n              resource: awspcaissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/awspcaclusterissuers\"\n          config:\n            resource-type:\n              group: awspca.cert-manager.io\n              version: v1beta1\n              resource: awspcaclusterissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/mutatingwebhookconfigurations\"\n          config:\n            resource-type:\n              group: admissionregistration.k8s.io\n              version: v1\n              resource: mutatingwebhookconfigurations\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/validatingwebhookconfigurations\"\n          config:\n            resource-type:\n              group: admissionregistration.k8s.io\n              version: v1\n              resource: validatingwebhookconfigurations\n       
 - kind: \"k8s-dynamic\"\n          name: \"k8s/gateways\"\n          config:\n            resource-type:\n              group: networking.istio.io\n              version: v1alpha3\n              resource: gateways\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/virtualservices\"\n          config:\n            resource-type:\n              group: networking.istio.io\n              version: v1alpha3\n              resource: virtualservices\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/routes\"\n          config:\n            resource-type:\n              version: v1\n              group: route.openshift.io\n              resource: routes\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/venaficonnections\"\n          config:\n            resource-type:\n              group: jetstack.io\n              version: v1alpha1\n              resource: venaficonnections\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/venaficlusterissuers\"\n          config:\n            resource-type:\n              group: jetstack.io\n              version: v1alpha1\n              resource: venaficlusterissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/venafiissuers\"\n          config:\n            resource-type:\n              group: jetstack.io\n              version: v1alpha1\n              resource: venafiissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/fireflyissuers\"\n          config:\n            resource-type:\n              group: firefly.venafi.com\n              version: v1\n              resource: issuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/stepissuers\"\n          config:\n            resource-type:\n              group: certmanager.step.sm\n              version: v1beta1\n              resource: stepissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/stepclusterissuers\"\n          config:\n            resource-type:\n              group: certmanager.step.sm\n              version: 
v1beta1\n              resource: stepclusterissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/originissuers\"\n          config:\n            resource-type:\n              group: cert-manager.k8s.cloudflare.com\n              version: v1\n              resource: originissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/clusteroriginissuers\"\n          config:\n            resource-type:\n              group: cert-manager.k8s.cloudflare.com\n              version: v1\n              resource: clusteroriginissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/freeipaissuers\"\n          config:\n            resource-type:\n              group: certmanager.freeipa.org\n              version: v1beta1\n              resource: issuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/freeipaclusterissuers\"\n          config:\n            resource-type:\n              group: certmanager.freeipa.org\n              version: v1beta1\n              resource: clusterissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/ejbcaissuers\"\n          config:\n            resource-type:\n              group: ejbca-issuer.keyfactor.com\n              version: v1alpha1\n              resource: issuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/ejbcaclusterissuers\"\n          config:\n            resource-type:\n              group: ejbca-issuer.keyfactor.com\n              version: v1alpha1\n              resource: clusterissuers\n    kind: ConfigMap\n    metadata:\n      labels:\n        app.kubernetes.io/instance: test\n        app.kubernetes.io/managed-by: Helm\n        app.kubernetes.io/name: venafi-kubernetes-agent\n        app.kubernetes.io/version: v0.0.0\n        helm.sh/chart: venafi-kubernetes-agent-0.0.0\n      name: agent-config\n      namespace: test-ns\ncustom-cluster-name:\n  1: |\n    raw: |\n      - Check the credentials Secret exists: \"agent-credentials\"\n      > kubectl get secret -n test-ns 
agent-credentials\n      - Check the application is running:\n      > kubectl get pods -n test-ns -l app.kubernetes.io/instance=test\n\n      - Check the application logs for successful connection to the platform:\n      > kubectl logs -n test-ns -l app.kubernetes.io/instance=test\n  2: |\n    apiVersion: v1\n    data:\n      config.yaml: |-\n        cluster_name: \"cluster-1 region-1 cloud-1 \"\n        cluster_description: \"\"\n        server: \"https://api.venafi.cloud/\"\n        period: \"0h1m0s\"\n        venafi-cloud:\n          uploader_id: \"no\"\n          upload_path: \"/v1/tlspk/upload/clusterdata\"\n        data-gatherers:\n        # gather k8s apiserver version information\n        - kind: \"k8s-discovery\"\n          name: \"k8s-discovery\"\n        # pods data is used in the pods and application_versions packages\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/pods\"\n          config:\n            resource-type:\n              resource: pods\n              version: v1\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/namespaces\"\n          config:\n            resource-type:\n              resource: namespaces\n              version: v1\n        # gather services for pod readiness probe rules\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/services\"\n          config:\n            resource-type:\n              resource: services\n              version: v1\n        # gather higher level resources to ensure data to determine ownership is present\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/deployments\"\n          config:\n            resource-type:\n              version: v1\n              resource: deployments\n              group: apps\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/statefulsets\"\n          config:\n            resource-type:\n              version: v1\n              resource: statefulsets\n              group: apps\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/daemonsets\"\n  
        config:\n            resource-type:\n              version: v1\n              resource: daemonsets\n              group: apps\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/jobs\"\n          config:\n            resource-type:\n              version: v1\n              resource: jobs\n              group: batch\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/cronjobs\"\n          config:\n            resource-type:\n              version: v1\n              resource: cronjobs\n              group: batch\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/ingresses\"\n          config:\n            resource-type:\n              group: networking.k8s.io\n              version: v1\n              resource: ingresses\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/secrets\"\n          config:\n            resource-type:\n              version: v1\n              resource: secrets\n            field-selectors:\n            - type!=kubernetes.io/service-account-token\n            - type!=kubernetes.io/dockercfg\n            - type!=kubernetes.io/dockerconfigjson\n            - type!=kubernetes.io/basic-auth\n            - type!=kubernetes.io/ssh-auth\n            - type!=bootstrap.kubernetes.io/token\n            - type!=helm.sh/release.v1\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/certificates\"\n          config:\n            resource-type:\n              group: cert-manager.io\n              version: v1\n              resource: certificates\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/certificaterequests\"\n          config:\n            resource-type:\n              group: cert-manager.io\n              version: v1\n              resource: certificaterequests\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/issuers\"\n          config:\n            resource-type:\n              group: cert-manager.io\n              version: v1\n              resource: issuers\n        - kind: \"k8s-dynamic\"\n          
name: \"k8s/clusterissuers\"\n          config:\n            resource-type:\n              group: cert-manager.io\n              version: v1\n              resource: clusterissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/googlecasissuers\"\n          config:\n            resource-type:\n              group: cas-issuer.jetstack.io\n              version: v1beta1\n              resource: googlecasissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/googlecasclusterissuers\"\n          config:\n            resource-type:\n              group: cas-issuer.jetstack.io\n              version: v1beta1\n              resource: googlecasclusterissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/awspcaissuer\"\n          config:\n            resource-type:\n              group: awspca.cert-manager.io\n              version: v1beta1\n              resource: awspcaissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/awspcaclusterissuers\"\n          config:\n            resource-type:\n              group: awspca.cert-manager.io\n              version: v1beta1\n              resource: awspcaclusterissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/mutatingwebhookconfigurations\"\n          config:\n            resource-type:\n              group: admissionregistration.k8s.io\n              version: v1\n              resource: mutatingwebhookconfigurations\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/validatingwebhookconfigurations\"\n          config:\n            resource-type:\n              group: admissionregistration.k8s.io\n              version: v1\n              resource: validatingwebhookconfigurations\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/gateways\"\n          config:\n            resource-type:\n              group: networking.istio.io\n              version: v1alpha3\n              resource: gateways\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/virtualservices\"\n  
        config:\n            resource-type:\n              group: networking.istio.io\n              version: v1alpha3\n              resource: virtualservices\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/routes\"\n          config:\n            resource-type:\n              version: v1\n              group: route.openshift.io\n              resource: routes\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/venaficonnections\"\n          config:\n            resource-type:\n              group: jetstack.io\n              version: v1alpha1\n              resource: venaficonnections\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/venaficlusterissuers\"\n          config:\n            resource-type:\n              group: jetstack.io\n              version: v1alpha1\n              resource: venaficlusterissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/venafiissuers\"\n          config:\n            resource-type:\n              group: jetstack.io\n              version: v1alpha1\n              resource: venafiissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/fireflyissuers\"\n          config:\n            resource-type:\n              group: firefly.venafi.com\n              version: v1\n              resource: issuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/stepissuers\"\n          config:\n            resource-type:\n              group: certmanager.step.sm\n              version: v1beta1\n              resource: stepissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/stepclusterissuers\"\n          config:\n            resource-type:\n              group: certmanager.step.sm\n              version: v1beta1\n              resource: stepclusterissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/originissuers\"\n          config:\n            resource-type:\n              group: cert-manager.k8s.cloudflare.com\n              version: v1\n              resource: 
originissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/clusteroriginissuers\"\n          config:\n            resource-type:\n              group: cert-manager.k8s.cloudflare.com\n              version: v1\n              resource: clusteroriginissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/freeipaissuers\"\n          config:\n            resource-type:\n              group: certmanager.freeipa.org\n              version: v1beta1\n              resource: issuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/freeipaclusterissuers\"\n          config:\n            resource-type:\n              group: certmanager.freeipa.org\n              version: v1beta1\n              resource: clusterissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/ejbcaissuers\"\n          config:\n            resource-type:\n              group: ejbca-issuer.keyfactor.com\n              version: v1alpha1\n              resource: issuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/ejbcaclusterissuers\"\n          config:\n            resource-type:\n              group: ejbca-issuer.keyfactor.com\n              version: v1alpha1\n              resource: clusterissuers\n    kind: ConfigMap\n    metadata:\n      labels:\n        app.kubernetes.io/instance: test\n        app.kubernetes.io/managed-by: Helm\n        app.kubernetes.io/name: venafi-kubernetes-agent\n        app.kubernetes.io/version: v0.0.0\n        helm.sh/chart: venafi-kubernetes-agent-0.0.0\n      name: agent-config\n      namespace: test-ns\ncustom-configmap:\n  1: |\n    |\n      You are using a custom configuration in the following ConfigMap: \"agent-custom-config\".\n\n      DEPRECATION: The `cluster_id` configuration field is deprecated.\n      If your configuration contains `cluster_id`, it will continue to work as a\n      fallback, but please migrate to `cluster_name` to avoid ambiguity.\n      - Check the credentials Secret exists: \"agent-credentials\"\n    
  > kubectl get secret -n test-ns agent-credentials\n      - Check the application is running:\n      > kubectl get pods -n test-ns -l app.kubernetes.io/instance=test\n\n      - Check the application logs for successful connection to the platform:\n      > kubectl logs -n test-ns -l app.kubernetes.io/instance=test\ncustom-period:\n  1: |\n    raw: |\n      - Check the credentials Secret exists: \"agent-credentials\"\n      > kubectl get secret -n test-ns agent-credentials\n      - Check the application is running:\n      > kubectl get pods -n test-ns -l app.kubernetes.io/instance=test\n\n      - Check the application logs for successful connection to the platform:\n      > kubectl logs -n test-ns -l app.kubernetes.io/instance=test\n  2: |\n    apiVersion: v1\n    data:\n      config.yaml: |-\n        cluster_name: \"\"\n        cluster_description: \"\"\n        server: \"https://api.venafi.cloud/\"\n        period: \"1m\"\n        venafi-cloud:\n          uploader_id: \"no\"\n          upload_path: \"/v1/tlspk/upload/clusterdata\"\n        data-gatherers:\n        # gather k8s apiserver version information\n        - kind: \"k8s-discovery\"\n          name: \"k8s-discovery\"\n        # pods data is used in the pods and application_versions packages\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/pods\"\n          config:\n            resource-type:\n              resource: pods\n              version: v1\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/namespaces\"\n          config:\n            resource-type:\n              resource: namespaces\n              version: v1\n        # gather services for pod readiness probe rules\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/services\"\n          config:\n            resource-type:\n              resource: services\n              version: v1\n        # gather higher level resources to ensure data to determine ownership is present\n        - kind: \"k8s-dynamic\"\n          name: 
\"k8s/deployments\"\n          config:\n            resource-type:\n              version: v1\n              resource: deployments\n              group: apps\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/statefulsets\"\n          config:\n            resource-type:\n              version: v1\n              resource: statefulsets\n              group: apps\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/daemonsets\"\n          config:\n            resource-type:\n              version: v1\n              resource: daemonsets\n              group: apps\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/jobs\"\n          config:\n            resource-type:\n              version: v1\n              resource: jobs\n              group: batch\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/cronjobs\"\n          config:\n            resource-type:\n              version: v1\n              resource: cronjobs\n              group: batch\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/ingresses\"\n          config:\n            resource-type:\n              group: networking.k8s.io\n              version: v1\n              resource: ingresses\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/secrets\"\n          config:\n            resource-type:\n              version: v1\n              resource: secrets\n            field-selectors:\n            - type!=kubernetes.io/service-account-token\n            - type!=kubernetes.io/dockercfg\n            - type!=kubernetes.io/dockerconfigjson\n            - type!=kubernetes.io/basic-auth\n            - type!=kubernetes.io/ssh-auth\n            - type!=bootstrap.kubernetes.io/token\n            - type!=helm.sh/release.v1\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/certificates\"\n          config:\n            resource-type:\n              group: cert-manager.io\n              version: v1\n              resource: certificates\n        - kind: \"k8s-dynamic\"\n          name: 
\"k8s/certificaterequests\"\n          config:\n            resource-type:\n              group: cert-manager.io\n              version: v1\n              resource: certificaterequests\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/issuers\"\n          config:\n            resource-type:\n              group: cert-manager.io\n              version: v1\n              resource: issuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/clusterissuers\"\n          config:\n            resource-type:\n              group: cert-manager.io\n              version: v1\n              resource: clusterissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/googlecasissuers\"\n          config:\n            resource-type:\n              group: cas-issuer.jetstack.io\n              version: v1beta1\n              resource: googlecasissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/googlecasclusterissuers\"\n          config:\n            resource-type:\n              group: cas-issuer.jetstack.io\n              version: v1beta1\n              resource: googlecasclusterissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/awspcaissuer\"\n          config:\n            resource-type:\n              group: awspca.cert-manager.io\n              version: v1beta1\n              resource: awspcaissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/awspcaclusterissuers\"\n          config:\n            resource-type:\n              group: awspca.cert-manager.io\n              version: v1beta1\n              resource: awspcaclusterissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/mutatingwebhookconfigurations\"\n          config:\n            resource-type:\n              group: admissionregistration.k8s.io\n              version: v1\n              resource: mutatingwebhookconfigurations\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/validatingwebhookconfigurations\"\n          config:\n            
resource-type:\n              group: admissionregistration.k8s.io\n              version: v1\n              resource: validatingwebhookconfigurations\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/gateways\"\n          config:\n            resource-type:\n              group: networking.istio.io\n              version: v1alpha3\n              resource: gateways\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/virtualservices\"\n          config:\n            resource-type:\n              group: networking.istio.io\n              version: v1alpha3\n              resource: virtualservices\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/routes\"\n          config:\n            resource-type:\n              version: v1\n              group: route.openshift.io\n              resource: routes\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/venaficonnections\"\n          config:\n            resource-type:\n              group: jetstack.io\n              version: v1alpha1\n              resource: venaficonnections\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/venaficlusterissuers\"\n          config:\n            resource-type:\n              group: jetstack.io\n              version: v1alpha1\n              resource: venaficlusterissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/venafiissuers\"\n          config:\n            resource-type:\n              group: jetstack.io\n              version: v1alpha1\n              resource: venafiissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/fireflyissuers\"\n          config:\n            resource-type:\n              group: firefly.venafi.com\n              version: v1\n              resource: issuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/stepissuers\"\n          config:\n            resource-type:\n              group: certmanager.step.sm\n              version: v1beta1\n              resource: stepissuers\n        - kind: 
\"k8s-dynamic\"\n          name: \"k8s/stepclusterissuers\"\n          config:\n            resource-type:\n              group: certmanager.step.sm\n              version: v1beta1\n              resource: stepclusterissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/originissuers\"\n          config:\n            resource-type:\n              group: cert-manager.k8s.cloudflare.com\n              version: v1\n              resource: originissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/clusteroriginissuers\"\n          config:\n            resource-type:\n              group: cert-manager.k8s.cloudflare.com\n              version: v1\n              resource: clusteroriginissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/freeipaissuers\"\n          config:\n            resource-type:\n              group: certmanager.freeipa.org\n              version: v1beta1\n              resource: issuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/freeipaclusterissuers\"\n          config:\n            resource-type:\n              group: certmanager.freeipa.org\n              version: v1beta1\n              resource: clusterissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/ejbcaissuers\"\n          config:\n            resource-type:\n              group: ejbca-issuer.keyfactor.com\n              version: v1alpha1\n              resource: issuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/ejbcaclusterissuers\"\n          config:\n            resource-type:\n              group: ejbca-issuer.keyfactor.com\n              version: v1alpha1\n              resource: clusterissuers\n    kind: ConfigMap\n    metadata:\n      labels:\n        app.kubernetes.io/instance: test\n        app.kubernetes.io/managed-by: Helm\n        app.kubernetes.io/name: venafi-kubernetes-agent\n        app.kubernetes.io/version: v0.0.0\n        helm.sh/chart: venafi-kubernetes-agent-0.0.0\n      name: agent-config\n      
namespace: test-ns\ndefaults:\n  1: |\n    raw: |\n      - Check the credentials Secret exists: \"agent-credentials\"\n      > kubectl get secret -n test-ns agent-credentials\n      - Check the application is running:\n      > kubectl get pods -n test-ns -l app.kubernetes.io/instance=test\n\n      - Check the application logs for successful connection to the platform:\n      > kubectl logs -n test-ns -l app.kubernetes.io/instance=test\n  2: |\n    apiVersion: v1\n    data:\n      config.yaml: |-\n        cluster_name: \"\"\n        cluster_description: \"\"\n        server: \"https://api.venafi.cloud/\"\n        period: \"0h1m0s\"\n        venafi-cloud:\n          uploader_id: \"no\"\n          upload_path: \"/v1/tlspk/upload/clusterdata\"\n        data-gatherers:\n        # gather k8s apiserver version information\n        - kind: \"k8s-discovery\"\n          name: \"k8s-discovery\"\n        # pods data is used in the pods and application_versions packages\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/pods\"\n          config:\n            resource-type:\n              resource: pods\n              version: v1\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/namespaces\"\n          config:\n            resource-type:\n              resource: namespaces\n              version: v1\n        # gather services for pod readiness probe rules\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/services\"\n          config:\n            resource-type:\n              resource: services\n              version: v1\n        # gather higher level resources to ensure data to determine ownership is present\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/deployments\"\n          config:\n            resource-type:\n              version: v1\n              resource: deployments\n              group: apps\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/statefulsets\"\n          config:\n            resource-type:\n              version: v1\n      
        resource: statefulsets\n              group: apps\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/daemonsets\"\n          config:\n            resource-type:\n              version: v1\n              resource: daemonsets\n              group: apps\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/jobs\"\n          config:\n            resource-type:\n              version: v1\n              resource: jobs\n              group: batch\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/cronjobs\"\n          config:\n            resource-type:\n              version: v1\n              resource: cronjobs\n              group: batch\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/ingresses\"\n          config:\n            resource-type:\n              group: networking.k8s.io\n              version: v1\n              resource: ingresses\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/secrets\"\n          config:\n            resource-type:\n              version: v1\n              resource: secrets\n            field-selectors:\n            - type!=kubernetes.io/service-account-token\n            - type!=kubernetes.io/dockercfg\n            - type!=kubernetes.io/dockerconfigjson\n            - type!=kubernetes.io/basic-auth\n            - type!=kubernetes.io/ssh-auth\n            - type!=bootstrap.kubernetes.io/token\n            - type!=helm.sh/release.v1\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/certificates\"\n          config:\n            resource-type:\n              group: cert-manager.io\n              version: v1\n              resource: certificates\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/certificaterequests\"\n          config:\n            resource-type:\n              group: cert-manager.io\n              version: v1\n              resource: certificaterequests\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/issuers\"\n          config:\n            resource-type:\n             
 group: cert-manager.io\n              version: v1\n              resource: issuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/clusterissuers\"\n          config:\n            resource-type:\n              group: cert-manager.io\n              version: v1\n              resource: clusterissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/googlecasissuers\"\n          config:\n            resource-type:\n              group: cas-issuer.jetstack.io\n              version: v1beta1\n              resource: googlecasissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/googlecasclusterissuers\"\n          config:\n            resource-type:\n              group: cas-issuer.jetstack.io\n              version: v1beta1\n              resource: googlecasclusterissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/awspcaissuer\"\n          config:\n            resource-type:\n              group: awspca.cert-manager.io\n              version: v1beta1\n              resource: awspcaissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/awspcaclusterissuers\"\n          config:\n            resource-type:\n              group: awspca.cert-manager.io\n              version: v1beta1\n              resource: awspcaclusterissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/mutatingwebhookconfigurations\"\n          config:\n            resource-type:\n              group: admissionregistration.k8s.io\n              version: v1\n              resource: mutatingwebhookconfigurations\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/validatingwebhookconfigurations\"\n          config:\n            resource-type:\n              group: admissionregistration.k8s.io\n              version: v1\n              resource: validatingwebhookconfigurations\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/gateways\"\n          config:\n            resource-type:\n              group: networking.istio.io\n              
version: v1alpha3\n              resource: gateways\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/virtualservices\"\n          config:\n            resource-type:\n              group: networking.istio.io\n              version: v1alpha3\n              resource: virtualservices\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/routes\"\n          config:\n            resource-type:\n              version: v1\n              group: route.openshift.io\n              resource: routes\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/venaficonnections\"\n          config:\n            resource-type:\n              group: jetstack.io\n              version: v1alpha1\n              resource: venaficonnections\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/venaficlusterissuers\"\n          config:\n            resource-type:\n              group: jetstack.io\n              version: v1alpha1\n              resource: venaficlusterissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/venafiissuers\"\n          config:\n            resource-type:\n              group: jetstack.io\n              version: v1alpha1\n              resource: venafiissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/fireflyissuers\"\n          config:\n            resource-type:\n              group: firefly.venafi.com\n              version: v1\n              resource: issuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/stepissuers\"\n          config:\n            resource-type:\n              group: certmanager.step.sm\n              version: v1beta1\n              resource: stepissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/stepclusterissuers\"\n          config:\n            resource-type:\n              group: certmanager.step.sm\n              version: v1beta1\n              resource: stepclusterissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/originissuers\"\n          config:\n            
resource-type:\n              group: cert-manager.k8s.cloudflare.com\n              version: v1\n              resource: originissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/clusteroriginissuers\"\n          config:\n            resource-type:\n              group: cert-manager.k8s.cloudflare.com\n              version: v1\n              resource: clusteroriginissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/freeipaissuers\"\n          config:\n            resource-type:\n              group: certmanager.freeipa.org\n              version: v1beta1\n              resource: issuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/freeipaclusterissuers\"\n          config:\n            resource-type:\n              group: certmanager.freeipa.org\n              version: v1beta1\n              resource: clusterissuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/ejbcaissuers\"\n          config:\n            resource-type:\n              group: ejbca-issuer.keyfactor.com\n              version: v1alpha1\n              resource: issuers\n        - kind: \"k8s-dynamic\"\n          name: \"k8s/ejbcaclusterissuers\"\n          config:\n            resource-type:\n              group: ejbca-issuer.keyfactor.com\n              version: v1alpha1\n              resource: clusterissuers\n    kind: ConfigMap\n    metadata:\n      labels:\n        app.kubernetes.io/instance: test\n        app.kubernetes.io/managed-by: Helm\n        app.kubernetes.io/name: venafi-kubernetes-agent\n        app.kubernetes.io/version: v0.0.0\n        helm.sh/chart: venafi-kubernetes-agent-0.0.0\n      name: agent-config\n      namespace: test-ns\n"
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/tests/configmap_test.yaml",
    "content": "suite: test the contents of the config.yaml\ntemplates:\n  - configmap.yaml\n  - NOTES.txt\nrelease:\n  name: test\n  namespace: test-ns\ntests:\n  - it: defaults\n    asserts:\n      - matchSnapshot: {}\n\n  - it: custom-period\n    set:\n      config.period: 1m\n    asserts:\n      - matchSnapshot: {}\n\n  - it: custom-cluster-name\n    set:\n      config.clusterName: \"cluster-1 region-1 cloud-1 \"\n    asserts:\n      - matchSnapshot: {}\n\n  - it: custom-cluster-description\n    set:\n      config.clusterDescription: |\n        A cloud hosted Kubernetes cluster hosting production workloads.\n\n        team: team-1\n        email: team-1@example.com\n        purpose: Production workloads\n    asserts:\n      - matchSnapshot: {}\n\n  - it: custom-configmap\n    set:\n      config:\n        configmap:\n          name: agent-custom-config\n    asserts:\n      - matchSnapshotRaw: {}\n"
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/tests/deployment_test.yaml",
    "content": "suite: test deployment\ntemplates:\n  - deployment.yaml\n\ntests:\n  # Basic checks on deployment\n  - it: templates as expected\n    set:\n      image.tag: latest\n      config.clientId: \"00000000-0000-0000-0000-000000000000\"\n    template: deployment.yaml\n    asserts:\n      - isKind:\n          of: Deployment\n      # Validate name matches\n      - matchRegex:\n          path: metadata.name\n          pattern: ^venafi-kubernetes-agent-*\n      # Check is latest is set as tag that it uses that tag\n      - equal:\n          path: spec.template.spec.containers[0].image\n          value: registry.venafi.cloud/venafi-agent/venafi-agent:latest\n\n  # Check naming works with nameOverride\n  - it: Deployment name is set when nameOverride is used\n    set:\n      nameOverride: example\n    template: deployment.yaml\n    asserts:\n      - isKind:\n          of: Deployment\n      - matchRegex:\n          path: metadata.name\n          pattern: ^example-RELEASE-NAME$\n\n  # Check similar with fullnameOverride\n  - it: Deployment name is set when fullnameOverride is used\n    set:\n      config.clientId: \"00000000-0000-0000-0000-000000000000\"\n      fullnameOverride: example\n    template: deployment.yaml\n    asserts:\n      - isKind:\n          of: Deployment\n      - equal:\n          path: metadata.name\n          value: example\n\n  # Checking extraArgs are passed\n  - it: Extra Args passed in a valid format when supplied\n    set:\n      config.clientId: \"00000000-0000-0000-0000-000000000000\"\n      extraArgs: [\"--strict\", \"--one-shot\"]\n    template: deployment.yaml\n    asserts:\n      - isKind:\n          of: Deployment\n      - contains:\n          path: spec.template.spec.containers[0].args\n          content: --strict\n      - contains:\n          path: spec.template.spec.containers[0].args\n          content: --one-shot\n\n  # Check command is present when configured\n  - it: Command passes to deployment manifest\n    set:\n      
config.clientId: \"00000000-0000-0000-0000-000000000000\"\n      command: [\"notpreflight\"]\n    template: deployment.yaml\n    asserts:\n      - isKind:\n          of: Deployment\n      - contains:\n          path: spec.template.spec.containers[0].command\n          content: notpreflight\n\n  # Check the volumes and volumeMounts works correctly\n  - it: Volumes and VolumeMounts added correctly\n    values:\n      - ./values/custom-volumes.yaml\n    asserts:\n      - isKind:\n          of: Deployment\n      - equal:\n          # In template this comes after credentials and agent config volumeMounts\n          path: spec.template.spec.containers[0].volumeMounts[?(@.name == \"cabundle\")]\n          value:\n            name: cabundle\n            mountPath: /etc/ssl/certs/ca-certificates.crt\n            subPath: ca-certificates.crt\n            readOnly: true\n      - equal:\n          path: spec.template.spec.volumes[?(@.name == \"cabundle\")].configMap\n          value:\n            name: cabundle\n            optional: false\n            defaultMode: 0644\n            items:\n              - key: cabundle\n                path: ca-certificates.crt\n\n  # Check proxy settings are additive not overriding and set to correct values.\n  # Values from our documentation: https://docs.cyberark.com/mis-saas/vaas/k8s-components/c-vcp-network-requirements/#modifying-network-settings-for-kubernetes\n  - it: All environment variables present when all proxy settings are supplied\n    set:\n      http_proxy: \"http://<proxy server>:<port>\"\n      https_proxy: \"https://<proxy server>:<port>\"\n      no_proxy: \"127.0.0.1,localhost,kubernetes.default.svc,kubernetes.default.svc.cluster.local\"\n    template: deployment.yaml\n    asserts:\n      - isKind:\n          of: Deployment\n      - lengthEqual :\n          path: spec.template.spec.containers[0].env\n          count: 7\n      - equal:\n          path: spec.template.spec.containers[0].env[?(@.name == \"NO_PROXY\")].value\n 
         value: \"127.0.0.1,localhost,kubernetes.default.svc,kubernetes.default.svc.cluster.local\"\n      - equal:\n          path: spec.template.spec.containers[0].env[?(@.name == \"HTTPS_PROXY\")].value\n          value: \"https://<proxy server>:<port>\"\n      - equal:\n          path: spec.template.spec.containers[0].env[?(@.name == \"HTTP_PROXY\")].value\n          value: \"http://<proxy server>:<port>\"\n\n  # Check no proxy settings are set when no proxy settings are provided\n  - it: Only default environment variables are set when no proxy settings are provided\n    template: deployment.yaml\n    asserts:\n      - isKind:\n          of: Deployment\n      - lengthEqual :\n          path: spec.template.spec.containers[0].env\n          count: 4\n"
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/tests/values/custom-volumes.yaml",
    "content": "volumes:\n  - name: cabundle\n    configMap:\n      name: cabundle\n      optional: false\n      defaultMode: 0644\n      items:\n        - key: cabundle\n          path: ca-certificates.crt\n\nvolumeMounts:\n  - name: cabundle\n    mountPath: /etc/ssl/certs/ca-certificates.crt\n    subPath: ca-certificates.crt\n    readOnly: true\n"
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/values.linter.exceptions",
    "content": ""
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/values.schema.json",
    "content": "{\n  \"$defs\": {\n    \"helm-values\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"affinity\": {\n          \"$ref\": \"#/$defs/helm-values.affinity\"\n        },\n        \"authentication\": {\n          \"$ref\": \"#/$defs/helm-values.authentication\"\n        },\n        \"command\": {\n          \"$ref\": \"#/$defs/helm-values.command\"\n        },\n        \"config\": {\n          \"$ref\": \"#/$defs/helm-values.config\"\n        },\n        \"crds\": {\n          \"$ref\": \"#/$defs/helm-values.crds\"\n        },\n        \"extraArgs\": {\n          \"$ref\": \"#/$defs/helm-values.extraArgs\"\n        },\n        \"fullnameOverride\": {\n          \"$ref\": \"#/$defs/helm-values.fullnameOverride\"\n        },\n        \"global\": {\n          \"$ref\": \"#/$defs/helm-values.global\"\n        },\n        \"http_proxy\": {\n          \"$ref\": \"#/$defs/helm-values.http_proxy\"\n        },\n        \"https_proxy\": {\n          \"$ref\": \"#/$defs/helm-values.https_proxy\"\n        },\n        \"image\": {\n          \"$ref\": \"#/$defs/helm-values.image\"\n        },\n        \"imageNamespace\": {\n          \"$ref\": \"#/$defs/helm-values.imageNamespace\"\n        },\n        \"imagePullSecrets\": {\n          \"$ref\": \"#/$defs/helm-values.imagePullSecrets\"\n        },\n        \"imageRegistry\": {\n          \"$ref\": \"#/$defs/helm-values.imageRegistry\"\n        },\n        \"metrics\": {\n          \"$ref\": \"#/$defs/helm-values.metrics\"\n        },\n        \"nameOverride\": {\n          \"$ref\": \"#/$defs/helm-values.nameOverride\"\n        },\n        \"no_proxy\": {\n          \"$ref\": \"#/$defs/helm-values.no_proxy\"\n        },\n        \"nodeSelector\": {\n          \"$ref\": \"#/$defs/helm-values.nodeSelector\"\n        },\n        \"podAnnotations\": {\n          \"$ref\": \"#/$defs/helm-values.podAnnotations\"\n        },\n        \"podDisruptionBudget\": {\n          \"$ref\": 
\"#/$defs/helm-values.podDisruptionBudget\"\n        },\n        \"podSecurityContext\": {\n          \"$ref\": \"#/$defs/helm-values.podSecurityContext\"\n        },\n        \"replicaCount\": {\n          \"$ref\": \"#/$defs/helm-values.replicaCount\"\n        },\n        \"resources\": {\n          \"$ref\": \"#/$defs/helm-values.resources\"\n        },\n        \"securityContext\": {\n          \"$ref\": \"#/$defs/helm-values.securityContext\"\n        },\n        \"serviceAccount\": {\n          \"$ref\": \"#/$defs/helm-values.serviceAccount\"\n        },\n        \"tolerations\": {\n          \"$ref\": \"#/$defs/helm-values.tolerations\"\n        },\n        \"volumeMounts\": {\n          \"$ref\": \"#/$defs/helm-values.volumeMounts\"\n        },\n        \"volumes\": {\n          \"$ref\": \"#/$defs/helm-values.volumes\"\n        }\n      },\n      \"type\": \"object\"\n    },\n    \"helm-values.affinity\": {\n      \"default\": {},\n      \"description\": \"Embed YAML for Node affinity settings, see\\nhttps://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/.\",\n      \"type\": \"object\"\n    },\n    \"helm-values.authentication\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"secretKey\": {\n          \"$ref\": \"#/$defs/helm-values.authentication.secretKey\"\n        },\n        \"secretName\": {\n          \"$ref\": \"#/$defs/helm-values.authentication.secretName\"\n        },\n        \"venafiConnection\": {\n          \"$ref\": \"#/$defs/helm-values.authentication.venafiConnection\"\n        }\n      },\n      \"type\": \"object\"\n    },\n    \"helm-values.authentication.secretKey\": {\n      \"default\": \"privatekey.pem\",\n      \"description\": \"Key name in the referenced secret\",\n      \"type\": \"string\"\n    },\n    \"helm-values.authentication.secretName\": {\n      \"default\": \"agent-credentials\",\n      \"description\": \"Name of the secret containing the private 
key\",\n      \"type\": \"string\"\n    },\n    \"helm-values.authentication.venafiConnection\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"enabled\": {\n          \"$ref\": \"#/$defs/helm-values.authentication.venafiConnection.enabled\"\n        },\n        \"name\": {\n          \"$ref\": \"#/$defs/helm-values.authentication.venafiConnection.name\"\n        },\n        \"namespace\": {\n          \"$ref\": \"#/$defs/helm-values.authentication.venafiConnection.namespace\"\n        }\n      },\n      \"type\": \"object\"\n    },\n    \"helm-values.authentication.venafiConnection.enabled\": {\n      \"default\": false,\n      \"description\": \"When set to true, the Discovery Agent will authenticate to CyberArk Certificate Manager using the configuration in a VenafiConnection resource. Use `venafiConnection.enabled=true` for [secretless authentication](https://docs.cyberark.com/mis-saas/vaas/k8s-components/t-install-tlspk-agent/). When set to true, the `authentication.secret` values will be ignored and the 
Secret with `authentication.secretName` will _not_ be mounted into the\\nDiscovery Agent Pod.\",\n      \"type\": \"boolean\"\n    },\n    \"helm-values.authentication.venafiConnection.name\": {\n      \"default\": \"venafi-components\",\n      \"description\": \"The name of a VenafiConnection resource which contains the configuration for authenticating to Venafi.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.authentication.venafiConnection.namespace\": {\n      \"default\": \"venafi\",\n      \"description\": \"The namespace of a VenafiConnection resource which contains the configuration for authenticating to Venafi.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.command\": {\n      \"default\": [],\n      \"description\": \"Specify the command to run overriding default binary.\",\n      \"items\": {},\n      \"type\": \"array\"\n    },\n    \"helm-values.config\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"clientId\": {\n          \"$ref\": \"#/$defs/helm-values.config.clientId\"\n        },\n        \"clusterDescription\": {\n          \"$ref\": \"#/$defs/helm-values.config.clusterDescription\"\n        },\n        \"clusterName\": {\n          \"$ref\": \"#/$defs/helm-values.config.clusterName\"\n        },\n        \"configmap\": {\n          \"$ref\": \"#/$defs/helm-values.config.configmap\"\n        },\n        \"excludeAnnotationKeysRegex\": {\n          \"$ref\": \"#/$defs/helm-values.config.excludeAnnotationKeysRegex\"\n        },\n        \"excludeLabelKeysRegex\": {\n          \"$ref\": \"#/$defs/helm-values.config.excludeLabelKeysRegex\"\n        },\n        \"ignoredSecretTypes\": {\n          \"$ref\": \"#/$defs/helm-values.config.ignoredSecretTypes\"\n        },\n        \"period\": {\n          \"$ref\": \"#/$defs/helm-values.config.period\"\n        },\n        \"server\": {\n          \"$ref\": \"#/$defs/helm-values.config.server\"\n        }\n      },\n      \"type\": \"object\"\n    },\n 
   \"helm-values.config.clientId\": {\n      \"default\": \"\",\n      \"description\": \"The client-id to be used for authenticating with the Venafi Control Plane. Only useful when using a Key Pair Service Account in the Venafi Control Plane. You can obtain the client ID by creating a Key Pair Service\\nAccount in the CyberArk Certificate Manager.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.config.clusterDescription\": {\n      \"default\": \"\",\n      \"description\": \"Description for the cluster resource if it needs to be created in Venafi\\nControl Plane.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.config.clusterName\": {\n      \"default\": \"\",\n      \"description\": \"Name for the cluster resource if it needs to be created in Venafi Control\\nPlane.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.config.configmap\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"key\": {\n          \"$ref\": \"#/$defs/helm-values.config.configmap.key\"\n        },\n        \"name\": {\n          \"$ref\": \"#/$defs/helm-values.config.configmap.name\"\n        }\n      },\n      \"type\": \"object\"\n    },\n    \"helm-values.config.configmap.key\": {},\n    \"helm-values.config.configmap.name\": {},\n    \"helm-values.config.excludeAnnotationKeysRegex\": {\n      \"default\": [],\n      \"description\": \"You can configure Discovery Agent to exclude some annotations or labels from being pushed to the CyberArk Certificate Manager. All Kubernetes objects are affected. The objects are still pushed, but the specified annotations and labels are removed before being sent to the CyberArk Certificate Manager.\\n\\nThe dot is the only character that needs to be escaped in the regex. 
Use either double quotes with escaped single quotes or unquoted strings for the regex to avoid YAML parsing issues with `\\\\.`.\\n\\nExample: excludeAnnotationKeysRegex: ['^kapp\\\\.k14s\\\\.io/original.*']\",\n      \"items\": {},\n      \"type\": \"array\"\n    },\n    \"helm-values.config.excludeLabelKeysRegex\": {\n      \"default\": [],\n      \"items\": {},\n      \"type\": \"array\"\n    },\n    \"helm-values.config.ignoredSecretTypes\": {\n      \"items\": {\n        \"$ref\": \"#/$defs/helm-values.config.ignoredSecretTypes[0]\"\n      },\n      \"type\": \"array\"\n    },\n    \"helm-values.config.ignoredSecretTypes[0]\": {\n      \"default\": \"kubernetes.io/service-account-token\",\n      \"type\": \"string\"\n    },\n    \"helm-values.config.ignoredSecretTypes[1]\": {\n      \"default\": \"kubernetes.io/dockercfg\",\n      \"type\": \"string\"\n    },\n    \"helm-values.config.ignoredSecretTypes[2]\": {\n      \"default\": \"kubernetes.io/dockerconfigjson\",\n      \"type\": \"string\"\n    },\n    \"helm-values.config.ignoredSecretTypes[3]\": {\n      \"default\": \"kubernetes.io/basic-auth\",\n      \"type\": \"string\"\n    },\n    \"helm-values.config.ignoredSecretTypes[4]\": {\n      \"default\": \"kubernetes.io/ssh-auth\",\n      \"type\": \"string\"\n    },\n    \"helm-values.config.ignoredSecretTypes[5]\": {\n      \"default\": \"bootstrap.kubernetes.io/token\",\n      \"type\": \"string\"\n    },\n    \"helm-values.config.ignoredSecretTypes[6]\": {\n      \"default\": \"helm.sh/release.v1\",\n      \"type\": \"string\"\n    },\n    \"helm-values.config.period\": {\n      \"default\": \"0h1m0s\",\n      \"description\": \"Send data back to the platform every minute unless changed.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.config.server\": {\n      \"default\": \"https://api.venafi.cloud/\",\n      \"description\": \"API URL of the CyberArk Certificate Manager API. For EU tenants, set this value to https://api.venafi.eu/. 
 If you are using the VenafiConnection authentication method, you must set the API URL using the field `spec.vcp.url` on the\nVenafiConnection resource instead.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.crds\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"forceRemoveValidationAnnotations\": {\n          \"$ref\": \"#/$defs/helm-values.crds.forceRemoveValidationAnnotations\"\n        },\n        \"keep\": {\n          \"$ref\": \"#/$defs/helm-values.crds.keep\"\n        },\n        \"venafiConnection\": {\n          \"$ref\": \"#/$defs/helm-values.crds.venafiConnection\"\n        }\n      },\n      \"type\": \"object\"\n    },\n    \"helm-values.crds.forceRemoveValidationAnnotations\": {\n      \"default\": false,\n      \"description\": \"The 'x-kubernetes-validations' annotation is not supported in Kubernetes 1.22 and below. This annotation is used by CEL, which is a feature introduced in Kubernetes 1.25 that improves how validation is performed. This option allows to force the 'x-kubernetes-validations' annotation to be excluded, even on Kubernetes 1.25+ clusters.\",\n      \"type\": \"boolean\"\n    },\n    \"helm-values.crds.keep\": {\n      \"default\": false,\n      \"description\": \"This option makes it so that the \\\"helm.sh/resource-policy\\\": keep annotation is added to the CRD. This will prevent Helm from uninstalling the CRD when the Helm release is uninstalled.\",\n      \"type\": \"boolean\"\n    },\n    \"helm-values.crds.venafiConnection\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"include\": {\n          \"$ref\": \"#/$defs/helm-values.crds.venafiConnection.include\"\n        }\n      },\n      \"type\": \"object\"\n    },\n    \"helm-values.crds.venafiConnection.include\": {\n      \"default\": false,\n      \"description\": \"When set to false, the rendered output does not contain the VenafiConnection CRDs and RBAC. This is useful for when the 
Venafi Connection resources are already installed separately.\",\n      \"type\": \"boolean\"\n    },\n    \"helm-values.extraArgs\": {\n      \"default\": [],\n      \"description\": \"Specify additional arguments to pass to the agent binary. For example, to enable JSON logging use `--logging-format`, or to increase the logging verbosity use `--log-level`.\\nThe log levels are: 0=Info, 1=Debug, 2=Trace.\\nUse 6-9 for increasingly verbose HTTP request logging.\\nThe default log level is 0.\\n\\nExample:\\nextraArgs:\\n- --logging-format=json\\n- --log-level=6 # To enable HTTP request logging\",\n      \"items\": {},\n      \"type\": \"array\"\n    },\n    \"helm-values.fullnameOverride\": {\n      \"default\": \"\",\n      \"description\": \"Helm default setting, use this to shorten the full install name.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.global\": {\n      \"description\": \"Global values shared across all (sub)charts\"\n    },\n    \"helm-values.http_proxy\": {\n      \"description\": \"Configures the HTTP_PROXY environment variable where a HTTP proxy is required.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.https_proxy\": {\n      \"description\": \"Configures the HTTPS_PROXY environment variable where a HTTP proxy is required.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.image\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"digest\": {\n          \"$ref\": \"#/$defs/helm-values.image.digest\"\n        },\n        \"name\": {\n          \"$ref\": \"#/$defs/helm-values.image.name\"\n        },\n        \"pullPolicy\": {\n          \"$ref\": \"#/$defs/helm-values.image.pullPolicy\"\n        },\n        \"registry\": {\n          \"$ref\": \"#/$defs/helm-values.image.registry\"\n        },\n        \"repository\": {\n          \"$ref\": \"#/$defs/helm-values.image.repository\"\n        },\n        \"tag\": {\n          \"$ref\": \"#/$defs/helm-values.image.tag\"\n        }\n      
},\n      \"type\": \"object\"\n    },\n    \"helm-values.image.digest\": {\n      \"default\": \"\",\n      \"description\": \"Override the image digest to deploy by setting this variable. If set together with `image.tag`, the rendered image will include both tag and digest.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.image.name\": {\n      \"default\": \"venafi-agent\",\n      \"description\": \"The image name for the Discovery Agent.\\nThis is used (together with `imageRegistry` and `imageNamespace`) to construct the full image reference.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.image.pullPolicy\": {\n      \"default\": \"IfNotPresent\",\n      \"description\": \"Kubernetes imagePullPolicy on Deployment.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.image.registry\": {\n      \"description\": \"Deprecated: per-component registry prefix.\\n\\nIf set, this value is *prepended* to the image repository that the chart would otherwise render. This applies both when `image.repository` is set and when the repository is computed from\\n`imageRegistry` + `imageNamespace` + `image.name`.\\n\\nThis can produce \\\"double registry\\\" style references such as\\n`legacy.example.io/registry.venafi.cloud/venafi-agent/...`. Prefer using the global\\n`imageRegistry`/`imageNamespace` values.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.image.repository\": {\n      \"default\": \"\",\n      \"description\": \"Full repository override (takes precedence over `imageRegistry`, `imageNamespace`, and `image.name`). Example: registry.venafi.cloud/venafi-agent/venafi-agent\",\n      \"type\": \"string\"\n    },\n    \"helm-values.image.tag\": {\n      \"default\": \"\",\n      \"description\": \"Override the image tag to deploy by setting this variable. 
If no value is set, the chart's appVersion is used.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.imageNamespace\": {\n      \"default\": \"venafi-agent\",\n      \"description\": \"The repository namespace used for venafi-kubernetes-agent images by default.\\nExamples:\\n- venafi-agent\\n- custom-namespace\",\n      \"type\": \"string\"\n    },\n    \"helm-values.imagePullSecrets\": {\n      \"default\": [],\n      \"description\": \"Specify image pull credentials if using a private registry. Example:\\n - name: my-pull-secret\",\n      \"items\": {},\n      \"type\": \"array\"\n    },\n    \"helm-values.imageRegistry\": {\n      \"default\": \"registry.venafi.cloud\",\n      \"description\": \"The container registry used for venafi-kubernetes-agent images by default. This can include path prefixes (e.g. \\\"artifactory.example.com/docker\\\").\",\n      \"type\": \"string\"\n    },\n    \"helm-values.metrics\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"enabled\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.enabled\"\n        },\n        \"podmonitor\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor\"\n        }\n      },\n      \"type\": \"object\"\n    },\n    \"helm-values.metrics.enabled\": {\n      \"default\": true,\n      \"description\": \"Enable the metrics server.\\nIf false, the metrics server will be disabled and the other metrics fields below will be ignored.\",\n      \"type\": \"boolean\"\n    },\n    \"helm-values.metrics.podmonitor\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"annotations\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor.annotations\"\n        },\n        \"enabled\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor.enabled\"\n        },\n        \"endpointAdditionalProperties\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor.endpointAdditionalProperties\"\n        },\n       
 \"honorLabels\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor.honorLabels\"\n        },\n        \"interval\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor.interval\"\n        },\n        \"labels\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor.labels\"\n        },\n        \"namespace\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor.namespace\"\n        },\n        \"prometheusInstance\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor.prometheusInstance\"\n        },\n        \"scrapeTimeout\": {\n          \"$ref\": \"#/$defs/helm-values.metrics.podmonitor.scrapeTimeout\"\n        }\n      },\n      \"type\": \"object\"\n    },\n    \"helm-values.metrics.podmonitor.annotations\": {\n      \"default\": {},\n      \"description\": \"Additional annotations to add to the PodMonitor.\",\n      \"type\": \"object\"\n    },\n    \"helm-values.metrics.podmonitor.enabled\": {\n      \"default\": false,\n      \"description\": \"Create a PodMonitor to add the metrics to Prometheus, if you are using Prometheus Operator. 
See https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor\",\n      \"type\": \"boolean\"\n    },\n    \"helm-values.metrics.podmonitor.endpointAdditionalProperties\": {\n      \"default\": {},\n      \"description\": \"EndpointAdditionalProperties allows setting additional properties on the endpoint such as relabelings, metricRelabelings etc.\\n\\nFor example:\\nendpointAdditionalProperties:\\n relabelings:\\n - action: replace\\n   sourceLabels:\\n   - __meta_kubernetes_pod_node_name\\n   targetLabel: instance\",\n      \"type\": \"object\"\n    },\n    \"helm-values.metrics.podmonitor.honorLabels\": {\n      \"default\": false,\n      \"description\": \"Keep labels from scraped data, overriding server-side labels.\",\n      \"type\": \"boolean\"\n    },\n    \"helm-values.metrics.podmonitor.interval\": {\n      \"default\": \"60s\",\n      \"description\": \"The interval to scrape metrics.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.metrics.podmonitor.labels\": {\n      \"default\": {},\n      \"description\": \"Additional labels to add to the PodMonitor.\",\n      \"type\": \"object\"\n    },\n    \"helm-values.metrics.podmonitor.namespace\": {\n      \"description\": \"The namespace that the pod monitor should live in. Defaults to the venafi-kubernetes-agent namespace.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.metrics.podmonitor.prometheusInstance\": {\n      \"default\": \"default\",\n      \"description\": \"Specifies the `prometheus` label on the created PodMonitor. 
 This is used when different Prometheus instances have label selectors matching different PodMonitors.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.metrics.podmonitor.scrapeTimeout\": {\n      \"default\": \"30s\",\n      \"description\": \"The timeout before a metrics scrape fails.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.nameOverride\": {\n      \"default\": \"\",\n      \"description\": \"Helm default setting to override release name, usually leave blank.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.no_proxy\": {\n      \"description\": \"Configures the NO_PROXY environment variable where a HTTP proxy is required, but certain domains should be excluded.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.nodeSelector\": {\n      \"default\": {},\n      \"description\": \"Embed YAML for nodeSelector settings, see\\nhttps://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/\",\n      \"type\": \"object\"\n    },\n    \"helm-values.podAnnotations\": {\n      \"default\": {},\n      \"description\": \"Additional YAML annotations to add to the pod.\",\n      \"type\": \"object\"\n    },\n    \"helm-values.podDisruptionBudget\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"enabled\": {\n          \"$ref\": \"#/$defs/helm-values.podDisruptionBudget.enabled\"\n        },\n        \"maxUnavailable\": {\n          \"$ref\": \"#/$defs/helm-values.podDisruptionBudget.maxUnavailable\"\n        },\n        \"minAvailable\": {\n          \"$ref\": \"#/$defs/helm-values.podDisruptionBudget.minAvailable\"\n        }\n      },\n      \"type\": \"object\"\n    },\n    \"helm-values.podDisruptionBudget.enabled\": {\n      \"default\": false,\n      \"description\": \"Enable or disable the PodDisruptionBudget resource, which helps prevent downtime during voluntary disruptions such as during a Node upgrade.\",\n      \"type\": \"boolean\"\n    },\n    
\"helm-values.podDisruptionBudget.maxUnavailable\": {\n      \"description\": \"Configure the maximum unavailable pods for disruptions. Can either be set to an integer (e.g. 1) or a percentage value (e.g. 25%).\\nCannot be used if `minAvailable` is set.\",\n      \"type\": \"number\"\n    },\n    \"helm-values.podDisruptionBudget.minAvailable\": {\n      \"description\": \"Configure the minimum available pods for disruptions. Can either be set to an integer (e.g. 1) or a percentage value (e.g. 25%).\\nCannot be used if `maxUnavailable` is set.\",\n      \"type\": \"number\"\n    },\n    \"helm-values.podSecurityContext\": {\n      \"default\": {},\n      \"description\": \"Optional Pod (all containers) `SecurityContext` options, see https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod.\\n\\nExample:\\n\\n podSecurityContext\\nrunAsUser: 1000\\nrunAsGroup: 3000\\nfsGroup: 2000\",\n      \"type\": \"object\"\n    },\n    \"helm-values.replicaCount\": {\n      \"default\": 1,\n      \"description\": \"default replicas, do not scale up\",\n      \"type\": \"number\"\n    },\n    \"helm-values.resources\": {\n      \"default\": {\n        \"limits\": {\n          \"memory\": \"500Mi\"\n        },\n        \"requests\": {\n          \"cpu\": \"200m\",\n          \"memory\": \"200Mi\"\n        }\n      },\n      \"description\": \"Set resource requests and limits for the pod.\\n\\nRead [Venafi Kubernetes components deployment best practices](https://docs.cyberark.com/mis-saas/vaas/k8s-components/c-k8s-components-best-practice/#scaling) to learn how to choose suitable CPU and memory resource requests and limits.\",\n      \"type\": \"object\"\n    },\n    \"helm-values.securityContext\": {\n      \"default\": {\n        \"allowPrivilegeEscalation\": false,\n        \"capabilities\": {\n          \"drop\": [\n            \"ALL\"\n          ]\n        },\n        \"readOnlyRootFilesystem\": true,\n        
\"runAsNonRoot\": true,\n        \"seccompProfile\": {\n          \"type\": \"RuntimeDefault\"\n        }\n      },\n      \"description\": \"Add Container specific SecurityContext settings to the container. Takes precedence over `podSecurityContext` when set. See https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container\",\n      \"type\": \"object\"\n    },\n    \"helm-values.serviceAccount\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"annotations\": {\n          \"$ref\": \"#/$defs/helm-values.serviceAccount.annotations\"\n        },\n        \"create\": {\n          \"$ref\": \"#/$defs/helm-values.serviceAccount.create\"\n        },\n        \"name\": {\n          \"$ref\": \"#/$defs/helm-values.serviceAccount.name\"\n        }\n      },\n      \"type\": \"object\"\n    },\n    \"helm-values.serviceAccount.annotations\": {\n      \"default\": {},\n      \"description\": \"Annotations YAML to add to the service account.\",\n      \"type\": \"object\"\n    },\n    \"helm-values.serviceAccount.create\": {\n      \"default\": true,\n      \"description\": \"Specifies whether a service account should be created.\",\n      \"type\": \"boolean\"\n    },\n    \"helm-values.serviceAccount.name\": {\n      \"default\": \"\",\n      \"description\": \"The name of the service account to use. If blank and `serviceAccount.create` is true, a name is generated using the fullname template of the release.\",\n      \"type\": \"string\"\n    },\n    \"helm-values.tolerations\": {\n      \"default\": [],\n      \"description\": \"Embed YAML for toleration settings, see\\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/\",\n      \"items\": {},\n      \"type\": \"array\"\n    },\n    \"helm-values.volumeMounts\": {\n      \"default\": [],\n      \"description\": \"Additional volume mounts to add to the Discovery Agent container. 
This is useful for mounting a custom CA bundle. Any PEM certificate mounted under /etc/ssl/certs will be loaded by the Discovery Agent. For\\nexample:\\n\\nvolumeMounts:\\n  - name: cabundle\\n    mountPath: /etc/ssl/certs/cabundle\\n    subPath: cabundle\\n    readOnly: true\",\n      \"items\": {},\n      \"type\": \"array\"\n    },\n    \"helm-values.volumes\": {\n      \"default\": [],\n      \"description\": \"Additional volumes to add to the Discovery Agent container. This is useful for mounting a custom CA bundle. For example:\\nvolumes:\\n  - name: cabundle\\n    configMap:\\n      name: cabundle\\n      optional: false\\n      defaultMode: 0644\\nIn order to create the ConfigMap, you can use the following command:\\n\\n    kubectl create configmap cabundle \\\\\\n      --from-file=cabundle=./your/custom/ca/bundle.pem\",\n      \"items\": {},\n      \"type\": \"array\"\n    }\n  },\n  \"$ref\": \"#/$defs/helm-values\",\n  \"$schema\": \"http://json-schema.org/draft-07/schema#\"\n}\n"
  },
  {
    "path": "deploy/charts/venafi-kubernetes-agent/values.yaml",
    "content": "# Default values for jetstack-agent.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\nmetrics:\n  # Enable the metrics server.\n  # If false, the metrics server will be disabled and the other metrics fields below will be ignored.\n  enabled: true\n  podmonitor:\n    # Create a PodMonitor to add the metrics to Prometheus, if you are using Prometheus Operator.\n    # See https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor\n    enabled: false\n\n    # The namespace that the pod monitor should live in.\n    # Defaults to the venafi-kubernetes-agent namespace.\n    # +docs:property\n    # namespace: venafi\n\n    # Specifies the `prometheus` label on the created PodMonitor.\n    # This is used when different Prometheus instances have label selectors\n    # matching different PodMonitors.\n    prometheusInstance: default\n\n    # The interval to scrape metrics.\n    interval: 60s\n\n    # The timeout before a metrics scrape fails.\n    scrapeTimeout: 30s\n\n    # Additional labels to add to the PodMonitor.\n    labels: {}\n\n    # Additional annotations to add to the PodMonitor.\n    annotations: {}\n\n    # Keep labels from scraped data, overriding server-side labels.\n    honorLabels: false\n\n    # EndpointAdditionalProperties allows setting additional properties on the endpoint such as relabelings, metricRelabelings etc.\n    #\n    # For example:\n    #  endpointAdditionalProperties:\n    #   relabelings:\n    #   - action: replace\n    #     sourceLabels:\n    #     - __meta_kubernetes_pod_node_name\n    #     targetLabel: instance\n    endpointAdditionalProperties: {}\n\n# default replicas, do not scale up\nreplicaCount: 1\n\n# The container registry used for venafi-kubernetes-agent images by default.\n# This can include path prefixes (e.g. 
\"artifactory.example.com/docker\").\n# +docs:property\nimageRegistry: registry.venafi.cloud\n\n# The repository namespace used for venafi-kubernetes-agent images by default.\n# Examples:\n# - venafi-agent\n# - custom-namespace\n# +docs:property\nimageNamespace: venafi-agent\n\nimage:\n  # Deprecated: per-component registry prefix.\n  #\n  # If set, this value is *prepended* to the image repository that the chart would otherwise render.\n  # This applies both when `image.repository` is set and when the repository is computed from\n  # `imageRegistry` + `imageNamespace` + `image.name`.\n  #\n  # This can produce \"double registry\" style references such as\n  # `legacy.example.io/registry.venafi.cloud/venafi-agent/...`. Prefer using the global\n  # `imageRegistry`/`imageNamespace` values.\n  # +docs:property\n  # registry: registry.venafi.cloud\n\n  # Full repository override (takes precedence over `imageRegistry`, `imageNamespace`,\n  # and `image.name`).\n  # Example: registry.venafi.cloud/venafi-agent/venafi-agent\n  # +docs:property\n  repository: \"\"\n\n  # The image name for the Discovery Agent.\n  # This is used (together with `imageRegistry` and `imageNamespace`) to construct the full\n  # image reference.\n  # +docs:property\n  name: venafi-agent\n\n  # Kubernetes imagePullPolicy on Deployment.\n  pullPolicy: IfNotPresent\n\n  # Override the image tag to deploy by setting this variable.\n  # If no value is set, the chart's appVersion is used.\n  tag: \"\"\n\n  # Override the image digest to deploy by setting this variable.\n  # If set together with `image.tag`, the rendered image will include both tag and digest.\n  digest: \"\"\n\n# Specify image pull credentials if using a private registry. 
Example:\n#  - name: my-pull-secret\nimagePullSecrets: []\n\n# Helm default setting to override release name, usually leave blank.\nnameOverride: \"\"\n\n# Helm default setting, use this to shorten the full install name.\nfullnameOverride: \"\"\n\nserviceAccount:\n  # Specifies whether a service account should be created.\n  create: true\n  # Annotations YAML to add to the service account.\n  annotations: {}\n  # The name of the service account to use. If blank and `serviceAccount.create`\n  # is true, a name is generated using the fullname template of the release.\n  name: \"\"\n\n# Additional YAML annotations to add to the pod.\npodAnnotations: {}\n\n# Optional Pod (all containers) `SecurityContext` options, see\n# https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod.\n#\n# Example:\n#\n#  podSecurityContext\n#    runAsUser: 1000\n#    runAsGroup: 3000\n#    fsGroup: 2000\npodSecurityContext: {}\n\n# Use these variables to configure the HTTP_PROXY environment variables.\n\n# Configures the HTTP_PROXY environment variable where a HTTP proxy is required.\n# +docs:property\n# http_proxy: \"http://proxy:8080\"\n\n# Configures the HTTPS_PROXY environment variable where a HTTP proxy is required.\n# +docs:property\n# https_proxy: \"https://proxy:8080\"\n\n# Configures the NO_PROXY environment variable where a HTTP proxy is required,\n# but certain domains should be excluded.\n# +docs:property\n# no_proxy: 127.0.0.1,localhost\n\n# Add Container specific SecurityContext settings to the container. Takes\n# precedence over `podSecurityContext` when set. 
See\n# https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container\n# +docs:property\nsecurityContext:\n  capabilities:\n    drop:\n      - ALL\n  readOnlyRootFilesystem: true\n  runAsNonRoot: true\n  allowPrivilegeEscalation: false\n  seccompProfile: { type: RuntimeDefault }\n\n# Set resource requests and limits for the pod.\n#\n# Read [Venafi Kubernetes components deployment best\n# practices](https://docs.cyberark.com/mis-saas/vaas/k8s-components/c-k8s-components-best-practice/#scaling)\n# to learn how to choose suitable CPU and memory resource requests and limits.\n# +docs:property\nresources:\n  requests:\n    memory: 200Mi\n    cpu: 200m\n  limits:\n    memory: 500Mi\n\n# Embed YAML for nodeSelector settings, see\n# https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/\nnodeSelector: {}\n\n# Embed YAML for toleration settings, see\n# https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/\ntolerations: []\n\n# Embed YAML for Node affinity settings, see\n# https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/.\naffinity: {}\n\n# Specify the command to run overriding default binary.\ncommand: []\n\n# Specify additional arguments to pass to the agent binary.\n# For example, to enable JSON logging use `--logging-format`, or\n# to increase the logging verbosity use `--log-level`.\n# The log levels are: 0=Info, 1=Debug, 2=Trace.\n# Use 6-9 for increasingly verbose HTTP request logging.\n# The default log level is 0.\n#\n# Example:\n#  extraArgs:\n#  - --logging-format=json\n#  - --log-level=6 # To enable HTTP request logging\nextraArgs: []\n\n# Additional volumes to add to the Discovery Agent container. This is\n# useful for mounting a custom CA bundle. 
For example:\n#\n#     volumes:\n#       - name: cabundle\n#         configMap:\n#           name: cabundle\n#           optional: false\n#           defaultMode: 0644\n#\n# In order to create the ConfigMap, you can use the following command:\n#\n#     kubectl create configmap cabundle \\\n#       --from-file=cabundle=./your/custom/ca/bundle.pem\nvolumes: []\n\n# Additional volume mounts to add to the Discovery Agent container.\n# This is useful for mounting a custom CA bundle. Any PEM certificate mounted\n# under /etc/ssl/certs will be loaded by the Discovery Agent. For\n# example:\n#\n#     volumeMounts:\n#       - name: cabundle\n#         mountPath: /etc/ssl/certs/cabundle\n#         subPath: cabundle\n#         readOnly: true\nvolumeMounts: []\n\n# Authentication details for the Discovery Agent\nauthentication:\n  # Name of the secret containing the private key\n  secretName: agent-credentials\n  # Key name in the referenced secret\n  secretKey: \"privatekey.pem\"\n\n  # +docs:section=Venafi Connection\n  # Configure VenafiConnection authentication\n  venafiConnection:\n    # When set to true, the Discovery Agent will authenticate to CyberArk Certificate Manager\n    # using the configuration in a VenafiConnection resource.\n    # Use `venafiConnection.enabled=true` for [secretless authentication](https://docs.cyberark.com/mis-saas/vaas/k8s-components/t-install-tlspk-agent/).\n    # When set to true, the `authentication.secret` values will be ignored and the\n    # Secret with `authentication.secretName` will _not_ be mounted into the\n    # Discovery Agent Pod.\n    enabled: false\n    # The name of a VenafiConnection resource which contains the configuration\n    # for authenticating to Venafi.\n    name: venafi-components\n    # The namespace of a VenafiConnection resource which contains the\n    # configuration for authenticating to Venafi.\n    namespace: venafi\n\n# Configuration section for the Discovery Agent itself\nconfig:\n  # API URL of the 
CyberArk Certificate Manager API. For EU tenants, set this value to\n  # https://api.venafi.eu/. If you are using the VenafiConnection authentication\n  # method, you must set the API URL using the field `spec.vcp.url` on the\n  # VenafiConnection resource instead.\n  server: \"https://api.venafi.cloud/\"\n  # The client-id to be used for authenticating with the Venafi Control\n  # Plane. Only useful when using a Key Pair Service Account in the Venafi\n  # Control Plane. You can obtain the client ID by creating a Key Pair Service\n  # Account in the CyberArk Certificate Manager.\n  clientId: \"\"\n  # Send data back to the platform every minute unless changed.\n  period: \"0h1m0s\"\n  # Name for the cluster resource if it needs to be created in Venafi Control\n  # Plane.\n  clusterName: \"\"\n  # Description for the cluster resource if it needs to be created in Venafi\n  # Control Plane.\n  clusterDescription: \"\"\n\n  # Reduce the memory usage of the agent and reduce the load on the Kubernetes\n  # API server by omitting various common Secret types when listing Secrets.\n  # These Secret types will be added to a \"type!=<type>\" field selector in the\n  # agent config.\n  # * https://docs.cyberark.com/mis-saas/vaas/k8s-components/t-cfg-tlspk-agent/#configuration\n  # * https://kubernetes.io/docs/concepts/configuration/secret/#secret-types\n  # * https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/#list-of-supported-fields\n  ignoredSecretTypes:\n    - kubernetes.io/service-account-token\n    - kubernetes.io/dockercfg\n    - kubernetes.io/dockerconfigjson\n    - kubernetes.io/basic-auth\n    - kubernetes.io/ssh-auth\n    - bootstrap.kubernetes.io/token\n    - helm.sh/release.v1\n\n  # You can configure Discovery Agent to exclude some annotations or\n  # labels from being pushed to the CyberArk Certificate Manager. All Kubernetes objects\n  # are affected. 
The objects are still pushed, but the specified annotations\n  # and labels are removed before being sent to the CyberArk Certificate Manager.\n  #\n  # The dot is the only character that needs to be escaped in the regex. Use either\n  # double quotes with escaped single quotes or unquoted strings for the regex\n  # to avoid YAML parsing issues with `\\.`.\n  #\n  # Example: excludeAnnotationKeysRegex: ['^kapp\\.k14s\\.io/original.*']\n  excludeAnnotationKeysRegex: []\n  excludeLabelKeysRegex: []\n\n  # Specify ConfigMap details to load config from an existing resource.\n  # This should be blank by default unless you have your own config.\n  configmap:\n    name:\n    key:\n\n# Configure a PodDisruptionBudget for the agent's Deployment. If running with multiple\n# replicas, consider setting podDisruptionBudget.enabled to true.\npodDisruptionBudget:\n  # Enable or disable the PodDisruptionBudget resource, which helps prevent downtime\n  # during voluntary disruptions such as during a Node upgrade.\n  enabled: false\n\n  # Configure the minimum available pods for disruptions. Can either be set to\n  # an integer (e.g. 1) or a percentage value (e.g. 25%).\n  # Cannot be used if `maxUnavailable` is set.\n  # +docs:property\n  # minAvailable: 1\n\n  # Configure the maximum unavailable pods for disruptions. Can either be set to\n  # an integer (e.g. 1) or a percentage value (e.g. 25%).\n  # Cannot be used if `minAvailable` is set.\n  # +docs:property\n  # maxUnavailable: 1\n\n# +docs:section=CRDs\n# The CRDs installed by this chart are annotated with \"helm.sh/resource-policy: keep\", this\n# prevents them from being accidentally removed by Helm when this chart is deleted. 
After\n# deleting the installed chart, the user still has to manually remove the remaining CRDs.\ncrds:\n  # The 'x-kubernetes-validations' annotation is not supported in Kubernetes 1.22 and below.\n  # This annotation is used by CEL, which is a feature introduced in Kubernetes 1.25 that\n  # improves how validation is performed.\n  # This option allows to force the 'x-kubernetes-validations' annotation to be excluded,\n  # even on Kubernetes 1.25+ clusters.\n  forceRemoveValidationAnnotations: false\n\n  # This option makes it so that the \"helm.sh/resource-policy\": keep\n  # annotation is added to the CRD. This will prevent Helm from uninstalling\n  # the CRD when the Helm release is uninstalled.\n  keep: false\n\n  # Optionally include the VenafiConnection CRDs\n  venafiConnection:\n    # When set to false, the rendered output does not contain the\n    # VenafiConnection CRDs and RBAC. This is useful for when the\n    # Venafi Connection resources are already installed separately.\n    include: false\n"
  },
  {
    "path": "docs/datagatherers/k8s-discovery.md",
    "content": "# k8s-discovery\n\nThis datagatherer uses the [DiscoveryClient](https://godoc.org/k8s.io/client-go/discovery#DiscoveryClient)\nto get API server version information.\n\nInclude the following in your agent config:\n\n```\ndata-gatherers:\n- kind: \"k8s-discovery\"\n  name: \"k8s-discovery\"\n```\n\nor specify a kubeconfig file:\n\n```\ndata-gatherers:\n- kind: \"k8s-discovery\"\n  name: \"k8s-discovery\"\n  config:\n    kubeconfig: other_kube_config_path\n```\n"
  },
  {
    "path": "docs/datagatherers/k8s-dynamic.md",
    "content": "# Kubernetes Data Gatherer\n\nThe Kubernetes dynamic data gatherer collects information about resources stored\nin the Kubernetes API.\n\n## Data\n\nThe data gathered depends on your configuration. Resources are selected based on\ntheir Group-Version-Kind identifiers, e.g.:\n\n* Core resources such as `Service`, use: `k8s/services.v1`\n* `Ingress`, use: `k8s/ingresses.v1beta1.networking.k8s.io`\n* Custom resources such as `Certificates`, use:\n  `k8s/certificates.v1alpha2.cert-manager.io`\n\nTo see an example of the data being gathered, using `k8s/services.v1` is\ncomparable to the output from:\n\n```bash\nkubectl get services --all-namespaces -o json\n```\n\n## Configuration\n\nYou can collect different resources using difference Group-Version-Kind as\nbelow:\n\n```yaml\ndata-gatherers:\n# basic usage\n- kind: \"k8s-dynamic\"\n  name: \"k8s/pods\"\n  config:\n    resource-type:\n      resource: pods\n      version: v1\n\n# CRD usage\n- kind: \"k8s-dynamic\"\n  name: \"k8s/certificates.v1alpha2.cert-manager.io\"\n  config:\n    resource-type:\n      group: cert-manager.io\n      version: v1alpha2\n      resource: certificates\n\n# you might event want to gather resources from another cluster\n- kind: \"k8s-dynamic\"\n  name: \"k8s/pods\"\n  config:\n    kubeconfig: other_kube_config_path\n```\n\nThe `kubeconfig` field should point to your Kubernetes config file - this is\ntypically found at `~/.kube/config`. 
Preflight will use the context that is\nactive in that config file.\n\n## Permissions\n\nThe user or service account used by the Kubernetes config to authenticate with\nthe Kubernetes API must have permission to perform `list` and `get` on the\nresource referenced in the `kind` for that datagatherer.\n\nThere is an example `ClusterRole` and `ClusterRoleBinding` which can be found in\n[`./deployment/kubernetes/base/00-rbac.yaml`](./deployment/kubernetes/base/00-rbac.yaml).\n\n## Secrets\n\nSecrets can be gathered using the following config:\n\n```yaml\n- kind: \"k8s-dynamic\"\n  name: \"k8s/secrets\"\n  config:\n    resource-type:\n      version: v1\n      resource: secrets\n```\n\nBefore Secrets are sent to the Preflight backend, they are redacted so no secret data is transmitted. See [`fieldfilter.go`](./../../pkg/datagatherer/k8s/fieldfilter.go) to see the details of which fields are filtered and which ones are redacted.\n\n> **All resources other than Kubernetes Secrets are sent in full, so make sure that you don't store secret information on arbitrary resources.**\n\n\n## Field Selectors\n\nYou can use [field selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/#list-of-supported-fields)\nto include or exclude certain resources.\nFor example, you can reduce the memory usage of the agent and reduce the load on the Kubernetes\nAPI server by omitting various common [Secret types](https://kubernetes.io/docs/concepts/configuration/secret/#secret-types)\nwhen listing Secrets.\n\n```yaml\n- kind: \"k8s-dynamic\"\n  name: \"k8s/secrets\"\n  config:\n    resource-type:\n      version: v1\n      resource: secrets\n    field-selectors:\n    - type!=kubernetes.io/service-account-token\n    - type!=kubernetes.io/dockercfg\n    - type!=kubernetes.io/dockerconfigjson\n    - type!=kubernetes.io/basic-auth\n    - type!=kubernetes.io/ssh-auth\n    - type!=bootstrap.kubernetes.io/token\n    - type!=helm.sh/release.v1\n```\n"
  },
  {
    "path": "docs/datagatherers/local.md",
    "content": "# Local Data Gatherer\n\nThe Local data gatherer is intended to be used for reading data for evaluation\nfrom the local file system. It can also be used for 'stubbing' remote data\nsources when testing other data gatherers.\n\n## Configuration\n\nStubbing another datagatherer for testing:\n\n```yaml\ndata-gatherers:\n- kind: \"gke\"\n  name: \"gke\"\n  config:\n    # fetch from local path instead of GKE\n    data-path: ./examples/data/example.json\n```\n\nLoading other data as 'local':\n\n```yaml\ndata-gatherers:\n- kind: \"local\"\n  name: \"local\"\n  config:\n    data-path: ./examples/data/example.json\n```\n\n## Data\n\nData is gathered from the local file system - whatever is read from the file is\nused.\n\n## Permissions\n\nPermissions to read the local path.\n"
  },
  {
    "path": "examples/cert-manager-agent.yaml",
    "content": "organization_id: \"my-organization\"\ncluster_id: \"my_cluster\"\nschedule: \"* * * *\"\ntoken: xxxx\nendpoint:\n  protocol: https\n  host: \"preflight.jetstack.io\"\n  path: \"/api/v1/datareadings\"\ndata-gatherers:\n- kind: \"k8s-dynamic\"\n  name: \"k8s/secrets.v1\"\n  config:\n    resource-type:\n      version: v1\n      resource: secrets\n- kind: \"k8s-dynamic\"\n  name: \"k8s/certificates.v1.cert-manager.io\"\n  config:\n    resource-type:\n      group: cert-manager.io\n      version: v1\n      resource: certificates\n- kind: \"k8s-dynamic\"\n  name: \"k8s/ingresses.v1.networking.k8s.io\"\n  config:\n    resource-type:\n      group: networking.k8s.io\n      version: v1\n      resource: ingresses\n- kind: \"k8s-dynamic\"\n  name: \"k8s/certificaterequests.v1.cert-manager.io\"\n  config:\n    resource-type:\n      group: cert-manager.io\n      version: v1\n      resource: certificaterequests\n"
  },
  {
    "path": "examples/echo/example.json",
    "content": "{\n  \"sampledata\": 1\n}\n"
  },
  {
    "path": "examples/echo/example2.json",
    "content": "{\n\t\"sampledata\": 1\n}"
  },
  {
    "path": "examples/localfile/config.yaml",
    "content": "# No config is required to run the agent with an input file and an output file.\n"
  },
  {
    "path": "examples/localfile/input.json",
    "content": "[]\n"
  },
  {
    "path": "examples/machinehub/config.yaml",
    "content": "# Not used\n"
  },
  {
    "path": "examples/machinehub/input.json",
    "content": "[\n    {\n        \"data-gatherer\": \"ark/oidc\",\n        \"data\": {\n            \"openid_configuration\": {\n                \"id_token_signing_alg_values_supported\": [\n                    \"RS256\"\n                ],\n                \"issuer\": \"https://kubernetes.default.svc.cluster.local\",\n                \"jwks_uri\": \"https://10.10.1.2:6443/openid/v1/jwks\",\n                \"response_types_supported\": [\n                    \"id_token\"\n                ],\n                \"subject_types_supported\": [\n                    \"public\"\n                ]\n            },\n            \"jwks\": {\n                \"keys\": [\n                    {\n                        \"alg\": \"RS256\",\n                        \"e\": \"AQAB\",\n                        \"kid\": \"C-2916LkMJqepqULK2nqhq6uzVB6So_yyGnqyuor71Q\",\n                        \"kty\": \"RSA\",\n                        \"n\": \"sYh6rDpl5DyzBk8qlnYXo6Sf9WbplnXJv3tPxWTvhCFsVu9G5oWjknkafVDq5UOJrlybJJNjBmUyiEi1wbdnuhceJS7rZ3sRnNp3aNoS0omCR6iHJCOuoboSlcaPuRmYw4oWXlVUXlKyw8PYPVbNCcTLuq9nqf8y33mIqe7XJsf5-Z5P05WbK9Rzj-SJvlZLQ4dSFtIiwqLkm_2fpRLj0d8Af1F6vuztnhhUE2_PDsfIWdl_kJKkrK3B5x7k5tgTyFrNQPzlRBgK9jmK0HskwAFIDaLKb7FUWuUiQjn94rjKCED4iy201YPAoZBKIHFDlFVkQ_S3quwPcRyOS18r7w\",\n                        \"use\": \"sig\"\n                    }\n                ]\n            }\n        }\n    },\n    {\n        \"data-gatherer\": \"ark/discovery\",\n        \"data\": {\n            \"cluster_id\": \"0e069229-d83b-4075-a4c8-95838ff5c437\",\n            \"server_version\": {\n                \"gitVersion\": \"v1.27.6\"\n            }\n        }\n    },\n    {\n        \"data-gatherer\": \"ark/secrets\",\n        \"data\": {\n            \"items\": [\n                {\n                    \"resource\": {\n                        \"kind\": \"Secret\",\n                        \"apiVersion\": \"v1\",\n                        \"metadata\": {\n                            \"name\": 
\"app-1-secret-1\",\n                            \"namespace\": \"team-1\"\n                        }\n                    }\n                },\n                {\n                    \"deleted_at\": \"2024-06-10T12:00:00Z\",\n                    \"resource\": {\n                        \"kind\": \"Secret\",\n                        \"apiVersion\": \"v1\",\n                        \"metadata\": {\n                            \"name\": \"deleted-secret-1\",\n                            \"namespace\": \"team-2\"\n                        }\n                    }\n                }\n            ]\n        }\n    },\n    {\n        \"data-gatherer\": \"ark/pods\",\n        \"data\": {\n            \"items\": [\n                {\n                    \"resource\": {\n                        \"kind\": \"Pod\",\n                        \"apiVersion\": \"v1\",\n                        \"metadata\": {\n                            \"name\": \"app-1-pod-1\",\n                            \"namespace\": \"team-1\"\n                        }\n                    }\n                },\n                {\n                    \"deleted_at\": \"2024-06-10T12:00:00Z\",\n                    \"resource\": {\n                        \"kind\": \"Pod\",\n                        \"apiVersion\": \"v1\",\n                        \"metadata\": {\n                            \"name\": \"deleted-pod-1\",\n                            \"namespace\": \"team-2\"\n                        }\n                    }\n                }\n            ]\n        }\n    },\n    {\n        \"data-gatherer\": \"ark/statefulsets\",\n        \"data\": {\n            \"items\": []\n        }\n    },\n    {\n        \"data-gatherer\": \"ark/deployments\",\n        \"data\": {\n            \"items\": []\n        }\n    },\n    {\n        \"data-gatherer\": \"ark/clusterroles\",\n        \"data\": {\n            \"items\": []\n        }\n    },\n    {\n        \"data-gatherer\": \"ark/roles\",\n        \"data\": {\n 
           \"items\": []\n        }\n    },\n    {\n        \"data-gatherer\": \"ark/clusterrolebindings\",\n        \"data\": {\n            \"items\": []\n        }\n    },\n    {\n        \"data-gatherer\": \"ark/rolebindings\",\n        \"data\": {\n            \"items\": []\n        }\n    },\n    {\n        \"data-gatherer\": \"ark/cronjobs\",\n        \"data\": {\n            \"items\": []\n        }\n    },\n    {\n        \"data-gatherer\": \"ark/jobs\",\n        \"data\": {\n            \"items\": []\n        }\n    },\n    {\n        \"data-gatherer\": \"ark/daemonsets\",\n        \"data\": {\n            \"items\": []\n        }\n    },\n    {\n        \"data-gatherer\": \"ark/serviceaccounts\",\n        \"data\": {\n            \"items\": []\n        }\n    },\n    {\n        \"data-gatherer\": \"ark/configmaps\",\n        \"data\": {\n            \"items\": []\n        }\n    },\n    {\n        \"data-gatherer\": \"ark/esoexternalsecrets\",\n        \"data\": {\n            \"items\": []\n        }\n    },\n    {\n        \"data-gatherer\": \"ark/esosecretstores\",\n        \"data\": {\n            \"items\": []\n        }\n    },\n    {\n        \"data-gatherer\": \"ark/esoclusterexternalsecrets\",\n        \"data\": {\n            \"items\": []\n        }\n    },\n    {\n        \"data-gatherer\": \"ark/esoclustersecretstores\",\n        \"data\": {\n            \"items\": []\n        }\n    }\n]\n"
  },
  {
    "path": "examples/machinehub.yaml",
    "content": "# An example agent config for MachineHub output mode.\n#\n# For example:\n#\n#  export ARK_SUBDOMAIN=      # your CyberArk tenant subdomain\n#  export ARK_USERNAME=       # your CyberArk username\n#  export ARK_SECRET=         # your CyberArk password\n#\n#  OPTIONAL: the URL for the CyberArk Discovery API if not using the production environment\n#  # export ARK_DISCOVERY_API=https://platform-discovery.integration-cyberark.cloud/api/v2\n#\n#  go run . agent --one-shot --machine-hub -v 6 --agent-config-file ./examples/machinehub.yaml\n\ndata-gatherers:\n# Gather Kubernetes OIDC information\n- name: ark/oidc\n  kind: oidc\n\n# Gather Kubernetes API server version information\n- name: ark/discovery\n  kind: k8s-discovery\n\n# Gather Kubernetes secrets, excluding specific types\n- name: ark/secrets\n  kind: k8s-dynamic\n  config:\n    resource-type:\n      version: v1\n      resource: secrets\n    field-selectors:\n    - type!=kubernetes.io/dockercfg\n    - type!=kubernetes.io/dockerconfigjson\n    - type!=bootstrap.kubernetes.io/token\n    - type!=helm.sh/release.v1\n\n# Gather Kubernetes service accounts\n- name: ark/serviceaccounts\n  kind: k8s-dynamic\n  config:\n    resource-type:\n      resource: serviceaccounts\n      version: v1\n\n# Gather Kubernetes roles\n- name: ark/roles\n  kind: k8s-dynamic\n  config:\n    resource-type:\n      version: v1\n      group: rbac.authorization.k8s.io\n      resource: roles\n\n# Gather Kubernetes cluster roles\n- name: ark/clusterroles\n  kind: k8s-dynamic\n  config:\n    resource-type:\n      version: v1\n      group: rbac.authorization.k8s.io\n      resource: clusterroles\n\n# Gather Kubernetes role bindings\n- name: ark/rolebindings\n  kind: k8s-dynamic\n  config:\n    resource-type:\n      version: v1\n      group: rbac.authorization.k8s.io\n      resource: rolebindings\n\n# Gather Kubernetes cluster role bindings\n- name: ark/clusterrolebindings\n  kind: k8s-dynamic\n  config:\n    resource-type:\n      
version: v1\n      group: rbac.authorization.k8s.io\n      resource: clusterrolebindings\n\n# Gather Kubernetes jobs\n- name: ark/jobs\n  kind: k8s-dynamic\n  config:\n    resource-type:\n      version: v1\n      group: batch\n      resource: jobs\n\n# Gather Kubernetes cron jobs\n- name: ark/cronjobs\n  kind: k8s-dynamic\n  config:\n    resource-type:\n      version: v1\n      group: batch\n      resource: cronjobs\n\n# Gather Kubernetes deployments\n- name: ark/deployments\n  kind: k8s-dynamic\n  config:\n    resource-type:\n      version: v1\n      group: apps\n      resource: deployments\n\n# Gather Kubernetes stateful sets\n- name: ark/statefulsets\n  kind: k8s-dynamic\n  config:\n    resource-type:\n      version: v1\n      group: apps\n      resource: statefulsets\n\n# Gather Kubernetes daemon sets\n- name: ark/daemonsets\n  kind: k8s-dynamic\n  config:\n    resource-type:\n      version: v1\n      group: apps\n      resource: daemonsets\n\n# Gather Kubernetes pods\n- name: ark/pods\n  kind: k8s-dynamic\n  config:\n    resource-type:\n      version: v1\n      resource: pods\n\n# Gather Kubernetes config maps with specific conjur.org label\n- name: ark/configmaps\n  kind: k8s-dynamic\n  config:\n    resource-type:\n      resource: configmaps\n      version: v1\n    label-selectors:\n    - conjur.org/name=conjur-connect-configmap\n\n# Gather External Secrets Operator ExternalSecret resources\n- name: ark/esoexternalsecrets\n  kind: k8s-dynamic\n  config:\n    resource-type:\n      group: external-secrets.io\n      version: v1\n      resource: externalsecrets\n\n# Gather External Secrets Operator SecretStore resources\n- name: ark/esosecretstores\n  kind: k8s-dynamic\n  config:\n    resource-type:\n      group: external-secrets.io\n      version: v1\n      resource: secretstores\n\n# Gather External Secrets Operator ClusterExternalSecret resources\n- name: ark/esoclusterexternalsecrets\n  kind: k8s-dynamic\n  config:\n    resource-type:\n      group: 
external-secrets.io\n      version: v1\n      resource: clusterexternalsecrets\n\n# Gather External Secrets Operator ClusterSecretStore resources\n- name: ark/esoclustersecretstores\n  kind: k8s-dynamic\n  config:\n    resource-type:\n      group: external-secrets.io\n      version: v1\n      resource: clustersecretstores\n"
  },
  {
    "path": "examples/one-shot-oidc.yaml",
    "content": "# one-shot-oidc.yaml\n#\n# An example configuration file which can be used for local testing.\n# For example:\n#\n#  go run . agent \\\n#     --agent-config-file examples/one-shot-oidc.yaml \\\n#     --one-shot \\\n#     --output-path output.json\n#\norganization_id: \"my-organization\"\ncluster_id: \"my_cluster\"\nperiod: 1m\ndata-gatherers:\n- kind: \"oidc\"\n  name: \"ark/oidc\"\n"
  },
  {
    "path": "examples/one-shot-secret.yaml",
    "content": "# one-shot-secret.yaml\n#\n# An example configuration file which can be used for local testing.\n# It gathers only secrets and it does not attempt to upload to Venafi.\n# For example:\n#\n#  go run . agent \\\n#     --agent-config-file examples/one-shot-secret.yaml \\\n#     --one-shot \\\n#     --output-path output.json\n#\norganization_id: \"my-organization\"\ncluster_id: \"my_cluster\"\nperiod: 1m\ndata-gatherers:\n- kind: \"k8s-dynamic\"\n  name: \"k8s/secrets\"\n  config:\n    resource-type:\n      version: v1\n      resource: secrets\n    field-selectors:\n    - type!=kubernetes.io/service-account-token\n    - type!=kubernetes.io/dockercfg\n    - type!=kubernetes.io/dockerconfigjson\n    - type!=kubernetes.io/basic-auth\n    - type!=kubernetes.io/ssh-auth,\n    - type!=bootstrap.kubernetes.io/token\n    - type!=helm.sh/release.v1\n"
  },
  {
    "path": "go.mod",
    "content": "// TODO(wallrj): Rename the Go module to match the repository name\nmodule github.com/jetstack/preflight\n\ngo 1.24.4\n\nrequire (\n\tgithub.com/Venafi/vcert/v5 v5.12.2\n\tgithub.com/cenkalti/backoff/v5 v5.0.3\n\tgithub.com/fatih/color v1.18.0\n\tgithub.com/google/uuid v1.6.0\n\tgithub.com/hashicorp/go-multierror v1.1.1\n\tgithub.com/jetstack/venafi-connection-lib v0.5.2\n\tgithub.com/lestrrat-go/jwx/v3 v3.0.13\n\tgithub.com/microcosm-cc/bluemonday v1.0.27\n\tgithub.com/pmylund/go-cache v2.1.0+incompatible\n\tgithub.com/prometheus/client_golang v1.23.2\n\tgithub.com/spf13/cobra v1.10.2\n\tgithub.com/spf13/pflag v1.0.10\n\tgithub.com/stretchr/testify v1.11.1\n\tgolang.org/x/sync v0.19.0\n\tgopkg.in/yaml.v2 v2.4.0\n\tk8s.io/api v0.34.3\n\tk8s.io/apimachinery v0.34.3\n\tk8s.io/client-go v0.34.3\n\tk8s.io/component-base v0.34.3\n\tsigs.k8s.io/controller-runtime v0.22.4\n\tsigs.k8s.io/yaml v1.6.0\n)\n\nrequire (\n\tcel.dev/expr v0.24.0 // indirect\n\tgithub.com/Khan/genqlient v0.8.1 // indirect\n\tgithub.com/antlr4-go/antlr/v4 v4.13.0 // indirect\n\tgithub.com/aymerick/douceur v0.2.0 // indirect\n\tgithub.com/blang/semver/v4 v4.0.0 // indirect\n\tgithub.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect\n\tgithub.com/evanphx/json-patch/v5 v5.9.11 // indirect\n\tgithub.com/fsnotify/fsnotify v1.9.0 // indirect\n\tgithub.com/fxamacker/cbor/v2 v2.9.0 // indirect\n\tgithub.com/go-http-utils/headers v0.0.0-20181008091004-fed159eddc2a // indirect\n\tgithub.com/go-logr/zapr v1.3.0 // indirect\n\tgithub.com/go418/concurrentcache v0.6.0 // indirect\n\tgithub.com/go418/concurrentcache/logger v0.0.0-20250207095056-c0b7f8cc8bc2 // indirect\n\tgithub.com/goccy/go-json v0.10.3 // indirect\n\tgithub.com/golang-jwt/jwt/v5 v5.3.0 // indirect\n\tgithub.com/google/btree v1.1.3 // indirect\n\tgithub.com/google/cel-go v0.26.0 // indirect\n\tgithub.com/google/gnostic-models v0.7.0 // indirect\n\tgithub.com/gorilla/css v1.0.1 // indirect\n\tgithub.com/gorilla/websocket 
v1.5.4-0.20250319132907-e064f32e3674 // indirect\n\tgithub.com/json-iterator/go v1.1.12 // indirect\n\tgithub.com/lestrrat-go/blackmagic v1.0.4 // indirect\n\tgithub.com/lestrrat-go/httpcc v1.0.1 // indirect\n\tgithub.com/lestrrat-go/httprc/v3 v3.0.2 // indirect\n\tgithub.com/lestrrat-go/option/v2 v2.0.0 // indirect\n\tgithub.com/pkg/errors v0.9.1 // indirect\n\tgithub.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect\n\tgithub.com/segmentio/asm v1.2.1 // indirect\n\tgithub.com/sosodev/duration v1.3.1 // indirect\n\tgithub.com/stoewer/go-strcase v1.3.0 // indirect\n\tgithub.com/vektah/gqlparser/v2 v2.5.30 // indirect\n\tgithub.com/x448/float16 v0.8.4 // indirect\n\tgithub.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect\n\tgo.opentelemetry.io/otel v1.35.0 // indirect\n\tgo.opentelemetry.io/otel/trace v1.35.0 // indirect\n\tgo.uber.org/multierr v1.11.0 // indirect\n\tgo.uber.org/zap v1.27.0 // indirect\n\tgo.yaml.in/yaml/v2 v2.4.2 // indirect\n\tgo.yaml.in/yaml/v3 v3.0.4 // indirect\n\tgolang.org/x/crypto v0.46.0 // indirect\n\tgolang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect\n\tgolang.org/x/net v0.47.0 // indirect\n\tgomodules.xyz/jsonpatch/v2 v2.4.0 // indirect\n\tgoogle.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect\n\tgoogle.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect\n\tgopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect\n\tgopkg.in/ini.v1 v1.67.0 // indirect\n\tk8s.io/apiextensions-apiserver v0.34.3 // indirect\n\tk8s.io/apiserver v0.34.3 // indirect\n\tsigs.k8s.io/randfill v1.0.0 // indirect\n\tsigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect\n)\n\nrequire (\n\tgithub.com/beorn7/perks v1.0.1 // indirect\n\tgithub.com/cespare/xxhash/v2 v2.3.0 // indirect\n\tgithub.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect\n\tgithub.com/emicklei/go-restful/v3 v3.12.2 // indirect\n\tgithub.com/go-logr/logr 
v1.4.3\n\tgithub.com/go-openapi/jsonpointer v0.21.0 // indirect\n\tgithub.com/go-openapi/jsonreference v0.20.4 // indirect\n\tgithub.com/go-openapi/swag v0.23.0 // indirect\n\tgithub.com/gogo/protobuf v1.3.2 // indirect\n\tgithub.com/golang-jwt/jwt/v4 v4.5.2\n\tgithub.com/google/go-cmp v0.7.0 // indirect\n\tgithub.com/hashicorp/errwrap v1.1.0 // indirect\n\tgithub.com/inconshreveable/mousetrap v1.1.0 // indirect\n\tgithub.com/josharian/intern v1.0.0 // indirect\n\tgithub.com/mailru/easyjson v0.7.7 // indirect\n\tgithub.com/mattn/go-colorable v0.1.13 // indirect\n\tgithub.com/mattn/go-isatty v0.0.20 // indirect\n\tgithub.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect\n\tgithub.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect\n\tgithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect\n\tgithub.com/prometheus/client_model v0.6.2 // indirect\n\tgithub.com/prometheus/common v0.66.1 // indirect\n\tgithub.com/prometheus/procfs v0.16.1 // indirect\n\tgolang.org/x/oauth2 v0.30.0 // indirect\n\tgolang.org/x/sys v0.39.0 // indirect\n\tgolang.org/x/term v0.38.0 // indirect\n\tgolang.org/x/text v0.32.0 // indirect\n\tgolang.org/x/time v0.9.0 // indirect\n\tgoogle.golang.org/protobuf v1.36.8 // indirect\n\tgopkg.in/inf.v0 v0.9.1 // indirect\n\tgopkg.in/yaml.v3 v3.0.1\n\tk8s.io/klog/v2 v2.130.1\n\tk8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect\n\tk8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect\n\tsigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect\n)\n"
  },
  {
    "path": "go.sum",
    "content": "cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=\ncel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=\ngithub.com/Khan/genqlient v0.8.1 h1:wtOCc8N9rNynRLXN3k3CnfzheCUNKBcvXmVv5zt6WCs=\ngithub.com/Khan/genqlient v0.8.1/go.mod h1:R2G6DzjBvCbhjsEajfRjbWdVglSH/73kSivC9TLWVjU=\ngithub.com/Venafi/vcert/v5 v5.12.2 h1:Ee3/A9fZRiisuwuz22/Nqgl19H0ztQjWv35AC63qPcA=\ngithub.com/Venafi/vcert/v5 v5.12.2/go.mod h1:x3l0pB0q0E6wuhPe7nzfkUEwwraK7amnBWQ4LtT1bbw=\ngithub.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=\ngithub.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=\ngithub.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=\ngithub.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=\ngithub.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=\ngithub.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=\ngithub.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=\ngithub.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=\ngithub.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=\ngithub.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=\ngithub.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=\ngithub.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=\ngithub.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=\ngithub.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=\ngithub.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=\ngithub.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=\ngithub.com/coreos/go-semver 
v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=\ngithub.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=\ngithub.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=\ngithub.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=\ngithub.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=\ngithub.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=\ngithub.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=\ngithub.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=\ngithub.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=\ngithub.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=\ngithub.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=\ngithub.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=\ngithub.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=\ngithub.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=\ngithub.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=\ngithub.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=\ngithub.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=\ngithub.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=\ngithub.com/fsnotify/fsnotify 
v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=\ngithub.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=\ngithub.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=\ngithub.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=\ngithub.com/go-http-utils/headers v0.0.0-20181008091004-fed159eddc2a h1:v6zMvHuY9yue4+QkG/HQ/W67wvtQmWJ4SDo9aK/GIno=\ngithub.com/go-http-utils/headers v0.0.0-20181008091004-fed159eddc2a/go.mod h1:I79BieaU4fxrw4LMXby6q5OS9XnoR9UIKLOzDFjUmuw=\ngithub.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=\ngithub.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=\ngithub.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=\ngithub.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=\ngithub.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=\ngithub.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=\ngithub.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=\ngithub.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=\ngithub.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU=\ngithub.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4=\ngithub.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=\ngithub.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=\ngithub.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=\ngithub.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=\ngithub.com/go418/concurrentcache v0.6.0 h1:36A7j+c0dChEAMotq+lBQwQPyI4CMCy5HgMCcw8sY1g=\ngithub.com/go418/concurrentcache v0.6.0/go.mod 
h1:F498AylMP488QhU9KSE8VoN3u2FhGt7hXOgJ2CdvysM=\ngithub.com/go418/concurrentcache/logger v0.0.0-20250207095056-c0b7f8cc8bc2 h1:wVvBhfD+7srZ470Z06t5rp93faukGddvUJR4+owL0Kw=\ngithub.com/go418/concurrentcache/logger v0.0.0-20250207095056-c0b7f8cc8bc2/go.mod h1:DpmmUFByr4p8fGMbp2gsGJhqgcP1SXjyVZDiW0f8aSY=\ngithub.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=\ngithub.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=\ngithub.com/goccy/go-yaml v1.19.0 h1:EmkZ9RIsX+Uq4DYFowegAuJo8+xdX3T/2dwNPXbxEYE=\ngithub.com/goccy/go-yaml v1.19.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=\ngithub.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=\ngithub.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=\ngithub.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=\ngithub.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=\ngithub.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=\ngithub.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=\ngithub.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=\ngithub.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=\ngithub.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=\ngithub.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=\ngithub.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI=\ngithub.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=\ngithub.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=\ngithub.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=\ngithub.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=\ngithub.com/google/go-cmp v0.7.0/go.mod 
h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=\ngithub.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=\ngithub.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=\ngithub.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=\ngithub.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=\ngithub.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=\ngithub.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=\ngithub.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=\ngithub.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=\ngithub.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=\ngithub.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=\ngithub.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=\ngithub.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=\ngithub.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=\ngithub.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=\ngithub.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=\ngithub.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=\ngithub.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=\ngithub.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=\ngithub.com/hashicorp/go-multierror v1.1.1/go.mod 
h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=\ngithub.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=\ngithub.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=\ngithub.com/jetstack/venafi-connection-lib v0.5.2 h1:Mzn8PANYQc5mBPHOhgkTW0VsvnKJsQmO+WcAjDwoR8E=\ngithub.com/jetstack/venafi-connection-lib v0.5.2/go.mod h1:0seQ/uP6MpB3KVMxf56jUzs/HBVpmRQLKU3Juak9p3Q=\ngithub.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=\ngithub.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=\ngithub.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=\ngithub.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=\ngithub.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=\ngithub.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=\ngithub.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=\ngithub.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=\ngithub.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=\ngithub.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=\ngithub.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=\ngithub.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=\ngithub.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=\ngithub.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=\ngithub.com/lestrrat-go/blackmagic v1.0.4 h1:IwQibdnf8l2KoO+qC3uT4OaTWsW7tuRQXy9TRN9QanA=\ngithub.com/lestrrat-go/blackmagic v1.0.4/go.mod h1:6AWFyKNNj0zEXQYfTMPfZrAXUWUfTIZ5ECEUEJaijtw=\ngithub.com/lestrrat-go/dsig v1.0.0 h1:OE09s2r9Z81kxzJYRn07TFM9XA4akrUdoMwr0L8xj38=\ngithub.com/lestrrat-go/dsig v1.0.0/go.mod 
h1:dEgoOYYEJvW6XGbLasr8TFcAxoWrKlbQvmJgCR0qkDo=\ngithub.com/lestrrat-go/dsig-secp256k1 v1.0.0 h1:JpDe4Aybfl0soBvoVwjqDbp+9S1Y2OM7gcrVVMFPOzY=\ngithub.com/lestrrat-go/dsig-secp256k1 v1.0.0/go.mod h1:CxUgAhssb8FToqbL8NjSPoGQlnO4w3LG1P0qPWQm/NU=\ngithub.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE=\ngithub.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E=\ngithub.com/lestrrat-go/httprc/v3 v3.0.2 h1:7u4HUaD0NQbf2/n5+fyp+T10hNCsAnwKfqn4A4Baif0=\ngithub.com/lestrrat-go/httprc/v3 v3.0.2/go.mod h1:mSMtkZW92Z98M5YoNNztbRGxbXHql7tSitCvaxvo9l0=\ngithub.com/lestrrat-go/jwx/v3 v3.0.13 h1:AdHKiPIYeCSnOJtvdpipPg/0SuFh9rdkN+HF3O0VdSk=\ngithub.com/lestrrat-go/jwx/v3 v3.0.13/go.mod h1:2m0PV1A9tM4b/jVLMx8rh6rBl7F6WGb3EG2hufN9OQU=\ngithub.com/lestrrat-go/option/v2 v2.0.0 h1:XxrcaJESE1fokHy3FpaQ/cXW8ZsIdWcdFzzLOcID3Ss=\ngithub.com/lestrrat-go/option/v2 v2.0.0/go.mod h1:oSySsmzMoR0iRzCDCaUfsCzxQHUEuhOViQObyy7S6Vg=\ngithub.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=\ngithub.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=\ngithub.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=\ngithub.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=\ngithub.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=\ngithub.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=\ngithub.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=\ngithub.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk=\ngithub.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA=\ngithub.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=\ngithub.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd 
h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=\ngithub.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=\ngithub.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=\ngithub.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=\ngithub.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=\ngithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=\ngithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=\ngithub.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg=\ngithub.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=\ngithub.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw=\ngithub.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=\ngithub.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=\ngithub.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=\ngithub.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/pmylund/go-cache v2.1.0+incompatible h1:n+7K51jLz6a3sCvff3BppuCAkixuDHuJ/C57Vw/XjTE=\ngithub.com/pmylund/go-cache v2.1.0+incompatible/go.mod h1:hmz95dGvINpbRZGsqPcd7B5xXY5+EKb5PpGhQY3NTHk=\ngithub.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=\ngithub.com/prometheus/client_golang v1.23.2/go.mod 
h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=\ngithub.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=\ngithub.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=\ngithub.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=\ngithub.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=\ngithub.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=\ngithub.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=\ngithub.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=\ngithub.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=\ngithub.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=\ngithub.com/segmentio/asm v1.2.1 h1:DTNbBqs57ioxAD4PrArqftgypG4/qNpXoJx8TVXxPR0=\ngithub.com/segmentio/asm v1.2.1/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=\ngithub.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=\ngithub.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=\ngithub.com/sosodev/duration v1.3.1 h1:qtHBDMQ6lvMQsL15g4aopM4HEfOaYuhWBw3NPTtlqq4=\ngithub.com/sosodev/duration v1.3.1/go.mod h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERAikUR6SDg=\ngithub.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=\ngithub.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=\ngithub.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=\ngithub.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=\ngithub.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=\ngithub.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=\ngithub.com/stoewer/go-strcase v1.3.0/go.mod 
h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=\ngithub.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=\ngithub.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=\ngithub.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=\ngithub.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=\ngithub.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=\ngithub.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=\ngithub.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=\ngithub.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=\ngithub.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=\ngithub.com/valyala/fastjson v1.6.7 h1:ZE4tRy0CIkh+qDc5McjatheGX2czdn8slQjomexVpBM=\ngithub.com/valyala/fastjson v1.6.7/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=\ngithub.com/vektah/gqlparser/v2 v2.5.30 h1:EqLwGAFLIzt1wpx1IPpY67DwUujF1OfzgEyDsLrN6kE=\ngithub.com/vektah/gqlparser/v2 v2.5.30/go.mod h1:D1/VCZtV3LPnQrcPBeR/q5jkSQIPti0uYCP/RI0gIeo=\ngithub.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=\ngithub.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=\ngithub.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM=\ngithub.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI=\ngithub.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngithub.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngo.etcd.io/etcd/api/v3 v3.6.4 
h1:7F6N7toCKcV72QmoUKa23yYLiiljMrT4xCeBL9BmXdo=\ngo.etcd.io/etcd/api/v3 v3.6.4/go.mod h1:eFhhvfR8Px1P6SEuLT600v+vrhdDTdcfMzmnxVXXSbk=\ngo.etcd.io/etcd/client/pkg/v3 v3.6.4 h1:9HBYrjppeOfFjBjaMTRxT3R7xT0GLK8EJMVC4xg6ok0=\ngo.etcd.io/etcd/client/pkg/v3 v3.6.4/go.mod h1:sbdzr2cl3HzVmxNw//PH7aLGVtY4QySjQFuaCgcRFAI=\ngo.etcd.io/etcd/client/v3 v3.6.4 h1:YOMrCfMhRzY8NgtzUsHl8hC2EBSnuqbR3dh84Uryl7A=\ngo.etcd.io/etcd/client/v3 v3.6.4/go.mod h1:jaNNHCyg2FdALyKWnd7hxZXZxZANb0+KGY+YQaEMISo=\ngo.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=\ngo.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=\ngo.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw=\ngo.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM=\ngo.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=\ngo.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=\ngo.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=\ngo.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=\ngo.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60=\ngo.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM=\ngo.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U=\ngo.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE=\ngo.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=\ngo.opentelemetry.io/otel/metric v1.35.0/go.mod 
h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=\ngo.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=\ngo.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=\ngo.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=\ngo.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=\ngo.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=\ngo.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=\ngo.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=\ngo.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=\ngo.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=\ngo.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=\ngo.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=\ngo.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=\ngo.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=\ngo.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=\ngo.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=\ngo.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=\ngo.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=\ngo.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=\ngolang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=\ngolang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=\ngolang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=\ngolang.org/x/crypto v0.46.0/go.mod 
h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=\ngolang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=\ngolang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=\ngolang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=\ngolang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=\ngolang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=\ngolang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=\ngolang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=\ngolang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=\ngolang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=\ngolang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys 
v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=\ngolang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=\ngolang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=\ngolang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=\ngolang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=\ngolang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=\ngolang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=\ngolang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=\ngolang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=\ngolang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=\ngolang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=\ngolang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=\ngomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=\ngoogle.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950=\ngoogle.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg=\ngoogle.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4=\ngoogle.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=\ngoogle.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA=\ngoogle.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=\ngoogle.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=\ngoogle.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=\ngopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=\ngopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=\ngopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=\ngopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=\ngopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=\ngopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=\ngopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=\ngopkg.in/yaml.v2 v2.4.0/go.mod 
h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=\ngopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=\ngopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\nk8s.io/api v0.34.3 h1:D12sTP257/jSH2vHV2EDYrb16bS7ULlHpdNdNhEw2S4=\nk8s.io/api v0.34.3/go.mod h1:PyVQBF886Q5RSQZOim7DybQjAbVs8g7gwJNhGtY5MBk=\nk8s.io/apiextensions-apiserver v0.34.3 h1:p10fGlkDY09eWKOTeUSioxwLukJnm+KuDZdrW71y40g=\nk8s.io/apiextensions-apiserver v0.34.3/go.mod h1:aujxvqGFRdb/cmXYfcRTeppN7S2XV/t7WMEc64zB5A0=\nk8s.io/apimachinery v0.34.3 h1:/TB+SFEiQvN9HPldtlWOTp0hWbJ+fjU+wkxysf/aQnE=\nk8s.io/apimachinery v0.34.3/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=\nk8s.io/apiserver v0.34.3 h1:uGH1qpDvSiYG4HVFqc6A3L4CKiX+aBWDrrsxHYK0Bdo=\nk8s.io/apiserver v0.34.3/go.mod h1:QPnnahMO5C2m3lm6fPW3+JmyQbvHZQ8uudAu/493P2w=\nk8s.io/client-go v0.34.3 h1:wtYtpzy/OPNYf7WyNBTj3iUA0XaBHVqhv4Iv3tbrF5A=\nk8s.io/client-go v0.34.3/go.mod h1:OxxeYagaP9Kdf78UrKLa3YZixMCfP6bgPwPwNBQBzpM=\nk8s.io/component-base v0.34.3 h1:zsEgw6ELqK0XncCQomgO9DpUIzlrYuZYA0Cgo+JWpVk=\nk8s.io/component-base v0.34.3/go.mod h1:5iIlD8wPfWE/xSHTRfbjuvUul2WZbI2nOUK65XL0E/c=\nk8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=\nk8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=\nk8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=\nk8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=\nk8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=\nk8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=\nsigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 
h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM=\nsigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=\nsigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A=\nsigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8=\nsigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=\nsigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=\nsigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=\nsigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=\nsigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=\nsigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=\nsigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=\nsigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=\n"
  },
  {
    "path": "hack/ark/cluster-external-secret.yaml",
    "content": "# Sample ClusterExternalSecret for e2e testing\n# This is a minimal ClusterExternalSecret CR that will be discovered by the agent.\n# This is a cluster-scoped resource that can create ExternalSecrets in multiple namespaces.\napiVersion: external-secrets.io/v1\nkind: ClusterExternalSecret\nmetadata:\n  name: e2e-test-cluster-external-secret\n  labels:\n    app.kubernetes.io/name: e2e-test\n    app.kubernetes.io/component: cluster-external-secret\nspec:\n  externalSecretSpec:\n    refreshInterval: 1h\n    secretStoreRef:\n      name: e2e-test-cluster-secret-store\n      kind: ClusterSecretStore\n    target:\n      name: e2e-test-synced-secret\n      creationPolicy: Owner\n    data:\n    - secretKey: example-key\n      remoteRef:\n        key: dummy/path/to/secret\n        property: password\n  namespaceSelector:\n    matchLabels:\n      environment: test\n"
  },
  {
    "path": "hack/ark/cluster-secret-store.yaml",
    "content": "# Sample ClusterSecretStore for e2e testing\n# This is a minimal ClusterSecretStore CR that will be discovered by the agent.\n# This is a cluster-scoped resource that can be referenced by ExternalSecrets in any namespace.\napiVersion: external-secrets.io/v1\nkind: ClusterSecretStore\nmetadata:\n  name: e2e-test-cluster-secret-store\n  labels:\n    app.kubernetes.io/name: e2e-test\n    app.kubernetes.io/component: cluster-secret-store\nspec:\n  provider:\n    # Fake provider configuration - this won't actually work but allows the CR to be created\n    fake:\n      data:\n      - key: dummy/path/to/secret\n        value: dummy-value\n        version: \"1\"\n"
  },
  {
    "path": "hack/ark/conjur-connect-configmap.yaml",
    "content": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: conjur-connect-configmap\n  namespace: default\n  labels:\n    conjur.org/name: conjur-connect-configmap\n    app.kubernetes.io/name: authn-k8s\n    app.kubernetes.io/component: conjur-conn-configmap\n    app.kubernetes.io/instance: pet-store-authn-k8s\n    app.kubernetes.io/part-of: app-namespace-config\n    app.kubernetes.io/managed-by: helm\n    helm.sh/chart: authn-k8s-namespace-prep-1.0.0\ndata:\n  CONJUR_ACCOUNT: myConjurAccount\n  CONJUR_APPLIANCE_URL: https://conjur.conjur-ns.svc.cluster.local\n  CONJUR_AUTHN_URL: https://conjur.conjur-ns.svc.cluster.local/authn-k8s/my-authenticator-id\n  CONJUR_AUTHENTICATOR_ID: my-authenticator-id\n  CONJUR_SSL_CERTIFICATE: |\n    -----BEGIN CERTIFICATE-----\n    MIIDYTCCAkmgAwIBAgIUTXBJk7Fm+M9kVD5x66jPiwU2JfcwDQYJKoZIhvcNAQEL\n    BQAwQDErMCkGA1UEAwwiY29uanVyLmNvbmp1ci1ucy5zdmMuY2x1c3Rlci5sb2Nh\n    bDERMA8GA1UECgwIRTJFIFRlc3QwHhcNMjYwMTI4MTMwNzA5WhcNMzYwMTI2MTMw\n    NzA5WjBAMSswKQYDVQQDDCJjb25qdXIuY29uanVyLW5zLnN2Yy5jbHVzdGVyLmxv\n    Y2FsMREwDwYDVQQKDAhFMkUgVGVzdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC\n    AQoCggEBALdJ9InvV4oOy5LzP/JfZ7iAuM7RIQzeD1fDjm1EEfQcLqSgobH2yZtA\n    YETlj/c2bfJ8Cc2dTJMoTefwofwjA6iR43SBf0e78raKsGSmR3ors9BqaulvgII5\n    Tk3y5jdZxty7UNIGOJP9QoJ4kPQHu37HhSfaA517yQJNCOa4NSLkpHWK155o6Cvf\n    k03M6Szzs5uL7GTK/8IJnl0WSXJezC7lQ8Q+0VVCR6Cq4CzAKm2ZoVCPGkYDZb+Y\n    2i0aGe8ideO0JgTOsHzXiv5x1DzaEdX0+DhV+aQKbRJYENa2w5LCG0b1Z6Hpyvm6\n    uT0LobEgNLxJ8fOxa3LEq2IryzHFZjUCAwEAAaNTMFEwHQYDVR0OBBYEFHuXVFoC\n    IaF7T3Iic7fKxyKwVhpkMB8GA1UdIwQYMBaAFHuXVFoCIaF7T3Iic7fKxyKwVhpk\n    MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAF/7DwNERFTpucWi\n    roDVME2SH1kTKiemcKzguoeOkDBZd70GbLejy64gWF9nIbcQ9WYxRIuqSI2h0j8d\n    ED9SGQ66nic3uw16GN5IJk21ucFwAJstgQG3kvWPBbSrxMO9TB0pounRozZ5DkZe\n    ZI+vZ4BNOZDT9TAE08xXLrzVhzVDM8DGAydzXUlvscfhYpTe77Cm7yMxmItO7QTA\n    xTrBaamgxM1XYbx+DiS8nTm1U2G3UVACCv9zH6MXDe2DDREBuX1U3skqqbJlsypf\n    
68ckx8fzdxIU5OLx0LZ4QZOR66cHyambDtngoD3iKqDcR1L8EdXajq+IaPRZfcD6\n    VLEtA4Y=\n    -----END CERTIFICATE-----\n"
  },
  {
    "path": "hack/ark/external-secret.yaml",
    "content": "# Sample ExternalSecret for e2e testing\n# This is a minimal ExternalSecret CR that will be discovered by the agent.\n# Note: This requires the External Secrets Operator CRDs to be installed,\n# but does not require a working secrets backend.\napiVersion: external-secrets.io/v1\nkind: ExternalSecret\nmetadata:\n  name: e2e-test-external-secret\n  namespace: default\n  labels:\n    app.kubernetes.io/name: e2e-test\n    app.kubernetes.io/component: external-secret\nspec:\n  refreshInterval: 1h\n  secretStoreRef:\n    name: e2e-test-secret-store\n    kind: SecretStore\n  target:\n    name: e2e-test-synced-secret\n    creationPolicy: Owner\n  data:\n  - secretKey: example-key\n    remoteRef:\n      key: dummy/path/to/secret\n      property: password\n"
  },
  {
    "path": "hack/ark/secret-store.yaml",
    "content": "# Sample SecretStore for e2e testing\n# This is a minimal SecretStore CR that will be discovered by the agent.\n# Note: This requires the External Secrets Operator CRDs to be installed,\n# but does not require a working secrets backend.\napiVersion: external-secrets.io/v1\nkind: SecretStore\nmetadata:\n  name: e2e-test-secret-store\n  namespace: default\n  labels:\n    app.kubernetes.io/name: e2e-test\n    app.kubernetes.io/component: secret-store\nspec:\n  provider:\n    # Fake provider configuration - this won't actually work but allows the CR to be created\n    fake:\n      data:\n      - key: dummy/path/to/secret\n        value: dummy-value\n        version: \"1\"\n"
  },
  {
    "path": "hack/ark/test-e2e.sh",
    "content": "#!/usr/bin/env bash\n#\n# Build and deploy the disco-agent Helm chart.\n# Wait for the agent to log a message indicating successful data upload.\n#\n# Prerequisites:\n# * kubectl: https://kubernetes.io/docs/tasks/tools/#kubectl\n# * kind: https://kind.sigs.k8s.io/docs/user/quick-start/\n# * helm: https://helm.sh/docs/intro/install/\n# * jq: https://jqlang.github.io/jq/download/\n# * make: https://www.gnu.org/software/make/\n#\n# You can run `make ark-test-e2e` which will automatically download all\n# prerequisites and then run this script.\n\nset -o nounset\nset -o errexit\nset -o pipefail\n\n# CyberArk API configuration\n: ${ARK_USERNAME?}\n: ${ARK_SECRET?}\n: ${ARK_SUBDOMAIN?}\n: ${ARK_DISCOVERY_API?}\n\n# The base URL of the OCI registry used for Docker images and Helm charts\n# E.g. ttl.sh/7e6ca67c-96dc-4dea-9437-80b0f3a69fb1\n: ${OCI_BASE?}\n\n# The Kubernetes namespace to install into\n: ${NAMESPACE:=cyberark}\n\n# Set to true to use an existing cluster, otherwise a new kind cluster will be created.\n# Note: the cluster will not be deleted after the test completes.\n: ${USE_EXISTING_CLUSTER:=false}\n\nscript_dir=$(cd -- \"$(dirname -- \"${BASH_SOURCE[0]}\")\" &>/dev/null && pwd)\nroot_dir=$(cd \"${script_dir}/../..\" && pwd)\nexport TERM=dumb\n\ntmp_dir=\"$(mktemp -d /tmp/jetstack-secure.XXXXX)\"\ntrap 'rm -rf \"${tmp_dir}\"' EXIT\n\npushd \"${tmp_dir}\"\n> release.env\nmake -C \"$root_dir\" ark-release \\\n     GITHUB_OUTPUT=\"${tmp_dir}/release.env\" \\\n     OCI_SIGN_ON_PUSH=false \\\n     oci_platforms=\"\" \\\n     ARK_OCI_BASE=\"${OCI_BASE}\"\ncat release.env\nsource release.env\n\nif [[ \"$USE_EXISTING_CLUSTER\" != true ]]; then\n  kind create cluster || true\nfi\n\nkubectl create ns \"$NAMESPACE\" || true\n\nkubectl delete secret agent-credentials --namespace \"$NAMESPACE\" --ignore-not-found\nkubectl create secret generic agent-credentials \\\n        --namespace \"$NAMESPACE\" \\\n        --from-literal=ARK_USERNAME=$ARK_USERNAME 
\\\n        --from-literal=ARK_SECRET=$ARK_SECRET \\\n        --from-literal=ARK_SUBDOMAIN=$ARK_SUBDOMAIN \\\n        --from-literal=ARK_DISCOVERY_API=$ARK_DISCOVERY_API\n\n# Create a sample secret in the cluster\n#\n# TODO(wallrj): See if there's an API for checking that this secret has been\n# imported by the backend. For now we have to log into the Disco web UI and\n# search for this secret.\nkubectl create secret generic e2e-sample-secret-$(date '+%s') \\\n        --namespace default \\\n        --from-literal=username=${RANDOM}\n\n# Create a sample ConfigMap in the cluster that will be discovered by the agent\n#\n# This ConfigMap has the label that matches the default label-selector configured\n# in the ark/configmaps data gatherer (conjur.org/name=conjur-connect-configmap).\nkubectl apply -f \"${root_dir}/hack/ark/conjur-connect-configmap.yaml\"\n\n# Install External Secrets Operator CRDs and controller\n#\n# This is required for the agent to discover ExternalSecret and SecretStore resources.\necho \"Installing External Secrets Operator...\"\nhelm repo add external-secrets https://charts.external-secrets.io\nhelm repo update\nhelm upgrade --install external-secrets \\\n     external-secrets/external-secrets \\\n     --namespace external-secrets-system \\\n     --create-namespace \\\n     --wait \\\n     --set installCRDs=true\n\n# Create sample External Secrets Operator resources that will be discovered by the agent\nkubectl apply -f \"${root_dir}/hack/ark/secret-store.yaml\"\nkubectl apply -f \"${root_dir}/hack/ark/external-secret.yaml\"\nkubectl apply -f \"${root_dir}/hack/ark/cluster-secret-store.yaml\"\nkubectl apply -f \"${root_dir}/hack/ark/cluster-external-secret.yaml\"\n\n# We use a non-existent tag and omit the `--version` flag, to work around a Helm\n# v4 bug. 
See: https://github.com/helm/helm/issues/31600\nhelm upgrade agent \"oci://${ARK_CHART}:NON_EXISTENT_TAG@${ARK_CHART_DIGEST}\" \\\n     --install \\\n     --wait \\\n     --create-namespace \\\n     --namespace \"$NAMESPACE\" \\\n     --set-json extraArgs='[\"--log-level=6\"]' \\\n     --set pprof.enabled=true \\\n     --set fullnameOverride=disco-agent \\\n     --set \"imageRegistry=${OCI_BASE}\" \\\n     --set \"imageNamespace=\" \\\n     --set \"image.digest=${ARK_IMAGE_DIGEST}\" \\\n     --set config.clusterName=\"e2e-test-cluster\" \\\n     --set config.clusterDescription=\"A temporary cluster for E2E testing. Contact @wallrj-cyberark.\" \\\n     --set config.period=60s \\\n     --set acceptTerms=true \\\n     --set-json \"podLabels={\\\"disco-agent.cyberark.cloud/test-id\\\": \\\"${RANDOM}\\\"}\"\n\nkubectl rollout status deployments/disco-agent --namespace \"${NAMESPACE}\"\n\n# Wait 60s for log message indicating success.\n# Parse logs as JSON using jq to ensure logs are all JSON formatted.\ntimeout 60 jq -n \\\n        'inputs | if .msg | test(\"Data sent successfully\") then . | halt_error(0) else . end' \\\n        <(kubectl logs deployments/disco-agent --namespace \"${NAMESPACE}\" --follow)\n\n# Query the Prometheus metrics endpoint to ensure it's working.\nkubectl get pod \\\n        --namespace $NAMESPACE \\\n        --selector app.kubernetes.io/name=disco-agent \\\n        --output jsonpath={.items[*].metadata.name} \\\n    | xargs -I{} kubectl get --raw /api/v1/namespaces/$NAMESPACE/pods/{}:8081/proxy/metrics \\\n    | grep '^process_'\n\n# Query the pprof endpoint to ensure it's working.\nkubectl get pod \\\n        --namespace $NAMESPACE \\\n        --selector app.kubernetes.io/name=disco-agent \\\n        --output jsonpath={.items[*].metadata.name} \\\n    | xargs -I{} kubectl get --raw /api/v1/namespaces/$NAMESPACE/pods/{}:8081/proxy/debug/pprof/cmdline \\\n    | xargs -0\n\n"
  },
  {
    "path": "hack/e2e/application-team-1.yaml",
    "content": "apiVersion: v1\nkind: Namespace\nmetadata:\n  name: team-1\n---\napiVersion: policy.cert-manager.io/v1alpha1\nkind: CertificateRequestPolicy\nmetadata:\n  name: team-1\nspec:\n  allowed:\n    commonName:\n      value: '*'\n    dnsNames:\n      values:\n      - '*'\n    subject:\n      countries:\n        values:\n        - '*'\n      localities:\n        values:\n        - '*'\n      organizationalUnits:\n        values:\n        - '*'\n      organizations:\n        values:\n        - '*'\n      postalCodes:\n        values:\n        - '*'\n      provinces:\n        values:\n        - '*'\n      serialNumber:\n        value: '*'\n      streetAddresses:\n        values:\n        - '*'\n    usages:\n    - digital signature\n    - key encipherment\n    - server auth\n    - client auth\n  plugins:\n    venafi:\n      values:\n        venafiConnectionName: venafi-components\n        zone: ${VEN_ZONE}\n  selector:\n    issuerRef:\n      group: jetstack.io\n      kind: VenafiIssuer\n      name: venafi-cloud\n    namespace:\n      matchNames:\n      - team-1\n---\napiVersion: jetstack.io/v1alpha1\nkind: VenafiIssuer\nmetadata:\n  name: venafi-cloud\n  namespace: team-1\nspec:\n  certificateNameExpression: request.namespace + \"_\" + request.name\n  venafiConnectionName: venafi-components\n  venafiConnectionNamespace: venafi\n  zone: ${VEN_ZONE}\n---\napiVersion: cert-manager.io/v1\nkind: Certificate\nmetadata:\n  name: app-0\n  namespace: team-1\nspec:\n  commonName: app-0.team-1\n  duration: 720h0m0s\n  renewBefore: 719h0m0s\n  issuerRef:\n    group: jetstack.io\n    kind: VenafiIssuer\n    name: venafi-cloud\n  privateKey:\n    algorithm: RSA\n    rotationPolicy: Always\n    size: 2048\n  revisionHistoryLimit: 1\n  secretName: app-0\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: cert-manager-policy:allow\n  namespace: team-1\nrules:\n  - apiGroups: [\"policy.cert-manager.io\"]\n    resources: 
[\"certificaterequestpolicies\"]\n    verbs: [\"use\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: cert-manager-policy:allow\n  namespace: team-1\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: cert-manager-policy:allow\nsubjects:\n- kind: Group\n  name: system:authenticated\n  apiGroup: rbac.authorization.k8s.io\n"
  },
  {
    "path": "hack/e2e/test.sh",
    "content": "#!/usr/bin/env bash\n#\n# Build and install venafi-kubernetes-agent for VenafiConnection based authentication.\n# Wait for it to log a message indicating successful data upload.\n#\n# A VenafiConnection resource is created which directly loads a bearer token\n# from a Kubernetes Secret.\n# This is the simplest way of testing the VenafiConnection integration,\n# but it does not fully test \"secretless\" (workload identity federation) authentication.\n#\n# Prerequisites:\n# * kubectl: https://kubernetes.io/docs/tasks/tools/#kubectl\n# * venctl: https://docs.cyberark.com/mis-saas/vaas/venctl/t-venctl-install/\n# * jq: https://jqlang.github.io/jq/download/\n# * step: https://smallstep.com/docs/step-cli/installation/\n# * curl: https://www.man7.org/linux/man-pages/man1/curl.1.html\n# * envsubst: https://www.man7.org/linux/man-pages/man1/envsubst.1.html\n# * gcloud: https://cloud.google.com/sdk/docs/install\n# * gke-gcloud-auth-plugin: https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl\n# > :warning: If you installed gcloud using snap, you have to install the kubectl plugin using apt:\n# > https://github.com/actions/runner-images/issues/6778#issuecomment-1360360603\n#\n# In case metrics and logs are missing from your cluster, see:\n# * https://cloud.google.com/kubernetes-engine/docs/troubleshooting/dashboards#write_permissions\n\nset -o nounset\nset -o errexit\nset -o pipefail\n# Commenting out for CI, uncomment for local debugging\n#set -o xtrace\n\nscript_dir=$(cd -- \"$(dirname -- \"${BASH_SOURCE[0]}\")\" &>/dev/null && pwd)\nroot_dir=$(cd \"${script_dir}/../..\" && pwd)\nexport TERM=dumb\n\n# Your Venafi Cloud API key.\n: ${VEN_API_KEY?}\n# Separate API Key for getting a pull secret, if your main venafi cloud tenant\n# doesn't allow you to create registry service accounts.\n: ${VEN_API_KEY_PULL?}\n\n# The Venafi Cloud zone (application/issuing_template) which will be used by the\n# issuer an policy.\n: ${VEN_ZONE?}\n\n# 
The hostname of the Venafi API server.\n# US: api.venafi.cloud\n# EU: api.venafi.eu\n: ${VEN_API_HOST?}\n\n# The base URL of the OCI registry used for Docker images and Helm charts\n# E.g. ttl.sh/63773370-0bcf-4ac0-bd42-5515616089ff\n: ${OCI_BASE?}\n\n# Required gcloud environment variables\n# https://cloud.google.com/sdk/docs/configurations#setting_configuration_properties\n: ${CLOUDSDK_CORE_PROJECT?}\n: ${CLOUDSDK_COMPUTE_ZONE?}\n\n# The name of the cluster to create\n: ${CLUSTER_NAME?}\n\n\ncd \"${script_dir}\"\n\npushd \"${root_dir}\"\n> release.env\nmake release \\\n     OCI_SIGN_ON_PUSH=false \\\n     oci_platforms=linux/amd64 \\\n     oci_preflight_image_name=$OCI_BASE/images/venafi-agent \\\n     helm_chart_image_name=$OCI_BASE/charts/venafi-kubernetes-agent \\\n     GITHUB_OUTPUT=release.env\nsource release.env\npopd\n\nexport USE_GKE_GCLOUD_AUTH_PLUGIN=True\nif ! gcloud container clusters get-credentials \"${CLUSTER_NAME}\"; then\n  gcloud container clusters create \"${CLUSTER_NAME}\" \\\n    --preemptible \\\n    --machine-type e2-small \\\n    --num-nodes 3\nfi\nkubectl create ns venafi || true\n\n# Pull secret for Venafi OCI registry\n# IMPORTANT: we pick the first team as the owning team for the registry and\n# workload identity service account as it doesn't matter.\nif ! 
kubectl get secret venafi-image-pull-secret -n venafi; then\n  venctl iam service-accounts registry create \\\n    --api-key $VEN_API_KEY_PULL \\\n    --no-prompts \\\n    --owning-team \"$(curl --fail-with-body -sS \"https://${VEN_API_HOST}/v1/teams\" -H \"tppl-api-key: ${VEN_API_KEY_PULL}\" | jq '.teams[0].id' -r)\" \\\n    --name \"venafi-kubernetes-agent-e2e-registry-${RANDOM}\" \\\n    --scopes enterprise-cert-manager,enterprise-venafi-issuer,enterprise-approver-policy \\\n    | jq '{\n            \"apiVersion\": \"v1\",\n            \"kind\": \"Secret\",\n            \"metadata\": {\n              \"name\": \"venafi-image-pull-secret\"\n            },\n            \"type\": \"kubernetes.io/dockerconfigjson\",\n            \"stringData\": {\n              \".dockerconfigjson\": {\n                \"auths\": {\n                  \"\\(.oci_registry)\": {\n                    \"username\": .username,\n                    \"password\": .password\n                  }\n                }\n              } | tostring\n            }\n          }' \\\n    | kubectl create -n venafi -f -\nfi\n\nexport VENAFI_KUBERNETES_AGENT_CLIENT_ID=\"not-used-but-required-by-venctl\"\nvenctl components kubernetes apply \\\n  --region $VEN_VCP_REGION \\\n  --cert-manager \\\n  --venafi-enhanced-issuer \\\n  --approver-policy-enterprise \\\n  --venafi-kubernetes-agent \\\n  --venafi-kubernetes-agent-version \"${RELEASE_HELM_CHART_VERSION}\" \\\n  --venafi-kubernetes-agent-values-files \"${script_dir}/values.venafi-kubernetes-agent.yaml\" \\\n  --venafi-kubernetes-agent-custom-image-registry \"${OCI_BASE}/images\" \\\n  --venafi-kubernetes-agent-custom-chart-repository \"oci://${OCI_BASE}/charts\"\n\nkubectl apply -n venafi -f venafi-components.yaml\n\nsubject=\"system:serviceaccount:venafi:venafi-components\"\naudience=\"https://${VEN_API_HOST}\"\nissuerURL=\"$(kubectl create token -n venafi venafi-components | step crypto jwt inspect --insecure | jq -r 
'.payload.iss')\"\nopenidDiscoveryURL=\"${issuerURL}/.well-known/openid-configuration\"\njwksURI=$(curl --fail-with-body -sSL ${openidDiscoveryURL} | jq -r '.jwks_uri')\n\n# Create the Venafi agent service account if one does not already exist\n# IMPORTANT: we pick the first team as the owning team for the registry and\n# workload identity service account as it doesn't matter.\nwhile true; do\n  tenantID=$(curl --fail-with-body -sSL -H \"tppl-api-key: $VEN_API_KEY\" https://${VEN_API_HOST}/v1/serviceaccounts \\\n    | jq -r '.[] | select(.issuerURL==$issuerURL and .subject == $subject) | .companyId' \\\n      --arg issuerURL \"${issuerURL}\" \\\n      --arg subject \"${subject}\")\n\n  if [[ \"${tenantID}\" != \"\" ]]; then\n    break\n  fi\n\n  jq -n '{\n      \"name\": \"venafi-kubernetes-agent-e2e-agent-\\($random)\",\n      \"authenticationType\": \"rsaKeyFederated\",\n      \"scopes\": [\"kubernetes-discovery-federated\", \"certificate-issuance\"],\n      \"subject\": $subject,\n      \"audience\": $audience,\n      \"issuerURL\": $issuerURL,\n      \"jwksURI\": $jwksURI,\n      \"applications\": [$applications.applications[].id],\n      \"owner\": $owningTeamID\n    }' \\\n    --arg random \"${RANDOM}\" \\\n    --arg subject \"${subject}\" \\\n    --arg audience \"${audience}\" \\\n    --arg issuerURL \"${issuerURL}\" \\\n    --arg jwksURI \"${jwksURI}\" \\\n    --arg owningTeamID \"$(curl --fail-with-body -sS \"https://${VEN_API_HOST}/v1/teams\" -H \"tppl-api-key: $VEN_API_KEY\" | jq '.teams[0].id' -r)\" \\\n    --argjson applications \"$(curl https://${VEN_API_HOST}/outagedetection/v1/applications --fail-with-body -sSL -H tppl-api-key:\\ ${VEN_API_KEY})\" \\\n    | curl https://${VEN_API_HOST}/v1/serviceaccounts \\\n      -H \"tppl-api-key: $VEN_API_KEY\" \\\n      --fail-with-body \\\n      -sSL --json @-\ndone\n\nkubectl apply -n venafi -f - <<EOF\napiVersion: jetstack.io/v1alpha1\nkind: VenafiConnection\nmetadata:\n  name: venafi-components\nspec:\n  
allowReferencesFrom: {}\n  vcp:\n    url: https://${VEN_API_HOST}\n    accessToken:\n    - serviceAccountToken:\n        name: venafi-components\n        audiences:\n        - ${audience}\n    - vcpOAuth:\n        tenantID: ${tenantID}\nEOF\n\nenvsubst <application-team-1.yaml | kubectl apply -f -\nkubectl -n team-1 wait certificate app-0 --for=condition=Ready\n\n# Wait 60s for log message indicating success.\n# Parse logs as JSON using jq to ensure logs are all JSON formatted.\n# Disable pipefail to prevent SIGPIPE (141) errors from tee\n# See https://unix.stackexchange.com/questions/274120/pipe-fail-141-when-piping-output-into-tee-why\nset +o pipefail\nkubectl logs deployments/venafi-kubernetes-agent \\\n        --follow \\\n        --namespace venafi \\\n    | timeout 60 jq 'if .msg | test(\"Data sent successfully\") then . | halt_error(0) end'\nset -o pipefail\n\n# Create a unique TLS Secret and wait for it to appear in the Venafi certificate\n# inventory API. The case conversion is due to macOS' version of uuidgen which\n# prints UUIDs in upper case, but DNS labels need lower case characters.\ncommonname=\"venafi-kubernetes-agent-e2e.$(uuidgen | tr '[:upper:]' '[:lower:]')\"\nopenssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /tmp/tls.key -out /tmp/tls.crt -subj \"/CN=$commonname\"\nkubectl create secret tls \"$commonname\" --cert=/tmp/tls.crt --key=/tmp/tls.key -o yaml --dry-run=client | kubectl apply -f -\n\ngetCertificate() {\n    jq -n '{\n        \"expression\": {\n            \"field\": \"subjectCN\",\n            \"operator\": \"MATCH\",\n            \"value\": $commonname\n        },\n        \"ordering\": {\n            \"orders\": [\n                { \"direction\": \"DESC\", \"field\": \"certificatInstanceModificationDate\" }\n            ]\n        },\n        \"paging\": { \"pageNumber\": 0, \"pageSize\": 10 }\n    }' --arg commonname \"${commonname}\" \\\n    | curl 
\"https://${VEN_API_HOST}/outagedetection/v1/certificatesearch?excludeSupersededInstances=true&ownershipTree=true\" \\\n         -fsSL \\\n         -H \"tppl-api-key: $VEN_API_KEY\" \\\n         --json @- \\\n    | jq 'if .count == 0 then . | halt_error(1) end'\n}\n\n# Wait 5 minutes for the certificate to appear.\nfor ((i=0;;i++)); do if getCertificate; then exit 0; fi; sleep 30; done | timeout -v -- 5m cat\n"
  },
  {
    "path": "hack/e2e/values.venafi-kubernetes-agent.yaml",
    "content": "config:\n  clusterName: venafi-kubernetes-agent-e2e\n  clusterDescription: |\n      A cluster used for testing the venafi-kubernetes-agent.\n  excludeAnnotationKeysRegex: ['^kapp\\.k14s\\.io/original.*']\n\nauthentication:\n  venafiConnection:\n    enabled: true\n\nextraArgs:\n- --logging-format=json\n- --log-level=4\n"
  },
  {
    "path": "hack/e2e/venafi-components.yaml",
    "content": "apiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: venafi-components\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: venafi-components-create-token\nrules:\n- apiGroups: [ \"\" ]\n  resources: [ \"serviceaccounts/token\" ]\n  verbs: [ \"create\" ]\n  resourceNames: [ \"venafi-components\" ]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: venafi-components-create-token\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: venafi-components-create-token\nsubjects:\n- kind: ServiceAccount\n  name: venafi-connection\n  namespace: venafi\n"
  },
  {
    "path": "hack/ngts/custom_ca.yaml",
    "content": "# These values are used to set a custom CA bundle during the NGTS test.\n# Only used when developing locally, as detected by logic in test-e2e.sh\n\nvolumes:\n- name: custom-ca-volume\n  configMap:\n    name: custom-ca\n\nvolumeMounts:\n- name: custom-ca-volume\n  mountPath: /etc/ssl/certs\n\n"
  },
  {
    "path": "hack/ngts/test-e2e.sh",
    "content": "#!/usr/bin/env bash\n#\n# Build and deploy the discovery-agent Helm chart for NGTS.\n# Wait for the agent to log a message indicating successful data upload.\n#\n# Prerequisites:\n# * kubectl: https://kubernetes.io/docs/tasks/tools/#kubectl\n# * kind: https://kind.sigs.k8s.io/docs/user/quick-start/\n# * helm: https://helm.sh/docs/intro/install/\n# * jq: https://jqlang.github.io/jq/download/\n# * make: https://www.gnu.org/software/make/\n#\n# You can run `make ngts-test-e2e` which will automatically download all\n# prerequisites and then run this script.\n\nset -o nounset\nset -o errexit\nset -o pipefail\n\n# NGTS API configuration\n: ${NGTS_CLIENT_ID?}\n: ${NGTS_PRIVATE_KEY?}\n: ${NGTS_TSG_ID?}\n\n# The base URL of the OCI registry used for Docker images and Helm charts\n# E.g. ttl.sh/7e6ca67c-96dc-4dea-9437-80b0f3a69fb1\n: ${OCI_BASE?}\n\n# The Kubernetes namespace to install into\n: ${NAMESPACE:=ngts}\n\n# Set to true to use an existing cluster, otherwise a new kind cluster will be created.\n# Note: the cluster will not be deleted after the test completes.\n: ${USE_EXISTING_CLUSTER:=false}\n\nscript_dir=$(cd -- \"$(dirname -- \"${BASH_SOURCE[0]}\")\" &>/dev/null && pwd)\nroot_dir=$(cd \"${script_dir}/../..\" && pwd)\nexport TERM=dumb\n\ntmp_dir=\"$(mktemp -d /tmp/jetstack-secure.XXXXX)\"\ntrap 'rm -rf \"${tmp_dir}\"' EXIT\n\npushd \"${tmp_dir}\"\n> release.env\nmake -C \"$root_dir\" ngts-release \\\n     GITHUB_OUTPUT=\"${tmp_dir}/release.env\" \\\n     OCI_SIGN_ON_PUSH=false \\\n     oci_platforms=\"\" \\\n     NGTS_OCI_BASE=\"${OCI_BASE}\"\ncat release.env\nsource release.env\n\nif [[ \"$USE_EXISTING_CLUSTER\" != true ]]; then\n  kind create cluster || true\nfi\n\nkubectl create ns \"$NAMESPACE\" || true\n\nkubectl delete secret discovery-agent-credentials --namespace \"$NAMESPACE\" --ignore-not-found\nkubectl create secret generic discovery-agent-credentials \\\n        --namespace \"$NAMESPACE\" \\\n        
--from-literal=clientID=$NGTS_CLIENT_ID \\\n        --from-literal=privatekey.pem=\"$NGTS_PRIVATE_KEY\"\n\n# Create a sample secret in the cluster\nkubectl create secret generic e2e-sample-secret-$(date '+%s') \\\n        --namespace default \\\n        --from-literal=username=${RANDOM}\n\n# Create values.yaml file for the helm chart\ncat > \"${tmp_dir}/values.yaml\" <<EOF\nextraArgs:\n  - \"--log-level=6\"\n\npprof:\n  enabled: true\n\nfullnameOverride: discovery-agent\n\nimageRegistry: ${OCI_BASE}\nimageNamespace: \"\"\n\nimage:\n  digest: ${NGTS_IMAGE_DIGEST}\n\nconfig:\n  clusterName: \"e2e-test-cluster-ngts\"\n  clusterDescription: \"A temporary cluster for E2E testing NGTS\"\n  period: 10s\n  tsgID: \"${NGTS_TSG_ID}\"\n  serverURL: \"https://${NGTS_TSG_ID}.ngts.dev.venafi.io\"\n\npodLabels:\n  \"discovery-agent.ngts/test-id\": \"${RANDOM}\"\nEOF\n\n# Detect running locally on macOS, and if so inject a custom CA bundle to be used\nif [[ \"$OSTYPE\" == \"darwin\"* ]]; then\n  echo \"Detected running on macOS - adding system trust bundle to cluster + updating values.yaml to mount in agent pod\"\n\n  CA_BUNDLE_FILE=${tmp_dir}/system_certs.pem\n\n  (security find-certificate -a -p /System/Library/Keychains/SystemRootCertificates.keychain && \\\n   security find-certificate -a -p /Library/Keychains/System.keychain) >  $CA_BUNDLE_FILE\n\n  kubectl create configmap custom-ca --namespace=\"$NAMESPACE\" --from-file=ca_certs.crt=\"$CA_BUNDLE_FILE\"\n\n  # Need to update values.yaml to add the custom CA bundle\n  custom_ca_yaml=\"${script_dir}/custom_ca.yaml\"\n  yq eval-all '. as $item ireduce ({}; . * $item)' \"${tmp_dir}/values.yaml\" \"$custom_ca_yaml\" > \"${tmp_dir}/values.merged.yaml\"\n  mv \"${tmp_dir}/values.merged.yaml\" \"${tmp_dir}/values.yaml\"\nfi\n\n# We use a non-existent tag and omit the `--version` flag, to work around a Helm\n# v4 bug. 
See: https://github.com/helm/helm/issues/31600\nhelm upgrade agent \"oci://${NGTS_CHART}:NON_EXISTENT_TAG@${NGTS_CHART_DIGEST}\" \\\n     --install \\\n     --wait \\\n     --create-namespace \\\n     --namespace \"$NAMESPACE\" \\\n     --values \"${tmp_dir}/values.yaml\"\n\nkubectl rollout status deployments/discovery-agent --namespace \"${NAMESPACE}\"\n\n# Wait for log message indicating success.\n# Parse logs as JSON using jq to ensure logs are all JSON formatted.\ntimeout 120 jq -n \\\n        'inputs | if .msg | test(\"Data sent successfully\") then . | halt_error(0) else . end' \\\n        <(kubectl logs deployments/discovery-agent --namespace \"${NAMESPACE}\" --follow)\n\n# Query the Prometheus metrics endpoint to ensure it's working.\nkubectl get pod \\\n        --namespace ${NAMESPACE} \\\n        --selector app.kubernetes.io/name=discovery-agent \\\n        --output jsonpath={.items[*].metadata.name} \\\n    | xargs -I{} kubectl get --raw /api/v1/namespaces/$NAMESPACE/pods/{}:8081/proxy/metrics \\\n    | grep '^process_'\n\n# Query the pprof endpoint to ensure it's working.\nkubectl get pod \\\n        --namespace ${NAMESPACE} \\\n        --selector app.kubernetes.io/name=discovery-agent \\\n        --output jsonpath={.items[*].metadata.name} \\\n    | xargs -I{} kubectl get --raw /api/v1/namespaces/$NAMESPACE/pods/{}:8081/proxy/debug/pprof/cmdline \\\n    | xargs -0\n\n# TODO: should call to SCM and verify that certs are actually uploaded\n"
  },
  {
    "path": "internal/cyberark/api/telemetry.go",
    "content": "package api\n\nimport (\n\t\"encoding/base64\"\n\t\"net/http\"\n\t\"net/url\"\n\n\t\"github.com/jetstack/preflight/pkg/version\"\n)\n\n// Integrations working with the Identity Security Platform, should add metadata\n// in their API calls, to provide insights into how customers utilize each API.\n//\n// - IntegrationName (in): The vendor integration name (required)\n// - IntegrationType (it): Integration Type\t(required)\n// - IntegrationVersion (iv): The plugin version being used (required)\n// - VendorName (vn): Vendor name (required)\n// - VendorVersion (vv): Version of the vendor product in which the plugin is used (if applicable)\n\nconst (\n\t// TelemetryHeaderKey is the name of the HTTP header to use for telemetry\n\tTelemetryHeaderKey = \"X-Cybr-Telemetry\"\n)\n\nvar (\n\ttelemetryValues       url.Values\n\ttelemetryValueEncoded string\n)\n\nfunc init() {\n\ttelemetryValues = url.Values{}\n\ttelemetryValues.Set(\"in\", \"disco-agent\")\n\ttelemetryValues.Set(\"vn\", \"CyberArk\")\n\ttelemetryValues.Set(\"it\", \"KubernetesAgent\")\n\ttelemetryValues.Set(\"iv\", version.PreflightVersion)\n\ttelemetryValueEncoded = base64.URLEncoding.EncodeToString([]byte(telemetryValues.Encode()))\n}\n\n// SetTelemetryRequestHeader adds the x-cybr-telemetry header to the given HTTP\n// request, with information about this integration.\nfunc SetTelemetryRequestHeader(req *http.Request) {\n\treq.Header.Set(TelemetryHeaderKey, telemetryValueEncoded)\n}\n"
  },
  {
    "path": "internal/cyberark/api/telemetry_test.go",
    "content": "package api\n\nimport (\n\t\"encoding/base64\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\n// Test the SetTelemetryRequestHeader function\nfunc TestSetTelemetryRequestHeader(t *testing.T) {\n\t// Create a new HTTP request\n\treq, err := http.NewRequestWithContext(t.Context(), http.MethodGet, \"http://example.com\", nil)\n\trequire.NoError(t, err, \"failed to create HTTP request\")\n\n\t// Call the function to set the telemetry header\n\tSetTelemetryRequestHeader(req)\n\n\tbase64Value := req.Header.Get(TelemetryHeaderKey)\n\t// Check that the header is set\n\trequire.NotEmpty(t, base64Value, \"telemetry header should be set\")\n\n\tqueryString, err := base64.URLEncoding.DecodeString(base64Value)\n\trequire.NoError(t, err, \"failed to decode telemetry header value\")\n\n\tvalues, err := url.ParseQuery(string(queryString))\n\trequire.NoError(t, err, \"failed to parse telemetry header value\")\n\trequire.Equal(t, telemetryValues, values, \"telemetry header value should match expected values\")\n}\n"
  },
  {
    "path": "internal/cyberark/client.go",
    "content": "package cyberark\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"net/http\"\n\t\"os\"\n\n\t\"github.com/jetstack/preflight/internal/cyberark/dataupload\"\n\t\"github.com/jetstack/preflight/internal/cyberark/identity\"\n\t\"github.com/jetstack/preflight/internal/cyberark/servicediscovery\"\n)\n\n// ClientConfig holds the configuration needed to initialize a CyberArk client.\ntype ClientConfig struct {\n\tSubdomain string\n\tUsername  string\n\tSecret    string\n}\n\n// ClientConfigLoader is a function type that loads and returns a ClientConfig.\ntype ClientConfigLoader func() (ClientConfig, error)\n\n// ErrMissingEnvironmentVariables is returned when required environment variables are not set.\nvar ErrMissingEnvironmentVariables = errors.New(\"missing environment variables: ARK_SUBDOMAIN, ARK_USERNAME, ARK_SECRET\")\n\n// LoadClientConfigFromEnvironment loads the CyberArk client configuration from environment variables.\n// It expects the following environment variables to be set:\n// - ARK_SUBDOMAIN: The CyberArk subdomain to use.\n// - ARK_USERNAME: The username for authentication.\n// - ARK_SECRET: The secret for authentication.\nfunc LoadClientConfigFromEnvironment() (ClientConfig, error) {\n\tsubdomain := os.Getenv(\"ARK_SUBDOMAIN\")\n\tusername := os.Getenv(\"ARK_USERNAME\")\n\tsecret := os.Getenv(\"ARK_SECRET\")\n\n\tif subdomain == \"\" || username == \"\" || secret == \"\" {\n\t\treturn ClientConfig{}, ErrMissingEnvironmentVariables\n\t}\n\n\treturn ClientConfig{\n\t\tSubdomain: subdomain,\n\t\tUsername:  username,\n\t\tSecret:    secret,\n\t}, nil\n\n}\n\n// NewDatauploadClient initializes and returns a new CyberArk Data Upload client.\n// It performs service discovery to find the necessary API endpoints and authenticates\n// using the provided client configuration.\nfunc NewDatauploadClient(ctx context.Context, httpClient *http.Client, serviceMap *servicediscovery.Services, tenantUUID string, cfg ClientConfig) (*dataupload.CyberArkClient, 
error) {\n\tidentityAPI := serviceMap.Identity.API\n\tif identityAPI == \"\" {\n\t\treturn nil, errors.New(\"service discovery returned an empty identity API\")\n\t}\n\n\tdiscoveryAPI := serviceMap.DiscoveryContext.API\n\tif discoveryAPI == \"\" {\n\t\treturn nil, errors.New(\"service discovery returned an empty discovery API\")\n\t}\n\n\tidentityClient := identity.New(httpClient, identityAPI, cfg.Subdomain)\n\n\terr := identityClient.LoginUsernamePassword(ctx, cfg.Username, []byte(cfg.Secret))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn dataupload.New(httpClient, discoveryAPI, tenantUUID, identityClient.AuthenticateRequest), nil\n}\n"
  },
  {
    "path": "internal/cyberark/client_test.go",
    "content": "package cyberark_test\n\nimport (\n\t\"crypto/x509\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/jetstack/venafi-connection-lib/http_client\"\n\t\"github.com/stretchr/testify/require\"\n\t\"k8s.io/klog/v2\"\n\t\"k8s.io/klog/v2/ktesting\"\n\n\t\"github.com/jetstack/preflight/internal/cyberark\"\n\t\"github.com/jetstack/preflight/internal/cyberark/dataupload\"\n\t\"github.com/jetstack/preflight/internal/cyberark/servicediscovery\"\n\tarktesting \"github.com/jetstack/preflight/internal/cyberark/testing\"\n\t\"github.com/jetstack/preflight/pkg/testutil\"\n\t\"github.com/jetstack/preflight/pkg/version\"\n\n\t_ \"k8s.io/klog/v2/ktesting/init\"\n)\n\n// TestCyberArkClient_PutSnapshot_MockAPI demonstrates that NewDatauploadClient works with the mock API.\nfunc TestCyberArkClient_PutSnapshot_MockAPI(t *testing.T) {\n\tlogger := ktesting.NewLogger(t, ktesting.DefaultConfig)\n\tctx := klog.NewContext(t.Context(), logger)\n\n\thttpClient := testutil.FakeCyberArk(t)\n\n\tcfg := cyberark.ClientConfig{\n\t\tSubdomain: servicediscovery.MockDiscoverySubdomain,\n\t\tUsername:  \"test@example.com\",\n\t\tSecret:    \"somepassword\",\n\t}\n\n\tdiscoveryClient := servicediscovery.New(httpClient, cfg.Subdomain)\n\n\tserviceMap, tenantUUID, err := discoveryClient.DiscoverServices(t.Context())\n\tif err != nil {\n\t\tt.Fatalf(\"failed to discover mock services: %v\", err)\n\t}\n\n\tcl, err := cyberark.NewDatauploadClient(ctx, httpClient, serviceMap, tenantUUID, cfg)\n\trequire.NoError(t, err)\n\n\terr = cl.PutSnapshot(ctx, dataupload.Snapshot{\n\t\tClusterID:    \"ffffffff-ffff-ffff-ffff-ffffffffffff\",\n\t\tAgentVersion: version.PreflightVersion,\n\t})\n\n\trequire.NoError(t, err)\n}\n\n// TestCyberArkClient_PutSnapshot_RealAPI demonstrates that NewDatauploadClient works with the real inventory API.\n//\n// An API token is obtained by authenticating with the ARK_USERNAME and ARK_SECRET from the environment.\n// ARK_SUBDOMAIN should be your tenant 
subdomain.\n//\n// To test against a tenant on the integration platform, also set:\n// ARK_DISCOVERY_API=https://platform-discovery.integration-cyberark.cloud/\n//\n// To enable verbose request logging:\n//\n//\tgo test ./internal/cyberark \\\n//\t  -v -count 1 -run TestCyberArkClient_PutSnapshot_RealAPI -args -testing.v 6\nfunc TestCyberArkClient_PutSnapshot_RealAPI(t *testing.T) {\n\tif strings.ToLower(os.Getenv(\"ARK_LIVE_TEST\")) != \"true\" {\n\t\tt.Skip(\"set ARK_LIVE_TEST=true to run this test against the live service\")\n\t\treturn\n\t}\n\n\tarktesting.SkipIfNoEnv(t)\n\n\tt.Log(\"This test runs against a live service and has been known to flake. If you see timeout issues it's possible that the test is flaking and it could be unrelated to your changes.\")\n\n\tlogger := ktesting.NewLogger(t, ktesting.DefaultConfig)\n\tctx := klog.NewContext(t.Context(), logger)\n\n\tvar rootCAs *x509.CertPool\n\thttpClient := http_client.NewDefaultClient(version.UserAgent(), rootCAs)\n\n\tcfg, err := cyberark.LoadClientConfigFromEnvironment()\n\trequire.NoError(t, err)\n\n\tdiscoveryClient := servicediscovery.New(httpClient, cfg.Subdomain)\n\n\tserviceMap, tenantUUID, err := discoveryClient.DiscoverServices(t.Context())\n\tif err != nil {\n\t\tt.Fatalf(\"failed to discover services: %v\", err)\n\t}\n\n\tcl, err := cyberark.NewDatauploadClient(ctx, httpClient, serviceMap, tenantUUID, cfg)\n\trequire.NoError(t, err)\n\n\terr = cl.PutSnapshot(ctx, dataupload.Snapshot{\n\t\tClusterID:    \"ffffffff-ffff-ffff-ffff-ffffffffffff\",\n\t\tAgentVersion: version.PreflightVersion,\n\t})\n\n\trequire.NoError(t, err)\n}\n"
  },
  {
    "path": "internal/cyberark/dataupload/dataupload.go",
    "content": "package dataupload\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/sha256\"\n\t\"encoding/base64\"\n\t\"encoding/hex\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\n\tarkapi \"github.com/jetstack/preflight/internal/cyberark/api\"\n\t\"github.com/jetstack/preflight/internal/cyberark/identity\"\n\t\"github.com/jetstack/preflight/pkg/version\"\n)\n\nconst (\n\t// maxRetrievePresignedUploadURLBodySize is the maximum allowed size for a response body from the\n\t// Retrieve Presigned Upload URL service.\n\tmaxRetrievePresignedUploadURLBodySize = 10 * 1024\n\n\t// apiPathSnapshotLinks is the URL path of the snapshot-links endpoint of the inventory API.\n\t// This endpoint returns an AWS presigned URL.\n\t// TODO(wallrj): Link to CyberArk API documentation when it is published.\n\tapiPathSnapshotLinks = \"/ingestions/kubernetes/snapshot-links\"\n)\n\ntype CyberArkClient struct {\n\tbaseURL    string\n\thttpClient *http.Client\n\n\ttenantUUID string\n\n\tauthenticateRequest identity.RequestAuthenticator\n}\n\n// New creates a new CyberArkClient. 
The tenant UUID is best sourced from service discovery along with the base URL.\nfunc New(httpClient *http.Client, baseURL string, tenantUUID string, authenticateRequest identity.RequestAuthenticator) *CyberArkClient {\n\treturn &CyberArkClient{\n\t\tbaseURL:    baseURL,\n\t\thttpClient: httpClient,\n\n\t\ttenantUUID: tenantUUID,\n\n\t\tauthenticateRequest: authenticateRequest,\n\t}\n}\n\n// Snapshot is the JSON that the CyberArk Discovery and Context API expects to\n// be uploaded to the AWS presigned URL.\ntype Snapshot struct {\n\t// AgentVersion is the version of the Venafi Kubernetes Agent which is uploading this snapshot.\n\tAgentVersion string `json:\"agent_version\"`\n\t// ClusterID is the unique ID of the Kubernetes cluster which this snapshot was taken from.\n\tClusterID string `json:\"cluster_id\"`\n\t// ClusterName is the name of the Kubernetes cluster which this snapshot was taken from.\n\tClusterName string `json:\"cluster_name\"`\n\t// ClusterDescription is an optional description of the Kubernetes cluster which this snapshot was taken from.\n\tClusterDescription string `json:\"cluster_description,omitempty\"`\n\t// K8SVersion is the version of Kubernetes which the cluster is running.\n\tK8SVersion string `json:\"k8s_version\"`\n\t// OIDCConfig contains OIDC configuration data from the API server's\n\t// `/.well-known/openid-configuration` endpoint\n\tOIDCConfig map[string]any `json:\"openid_configuration,omitempty\"`\n\t// OIDCConfigError contains any error encountered while fetching the OIDC configuration\n\tOIDCConfigError string `json:\"openid_configuration_error,omitempty\"`\n\t// JWKS contains JWKS data from the API server's `/openid/v1/jwks` endpoint\n\tJWKS map[string]any `json:\"jwks,omitempty\"`\n\t// JWKSError contains any error encountered while fetching the JWKS\n\tJWKSError string `json:\"jwks_error,omitempty\"`\n\t// Secrets is a list of Secret resources in the cluster. 
Not all Secret\n\t// types are included and only a subset of the Secret data is included.\n\tSecrets []runtime.Object `json:\"secrets\"`\n\t// ServiceAccounts is a list of ServiceAccount resources in the cluster.\n\tServiceAccounts []runtime.Object `json:\"serviceaccounts\"`\n\t// ConfigMaps is a list of ConfigMap resources in the cluster.\n\tConfigMaps []runtime.Object `json:\"configmaps\"`\n\t// ExternalSecrets is a list of ExternalSecret resources in the cluster.\n\tExternalSecrets []runtime.Object `json:\"externalsecrets\"`\n\t// SecretStores is a list of SecretStore resources in the cluster.\n\tSecretStores []runtime.Object `json:\"secretstores\"`\n\t// ClusterExternalSecrets is a list of ClusterExternalSecret resources in the cluster.\n\tClusterExternalSecrets []runtime.Object `json:\"clusterexternalsecrets\"`\n\t// ClusterSecretStores is a list of ClusterSecretStore resources in the cluster.\n\tClusterSecretStores []runtime.Object `json:\"clustersecretstores\"`\n\t// Roles is a list of Role resources in the cluster.\n\tRoles []runtime.Object `json:\"roles\"`\n\t// ClusterRoles is a list of ClusterRole resources in the cluster.\n\tClusterRoles []runtime.Object `json:\"clusterroles\"`\n\t// RoleBindings is a list of RoleBinding resources in the cluster.\n\tRoleBindings []runtime.Object `json:\"rolebindings\"`\n\t// ClusterRoleBindings is a list of ClusterRoleBinding resources in the cluster.\n\tClusterRoleBindings []runtime.Object `json:\"clusterrolebindings\"`\n\t// Jobs is a list of Job resources in the cluster.\n\tJobs []runtime.Object `json:\"jobs\"`\n\t// CronJobs is a list of CronJob resources in the cluster.\n\tCronJobs []runtime.Object `json:\"cronjobs\"`\n\t// Deployments is a list of Deployment resources in the cluster.\n\tDeployments []runtime.Object `json:\"deployments\"`\n\t// Statefulsets is a list of StatefulSet resources in the cluster.\n\tStatefulsets []runtime.Object `json:\"statefulsets\"`\n\t// Daemonsets is a list of DaemonSet resources in 
the cluster.\n\tDaemonsets []runtime.Object `json:\"daemonsets\"`\n\t// Pods is a list of Pod resources in the cluster.\n\tPods []runtime.Object `json:\"pods\"`\n}\n\n// PutSnapshot PUTs the supplied snapshot to an [AWS presigned URL] which it obtains via the CyberArk inventory API.\n// [AWS presigned URL]: https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html\n//\n// A SHA256 checksum header is included in the request, to verify that the payload\n// has been received intact.\n// Read [Checking object integrity for data uploads in Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity-upload.html),\n// to learn more.\nfunc (c *CyberArkClient) PutSnapshot(ctx context.Context, snapshot Snapshot) error {\n\tif snapshot.ClusterID == \"\" {\n\t\treturn fmt.Errorf(\"programmer mistake: the snapshot cluster ID cannot be left empty\")\n\t}\n\n\tencodedBody := &bytes.Buffer{}\n\thash := sha256.New()\n\tif err := json.NewEncoder(io.MultiWriter(encodedBody, hash)).Encode(snapshot); err != nil {\n\t\treturn err\n\t}\n\n\tchecksum := hash.Sum(nil)\n\tchecksumHex := hex.EncodeToString(checksum)\n\tchecksumBase64 := base64.StdEncoding.EncodeToString(checksum)\n\n\tpresignedUploadURL, username, err := c.retrievePresignedUploadURL(ctx, checksumHex, snapshot.ClusterID, int64(encodedBody.Len()))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"while retrieving snapshot upload URL: %s\", err)\n\t}\n\n\t// The snapshot-links endpoint returns an AWS presigned URL which only supports the PUT verb.\n\treq, err := http.NewRequestWithContext(ctx, http.MethodPut, presignedUploadURL, encodedBody)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq.Header.Set(\"X-Amz-Checksum-Sha256\", checksumBase64)\n\treq.Header.Set(\"X-Amz-Server-Side-Encryption\", \"AES256\")\n\n\tq := url.Values{}\n\n\tq.Add(\"agent_version\", snapshot.AgentVersion)\n\tq.Add(\"tenant_id\", c.tenantUUID)\n\tq.Add(\"upload_type\", 
\"k8s_snapshot\")\n\tq.Add(\"uploader_id\", snapshot.ClusterID)\n\tq.Add(\"username\", username)\n\tq.Add(\"vendor\", \"k8s\")\n\n\treq.Header.Set(\"X-Amz-Tagging\", q.Encode())\n\n\tversion.SetUserAgent(req)\n\n\tres, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\n\tif code := res.StatusCode; code < 200 || code >= 300 {\n\t\tbody, _ := io.ReadAll(io.LimitReader(res.Body, 500))\n\t\tif len(body) == 0 {\n\t\t\tbody = []byte(`<empty body>`)\n\t\t}\n\t\treturn fmt.Errorf(\"received response with status code %d: %s\", code, bytes.TrimSpace(body))\n\t}\n\n\treturn nil\n}\n\nconst SigV4Support = \"sigv4\"\n\n// RetrievePresignedUploadURLRequest is the JSON body sent to the inventory API to request a presigned upload URL.\ntype RetrievePresignedUploadURLRequest struct {\n\tClusterID string `json:\"cluster_id\"`\n\tChecksum  string `json:\"checksum_sha256\"`\n\n\t// AgentVersion is the v-prefixed version of the agent uploading the snapshot.\n\t// Note that some versions of the backend rely on this version being v-prefixed semver,\n\t// but that requirement was dropped in favour of the SigV4Support field below.\n\tAgentVersion string `json:\"agent_version\"`\n\n\t// FileSize is the size of the data we'll upload in bytes\n\tFileSize int64 `json:\"file_size\"`\n\n\t// SignatureVersion allows the agent to specify which version of AWS's signature scheme it expects for the presigned URL.\n\t// Older versions of the agent will not send this. 
All versions which support this field will unconditionally set it to the\n\t// value of SigV4Support, so the backend can rely on this field being set.\n\tSignatureVersion string `json:\"signature_version\"`\n}\n\nfunc (c *CyberArkClient) retrievePresignedUploadURL(ctx context.Context, checksum string, clusterID string, fileSize int64) (string, string, error) {\n\tuploadURL, err := url.JoinPath(c.baseURL, apiPathSnapshotLinks)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\trequest := RetrievePresignedUploadURLRequest{\n\t\tClusterID:        clusterID,\n\t\tChecksum:         checksum,\n\t\tAgentVersion:     version.PreflightVersion,\n\t\tFileSize:         fileSize,\n\t\tSignatureVersion: SigV4Support,\n\t}\n\n\tencodedBody := &bytes.Buffer{}\n\tif err := json.NewEncoder(encodedBody).Encode(request); err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\treq, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadURL, encodedBody)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\n\tusername, err := c.authenticateRequest(req)\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"failed to authenticate request: %s\", err)\n\t}\n\n\tversion.SetUserAgent(req)\n\n\t// Add telemetry headers\n\tarkapi.SetTelemetryRequestHeader(req)\n\n\tres, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tdefer res.Body.Close()\n\n\tif code := res.StatusCode; code < 200 || code >= 300 {\n\t\tbody, _ := io.ReadAll(io.LimitReader(res.Body, 500))\n\t\tif len(body) == 0 {\n\t\t\tbody = []byte(`<empty body>`)\n\t\t}\n\t\treturn \"\", \"\", fmt.Errorf(\"received response with status code %d: %s\", code, bytes.TrimSpace(body))\n\t}\n\n\tresponse := struct {\n\t\tURL string `json:\"url\"`\n\t}{}\n\n\tif err := json.NewDecoder(io.LimitReader(res.Body, maxRetrievePresignedUploadURLBodySize)).Decode(&response); err != nil {\n\t\tif err == io.ErrUnexpectedEOF {\n\t\t\treturn \"\", \"\", 
fmt.Errorf(\"rejecting JSON response from server as it was too large or was truncated\")\n\t\t}\n\n\t\treturn \"\", \"\", fmt.Errorf(\"failed to parse JSON from otherwise successful request to start data upload: %s\", err)\n\t}\n\n\treturn response.URL, username, nil\n}\n"
  },
  {
    "path": "internal/cyberark/dataupload/dataupload_test.go",
    "content": "package dataupload_test\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n\t\"k8s.io/klog/v2\"\n\t\"k8s.io/klog/v2/ktesting\"\n\n\t\"github.com/jetstack/preflight/internal/cyberark/dataupload\"\n\t\"github.com/jetstack/preflight/internal/cyberark/identity\"\n\t\"github.com/jetstack/preflight/pkg/version\"\n\n\t_ \"k8s.io/klog/v2/ktesting/init\"\n)\n\n// TestCyberArkClient_PutSnapshot_MockAPI tests the dataupload code against a\n// mock API server. The mock server is configured to return different responses\n// based on the cluster ID and bearer token used in the request.\nfunc TestCyberArkClient_PutSnapshot_MockAPI(t *testing.T) {\n\tsetToken := func(token string) identity.RequestAuthenticator {\n\t\treturn func(req *http.Request) (string, error) {\n\t\t\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", token))\n\t\t\treturn \"foo@example.com\", nil // set a dummy username for testing purposes; the actual value is not important for these tests\n\t\t}\n\t}\n\n\ttests := []struct {\n\t\tname         string\n\t\tsnapshot     dataupload.Snapshot\n\t\tauthenticate identity.RequestAuthenticator\n\t\trequireFn    func(t *testing.T, err error)\n\t}{\n\t\t{\n\t\t\tname: \"successful upload\",\n\t\t\tsnapshot: dataupload.Snapshot{\n\t\t\t\tClusterID:    \"ffffffff-ffff-ffff-ffff-ffffffffffff\",\n\t\t\t\tAgentVersion: version.PreflightVersion,\n\t\t\t},\n\t\t\tauthenticate: setToken(\"success-token\"),\n\t\t\trequireFn: func(t *testing.T, err error) {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"error when cluster ID is empty\",\n\t\t\tsnapshot: dataupload.Snapshot{\n\t\t\t\tClusterID:    \"\",\n\t\t\t\tAgentVersion: \"test-version\",\n\t\t\t},\n\t\t\tauthenticate: setToken(\"success-token\"),\n\t\t\trequireFn: func(t *testing.T, err error) {\n\t\t\t\trequire.ErrorContains(t, err, \"programmer mistake: the snapshot cluster ID cannot be left 
empty\")\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"error when bearer token is incorrect\",\n\t\t\tsnapshot: dataupload.Snapshot{\n\t\t\t\tClusterID:    \"test\",\n\t\t\t\tAgentVersion: \"test-version\",\n\t\t\t},\n\t\t\tauthenticate: setToken(\"fail-token\"),\n\t\t\trequireFn: func(t *testing.T, err error) {\n\t\t\t\trequire.ErrorContains(t, err, \"while retrieving snapshot upload URL: received response with status code 500: should authenticate using the correct bearer token\")\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"invalid JSON from server (RetrievePresignedUploadURL step)\",\n\t\t\tsnapshot: dataupload.Snapshot{\n\t\t\t\tClusterID:    \"invalid-json-retrieve-presigned\",\n\t\t\t\tAgentVersion: \"test-version\",\n\t\t\t},\n\t\t\tauthenticate: setToken(\"success-token\"),\n\t\t\trequireFn: func(t *testing.T, err error) {\n\t\t\t\trequire.ErrorContains(t, err, \"while retrieving snapshot upload URL: rejecting JSON response from server as it was too large or was truncated\")\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"500 from server (RetrievePresignedUploadURL step)\",\n\t\t\tsnapshot: dataupload.Snapshot{\n\t\t\t\tClusterID:    \"invalid-response-post-data\",\n\t\t\t\tAgentVersion: \"test-version\",\n\t\t\t},\n\t\t\tauthenticate: setToken(\"success-token\"),\n\t\t\trequireFn: func(t *testing.T, err error) {\n\t\t\t\trequire.ErrorContains(t, err, \"while retrieving snapshot upload URL: received response with status code 500: mock error\")\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tlogger := ktesting.NewLogger(t, ktesting.DefaultConfig)\n\t\t\tctx := klog.NewContext(t.Context(), logger)\n\n\t\t\tdatauploadAPIBaseURL, httpClient := dataupload.MockDataUploadServer(t)\n\n\t\t\tcyberArkClient := dataupload.New(httpClient, datauploadAPIBaseURL, \"test-tenant-uuid\", tc.authenticate)\n\n\t\t\terr := cyberArkClient.PutSnapshot(ctx, tc.snapshot)\n\t\t\ttc.requireFn(t, err)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "internal/cyberark/dataupload/mock.go",
    "content": "package dataupload\n\nimport (\n\t\"bytes\"\n\t\"crypto/rand\"\n\t\"crypto/sha256\"\n\t\"encoding/base64\"\n\t\"encoding/hex\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"k8s.io/client-go/transport\"\n\n\tarkapi \"github.com/jetstack/preflight/internal/cyberark/api\"\n\t\"github.com/jetstack/preflight/pkg/version\"\n)\n\nconst (\n\tsuccessBearerToken = \"success-token\"\n\n\tsuccessClusterID = \"ffffffff-ffff-ffff-ffff-ffffffffffff\"\n)\n\ntype uploadValues struct {\n\tClusterID string\n\tFileSize  int64\n}\n\ntype mockDataUploadServer struct {\n\tt         testing.TB\n\tserverURL string\n\n\tmux *http.ServeMux\n\n\texpectedUploadValues      map[string]uploadValues\n\texpectedUploadValuesMutex sync.Mutex\n}\n\n// MockDataUploadServer starts a server which mocks the CyberArk\n// Discovery and Context API, and an HTTP client with the CA certs needed to\n// connect to it.\n//\n// The returned URL can be supplied to the `dataupload.New` function as the base\n// URL for the discoverycontext API.\n//\n// The returned HTTP client has a transport which logs requests and responses\n// depending on log level of the logger supplied in the context.\n//\n// The mock server will return a successful response when the cluster ID matches\n// successClusterID. 
Other cluster IDs can be used to trigger various failure\n// responses.\nfunc MockDataUploadServer(t testing.TB) (string, *http.Client) {\n\tmux := http.NewServeMux()\n\tmds := &mockDataUploadServer{\n\t\tt: t,\n\n\t\texpectedUploadValues: make(map[string]uploadValues),\n\t}\n\n\tmux.HandleFunc(\"POST \"+apiPathSnapshotLinks, mds.handleSnapshotLinks)\n\n\t// The path includes random data to ensure that each request is treated separately by the mock server, allowing us to track data across calls.\n\t// It also ensures that the client isn't using some pre-saved path and is actually using the presigned URL returned by the mock server in the previous step, which is important for test validity.\n\tmux.HandleFunc(\"PUT /presigned-upload/{randData}\", mds.handlePresignedUpload)\n\n\tserver := httptest.NewTLSServer(mds)\n\tt.Cleanup(server.Close)\n\n\tmds.mux = mux\n\tmds.serverURL = server.URL\n\n\thttpClient := server.Client()\n\thttpClient.Transport = transport.NewDebuggingRoundTripper(httpClient.Transport, transport.DebugByContext)\n\treturn server.URL, httpClient\n}\n\nfunc (mds *mockDataUploadServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tmds.t.Log(r.Method, r.RequestURI)\n\n\tmds.mux.ServeHTTP(w, r)\n}\n\n// randHex reads 8 random bytes and returns them as a hex string. 
It is used to generate\n// unique paths per-request to ensure that file size is tracked across calls.\nfunc randHex() string {\n\tb := make([]byte, 8)\n\t_, err := rand.Read(b)\n\tif err != nil {\n\t\tpanic(\"failed to read random bytes: \" + err.Error())\n\t}\n\n\treturn hex.EncodeToString(b)\n}\n\nfunc (mds *mockDataUploadServer) handleSnapshotLinks(w http.ResponseWriter, r *http.Request) {\n\tif r.Header.Get(\"User-Agent\") != version.UserAgent() {\n\t\thttp.Error(w, \"should set user agent on all requests\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif r.Header.Get(arkapi.TelemetryHeaderKey) == \"\" {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_, _ = w.Write([]byte(\"should set telemetry header on all requests\"))\n\t\treturn\n\t}\n\n\tif r.Header.Get(\"Content-Type\") != \"application/json\" {\n\t\thttp.Error(w, \"should send JSON on all requests\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif r.Header.Get(\"Authorization\") != \"Bearer \"+successBearerToken {\n\t\thttp.Error(w, \"should authenticate using the correct bearer token\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tvar req RetrievePresignedUploadURLRequest\n\n\tdecoder := json.NewDecoder(r.Body)\n\tdecoder.DisallowUnknownFields()\n\n\tif err := decoder.Decode(&req); err != nil {\n\t\thttp.Error(w, `{\"error\": \"Invalid request format\"}`, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif req.SignatureVersion != SigV4Support {\n\t\thttp.Error(w, fmt.Sprintf(\"post body does not set signature_version=%s\", SigV4Support), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif req.AgentVersion != version.PreflightVersion {\n\t\thttp.Error(w, fmt.Sprintf(\"post body contains unexpected agent version: %s\", req.AgentVersion), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// Simulate invalid JSON response for RetrievePresignedUploadURL step\n\tif req.ClusterID == \"invalid-json-retrieve-presigned\" 
{\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t_, _ = w.Write([]byte(`{\"url\":`)) // invalid JSON\n\t\treturn\n\t}\n\n\t// Simulate a 500 error response for the RetrievePresignedUploadURL step\n\tif req.ClusterID == \"invalid-response-post-data\" {\n\t\thttp.Error(w, \"mock error\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif req.ClusterID != successClusterID {\n\t\thttp.Error(w, \"post body contains cluster ID\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif req.FileSize <= 0 {\n\t\thttp.Error(w, \"file size must be greater than 0\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\trandomData := randHex()\n\n\tmds.expectedUploadValuesMutex.Lock()\n\tdefer mds.expectedUploadValuesMutex.Unlock()\n\n\tuploadValues := uploadValues{\n\t\tClusterID: req.ClusterID,\n\t\tFileSize:  req.FileSize,\n\t}\n\n\tmds.expectedUploadValues[randomData] = uploadValues\n\n\tpresignedURL, err := url.JoinPath(mds.serverURL, \"presigned-upload\", randomData)\n\tif err != nil {\n\t\thttp.Error(w, \"failed to generate presigned URL\", http.StatusInternalServerError)\n\t\tmds.t.Logf(\"failed to generate presigned URL: %v\", err)\n\t\treturn\n\t}\n\n\t// Write response body\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\t_ = json.NewEncoder(w).Encode(struct {\n\t\tURL string `json:\"url\"`\n\t}{presignedURL})\n}\n\n// An example of a real checksum mismatch error from the AWS API when the\n// request body does not match the checksum in the request header.\nconst amzExampleChecksumError = `<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Error>\n  <Code>BadDigest</Code>\n  <Message>The SHA256 you specified did not match the calculated checksum.</Message>\n  <RequestId>THR2V1RX700Z8SC7</RequestId>\n  <HostId>F0xSC0H93Xs0BlCx6RjasZgrtjNkNB7lF4+yz1AiPQHswpdEoqj3iTgEN8SUWgV2Qm/laPobVIMz9SYTNHqdoA==</HostId>\n</Error>`\n\nfunc (mds *mockDataUploadServer) handlePresignedUpload(w 
http.ResponseWriter, r *http.Request) {\n\trandData := r.PathValue(\"randData\")\n\tif randData == \"\" {\n\t\thttp.Error(w, \"missing randData in path; should match that returned in presigned url\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tmds.expectedUploadValuesMutex.Lock()\n\tuploadValues, ok := mds.expectedUploadValues[randData]\n\tmds.expectedUploadValuesMutex.Unlock()\n\n\tif !ok {\n\t\thttp.Error(w, \"didn't find a prior call to generate presigned URL\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif r.Header.Get(\"User-Agent\") != version.UserAgent() {\n\t\thttp.Error(w, \"should set user agent on all requests\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif r.Header.Get(arkapi.TelemetryHeaderKey) != \"\" {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_, _ = w.Write([]byte(\"should NOT set telemetry header on requests to presigned URL\"))\n\t\treturn\n\t}\n\n\tamzChecksum := r.Header.Get(\"X-Amz-Checksum-Sha256\")\n\tif amzChecksum == \"\" {\n\t\thttp.Error(w, \"should set x-amz-checksum-sha256 header on all requests\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tsseHeader := r.Header.Get(\"X-Amz-Server-Side-Encryption\")\n\tif sseHeader != \"AES256\" {\n\t\thttp.Error(w, \"should set x-amz-server-side-encryption header to AES256 on all requests\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\ttaggingHeader := r.Header.Get(\"X-Amz-Tagging\")\n\tif taggingHeader == \"\" {\n\t\thttp.Error(w, \"should set x-amz-tagging header on all requests\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\ttags, err := url.ParseQuery(taggingHeader)\n\tif err != nil {\n\t\thttp.Error(w, \"x-amz-tagging header should be encoded as a valid query string\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif tags.Get(\"agent_version\") != version.PreflightVersion {\n\t\thttp.Error(w, fmt.Sprintf(\"x-amz-tagging should contain an agent_version tag with value %s\", version.PreflightVersion), 
http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif tags.Get(\"tenant_id\") == \"\" {\n\t\t// TODO: if we change setup a bit, we can check the tenant_id matches the expected tenant_id from the test config, but for now, just check it's set\n\t\thttp.Error(w, \"x-amz-tagging should contain a tenant_id tag\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif tags.Get(\"upload_type\") != \"k8s_snapshot\" {\n\t\thttp.Error(w, \"x-amz-tagging should contain an upload_type tag with value k8s_snapshot\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif tags.Get(\"uploader_id\") != uploadValues.ClusterID {\n\t\thttp.Error(w, \"x-amz-tagging should contain an uploader_id tag which matches the cluster ID sent in the RetrievePresignedUploadURL request\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif tags.Get(\"username\") == \"\" {\n\t\t// TODO: if we change setup a bit, we can check the username matches the expected username from the test config\n\t\t// but for now, just check it's set\n\t\thttp.Error(w, \"x-amz-tagging should contain a username tag\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif tags.Get(\"vendor\") != \"k8s\" {\n\t\thttp.Error(w, \"x-amz-tagging should contain a vendor tag with value k8s\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tbody, err := io.ReadAll(r.Body)\n\trequire.NoError(mds.t, err)\n\n\tif uploadValues.FileSize != int64(len(body)) {\n\t\thttp.Error(w, fmt.Sprintf(\"file size in request body should match that sent in RetrievePresignedUploadURL request; expected %d, got %d\", uploadValues.FileSize, len(body)), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\thash := sha256.New()\n\t_, err = hash.Write(body)\n\trequire.NoError(mds.t, err)\n\n\t// AWS S3 responds with a BadDigest error if the request body has a\n\t// different checksum than the checksum supplied in the request header.\n\tif amzChecksum != base64.StdEncoding.EncodeToString(hash.Sum(nil)) 
{\n\t\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\t\thttp.Error(w, amzExampleChecksumError, http.StatusBadRequest)\n\t}\n\n\t// Verifies that the new Snapshot format is used in the request body.\n\tvar snapshot Snapshot\n\td := json.NewDecoder(bytes.NewBuffer(body))\n\td.DisallowUnknownFields()\n\terr = d.Decode(&snapshot)\n\trequire.NoError(mds.t, err)\n\tassert.Equal(mds.t, successClusterID, snapshot.ClusterID)\n\tassert.Equal(mds.t, version.PreflightVersion, snapshot.AgentVersion)\n\n\t// AWS S3 responds with an empty body if the PUT succeeds\n\tw.WriteHeader(http.StatusOK)\n}\n"
  },
  {
    "path": "internal/cyberark/identity/advance_authentication_test.go",
    "content": "package identity\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"k8s.io/klog/v2\"\n\t\"k8s.io/klog/v2/ktesting\"\n\n\t\"github.com/jetstack/preflight/internal/cyberark/servicediscovery\"\n\n\t_ \"k8s.io/klog/v2/ktesting/init\"\n)\n\nfunc Test_IdentityAdvanceAuthentication(t *testing.T) {\n\ttests := map[string]struct {\n\t\tusername    string\n\t\tpassword    []byte\n\t\tadvanceBody advanceAuthenticationRequestBody\n\n\t\texpectedError error\n\t}{\n\t\t\"success\": {\n\t\t\tusername: successUser,\n\t\t\tpassword: []byte(successPassword),\n\t\t\tadvanceBody: advanceAuthenticationRequestBody{\n\t\t\t\tAction:          ActionAnswer,\n\t\t\t\tMechanismID:     successMechanismID,\n\t\t\t\tSessionID:       successSessionID,\n\t\t\t\tTenantID:        \"foo\",\n\t\t\t\tPersistentLogin: true,\n\t\t\t},\n\n\t\t\texpectedError: nil,\n\t\t},\n\t\t\"incorrect password\": {\n\t\t\tusername: successUser,\n\t\t\tpassword: []byte(\"foo\"),\n\t\t\tadvanceBody: advanceAuthenticationRequestBody{\n\t\t\t\tAction:          ActionAnswer,\n\t\t\t\tMechanismID:     successMechanismID,\n\t\t\t\tSessionID:       successSessionID,\n\t\t\t\tTenantID:        \"foo\",\n\t\t\t\tPersistentLogin: true,\n\t\t\t},\n\n\t\t\texpectedError: fmt.Errorf(`got a failure response from request to advance authentication: message=\"Authentication (login or challenge) has failed. 
Please try again or contact your system administrator.\", error=\"aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee:55555555555555555555555555555555\"`),\n\t\t},\n\t\t\"bad action\": {\n\t\t\tusername: successUser,\n\t\t\tpassword: []byte(successPassword),\n\t\t\tadvanceBody: advanceAuthenticationRequestBody{\n\t\t\t\tAction:          \"foo\",\n\t\t\t\tMechanismID:     successMechanismID,\n\t\t\t\tSessionID:       successSessionID,\n\t\t\t\tTenantID:        \"foo\",\n\t\t\t\tPersistentLogin: true,\n\t\t\t},\n\n\t\t\texpectedError: fmt.Errorf(`got a failure response from request to advance authentication: message=\"Authentication (login or challenge) has failed. Please try again or contact your system administrator.\", error=\"aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee:55555555555555555555555555555555\"`),\n\t\t},\n\t\t\"bad mechanism id\": {\n\t\t\tusername: successUser,\n\t\t\tpassword: []byte(successPassword),\n\t\t\tadvanceBody: advanceAuthenticationRequestBody{\n\t\t\t\tAction:          ActionAnswer,\n\t\t\t\tMechanismID:     \"foo\",\n\t\t\t\tSessionID:       successSessionID,\n\t\t\t\tTenantID:        \"foo\",\n\t\t\t\tPersistentLogin: true,\n\t\t\t},\n\n\t\t\texpectedError: fmt.Errorf(`got a failure response from request to advance authentication: message=\"Authentication (login or challenge) has failed. Please try again or contact your system administrator.\", error=\"aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee:55555555555555555555555555555555\"`),\n\t\t},\n\t\t\"bad session id\": {\n\t\t\tusername: successUser,\n\t\t\tpassword: []byte(successPassword),\n\t\t\tadvanceBody: advanceAuthenticationRequestBody{\n\t\t\t\tAction:          ActionAnswer,\n\t\t\t\tMechanismID:     successMechanismID,\n\t\t\t\tSessionID:       \"foo\",\n\t\t\t\tTenantID:        \"foo\",\n\t\t\t\tPersistentLogin: true,\n\t\t\t},\n\n\t\t\texpectedError: fmt.Errorf(`got a failure response from request to advance authentication: message=\"Authentication (login or challenge) has failed. 
Please try again or contact your system administrator.\", error=\"aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee:55555555555555555555555555555555\"`),\n\t\t},\n\t\t\"persistent login not set\": {\n\t\t\tusername: successUser,\n\t\t\tpassword: []byte(successPassword),\n\t\t\tadvanceBody: advanceAuthenticationRequestBody{\n\t\t\t\tAction:          ActionAnswer,\n\t\t\t\tMechanismID:     successMechanismID,\n\t\t\t\tSessionID:       successSessionID,\n\t\t\t\tTenantID:        \"foo\",\n\t\t\t\tPersistentLogin: false,\n\t\t\t},\n\n\t\t\texpectedError: fmt.Errorf(\"got unexpected status code 403 Forbidden from request to advance authentication in CyberArk Identity API\"),\n\t\t},\n\t}\n\n\tfor name, testSpec := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tlogger := ktesting.NewLogger(t, ktesting.DefaultConfig)\n\t\t\tctx := klog.NewContext(t.Context(), logger)\n\n\t\t\tidentityAPI, httpClient := MockIdentityServer(t)\n\n\t\t\tclient := New(httpClient, identityAPI, servicediscovery.MockDiscoverySubdomain)\n\n\t\t\terr := client.doAdvanceAuthentication(ctx, testSpec.username, &testSpec.password, testSpec.advanceBody)\n\t\t\tif testSpec.expectedError != err {\n\t\t\t\tif testSpec.expectedError == nil {\n\t\t\t\t\tt.Errorf(\"didn't expect an error but got %v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif err == nil {\n\t\t\t\t\tt.Errorf(\"expected no error but got err=%v\", testSpec.expectedError)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif err.Error() != testSpec.expectedError.Error() {\n\t\t\t\t\tt.Errorf(\"expected err=%v\\nbut got err=%v\", testSpec.expectedError, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif testSpec.expectedError != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif client.tokenCached.Username != testSpec.username {\n\t\t\t\tt.Errorf(\"expected username %s to be set on cached token after authentication but got %q\", testSpec.username, client.tokenCached.Username)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif len(client.tokenCached.Token) == 0 
{\n\t\t\t\tt.Errorf(\"expected token for %s to be set to %q but wasn't found\", testSpec.username, mockSuccessfulStartAuthenticationToken)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif client.tokenCached.Token != mockSuccessfulStartAuthenticationToken {\n\t\t\t\tt.Errorf(\"expected token for %s to be set to %q but was set to %q\", testSpec.username, mockSuccessfulStartAuthenticationToken, client.tokenCached.Token)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "internal/cyberark/identity/authenticated_http_client.go",
    "content": "package identity\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n)\n\ntype RequestAuthenticator func(req *http.Request) (string, error)\n\n// AuthenticateRequest is a helper function that adds the Authorization header to an HTTP request using a cached token.\n// It sets the Header directly, and if successful returns the username corresponding to the token.\nfunc (c *Client) AuthenticateRequest(req *http.Request) (string, error) {\n\tc.tokenCachedMutex.Lock()\n\tdefer c.tokenCachedMutex.Unlock()\n\n\tif len(c.tokenCached.Token) == 0 {\n\t\treturn \"\", fmt.Errorf(\"no token cached\")\n\t}\n\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", c.tokenCached.Token))\n\n\treturn c.tokenCached.Username, nil\n}\n"
  },
  {
    "path": "internal/cyberark/identity/cmd/testidentity/main.go",
    "content": "package main\n\nimport (\n\t\"context\"\n\t\"crypto/x509\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"os/signal\"\n\n\t\"github.com/jetstack/venafi-connection-lib/http_client\"\n\t\"k8s.io/klog/v2\"\n\n\t\"github.com/jetstack/preflight/internal/cyberark/identity\"\n\t\"github.com/jetstack/preflight/internal/cyberark/servicediscovery\"\n\t\"github.com/jetstack/preflight/pkg/version\"\n)\n\n// This is a trivial CLI application for testing our identity client end-to-end.\n// It's not intended for distribution; it simply allows us to run our client and check\n// the login is successful.\n//\n// To test against a tenant on the integration platform, set:\n// ARK_DISCOVERY_API=https://platform-discovery.integration-cyberark.cloud/\nconst (\n\tsubdomainFlag = \"subdomain\"\n\tusernameFlag  = \"username\"\n\tpasswordEnv   = \"ARK_SECRET\"\n)\n\nvar (\n\tsubdomain string\n\tusername  string\n)\n\nfunc run(ctx context.Context) error {\n\tif subdomain == \"\" {\n\t\treturn fmt.Errorf(\"no %s flag provided\", subdomainFlag)\n\t}\n\n\tif username == \"\" {\n\t\treturn fmt.Errorf(\"no %s flag provided\", usernameFlag)\n\t}\n\n\tpassword := os.Getenv(passwordEnv)\n\tif password == \"\" {\n\t\treturn fmt.Errorf(\"no password provided in %s\", passwordEnv)\n\t}\n\n\tvar rootCAs *x509.CertPool\n\thttpClient := http_client.NewDefaultClient(version.UserAgent(), rootCAs)\n\n\tsdClient := servicediscovery.New(httpClient, subdomain)\n\tservices, _, err := sdClient.DiscoverServices(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"while performing service discovery: %s\", err)\n\t}\n\n\tclient := identity.New(httpClient, services.Identity.API, subdomain)\n\n\terr = client.LoginUsernamePassword(ctx, username, []byte(password))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"while performing login with username and password: %s\", err)\n\t}\n\n\treturn nil\n}\n\nfunc main() {\n\tdefer klog.Flush()\n\n\tflagSet := flag.NewFlagSet(\"test\", 
flag.ExitOnError)\n\tklog.InitFlags(flagSet)\n\t_ = flagSet.Parse([]string{\"--v\", \"6\"})\n\n\tlogger := klog.Background()\n\n\tctx := klog.NewContext(context.Background(), logger)\n\tctx, cancel := signal.NotifyContext(ctx, os.Interrupt)\n\tdefer cancel()\n\n\tflag.StringVar(&subdomain, subdomainFlag, \"cert-manager\", \"The subdomain to use for service discovery\")\n\tflag.StringVar(&username, usernameFlag, \"\",\n\t\tfmt.Sprintf(\"Username to log in with. Password should be provided via %s envvar\", passwordEnv),\n\t)\n\n\tflag.Parse()\n\n\terrCode := 0\n\n\terr := run(ctx)\n\tif err != nil {\n\t\tlogger.Error(err, \"execution failed\")\n\t\terrCode = 1\n\t}\n\n\tklog.FlushAndExit(klog.ExitFlushTimeout, errCode)\n}\n"
  },
  {
    "path": "internal/cyberark/identity/identity.go",
    "content": "package identity\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"sync\"\n\t\"time\"\n\n\t\"k8s.io/klog/v2\"\n\n\tarkapi \"github.com/jetstack/preflight/internal/cyberark/api\"\n\t\"github.com/jetstack/preflight/pkg/logs\"\n\t\"github.com/jetstack/preflight/pkg/version\"\n)\n\nconst (\n\t// MechanismUsernamePassword is the string which identifies the username/password mechanism for completing\n\t// a login attempt\n\tMechanismUsernamePassword = \"UP\"\n\n\t// ActionAnswer is the string which is sent to an AdvanceAuthentication request to indicate we're providing\n\t// the credentials in band in text format (i.e., we're sending a password)\n\tActionAnswer = \"Answer\"\n\n\t// SummaryLoginSuccess is returned by a StartAuthentication to indicate that login does not need\n\t// to proceed to the AdvanceAuthentication step.\n\t// We don't handle this because we don't expect it to happen.\n\tSummaryLoginSuccess = \"LoginSuccess\"\n\n\t// SummaryNewPackage is returned by a StartAuthentication call when the user must complete a challenge\n\t// to complete the log in. 
This is expected on a first login.\n\tSummaryNewPackage = \"NewPackage\"\n\n\t// maxStartAuthenticationBodySize is the maximum allowed size for a response body from the CyberArk Identity\n\t// StartAuthentication endpoint.\n\t// As of 2025-04-30, a response from the integration environment is ~1kB\n\tmaxStartAuthenticationBodySize = 10 * 1024\n\n\t// maxAdvanceAuthenticationBodySize is the maximum allowed size for a response body from the CyberArk Identity\n\t// AdvanceAuthentication endpoint.\n\t// As of 2025-04-30, a response from the integration environment is ~3kB\n\tmaxAdvanceAuthenticationBodySize = 30 * 1024\n)\n\nvar (\n\terrNoUPMechanism = fmt.Errorf(\"found no authentication mechanism with the username + password type (%s); unable to complete login using this identity\", MechanismUsernamePassword)\n)\n\n// startAuthenticationRequestBody is the body sent to the StartAuthentication endpoint in CyberArk Identity;\n// see https://api-docs.cyberark.com/identity-docs-api/docs/security-api#/Login/start-authentication\ntype startAuthenticationRequestBody struct {\n\t// TenantID is the internal ID of the tenant containing the user attempting to log in. In testing,\n\t// it seems that the subdomain works in this field.\n\tTenantID string `json:\"TenantId\"`\n\n\t// Version is set to 1.0\n\tVersion string `json:\"Version\"`\n\n\t// User is the username of the user trying to log in. 
For a human, this is likely to be an email address.\n\tUser string `json:\"User\"`\n}\n\n// identityResponseBody generically wraps a response from the Identity server; the Result will differ for\n// responses from different endpoints, but the other fields are similar.\n// Not all fields in the JSON returned from the server are replicated here, since we only need a subset.\ntype identityResponseBody[T any] struct {\n\t// Success is a simple boolean indicator from the server of success.\n\t// NB: The JSON key is lowercase, in contrast to other JSON keys in the response.\n\tSuccess bool `json:\"success\"`\n\n\t// Result holds the information we need to parse from successful responses\n\tResult T `json:\"Result\"`\n\n\t// Message holds an information message such as an error message. Experimentally it seems to be null\n\t// for successful attempts.\n\tMessage string `json:\"Message\"`\n\n\t// ErrorID holds an error ID when something goes wrong with the call.\n\t// Not to be confused with ErrorCode; for failure messages, we see ErrorID set and ErrorCode null.\n\tErrorID string `json:\"ErrorID\"`\n\n\t// NB: Other fields omitted since we don't need them\n}\n\n// startAuthenticationResponseBody is the response returned by the server from a request to StartAuthentication.\ntype startAuthenticationResponseBody identityResponseBody[startAuthenticationResponseResult]\n\n// advanceAuthenticationResponseBody is the response from the AdvanceAuthentication endpoint.\ntype advanceAuthenticationResponseBody identityResponseBody[advanceAuthenticationResponseResult]\n\n// startAuthenticationResponseResult holds the important data we need to pass to AdvanceAuthentication\ntype startAuthenticationResponseResult struct {\n\t// SessionID identifies this login attempt, and must be passed with the\n\t// follow-up AdvanceAuthentication request.\n\tSessionID string `json:\"SessionId\"`\n\n\t// Challenges provides a list of methods for logging in. 
We need to look\n\t// for the correct login method we want to use, and then find the MechanismId\n\t// for that login method to pass to the AdvanceAuthentication request.\n\tChallenges []startAuthenticationChallenge `json:\"Challenges\"`\n\n\t// Summary indicates whether a StartAuthentication call needs to be followed up with an AdvanceAuthentication\n\t// call. From the docs:\n\t// > If the user exists, the response contains a Summary of either LoginSuccess or NewPackage.\n\t// > You receive LoginSuccess when the request includes an .ASPXAUTH cookie from prior successful authentication.\n\tSummary string `json:\"Summary\"`\n}\n\n// startAuthenticationChallenge is an entry in the array of MFA mechanisms;\n// at least one MFA mechanism should be satisfied by the user.\ntype startAuthenticationChallenge struct {\n\tMechanisms []startAuthenticationMechanism `json:\"Mechanisms\"`\n}\n\n// startAuthenticationMechanism holds details of a given mechanism for authenticating.\n// This corresponds to \"how\" the user authenticates, e.g. via password or email, etc\ntype startAuthenticationMechanism struct {\n\t// Name represents the name of the challenge mechanism. 
This is usually an upper-case\n\t// string, such as \"UP\" for \"username / password\"\n\tName string `json:\"Name\"`\n\n\t// Enrolled is true if the given mechanism is available for the user attempting\n\t// to authenticate.\n\tEnrolled bool `json:\"Enrolled\"`\n\n\t// MechanismID uniquely identifies a particular mechanism, and must be passed\n\t// to the AdvanceAuthentication request when authenticating.\n\tMechanismID string `json:\"MechanismId\"`\n}\n\n// advanceAuthenticationRequestBody is a request body for the AdvanceAuthentication call to CyberArk Identity,\n// which should usually be obtained by making requests to StartAuthentication first.\n// WARNING: This struct can hold secret data (a user's password)\n// See: https://api-docs.cyberark.com/identity-docs-api/docs/security-api#/Login/advance-authentication\ntype advanceAuthenticationRequestBody struct {\n\t// Action is a string identifying how we're intending to log in; for username/password, this is\n\t// set to \"Answer\" to indicate that the password is held in the Answer field\n\tAction string `json:\"Action\"`\n\n\t// Answer holds the user's password to send to the server\n\t// WARNING: THIS IS SECRET DATA.\n\tAnswer string `json:\"Answer\"`\n\n\t// MechanismID identifies the login mechanism and must be retrieved from a call to StartAuthentication\n\tMechanismID string `json:\"MechanismId\"`\n\n\t// SessionID identifies the login session and must be retrieved from a call to StartAuthentication\n\tSessionID string `json:\"SessionId\"`\n\n\t// TenantID identifies the tenant; this can be inferred from the URL if we used service discovery to\n\t// get the Identity API URL, but we set it anyway to be explicit.\n\tTenantID string `json:\"TenantId\"`\n\n\t// PersistentLogin is documented to \"[indicate] whether the session should persist after the user\n\t// closes the browser\"; for service-to-service auth which we're trying to do, we set this to true.\n\tPersistentLogin bool 
`json:\"PersistentLogin\"`\n}\n\n// advanceAuthenticationResponseResult is the specific information returned for a successful AdvanceAuthentication call\ntype advanceAuthenticationResponseResult struct {\n\t// Summary holds a \"brief summary of the authentication outcome\"\n\tSummary string `json:\"Summary\"`\n\n\t// Token is the auth token we need to save; this is the result of the login\n\t// process which can be sent as a bearer token to other services.\n\tToken string `json:\"Token\"`\n\n\t// Other fields omitted as they're not needed\n}\n\n// Client is a client for interacting with the CyberArk Identity API and performing a login using a username and password.\n// For context on the behaviour of this client, see the Python SDK: https://github.com/cyberark/ark-sdk-python/blob/3be12c3f2d3a2d0407025028943e584b6edc5996/ark_sdk_python/auth/identity/ark_identity.py\ntype Client struct {\n\thttpClient *http.Client\n\tbaseURL    string\n\tsubdomain  string\n\n\ttokenCached      token\n\ttokenCachedMutex sync.Mutex\n\ttokenCachedTime  time.Time\n}\n\n// token is a wrapper type for holding auth tokens we want to cache.\ntype token struct {\n\tUsername string\n\tToken    string\n}\n\n// New returns an initialized CyberArk Identity client using a default service discovery client.\nfunc New(httpClient *http.Client, baseURL string, subdomain string) *Client {\n\treturn &Client{\n\t\thttpClient: httpClient,\n\t\tbaseURL:    baseURL,\n\t\tsubdomain:  subdomain,\n\n\t\ttokenCached:      token{},\n\t\ttokenCachedMutex: sync.Mutex{},\n\t}\n}\n\n// LoginUsernamePassword performs a blocking call to fetch an auth token from CyberArk Identity using the given username and password.\n// The password is zeroed after use.\n// Tokens are cached internally and are not directly accessible to code; use Client.AuthenticateRequest to add credentials\n// to an *http.Request.\nfunc (c *Client) LoginUsernamePassword(ctx context.Context, username string, password []byte) error {\n\t// note: we 
hold the mutex for the whole login attempt to ensure that only one login attempt can be in flight at once,\n\t// and to ensure that the token cache is correctly updated\n\tc.tokenCachedMutex.Lock()\n\tdefer c.tokenCachedMutex.Unlock()\n\n\tdefer func() {\n\t\tfor i := range password {\n\t\t\tpassword[i] = 0x00\n\t\t}\n\t}()\n\n\tif time.Since(c.tokenCachedTime) < 15*time.Minute && c.tokenCached.Username == username {\n\t\t// If the cached token is recent and for the same username, we can reuse it.\n\t\tklog.FromContext(ctx).V(2).Info(\"reusing cached token for user\", \"username\", username)\n\t\treturn nil\n\t}\n\n\tadvanceRequestBody, err := c.doStartAuthentication(ctx, username)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// NB: We explicitly pass advanceRequestBody by value here so that when we add the password\n\t// in doAdvanceAuthentication we don't create a copy of the password slice elsewhere.\n\terr = c.doAdvanceAuthentication(ctx, username, &password, advanceRequestBody)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn err\n}\n\n// doStartAuthentication performs the initial request to start the login process using a username and password.\n// It returns a partially initialized advanceAuthenticationRequestBody ready to send to the server to complete\n// the login. 
As this function doesn't have access to the password, it must be added to the returned request body\n// by the caller before being used as a request to AdvanceAuthentication.\n// See https://api-docs.cyberark.com/identity-docs-api/docs/security-api#/Login/start-authentication\nfunc (c *Client) doStartAuthentication(ctx context.Context, username string) (advanceAuthenticationRequestBody, error) {\n\tresponse := advanceAuthenticationRequestBody{}\n\n\tlogger := klog.FromContext(ctx).WithValues(\"source\", \"Identity.doStartAuthentication\")\n\n\tbody := startAuthenticationRequestBody{\n\t\tVersion: \"1.0\", // this is the only value in the docs\n\n\t\tTenantID: c.subdomain,\n\n\t\tUser: username,\n\t}\n\n\tbodyJSON, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn response, fmt.Errorf(\"failed to marshal JSON for request to StartAuthentication endpoint: %s\", err)\n\t}\n\n\tendpoint, err := url.JoinPath(c.baseURL, \"Security\", \"StartAuthentication\")\n\tif err != nil {\n\t\treturn response, fmt.Errorf(\"failed to create URL for request to CyberArk Identity StartAuthentication: %s\", err)\n\t}\n\n\trequest, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(bodyJSON))\n\tif err != nil {\n\t\treturn response, fmt.Errorf(\"failed to initialise request to Identity endpoint %s: %s\", endpoint, err)\n\t}\n\n\tsetIdentityHeaders(request)\n\n\thttpResponse, err := c.httpClient.Do(request)\n\tif err != nil {\n\t\treturn response, fmt.Errorf(\"failed to perform HTTP request to start authentication: %s\", err)\n\t}\n\n\tdefer httpResponse.Body.Close()\n\n\tif httpResponse.StatusCode != http.StatusOK {\n\t\terr := fmt.Errorf(\"got unexpected status code %s from request to start authentication in CyberArk Identity API\", httpResponse.Status)\n\t\tif httpResponse.StatusCode >= 500 || httpResponse.StatusCode < 400 {\n\t\t\treturn response, err\n\t\t}\n\n\t\t// If we got a 4xx error, we shouldn't retry\n\t\treturn response, 
err\n\t}\n\n\tstartAuthResponse := startAuthenticationResponseBody{}\n\n\terr = json.NewDecoder(io.LimitReader(httpResponse.Body, maxStartAuthenticationBodySize)).Decode(&startAuthResponse)\n\tif err != nil {\n\t\tif err == io.ErrUnexpectedEOF {\n\t\t\treturn response, fmt.Errorf(\"rejecting JSON response from server as it was too large or was truncated\")\n\t\t}\n\n\t\treturn response, fmt.Errorf(\"failed to parse JSON from otherwise successful request to start authentication: %s\", err)\n\t}\n\n\tif !startAuthResponse.Success {\n\t\treturn response, fmt.Errorf(\"got a failure response from request to start authentication: message=%q, error=%q\", startAuthResponse.Message, startAuthResponse.ErrorID)\n\t}\n\n\tlogger.V(logs.Debug).Info(\"made successful request to StartAuthentication\", \"summary\", startAuthResponse.Result.Summary)\n\n\tif startAuthResponse.Result.Summary != SummaryNewPackage {\n\t\t// This means we can't respond to whatever summary the server sent.\n\t\t// The best thing to do is try and find a challenge we can solve anyway.\n\t\tklog.FromContext(ctx).Info(\"got an unexpected Summary from StartAuthentication response; will attempt to complete a login challenge anyway\", \"summary\", startAuthResponse.Result.Summary)\n\t}\n\n\t// We can only handle a UP type challenge, and if there are any other challenges, we'll have to fail because we can't handle them.\n\t// https://github.com/cyberark/ark-sdk-python/blob/3be12c3f2d3a2d0407025028943e584b6edc5996/ark_sdk_python/auth/identity/ark_identity.py#L405\n\tswitch len(startAuthResponse.Result.Challenges) {\n\tcase 0:\n\t\treturn response, fmt.Errorf(\"got no valid challenges in response to start authentication; unable to log in\")\n\n\tcase 1:\n\t\t// do nothing, this is ideal\n\n\tdefault:\n\t\treturn response, fmt.Errorf(\"got %d challenges in response to start authentication, which means MFA may be enabled; unable to log in\", len(startAuthResponse.Result.Challenges))\n\t}\n\n\tchallenge := 
startAuthResponse.Result.Challenges[0]\n\n\tswitch len(challenge.Mechanisms) {\n\tcase 0:\n\t\t// presumably this shouldn't happen, but handle the case anyway\n\t\treturn response, fmt.Errorf(\"got no mechanisms for challenge from Identity server\")\n\n\tcase 1:\n\t\t// do nothing, this is ideal\n\n\tdefault:\n\t\treturn response, fmt.Errorf(\"got %d mechanisms in response to start authentication, which means MFA may be enabled; unable to log in\", len(challenge.Mechanisms))\n\t}\n\n\tmechanism := challenge.Mechanisms[0]\n\n\tif !mechanism.Enrolled || mechanism.Name != MechanismUsernamePassword {\n\t\treturn response, errNoUPMechanism\n\t}\n\n\tresponse.Action = ActionAnswer\n\tresponse.MechanismID = mechanism.MechanismID\n\tresponse.SessionID = startAuthResponse.Result.SessionID\n\tresponse.TenantID = c.subdomain\n\tresponse.PersistentLogin = true\n\n\treturn response, nil\n}\n\n// doAdvanceAuthentication performs the second step of the login process, sending the password to the server\n// and receiving a token in response.\n// See: https://api-docs.cyberark.com/identity-docs-api/docs/security-api#/Login/advance-authentication\nfunc (c *Client) doAdvanceAuthentication(ctx context.Context, username string, password *[]byte, requestBody advanceAuthenticationRequestBody) error {\n\tif password == nil {\n\t\treturn fmt.Errorf(\"password must not be nil; this is a programming error\")\n\t}\n\n\trequestBody.Answer = string(*password)\n\n\tbodyJSON, err := json.Marshal(requestBody)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal JSON for request to AdvanceAuthentication endpoint: %s\", err)\n\t}\n\n\tendpoint, err := url.JoinPath(c.baseURL, \"Security\", \"AdvanceAuthentication\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create URL for request to CyberArk Identity AdvanceAuthentication: %s\", err)\n\t}\n\n\trequest, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(bodyJSON))\n\tif err != nil {\n\t\treturn 
fmt.Errorf(\"failed to initialise request to Identity endpoint %s: %s\", endpoint, err)\n\t}\n\n\tsetIdentityHeaders(request)\n\n\thttpResponse, err := c.httpClient.Do(request)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to perform HTTP request to advance authentication: %s\", err)\n\t}\n\n\tdefer httpResponse.Body.Close()\n\n\t// Important: Even login failures can produce a 200 status code, so this\n\t// check won't catch all failures\n\tif httpResponse.StatusCode != http.StatusOK {\n\t\treturn fmt.Errorf(\"got unexpected status code %s from request to advance authentication in CyberArk Identity API\", httpResponse.Status)\n\t}\n\n\tadvanceAuthResponse := advanceAuthenticationResponseBody{}\n\n\terr = json.NewDecoder(io.LimitReader(httpResponse.Body, maxAdvanceAuthenticationBodySize)).Decode(&advanceAuthResponse)\n\tif err != nil {\n\t\tif err == io.ErrUnexpectedEOF {\n\t\t\treturn fmt.Errorf(\"rejecting JSON response from server as it was too large or was truncated\")\n\t\t}\n\n\t\treturn fmt.Errorf(\"failed to parse JSON from otherwise successful request to advance authentication: %s\", err)\n\t}\n\n\tif !advanceAuthResponse.Success {\n\t\treturn fmt.Errorf(\"got a failure response from request to advance authentication: message=%q, error=%q\", advanceAuthResponse.Message, advanceAuthResponse.ErrorID)\n\t}\n\n\tif advanceAuthResponse.Result.Summary != SummaryLoginSuccess {\n\t\t// IF MFA was enabled and we got here, there's probably nothing to be gained from a retry\n\t\t// and the best thing to do is fail now so the user can fix MFA settings.\n\t\treturn fmt.Errorf(\"got a %s response from AdvanceAuthentication; this implies that the user account %s requires MFA, which is not supported. 
Try unlocking MFA for this user\", advanceAuthResponse.Result.Summary, username)\n\t}\n\n\tklog.FromContext(ctx).Info(\"successfully completed AdvanceAuthentication request to CyberArk Identity; login complete\", \"username\", username)\n\n\t// NB: This assumes we already hold the token cache mutex, which we do in LoginUsernamePassword, so this is safe.\n\tc.tokenCachedTime = time.Now()\n\tc.tokenCached = token{\n\t\tUsername: username,\n\t\tToken:    advanceAuthResponse.Result.Token,\n\t}\n\n\treturn nil\n}\n\n// setIdentityHeaders sets the headers required for requests to the CyberArk Identity API.\n// From the docs:\n// Your request header must contain X-IDAP-NATIVE-CLIENT:true to indicate that an application is invoking\n// the CyberArk Identity endpoint, and\n// Content-Type: application/json to indicate that the body is in JSON format.\n// Experimentally, it seems the X-IDAP-NATIVE-CLIENT is not required but we'll follow the docs.\nfunc setIdentityHeaders(r *http.Request) {\n\t// The \"canonicalheader\" linter warns us that the IDAP-NATIVE-CLIENT header isn't canonical, but we silence it here\n\t// since we want to exactly match the docs.\n\tr.Header.Set(\"Content-Type\", \"application/json\")\n\tr.Header.Set(\"X-IDAP-NATIVE-CLIENT\", \"true\") //nolint: canonicalheader\n\tversion.SetUserAgent(r)\n\t// Add telemetry headers\n\tarkapi.SetTelemetryRequestHeader(r)\n}\n"
  },
  {
    "path": "internal/cyberark/identity/identity_test.go",
    "content": "package identity\n\n// This file contains tests for the LoginUsernamePassword function in the\n// identity package. The tests cover both a mock API server and the real API,\n// depending on the environment variables set. The tests are intended to\n// demonstrate that the mock API behaves the same as the real API\n\nimport (\n\t\"net/http\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"k8s.io/klog/v2\"\n\t\"k8s.io/klog/v2/ktesting\"\n\n\t\"github.com/jetstack/preflight/internal/cyberark/servicediscovery\"\n\tarktesting \"github.com/jetstack/preflight/internal/cyberark/testing\"\n\n\t_ \"k8s.io/klog/v2/ktesting/init\"\n)\n\n// inputs holds the various input values for the tests.\ntype inputs struct {\n\thttpClient *http.Client\n\tbaseURL    string\n\tsubdomain  string\n\tusername   string\n\tpassword   string\n}\n\n// TestLoginUsernamePassword_MockAPI tests the LoginUsernamePassword function\n// against a mock API server. The mock server is configured to return different\n// responses based on the username and password used in the request.\nfunc TestLoginUsernamePassword_MockAPI(t *testing.T) {\n\tloginUsernamePasswordTests(t, func(t testing.TB) inputs {\n\t\tbaseURL, httpClient := MockIdentityServer(t)\n\t\treturn inputs{\n\t\t\thttpClient: httpClient,\n\t\t\tbaseURL:    baseURL,\n\t\t\tsubdomain:  \"subdomain-ignored-by-mock\",\n\t\t\tusername:   successUser,\n\t\t\tpassword:   successPassword,\n\t\t}\n\t})\n}\n\n// TestLoginUsernamePassword_RealAPI tests the LoginUsernamePassword function\n// against the real API. 
The environment variables are used to configure the\n// client.\nfunc TestLoginUsernamePassword_RealAPI(t *testing.T) {\n\tarktesting.SkipIfNoEnv(t)\n\tsubdomain := os.Getenv(\"ARK_SUBDOMAIN\")\n\thttpClient := http.DefaultClient\n\tservices, _, err := servicediscovery.New(httpClient, subdomain).DiscoverServices(t.Context())\n\trequire.NoError(t, err)\n\n\tloginUsernamePasswordTests(t, func(t testing.TB) inputs {\n\t\treturn inputs{\n\t\t\thttpClient: httpClient,\n\t\t\tbaseURL:    services.Identity.API,\n\t\t\tsubdomain:  subdomain,\n\t\t\tusername:   os.Getenv(\"ARK_USERNAME\"),\n\t\t\tpassword:   os.Getenv(\"ARK_SECRET\"),\n\t\t}\n\t})\n}\n\n// loginUsernamePasswordTests runs tests which are expected to pass regardless of\n// whether the mock or real API is used.\nfunc loginUsernamePasswordTests(t *testing.T, inputsGenerator func(t testing.TB) inputs) {\n\ttype testCase struct {\n\t\tname          string\n\t\tmodifier      func(in *inputs)\n\t\texpectedError string\n\t}\n\ttests := []testCase{\n\t\t{\n\t\t\tname: \"success\",\n\t\t},\n\t\t{\n\t\t\tname: \"bad-username\",\n\t\t\tmodifier: func(in *inputs) {\n\t\t\t\tin.username = failureUser\n\t\t\t},\n\t\t\texpectedError: `^got a failure response from request to advance authentication: ` +\n\t\t\t\t`message=\"Authentication \\(login or challenge\\) has failed\\. ` +\n\t\t\t\t`Please try again or contact your system administrator\\.\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"empty-username\",\n\t\t\tmodifier: func(in *inputs) {\n\t\t\t\tin.username = \"\"\n\t\t\t},\n\t\t\texpectedError: `^got a failure response from request to start authentication: ` +\n\t\t\t\t`message=\"Authentication \\(login or challenge\\) has failed\\. 
` +\n\t\t\t\t`Please try again or contact your system administrator\\.\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"bad-password\",\n\t\t\tmodifier: func(in *inputs) {\n\t\t\t\tin.password = \"bad-password\"\n\t\t\t},\n\t\t\texpectedError: `^got a failure response from request to advance authentication: ` +\n\t\t\t\t`message=\"Authentication \\(login or challenge\\) has failed\\. ` +\n\t\t\t\t`Please try again or contact your system administrator\\.\"`,\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tlogger := ktesting.NewLogger(t, ktesting.DefaultConfig)\n\t\t\tctx := klog.NewContext(t.Context(), logger)\n\n\t\t\tin := inputsGenerator(t)\n\t\t\tif test.modifier != nil {\n\t\t\t\ttest.modifier(&in)\n\t\t\t}\n\t\t\tcl := New(in.httpClient, in.baseURL, in.subdomain)\n\t\t\terr := cl.LoginUsernamePassword(ctx, in.username, []byte(in.password))\n\t\t\tif test.expectedError != \"\" {\n\t\t\t\tif assert.Error(t, err) {\n\t\t\t\t\tassert.Regexp(t, test.expectedError, err.Error())\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\trequire.NoError(t, err)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "internal/cyberark/identity/mock.go",
    "content": "package identity\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"k8s.io/client-go/transport\"\n\n\tarkapi \"github.com/jetstack/preflight/internal/cyberark/api\"\n\t\"github.com/jetstack/preflight/pkg/version\"\n\n\t_ \"embed\"\n)\n\nconst (\n\tsuccessUser                   = \"test@example.com\"\n\tfailureUser                   = \"test-fail@example.com\"\n\tsuccessUserMultipleChallenges = \"test-multiple-challenges@example.com\"\n\tsuccessUserMultipleMechanisms = \"test-multiple-mechanisms@example.com\"\n\tnoUPMechanism                 = \"noup@example.com\"\n\n\tsuccessMechanismID = \"aaaaaaa_AAAAAAAAAAAAAAAAAAAAAAAAAAAA-1111111\"\n\tsuccessSessionID   = \"mysessionid101\"\n\tsuccessPassword    = \"somepassword\"\n\n\t// mockSuccessfulStartAuthenticationToken is the token returned by the\n\t// mock server in response to a successful AdvanceAuthentication request\n\t// Must match what's in testdata/advance_authentication_success.json\n\tmockSuccessfulStartAuthenticationToken = \"success-token\"\n)\n\nvar (\n\t//go:embed testdata/start_authentication_success.json\n\tstartAuthenticationSuccessResponse string\n\n\t//go:embed testdata/start_authentication_bad_user_session_id.json\n\tstartAuthenticationBadUserResponse string\n\n\t//go:embed testdata/start_authentication_success_multiple_challenges.json\n\tstartAuthenticationSuccessMultipleChallengesResponse string\n\n\t//go:embed testdata/start_authentication_success_multiple_mechanisms.json\n\tstartAuthenticationSuccessMultipleMechanismsResponse string\n\n\t//go:embed testdata/start_authentication_success_no_up_mechanism.json\n\tstartAuthenticationNoUPMechanismResponse string\n\n\t//go:embed testdata/start_authentication_failure.json\n\tstartAuthenticationFailureResponse string\n\n\t//go:embed testdata/advance_authentication_success.json\n\tadvanceAuthenticationSuccessResponse 
string\n\n\t//go:embed testdata/advance_authentication_failure.json\n\tadvanceAuthenticationFailureResponse string\n)\n\ntype mockIdentityServer struct {\n\tt testing.TB\n}\n\n// MockIdentityServer returns a URL of a mocked CyberArk identity server and an\n// HTTP client with the CA certs needed to connect to it.\nfunc MockIdentityServer(t testing.TB) (string, *http.Client) {\n\tmis := &mockIdentityServer{\n\t\tt: t,\n\t}\n\tserver := httptest.NewTLSServer(mis)\n\tt.Cleanup(server.Close)\n\thttpClient := server.Client()\n\thttpClient.Transport = transport.NewDebuggingRoundTripper(httpClient.Transport, transport.DebugByContext)\n\treturn server.URL, httpClient\n}\n\nfunc (mis *mockIdentityServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tmis.t.Log(r.Method, r.RequestURI)\n\tswitch r.URL.String() {\n\tcase \"/Security/StartAuthentication\":\n\t\tmis.handleStartAuthentication(w, r)\n\t\treturn\n\n\tcase \"/Security/AdvanceAuthentication\":\n\t\tmis.handleAdvanceAuthentication(w, r)\n\t\treturn\n\n\tdefault:\n\t\t// The server returns an HTML page for this case, but that doesn't seem important for us to replicate\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\t_, _ = w.Write([]byte(\"not found\"))\n\t}\n}\n\nfunc checkRequestHeaders(r *http.Request) error {\n\tvar errs []error\n\n\tif r.Header.Get(\"User-Agent\") != version.UserAgent() {\n\t\terrs = append(errs, fmt.Errorf(\"should set user agent on all requests\"))\n\t}\n\n\tif r.Header.Get(\"Content-Type\") != \"application/json\" {\n\t\terrs = append(errs, fmt.Errorf(\"should request JSON on all requests\"))\n\t}\n\n\tif r.Header.Get(\"X-IDAP-NATIVE-CLIENT\") != \"true\" { //nolint: canonicalheader\n\t\terrs = append(errs, fmt.Errorf(\"should set X-IDAP-NATIVE-CLIENT header to true on all requests\"))\n\t}\n\n\tif r.Header.Get(arkapi.TelemetryHeaderKey) == \"\" {\n\t\terrs = append(errs, fmt.Errorf(\"should set telemetry header on all requests\"))\n\t}\n\n\treturn errors.Join(errs...)\n}\n\nfunc (mis 
*mockIdentityServer) handleStartAuthentication(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != http.MethodPost {\n\t\t// Empirically we saw that a PUT and a DELETE request to this endpoint was actually successful,\n\t\t// but the endpoint is documented to use POST so we'll ensure that only that method is used.\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\t_, _ = w.Write([]byte(`{\"message\":\"endpoint is documented to only accept POST\"}`))\n\t\treturn\n\t}\n\n\tif err := checkRequestHeaders(r); !assert.NoError(mis.t, err, \"request headers are not correct\") {\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\treturn\n\t}\n\n\treqBody := startAuthenticationRequestBody{}\n\n\tdecoder := json.NewDecoder(r.Body)\n\tdecoder.DisallowUnknownFields()\n\n\tif err := decoder.Decode(&reqBody); err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(w, `{\"message\":\"failed to unmarshal request body: %s\"}`, err)\n\t\treturn\n\t}\n\n\tswitch reqBody.User {\n\tcase successUser:\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_, _ = w.Write([]byte(startAuthenticationSuccessResponse))\n\n\tcase successUserMultipleChallenges:\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_, _ = w.Write([]byte(startAuthenticationSuccessMultipleChallengesResponse))\n\n\tcase successUserMultipleMechanisms:\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_, _ = w.Write([]byte(startAuthenticationSuccessMultipleMechanismsResponse))\n\n\tcase noUPMechanism:\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_, _ = w.Write([]byte(startAuthenticationNoUPMechanismResponse))\n\n\tcase \"\":\n\t\t// experimentally, this case produces a 200 response but a \"failed\" body\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_, _ = w.Write([]byte(startAuthenticationFailureResponse))\n\n\tcase failureUser:\n\t\t// Experimentally, the real API produces a 200 response and what looks\n\t\t// like a success response body. 
but the login is rejected later by the\n\t\t// AdvanceAuthentication stage, perhaps by virtue of the sessionID which\n\t\t// is returned here and supplied to AdvanceAuthentication.\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_, _ = w.Write([]byte(startAuthenticationBadUserResponse))\n\n\tdefault:\n\t\tpanic(\"programmer error: should not be reached\")\n\t}\n}\n\nfunc (mis *mockIdentityServer) handleAdvanceAuthentication(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != http.MethodPost {\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\t_, _ = w.Write([]byte(`{\"message\":\"endpoint is documented to only accept POST\"}`))\n\t\treturn\n\t}\n\n\tif err := checkRequestHeaders(r); err != nil {\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\tfmt.Fprintf(w, `{\"message\":\"issues with headers sent to mock server: %s\"}`, err.Error())\n\t}\n\n\tdecoder := json.NewDecoder(r.Body)\n\tdecoder.DisallowUnknownFields()\n\n\tadvanceBody := &advanceAuthenticationRequestBody{}\n\n\tif err := decoder.Decode(&advanceBody); err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(w, `{\"message\":\"failed to unmarshal request body: %s\"}`, err)\n\t\treturn\n\t}\n\n\t// Important: The actual server will return 200 OK even if the login fails.\n\t// Most failure responses should copy that.\n\n\tif !advanceBody.PersistentLogin {\n\t\t// this is something we enforce but wouldn't actually be an error from\n\t\t// a real server, so we return a different error here\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\t_, _ = w.Write([]byte(`expected PersistentLogin to be true`))\n\t\treturn\n\t}\n\n\tif advanceBody.SessionID != successSessionID ||\n\t\tadvanceBody.MechanismID != successMechanismID ||\n\t\tadvanceBody.Action != ActionAnswer ||\n\t\tadvanceBody.Answer != successPassword {\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_, _ = w.Write([]byte(advanceAuthenticationFailureResponse))\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\t_, _ = 
w.Write([]byte(advanceAuthenticationSuccessResponse))\n}\n"
  },
  {
    "path": "internal/cyberark/identity/start_authentication_test.go",
    "content": "package identity\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/jetstack/preflight/internal/cyberark/servicediscovery\"\n)\n\nfunc Test_IdentityStartAuthentication(t *testing.T) {\n\ttests := map[string]struct {\n\t\tusername string\n\n\t\texpectedError error\n\t}{\n\t\t\"successful request\": {\n\t\t\tusername:      successUser,\n\t\t\texpectedError: nil,\n\t\t},\n\t\t\"successful request, multiple challenges\": {\n\t\t\tusername:      successUserMultipleChallenges,\n\t\t\texpectedError: fmt.Errorf(\"got 2 challenges in response to start authentication, which means MFA may be enabled; unable to log in\"),\n\t\t},\n\t\t\"successful request, multiple mechanisms\": {\n\t\t\tusername:      successUserMultipleMechanisms,\n\t\t\texpectedError: fmt.Errorf(\"got 2 mechanisms in response to start authentication, which means MFA may be enabled; unable to log in\"),\n\t\t},\n\t\t\"successful request, no username / password (UP) mechanism available\": {\n\t\t\tusername:      noUPMechanism,\n\t\t\texpectedError: errNoUPMechanism,\n\t\t},\n\t\t\"failed request\": {\n\t\t\t// experimentally we've seen the failure response when passing an empty username\n\t\t\tusername:      \"\",\n\t\t\texpectedError: fmt.Errorf(`got a failure response from request to start authentication: message=\"Authentication (login or challenge) has failed. 
Please try again or contact your system administrator.\", error=\"00000000-0400-4000-1111-222222222222:01234567890abcdef\"`),\n\t\t},\n\t}\n\n\tfor name, testSpec := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tctx := t.Context()\n\n\t\t\tidentityServer, httpClient := MockIdentityServer(t)\n\n\t\t\tclient := New(httpClient, identityServer, servicediscovery.MockDiscoverySubdomain)\n\n\t\t\tadvanceBody, err := client.doStartAuthentication(ctx, testSpec.username)\n\t\t\tif err != nil {\n\t\t\t\tif testSpec.expectedError == nil {\n\t\t\t\t\tt.Errorf(\"didn't expect an error but got %v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif err.Error() != testSpec.expectedError.Error() {\n\t\t\t\t\tt.Errorf(\"expected err=%v\\nbut got err=%v\", testSpec.expectedError, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif testSpec.expectedError != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif advanceBody.TenantID != client.subdomain {\n\t\t\t\tt.Errorf(\"expected advanceAuthenticationRequestBody.TenantID to be %s but got %s\", client.subdomain, advanceBody.TenantID)\n\t\t\t}\n\n\t\t\tif advanceBody.SessionID != successSessionID {\n\t\t\t\tt.Errorf(\"expected advanceAuthenticationRequestBody.SessionID to be %s but got %s\", successSessionID, advanceBody.SessionID)\n\t\t\t}\n\n\t\t\tif advanceBody.MechanismID != successMechanismID {\n\t\t\t\tt.Errorf(\"expected advanceAuthenticationRequestBody.MechanismID to be %s but got %s\", successMechanismID, advanceBody.MechanismID)\n\t\t\t}\n\n\t\t\tif advanceBody.Action != ActionAnswer {\n\t\t\t\tt.Errorf(\"expected advanceAuthenticationRequestBody.Action to be %s but got %s\", ActionAnswer, advanceBody.Action)\n\t\t\t}\n\n\t\t\tif !advanceBody.PersistentLogin {\n\t\t\t\tt.Error(\"expected advanceAuthenticationRequestBody.PersistentLogin to be true but it wasn't\")\n\t\t\t}\n\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "internal/cyberark/identity/testdata/advance_authentication_failure.json",
    "content": "{\n  \"success\": false,\n  \"Result\": {\n    \"Summary\": \"Failure\"\n  },\n  \"Message\": \"Authentication (login or challenge) has failed. Please try again or contact your system administrator.\",\n  \"MessageID\": null,\n  \"Exception\": null,\n  \"ErrorID\": \"aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee:55555555555555555555555555555555\",\n  \"ErrorCode\": null,\n  \"IsSoftError\": false,\n  \"InnerExceptions\": null\n}\n"
  },
  {
    "path": "internal/cyberark/identity/testdata/advance_authentication_success.json",
    "content": "{\n  \"success\": true,\n  \"Result\": {\n    \"AuthLevel\": \"Normal\",\n    \"DisplayName\": \"Namey McNamerson\",\n    \"Token\": \"success-token\",\n    \"Auth\": \"auth-auth\",\n    \"UserId\": \"11111111-2222-3333-4444-555555555555\",\n    \"EmailAddress\": \"name@example.com\",\n    \"UserDirectory\": \"CDS\",\n    \"PodFqdn\": \"xxx0000.id.integration-cyberark.cloud\",\n    \"User\": \"name@example.org.111111\",\n    \"CustomerID\": \"XXX0000\",\n    \"SystemID\": \"XXX0000\",\n    \"SourceDsType\": \"CDS\",\n    \"Summary\": \"LoginSuccess\"\n  },\n  \"Message\": null,\n  \"MessageID\": null,\n  \"Exception\": null,\n  \"ErrorID\": null,\n  \"ErrorCode\": null,\n  \"IsSoftError\": false,\n  \"InnerExceptions\": null\n}\n"
  },
  {
    "path": "internal/cyberark/identity/testdata/start_authentication_bad_user_session_id.json",
    "content": "{\n  \"success\": true,\n  \"Result\": {\n    \"ClientHints\": {\n      \"PersistDefault\": false,\n      \"AllowPersist\": true,\n      \"AllowForgotPassword\": true,\n      \"EndpointAuthenticationEnabled\": false\n    },\n    \"Version\": \"1.0\",\n    \"SessionId\": \"bad-user-session-id\",\n    \"EventDescription\": null,\n    \"RetryWaitingTime\": 0,\n    \"SecurityImageName\": null,\n    \"AllowLoginMfaCache\": false,\n    \"Challenges\": [\n      {\n        \"Mechanisms\": [\n          {\n            \"AnswerType\": \"Text\",\n            \"Name\": \"UP\",\n            \"PromptMechChosen\": \"Enter Password\",\n            \"PromptSelectMech\": \"Password\",\n            \"MechanismId\": \"aaaaaaa_AAAAAAAAAAAAAAAAAAAAAAAAAAAA-1111111\",\n            \"Enrolled\": true\n          }\n        ]\n      }\n    ],\n    \"Summary\": \"NewPackage\",\n    \"TenantId\": \"TENANTID\"\n  },\n  \"Message\": null,\n  \"MessageID\": null,\n  \"Exception\": null,\n  \"ErrorID\": null,\n  \"ErrorCode\": null,\n  \"IsSoftError\": false,\n  \"InnerExceptions\": null\n}\n"
  },
  {
    "path": "internal/cyberark/identity/testdata/start_authentication_failure.json",
    "content": "{\n  \"success\": false,\n  \"Result\": {\n    \"Summary\": \"Undefined\"\n  },\n  \"Message\": \"Authentication (login or challenge) has failed. Please try again or contact your system administrator.\",\n  \"MessageID\": null,\n  \"Exception\": null,\n  \"ErrorID\": \"00000000-0400-4000-1111-222222222222:01234567890abcdef\",\n  \"ErrorCode\": null,\n  \"IsSoftError\": false,\n  \"InnerExceptions\": null\n}\n"
  },
  {
    "path": "internal/cyberark/identity/testdata/start_authentication_success.json",
    "content": "{\n  \"success\": true,\n  \"Result\": {\n    \"ClientHints\": {\n      \"PersistDefault\": false,\n      \"AllowPersist\": true,\n      \"AllowForgotPassword\": true,\n      \"EndpointAuthenticationEnabled\": false\n    },\n    \"Version\": \"1.0\",\n    \"SessionId\": \"mysessionid101\",\n    \"EventDescription\": null,\n    \"RetryWaitingTime\": 0,\n    \"SecurityImageName\": null,\n    \"AllowLoginMfaCache\": false,\n    \"Challenges\": [\n      {\n        \"Mechanisms\": [\n          {\n            \"AnswerType\": \"Text\",\n            \"Name\": \"UP\",\n            \"PromptMechChosen\": \"Enter Password\",\n            \"PromptSelectMech\": \"Password\",\n            \"MechanismId\": \"aaaaaaa_AAAAAAAAAAAAAAAAAAAAAAAAAAAA-1111111\",\n            \"Enrolled\": true\n          }\n        ]\n      }\n    ],\n    \"Summary\": \"NewPackage\",\n    \"TenantId\": \"TENANTID\"\n  },\n  \"Message\": null,\n  \"MessageID\": null,\n  \"Exception\": null,\n  \"ErrorID\": null,\n  \"ErrorCode\": null,\n  \"IsSoftError\": false,\n  \"InnerExceptions\": null\n}\n"
  },
  {
    "path": "internal/cyberark/identity/testdata/start_authentication_success_multiple_challenges.json",
    "content": "{\n  \"success\": true,\n  \"Result\": {\n    \"ClientHints\": {\n      \"PersistDefault\": false,\n      \"AllowPersist\": true,\n      \"AllowForgotPassword\": true,\n      \"EndpointAuthenticationEnabled\": false\n    },\n    \"Version\": \"1.0\",\n    \"SessionId\": \"mysessionid101\",\n    \"EventDescription\": null,\n    \"RetryWaitingTime\": 0,\n    \"SecurityImageName\": null,\n    \"AllowLoginMfaCache\": false,\n    \"Challenges\": [\n      {\n        \"Mechanisms\": [\n          {\n            \"AnswerType\": \"StartOob\",\n            \"Name\": \"PF\",\n            \"PartialPhoneNumber\": \"0775\",\n            \"PromptMechChosen\": \"We will now attempt to call your phone (0000). Please follow the instructions to proceed with authentication.\",\n            \"PromptSelectMech\": \"Phone Call... XXX-0000\",\n            \"MechanismId\": \"bbbbbbb_BBBBBBBBBBBBBBBBBBBBBBBBBBBB-2222222\",\n            \"Enrolled\": true\n          }\n        ]\n      },\n      {\n        \"Mechanisms\": [\n          {\n            \"AnswerType\": \"Text\",\n            \"Name\": \"UP\",\n            \"PromptMechChosen\": \"Enter Password\",\n            \"PromptSelectMech\": \"Password\",\n            \"MechanismId\": \"aaaaaaa_AAAAAAAAAAAAAAAAAAAAAAAAAAAA-1111111\",\n            \"Enrolled\": true\n          }\n        ]\n      }\n    ],\n    \"Summary\": \"NewPackage\",\n    \"TenantId\": \"TENANTID\"\n  },\n  \"Message\": null,\n  \"MessageID\": null,\n  \"Exception\": null,\n  \"ErrorID\": null,\n  \"ErrorCode\": null,\n  \"IsSoftError\": false,\n  \"InnerExceptions\": null\n}\n"
  },
  {
    "path": "internal/cyberark/identity/testdata/start_authentication_success_multiple_mechanisms.json",
    "content": "{\n  \"success\": true,\n  \"Result\": {\n    \"ClientHints\": {\n      \"PersistDefault\": false,\n      \"AllowPersist\": true,\n      \"AllowForgotPassword\": true,\n      \"EndpointAuthenticationEnabled\": false\n    },\n    \"Version\": \"1.0\",\n    \"SessionId\": \"mysessionid101\",\n    \"EventDescription\": null,\n    \"RetryWaitingTime\": 0,\n    \"SecurityImageName\": null,\n    \"AllowLoginMfaCache\": false,\n    \"Challenges\": [\n      {\n        \"Mechanisms\": [\n          {\n            \"AnswerType\": \"Text\",\n            \"Name\": \"UP\",\n            \"PromptMechChosen\": \"Enter Password\",\n            \"PromptSelectMech\": \"Password\",\n            \"MechanismId\": \"aaaaaaa_AAAAAAAAAAAAAAAAAAAAAAAAAAAA-1111111\",\n            \"Enrolled\": true\n          },\n          {\n            \"AnswerType\": \"StartOob\",\n            \"Name\": \"PF\",\n            \"PartialPhoneNumber\": \"0775\",\n            \"PromptMechChosen\": \"We will now attempt to call your phone (0000). Please follow the instructions to proceed with authentication.\",\n            \"PromptSelectMech\": \"Phone Call... XXX-0000\",\n            \"MechanismId\": \"bbbbbbb_BBBBBBBBBBBBBBBBBBBBBBBBBBBB-2222222\",\n            \"Enrolled\": true\n          }\n        ]\n      }\n    ],\n    \"Summary\": \"NewPackage\",\n    \"TenantId\": \"TENANTID\"\n  },\n  \"Message\": null,\n  \"MessageID\": null,\n  \"Exception\": null,\n  \"ErrorID\": null,\n  \"ErrorCode\": null,\n  \"IsSoftError\": false,\n  \"InnerExceptions\": null\n}\n"
  },
  {
    "path": "internal/cyberark/identity/testdata/start_authentication_success_no_up_mechanism.json",
    "content": "{\n  \"success\": true,\n  \"Result\": {\n    \"ClientHints\": {\n      \"PersistDefault\": false,\n      \"AllowPersist\": true,\n      \"AllowForgotPassword\": true,\n      \"EndpointAuthenticationEnabled\": false\n    },\n    \"Version\": \"1.0\",\n    \"SessionId\": \"mysessionid101\",\n    \"EventDescription\": null,\n    \"RetryWaitingTime\": 0,\n    \"SecurityImageName\": null,\n    \"AllowLoginMfaCache\": false,\n    \"Challenges\": [\n      {\n        \"Mechanisms\": [\n          {\n            \"AnswerType\": \"StartOob\",\n            \"Name\": \"PF\",\n            \"PartialPhoneNumber\": \"0775\",\n            \"PromptMechChosen\": \"We will now attempt to call your phone (0000). Please follow the instructions to proceed with authentication.\",\n            \"PromptSelectMech\": \"Phone Call... XXX-0000\",\n            \"MechanismId\": \"bbbbbbb_BBBBBBBBBBBBBBBBBBBBBBBBBBBB-2222222\",\n            \"Enrolled\": true\n          }\n        ]\n      }\n    ],\n    \"Summary\": \"NewPackage\",\n    \"TenantId\": \"TENANTID\"\n  },\n  \"Message\": null,\n  \"MessageID\": null,\n  \"Exception\": null,\n  \"ErrorID\": null,\n  \"ErrorCode\": null,\n  \"IsSoftError\": false,\n  \"InnerExceptions\": null\n}\n"
  },
  {
    "path": "internal/cyberark/servicediscovery/discovery.go",
    "content": "package servicediscovery\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"path\"\n\t\"sync\"\n\t\"time\"\n\n\tarkapi \"github.com/jetstack/preflight/internal/cyberark/api\"\n\t\"github.com/jetstack/preflight/pkg/version\"\n)\n\nconst (\n\t// ProdDiscoveryAPIBaseURL is the base URL for the production CyberArk Service Discovery API\n\tProdDiscoveryAPIBaseURL = \"https://platform-discovery.cyberark.cloud/\"\n\n\t// IdentityServiceName is the name of the identity service we're looking for in responses from the Service Discovery API\n\t// We were told to use the identity_administration field, not the identity_user_portal field.\n\tIdentityServiceName = \"identity_administration\"\n\n\t// DiscoveryContextServiceName is the name of the discovery and context API\n\t// in responses from the Service Discovery API.\n\tDiscoveryContextServiceName = \"discoverycontext\"\n\n\t// maxDiscoverBodySize is the maximum allowed size for a response body from the CyberArk Service Discovery subdomain endpoint\n\t// As of 2025-04-16, a response from the integration environment is ~4kB\n\tmaxDiscoverBodySize = 2 * 1024 * 1024\n)\n\n// Client is a Golang client for interacting with the CyberArk Discovery Service. It allows\n// users to fetch URLs for various APIs available in CyberArk. This client is specialised to\n// fetch only API endpoints, since only API endpoints are required by the Venafi Kubernetes Agent currently.\ntype Client struct {\n\tclient    *http.Client\n\tbaseURL   string\n\tsubdomain string\n\n\tcachedResponse      *Services\n\tcachedTenantID      string\n\tcachedResponseTime  time.Time\n\tcachedResponseMutex sync.Mutex\n}\n\n// New creates a new CyberArk Service Discovery client. If the ARK_DISCOVERY_API\n// environment variable is set, it is used as the base URL for the service\n// discovery API. 
Otherwise, the production URL is used.\nfunc New(httpClient *http.Client, subdomain string) *Client {\n\tbaseURL := os.Getenv(\"ARK_DISCOVERY_API\")\n\tif baseURL == \"\" {\n\t\tbaseURL = ProdDiscoveryAPIBaseURL\n\t}\n\n\tclient := &Client{\n\t\tclient:    httpClient,\n\t\tbaseURL:   baseURL,\n\t\tsubdomain: subdomain,\n\n\t\tcachedResponse:      nil,\n\t\tcachedTenantID:      \"\",\n\t\tcachedResponseTime:  time.Time{},\n\t\tcachedResponseMutex: sync.Mutex{},\n\t}\n\n\treturn client\n}\n\n// DiscoveryResponse represents the full JSON response returned by the CyberArk api/tenant-discovery/public API\n// The API is documented here https://ca-il-confluence.il.cyber-ark.com/spaces/EV/pages/575618345/Updated+PD+APIs+doc\ntype DiscoveryResponse struct {\n\tRegion      string         `json:\"region\"`\n\tDRRegion    string         `json:\"dr_region\"`\n\tSubdomain   string         `json:\"subdomain\"`\n\tTenantID    string         `json:\"tenant_id\"`\n\tPlatformID  string         `json:\"platform_id\"`\n\tIdentityID  string         `json:\"identity_id\"`\n\tDefaultURL  string         `json:\"default_url\"`\n\tTenantFlags map[string]any `json:\"tenant_flags\"`\n\tServices    []Service      `json:\"services\"`\n}\n\ntype Service struct {\n\tServiceName       string            `json:\"service_name\"`\n\tServiceSubdomains []string          `json:\"service_subdomains\"`\n\tRegion            string            `json:\"region\"`\n\tEndpoints         []ServiceEndpoint `json:\"endpoints\"`\n}\n\n// ServiceEndpoint represents a single service endpoint returned by the CyberArk\n// Service Discovery API. 
The JSON field names here must match the field names\n// returned by the Service Discovery API.\ntype ServiceEndpoint struct {\n\tIsActive bool   `json:\"is_active\"`\n\tType     string `json:\"type\"`\n\tUI       string `json:\"ui\"`\n\tAPI      string `json:\"api\"`\n}\n\n// This is a convenience struct to hold the two ServiceEndpoints we care about.\n// Currently, we only care about the Identity API and the Discovery Context API.\ntype Services struct {\n\tIdentity         ServiceEndpoint\n\tDiscoveryContext ServiceEndpoint\n}\n\n// DiscoverServices fetches from the service discovery service for the configured subdomain\n// and parses the CyberArk Identity API URL and Inventory API URL.\n// It also returns the Tenant ID UUID corresponding to the subdomain.\nfunc (c *Client) DiscoverServices(ctx context.Context) (*Services, string, error) {\n\tc.cachedResponseMutex.Lock()\n\tdefer c.cachedResponseMutex.Unlock()\n\n\tif c.cachedResponse != nil && time.Since(c.cachedResponseTime) < 1*time.Hour {\n\t\treturn c.cachedResponse, c.cachedTenantID, nil\n\t}\n\n\tu, err := url.Parse(c.baseURL)\n\tif err != nil {\n\t\treturn nil, \"\", fmt.Errorf(\"invalid base URL for service discovery: %w\", err)\n\t}\n\n\tu.Path = path.Join(u.Path, \"api/public/tenant-discovery\")\n\tu.RawQuery = url.Values{\"bySubdomain\": []string{c.subdomain}}.Encode()\n\n\tendpoint := u.String()\n\n\trequest, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)\n\tif err != nil {\n\t\treturn nil, \"\", fmt.Errorf(\"failed to initialise request to %s: %s\", endpoint, err)\n\t}\n\n\trequest.Header.Set(\"Accept\", \"application/json\")\n\tversion.SetUserAgent(request)\n\t// Add telemetry headers\n\tarkapi.SetTelemetryRequestHeader(request)\n\tresp, err := c.client.Do(request)\n\tif err != nil {\n\t\treturn nil, \"\", fmt.Errorf(\"failed to perform HTTP request: %s\", err)\n\t}\n\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\t// a 404 error is returned with 
an empty JSON body \"{}\" if the subdomain is unknown; at the time of writing, we haven't observed\n\t\t// any other errors and so we can't special case them\n\t\tif resp.StatusCode == http.StatusNotFound {\n\t\t\treturn nil, \"\", fmt.Errorf(\"got an HTTP 404 response from service discovery; maybe the subdomain %q is incorrect or does not exist?\", c.subdomain)\n\t\t}\n\n\t\treturn nil, \"\", fmt.Errorf(\"got unexpected status code %s from request to service discovery API\", resp.Status)\n\t}\n\n\tvar discoveryResp DiscoveryResponse\n\terr = json.NewDecoder(io.LimitReader(resp.Body, maxDiscoverBodySize)).Decode(&discoveryResp)\n\tif err != nil {\n\t\tif err == io.ErrUnexpectedEOF {\n\t\t\treturn nil, \"\", fmt.Errorf(\"rejecting JSON response from server as it was too large or was truncated\")\n\t\t}\n\t\treturn nil, \"\", fmt.Errorf(\"failed to parse JSON from otherwise successful request to service discovery endpoint: %s\", err)\n\t}\n\tvar identityAPI, discoveryContextAPI string\n\tfor _, svc := range discoveryResp.Services {\n\t\tswitch svc.ServiceName {\n\t\tcase IdentityServiceName:\n\t\t\tfor _, ep := range svc.Endpoints {\n\t\t\t\tif ep.Type == \"main\" && ep.IsActive && ep.API != \"\" {\n\t\t\t\t\tidentityAPI = ep.API\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\tcase DiscoveryContextServiceName:\n\t\t\tfor _, ep := range svc.Endpoints {\n\t\t\t\tif ep.Type == \"main\" && ep.IsActive && ep.API != \"\" {\n\t\t\t\t\tdiscoveryContextAPI = ep.API\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif identityAPI == \"\" {\n\t\treturn nil, \"\", fmt.Errorf(\"didn't find %s in service discovery response, \"+\n\t\t\t\"which may indicate a suspended tenant; unable to detect CyberArk Identity API URL\", IdentityServiceName)\n\t}\n\t//TODO: Should add a check for discoveryContextAPI too?\n\n\tservices := &Services{\n\t\tIdentity:         ServiceEndpoint{API: identityAPI},\n\t\tDiscoveryContext: ServiceEndpoint{API: discoveryContextAPI},\n\t}\n\n\tc.cachedResponse = 
services\n\tc.cachedTenantID = discoveryResp.TenantID\n\tc.cachedResponseTime = time.Now()\n\n\treturn services, discoveryResp.TenantID, nil\n}\n"
  },
  {
    "path": "internal/cyberark/servicediscovery/discovery_test.go",
    "content": "package servicediscovery\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"k8s.io/klog/v2\"\n\t\"k8s.io/klog/v2/ktesting\"\n\n\t_ \"k8s.io/klog/v2/ktesting/init\"\n)\n\nfunc Test_DiscoverIdentityAPIURL(t *testing.T) {\n\ttests := map[string]struct {\n\t\tsubdomain     string\n\t\texpectedURL   string\n\t\texpectedError error\n\t}{\n\t\t\"successful request\": {\n\t\t\tsubdomain:     MockDiscoverySubdomain,\n\t\t\texpectedURL:   \"https://ajp5871.id.integration-cyberark.cloud\",\n\t\t\texpectedError: nil,\n\t\t},\n\t\t\"subdomain not found\": {\n\t\t\tsubdomain:     \"something-random\",\n\t\t\texpectedURL:   \"\",\n\t\t\texpectedError: fmt.Errorf(\"got an HTTP 404 response from service discovery; maybe the subdomain %q is incorrect or does not exist?\", \"something-random\"),\n\t\t},\n\t\t\"no identity service in response\": {\n\t\t\tsubdomain:     \"no-identity\",\n\t\t\texpectedURL:   \"\",\n\t\t\texpectedError: fmt.Errorf(\"didn't find %s in service discovery response, which may indicate a suspended tenant; unable to detect CyberArk Identity API URL\", IdentityServiceName),\n\t\t},\n\t\t\"unexpected HTTP response\": {\n\t\t\tsubdomain:     \"bad-request\",\n\t\t\texpectedURL:   \"\",\n\t\t\texpectedError: fmt.Errorf(\"got unexpected status code 400 Bad Request from request to service discovery API\"),\n\t\t},\n\t\t\"response JSON too long\": {\n\t\t\tsubdomain:     \"json-too-long\",\n\t\t\texpectedURL:   \"\",\n\t\t\texpectedError: fmt.Errorf(\"rejecting JSON response from server as it was too large or was truncated\"),\n\t\t},\n\t\t\"response JSON invalid\": {\n\t\t\tsubdomain:     \"json-invalid\",\n\t\t\texpectedURL:   \"\",\n\t\t\texpectedError: fmt.Errorf(\"failed to parse JSON from otherwise successful request to service discovery endpoint: invalid character 'a' looking for beginning of value\"),\n\t\t},\n\t}\n\n\tfor name, testSpec := range tests 
{\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tlogger := ktesting.NewLogger(t, ktesting.DefaultConfig)\n\t\t\tctx := klog.NewContext(t.Context(), logger)\n\n\t\t\thttpClient := MockDiscoveryServer(t, Services{\n\t\t\t\tIdentity: ServiceEndpoint{\n\t\t\t\t\tAPI: mockIdentityAPIURL,\n\t\t\t\t},\n\t\t\t\tDiscoveryContext: ServiceEndpoint{\n\t\t\t\t\tAPI: mockDiscoveryContextAPIURL,\n\t\t\t\t},\n\t\t\t})\n\n\t\t\tclient := New(httpClient, testSpec.subdomain)\n\n\t\t\tservices, _, err := client.DiscoverServices(ctx)\n\t\t\tif testSpec.expectedError != nil {\n\t\t\t\tassert.EqualError(t, err, testSpec.expectedError.Error())\n\t\t\t\tassert.Nil(t, services)\n\t\t\t\treturn\n\t\t\t}\n\t\t\trequire.NoError(t, err)\n\t\t\tif services.Identity.API != testSpec.expectedURL {\n\t\t\t\tt.Errorf(\"expected API URL=%s\\nobserved API URL=%s\", testSpec.expectedURL, services.Identity.API)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "internal/cyberark/servicediscovery/mock.go",
    "content": "package servicediscovery\n\nimport (\n\t\"bytes\"\n\t\"crypto/rand\"\n\t\"encoding/hex\"\n\t\"encoding/json\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"strings\"\n\t\"testing\"\n\t\"text/template\"\n\n\t\"k8s.io/client-go/transport\"\n\n\tarkapi \"github.com/jetstack/preflight/internal/cyberark/api\"\n\t\"github.com/jetstack/preflight/pkg/version\"\n\n\t_ \"embed\"\n)\n\nconst (\n\t// MockDiscoverySubdomain is the subdomain for which the MockDiscoveryServer will return a success response\n\tMockDiscoverySubdomain = \"tlskp-test\"\n\n\tmockIdentityAPIURL         = \"https://ajp5871.id.integration-cyberark.cloud\"\n\tmockDiscoveryContextAPIURL = \"https://venafi-test.inventory.integration-cyberark.cloud/\"\n\tprefix                     = \"/api/public/tenant-discovery?bySubdomain=\"\n)\n\n//go:embed testdata/discovery_success.json.template\nvar discoverySuccessTemplate string\n\ntype mockDiscoveryServer struct {\n\tt               testing.TB\n\tsuccessResponse string\n}\n\n// MockDiscoveryServer starts a mocked CyberArk service discovery server and\n// returns an HTTP client with the CA certs needed to connect to it.\n//\n// The URL of the mock server is set in the `ARK_DISCOVERY_API` environment\n// variable, so any code using the `servicediscovery.Client` will use this mock\n// server.\n//\n// The mock server will return a successful response when the subdomain is\n// `MockDiscoverySubdomain`, and the API URLs in the response will match those\n// supplied in `services`.\n// Other subdomains can be used to trigger various failure responses.\n//\n// The returned HTTP client has a transport which logs requests and responses\n// depending on log level of the logger supplied in the context.\nfunc MockDiscoveryServer(t testing.TB, services Services) *http.Client {\n\ttmpl := template.Must(template.New(\"mockDiscoverySuccess\").Parse(discoverySuccessTemplate))\n\tbuf := &bytes.Buffer{}\n\terr := tmpl.Execute(buf, services)\n\tif err != nil 
{\n\t\tpanic(err)\n\t}\n\tmds := &mockDiscoveryServer{\n\t\tt:               t,\n\t\tsuccessResponse: buf.String(),\n\t}\n\tserver := httptest.NewTLSServer(mds)\n\tt.Cleanup(server.Close)\n\tt.Setenv(\"ARK_DISCOVERY_API\", server.URL)\n\thttpClient := server.Client()\n\thttpClient.Transport = transport.NewDebuggingRoundTripper(httpClient.Transport, transport.DebugByContext)\n\treturn httpClient\n}\n\nfunc (mds *mockDiscoveryServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tmds.t.Log(r.Method, r.RequestURI)\n\tif r.Method != http.MethodGet {\n\t\t// This was observed by making a POST request to the integration environment\n\t\t// Normally, we'd expect 405 Method Not Allowed but we match the observed response here\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\t_, _ = w.Write([]byte(`{\"message\":\"Missing Authentication Token\"}`))\n\t\treturn\n\t}\n\n\tif !strings.HasPrefix(r.URL.String(), prefix) {\n\t\t// This was observed by making a request to /api/v2/services/asd\n\t\t// Normally, we'd expect 404 Not Found but we match the observed response here\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\t_, _ = w.Write([]byte(`{\"message\":\"Missing Authentication Token\"}`))\n\t\treturn\n\t}\n\n\tif r.Header.Get(\"User-Agent\") != version.UserAgent() {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_, _ = w.Write([]byte(\"should set user agent on all requests\"))\n\t\treturn\n\t}\n\n\tif r.Header.Get(arkapi.TelemetryHeaderKey) == \"\" {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_, _ = w.Write([]byte(\"should set telemetry header on all requests\"))\n\t\treturn\n\t}\n\n\tif r.Header.Get(\"Accept\") != \"application/json\" {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_, _ = w.Write([]byte(\"should request JSON on all requests\"))\n\t\treturn\n\t}\n\n\tsubdomain := strings.TrimPrefix(r.URL.String(), prefix)\n\n\tswitch subdomain {\n\tcase MockDiscoverySubdomain:\n\t\t_, _ = w.Write([]byte(mds.successResponse))\n\n\tcase 
\"no-identity\":\n\t\t// return a snippet of valid service discovery JSON, but don't include the identity service\n\t\t_, _ = w.Write([]byte(`{\n\t\t\t\"services\": [\n\t\t\t\t{\n\t\t\t\t\t\"service_name\": \"data_privacy\",\n\t\t\t\t\t\"region\": \"us-east-1\",\n\t\t\t\t\t\"endpoints\": [\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"is_active\": true,\n\t\t\t\t\t\t\t\"type\": \"main\",\n\t\t\t\t\t\t\t\"ui\": \"https://ui.dataprivacy.integration-cyberark.cloud/\",\n\t\t\t\t\t\t\t\"api\": \"https://us-east-1.dataprivacy.integration-cyberark.cloud/api\"\n\t\t\t\t\t\t}\n\t\t\t\t\t]\n\t\t\t\t}\n\t\t\t]\n\t\t}`))\n\n\tcase \"bad-request\":\n\t\t// test how the client handles a random unexpected response\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t_, _ = w.Write([]byte(\"{}\"))\n\n\tcase \"json-invalid\":\n\t\t// test that the client correctly rejects invalid JSON\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_, _ = w.Write([]byte(`{\"a\": a}`))\n\n\tcase \"json-too-long\":\n\t\t// test that the client correctly rejects JSON which is too long\n\t\tw.WriteHeader(http.StatusOK)\n\n\t\t// we'll hex encode the random bytes (doubling the size)\n\t\tlongData := make([]byte, 1+maxDiscoverBodySize/2)\n\t\t_, _ = rand.Read(longData)\n\n\t\tlongJSON, err := json.Marshal(map[string]string{\"key\": hex.EncodeToString(longData)})\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\t_, _ = w.Write(longJSON)\n\n\tdefault:\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\t_, _ = w.Write([]byte(\"{}\"))\n\t}\n}\n"
  },
  {
    "path": "internal/cyberark/servicediscovery/testdata/README.md",
    "content": "# Test data for CyberArk Discovery\n\nAll data in this folder is derived from an unauthenticated endpoint accessible from the public Internet.\n\nTo get the original data:\n\nNOTE: This API is not implemented yet as of 02.09.2025 but is expected to be finalised by end of PI3 2025.\n```bash\ncurl -fsSL \"${ARK_DISCOVERY_API}?bySubdomain=${ARK_SUBDOMAIN}\" | jq\n```\n\nThen replace `identity_administration.api` with `{{ .Identity.API }}` and\n`discoverycontext.api` with `{{ .DiscoveryContext.API }}`. Those Go template\nfields will be substituted in the tests.\n"
  },
  {
    "path": "internal/cyberark/servicediscovery/testdata/discovery_success.json.template",
    "content": "{\n  \"region\": \"us-east-1\",\n  \"dr_region\": \"us-east-2\",\n  \"subdomain\": \"venafi-test\",\n  \"platform_id\": \"platform-123\",\n  \"tenant_id\": \"tenant-123\",\n  \"identity_id\": \"identity-456\",\n  \"default_url\": \"https://venafi-test.integration-cyberark.cloud\",\n  \"tenant_flags\": {\n    \"is_crdr_supported\": \"true\",\n    \"is_crdr_active\": \"true\"\n  },\n  \"services\": [\n    {\n      \"service_name\": \"data_privacy\",\n      \"region\": \"us-east-1\",\n      \"endpoints\": [\n        {\n          \"is_active\": true,\n          \"type\": \"main\",\n          \"ui\": \"https://ui.dataprivacy.integration-cyberark.cloud/\",\n          \"api\": \"https://us-east-1.dataprivacy.integration-cyberark.cloud/api\"\n        }\n      ]\n    },\n    {\n      \"service_name\": \"secrets_manager\",\n      \"region\": \"us-east-2\",\n      \"endpoints\": [\n        {\n          \"is_active\": true,\n          \"type\": \"main\",\n          \"ui\": \"https://ui.test-conjur.cloud\",\n          \"api\": \"https://venafi-test.secretsmgr.integration-cyberark.cloud/api\"\n        }\n      ]\n    },\n    {\n      \"service_name\": \"idaptive_risk_analytics\",\n      \"region\": \"US-East-Pod\",\n      \"endpoints\": [\n        {\n          \"is_active\": true,\n          \"type\": \"main\",\n          \"ui\": \"https://ajp5871-my.analytics.idaptive.qa\",\n          \"api\": \"https://ajp5871-my.analytics.idaptive.qa\"\n        }\n      ]\n    },\n    {\n      \"service_name\": \"component_manager\",\n      \"region\": \"us-east-1\",\n      \"endpoints\": [\n        {\n          \"is_active\": true,\n          \"type\": \"main\",\n          \"ui\": \"https://ui-connectormanagement.connectormanagement.integration-cyberark.cloud\",\n          \"api\": \"https://venafi-test.connectormanagement.integration-cyberark.cloud/api\"\n        }\n      ]\n    },\n    {\n      \"service_name\": \"recording\",\n      \"region\": \"us-east-1\",\n      
\"endpoints\": [\n        {\n          \"is_active\": true,\n          \"type\": \"main\",\n          \"ui\": \"https://us-east-1.rec-ui.recording.integration-cyberark.cloud\",\n          \"api\": \"https://venafi-test.recording.integration-cyberark.cloud/api\"\n        }\n      ]\n    },\n    {\n      \"service_name\": \"identity_user_portal\",\n      \"region\": \"US-East-Pod\",\n      \"endpoints\": [\n        {\n          \"is_active\": true,\n          \"type\": \"main\",\n          \"ui\": \"https://ajp5871.id.integration-cyberark.cloud\",\n          \"api\": \"https://ajp5871.id.integration-cyberark.cloud\"\n        }\n      ]\n    },\n    {\n      \"service_name\": \"userportal\",\n      \"region\": \"us-east-1\",\n      \"endpoints\": [\n        {\n          \"is_active\": true,\n          \"type\": \"main\",\n          \"ui\": \"https://us-east-1.ui.userportal.integration-cyberark.cloud/\",\n          \"api\": \"https://venafi-test.api.userportal.integration-cyberark.cloud/api\"\n        }\n      ]\n    },\n    {\n      \"service_name\": \"cloud_onboarding\",\n      \"region\": \"us-east-1\",\n      \"endpoints\": [\n        {\n          \"is_active\": true,\n          \"type\": \"main\",\n          \"ui\": \"https://ui-cloudonboarding.cloudonboarding.integration-cyberark.cloud/\",\n          \"api\": \"https://venafi-test.cloudonboarding.integration-cyberark.cloud/api\"\n        }\n      ]\n    },\n    {\n      \"service_name\": \"identity_administration\",\n      \"region\": \"US-East-Pod\",\n      \"endpoints\": [\n        {\n          \"is_active\": true,\n          \"type\": \"main\",\n          \"ui\": \"https://ajp5871.id.integration-cyberark.cloud\",\n          \"api\": \"{{ .Identity.API }}\"\n        }\n      ]\n    },\n    {\n      \"service_name\": \"adminportal\",\n      \"region\": \"us-east-1\",\n      \"endpoints\": [\n        {\n          \"is_active\": true,\n          \"type\": \"main\",\n          \"ui\": 
\"https://ui-adminportal.adminportal.integration-cyberark.cloud\",\n          \"api\": \"https://venafi-test.adminportal.integration-cyberark.cloud/api\"\n        }\n      ]\n    },\n    {\n      \"service_name\": \"analytics\",\n      \"region\": \"us-east-1\",\n      \"endpoints\": [\n        {\n          \"is_active\": true,\n          \"type\": \"main\",\n          \"ui\": \"https://venafi-test.analytics.integration-cyberark.cloud/\",\n          \"api\": \"https://venafi-test.analytics.integration-cyberark.cloud/api\"\n        }\n      ]\n    },\n    {\n      \"service_name\": \"session_monitoring\",\n      \"region\": \"us-east-1\",\n      \"endpoints\": [\n        {\n          \"is_active\": true,\n          \"type\": \"main\",\n          \"ui\": \"https://us-east-1.sm-ui.sessionmonitoring.integration-cyberark.cloud\",\n          \"api\": \"https://venafi-test.sessionmonitoring.integration-cyberark.cloud/api\"\n        }\n      ]\n    },\n    {\n      \"service_name\": \"audit\",\n      \"region\": \"us-east-1\",\n      \"endpoints\": [\n        {\n          \"is_active\": true,\n          \"type\": \"main\",\n          \"ui\": \"https://ui.audit-ui.integration-cyberark.cloud\",\n          \"api\": \"https://venafi-test.audit.integration-cyberark.cloud/api\"\n        }\n      ]\n    },\n    {\n      \"service_name\": \"fmcdp\",\n      \"region\": \"us-east-1\",\n      \"endpoints\": [\n        {\n          \"is_active\": true,\n          \"type\": \"main\",\n          \"ui\": \"https://tagtig.io/\",\n          \"api\": \"https://tagtig.io/api\"\n        }\n      ]\n    },\n    {\n      \"service_name\": \"featureadopt\",\n      \"region\": \"us-east-1\",\n      \"endpoints\": [\n        {\n          \"is_active\": true,\n          \"type\": \"main\",\n          \"ui\": \"https://ui-featureadopt.featureadopt.integration-cyberark.cloud/\",\n          \"api\": \"https://us-east-1-featureadopt.featureadopt.integration-cyberark.cloud/api\"\n        }\n      ]\n    
},\n    {\n      \"service_name\": \"discoverycontext\",\n      \"region\": \"us-east-1\",\n      \"endpoints\": [\n        {\n          \"is_active\": true,\n          \"type\": \"main\",\n          \"ui\": \"https://ui-inventory.inventory.integration-cyberark.cloud/\",\n          \"api\": \"{{ .DiscoveryContext.API }}\"\n        }\n      ]\n    }\n  ]\n}\n"
  },
  {
    "path": "internal/cyberark/testing/testing.go",
    "content": "package testing\n\nimport (\n\t\"os\"\n\t\"testing\"\n)\n\n// SkipIfNoEnv skips the test if the required CyberArk environment variables are not set.\nfunc SkipIfNoEnv(t testing.TB) {\n\tt.Helper()\n\n\tif os.Getenv(\"ARK_SUBDOMAIN\") == \"\" ||\n\t\tos.Getenv(\"ARK_USERNAME\") == \"\" ||\n\t\tos.Getenv(\"ARK_SECRET\") == \"\" {\n\t\tt.Skip(\"Skipping test because one of ARK_SUBDOMAIN, ARK_USERNAME or ARK_SECRET isn't set\")\n\t}\n\n}\n"
  },
  {
    "path": "internal/envelope/doc.go",
    "content": "// Package envelope provides types and interfaces for envelope encryption.\n//\n// Envelope encryption combines asymmetric and symmetric cryptography to\n// efficiently encrypt data. The Encryptor interface defines the encryption\n// operation, returning data in JWE (JSON Web Encryption) format as defined\n// in RFC 7516.\n//\n// Implementations are available in subpackages:\n//\n//   - internal/envelope/rsa: RSA-OAEP-256 + AES-256-GCM using JWE\n//\n// See subpackage documentation for usage examples.\npackage envelope\n"
  },
  {
    "path": "internal/envelope/keyfetch/client.go",
    "content": "package keyfetch\n\nimport (\n\t\"context\"\n\t\"crypto/rsa\"\n\t\"crypto/x509\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/jetstack/venafi-connection-lib/http_client\"\n\t\"github.com/lestrrat-go/jwx/v3/jwk\"\n\t\"k8s.io/klog/v2\"\n\n\t\"github.com/jetstack/preflight/internal/cyberark\"\n\t\"github.com/jetstack/preflight/internal/cyberark/identity\"\n\t\"github.com/jetstack/preflight/internal/cyberark/servicediscovery\"\n\t\"github.com/jetstack/preflight/pkg/version\"\n)\n\nconst (\n\t// minRSAKeySize is the minimum RSA key size in bits; we'd expect that keys will be larger but 2048 is a sane floor\n\t// to enforce to ensure that a weak key can't accidentally be used\n\tminRSAKeySize = 2048\n)\n\n// KeyFetcher is an interface for fetching public keys.\ntype KeyFetcher interface {\n\t// FetchKey retrieves a public key from the key source.\n\tFetchKey(ctx context.Context) (PublicKey, error)\n}\n\n// Compile-time check that Client implements KeyFetcher\nvar _ KeyFetcher = (*Client)(nil)\n\n// PublicKey represents an RSA public key retrieved from the key server.\ntype PublicKey struct {\n\t// KeyID is the unique identifier for this key\n\tKeyID string\n\n\t// Key is the actual RSA public key\n\tKey *rsa.PublicKey\n}\n\n// Client fetches public keys from a CyberArk HTTP endpoint that provides keys in JWKS format.\n// It can be expanded in future to support other key types and formats, but for now it only supports RSA keys\n// and ignored other types.\ntype Client struct {\n\tdiscoveryClient *servicediscovery.Client\n\tidentityClient  *identity.Client\n\tcfg             cyberark.ClientConfig\n\n\t// httpClient is the HTTP client used for requests\n\thttpClient *http.Client\n\n\tcachedKey      PublicKey\n\tcachedKeyMutex sync.Mutex\n\tcachedKeyTime  time.Time\n}\n\n// NewClient creates a new key fetching client.\n// Uses CyberArk service discovery to derive the JWKS endpoint and CyberArk identity client 
for authentication.\n// Constructing the client involves a service discovery call to initialise the identity client,\n// so this may return an error if the discovery client is not able to connect to the service discovery endpoint.\n// If httpClient is nil, a default HTTP client will be created.\nfunc NewClient(ctx context.Context, discoveryClient *servicediscovery.Client, cfg cyberark.ClientConfig, httpClient *http.Client) (*Client, error) {\n\tif httpClient == nil {\n\t\tvar rootCAs *x509.CertPool\n\t\thttpClient = http_client.NewDefaultClient(version.UserAgent(), rootCAs)\n\t}\n\n\tservices, _, err := discoveryClient.DiscoverServices(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get services from discovery client for initialising identity client: %w\", err)\n\t}\n\n\treturn &Client{\n\t\tdiscoveryClient: discoveryClient,\n\t\tidentityClient:  identity.New(httpClient, services.Identity.API, cfg.Subdomain),\n\t\tcfg:             cfg,\n\t\thttpClient:      httpClient,\n\t}, nil\n}\n\n// FetchKey retrieves the public keys from the configured endpoint.\n// It returns a slice of PublicKey structs containing the key material and metadata.\nfunc (c *Client) FetchKey(ctx context.Context) (PublicKey, error) {\n\tlogger := klog.FromContext(ctx).WithName(\"keyfetch\")\n\tc.cachedKeyMutex.Lock()\n\tdefer c.cachedKeyMutex.Unlock()\n\n\tif time.Since(c.cachedKeyTime) < 15*time.Minute {\n\t\tklog.FromContext(ctx).WithName(\"keyfetch\").V(2).Info(\"using cached key\", \"fetchedAt\", c.cachedKeyTime.Format(time.RFC3339Nano), \"kid\", c.cachedKey.KeyID)\n\t\treturn c.cachedKey, nil\n\t}\n\n\tservices, _, err := c.discoveryClient.DiscoverServices(ctx)\n\tif err != nil {\n\t\treturn PublicKey{}, fmt.Errorf(\"failed to get services from discovery client: %w\", err)\n\t}\n\n\terr = c.identityClient.LoginUsernamePassword(ctx, c.cfg.Username, []byte(c.cfg.Secret))\n\tif err != nil {\n\t\treturn PublicKey{}, fmt.Errorf(\"failed to authenticate for fetching JWKs: %w\", 
err)\n\t}\n\n\tendpoint, err := url.JoinPath(services.DiscoveryContext.API, \"discovery-context/jwks\")\n\tif err != nil {\n\t\treturn PublicKey{}, fmt.Errorf(\"failed to construct endpoint URL: %w\", err)\n\t}\n\n\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)\n\tif err != nil {\n\t\treturn PublicKey{}, fmt.Errorf(\"failed to create request: %w\", err)\n\t}\n\n\t_, err = c.identityClient.AuthenticateRequest(req)\n\tif err != nil {\n\t\treturn PublicKey{}, fmt.Errorf(\"failed to authenticate request: %s\", err)\n\t}\n\n\treq.Header.Set(\"Accept\", \"application/json\")\n\tversion.SetUserAgent(req)\n\n\tresp, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn PublicKey{}, fmt.Errorf(\"failed to fetch keys from %s: %w\", endpoint, err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tbody, _ := io.ReadAll(resp.Body)\n\t\treturn PublicKey{}, fmt.Errorf(\"unexpected status code %d from %s: %s\", resp.StatusCode, endpoint, string(body))\n\t}\n\n\tbody, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn PublicKey{}, fmt.Errorf(\"failed to read response body: %w\", err)\n\t}\n\n\tkeySet, err := jwk.Parse(body)\n\tif err != nil {\n\t\treturn PublicKey{}, fmt.Errorf(\"failed to parse JWKs response: %w\", err)\n\t}\n\n\tfor i := range keySet.Len() {\n\t\tkey, ok := keySet.Key(i)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Only process RSA keys\n\t\tif key.KeyType().String() != \"RSA\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar rawKey any\n\t\tif err := jwk.Export(key, &rawKey); err != nil {\n\t\t\t// skip unparseable keys\n\t\t\tcontinue\n\t\t}\n\n\t\trsaKey, ok := rawKey.(*rsa.PublicKey)\n\t\tif !ok {\n\t\t\t// only process RSA keys (for now)\n\t\t\tcontinue\n\t\t}\n\n\t\tif rsaKey.N.BitLen() < minRSAKeySize {\n\t\t\t// skip keys that are too small to be secure\n\t\t\tcontinue\n\t\t}\n\n\t\tkid, ok := key.KeyID()\n\t\tif !ok {\n\t\t\t// skip any keys which don't have an 
ID\n\t\t\tcontinue\n\t\t}\n\n\t\talg, ok := key.Algorithm()\n\t\tif !ok {\n\t\t\t// skip any keys which don't have an algorithm specified\n\t\t\tcontinue\n\t\t}\n\n\t\tif alg.String() != \"RSA-OAEP-256\" {\n\t\t\t// we only use RSA keys for RSA-OAEP-256\n\t\t\tcontinue\n\t\t}\n\n\t\t// return the first valid key we find\n\n\t\tlogger.Info(\"fetched valid RSA key\", \"kid\", kid)\n\n\t\tc.cachedKey = PublicKey{\n\t\t\tKeyID: kid,\n\t\t\tKey:   rsaKey,\n\t\t}\n\t\tc.cachedKeyTime = time.Now()\n\n\t\treturn c.cachedKey, nil\n\t}\n\n\treturn PublicKey{}, fmt.Errorf(\"no valid RSA keys found at %s\", endpoint)\n}\n"
  },
  {
    "path": "internal/envelope/keyfetch/client_test.go",
    "content": "package keyfetch\n\nimport (\n\t\"context\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/jetstack/preflight/internal/cyberark\"\n\t\"github.com/jetstack/preflight/internal/cyberark/identity\"\n\t\"github.com/jetstack/preflight/internal/cyberark/servicediscovery\"\n)\n\n// testClientSetup sets up a complete test environment with mock identity and discovery servers\n// and returns a configured client along with the test ClientConfig\nfunc testClientSetup(t *testing.T, jwksServerURL string) (*Client, cyberark.ClientConfig) {\n\tt.Helper()\n\n\t// Create mock identity server\n\tidentityURL, httpClient := identity.MockIdentityServer(t)\n\n\t// Set up services for mock discovery server\n\tservices := servicediscovery.Services{\n\t\tIdentity: servicediscovery.ServiceEndpoint{\n\t\t\tIsActive: true,\n\t\t\tType:     \"main\",\n\t\t\tAPI:      identityURL,\n\t\t},\n\t\tDiscoveryContext: servicediscovery.ServiceEndpoint{\n\t\t\tIsActive: true,\n\t\t\tType:     \"main\",\n\t\t\tAPI:      jwksServerURL,\n\t\t},\n\t}\n\n\t// Create mock discovery server\n\t_ = servicediscovery.MockDiscoveryServer(t, services)\n\n\t// Create discovery client\n\tdiscoveryClient := servicediscovery.New(httpClient, servicediscovery.MockDiscoverySubdomain)\n\n\t// Create test config with credentials that match the mock identity server\n\tcfg := cyberark.ClientConfig{\n\t\tSubdomain: servicediscovery.MockDiscoverySubdomain,\n\t\tUsername:  \"test@example.com\", // matches successUser in mock identity server\n\t\tSecret:    \"somepassword\",     // matches successPassword in mock identity server\n\t}\n\n\t// Create the keyfetch client with the properly configured httpClient\n\tclient, err := NewClient(t.Context(), discoveryClient, cfg, httpClient)\n\trequire.NoError(t, err)\n\n\treturn client, cfg\n}\n\nfunc mockJWKSServer(t *testing.T, statusCode int, jwksResponse 
string) *httptest.Server {\n\tt.Helper()\n\n\tserver := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// Check if this is the JWKS endpoint\n\t\tif r.URL.Path == \"/discovery-context/jwks\" {\n\t\t\tassert.Equal(t, http.MethodGet, r.Method)\n\t\t\tassert.Equal(t, \"application/json\", r.Header.Get(\"Accept\"))\n\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.WriteHeader(statusCode)\n\t\t\t_, err := w.Write([]byte(jwksResponse))\n\t\t\trequire.NoError(t, err)\n\t\t}\n\t}))\n\n\tt.Cleanup(server.Close)\n\n\treturn server\n}\n\nfunc TestClient_FetchKey(t *testing.T) {\n\t// Sample JWKs response with a valid RSA key\n\t// This is a minimal example with the required fields, used in multiple tests\n\tjwksResponse := `{\n\"keys\": [\n\t\t\t{\n\t\t\t\t\"kty\": \"RSA\",\n\t\t\t\t\"use\": \"enc\",\n\t\t\t\t\"kid\": \"test-key-1\",\n\t\t\t\t\"alg\": \"RSA-OAEP-256\",\n\t\t\t\t\"n\": \"vDdioGpDuAEQDd4WRXyWa4sZ5EeS9OPsRrU_jU3PbZdDcANxfh_WSeSvSBKGfGXGC3fIzu0Ernk9VjXcs3LeFdRq2N4nNRZvCzsd_MjBtn7CWgjM_Sk9DXEGn3cHHilcJUJQ4i2YgX9bHu0odNgE6cSVIUEMIC2EGuGk_I7lwroinAAwXpNLLQkV_25kv_QQof2i5f7AocY6QTd0SAo8ZUqFBzanupkeFpl3-Bsz6_zdt_N0x9k5XHQn42Q2oTupTwvXFbE1x8XtCpiaP3_fsQ9dN7t4z6HtwlNUJB2tFfF6PgdKZ9LuJpYjFPYzJQ6Rv28fuc8YHcF7Jittjyzmew\",\n\t\t\t\t\"e\": \"AQAB\"\n\t\t\t}\n\t\t]\n\t}`\n\n\tt.Run(\"successful fetch\", func(t *testing.T) {\n\n\t\tserver := mockJWKSServer(t, http.StatusOK, jwksResponse)\n\n\t\tclient, _ := testClientSetup(t, server.URL)\n\t\tkey, err := client.FetchKey(t.Context())\n\n\t\trequire.NoError(t, err)\n\n\t\tassert.Equal(t, \"test-key-1\", key.KeyID)\n\t\tassert.NotNil(t, key.Key)\n\t\tassert.NotNil(t, key.Key.N)\n\t\tassert.Greater(t, key.Key.E, 0)\n\t})\n\n\tt.Run(\"multiple keys\", func(t *testing.T) {\n\t\t// want to check that FetchKey returns the first valid RSA key, even if there are multiple keys in the response\n\t\tmultiKeyResponse := `{\n\t\t\t\"keys\": [\n\t\t\t\t{\n\t\t\t\t\t\"kty\": 
\"RSA\",\n\t\t\t\t\t\"kid\": \"key-1\",\n\t\t\t\t\t\"alg\": \"RSA-OAEP-256\",\n\t\t\t\t\t\"n\": \"vDdioGpDuAEQDd4WRXyWa4sZ5EeS9OPsRrU_jU3PbZdDcANxfh_WSeSvSBKGfGXGC3fIzu0Ernk9VjXcs3LeFdRq2N4nNRZvCzsd_MjBtn7CWgjM_Sk9DXEGn3cHHilcJUJQ4i2YgX9bHu0odNgE6cSVIUEMIC2EGuGk_I7lwroinAAwXpNLLQkV_25kv_QQof2i5f7AocY6QTd0SAo8ZUqFBzanupkeFpl3-Bsz6_zdt_N0x9k5XHQn42Q2oTupTwvXFbE1x8XtCpiaP3_fsQ9dN7t4z6HtwlNUJB2tFfF6PgdKZ9LuJpYjFPYzJQ6Rv28fuc8YHcF7Jittjyzmew\",\n\t\t\t\t\t\"e\": \"AQAB\"\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"kty\": \"RSA\",\n\t\t\t\t\t\"kid\": \"key-2\",\n\t\t\t\t\t\"alg\": \"RSA-OAEP-256\",\n\t\t\t\t\t\"n\": \"4J0VE8FK1rSQUBGiLpk4MkPyFApCyCugOfkuH0hiHclxZay96JgyZylH97eqs-ZmWXtv42ynYctIj2ZleaoqVDfMOqZ1GsbccyNAYReDtUYgeUtJEajpfUo1vitoh6OEB6nB0Hau07ELLqcUoxH_zkH5Kwoi_BgxByJDQ1HOut6nyEPTXLTMrAYK_pqL_kzsU0OtrCgSBh6j-11ToqUfxsLupbadRC0t5zrq4-3mZKqxBUz4XB2g3b9d2lH7mOTl5J_E8jcD4tK9DePzjdbkRWonBEJetWl9f2mh_VD1sxJbie1kzM5cdQylXzV_AvhSr58w00qy6XR_QXI10UU16Q\",\n\t\t\t\t\t\"e\": \"AQAB\"\n\t\t\t\t}\n\t\t\t]\n\t\t}`\n\n\t\tserver := mockJWKSServer(t, http.StatusOK, multiKeyResponse)\n\n\t\tclient, _ := testClientSetup(t, server.URL)\n\t\tkey, err := client.FetchKey(t.Context())\n\n\t\trequire.NoError(t, err)\n\n\t\tassert.Equal(t, \"key-1\", key.KeyID)\n\t})\n\n\tt.Run(\"filters non-RSA keys\", func(t *testing.T) {\n\t\t// check that the client correctly filters out non-RSA keys and returns the first valid RSA key\n\t\tmixedKeyResponse := `{\n\t\t\t\"keys\": [\n\t\t\t\t{\n\t\t\t\t\t\"kty\": \"EC\",\n\t\t\t\t\t\"kid\": \"ec-key-1\",\n\t\t\t\t\t\"alg\": \"ES256\",\n\t\t\t\t\t\"crv\": \"P-256\",\n\t\t\t\t\t\"x\": \"WKn-ZIGevcwGIyyrzFoZNBdaq9_TsqzGl96oc0CWuis\",\n\t\t\t\t\t\"y\": \"y77t-RvAHRKTsSGdIYUfweuOvwrvDD-Q3Hv5J0fSKbE\"\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"kty\": \"RSA\",\n\t\t\t\t\t\"kid\": \"rsa-key-1\",\n\t\t\t\t\t\"alg\": \"RSA-OAEP-256\",\n\t\t\t\t\t\"n\": 
\"vDdioGpDuAEQDd4WRXyWa4sZ5EeS9OPsRrU_jU3PbZdDcANxfh_WSeSvSBKGfGXGC3fIzu0Ernk9VjXcs3LeFdRq2N4nNRZvCzsd_MjBtn7CWgjM_Sk9DXEGn3cHHilcJUJQ4i2YgX9bHu0odNgE6cSVIUEMIC2EGuGk_I7lwroinAAwXpNLLQkV_25kv_QQof2i5f7AocY6QTd0SAo8ZUqFBzanupkeFpl3-Bsz6_zdt_N0x9k5XHQn42Q2oTupTwvXFbE1x8XtCpiaP3_fsQ9dN7t4z6HtwlNUJB2tFfF6PgdKZ9LuJpYjFPYzJQ6Rv28fuc8YHcF7Jittjyzmew\",\n\t\t\t\t\t\"e\": \"AQAB\"\n\t\t\t\t}\n\t\t\t]\n\t\t}`\n\n\t\tserver := mockJWKSServer(t, http.StatusOK, mixedKeyResponse)\n\n\t\tclient, _ := testClientSetup(t, server.URL)\n\t\tkey, err := client.FetchKey(t.Context())\n\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, \"rsa-key-1\", key.KeyID)\n\t})\n\n\tt.Run(\"error on non-200 status\", func(t *testing.T) {\n\t\tserver := mockJWKSServer(t, http.StatusInternalServerError, \"\") // Response body won't be used since we return 500\n\n\t\tclient, _ := testClientSetup(t, server.URL)\n\t\t_, err := client.FetchKey(t.Context())\n\n\t\trequire.Error(t, err)\n\t\tassert.Contains(t, err.Error(), \"unexpected status code 500\")\n\t})\n\n\tt.Run(\"error on invalid JSON\", func(t *testing.T) {\n\t\tserver := mockJWKSServer(t, http.StatusOK, \"invalid json\")\n\n\t\tclient, _ := testClientSetup(t, server.URL)\n\t\t_, err := client.FetchKey(t.Context())\n\n\t\trequire.Error(t, err)\n\t\tassert.Contains(t, err.Error(), \"failed to parse JWKs response\")\n\t})\n\n\tt.Run(\"error on no RSA keys\", func(t *testing.T) {\n\t\tecOnlyResponse := `{\n\t\t\t\"keys\": [\n\t\t\t\t{\n\t\t\t\t\t\"kty\": \"EC\",\n\t\t\t\t\t\"kid\": \"ec-key-1\",\n\t\t\t\t\t\"alg\": \"ES256\",\n\t\t\t\t\t\"crv\": \"P-256\",\n\t\t\t\t\t\"x\": \"WKn-ZIGevcwGIyyrzFoZNBdaq9_TsqzGl96oc0CWuis\",\n\t\t\t\t\t\"y\": \"y77t-RvAHRKTsSGdIYUfweuOvwrvDD-Q3Hv5J0fSKbE\"\n\t\t\t\t}\n\t\t\t]\n\t\t}`\n\n\t\tserver := mockJWKSServer(t, http.StatusOK, ecOnlyResponse)\n\n\t\tclient, _ := testClientSetup(t, server.URL)\n\t\t_, err := client.FetchKey(t.Context())\n\n\t\trequire.Error(t, err)\n\t\tassert.Contains(t, err.Error(), \"no valid 
RSA keys found\")\n\t})\n\n\tt.Run(\"context cancellation\", func(t *testing.T) {\n\t\tserver := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t// This handler will never respond\n\t\t\t<-r.Context().Done()\n\t\t}))\n\t\tdefer server.Close()\n\n\t\tclient, _ := testClientSetup(t, server.URL)\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\tcancel() // Cancel immediately\n\n\t\t_, err := client.FetchKey(ctx)\n\n\t\trequire.Error(t, err)\n\t\tassert.Contains(t, err.Error(), \"context canceled\")\n\t})\n\n\tt.Run(\"authentication failure\", func(t *testing.T) {\n\t\tserver := mockJWKSServer(t, http.StatusOK, jwksResponse)\n\n\t\t// Create mock identity server\n\t\tidentityURL, httpClient := identity.MockIdentityServer(t)\n\n\t\t// Set up services for mock discovery server\n\t\tservices := servicediscovery.Services{\n\t\t\tIdentity: servicediscovery.ServiceEndpoint{\n\t\t\t\tIsActive: true,\n\t\t\t\tType:     \"main\",\n\t\t\t\tAPI:      identityURL,\n\t\t\t},\n\t\t\tDiscoveryContext: servicediscovery.ServiceEndpoint{\n\t\t\t\tIsActive: true,\n\t\t\t\tType:     \"main\",\n\t\t\t\tAPI:      server.URL,\n\t\t\t},\n\t\t}\n\n\t\t// Create mock discovery server\n\t\t_ = servicediscovery.MockDiscoveryServer(t, services)\n\n\t\t// Create discovery client\n\t\tdiscoveryClient := servicediscovery.New(httpClient, servicediscovery.MockDiscoverySubdomain)\n\n\t\t// Create test config with WRONG credentials\n\t\t// Use the failureUser from the mock identity server\n\t\tcfg := cyberark.ClientConfig{\n\t\t\tSubdomain: servicediscovery.MockDiscoverySubdomain,\n\t\t\tUsername:  \"test-fail@example.com\", // This user is configured to fail in the mock server // TODO: export these constants from the identity package to avoid hardcoding them here\n\t\t\tSecret:    \"somepassword\",\n\t\t}\n\n\t\t// Create the keyfetch client\n\t\tclient, err := NewClient(t.Context(), discoveryClient, cfg, httpClient)\n\t\trequire.NoError(t, 
err)\n\n\t\t_, err = client.FetchKey(t.Context())\n\n\t\trequire.Error(t, err)\n\t\tassert.Contains(t, err.Error(), \"failed to authenticate\")\n\t})\n\n\tt.Run(\"service discovery fails\", func(t *testing.T) {\n\t\t// Create mock identity server (won't be used but needed for setup)\n\t\tidentityURL, httpClient := identity.MockIdentityServer(t)\n\n\t\t// Set up services for mock discovery server\n\t\tservices := servicediscovery.Services{\n\t\t\tIdentity: servicediscovery.ServiceEndpoint{\n\t\t\t\tIsActive: true,\n\t\t\t\tType:     \"main\",\n\t\t\t\tAPI:      identityURL,\n\t\t\t},\n\t\t}\n\n\t\t// Create mock discovery server\n\t\t_ = servicediscovery.MockDiscoveryServer(t, services)\n\n\t\t// Create discovery client with a subdomain that triggers failure\n\t\tdiscoveryClient := servicediscovery.New(httpClient, \"bad-request\")\n\n\t\tcfg := cyberark.ClientConfig{\n\t\t\tSubdomain: \"bad-request\",\n\t\t\tUsername:  \"test@example.com\",\n\t\t\tSecret:    \"somepassword\",\n\t\t}\n\n\t\t_, err := NewClient(t.Context(), discoveryClient, cfg, httpClient)\n\n\t\trequire.Error(t, err)\n\t\tassert.Contains(t, err.Error(), \"failed to get services from discovery client\")\n\t})\n\n\tt.Run(\"ignores small RSA keys\", func(t *testing.T) {\n\t\t// This is a 1024-bit RSA key (half the minimum size)\n\t\t// Generated with: openssl genrsa 1024 | openssl rsa -pubin -outform der | base64url\n\t\tsmallKeyResponse := `{\n\t\t\t\"keys\": [\n\t\t\t\t{\n\t\t\t\t\t\"kty\": \"RSA\",\n\t\t\t\t\t\"kid\": \"small-key-1\",\n\t\t\t\t\t\"alg\": \"RSA-OAEP-256\",\n\t\t\t\t\t\"n\": \"wKhJSKlx9aO_TmT4qAqN5EZ8FeXCXmh5F_hGHWL6c4lKvdKc_jBq1YI0H8pCIWZ6WhPKmBZ8JQ4Q2q0TjvdKLYQ8jqzMZxz4J_z4ySbN7yBn7N7xKqL5JN7KqVr7N8KQ\",\n\t\t\t\t\t\"e\": \"AQAB\"\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"kty\": \"RSA\",\n\t\t\t\t\t\"kid\": \"valid-key\",\n\t\t\t\t\t\"alg\": \"RSA-OAEP-256\",\n\t\t\t\t\t\"n\": 
\"vDdioGpDuAEQDd4WRXyWa4sZ5EeS9OPsRrU_jU3PbZdDcANxfh_WSeSvSBKGfGXGC3fIzu0Ernk9VjXcs3LeFdRq2N4nNRZvCzsd_MjBtn7CWgjM_Sk9DXEGn3cHHilcJUJQ4i2YgX9bHu0odNgE6cSVIUEMIC2EGuGk_I7lwroinAAwXpNLLQkV_25kv_QQof2i5f7AocY6QTd0SAo8ZUqFBzanupkeFpl3-Bsz6_zdt_N0x9k5XHQn42Q2oTupTwvXFbE1x8XtCpiaP3_fsQ9dN7t4z6HtwlNUJB2tFfF6PgdKZ9LuJpYjFPYzJQ6Rv28fuc8YHcF7Jittjyzmew\",\n\t\t\t\t\t\"e\": \"AQAB\"\n\t\t\t\t}\n\t\t\t]\n\t\t}`\n\n\t\tserver := mockJWKSServer(t, http.StatusOK, smallKeyResponse)\n\n\t\tclient, _ := testClientSetup(t, server.URL)\n\t\tkey, err := client.FetchKey(t.Context())\n\n\t\trequire.NoError(t, err)\n\t\t// Should skip the small key and return the valid one\n\t\tassert.Equal(t, \"valid-key\", key.KeyID)\n\t})\n\n\tt.Run(\"skips keys without kid\", func(t *testing.T) {\n\t\tnoKidResponse := `{\n\t\t\t\"keys\": [\n\t\t\t\t{\n\t\t\t\t\t\"kty\": \"RSA\",\n\t\t\t\t\t\"alg\": \"RSA-OAEP-256\",\n\t\t\t\t\t\"n\": \"vDdioGpDuAEQDd4WRXyWa4sZ5EeS9OPsRrU_jU3PbZdDcANxfh_WSeSvSBKGfGXGC3fIzu0Ernk9VjXcs3LeFdRq2N4nNRZvCzsd_MjBtn7CWgjM_Sk9DXEGn3cHHilcJUJQ4i2YgX9bHu0odNgE6cSVIUEMIC2EGuGk_I7lwroinAAwXpNLLQkV_25kv_QQof2i5f7AocY6QTd0SAo8ZUqFBzanupkeFpl3-Bsz6_zdt_N0x9k5XHQn42Q2oTupTwvXFbE1x8XtCpiaP3_fsQ9dN7t4z6HtwlNUJB2tFfF6PgdKZ9LuJpYjFPYzJQ6Rv28fuc8YHcF7Jittjyzmew\",\n\t\t\t\t\t\"e\": \"AQAB\"\n\t\t\t\t}\n\t\t\t]\n\t\t}`\n\n\t\tserver := mockJWKSServer(t, http.StatusOK, noKidResponse)\n\n\t\tclient, _ := testClientSetup(t, server.URL)\n\t\t_, err := client.FetchKey(t.Context())\n\n\t\trequire.Error(t, err)\n\t\tassert.Contains(t, err.Error(), \"no valid RSA keys found\")\n\t})\n\n\tt.Run(\"filters keys with wrong algorithm\", func(t *testing.T) {\n\t\twrongAlgResponse := `{\n\t\t\t\"keys\": [\n\t\t\t\t{\n\t\t\t\t\t\"kty\": \"RSA\",\n\t\t\t\t\t\"kid\": \"wrong-alg-key\",\n\t\t\t\t\t\"alg\": \"RS256\",\n\t\t\t\t\t\"n\": 
\"vDdioGpDuAEQDd4WRXyWa4sZ5EeS9OPsRrU_jU3PbZdDcANxfh_WSeSvSBKGfGXGC3fIzu0Ernk9VjXcs3LeFdRq2N4nNRZvCzsd_MjBtn7CWgjM_Sk9DXEGn3cHHilcJUJQ4i2YgX9bHu0odNgE6cSVIUEMIC2EGuGk_I7lwroinAAwXpNLLQkV_25kv_QQof2i5f7AocY6QTd0SAo8ZUqFBzanupkeFpl3-Bsz6_zdt_N0x9k5XHQn42Q2oTupTwvXFbE1x8XtCpiaP3_fsQ9dN7t4z6HtwlNUJB2tFfF6PgdKZ9LuJpYjFPYzJQ6Rv28fuc8YHcF7Jittjyzmew\",\n\t\t\t\t\t\"e\": \"AQAB\"\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"kty\": \"RSA\",\n\t\t\t\t\t\"kid\": \"correct-alg-key\",\n\t\t\t\t\t\"alg\": \"RSA-OAEP-256\",\n\t\t\t\t\t\"n\": \"4J0VE8FK1rSQUBGiLpk4MkPyFApCyCugOfkuH0hiHclxZay96JgyZylH97eqs-ZmWXtv42ynYctIj2ZleaoqVDfMOqZ1GsbccyNAYReDtUYgeUtJEajpfUo1vitoh6OEB6nB0Hau07ELLqcUoxH_zkH5Kwoi_BgxByJDQ1HOut6nyEPTXLTMrAYK_pqL_kzsU0OtrCgSBh6j-11ToqUfxsLupbadRC0t5zrq4-3mZKqxBUz4XB2g3b9d2lH7mOTl5J_E8jcD4tK9DePzjdbkRWonBEJetWl9f2mh_VD1sxJbie1kzM5cdQylXzV_AvhSr58w00qy6XR_QXI10UU16Q\",\n\t\t\t\t\t\"e\": \"AQAB\"\n\t\t\t\t}\n\t\t\t]\n\t\t}`\n\n\t\tserver := mockJWKSServer(t, http.StatusOK, wrongAlgResponse)\n\n\t\tclient, _ := testClientSetup(t, server.URL)\n\t\tkey, err := client.FetchKey(t.Context())\n\n\t\trequire.NoError(t, err)\n\t\t// Should skip the RS256 key and return the RSA-OAEP-256 key\n\t\tassert.Equal(t, \"correct-alg-key\", key.KeyID)\n\t})\n\n\tt.Run(\"skips keys without algorithm\", func(t *testing.T) {\n\t\tnoAlgResponse := `{\n\t\t\t\"keys\": [\n\t\t\t\t{\n\t\t\t\t\t\"kty\": \"RSA\",\n\t\t\t\t\t\"kid\": \"no-alg-key\",\n\t\t\t\t\t\"n\": \"vDdioGpDuAEQDd4WRXyWa4sZ5EeS9OPsRrU_jU3PbZdDcANxfh_WSeSvSBKGfGXGC3fIzu0Ernk9VjXcs3LeFdRq2N4nNRZvCzsd_MjBtn7CWgjM_Sk9DXEGn3cHHilcJUJQ4i2YgX9bHu0odNgE6cSVIUEMIC2EGuGk_I7lwroinAAwXpNLLQkV_25kv_QQof2i5f7AocY6QTd0SAo8ZUqFBzanupkeFpl3-Bsz6_zdt_N0x9k5XHQn42Q2oTupTwvXFbE1x8XtCpiaP3_fsQ9dN7t4z6HtwlNUJB2tFfF6PgdKZ9LuJpYjFPYzJQ6Rv28fuc8YHcF7Jittjyzmew\",\n\t\t\t\t\t\"e\": \"AQAB\"\n\t\t\t\t}\n\t\t\t]\n\t\t}`\n\n\t\tserver := mockJWKSServer(t, http.StatusOK, noAlgResponse)\n\n\t\tclient, _ := testClientSetup(t, server.URL)\n\t\t_, err := 
client.FetchKey(t.Context())\n\n\t\trequire.Error(t, err)\n\t\tassert.Contains(t, err.Error(), \"no valid RSA keys found\")\n\t})\n\n\tt.Run(\"handles empty key set\", func(t *testing.T) {\n\t\temptyKeysResponse := `{\n\t\t\t\"keys\": []\n\t\t}`\n\n\t\tserver := mockJWKSServer(t, http.StatusOK, emptyKeysResponse)\n\n\t\tclient, _ := testClientSetup(t, server.URL)\n\t\t_, err := client.FetchKey(t.Context())\n\n\t\trequire.Error(t, err)\n\t\tassert.Contains(t, err.Error(), \"no valid RSA keys found\")\n\t})\n}\n"
  },
  {
    "path": "internal/envelope/keyfetch/doc.go",
    "content": "// Package keyfetch provides a client for fetching encryption keys from an HTTP endpoint.\n//\n// The client retrieves public keys in JSON Web Key Set (JWKs) format from a remote\n// server and converts them into usable cryptographic keys for envelope encryption.\n//\n// This package uses github.com/lestrrat-go/jwx/v3/jwk for JWK parsing and handling.\n//\n// Currently, keyfetch only supports RSA keys for envelope encryption.\npackage keyfetch\n"
  },
  {
    "path": "internal/envelope/keyfetch/fake.go",
    "content": "package keyfetch\n\nimport (\n\t\"context\"\n\t\"crypto/rand\"\n\t\"crypto/rsa\"\n\t\"fmt\"\n)\n\n// Compile-time check that FakeClient implements KeyFetcher\nvar _ KeyFetcher = (*FakeClient)(nil)\n\n// FakeClient is a fake implementation of the key fetcher for testing.\n// It can be configured to return specific keys or errors for testing different scenarios.\ntype FakeClient struct {\n\t// Key is the public key that will be returned by FetchKey.\n\t// If nil, a random key will be generated on the first call.\n\tKey *PublicKey\n\n\t// Err is the error that will be returned by FetchKey.\n\t// If both Key and Err are set, Err takes precedence.\n\tErr error\n\n\t// FetchKeyCalls tracks how many times FetchKey was called\n\tFetchKeyCalls int\n}\n\n// NewFakeClient creates a new fake client for testing.\nfunc NewFakeClient() *FakeClient {\n\treturn &FakeClient{}\n}\n\n// NewFakeClientWithKey creates a new fake client that returns the specified key.\nfunc NewFakeClientWithKey(keyID string, key *rsa.PublicKey) *FakeClient {\n\treturn &FakeClient{\n\t\tKey: &PublicKey{\n\t\t\tKeyID: keyID,\n\t\t\tKey:   key,\n\t\t},\n\t}\n}\n\n// NewFakeClientWithError creates a new fake client that returns the specified error.\nfunc NewFakeClientWithError(err error) *FakeClient {\n\treturn &FakeClient{\n\t\tErr: err,\n\t}\n}\n\n// FetchKey implements the key fetching interface for testing.\n// It returns the configured key or error, or generates a random key if none is configured.\nfunc (f *FakeClient) FetchKey(ctx context.Context) (PublicKey, error) {\n\tf.FetchKeyCalls++\n\n\t// Check if context is canceled\n\tif ctx.Err() != nil {\n\t\treturn PublicKey{}, ctx.Err()\n\t}\n\n\t// If an error is configured, return it\n\tif f.Err != nil {\n\t\treturn PublicKey{}, f.Err\n\t}\n\n\t// If a key is configured, return it\n\tif f.Key != nil {\n\t\treturn *f.Key, nil\n\t}\n\n\t// Generate a random key for testing\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 
minRSAKeySize)\n\tif err != nil {\n\t\treturn PublicKey{}, fmt.Errorf(\"failed to generate test key: %w\", err)\n\t}\n\n\tgeneratedKey := PublicKey{\n\t\tKeyID: \"test-key\",\n\t\tKey:   &privateKey.PublicKey,\n\t}\n\n\t// Cache the generated key for subsequent calls\n\tf.Key = &generatedKey\n\n\treturn generatedKey, nil\n}\n"
  },
  {
    "path": "internal/envelope/keyfetch/fake_test.go",
    "content": "package keyfetch\n\nimport (\n\t\"context\"\n\t\"crypto/rand\"\n\t\"crypto/rsa\"\n\t\"errors\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestFakeClient(t *testing.T) {\n\tt.Run(\"returns generated key by default\", func(t *testing.T) {\n\t\tfake := NewFakeClient()\n\n\t\tkey, err := fake.FetchKey(t.Context())\n\t\trequire.NoError(t, err)\n\n\t\tassert.Equal(t, \"test-key\", key.KeyID)\n\t\tassert.NotNil(t, key.Key)\n\t\tassert.Equal(t, 1, fake.FetchKeyCalls)\n\n\t\t// Subsequent calls return the same key\n\t\tkey2, err := fake.FetchKey(t.Context())\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, key.KeyID, key2.KeyID)\n\t\tassert.Equal(t, key.Key, key2.Key)\n\t\tassert.Equal(t, 2, fake.FetchKeyCalls)\n\t})\n\n\tt.Run(\"returns configured key\", func(t *testing.T) {\n\t\tprivateKey, err := rsa.GenerateKey(rand.Reader, minRSAKeySize)\n\t\trequire.NoError(t, err)\n\n\t\tfake := NewFakeClientWithKey(\"custom-key\", &privateKey.PublicKey)\n\n\t\tkey, err := fake.FetchKey(t.Context())\n\t\trequire.NoError(t, err)\n\n\t\tassert.Equal(t, \"custom-key\", key.KeyID)\n\t\tassert.Equal(t, &privateKey.PublicKey, key.Key)\n\t\tassert.Equal(t, 1, fake.FetchKeyCalls)\n\t})\n\n\tt.Run(\"returns configured error\", func(t *testing.T) {\n\t\texpectedErr := errors.New(\"test error\")\n\t\tfake := NewFakeClientWithError(expectedErr)\n\n\t\t_, err := fake.FetchKey(t.Context())\n\t\trequire.Error(t, err)\n\n\t\tassert.Equal(t, expectedErr, err)\n\t\tassert.Equal(t, 1, fake.FetchKeyCalls)\n\t})\n\n\tt.Run(\"respects context cancellation\", func(t *testing.T) {\n\t\tfake := NewFakeClient()\n\n\t\tctx, cancel := context.WithCancel(t.Context())\n\t\tcancel()\n\n\t\t_, err := fake.FetchKey(ctx)\n\t\trequire.Error(t, err)\n\n\t\tassert.Equal(t, context.Canceled, err)\n\t\tassert.Equal(t, 1, fake.FetchKeyCalls)\n\t})\n\n\tt.Run(\"error takes precedence over key\", func(t *testing.T) {\n\t\tprivateKey, err 
:= rsa.GenerateKey(rand.Reader, minRSAKeySize)\n\t\trequire.NoError(t, err)\n\n\t\texpectedErr := errors.New(\"test error\")\n\t\tfake := &FakeClient{\n\t\t\tKey: &PublicKey{\n\t\t\t\tKeyID: \"custom-key\",\n\t\t\t\tKey:   &privateKey.PublicKey,\n\t\t\t},\n\t\t\tErr: expectedErr,\n\t\t}\n\n\t\t_, err = fake.FetchKey(t.Context())\n\t\trequire.Error(t, err)\n\n\t\tassert.Equal(t, expectedErr, err)\n\t})\n}\n"
  },
  {
    "path": "internal/envelope/rsa/doc.go",
    "content": "// Package rsa implements RSA envelope encryption using JWE (JSON Web Encryption) format.\n// It conforms to the interface in the envelope package.\n//\n// The implementation uses:\n//   - RSA-OAEP-256 (RSA-OAEP with SHA-256) for key encryption\n//   - AES-256-GCM (A256GCM) for content encryption\n//   - JWE Compact Serialization format as defined in RFC 7516\n//\n// The output is a JWE string with 5 base64url-encoded parts separated by dots:\n// header.encryptedKey.iv.ciphertext.tag\npackage rsa\n"
  },
  {
    "path": "internal/envelope/rsa/encryptor.go",
    "content": "package rsa\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com/lestrrat-go/jwx/v3/jwa\"\n\t\"github.com/lestrrat-go/jwx/v3/jwe\"\n\n\t\"github.com/jetstack/preflight/internal/envelope\"\n\t\"github.com/jetstack/preflight/internal/envelope/keyfetch\"\n)\n\nconst (\n\t// EncryptionType is the type identifier for RSA JWE encryption\n\tEncryptionType = \"JWE-RSA\"\n)\n\n// Compile-time check that Encryptor implements envelope.Encryptor\nvar _ envelope.Encryptor = (*Encryptor)(nil)\n\n// Encryptor provides envelope encryption using RSA-OAEP-256 for key wrapping\n// and AES-256-GCM for data encryption, outputting JWE Compact Serialization format.\ntype Encryptor struct {\n\tfetcher keyfetch.KeyFetcher\n}\n\n// NewEncryptor creates a new Encryptor with the provided key fetcher.\n// The encryptor will use RSA-OAEP-256 for key encryption and A256GCM for content encryption.\nfunc NewEncryptor(fetcher keyfetch.KeyFetcher) (*Encryptor, error) {\n\treturn &Encryptor{\n\t\tfetcher: fetcher,\n\t}, nil\n}\n\n// Encrypt performs envelope encryption on the provided data.\n// It returns an EncryptedData struct containing JWE Compact Serialization format and type metadata.\n// The JWE uses RSA-OAEP-256 for key encryption and A256GCM for content encryption.\nfunc (e *Encryptor) Encrypt(ctx context.Context, data []byte) (*envelope.EncryptedData, error) {\n\tif len(data) == 0 {\n\t\treturn nil, fmt.Errorf(\"data to encrypt cannot be empty\")\n\t}\n\n\tkey, err := e.fetcher.FetchKey(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to fetch encryption key: %w\", err)\n\t}\n\n\t// Create headers with the key ID\n\theaders := jwe.NewHeaders()\n\tif err := headers.Set(\"kid\", key.KeyID); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to set key ID header: %w\", err)\n\t}\n\n\t// Encrypt using RSA-OAEP-256 for key algorithm and A256GCM for content encryption\n\t// TODO: When standardised, consider using secret.Do to wrap this call, since it will generate an 
AES key\n\t// (see https://pkg.go.dev/runtime/secret)\n\tencrypted, err := jwe.Encrypt(\n\t\tdata,\n\t\tjwe.WithKey(jwa.RSA_OAEP_256(), key.Key, jwe.WithPerRecipientHeaders(headers)),\n\t\tjwe.WithContentEncryption(jwa.A256GCM()),\n\t\tjwe.WithCompact(),\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to encrypt data: %w\", err)\n\t}\n\n\treturn &envelope.EncryptedData{\n\t\tData: encrypted,\n\t\tType: EncryptionType,\n\t}, nil\n}\n"
  },
  {
    "path": "internal/envelope/rsa/encryptor_test.go",
    "content": "package rsa\n\nimport (\n\t\"crypto/rand\"\n\t\"crypto/rsa\"\n\t\"encoding/base64\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com/lestrrat-go/jwx/v3/jwa\"\n\t\"github.com/lestrrat-go/jwx/v3/jwe\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/jetstack/preflight/internal/envelope/keyfetch\"\n)\n\nconst (\n\ttestKeyID = \"test-key-id\"\n\t// minRSAKeySize is the minimum RSA key size used for test key generation\n\tminRSAKeySize = 2048\n)\n\nvar (\n\ttestKeyOnce     sync.Once\n\tinternalTestKey *rsa.PrivateKey\n)\n\n// testKey generates and returns a singleton RSA private key for testing purposes,\n// to avoid needing to generate a new key for each test.\nfunc testKey() *rsa.PrivateKey {\n\ttestKeyOnce.Do(func() {\n\t\tkey, err := rsa.GenerateKey(rand.Reader, minRSAKeySize)\n\t\tif err != nil {\n\t\t\tpanic(\"failed to generate test RSA key: \" + err.Error())\n\t\t}\n\n\t\tinternalTestKey = key\n\t})\n\n\treturn internalTestKey\n}\n\nfunc TestEncrypt_VariousDataSizes(t *testing.T) {\n\tfetcher := keyfetch.NewFakeClient()\n\n\tenc, err := NewEncryptor(fetcher)\n\trequire.NoError(t, err)\n\n\ttests := []struct {\n\t\tname     string\n\t\tdataSize int\n\t}{\n\t\t{\"small (10 bytes)\", 10},\n\t\t{\"medium (1 KB)\", 1024},\n\t\t{\"large (1 MB)\", 1024 * 1024},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tdata := make([]byte, tt.dataSize)\n\t\t\t_, err := rand.Read(data)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tresult, err := enc.Encrypt(t.Context(), data)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NotNil(t, result)\n\t\t\trequire.Equal(t, EncryptionType, result.Type, \"Type should be JWE-RSA\")\n\n\t\t\t// Verify JWE Compact Serialization format (5 base64url parts separated by dots)\n\t\t\tjweString := string(result.Data)\n\t\t\tparts := strings.Split(jweString, \".\")\n\t\t\trequire.Len(t, parts, 5, \"JWE Compact Serialization should have 5 parts\")\n\n\t\t\t// Verify each part is 
non-empty\n\t\t\tfor i, part := range parts {\n\t\t\t\trequire.NotEmpty(t, part, \"JWE part %d should not be empty\", i)\n\n\t\t\t\t_, err = base64.RawURLEncoding.DecodeString(part)\n\t\t\t\trequire.NoError(t, err, \"JWE part %d should be valid base64url: %s\", i, part)\n\t\t\t}\n\n\t\t\t// Verify the result differs from input\n\t\t\trequire.NotEqual(t, data, result.Data)\n\t\t})\n\t}\n}\n\nfunc TestEncrypt_EmptyData(t *testing.T) {\n\tfetcher := keyfetch.NewFakeClient()\n\n\tenc, err := NewEncryptor(fetcher)\n\trequire.NoError(t, err)\n\n\tresult, err := enc.Encrypt(t.Context(), []byte{})\n\trequire.Error(t, err)\n\trequire.Nil(t, result)\n\trequire.Contains(t, err.Error(), \"cannot be empty\")\n}\n\nfunc TestEncrypt_NonDeterministic(t *testing.T) {\n\tfetcher := keyfetch.NewFakeClient()\n\n\tenc, err := NewEncryptor(fetcher)\n\trequire.NoError(t, err)\n\n\tdata := []byte(\"test data for encryption\")\n\n\t// Encrypt the same data twice\n\tresult1, err := enc.Encrypt(t.Context(), data)\n\trequire.NoError(t, err)\n\trequire.Equal(t, EncryptionType, result1.Type, \"Type should be JWE-RSA\")\n\n\tresult2, err := enc.Encrypt(t.Context(), data)\n\trequire.NoError(t, err)\n\trequire.Equal(t, EncryptionType, result2.Type, \"Type should be JWE-RSA\")\n\n\t// Results should be different due to random nonces and RSA-OAEP randomness\n\trequire.NotEqual(t, result1.Data, result2.Data, \"Encrypting the same data twice should produce different JWE outputs\")\n}\n\nfunc TestEncrypt_JWEFormat(t *testing.T) {\n\tkey := testKey()\n\tfetcher := keyfetch.NewFakeClientWithKey(testKeyID, &key.PublicKey)\n\n\tenc, err := NewEncryptor(fetcher)\n\trequire.NoError(t, err)\n\n\tdata := []byte(\"test data\")\n\tresult, err := enc.Encrypt(t.Context(), data)\n\trequire.NoError(t, err)\n\trequire.Equal(t, EncryptionType, result.Type, \"Type should be JWE-RSA\")\n\n\t// Parse and decrypt the JWE to verify format and algorithms\n\tdecrypted, err := jwe.Decrypt(result.Data, 
jwe.WithKey(jwa.RSA_OAEP_256(), key), jwe.WithContext(t.Context()))\n\trequire.NoError(t, err, \"Result should be valid JWE with RSA-OAEP-256 and A256GCM, and should decrypt successfully\")\n\trequire.Equal(t, data, decrypted, \"Decrypted data should match original\")\n}\n\nfunc TestEncrypt_DecryptRoundtrip(t *testing.T) {\n\tkey := testKey()\n\tfetcher := keyfetch.NewFakeClientWithKey(testKeyID, &key.PublicKey)\n\n\tenc, err := NewEncryptor(fetcher)\n\trequire.NoError(t, err)\n\n\toriginalData := []byte(\"test data for roundtrip encryption and decryption\")\n\n\t// Encrypt the data\n\tencrypted, err := enc.Encrypt(t.Context(), originalData)\n\trequire.NoError(t, err)\n\trequire.Equal(t, EncryptionType, encrypted.Type, \"Type should be JWE-RSA\")\n\n\tmsg, err := jwe.Parse(encrypted.Data)\n\trequire.NoError(t, err)\n\n\theaders := msg.ProtectedHeaders()\n\n\tkidHeader, ok := headers.KeyID()\n\trequire.True(t, ok, \"JWE should contain 'kid' header\")\n\trequire.Equal(t, testKeyID, kidHeader, \"JWE 'kid' header should match the encryptor's key ID\")\n\n\t// Decrypt using the private key\n\tdecrypted, err := jwe.Decrypt(encrypted.Data, jwe.WithKey(jwa.RSA_OAEP_256(), key), jwe.WithContext(t.Context()))\n\trequire.NoError(t, err, \"Decryption should succeed with the correct private key\")\n\n\t// Verify the decrypted data matches the original\n\trequire.Equal(t, originalData, decrypted, \"Decrypted data should match original data\")\n}\n"
  },
  {
    "path": "internal/envelope/rsa/keys.go",
    "content": "package rsa\n\nimport (\n\t\"crypto/rsa\"\n\t\"crypto/x509\"\n\t\"encoding/pem\"\n\t\"fmt\"\n\t\"os\"\n)\n\n// This file contains helpers for loading keys. In practice we'll retrieve keys in some format from a DisCo endpoint\n\nconst (\n\t// HardcodedPublicKeyPEM contains a temporary hardcoded RSA public key (2048-bit) for envelope encryption.\n\t// This is a TEMPORARY solution for initial development and testing.\n\t// TODO: Replace with dynamic key fetching from CyberArk Discovery & Context API.\n\tHardcodedPublicKeyPEM = `-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoeq+dk4aoGdV9xjrnGJt\nVbUh5jvkQgynkP+9Ph2NVeoasXWqYOmOVeKOI7Yr58W/L8Mro6C22iSEJrPFgPF6\nt+RJsLAsAY6w1Pocq16COeelAWtxhHQGXt77WQKk0kmwhOJZ4VSeiQC4hWLUnq4N\nFt7lwLw/50opTXLuSErrwec/bEV7G/Xp11BMsHGEL7dzpwWAfIrbCEomyWrO/L6p\nO3SAgYMdfup5ddnszeCU2FbFQziOkuMLOyir91XXk8wgdSy4IGAEGpwNx88i8fuj\nQafze2aGWUtpWlOEQPP8lH2cj2TGUgLxGITbczJRcwuGIoJBOzAmPDWi/bapj4b6\nzQIDAQAB\n-----END PUBLIC KEY-----`\n\n\t// hardcodedUID is a temporary hardcoded UID associated with the hardcoded public key\n\t// It was randomly generated with the macOS \"uuidgen\" command\n\thardcodedUID = \"A39798E6-8CE7-4E6E-9CF6-24A3C923B3A7\"\n)\n\n// LoadPublicKeyFromPEM parses an RSA public key from PEM-encoded bytes.\n// The PEM block should be of type \"PUBLIC KEY\" or \"RSA PUBLIC KEY\".\nfunc LoadPublicKeyFromPEM(pemBytes []byte) (*rsa.PublicKey, error) {\n\tblock, _ := pem.Decode(pemBytes)\n\tif block == nil {\n\t\treturn nil, fmt.Errorf(\"failed to decode PEM block\")\n\t}\n\n\t// Try parsing as PKIX public key first (most common format)\n\tif block.Type == \"PUBLIC KEY\" {\n\t\tpubKey, err := x509.ParsePKIXPublicKey(block.Bytes)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse PKIX public key: %w\", err)\n\t\t}\n\n\t\trsaKey, ok := pubKey.(*rsa.PublicKey)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"key is not an RSA public key, got %T\", pubKey)\n\t\t}\n\n\t\treturn 
rsaKey, nil\n\t}\n\n\t// Try parsing as PKCS1 RSA public key\n\tif block.Type == \"RSA PUBLIC KEY\" {\n\t\trsaKey, err := x509.ParsePKCS1PublicKey(block.Bytes)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse PKCS1 RSA public key: %w\", err)\n\t\t}\n\n\t\treturn rsaKey, nil\n\t}\n\n\treturn nil, fmt.Errorf(\"unsupported PEM block type: %s (expected PUBLIC KEY or RSA PUBLIC KEY)\", block.Type)\n}\n\n// LoadPublicKeyFromPEMFile reads and parses an RSA public key from a PEM file.\nfunc LoadPublicKeyFromPEMFile(path string) (*rsa.PublicKey, error) {\n\tpemBytes, err := os.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read PEM file: %w\", err)\n\t}\n\n\treturn LoadPublicKeyFromPEM(pemBytes)\n}\n\n// LoadHardcodedPublicKey loads and parses the hardcoded RSA public key.\n// Returns a hardcoded UID associated with the key.\n// This is a temporary solution for initial development and testing.\n// Returns an error if the hardcoded key is invalid or cannot be parsed.\nfunc LoadHardcodedPublicKey() (*rsa.PublicKey, string, error) {\n\tkey, err := LoadPublicKeyFromPEM([]byte(HardcodedPublicKeyPEM))\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\n\treturn key, hardcodedUID, nil\n}\n"
  },
  {
    "path": "internal/envelope/rsa/keys_test.go",
    "content": "package rsa_test\n\nimport (\n\t\"crypto/ecdsa\"\n\t\"crypto/elliptic\"\n\t\"crypto/rand\"\n\t\"crypto/rsa\"\n\t\"crypto/x509\"\n\t\"encoding/pem\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/jetstack/preflight/internal/envelope/keyfetch\"\n\tinternalrsa \"github.com/jetstack/preflight/internal/envelope/rsa\"\n)\n\nfunc generateTestKeyPEM(t *testing.T, keySize int, pemType string) []byte {\n\tt.Helper()\n\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, keySize)\n\trequire.NoError(t, err)\n\n\tvar pemBytes []byte\n\tif pemType == \"PUBLIC KEY\" {\n\t\t// PKIX format\n\t\tpublicKeyBytes, err := x509.MarshalPKIXPublicKey(&privateKey.PublicKey)\n\t\trequire.NoError(t, err)\n\n\t\tpemBytes = pem.EncodeToMemory(&pem.Block{\n\t\t\tType:  \"PUBLIC KEY\",\n\t\t\tBytes: publicKeyBytes,\n\t\t})\n\t} else {\n\t\t// PKCS1 format\n\t\tpublicKeyBytes := x509.MarshalPKCS1PublicKey(&privateKey.PublicKey)\n\n\t\tpemBytes = pem.EncodeToMemory(&pem.Block{\n\t\t\tType:  \"RSA PUBLIC KEY\",\n\t\t\tBytes: publicKeyBytes,\n\t\t})\n\t}\n\n\trequire.NotNil(t, pemBytes)\n\treturn pemBytes\n}\n\nfunc TestLoadPublicKeyFromPEM_PKIX(t *testing.T) {\n\tpemBytes := generateTestKeyPEM(t, 2048, \"PUBLIC KEY\")\n\n\tkey, err := internalrsa.LoadPublicKeyFromPEM(pemBytes)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, key)\n\trequire.Equal(t, 2048, key.N.BitLen())\n}\n\nfunc TestLoadPublicKeyFromPEM_PKCS1(t *testing.T) {\n\tpemBytes := generateTestKeyPEM(t, 2048, \"RSA PUBLIC KEY\")\n\n\tkey, err := internalrsa.LoadPublicKeyFromPEM(pemBytes)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, key)\n\trequire.Equal(t, 2048, key.N.BitLen())\n}\n\nfunc TestLoadPublicKeyFromPEM_InvalidPEM(t *testing.T) {\n\tinvalidPEM := []byte(\"this is not a valid PEM\")\n\n\tkey, err := internalrsa.LoadPublicKeyFromPEM(invalidPEM)\n\trequire.Error(t, err)\n\trequire.Nil(t, key)\n\trequire.Contains(t, err.Error(), \"failed to decode PEM 
block\")\n}\n\nfunc TestLoadPublicKeyFromPEM_WrongPEMType(t *testing.T) {\n\t// Create a PEM block with wrong type\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\trequire.NoError(t, err)\n\n\tprivateKeyBytes := x509.MarshalPKCS1PrivateKey(privateKey)\n\tpemBytes := pem.EncodeToMemory(&pem.Block{\n\t\tType:  \"RSA PRIVATE KEY\",\n\t\tBytes: privateKeyBytes,\n\t})\n\n\tkey, err := internalrsa.LoadPublicKeyFromPEM(pemBytes)\n\trequire.Error(t, err)\n\trequire.Nil(t, key)\n\trequire.Contains(t, err.Error(), \"unsupported PEM block type\")\n}\n\nfunc TestLoadPublicKeyFromPEM_NonRSAKey(t *testing.T) {\n\t// Generate a real ECDSA key and try to load it as RSA\n\tecdsaKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)\n\trequire.NoError(t, err)\n\n\t// Marshal as PKIX public key\n\tpublicKeyBytes, err := x509.MarshalPKIXPublicKey(&ecdsaKey.PublicKey)\n\trequire.NoError(t, err)\n\n\tpemBytes := pem.EncodeToMemory(&pem.Block{\n\t\tType:  \"PUBLIC KEY\",\n\t\tBytes: publicKeyBytes,\n\t})\n\n\tkey, err := internalrsa.LoadPublicKeyFromPEM(pemBytes)\n\trequire.Error(t, err)\n\trequire.Nil(t, key)\n\trequire.Contains(t, err.Error(), \"not an RSA public key\")\n}\n\nfunc TestLoadPublicKeyFromPEMFile_ValidFile(t *testing.T) {\n\ttmpDir := t.TempDir()\n\tkeyPath := filepath.Join(tmpDir, \"test_key.pem\")\n\n\tpemBytes := generateTestKeyPEM(t, 2048, \"PUBLIC KEY\")\n\terr := os.WriteFile(keyPath, pemBytes, 0600)\n\trequire.NoError(t, err)\n\n\tkey, err := internalrsa.LoadPublicKeyFromPEMFile(keyPath)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, key)\n\trequire.Equal(t, 2048, key.N.BitLen())\n}\n\nfunc TestLoadPublicKeyFromPEMFile_MissingFile(t *testing.T) {\n\tkey, err := internalrsa.LoadPublicKeyFromPEMFile(\"/nonexistent/path/key.pem\")\n\trequire.Error(t, err)\n\trequire.Nil(t, key)\n\trequire.Contains(t, err.Error(), \"failed to read PEM file\")\n}\n\nfunc TestLoadPublicKeyFromPEMFile_InvalidContent(t *testing.T) {\n\ttmpDir := t.TempDir()\n\tkeyPath := 
filepath.Join(tmpDir, \"invalid_key.pem\")\n\n\terr := os.WriteFile(keyPath, []byte(\"not a valid PEM\"), 0600)\n\trequire.NoError(t, err)\n\n\tkey, err := internalrsa.LoadPublicKeyFromPEMFile(keyPath)\n\trequire.Error(t, err)\n\trequire.Nil(t, key)\n}\n\nfunc TestLoadHardcodedPublicKey_CanBeUsedWithEncryptor(t *testing.T) {\n\t// Test that the hardcoded key can be used to create an encryptor\n\t// First, test that the key can be loaded successfully\n\tkey, uid, err := internalrsa.LoadHardcodedPublicKey()\n\trequire.NoError(t, err)\n\trequire.NotNil(t, key)\n\trequire.NotEmpty(t, uid)\n\n\tfetcher := keyfetch.NewFakeClientWithKey(uid, key)\n\tencryptor, err := internalrsa.NewEncryptor(fetcher)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, encryptor)\n\n\t// Test that the encryptor can encrypt data\n\ttestData := []byte(\"test data for encryption\")\n\tencryptedData, err := encryptor.Encrypt(t.Context(), testData)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, encryptedData)\n\trequire.NotEmpty(t, encryptedData.Data)\n\trequire.Equal(t, \"JWE-RSA\", encryptedData.Type)\n}\n"
  },
  {
    "path": "internal/envelope/types.go",
    "content": "package envelope\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n)\n\n// EncryptedData represents encrypted data along with metadata about the encryption type.\ntype EncryptedData struct {\n\t// Data contains the encrypted payload\n\tData []byte `json:\"data\"`\n\t// Type indicates the encryption format (e.g., \"JWE-RSA\")\n\tType string `json:\"type\"`\n}\n\n// ToMap converts the EncryptedData struct to a map representation. Since we store data as an \"_encryptedData\" field in\n// a Kubernetes unstructured object, passing a raw struct would cause a panic due to the behaviour of\n// https://pkg.go.dev/k8s.io/apimachinery/pkg/runtime#DeepCopyJSONValue\n// Passing a map to unstructured.SetNestedField avoids this issue.\nfunc (ed *EncryptedData) ToMap() map[string]any {\n\tmarshalled, err := json.Marshal(ed)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tvar out map[string]any\n\n\terr = json.Unmarshal(marshalled, &out)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\treturn out\n}\n\n// Encryptor performs envelope encryption on arbitrary data.\ntype Encryptor interface {\n\t// Encrypt encrypts data using envelope encryption, returning an EncryptedData struct\n\t// containing the encrypted payload and encryption type metadata.\n\tEncrypt(ctx context.Context, data []byte) (*EncryptedData, error)\n}\n"
  },
  {
    "path": "klone.yaml",
    "content": "# This klone.yaml file describes the Makefile modules and versions that are\n# cloned into the \"make/_shared\" folder. These modules are dynamically imported\n# by the root Makefile. The \"make upgrade-klone\" target can be used to pull\n# the latest version from the upstream repositories (using the repo_ref value).\n#\n# More info can be found here: https://github.com/cert-manager/makefile-modules\n\ntargets:\n  make/_shared:\n    - folder_name: generate-verify\n      repo_url: https://github.com/cert-manager/makefile-modules.git\n      repo_ref: main\n      repo_hash: 962eeffd065691abd2644eb514a7ec4cc47808fb\n      repo_path: modules/generate-verify\n    - folder_name: go\n      repo_url: https://github.com/cert-manager/makefile-modules.git\n      repo_ref: main\n      repo_hash: 962eeffd065691abd2644eb514a7ec4cc47808fb\n      repo_path: modules/go\n    - folder_name: helm\n      repo_url: https://github.com/cert-manager/makefile-modules.git\n      repo_ref: main\n      repo_hash: 962eeffd065691abd2644eb514a7ec4cc47808fb\n      repo_path: modules/helm\n    - folder_name: help\n      repo_url: https://github.com/cert-manager/makefile-modules.git\n      repo_ref: main\n      repo_hash: 962eeffd065691abd2644eb514a7ec4cc47808fb\n      repo_path: modules/help\n    - folder_name: kind\n      repo_url: https://github.com/cert-manager/makefile-modules.git\n      repo_ref: main\n      repo_hash: 962eeffd065691abd2644eb514a7ec4cc47808fb\n      repo_path: modules/kind\n    - folder_name: klone\n      repo_url: https://github.com/cert-manager/makefile-modules.git\n      repo_ref: main\n      repo_hash: 962eeffd065691abd2644eb514a7ec4cc47808fb\n      repo_path: modules/klone\n    - folder_name: licenses\n      repo_url: https://github.com/cert-manager/makefile-modules.git\n      repo_ref: main\n      repo_hash: 962eeffd065691abd2644eb514a7ec4cc47808fb\n      repo_path: modules/licenses\n    - folder_name: oci-build\n      repo_url: 
https://github.com/cert-manager/makefile-modules.git\n      repo_ref: main\n      repo_hash: 962eeffd065691abd2644eb514a7ec4cc47808fb\n      repo_path: modules/oci-build\n    - folder_name: oci-publish\n      repo_url: https://github.com/cert-manager/makefile-modules.git\n      repo_ref: main\n      repo_hash: 962eeffd065691abd2644eb514a7ec4cc47808fb\n      repo_path: modules/oci-publish\n    - folder_name: repository-base\n      repo_url: https://github.com/cert-manager/makefile-modules.git\n      repo_ref: main\n      repo_hash: 962eeffd065691abd2644eb514a7ec4cc47808fb\n      repo_path: modules/repository-base\n    - folder_name: tools\n      repo_url: https://github.com/cert-manager/makefile-modules.git\n      repo_ref: main\n      repo_hash: 962eeffd065691abd2644eb514a7ec4cc47808fb\n      repo_path: modules/tools\n"
  },
  {
    "path": "main.go",
    "content": "package main\n\nimport \"github.com/jetstack/preflight/cmd\"\n\nfunc main() {\n\tcmd.Execute()\n}\n"
  },
  {
    "path": "make/00_mod.mk",
    "content": "repo_name := github.com/jetstack/jetstack-secure\n# This is a work around for the mismatch between the repo name and the go module\n# name. It allows golangci-lint to group the github.com/jetstack/preflight\n# imports correctly. And it allows the version information to be injected into\n# the version package via Go ldflags.\n#\n# TODO(wallrj): Rename the Go module to match the repository name.\ngomodule_name := github.com/jetstack/preflight\n\ngenerate-golangci-lint-config: repo_name := $(gomodule_name)\n\nlicense_ignore := gitlab.com/venafi,github.com/jetstack\n\nkind_cluster_name := preflight\nkind_cluster_config := $(bin_dir)/scratch/kind_cluster.yaml\n\nbuild_names := preflight\n\ngo_preflight_main_dir := .\ngo_preflight_mod_dir := .\ngo_preflight_ldflags := \\\n\t-X $(gomodule_name)/pkg/version.PreflightVersion=$(VERSION) \\\n\t-X $(gomodule_name)/pkg/version.Commit=$(GITCOMMIT) \\\n\t-X $(gomodule_name)/pkg/version.BuildDate=$(shell date \"+%F-%T-%Z\") \\\n\t-X $(gomodule_name)/pkg/client.ClientID=k3TrDbfLhCgnpAbOiiT2kIE1AbovKzjo \\\n\t-X $(gomodule_name)/pkg/client.ClientSecret=f39w_3KT9Vp0VhzcPzvh-uVbudzqCFmHER3Huj0dvHgJwVrjxsoOQPIw_1SDiCfa \\\n\t-X $(gomodule_name)/pkg/client.AuthServerDomain=auth.jetstack.io\n\noci_preflight_base_image_flavor := static\noci_preflight_image_name := quay.io/jetstack/venafi-agent\noci_preflight_image_tag := $(VERSION)\noci_preflight_image_name_development := jetstack.local/venafi-agent\n\n# Annotations are the standardised set of annotations we set on every component we publish\noci_preflight_build_args := \\\n\t--image-annotation=\"org.opencontainers.image.vendor\"=\"CyberArk Software Ltd.\" \\\n\t--image-annotation=\"org.opencontainers.image.licenses\"=\"EULA - https://www.cyberark.com/contract-terms/\" \\\n\t--image-annotation=\"org.opencontainers.image.authors\"=\"support@cyberark.com\" \\\n\t--image-annotation=\"org.opencontainers.image.title\"=\"Discovery Agent for CyberArk Certificate Manager in 
Kubernetes and OpenShift Environments\" \\\n\t--image-annotation=\"org.opencontainers.image.description\"=\"Gathers machine identity data from Kubernetes clusters.\" \\\n\t--image-annotation=\"org.opencontainers.image.url\"=\"https://www.cyberark.com/products/certificate-manager-for-kubernetes/\" \\\n\t--image-annotation=\"org.opencontainers.image.documentation\"=\"https://docs.cyberark.com/mis-saas/vaas/k8s-components/c-tlspk-agent-overview/\" \\\n\t--image-annotation=\"org.opencontainers.image.version\"=\"$(VERSION)\" \\\n\t--image-annotation=\"org.opencontainers.image.revision\"=\"$(GITCOMMIT)\"\n\ndeploy_name := venafi-kubernetes-agent\ndeploy_namespace := venafi\n\nhelm_chart_source_dir := deploy/charts/venafi-kubernetes-agent\nhelm_chart_image_name := quay.io/jetstack/charts/venafi-kubernetes-agent\nhelm_chart_version := $(VERSION)\nhelm_labels_template_name := preflight.labels\n\n# We skip using the upstream govulncheck generate target because we need to customise the workflow YAML\n# locally. We provide the targets in this repo instead, and manually maintain the workflow.\ndont_generate_govulncheck := true\n\nhelm_image_name ?= $(oci_preflight_image_name)\nhelm_image_tag ?= $(oci_preflight_image_tag)\n\n# Allows us to replace the Helm values.yaml's image.repository and image.tag\n# with the right values.\ndefine helm_values_mutation_function\necho \"no mutations defined for this chart\"\nendef\n\ngolangci_lint_config := .golangci.yaml\ngo_header_file := /dev/null\n\ninclude make/extra_tools.mk\ninclude make/ark/00_mod.mk\ninclude make/ngts/00_mod.mk\n"
  },
  {
    "path": "make/02_mod.mk",
    "content": "include make/test-unit.mk\ninclude make/ark/02_mod.mk\ninclude make/ngts/02_mod.mk\n\nGITHUB_OUTPUT ?= /dev/stderr\n.PHONY: release\n## Publish all release artifacts (image + helm chart)\n## @category [shared] Release\nrelease:\n\t$(MAKE) oci-push-preflight\n\t$(MAKE) helm-chart-oci-push\n\n\t@echo \"RELEASE_OCI_PREFLIGHT_IMAGE=$(oci_preflight_image_name)\" >> \"$(GITHUB_OUTPUT)\"\n\t@echo \"RELEASE_OCI_PREFLIGHT_TAG=$(oci_preflight_image_tag)\" >> \"$(GITHUB_OUTPUT)\"\n\t@echo \"RELEASE_HELM_CHART_IMAGE=$(helm_chart_image_name)\" >> \"$(GITHUB_OUTPUT)\"\n\t@echo \"RELEASE_HELM_CHART_VERSION=$(helm_chart_version)\" >> \"$(GITHUB_OUTPUT)\"\n\n\t@echo \"Release complete!\"\n\n.PHONY: generate-crds-venconn\n## Pulls the VenafiConnection CRD from the venafi-connection-lib Go module.\n## @category [shared] Generate/ Verify\n#\n# We aren't using \"generate-crds\" because \"generate-crds\" only work for projects\n# from which controller-gen can be used to generate the plain CRDs (plain CRDs =\n# the non-templated CRDs). In this project, we generate the plain CRDs using `go\n# run ./make/connection_crd` instead.\ngenerate-crds-venconn: $(addprefix $(helm_chart_source_dir)/templates/,venafi-connection-crd.yaml venafi-connection-crd.without-validations.yaml)\n\n$(helm_chart_source_dir)/crd_bases/jetstack.io_venaficonnections.yaml: go.mod | $(NEEDS_GO)\n\techo \"# DO NOT EDIT: Use 'make generate-crds-venconn' to regenerate.\" >$@\n\t$(GO) run ./make/connection_crd >>$@\n\n$(helm_chart_source_dir)/templates/venafi-connection-crd.without-validations.yaml: $(helm_chart_source_dir)/crd_bases/jetstack.io_venaficonnections.yaml $(helm_chart_source_dir)/crd_bases/crd.header.yaml $(helm_chart_source_dir)/crd_bases/crd.footer.yaml | $(NEEDS_YQ)\n\tcat $(helm_chart_source_dir)/crd_bases/crd.header-without-validations.yaml >$@\n\t$(YQ) -I2 '{\"spec\": .spec}' $< | $(YQ) 'del(.. | .\"x-kubernetes-validations\"?) 
| del(.metadata.creationTimestamp)' | grep -v \"DO NOT EDIT\" >>$@\n\tcat $(helm_chart_source_dir)/crd_bases/crd.footer.yaml >>$@\n\n$(helm_chart_source_dir)/templates/venafi-connection-crd.yaml: $(helm_chart_source_dir)/crd_bases/jetstack.io_venaficonnections.yaml $(helm_chart_source_dir)/crd_bases/crd.header.yaml $(helm_chart_source_dir)/crd_bases/crd.footer.yaml | $(NEEDS_YQ)\n\tcat $(helm_chart_source_dir)/crd_bases/crd.header.yaml >$@\n\t$(YQ) -I2 '{\"spec\": .spec}' $< | $(YQ) 'del(.metadata.creationTimestamp)' | grep -v \"DO NOT EDIT\" >>$@\n\tcat $(helm_chart_source_dir)/crd_bases/crd.footer.yaml >>$@\n\n# The generate-crds target doesn't need to be run anymore when running\n# \"generate\". Let's replace it with \"generate-crds-venconn\".\nshared_generate_targets := $(filter-out generate-crds,$(shared_generate_targets))\nshared_generate_targets += generate-crds-venconn\n\n.PHONY: test-e2e-gke\n## Run a basic E2E test on a GKE cluster\n## Build and install venafi-kubernetes-agent for VenafiConnection based authentication.\n## Wait for it to log a message indicating successful data upload.\n## See `hack/e2e/test.sh` for the full test script.\n## @category Testing\ntest-e2e-gke: | $(NEEDS_HELM) $(NEEDS_STEP) $(NEEDS_VENCTL)\n\t./hack/e2e/test.sh\n\n.PHONY: test-helm-snapshot\n## Update the `helm unittest` snapshots.\n## Note that running helm unit tests is done through \"make verify\" using the Helm makefile-module\n## @category Testing\ntest-helm-snapshot: | $(NEEDS_HELM-UNITTEST)\n\t$(HELM-UNITTEST) ./deploy/charts/{venafi-kubernetes-agent,disco-agent,discovery-agent} -u\n\n.PHONY: helm-plugins\n## Install required helm plugins\nhelm-plugins: $(NEEDS_HELM)\n\t@if ! 
$(HELM) plugin list | grep -q diff; then \\\n\t\techo \">>> Installing helm-diff plugin\"; \\\n\t\t$(HELM) plugin install https://github.com/databus23/helm-diff --verify=false; \\\n\telse \\\n\t\techo \"helm-diff plugin already installed\"; \\\n\tfi\n\n# https://docs.cyberark.com/mis-saas/vaas/venctl/c-venctl-releases/\nvenctl_linux_amd64_SHA256SUM=f1027056ec243c7ea9183fe410d5daf99cd4fa18cff9149d64749a106832595a\nvenctl_darwin_amd64_SHA256SUM=4f75900c7b3256cc786004bd5d6193f95f505521e761a9917b3c3d243440f77e\nvenctl_darwin_arm64_SHA256SUM=1648b17020291f90b8c1195be8b963d96f7be31a6e43ba944dd104729f16d1c5\n\n.PRECIOUS: $(DOWNLOAD_DIR)/tools/venctl@$(VENCTL_VERSION)_$(HOST_OS)_$(HOST_ARCH)\n$(DOWNLOAD_DIR)/tools/venctl@$(VENCTL_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools\n\t@source $(lock_script) $@; \\\n\t\t$(CURL) https://dl.venafi.cloud/venctl/$(VENCTL_VERSION)/venctl-$(HOST_OS)-$(HOST_ARCH).zip -o $(outfile).zip; \\\n\t\t$(checkhash_script) $(outfile).zip $(venctl_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \\\n\t\tunzip -p $(outfile).zip venctl > $(outfile); \\\n\t\tchmod +x $(outfile); \\\n\t\trm -f $(outfile).zip\n\n# https://github.com/smallstep/cli/releases/\nstep_linux_amd64_SHA256SUM=2908f3c7d90181eec430070b231da5c0861e37537bf8e2388d031d3bd6c7b8c6\nstep_linux_arm64_SHA256SUM=96636a6cc980d53a98c72aa3b99e04f0b874a733d9ddf43fc6b0f1725f425c37\nstep_darwin_amd64_SHA256SUM=f6e9a9078cfc5f559c8213e023df6e8ebf8d9d36ffbd82749a41ee1c40a23623\nstep_darwin_arm64_SHA256SUM=b856702ee138a9badbe983e88758c0330907ea4f97e429000334ba038597db5b\n\n.PRECIOUS: $(DOWNLOAD_DIR)/tools/step@$(STEP_VERSION)_$(HOST_OS)_$(HOST_ARCH)\n$(DOWNLOAD_DIR)/tools/step@$(STEP_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools\n\t@source $(lock_script) $@; \\\n\t\t$(CURL) https://dl.smallstep.com/gh-release/cli/gh-release-header/v$(STEP_VERSION)/step_$(HOST_OS)_$(STEP_VERSION)_$(HOST_ARCH).tar.gz -o $(outfile).tar.gz; \\\n\t\t$(checkhash_script) $(outfile).tar.gz 
$(step_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \\\n\t\ttar xfO $(outfile).tar.gz step_$(STEP_VERSION)/bin/step > $(outfile); \\\n\t\tchmod +x $(outfile); \\\n\t\trm -f $(outfile).tar.gz\n"
  },
  {
    "path": "make/_shared/generate-verify/00_mod.mk",
    "content": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nshared_generate_targets ?=\nshared_generate_targets_dirty ?=\nshared_verify_targets ?=\nshared_verify_targets_dirty ?=\n"
  },
  {
    "path": "make/_shared/generate-verify/02_mod.mk",
    "content": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n.PHONY: generate\n## Generate all generate targets.\n## @category [shared] Generate/ Verify\ngenerate: $$(shared_generate_targets)\n\t@echo \"The following targets cannot be run simultaneously with each other or other generate scripts:\"\n\t$(foreach TARGET,$(shared_generate_targets_dirty), $(MAKE) $(TARGET))\n\nverify_script := $(dir $(lastword $(MAKEFILE_LIST)))/util/verify.sh\n\n# Run the supplied make target argument in a temporary workspace and diff the results.\nverify-%: FORCE\n\t+$(verify_script) $(MAKE) $*\n\nverify_generated_targets = $(shared_generate_targets:%=verify-%)\nverify_generated_targets_dirty = $(shared_generate_targets_dirty:%=verify-%)\n\nverify_targets = $(sort $(verify_generated_targets) $(shared_verify_targets))\nverify_targets_dirty = $(sort $(verify_generated_targets_dirty) $(shared_verify_targets_dirty))\n\n.PHONY: verify\n## Verify code and generate targets.\n## @category [shared] Generate/ Verify\nverify: $$(verify_targets)\n\t@echo \"The following targets create temporary files in the current directory, that is why they have to be run last:\"\n\t$(foreach TARGET,$(verify_targets_dirty), $(MAKE) $(TARGET))\n"
  },
  {
    "path": "make/_shared/generate-verify/util/verify.sh",
    "content": "#!/usr/bin/env bash\n\n# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Verify that the supplied command does not make any changes to the repository.\n#\n# This is called from the Makefile to verify that all code generation scripts\n# have been run and that their changes have been committed to the repository.\n#\n# Runs any of the scripts or Make targets in this repository, after making a\n# copy of the repository, then reports any changes to the files in the copy.\n\n# For example:\n#\n#  make verify-helm-chart-update || \\\n#    make helm-chart-update\n#\nset -o errexit\nset -o nounset\nset -o pipefail\n\nprojectdir=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )/../../../..\" && pwd )\"\n\ncd \"${projectdir}\"\n\n# Use short form arguments here to support BSD/macOS. `-d` instructs\n# it to make a directory, `-t` provides a prefix to use for the directory name.\ntmp=\"$(mktemp -d /tmp/verify.sh.XXXXXXXX)\"\n\ncleanup() {\n    rm -rf \"${tmp}\"\n}\ntrap \"cleanup\" EXIT SIGINT\n\n# Why not just \"cp\" to the tmp dir?\n# A dumb \"cp\" will fail sometimes since _bin can get changed while it's being copied if targets are run in parallel,\n# and cp doesn't have some universal \"exclude\" option to ignore \"_bin\"\n#\n# We previously used \"rsync\" here, but:\n# 1. That's another tool we need to depend on\n# 2. 
rsync on macOS 15.4 and newer is actually openrsync, which has different permissions and throws errors when copying git objects\n#\n# So, we use find to list all files except _bin, and then copy each in turn\nfind . -maxdepth 1 -not \\( -path \"./_bin\" \\) -not \\( -path \".\" \\) | xargs -I% cp -af \"${projectdir}/%\" \"${tmp}/\"\n\npushd \"${tmp}\" >/dev/null\n\n\"$@\"\n\npopd >/dev/null\n\nif ! diff \\\n    --exclude=\".git\" \\\n    --exclude=\"_bin\" \\\n    --new-file --unified --show-c-function --recursive \"${projectdir}\" \"${tmp}\"\nthen\n    echo\n    echo \"Project '${projectdir}' is out of date.\"\n    echo \"Please run '${*}' or apply the above diffs\"\n    exit 1\nfi\n"
  },
  {
    "path": "make/_shared/go/.golangci.override.yaml",
    "content": "version: \"2\"\nlinters:\n  default: none\n  exclusions:\n    generated: lax\n    presets: [ comments, common-false-positives, legacy, std-error-handling ]\n    paths: [ third_party, builtin$, examples$ ]\n    warn-unused: true\n  settings:\n    staticcheck:\n      checks: [ \"all\", \"-ST1000\", \"-ST1001\", \"-ST1003\", \"-ST1005\", \"-ST1012\", \"-ST1016\", \"-ST1020\", \"-ST1021\", \"-ST1022\", \"-QF1001\", \"-QF1003\", \"-QF1008\" ]\n  enable:\n    - asasalint\n    - asciicheck\n    - bidichk\n    - bodyclose\n    - canonicalheader\n    - contextcheck\n    - copyloopvar\n    - decorder\n    - dogsled\n    - dupword\n    - durationcheck\n    - errcheck\n    - errchkjson\n    - errname\n    - exhaustive\n    - exptostd\n    - forbidigo\n    - ginkgolinter\n    - gocheckcompilerdirectives\n    - gochecksumtype\n    - gocritic\n    - goheader\n    - goprintffuncname\n    - gosec\n    - gosmopolitan\n    - govet\n    - grouper\n    - importas\n    - ineffassign\n    - interfacebloat\n    - intrange\n    - loggercheck\n    - makezero\n    - mirror\n    - misspell\n    - modernize\n    - musttag\n    - nakedret\n    - nilerr\n    - nilnil\n    - noctx\n    - nosprintfhostport\n    - predeclared\n    - promlinter\n    - protogetter\n    - reassign\n    - sloglint\n    - staticcheck\n    - tagalign\n    - testableexamples\n    - unconvert\n    - unparam\n    - unused\n    - usestdlibvars\n    - usetesting\n    - wastedassign\nformatters:\n  enable: [ gci, gofmt ]\n  settings:\n    gci:\n      custom-order: true\n      sections:\n        - standard # Standard section: captures all standard packages.\n        - default # Default section: contains all imports that could not be matched to another section type.\n        - localmodule # Local module section: contains all local packages. This section is not present unless explicitly enabled.\n        - blank # Blank section: contains all blank imports. 
This section is not present unless explicitly enabled.\n        - dot # Dot section: contains all dot imports. This section is not present unless explicitly enabled.\n  exclusions:\n    generated: lax\n    paths: [ third_party, builtin$, examples$ ]\n"
  },
  {
    "path": "make/_shared/go/01_mod.mk",
    "content": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nifndef bin_dir\n$(error bin_dir is not set)\nendif\n\nifndef repo_name\n$(error repo_name is not set)\nendif\n\nifndef golangci_lint_config\n$(error golangci_lint_config is not set)\nendif\n\ngolangci_lint_override := $(dir $(lastword $(MAKEFILE_LIST)))/.golangci.override.yaml\n\n.PHONY: go-workspace\ngo-workspace: export GOWORK?=$(abspath go.work)\n## Create a go.work file in the repository root (or GOWORK)\n##\n## @category Development\ngo-workspace: | $(NEEDS_GO)\n\t@rm -f $(GOWORK)\n\t$(GO) work init\n\t@find . -name go.mod -not \\( -path \"./$(bin_dir)/*\" -or -path \"./make/_shared/*\" \\) \\\n\t\t| while read d; do \\\n\t\t\t\ttarget=$$(dirname $${d}); \\\n\t\t\t\t$(GO) work use \"$${target}\"; \\\n\t\t\tdone\n\n.PHONY: go-tidy\n## Alias for `make generate-go-mod-tidy`\n## @category [shared] Generate/ Verify\ngo-tidy: generate-go-mod-tidy\n\n.PHONY: generate-go-mod-tidy\n## Run `go mod tidy` on all Go modules\n## @category [shared] Generate/ Verify\ngenerate-go-mod-tidy: | $(NEEDS_GO)\n\t@find . 
-name go.mod -not \\( -path \"./$(bin_dir)/*\" -or -path \"./make/_shared/*\" \\) \\\n\t\t| while read d; do \\\n\t\t\t\ttarget=$$(dirname $${d}); \\\n\t\t\t\techo \"Running 'go mod tidy' in directory '$${target}'\"; \\\n\t\t\t\tpushd \"$${target}\" >/dev/null; \\\n\t\t\t\t$(GO) mod tidy || exit; \\\n\t\t\t\t$(GO) get toolchain@none || exit; \\\n\t\t\t\tpopd >/dev/null; \\\n\t\t\t\techo \"\"; \\\n\t\t\tdone\n\nshared_generate_targets := generate-go-mod-tidy $(shared_generate_targets)\n\nifndef dont_generate_govulncheck\n\ngovulncheck_base_dir := $(dir $(lastword $(MAKEFILE_LIST)))/base/\n\n.PHONY: generate-govulncheck\n## Generate base files in the repository\n## @category [shared] Generate/ Verify\ngenerate-govulncheck:\n\tcp -r $(govulncheck_base_dir)/. ./\n\tcd $(govulncheck_base_dir) && \\\n\t\tfind . -type f | while read file; do \\\n\t\t\tsed \"s|{{REPLACE:GH-REPOSITORY}}|$(repo_name:github.com/%=%)|g\" \"$$file\" > \"$(CURDIR)/$$file\"; \\\n\t\tdone\n\nshared_generate_targets += generate-govulncheck\n\nendif # dont_generate_govulncheck\n\n.PHONY: verify-govulncheck\n## Verify all Go modules for vulnerabilities using govulncheck\n## @category [shared] Generate/ Verify\n#\n# Runs `govulncheck` on all Go modules related to the project.\n# Ignores Go modules among the temporary build artifacts in _bin, to avoid\n# scanning the code of the vendored Go, after running make vendor-go.\n# Ignores Go modules in make/_shared, because those will be checked in centrally\n# in the makefile_modules repository.\n#\n# `verify-govulncheck` not added to the `shared_verify_targets` variable and is\n# not run by `make verify`, because `make verify` is run for each PR, and we do\n# not want new vulnerabilities in existing code to block the merging of PRs.\n# Instead `make verify-govulncheck` is intended to be run periodically by a CI job.\nverify-govulncheck: | $(NEEDS_GOVULNCHECK)\n\t@find . 
-name go.mod -not \\( -path \"./$(bin_dir)/*\" -or -path \"./make/_shared/*\" \\) \\\n\t\t| while read d; do \\\n\t\t\t\ttarget=$$(dirname $${d}); \\\n\t\t\t\techo \"Running 'GOTOOLCHAIN=go$(VENDORED_GO_VERSION) $(bin_dir)/tools/govulncheck ./...' in directory '$${target}'\"; \\\n\t\t\t\tpushd \"$${target}\" >/dev/null; \\\n\t\t\t\tGOTOOLCHAIN=go$(VENDORED_GO_VERSION) $(GOVULNCHECK) ./... || exit; \\\n\t\t\t\tpopd >/dev/null; \\\n\t\t\t\techo \"\"; \\\n\t\t\tdone\n\n.PHONY: generate-golangci-lint-config\n## Generate a golangci-lint configuration file\n## @category [shared] Generate/ Verify\ngenerate-golangci-lint-config: | $(NEEDS_GOLANGCI-LINT) $(NEEDS_YQ) $(bin_dir)/scratch\n\tif [ \"$$($(YQ) eval 'has(\"version\") | not' $(golangci_lint_config))\" == \"true\" ]; then \\\n\t\t$(GOLANGCI-LINT) migrate -c $(golangci_lint_config); \\\n\t\trm $(basename $(golangci_lint_config)).bck$(suffix $(golangci_lint_config)); \\\n\tfi\n\n\tcp $(golangci_lint_config) $(bin_dir)/scratch/golangci-lint.yaml.tmp\n\t$(YQ) -i 'del(.linters.enable)' $(bin_dir)/scratch/golangci-lint.yaml.tmp\n\t$(YQ) eval-all -i '. as $$item ireduce ({}; . * $$item)' $(bin_dir)/scratch/golangci-lint.yaml.tmp $(golangci_lint_override)\n\tmv $(bin_dir)/scratch/golangci-lint.yaml.tmp $(golangci_lint_config)\n\nshared_generate_targets += generate-golangci-lint-config\n\ngolangci_lint_timeout ?= 10m\n\n.PHONY: verify-golangci-lint\n## Verify all Go modules using golangci-lint\n## @category [shared] Generate/ Verify\nverify-golangci-lint: | $(NEEDS_GO) $(NEEDS_GOLANGCI-LINT) $(NEEDS_YQ) $(bin_dir)/scratch\n\t@find . 
-name go.mod -not \\( -path \"./$(bin_dir)/*\" -or -path \"./make/_shared/*\" \\) \\\n\t\t| while read d; do \\\n\t\t\t\ttarget=$$(dirname $${d}); \\\n\t\t\t\techo \"Running 'GOVERSION=$(VENDORED_GO_VERSION) $(bin_dir)/tools/golangci-lint run -c $(CURDIR)/$(golangci_lint_config) --timeout $(golangci_lint_timeout)' in directory '$${target}'\"; \\\n\t\t\t\tpushd \"$${target}\" >/dev/null; \\\n\t\t\t\tGOVERSION=$(VENDORED_GO_VERSION) $(GOLANGCI-LINT) run -c $(CURDIR)/$(golangci_lint_config) --timeout $(golangci_lint_timeout) || exit; \\\n\t\t\t\tpopd >/dev/null; \\\n\t\t\t\techo \"\"; \\\n\t\t\tdone\n\nshared_verify_targets_dirty += verify-golangci-lint\n\n.PHONY: fix-golangci-lint\n## Fix all Go modules using golangci-lint\n## @category [shared] Generate/ Verify\nfix-golangci-lint: | $(NEEDS_GOLANGCI-LINT) $(NEEDS_YQ) $(NEEDS_GCI) $(bin_dir)/scratch\n\t@find . -name go.mod -not \\( -path \"./$(bin_dir)/*\" -or -path \"./make/_shared/*\" \\) \\\n\t\t| while read d; do \\\n\t\t\t\ttarget=$$(dirname $${d}); \\\n\t\t\t\techo \"Running 'GOVERSION=$(VENDORED_GO_VERSION) $(bin_dir)/tools/golangci-lint run --fix -c $(CURDIR)/$(golangci_lint_config) --timeout $(golangci_lint_timeout)' in directory '$${target}'\"; \\\n\t\t\t\tpushd \"$${target}\" >/dev/null; \\\n\t\t\t\tGOVERSION=$(VENDORED_GO_VERSION) $(GOLANGCI-LINT) run --fix -c $(CURDIR)/$(golangci_lint_config) --timeout $(golangci_lint_timeout) || exit; \\\n\t\t\t\tpopd >/dev/null; \\\n\t\t\t\techo \"\"; \\\n\t\t\tdone\n"
  },
  {
    "path": "make/_shared/go/README.md",
    "content": "# README\n\nA module for various Go static checks.\n"
  },
  {
    "path": "make/_shared/go/base/.github/workflows/govulncheck.yaml",
    "content": "# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.\n# Edit https://github.com/cert-manager/makefile-modules/blob/main/modules/go/base/.github/workflows/govulncheck.yaml instead.\n\n# Run govulncheck at midnight every night on the main branch,\n# to alert us to recent vulnerabilities which affect the Go code in this\n# project.\nname: govulncheck\non:\n  workflow_dispatch: {}\n  schedule:\n    - cron: '0 0 * * *'\n\npermissions:\n  contents: read\n\njobs:\n  govulncheck:\n    runs-on: ubuntu-latest\n\n    if: github.repository == '{{REPLACE:GH-REPOSITORY}}'\n\n    steps:\n      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2\n        # Adding `fetch-depth: 0` makes sure tags are also fetched. We need\n        # the tags so `git describe` returns a valid version.\n        # see https://github.com/actions/checkout/issues/701 for extra info about this option\n        with: { fetch-depth: 0 }\n\n      - id: go-version\n        run: |\n          make print-go-version >> \"$GITHUB_OUTPUT\"\n\n      - uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0\n        with:\n          go-version: ${{ steps.go-version.outputs.result }}\n\n      - run: make verify-govulncheck\n"
  },
  {
    "path": "make/_shared/helm/01_mod.mk",
    "content": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nifndef helm_dont_include_crds\ninclude $(dir $(lastword $(MAKEFILE_LIST)))/crds.mk\nendif\n\ninclude $(dir $(lastword $(MAKEFILE_LIST)))/helm.mk\ninclude $(dir $(lastword $(MAKEFILE_LIST)))/deploy.mk\n"
  },
  {
    "path": "make/_shared/helm/crd.template.footer.yaml",
    "content": "{{- end }}\n"
  },
  {
    "path": "make/_shared/helm/crd.template.header.yaml",
    "content": "{{- if REPLACE_CRD_EXPRESSION }}\napiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n  name: \"REPLACE_CRD_NAME\"\n  {{- if .Values.crds.keep }}\n  annotations:\n    helm.sh/resource-policy: keep\n  {{- end }}\n  labels:\n    {{- include \"REPLACE_LABELS_TEMPLATE\" . | nindent 4 }}\n"
  },
  {
    "path": "make/_shared/helm/crds.mk",
    "content": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n################\n# Check Inputs #\n################\n\nifndef helm_chart_source_dir\n$(error helm_chart_source_dir is not set)\nendif\n\nifndef helm_labels_template_name\n$(error helm_labels_template_name is not set)\nendif\n\n################\n# Add targets #\n################\n\ncrd_template_header := $(dir $(lastword $(MAKEFILE_LIST)))/crd.template.header.yaml\ncrd_template_footer := $(dir $(lastword $(MAKEFILE_LIST)))/crd.template.footer.yaml\n\n# see https://stackoverflow.com/a/53408233\nsed_inplace := sed -i''\nifeq ($(HOST_OS),darwin)\n\tsed_inplace := sed -i ''\nendif\n\ncrds_dir ?= deploy/crds\ncrds_dir_readme := $(dir $(lastword $(MAKEFILE_LIST)))/crds_dir.README.md\ncrds_expression ?= .Values.crds.enabled\ncrds_template_include_pattern := *.yaml\n# Space-separated list of basenames to exclude (e.g. 
foo.yaml *_test.yaml)\ncrds_template_exclude_pattern ?=\n\ndefine filter-out-basenames\n  $(if $(strip $(2)), \\\n    $(foreach f,$(1),$(if $(filter $(2),$(notdir $(f))),,$(f))), \\\n    $(1))\nendef\n\n.PHONY: generate-crds\n## Generate CRD manifests.\n## @category [shared] Generate/ Verify\ngenerate-crds: | $(NEEDS_CONTROLLER-GEN) $(NEEDS_YQ)\n\t$(eval crds_gen_temp := $(bin_dir)/scratch/crds)\n\t$(eval directories := $(shell ls -d */ | grep -v -e 'make' $(shell git check-ignore -- * | sed 's/^/-e /')))\n\n\trm -rf $(crds_gen_temp)\n\tmkdir -p $(crds_gen_temp)\n\n\t$(CONTROLLER-GEN) crd \\\n\t\t$(directories:%=paths=./%...) \\\n\t\toutput:crd:artifacts:config=$(crds_gen_temp)\n\n\t@echo \"Updating CRDs with helm templating, writing to $(helm_chart_source_dir)/templates\"\n\n\t$(eval crds_gen_temp_all_files := $(wildcard $(crds_gen_temp)/$(crds_template_include_pattern)))\n\t$(eval crds_gen_temp_files := $(if $(crds_template_exclude_pattern), \\\n\t\t$(call filter-out-basenames,$(crds_gen_temp_all_files),$(crds_template_exclude_pattern)), \\\n\t\t$(crds_gen_temp_all_files)))\n\n\t@for f in $(crds_gen_temp_files); do \\\n\t\tcrd_name=$$($(YQ) eval '.metadata.name' $$f); \\\n\t\tcrd_template_file=\"$(helm_chart_source_dir)/templates/crd-$$(basename $$f)\"; \\\n\t\tcat $(crd_template_header) > $$crd_template_file; \\\n\t\t$(sed_inplace) \"s/REPLACE_CRD_EXPRESSION/$(crds_expression)/g\" $$crd_template_file; \\\n\t\t$(sed_inplace) \"s/REPLACE_CRD_NAME/$$crd_name/g\" $$crd_template_file; \\\n\t\t$(sed_inplace) \"s/REPLACE_LABELS_TEMPLATE/$(helm_labels_template_name)/g\" $$crd_template_file; \\\n\t\t$(YQ) -I2 '{\"spec\": .spec}' $$f >> $$crd_template_file; \\\n\t\tcat $(crd_template_footer) >> $$crd_template_file; \\\n\tdone\n\n\t@if [ -n \"$$(ls $(crds_gen_temp) 2>/dev/null)\" ]; then \\\n\t\tcp $(crds_gen_temp)/* $(crds_dir)/ ; \\\n\t\tcp $(crds_dir_readme) $(crds_dir)/README.md ; \\\n\tfi\n\nshared_generate_targets += generate-crds\n"
  },
  {
    "path": "make/_shared/helm/crds_dir.README.md",
    "content": "# CRDs source directory\n\n> **WARNING**: if you are an end-user, you probably should NOT need to use the\n> files in this directory. These files are for **reference, development and testing purposes only**.\n\nThis directory contains 'source code' used to build our CustomResourceDefinition\nresources consumed by our officially supported deployment methods (e.g. the Helm chart).\nThe CRDs in this directory might be incomplete, and should **NOT** be used to provision the operator."
  },
  {
    "path": "make/_shared/helm/deploy.mk",
    "content": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nifndef deploy_name\n$(error deploy_name is not set)\nendif\n\nifndef deploy_namespace\n$(error deploy_namespace is not set)\nendif\n\n# Install options allows the user configuration of extra flags\nINSTALL_OPTIONS ?=\n\n##########################################\n\n.PHONY: install\n## Install controller helm chart on the current active K8S cluster.\n## @category [shared] Deployment\ninstall: $(helm_chart_archive) | $(NEEDS_HELM)\n\t$(HELM) upgrade $(deploy_name) $(helm_chart_archive) \\\n\t\t--wait \\\n\t\t--install \\\n\t\t--create-namespace \\\n\t\t$(INSTALL_OPTIONS) \\\n\t\t--namespace $(deploy_namespace)\n\n.PHONY: uninstall\n## Uninstall controller helm chart from the current active K8S cluster.\n## @category [shared] Deployment\nuninstall: | $(NEEDS_HELM)\n\t$(HELM) uninstall $(deploy_name)  \\\n\t\t--wait \\\n\t\t--namespace $(deploy_namespace)\n\n.PHONY: template\n## Template the helm chart.\n## @category [shared] Deployment\ntemplate: $(helm_chart_archive) | $(NEEDS_HELM)\n\t@$(HELM) template $(deploy_name) $(helm_chart_archive) \\\n\t\t--create-namespace \\\n\t\t$(INSTALL_OPTIONS) \\\n\t\t--namespace $(deploy_namespace)\n"
  },
  {
    "path": "make/_shared/helm/helm.mk",
    "content": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nifndef bin_dir\n$(error bin_dir is not set)\nendif\n\nifndef helm_chart_source_dir\n$(error helm_chart_source_dir is not set)\nendif\n\nifndef helm_chart_image_name\n$(error helm_chart_image_name is not set)\nendif\n\nifndef helm_chart_version\n$(error helm_chart_version is not set)\nendif\nifneq ($(helm_chart_version:v%=v),v)\n$(error helm_chart_version \"$(helm_chart_version)\" should start with a \"v\" - did you forget to pull tags from the remote repository?)\nendif\n\nifndef helm_values_mutation_function\n$(error helm_values_mutation_function is not set)\nendif\n\n##########################################\n\nhelm_chart_name := $(notdir $(helm_chart_image_name))\nhelm_chart_image_registry := $(dir $(helm_chart_image_name))\nhelm_chart_image_tag := $(helm_chart_version)\nhelm_chart_sources := $(shell find $(helm_chart_source_dir) -maxdepth 1 -type f) $(shell find $(helm_chart_source_dir)/templates -type f)\nhelm_chart_archive := $(bin_dir)/scratch/helm/$(helm_chart_name)-$(helm_chart_version).tgz\nhelm_digest_path := $(bin_dir)/scratch/helm/$(helm_chart_name)-$(helm_chart_version).digests\nhelm_digest = $(shell head -1 $(helm_digest_path) 2> /dev/null)\n\n$(bin_dir)/scratch/helm:\n\t@mkdir -p $@\n\n$(helm_chart_archive): $(helm_chart_sources) | $(NEEDS_HELM) $(NEEDS_YQ) $(bin_dir)/scratch/helm\n\t$(eval helm_chart_source_dir_versioned := 
$@.tmp)\n\trm -rf $(helm_chart_source_dir_versioned)\n\tmkdir -p $(dir $(helm_chart_source_dir_versioned))\n\tcp -a $(helm_chart_source_dir) $(helm_chart_source_dir_versioned)\n\n\t$(call helm_values_mutation_function,$(helm_chart_source_dir_versioned)/values.yaml)\n\n\t@if ! $(YQ) -oy '.name' $(helm_chart_source_dir_versioned)/Chart.yaml | grep -q '^$(helm_chart_name)$$'; then \\\n\t\techo \"Chart name does not match the name in the helm_chart_name variable\"; \\\n\t\texit 1; \\\n\tfi\n\n\t$(YQ) '.annotations.\"artifacthub.io/prerelease\" = \"$(IS_PRERELEASE)\"' \\\n\t\t--inplace $(helm_chart_source_dir_versioned)/Chart.yaml\n\n\tmkdir -p $(dir $@)\n\t$(HELM) package $(helm_chart_source_dir_versioned) \\\n\t\t--app-version $(helm_chart_version) \\\n\t\t--version $(helm_chart_version) \\\n\t\t--destination $(dir $@)\n\n.PHONY: helm-chart-oci-push\n## Create and push Helm chart to OCI registry.\n## Will also create a non-v-prefixed tag for the OCI image.\n## @category [shared] Publish\nhelm-chart-oci-push: $(helm_chart_archive) | $(NEEDS_HELM) $(NEEDS_CRANE)\n\t$(HELM) push \"$(helm_chart_archive)\" \"oci://$(helm_chart_image_registry)\" 2>&1 \\\n\t\t| tee >(grep -o \"sha256:.\\+\" | tee $(helm_digest_path))\n\n\t@# $(helm_chart_image_tag:v%=%) removes the v prefix from the value stored in helm_chart_image_tag.\n\t@# See https://www.gnu.org/software/make/manual/html_node/Substitution-Refs.html for the manual on the syntax.\n\thelm_digest=$$(cat $(helm_digest_path)) && \\\n\t$(CRANE) copy \"$(helm_chart_image_name)@$$helm_digest\" \"$(helm_chart_image_name):$(helm_chart_image_tag:v%=%)\"\n\n.PHONY: helm-chart\n## Create a helm chart\n## @category [shared] Helm Chart\nhelm-chart: $(helm_chart_archive)\n\nhelm_tool_header_search ?= ^<!-- AUTO-GENERATED -->\nhelm_tool_footer_search ?= ^<!-- /AUTO-GENERATED -->\n\n.PHONY: generate-helm-docs\n## Generate Helm chart documentation.\n## @category [shared] Generate/ Verify\ngenerate-helm-docs: | 
$(NEEDS_HELM-TOOL)\n\t$(HELM-TOOL) inject -i $(helm_chart_source_dir)/values.yaml -o $(helm_chart_source_dir)/README.md --header-search \"$(helm_tool_header_search)\" --footer-search \"$(helm_tool_footer_search)\"\n\nshared_generate_targets += generate-helm-docs\n\n.PHONY: generate-helm-schema\n## Generate Helm chart schema.\n## @category [shared] Generate/ Verify\ngenerate-helm-schema: | $(NEEDS_HELM-TOOL) $(NEEDS_GOJQ)\n\t$(HELM-TOOL) schema -i $(helm_chart_source_dir)/values.yaml | $(GOJQ) > $(helm_chart_source_dir)/values.schema.json\n\nshared_generate_targets += generate-helm-schema\n\n.PHONY: verify-helm-values\n## Verify Helm chart values using helm-tool.\n## @category [shared] Generate/ Verify\nverify-helm-values: | $(NEEDS_HELM-TOOL) $(NEEDS_GOJQ)\n\t$(HELM-TOOL) lint -i $(helm_chart_source_dir)/values.yaml -d $(helm_chart_source_dir)/templates -e $(helm_chart_source_dir)/values.linter.exceptions\n\nshared_verify_targets += verify-helm-values\n\n.PHONY: verify-helm-unittest\n## Run Helm chart unit tests using helm-unittest.\n## @category [shared] Generate/ Verify\nverify-helm-unittest: | $(NEEDS_HELM-UNITTEST)\n\t$(HELM-UNITTEST) $(helm_chart_source_dir)\n\nshared_verify_targets += verify-helm-unittest\n\n$(bin_dir)/scratch/kyverno:\n\t@mkdir -p $@\n\n$(bin_dir)/scratch/kyverno/pod-security-policy.yaml: | $(NEEDS_KUSTOMIZE) $(bin_dir)/scratch/kyverno\n\t@$(KUSTOMIZE) build https://github.com/kyverno/policies/pod-security/enforce > $@\n\n# Extra arguments for kyverno apply.\nkyverno_apply_extra_args :=\n# Allows known policy violations to be skipped by supplying Kyverno policy\n# exceptions as a Kyverno YAML resource, e.g.:\n# apiVersion: kyverno.io/v2\n# kind: PolicyException\n# metadata:\n#   name: pod-security-exceptions\n# spec:\n#   exceptions:\n#   - policyName: disallow-privilege-escalation\n#     ruleNames:\n#     - autogen-privilege-escalation\n#   - policyName: restrict-seccomp-strict\n#     ruleNames:\n#     - autogen-check-seccomp-strict\n#   
match:\n#     any:\n#     - resources:\n#         kinds:\n#         - Deployment\n#         namespaces:\n#         - mynamespace\n#         names:\n#         - my-deployment\nifneq (\"$(wildcard make/verify-pod-security-standards-exceptions.yaml)\",\"\")\n\t\tkyverno_apply_extra_args += --exceptions make/verify-pod-security-standards-exceptions.yaml\nendif\n\n.PHONY: verify-pod-security-standards\n## Verify that the Helm chart complies with the pod security standards.\n##\n## You can add Kyverno policy exceptions to\n## `make/verify-pod-security-standards-exceptions.yaml`, to skip some of the pod\n## security policy rules.\n##\n## @category [shared] Generate/ Verify\nverify-pod-security-standards: $(helm_chart_archive) $(bin_dir)/scratch/kyverno/pod-security-policy.yaml | $(NEEDS_KYVERNO) $(NEEDS_HELM)\n\t@$(HELM) template $(helm_chart_archive) $(INSTALL_OPTIONS) \\\n\t| $(KYVERNO) apply $(bin_dir)/scratch/kyverno/pod-security-policy.yaml \\\n\t\t$(kyverno_apply_extra_args) \\\n\t\t--resource - \\\n\t\t--table\n\nshared_verify_targets_dirty += verify-pod-security-standards\n\n.PHONY: verify-helm-lint\n## Verify that the Helm chart is linted.\n## @category [shared] Generate/ Verify\nverify-helm-lint: $(helm_chart_archive) | $(NEEDS_HELM)\n\t$(HELM) lint $(helm_chart_archive)\n\nshared_verify_targets_dirty += verify-helm-lint\n\n.PHONY: verify-helm-kubeconform\n## Verify that the Helm chart passes a strict check using kubeconform\n## @category [shared] Generate/ Verify\nverify-helm-kubeconform: $(helm_chart_archive) | $(NEEDS_KUBECONFORM)\n\t@$(HELM) template $(helm_chart_archive) $(INSTALL_OPTIONS) \\\n\t| $(KUBECONFORM) \\\n\t\t-schema-location default \\\n\t\t-schema-location \"https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/{{.NormalizedKubernetesVersion}}/{{.ResourceKind}}.json\" \\\n\t\t-schema-location \"https://raw.githubusercontent.com/datreeio/CRDs-catalog/main/{{.Group}}/{{.ResourceKind}}_{{.ResourceAPIVersion}}.json\" 
\\\n\t\t-strict\n\nshared_verify_targets_dirty += verify-helm-kubeconform\n"
  },
  {
    "path": "make/_shared/help/01_mod.mk",
    "content": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nhelp_sh := $(dir $(lastword $(MAKEFILE_LIST)))/help.sh\n\n.PHONY: help\nhelp:\n\t@MAKEFILE_LIST=\"$(MAKEFILE_LIST)\" \\\n\t\tMAKE=\"$(MAKE)\" \\\n\t\t$(help_sh)\n"
  },
  {
    "path": "make/_shared/help/help.sh",
    "content": "#!/usr/bin/env bash\n\n# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -o errexit\nset -o nounset\nset -o pipefail\n\n## 1. Build set of extracted line items\n\nEMPTYLINE_REGEX=\"^[[:space:]]*$\"\nDOCBLOCK_REGEX=\"^##[[:space:]]*(.*)$\"\nCATEGORY_REGEX=\"^##[[:space:]]*@category[[:space:]]*(.*)$\"\nTARGET_REGEX=\"^(([a-zA-Z0-9\\_\\/\\%\\$\\(\\)]|-)+):.*$\"\n\nEMPTY_ITEM=\"<start-category><end-category><start-target><end-target><start-comment><end-comment>\"\n\n# shellcheck disable=SC2086\nraw_lines=$(cat ${MAKEFILE_LIST} | tr '\\t' '    ' | grep -E \"($TARGET_REGEX|$DOCBLOCK_REGEX|$EMPTYLINE_REGEX)\")\nextracted_lines=\"\"\nextracted_current=\"$EMPTY_ITEM\"\nmax_target_length=0\n\n## Extract all the commented targets from the Makefile\nwhile read -r line; do\n    if [[ $line =~ $EMPTYLINE_REGEX ]]; then\n        # Reset current item.\n        extracted_current=\"$EMPTY_ITEM\"\n    elif [[ $line =~ $CATEGORY_REGEX ]]; then\n        extracted_current=${extracted_current//<start-category><end-category>/<start-category>${BASH_REMATCH[1]}<end-category>}\n    elif [[ $line =~ $TARGET_REGEX ]]; then\n        # only keep the target if there is a comment\n        if [[ $extracted_current != *\"<start-comment><end-comment>\"* ]]; then\n            max_target_length=$(( ${#BASH_REMATCH[1]} > max_target_length ? 
${#BASH_REMATCH[1]} : max_target_length ))\n            extracted_current=${extracted_current//<start-target><end-target>/<start-target>${BASH_REMATCH[1]}<end-target>}\n            extracted_lines=\"$extracted_lines\\n$extracted_current\"\n        fi\n\n        extracted_current=\"$EMPTY_ITEM\"\n    elif [[ $line =~ $DOCBLOCK_REGEX ]]; then\n        extracted_current=${extracted_current//<end-comment>/${BASH_REMATCH[1]}<newline><end-comment>}\n    fi\ndone <<< \"$raw_lines\"\n\n## 2. Build mapping for expanding targets\n\nASSIGNMENT_REGEX=\"^(([a-zA-Z0-9\\_\\/\\%\\$\\(\\)]|-)+)[[:space:]]*:=[[:space:]]*(.*)$\"\n\nraw_expansions=$(${MAKE} --dry-run --print-data-base noop | tr '\\t' '    ' | grep -E \"$ASSIGNMENT_REGEX\")\nextracted_expansions=\"\"\n\nwhile read -r line; do\n    if [[ $line =~ $ASSIGNMENT_REGEX ]]; then\n        target=${BASH_REMATCH[1]}\n        expansion=${BASH_REMATCH[3]// /, }\n        extracted_expansions=\"$extracted_expansions\\n<start-target>$target<end-target><start-expansion>$expansion<end-expansion>\"\n    fi\ndone <<< \"$raw_expansions\"\n\n## 3. 
Sort and print the extracted line items\n\nRULE_COLOR=\"$(TERM=xterm tput setaf 6)\"\nCATEGORY_COLOR=\"$(TERM=xterm tput setaf 3)\"\nCLEAR_STYLE=\"$(TERM=xterm tput sgr0)\"\nPURPLE=$(TERM=xterm tput setaf 125)\n\nextracted_lines=$(echo -e \"$extracted_lines\" | LC_ALL=C sort -r)\ncurrent_category=\"\"\n\n## Print the help\necho \"Usage: make [target1] [target2] ...\"\n\nIFS=$'\\n'; for line in $extracted_lines; do\n    category=$([[ $line =~ \\<start-category\\>(.*)\\<end-category\\> ]] && echo \"${BASH_REMATCH[1]}\")\n    target=$([[ $line =~ \\<start-target\\>(.*)\\<end-target\\> ]] && echo \"${BASH_REMATCH[1]}\")\n    comment=$([[ $line =~ \\<start-comment\\>(.*)\\<end-comment\\> ]] && echo -e \"${BASH_REMATCH[1]//<newline>/\\\\n}\")\n\n    # Print the category header if it's changed\n    if [[ \"$current_category\" != \"$category\" ]]; then\n        current_category=$category\n        echo -e \"\\n${CATEGORY_COLOR}${current_category}${CLEAR_STYLE}\"\n    fi\n\n    # replace any $(...) with the actual value\n    if [[ $target =~ \\$\\((.*)\\) ]]; then\n        new_target=$(echo -e \"$extracted_expansions\" | grep \"<start-target>${BASH_REMATCH[1]}<end-target>\" || true)\n        if [[ -n \"$new_target\" ]]; then\n            target=$([[ $new_target =~ \\<start-expansion\\>(.*)\\<end-expansion\\> ]] && echo -e \"${BASH_REMATCH[1]}\")\n        fi\n    fi\n\n    # Print the target and its multiline comment\n    is_first_line=true\n    while read -r comment_line; do\n        if [[ \"$is_first_line\" == true ]]; then\n            is_first_line=false\n            padding=$(( max_target_length - ${#target} ))\n            printf \"    %s%${padding}s ${PURPLE}>${CLEAR_STYLE} %s\\n\" \"${RULE_COLOR}${target}${CLEAR_STYLE}\" \"\" \"${comment_line}\"\n        else\n            printf \"    %${max_target_length}s   %s\\n\" \"\" \"${comment_line}\"\n        fi\n    done <<< \"$comment\"\ndone\n"
  },
  {
    "path": "make/_shared/kind/00_kind_image_versions.mk",
    "content": "# Copyright 2024 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# This file is auto-generated by the learn_kind_images.sh script in the makefile-modules repo.\n# Do not edit manually.\n\nkind_image_kindversion := v0.31.0\n\nkind_image_kube_1.31_amd64 := docker.io/kindest/node:v1.31.14@sha256:e360318c07a2bb22ced43884c6884208a82d3da24828c9f1329222dd517adc06\nkind_image_kube_1.31_arm64 := docker.io/kindest/node:v1.31.14@sha256:cb9072fa3db2b4aaa4fa146193064cd1ddd3fe00666c12c5189e80d3735027b5\nkind_image_kube_1.32_amd64 := docker.io/kindest/node:v1.32.11@sha256:831a3aa45e399a20b3aef41d6d8572cc6ff07b1f76cac1242ce26be0ccf86402\nkind_image_kube_1.32_arm64 := docker.io/kindest/node:v1.32.11@sha256:6c3e552f3046d9e4b3602f642a54797ebe8bfcd18f3720cac129ae90bf802365\nkind_image_kube_1.33_amd64 := docker.io/kindest/node:v1.33.7@sha256:eb929cd8aca88dd03836180c65f3892ba8ccc79d80de1cc6666bcb9a35c1334e\nkind_image_kube_1.33_arm64 := docker.io/kindest/node:v1.33.7@sha256:09d327961491ceb25a987350e34c5335246f1e28aa48189d815f1905dea66079\nkind_image_kube_1.34_amd64 := docker.io/kindest/node:v1.34.3@sha256:babda82416d417f720a4d6dbd35deec5263af2a6c164c81c08cde0044c2b9f78\nkind_image_kube_1.34_arm64 := docker.io/kindest/node:v1.34.3@sha256:55cc745d5da0ef8c7a24a9f25f2df7cc6af0fadf85cf24bd639d2c2f02bacfab\nkind_image_kube_1.35_amd64 := 
docker.io/kindest/node:v1.35.0@sha256:b7f5e1f621afb1156eb0f27f26c804e5265c07d8e9c55516d25d66400043629b\nkind_image_kube_1.35_arm64 := docker.io/kindest/node:v1.35.0@sha256:0aa5e1a411b2c3197184286d7699424a123cd4d18c04c24317173dc5256c6110\n\nkind_image_latest_amd64 := $(kind_image_kube_1.35_amd64)\nkind_image_latest_arm64 := $(kind_image_kube_1.35_arm64)\n"
  },
  {
    "path": "make/_shared/kind/00_mod.mk",
    "content": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ninclude $(dir $(lastword $(MAKEFILE_LIST)))/00_kind_image_versions.mk\n\nimages_amd64 ?=\nimages_arm64 ?=\n\n# K8S_VERSION can be used to specify a specific\n# kubernetes version to use with Kind.\nK8S_VERSION ?=\nifeq ($(K8S_VERSION),)\nimages_amd64 += $(kind_image_latest_amd64)\nimages_arm64 += $(kind_image_latest_arm64)\nelse\nfatal_if_undefined = $(if $(findstring undefined,$(origin $1)),$(error $1 is not set))\n$(call fatal_if_undefined,kind_image_kube_$(K8S_VERSION)_amd64)\n$(call fatal_if_undefined,kind_image_kube_$(K8S_VERSION)_arm64)\n\nimages_amd64 += $(kind_image_kube_$(K8S_VERSION)_amd64)\nimages_arm64 += $(kind_image_kube_$(K8S_VERSION)_arm64)\nendif\n"
  },
  {
    "path": "make/_shared/kind/01_mod.mk",
    "content": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ninclude $(dir $(lastword $(MAKEFILE_LIST)))/kind.mk\ninclude $(dir $(lastword $(MAKEFILE_LIST)))/kind-image-preload.mk\n"
  },
  {
    "path": "make/_shared/kind/kind-image-preload.mk",
    "content": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nifndef bin_dir\n$(error bin_dir is not set)\nendif\n\nifndef images_amd64\n$(error images_amd64 is not set)\nendif\n\nifndef images_arm64\n$(error images_arm64 is not set)\nendif\n\n##########################################\n\nimages := $(images_$(HOST_ARCH))\n\nimages_tar_dir := $(bin_dir)/downloaded/containers/$(HOST_ARCH)\nimages_tars := $(foreach image,$(images),$(images_tar_dir)/$(subst :,+,$(image)).tar)\n\n# Download the images as tarballs. After downloading the image using\n# its digest, we use image-tool to modify the .[0].RepoTags[0] value in\n# the manifest.json file to have the correct tag (instead of \"i-was-a-digest\"\n# which is set when the image is pulled using its digest). This tag is used\n# to reference the image after it has been imported using docker or kind. Otherwise,\n# the image would be imported with the tag \"i-was-a-digest\" which is not very useful.\n# We would have to use digests to reference the image everywhere which might\n# not always be possible and does not match the default behavior of eg. our helm charts.\n# NOTE: the tag is fully determined based on the input, we fully allow the remote\n# tag to point to a different digest. This prevents CI from breaking due to upstream\n# changes. 
However, it also means that we can incorrectly combine digests with tags,\n# hence caution is advised.\n$(images_tars): $(images_tar_dir)/%.tar: | $(NEEDS_IMAGE-TOOL) $(NEEDS_CRANE) $(NEEDS_GOJQ)\n\t@$(eval full_image=$(subst +,:,$*))\n\t@$(eval bare_image=$(word 1,$(subst :, ,$(full_image))))\n\t@$(eval digest=$(word 2,$(subst @, ,$(full_image))))\n\t@$(eval tag=$(word 2,$(subst :, ,$(word 1,$(subst @, ,$(full_image))))))\n\t@mkdir -p $(dir $@)\n\t$(CRANE) pull \"$(bare_image)@$(digest)\" $@ --platform=linux/$(HOST_ARCH)\n\t$(IMAGE-TOOL) tag-docker-tar $@ \"$(bare_image):$(tag)\"\n\n# $1 = image\n# $2 = image:tag@sha256:digest\ndefine image_variables\n$1.TAR      := $(images_tar_dir)/$(subst :,+,$2).tar\n$1.REPO     := $1\n$1.TAG      := $(word 2,$(subst :, ,$(word 1,$(subst @, ,$2))))\n$1.FULL     := $(word 1,$(subst @, ,$2))\nendef\n\n$(foreach image,$(images),$(eval $(call image_variables,$(word 1,$(subst :, ,$(image))),$(image))))\n\n.PHONY: images-preload\n## Preload images.\n## @category [shared] Kind cluster\nimages-preload: | $(images_tars)\n"
  },
  {
    "path": "make/_shared/kind/kind.mk",
    "content": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nifndef bin_dir\n$(error bin_dir is not set)\nendif\n\nifndef kind_cluster_name\n$(error kind_cluster_name is not set)\nendif\n\nifndef kind_cluster_config\n$(error kind_cluster_config is not set)\nendif\n\n##########################################\n\nkind_kubeconfig := $(bin_dir)/scratch/kube.config\nabsolute_kubeconfig := $(CURDIR)/$(kind_kubeconfig)\n\n$(bin_dir)/scratch/cluster-check: FORCE | $(NEEDS_KIND) $(bin_dir)/scratch\n\t@if ! $(KIND) get clusters -q | grep -q \"^$(kind_cluster_name)\\$$\"; then \\\n\t\techo \"❌  cluster $(kind_cluster_name) not found. 
Starting ...\"; \\\n\t\techo \"trigger\" > $@; \\\n\telse \\\n\t\techo \"✅  existing cluster $(kind_cluster_name) found\"; \\\n\tfi\n\t$(eval export KUBECONFIG=$(absolute_kubeconfig))\n\nkind_post_create_hook ?= \n$(kind_kubeconfig): $(kind_cluster_config) $(bin_dir)/scratch/cluster-check | images-preload $(bin_dir)/scratch $(NEEDS_KIND) $(NEEDS_KUBECTL) $(NEEDS_CTR)\n\t@[ -f \"$(bin_dir)/scratch/cluster-check\" ] && ( \\\n\t\t$(KIND) delete cluster --name $(kind_cluster_name); \\\n\t\t$(CTR) load -i $(docker.io/kindest/node.TAR); \\\n\t\t$(KIND) create cluster \\\n\t\t\t--image $(docker.io/kindest/node.FULL) \\\n\t\t\t--name $(kind_cluster_name) \\\n\t\t\t--config \"$<\"; \\\n\t\t$(CTR) exec $(kind_cluster_name)-control-plane find /mounted_images/ -name \"*.tar\" -exec echo {} \\; -exec ctr --namespace=k8s.io images import --all-platforms --no-unpack --digests {} \\; ; \\\n\t\t$(MAKE) --no-print-directory noop $(kind_post_create_hook); \\\n\t\t$(KUBECTL) config use-context kind-$(kind_cluster_name); \\\n\t) || true\n\n\t$(KIND) get kubeconfig --name $(kind_cluster_name) > $@\n\n.PHONY: kind-cluster\nkind-cluster: $(kind_kubeconfig)\n\n.PHONY: kind-cluster-load\n## Create Kind cluster and wait for nodes to be ready\n## Load the kubeconfig into the default location so that\n## it can be easily queried by kubectl. 
This target is\n## meant to be used directly, NOT as a dependency.\n## Use `kind-cluster` as a dependency instead.\n## @category [shared] Kind cluster\nkind-cluster-load: kind-cluster | $(NEEDS_KUBECTL)\n\tmkdir -p ~/.kube\n\tKUBECONFIG=~/.kube/config:$(kind_kubeconfig) $(KUBECTL) config view --flatten > ~/.kube/config\n\t$(KUBECTL) config use-context kind-$(kind_cluster_name)\n\n.PHONY: kind-cluster-clean\n## Delete the Kind cluster\n## @category [shared] Kind cluster\nkind-cluster-clean: $(NEEDS_KIND)\n\t$(KIND) delete cluster --name $(kind_cluster_name)\n\trm -rf $(kind_kubeconfig)\n\t$(MAKE) --no-print-directory noop $(kind_post_create_hook)\n\n.PHONY: kind-logs\n## Get the Kind cluster logs\n## @category [shared] Kind cluster\nkind-logs: | kind-cluster $(NEEDS_KIND) $(ARTIFACTS)\n\trm -rf $(ARTIFACTS)/e2e-logs\n\tmkdir -p $(ARTIFACTS)/e2e-logs\n\t$(KIND) export logs $(ARTIFACTS)/e2e-logs --name=$(kind_cluster_name)\n"
  },
  {
    "path": "make/_shared/klone/01_mod.mk",
    "content": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n.PHONY: generate-klone\n## Generate klone shared Makefiles\n## @category [shared] Generate/ Verify\ngenerate-klone: | $(NEEDS_KLONE)\n\t$(KLONE) sync\n\nshared_generate_targets += generate-klone\n\n.PHONY: upgrade-klone\n## Upgrade klone Makefile modules to latest version\n## @category [shared] Self-upgrade\nupgrade-klone: | $(NEEDS_KLONE)\n\t$(KLONE) upgrade\n"
  },
  {
    "path": "make/_shared/licenses/00_mod.mk",
    "content": "# Copyright 2024 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Define default config for generating licenses\nlicense_ignore ?=\n"
  },
  {
    "path": "make/_shared/licenses/01_mod.mk",
    "content": "# Copyright 2024 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n###################### Generate LICENSES files ######################\n\n# _module_dir is the directory containing this Makefile, used to retrieve the path of the licenses.tmpl file\n_module_dir := $(abspath $(dir $(lastword $(MAKEFILE_LIST))))\n\n# Create a go.work file so that go-licenses can discover the LICENSE file of the\n# other modules in the repo.\n#\n# Without this, go-licenses *guesses* the wrong LICENSE for local dependencies and\n# links to the wrong versions of LICENSES for transitive dependencies.\nlicenses_go_work := $(bin_dir)/scratch/LICENSES.go.work\n$(licenses_go_work): $(bin_dir)/scratch\n\tGOWORK=$(abspath $@) \\\n\t\t$(MAKE) go-workspace\n\n## Generate licenses for the golang dependencies\n## @category [shared] Generate/ Verify\ngenerate-go-licenses: #\nshared_generate_targets += generate-go-licenses\n\ndefine licenses_target\n$1/LICENSES: $1/go.mod $(licenses_go_work) $(_module_dir)/licenses.tmpl | $(NEEDS_GO-LICENSES)\n\tcd $$(dir $$@) && \\\n\t\tGOWORK=$(abspath $(licenses_go_work)) \\\n\t\tGOOS=linux GOARCH=amd64 \\\n\t\t$(GO-LICENSES) report --ignore \"$$(license_ignore)\" --template $(_module_dir)/licenses.tmpl ./... 
> LICENSES\n\ngenerate-go-licenses: $1/LICENSES\n# The /LICENSE targets make sure these files exist.\n# Otherwise, make will error.\ngenerate-go-licenses: $1/LICENSE\nendef\n\n# Calculate all the go.mod directories, build targets may share go.mod dirs so\n# we use $(sort) to de-duplicate.\ngo_mod_dirs := $(foreach build_name,$(build_names),$(go_$(build_name)_mod_dir))\nifneq (\"$(wildcard go.mod)\",\"\")\n    go_mod_dirs += .\nendif\ngo_mod_dirs := $(sort $(go_mod_dirs))\n$(foreach go_mod_dir,$(go_mod_dirs),$(eval $(call licenses_target,$(go_mod_dir))))\n\n###################### Include LICENSES in OCI image ######################\n\ndefine license_layer\nlicense_layer_path_$1 := $$(abspath $(bin_dir)/scratch/licenses-$1)\n\n# Target to generate image layer containing license information\n.PHONY: oci-license-layer-$1\noci-license-layer-$1: | $(bin_dir)/scratch $(NEEDS_GO-LICENSES)\n\trm -rf $$(license_layer_path_$1)\n\tmkdir -p $$(license_layer_path_$1)/licenses\n\tcp $$(go_$1_mod_dir)/LICENSE $$(license_layer_path_$1)/licenses/LICENSE\n\tcp $$(go_$1_mod_dir)/LICENSES $$(license_layer_path_$1)/licenses/LICENSES\n\noci-build-$1: oci-license-layer-$1\noci-build-$1__local: oci-license-layer-$1\noci_$1_additional_layers += $$(license_layer_path_$1)\nendef\n\n$(foreach build_name,$(build_names),$(eval $(call license_layer,$(build_name))))\n"
  },
  {
    "path": "make/_shared/licenses/licenses.tmpl",
    "content": "This LICENSES file is generated by the `licenses` module in makefile-modules[0].\n\nThe licenses below the \"---\" are determined by the go-licenses tool[1].\n\nThe aim of this file is to collect the licenses of all dependencies, and provide\na single source of truth for licenses used by this project.\n\n## For Developers\n\nIf CI reports that this file is out of date, you should be careful to check that the\nnew licenses are acceptable for this project before running `make generate-go-licenses`\nto update this file.\n\nAcceptable licenses are those allowlisted by the CNCF[2].\n\nYou MUST NOT add any new dependencies whose licenses are not allowlisted by the CNCF,\nor which do not have an explicit license exception[3].\n\n## For Users\n\nIf this file was included in a release artifact, it is a snapshot of the licenses of all dependencies at the time of the release.\n\nYou can retrieve the actual license text by following these steps:\n\n1. Find the dependency name in this file\n2. Go to the source code repository of this project, and go to the tag corresponding to this release.\n3. Find the exact version of the dependency in the `go.mod` file\n4. Search for the dependency at the correct version in the [Go package index](https://pkg.go.dev/).\n\n## Links\n\n[0]: https://github.com/cert-manager/makefile-modules/\n[1]: https://github.com/google/go-licenses\n[2]: https://github.com/cncf/foundation/blob/db4179134ebe7fa00b140a050c19147db808b6fa/policies-guidance/allowed-third-party-license-policy.md#cncf-allowlist-license-policy\n[3]: https://github.com/cncf/foundation/blob/db4179134ebe7fa00b140a050c19147db808b6fa/license-exceptions/README.md\n\n---\n\n{{ range . -}}\n{{ .Name }},{{ .LicenseName }}\n{{ end -}}\n"
  },
  {
    "path": "make/_shared/oci-build/00_mod.mk",
    "content": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Use distroless as minimal base image to package the manager binary\n# To get latest SHA run \"crane digest quay.io/jetstack/base-static:latest\"\nbase_image_static := quay.io/jetstack/base-static@sha256:bcdce6869d855fb0b8808ebfc5315360e3413b9975776b5c9e8899744b1ee8a9\n\n# Use custom apko-built image as minimal base image to package the manager binary\n# To get latest SHA run \"crane digest quay.io/jetstack/base-static-csi:latest\"\nbase_image_csi-static := quay.io/jetstack/base-static-csi@sha256:e8c56285c3bd5bb98f8c0b3d30c5b28d81c087e333b6f9e3296c2eb51faca47e\n\n# Utility functions\nfatal_if_undefined = $(if $(findstring undefined,$(origin $1)),$(error $1 is not set))\nfatal_if_deprecated_defined = $(if $(findstring undefined,$(origin $1)),,$(error $1 is deprecated, use $2 instead))\n\n# Validate globals that are required\n$(call fatal_if_undefined,build_names)\n\n# Set default config values\nCGO_ENABLED ?= 0\nGOEXPERIMENT ?=  # empty by default\noci_platforms ?= linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le\n\n# Default variables per build_names entry\n#\n# $1 - build_name\ndefine default_per_build_variables\ngo_$1_cgo_enabled ?= $(CGO_ENABLED)\ngo_$1_goexperiment ?= $(GOEXPERIMENT)\ngo_$1_flags ?= -tags=\noci_$1_platforms ?= $(oci_platforms)\noci_$1_additional_layers ?= \noci_$1_linux_capabilities ?= \noci_$1_build_args ?= \nendef\n\n$(foreach 
build_name,$(build_names),$(eval $(call default_per_build_variables,$(build_name))))\n\n# Validate variables per build_names entry\n#\n# $1 - build_name\ndefine check_per_build_variables\n# Validate deprecated variables\n$(call fatal_if_deprecated_defined,cgo_enabled_$1,go_$1_cgo_enabled)\n$(call fatal_if_deprecated_defined,goexperiment_$1,go_$1_goexperiment)\n$(call fatal_if_deprecated_defined,oci_additional_layers_$1,oci_$1_additional_layers)\n\n# Validate required config exists\n$(call fatal_if_undefined,go_$1_ldflags)\n$(call fatal_if_undefined,go_$1_main_dir)\n$(call fatal_if_undefined,go_$1_mod_dir)\n$(call fatal_if_undefined,oci_$1_base_image_flavor)\n$(call fatal_if_undefined,oci_$1_image_name_development)\n\n# Validate we have valid base image config\nifeq ($(oci_$1_base_image_flavor),static)\n    oci_$1_base_image := $(base_image_static)\nelse ifeq ($(oci_$1_base_image_flavor),csi-static)\n    oci_$1_base_image := $(base_image_csi-static)\nelse ifeq ($(oci_$1_base_image_flavor),custom)\n    $$(call fatal_if_undefined,oci_$1_base_image)\nelse\n    $$(error oci_$1_base_image_flavor has unknown value \"$(oci_$1_base_image_flavor)\")\nendif\n\n# Validate the config required to build the golang based images\nifneq ($(go_$1_main_dir:.%=.),.)\n$$(error go_$1_main_dir \"$(go_$1_main_dir)\" should be a directory path that DOES start with \".\")\nendif\nifeq ($(go_$1_main_dir:%/=/),/)\n$$(error go_$1_main_dir \"$(go_$1_main_dir)\" should be a directory path that DOES NOT end with \"/\")\nendif\nifeq ($(go_$1_main_dir:%.go=.go),.go)\n$$(error go_$1_main_dir \"$(go_$1_main_dir)\" should be a directory path that DOES NOT end with \".go\")\nendif\nifneq ($(go_$1_mod_dir:.%=.),.)\n$$(error go_$1_mod_dir \"$(go_$1_mod_dir)\" should be a directory path that DOES start with \".\")\nendif\nifeq ($(go_$1_mod_dir:%/=/),/)\n$$(error go_$1_mod_dir \"$(go_$1_mod_dir)\" should be a directory path that DOES NOT end with \"/\")\nendif\nifeq ($(go_$1_mod_dir:%.go=.go),.go)\n$$(error 
go_$1_mod_dir \"$(go_$1_mod_dir)\" should be a directory path that DOES NOT end with \".go\")\nendif\nifeq ($(wildcard $(go_$1_mod_dir)/go.mod),)\n$$(error go_$1_mod_dir \"$(go_$1_mod_dir)\" does not contain a go.mod file)\nendif\nifeq ($(wildcard $(go_$1_mod_dir)/$(go_$1_main_dir)/main.go),)\n$$(error go_$1_main_dir \"$(go_$1_mod_dir)/$(go_$1_main_dir)\" does not contain a main.go file)\nendif\n\n# Validate the config required to build OCI images\nifneq ($(words $(oci_$1_image_name_development)),1)\n$$(error oci_$1_image_name_development \"$(oci_$1_image_name_development)\" should be a single image name)\nendif\n\n# Validate that the build name does not end in __local\nifeq ($(1:%__local=__local),__local)\n$$(error build_name \"$1\" SHOULD NOT end in __local)\nendif\nendef\n\n$(foreach build_name,$(build_names),$(eval $(call check_per_build_variables,$(build_name))))\n\n# Create variables holding targets\n#\n# We create the following targets for each $(build_names)\n# - oci-build-$(build_name) = build the oci directory (multi-arch)\n# - oci-build-$(build_name)__local = build the oci directory (local arch: linux/$(HOST_ARCH))\n# - oci-load-$(build_name) = load the image into docker using the oci_$(build_name)_image_name_development variable\n# - docker-tarball-$(build_name) = build a \"docker load\" compatible tarball of the image\noci_build_targets := $(build_names:%=oci-build-%)\noci_build_targets += $(build_names:%=oci-build-%__local)\noci_load_targets := $(build_names:%=oci-load-%)\ndocker_tarball_targets := $(build_names:%=docker-tarball-%)\n\n# Derive config based on user config\n# \n# - oci_layout_path_$(build_name) = path that the OCI image will be saved in OCI layout directory format\n# - oci_digest_path_$(build_name) = path to the file that will contain the digests\n# - docker_tarball_path_$(build_name) = path that the docker tarball that the docker-tarball-$(build_name) will produce\n$(foreach build_name,$(build_names),$(eval 
oci_layout_path_$(build_name) := $(bin_dir)/scratch/image/oci-layout-$(build_name)))\n$(foreach build_name,$(build_names),$(eval oci_digest_path_$(build_name) := $(CURDIR)/$(oci_layout_path_$(build_name)).digests))\n$(foreach build_name,$(build_names),$(eval docker_tarball_path_$(build_name) := $(CURDIR)/$(oci_layout_path_$(build_name)).docker.tar))\n"
  },
  {
    "path": "make/_shared/oci-build/01_mod.mk",
    "content": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n$(bin_dir)/scratch/image:\n\t@mkdir -p $@\n\n.PHONY: $(oci_build_targets)\n## Build the OCI image.\n## - oci-build-$(build_name) = build the oci directory (multi-arch)\n## - oci-build-$(build_name)__local = build the oci directory (local arch: linux/$(HOST_ARCH))\n## @category [shared] Build\n$(oci_build_targets): oci-build-%: | $(NEEDS_KO) $(NEEDS_GO) $(NEEDS_YQ) $(NEEDS_IMAGE-TOOL) $(bin_dir)/scratch/image\n\t$(eval a := $(patsubst %__local,%,$*))\n\t$(eval is_local := $(if $(findstring $a__local,$*),true))\n\t$(eval layout_path := $(if $(is_local),$(oci_layout_path_$a).local,$(oci_layout_path_$a)))\n\t$(eval digest_path := $(if $(is_local),$(oci_digest_path_$a).local,$(oci_digest_path_$a)))\n\n\trm -rf $(CURDIR)/$(layout_path)\n\n\techo '{}' | \\\n\t\t$(YQ) '.defaultBaseImage = \"$(oci_$a_base_image)\"' | \\\n\t\t$(YQ) '.builds[0].id = \"$a\"' | \\\n\t\t$(YQ) '.builds[0].dir = \"$(go_$a_mod_dir)\"' | \\\n\t\t$(YQ) '.builds[0].main = \"$(go_$a_main_dir)\"' | \\\n\t\t$(YQ) '.builds[0].env[0] = \"CGO_ENABLED=$(go_$a_cgo_enabled)\"' | \\\n\t\t$(YQ) '.builds[0].env[1] = \"GOEXPERIMENT=$(go_$a_goexperiment)\"' | \\\n\t\t$(YQ) '.builds[0].ldflags[0] = \"-s\"' | \\\n\t\t$(YQ) '.builds[0].ldflags[1] = \"-w\"' | \\\n\t\t$(YQ) '.builds[0].ldflags[2] = \"{{.Env.LDFLAGS}}\"' | \\\n\t\t$(YQ) '.builds[0].flags[0] = \"$(go_$a_flags)\"' | \\\n\t\t$(YQ) 
'.builds[0].linux_capabilities = \"$(oci_$a_linux_capabilities)\"' \\\n\t\t> $(CURDIR)/$(layout_path).ko_config.yaml\n\n\tGOWORK=off \\\n\tKO_DOCKER_REPO=$(oci_$a_image_name_development) \\\n\tKOCACHE=$(CURDIR)/$(bin_dir)/scratch/image/ko_cache \\\n\tKO_CONFIG_PATH=$(CURDIR)/$(layout_path).ko_config.yaml \\\n\tSOURCE_DATE_EPOCH=$(GITEPOCH) \\\n\tKO_GO_PATH=$(GO) \\\n\tLDFLAGS=\"$(go_$a_ldflags)\" \\\n\t$(KO) build $(go_$a_mod_dir)/$(go_$a_main_dir) \\\n\t\t--platform=$(if $(is_local),linux/$(HOST_ARCH),$(oci_$a_platforms)) \\\n\t\t$(oci_$a_build_args) \\\n\t\t--oci-layout-path=$(layout_path) \\\n\t\t--sbom-dir=$(CURDIR)/$(layout_path).sbom \\\n\t\t--sbom=spdx \\\n\t\t--push=false \\\n\t\t--bare\n\n\t$(IMAGE-TOOL) append-layers \\\n\t\t$(CURDIR)/$(layout_path) \\\n\t\t$(oci_$a_additional_layers)\n\n\t$(IMAGE-TOOL) list-digests \\\n\t\t$(CURDIR)/$(layout_path) \\\n\t\t> $(digest_path)\n\n# Only include the oci-load target if kind is provided by the kind makefile-module\nifdef kind_cluster_name\n.PHONY: $(oci_load_targets)\n## Build OCI image for the local architecture and load\n## it into the $(kind_cluster_name) kind cluster.\n## @category [shared] Build\n$(oci_load_targets): oci-load-%: docker-tarball-% | kind-cluster $(NEEDS_KIND)\n\t$(KIND) load image-archive --name $(kind_cluster_name) $(docker_tarball_path_$*)\nendif\n\n## Build Docker tarball image for the local architecture\n## @category [shared] Build\n.PHONY: $(docker_tarball_targets)\n$(docker_tarball_targets): docker-tarball-%: oci-build-%__local | $(NEEDS_GO) $(NEEDS_IMAGE-TOOL)\n\t$(IMAGE-TOOL) convert-to-docker-tar $(CURDIR)/$(oci_layout_path_$*).local $(docker_tarball_path_$*) $(oci_$*_image_name_development):$(oci_$*_image_tag)\n"
  },
  {
    "path": "make/_shared/oci-publish/00_mod.mk",
    "content": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Push names is equivalent to build_names, additional names can be added for \n# pushing images that are not build with the oci-build module\npush_names ?=\npush_names += $(build_names)\n\n# Sometimes we need to push to one registry, but pull from another. This allows\n# that.\n#\n# The lines should be in the format a=b\n#\n# The value on the left is the domain you include in your oci_<name>_image_name\n# variable, the one on the right is the domain that is actually pushed to.\n#\n# For example, if we set up a vanity domain for the current quay:\n# \n#   oci_controller_image_name = registry.cert-manager.io/cert-manager-controller` \n#   image_registry_rewrite += registry.cert-manager.io=quay.io/jetstack\n#\n# This would push to quay.io/jetstack/cert-manager-controller.\n#\n# The general idea is oci_<name>_image_name contains the final image name, after replication, after vanity domains etc.\n\nimage_registry_rewrite ?= \n\n# Utilities for extracting the key and value from a foo=bar style line\nkv_key = $(word 1,$(subst =, ,$1))\nkv_value = $(word 2,$(subst =, ,$1))\n\n# Apply the image_registry_rewrite rules, if no rules match an image then the \n# image name is not changed. 
Any rules that match will be applied.\n#\n# For example, if there was a rule vanity-domain.com=real-registry.com/foo\n# then any references to vanity-domain.com/image would be rewritten to \n# real-registry.com/foo/image\nimage_registry_rewrite_rules_for_image = $(strip $(sort $(foreach rule,$(image_registry_rewrite),$(if $(findstring $(call kv_key,$(rule)),$1),$(rule)))))\napply_image_registry_rewrite_rules_to_image = $(if $(call image_registry_rewrite_rules_for_image,$1),\\\n\t$(foreach rule,$(call image_registry_rewrite_rules_for_image,$1),$(subst $(call kv_key,$(rule)),$(call kv_value,$(rule)),$1)),\\\n\t$1)\napply_image_registry_rewrite_rules = $(foreach image_name,$1,$(call apply_image_registry_rewrite_rules_to_image,$(image_name)))\n\n# This is a helper function to return the image names for a given build_name.\n# It will apply all rewrite rules to the image names\noci_image_names_for = $(call apply_image_registry_rewrite_rules,$(oci_$1_image_name))\noci_image_tag_for = $(oci_$1_image_tag)"
  },
  {
    "path": "make/_shared/oci-publish/01_mod.mk",
    "content": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Utility functions\nfatal_if_undefined = $(if $(findstring undefined,$(origin $1)),$(error $1 is not set))\noci_digest = $(shell head -1 $(oci_digest_path_$1) 2> /dev/null)\nsanitize_target = $(subst :,-,$1)\nregistry_for = $(firstword $(subst /, ,$1))\n\n# Utility variables\ncurrent_makefile_directory := $(dir $(lastword $(MAKEFILE_LIST)))\nimage_exists_script := $(current_makefile_directory)/image-exists.sh\n\n# Validate globals that are required\n$(call fatal_if_undefined,bin_dir)\n$(call fatal_if_undefined,push_names)\n\n# Set default config values\nRELEASE_DRYRUN ?= false\nCRANE_FLAGS ?= # empty by default\nCOSIGN_FLAGS ?= # empty by default\nOCI_SIGN_ON_PUSH ?= true\n\n# Default variables per push_names entry\n#\n# $1 - build_name\ndefine default_per_build_variables\nrelease_dryrun_$1 ?= $(RELEASE_DRYRUN)\ncrane_flags_$1 ?= $(CRANE_FLAGS)\ncosign_flags_$1 ?= $(COSIGN_FLAGS)\noci_sign_on_push_$1 ?= $(OCI_SIGN_ON_PUSH)\nendef\n\n$(foreach build_name,$(push_names),$(eval $(call default_per_build_variables,$(build_name))))\n\n# Validate variables per push_names entry\n#\n# $1 - build_name\ndefine check_per_build_variables\n$(call fatal_if_undefined,oci_digest_path_$1)\n$(call fatal_if_undefined,oci_layout_path_$1)\n$(call fatal_if_undefined,oci_$1_image_name)\n$(call fatal_if_undefined,oci_$1_image_tag)\nendef\n\n$(foreach 
build_name,$(push_names),$(eval $(call check_per_build_variables,$(build_name))))\n\n# Create variables holding targets\n#\n# We create the following targets for each $(push_names)\n# - oci-push-$(build_name) = push the built image to each of its registries and, if enabled, sign it\n# - oci-maybe-push-$(build_name) = push the image only if its tag does not already exist in the registry\n# - oci-sign-$(build_name) = sign the pushed image using cosign\noci_push_targets := $(push_names:%=oci-push-%)\noci_sign_targets := $(push_names:%=oci-sign-%)\noci_maybe_push_targets := $(push_names:%=oci-maybe-push-%)\n\n# Define push target \n# $1 - build_name\n# $2 - image_name\ndefine oci_push_target\n.PHONY: $(call sanitize_target,oci-push-$2)\n$(call sanitize_target,oci-push-$2): oci-build-$1 | $(NEEDS_CRANE)\n\t$$(CRANE) $(crane_flags_$1) push \"$(oci_layout_path_$1)\" \"$2:$(call oci_image_tag_for,$1)\"\n\t$(if $(filter true,$(oci_sign_on_push_$1)),$(MAKE) $(call sanitize_target,oci-sign-$2))\n\n.PHONY: $(call sanitize_target,oci-maybe-push-$2)\n$(call sanitize_target,oci-maybe-push-$2): oci-build-$1 | $(NEEDS_CRANE)\n\tCRANE=\"$$(CRANE) $(crane_flags_$1)\" \\\n\tsource $(image_exists_script) $2:$(call oci_image_tag_for,$1); \\\n\t\t$$(CRANE) $(crane_flags_$1) push \"$(oci_layout_path_$1)\" \"$2:$(call oci_image_tag_for,$1)\"; \\\n\t\t$(if $(filter true,$(oci_sign_on_push_$1)),$(MAKE) $(call sanitize_target,oci-sign-$2))\n\noci-push-$1: $(call sanitize_target,oci-push-$2)\noci-maybe-push-$1: $(call sanitize_target,oci-maybe-push-$2)\nendef\n\noci_push_target_per_image = $(foreach image_name,$2,$(eval $(call oci_push_target,$1,$(image_name))))\n$(foreach build_name,$(push_names),$(eval $(call oci_push_target_per_image,$(build_name),$(call oci_image_names_for,$(build_name)))))\n\n.PHONY: $(oci_push_targets)\n## Build and push OCI image.\n## If the tag already exists, this target will overwrite it.\n## If an 
identical image was already built before, we will add a new tag to it, but we will not sign it again.\n## Expected pushed images:\n## - :v1.2.3, @sha256:0000001\n## - :v1.2.3.sig, :sha256-0000001.sig\n## @category [shared] Publish\n$(oci_push_targets):\n\n.PHONY: $(oci_maybe_push_targets)\n## Push image if tag does not already exist in registry.\n## @category [shared] Publish\n$(oci_maybe_push_targets):\n\n# Define sign target \n# $1 - build_name\n# $2 - image_name\ndefine oci_sign_target\n.PHONY: $(call sanitize_target,oci-sign-$2)\n$(call sanitize_target,oci-sign-$2): $(oci_digest_path_$1) | $(NEEDS_CRANE) $(NEEDS_COSIGN)\n\t$$(CRANE) $(crane_flags_$1) manifest $2:$$(subst :,-,$$(call oci_digest,$1)).sig > /dev/null 2>&1 || \\\n\t\t$$(COSIGN) sign --yes=true $(cosign_flags_$1) \"$2@$$(call oci_digest,$1)\"\n\noci-sign-$1: $(call sanitize_target,oci-sign-$2)\nendef\n\noci_sign_target_per_image = $(foreach image_name,$2,$(eval $(call oci_sign_target,$1,$(image_name))))\n$(foreach build_name,$(push_names),$(eval $(call oci_sign_target_per_image,$(build_name),$(call oci_image_names_for,$(build_name)))))\n\n.PHONY: $(oci_sign_targets)\n## Sign an OCI image.\n## If a signature already exists, this will not overwrite it.\n## @category [shared] Publish\n$(oci_sign_targets):"
  },
  {
    "path": "make/_shared/oci-publish/image-exists.sh",
    "content": "#!/usr/bin/env bash\n\n# Copyright 2022 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -o errexit\nset -o nounset\nset -o pipefail\n\n# This script checks if a given image exists in the upstream registry, and if it\n# does, whether it contains all the expected architectures.\n\ncrane=${CRANE:-}\n\nFULL_IMAGE=${1:-}\n\nfunction print_usage() {\n\techo \"usage: $0 <full-image> [commands...]\"\n}\n\nif [[ -z $FULL_IMAGE ]]; then\n\tprint_usage\n\techo \"Missing full-image\"\n\texit 1\nfi\n\nif [[ -z $crane ]]; then\n    echo \"CRANE environment variable must be set to the path of the crane binary\"\n    exit 1\nfi\n\nshift 1\n\nmanifest=$(mktemp)\ntrap 'rm -f \"$manifest\"' EXIT SIGINT\n\nmanifest_error=$(mktemp)\ntrap 'rm -f \"$manifest_error\"' EXIT SIGINT\n\necho \"+++ searching for $FULL_IMAGE in upstream registry\"\n\nset +o errexit\n$crane manifest \"$FULL_IMAGE\" > \"$manifest\" 2> \"$manifest_error\"\nexit_code=$?\nset -o errexit\n\nmanifest_error_data=$(cat \"$manifest_error\")\nif [[ $exit_code -eq 0 ]]; then\n    echo \"+++ upstream registry appears to contain $FULL_IMAGE, exiting\"\n\texit 0\n\nelif [[ \"$manifest_error_data\" == *\"MANIFEST_UNKNOWN\"* ]]; then\n    echo \"+++ upstream registry does not contain $FULL_IMAGE, will build and push\"\n    # fall through to run the commands passed to this script\n\nelse\n\techo \"FATAL: upstream registry returned an unexpected error: $manifest_error_data, 
exiting\"\n\texit 1\nfi\n"
  },
  {
    "path": "make/_shared/repository-base/01_mod.mk",
    "content": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nifndef repo_name\n$(error repo_name is not set)\nendif\n\n_repository_base_module_dir := $(dir $(lastword $(MAKEFILE_LIST)))\nrepository_base_dir := $(_repository_base_module_dir)base/\n\n.PHONY: generate-base\n## Generate base files in the repository\n## @category [shared] Generate/ Verify\ngenerate-base:\n\tcp -r $(repository_base_dir)/. ./\n\tcd $(repository_base_dir) && \\\n\t\tfind . -type f | while read file; do \\\n\t\t\tsed \"s|{{REPLACE:GH-REPOSITORY}}|$(repo_name:github.com/%=%)|g\" \"$$file\" > \"$(CURDIR)/$$file\"; \\\n\t\tdone\n\tif [ ! -e ./.github/renovate.json5 ]; then \\\n\t\tmkdir -p ./.github; \\\n\t\tcp $(_repository_base_module_dir)/renovate-bootstrap-config.json5 ./.github/renovate.json5; \\\n\tfi\n\nshared_generate_targets += generate-base\n"
  },
  {
    "path": "make/_shared/repository-base/base/.github/chainguard/make-self-upgrade.sts.yaml",
    "content": "# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.\n# Edit https://github.com/cert-manager/makefile-modules/blob/main/modules/repository-base/base/.github/chainguard/make-self-upgrade.sts.yaml instead.\n\nissuer: https://token.actions.githubusercontent.com\nsubject_pattern: ^repo:{{REPLACE:GH-REPOSITORY}}:ref:refs/heads/(main|master)$\n\npermissions:\n  contents: write\n  pull_requests: write\n  workflows: write\n"
  },
  {
    "path": "make/_shared/repository-base/base/.github/workflows/make-self-upgrade.yaml",
    "content": "# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.\n# Edit https://github.com/cert-manager/makefile-modules/blob/main/modules/repository-base/base/.github/workflows/make-self-upgrade.yaml instead.\n\nname: make-self-upgrade\nconcurrency: make-self-upgrade\non:\n  workflow_dispatch: {}\n  schedule:\n    - cron: '0 0 * * *'\n\npermissions:\n  contents: read\n\njobs:\n  self_upgrade:\n    runs-on: ubuntu-latest\n\n    if: github.repository == '{{REPLACE:GH-REPOSITORY}}'\n\n    permissions:\n      id-token: write\n    \n    env:\n      SOURCE_BRANCH: \"${{ github.ref_name }}\"\n      SELF_UPGRADE_BRANCH: \"self-upgrade-${{ github.ref_name }}\"\n\n    steps:\n      - name: Fail if branch is not head of branch.\n        if: ${{ !startsWith(github.ref, 'refs/heads/') && env.SOURCE_BRANCH != '' && env.SELF_UPGRADE_BRANCH != '' }}\n        run: |\n          echo \"This workflow should not be run on a non-branch-head.\"\n          exit 1\n\n      - name: Octo STS Token Exchange\n        uses: octo-sts/action@f603d3be9d8dd9871a265776e625a27b00effe05 # v1.1.1\n        id: octo-sts\n        with:\n          scope: '{{REPLACE:GH-REPOSITORY}}'\n          identity: make-self-upgrade\n\n      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2\n        # Adding `fetch-depth: 0` makes sure tags are also fetched. 
We need\n        # the tags so `git describe` returns a valid version.\n        # see https://github.com/actions/checkout/issues/701 for extra info about this option\n        with:\n          fetch-depth: 0\n          token: ${{ steps.octo-sts.outputs.token }}\n\n      - id: go-version\n        run: |\n          make print-go-version >> \"$GITHUB_OUTPUT\"\n\n      - uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0\n        with:\n          go-version: ${{ steps.go-version.outputs.result }}\n\n      - run: |\n          git checkout -B \"$SELF_UPGRADE_BRANCH\"\n\n      - run: |\n          make -j upgrade-klone\n          make -j generate\n\n      - id: is-up-to-date\n        shell: bash\n        run: |\n          git_status=$(git status -s)\n          is_up_to_date=\"true\"\n          if [ -n \"$git_status\" ]; then\n              is_up_to_date=\"false\"\n              echo \"The following changes will be committed:\"\n              echo \"$git_status\"\n          fi\n          echo \"result=$is_up_to_date\" >> \"$GITHUB_OUTPUT\"\n\n      - if: ${{ steps.is-up-to-date.outputs.result != 'true' }}\n        run: |\n          git config --global user.name \"cert-manager-bot\"\n          git config --global user.email \"cert-manager-bot@users.noreply.github.com\"\n          git add -A && git commit -m \"BOT: run 'make upgrade-klone' and 'make generate'\" --signoff\n          git push -f origin \"$SELF_UPGRADE_BRANCH\"\n\n      - if: ${{ steps.is-up-to-date.outputs.result != 'true' }}\n        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0\n        with:\n          github-token: ${{ steps.octo-sts.outputs.token }}\n          script: |\n            const { repo, owner } = context.repo;\n            const pulls = await github.rest.pulls.list({\n              owner: owner,\n              repo: repo,\n              head: owner + ':' + process.env.SELF_UPGRADE_BRANCH,\n              base: process.env.SOURCE_BRANCH,\n        
      state: 'open',\n            });\n            \n            if (pulls.data.length < 1) {\n              const result = await github.rest.pulls.create({\n                title: '[CI] Merge ' + process.env.SELF_UPGRADE_BRANCH + ' into ' + process.env.SOURCE_BRANCH,\n                owner: owner,\n                repo: repo,\n                head: process.env.SELF_UPGRADE_BRANCH,\n                base: process.env.SOURCE_BRANCH,\n                body: [\n                  'This PR is auto-generated to bump the Makefile modules.',\n                ].join('\\n'),\n              });\n              await github.rest.issues.addLabels({\n                owner,\n                repo,\n                issue_number: result.data.number,\n                labels: ['ok-to-test', 'skip-review', 'release-note-none', 'kind/cleanup']\n              });\n            }\n"
  },
  {
    "path": "make/_shared/repository-base/base/Makefile",
    "content": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.\n# Edit https://github.com/cert-manager/makefile-modules/blob/main/modules/repository-base/base/Makefile instead.\n\n# NOTE FOR DEVELOPERS: \"How do the Makefiles work and how can I extend them?\"\n#\n# Shared Makefile logic lives in the make/_shared/ directory. The source of truth for these files\n# lies outside of this repository, eg. in the cert-manager/makefile-modules repository.\n#\n# Logic specific to this repository must be defined in the make/00_mod.mk and make/02_mod.mk files:\n#   - The make/00_mod.mk file is included first and contains variable definitions needed by\n#     the shared Makefile logic.\n#   - The make/02_mod.mk file is included later, it can make use of most of the shared targets\n#     defined in the make/_shared/ directory (all targets defined in 00_mod.mk and 01_mod.mk).\n#     This file should be used to define targets specific to this repository.\n\n##################################\n\n# Some modules build their dependencies from variables, we want these to be \n# evaluated at the last possible moment. 
For this we use second expansion to \n# re-evaluate the generate and verify targets a second time.\n#\n# See https://www.gnu.org/software/make/manual/html_node/Secondary-Expansion.html\n.SECONDEXPANSION:\n\n# For details on some of these \"prelude\" settings, see:\n# https://clarkgrubb.com/makefile-style-guide\nMAKEFLAGS += --warn-undefined-variables --no-builtin-rules\nSHELL := /usr/bin/env bash\n# The `--norc` option prevents \"PS1: unbound\" errors.\n# If Bash thinks it is being run with its standard input connected to a network\n# connection (such as via SSH or via Docker), it reads and executes commands\n# from ~/.bashrc, regardless of whether it thinks it is in interactive mode.\n# Bash does not set PS1 in non-interactive environments. But on Ubuntu 24.04 the\n# default /etc/bash.bashrc file assumes that PS1 is set.\n#\n# See https://www.gnu.org/software/bash/manual/bash.html#Invoked-by-remote-shell-daemon\n.SHELLFLAGS := --norc -uo pipefail -c\n.DEFAULT_GOAL := help\n.DELETE_ON_ERROR:\n.SUFFIXES:\nFORCE:\n\nnoop: # do nothing\n\n# Set empty value for MAKECMDGOALS to prevent the \"warning: undefined variable 'MAKECMDGOALS'\"\n# warning from happening when running make without arguments\nMAKECMDGOALS ?=\n\n##################################\n# Host OS and architecture setup #\n##################################\n\n# The reason we don't use \"go env GOOS\" or \"go env GOARCH\" is that the \"go\"\n# binary may not be available in the PATH yet when the Makefiles are\n# evaluated. 
HOST_OS and HOST_ARCH only support Linux, *BSD and macOS (M1\n# and Intel).\nhost_os := $(shell uname -s | tr A-Z a-z)\nhost_arch := $(shell uname -m)\nHOST_OS ?= $(host_os)\nHOST_ARCH ?= $(host_arch)\n\nifeq (x86_64, $(HOST_ARCH))\n\tHOST_ARCH = amd64\nelse ifeq (aarch64, $(HOST_ARCH))\n\t# linux reports the arm64 arch as aarch64\n\tHOST_ARCH = arm64\nendif\n\n##################################\n# Git and versioning information #\n##################################\n\ngit_version := $(shell git describe --tags --always --match='v*' --abbrev=14 --dirty)\nVERSION ?= $(git_version)\nIS_PRERELEASE := $(shell git describe --tags --always --match='v*' --abbrev=0 | grep -q '-' && echo true || echo false)\nGITCOMMIT := $(shell git rev-parse HEAD)\nGITEPOCH := $(shell git show -s --format=%ct HEAD)\n\n##################################\n# Global variables and dirs      #\n##################################\n\nbin_dir := _bin\n\n# The ARTIFACTS environment variable is set by the CI system to a directory\n# where artifacts should be placed. These artifacts are then uploaded to a\n# storage bucket by the CI system (https://docs.prow.k8s.io/docs/components/pod-utilities/).\n# An example of such an artifact is a jUnit XML file containing test results.\n# If the ARTIFACTS environment variable is not set, we default to a local\n# directory in the _bin directory.\nARTIFACTS ?= $(bin_dir)/artifacts\n\n$(bin_dir) $(ARTIFACTS) $(bin_dir)/scratch:\n\tmkdir -p $@\n\n.PHONY: clean\n## Clean all temporary files\n## @category [shared] Tools\nclean:\n\trm -rf $(bin_dir)\n\n##################################\n# Include all the Makefiles      #\n##################################\n\n-include make/00_mod.mk\n-include make/_shared/*/00_mod.mk\n-include make/_shared/*/01_mod.mk\n-include make/02_mod.mk\n-include make/_shared/*/02_mod.mk\n"
  },
  {
    "path": "make/_shared/repository-base/base/OWNERS_ALIASES",
    "content": "# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.\n# Edit https://github.com/cert-manager/makefile-modules/blob/main/modules/repository-base/base/OWNERS_ALIASES instead.\n\naliases:\n  cm-maintainers:\n    - munnerz\n    - joshvanl\n    - wallrj\n    - jakexks\n    - maelvls\n    - sgtcodfish\n    - inteon\n    - thatsmrtalbot\n    - erikgb\n    - hjoshi123\n"
  },
  {
    "path": "make/_shared/repository-base/renovate-bootstrap-config.json5",
    "content": "{\n  $schema: 'https://docs.renovatebot.com/renovate-schema.json',\n  extends: [\n    'github>cert-manager/makefile-modules:renovate-config.json5',\n  ],\n}\n"
  },
  {
    "path": "make/_shared/tools/00_mod.mk",
    "content": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nifndef bin_dir\n$(error bin_dir is not set)\nendif\n\n##########################################\n\ndefault_shared_dir := $(CURDIR)/$(bin_dir)\n# If $(HOME) is set and $(CI) is not, use the $(HOME)/.cache\n# folder to store downloaded binaries.\nifneq ($(shell printenv HOME),)\nifeq ($(shell printenv CI),)\ndefault_shared_dir := $(HOME)/.cache/makefile-modules\nendif\nendif\n\nexport DOWNLOAD_DIR ?= $(default_shared_dir)/downloaded\nexport GOVENDOR_DIR ?= $(default_shared_dir)/go_vendor\n\n# https://go.dev/dl/\n# renovate: datasource=golang-version packageName=go\nVENDORED_GO_VERSION := 1.26.2\n\n$(bin_dir)/tools $(DOWNLOAD_DIR)/tools:\n\t@mkdir -p $@\n\ncheckhash_script := $(dir $(lastword $(MAKEFILE_LIST)))/util/checkhash.sh\nlock_script := $(dir $(lastword $(MAKEFILE_LIST)))/util/lock.sh\n\n# $outfile is a variable in the lock script\n# Escape the dollar sign so it's passed literally to the shell script, not expanded by make\noutfile := $$outfile\n\n# Helper function to iterate over key=value pairs and call a function for each pair\n# Usage: $(call for_each_kv,function_name,list_of_key=value_pairs)\n# For each item, splits on \"=\" and calls function_name with key as $1 and value as $2\nfor_each_kv = $(foreach item,$2,$(eval $(call $1,$(word 1,$(subst =, ,$(item))),$(word 2,$(subst =, ,$(item))))))\n\n# To make sure we use the right version of each 
tool, we put symlink in\n# $(bin_dir)/tools, and the actual binaries are in $(bin_dir)/downloaded. When bumping\n# the version of the tools, this symlink gets updated.\n\n# Let's have $(bin_dir)/tools in front of the PATH so that we don't inadvertently\n# pick up the wrong binary somewhere. Watch out, $(shell echo $$PATH) will\n# still print the original PATH, since GNU make does not honor exported\n# variables: https://stackoverflow.com/questions/54726457\nexport PATH := $(CURDIR)/$(bin_dir)/tools:$(PATH)\n\nCTR ?= docker\n.PHONY: __require-ctr\nifneq ($(shell command -v $(CTR) >/dev/null || echo notfound),)\n__require-ctr:\n\t@:$(error \"$(CTR) (or set CTR to a docker-compatible tool)\")\nendif\nNEEDS_CTR = __require-ctr\n\ntools :=\n# https://github.com/helm/helm/releases\n# renovate: datasource=github-releases packageName=helm/helm\ntools += helm=v4.1.4\n# https://github.com/helm-unittest/helm-unittest/releases\n# renovate: datasource=github-releases packageName=helm-unittest/helm-unittest\ntools += helm-unittest=v1.0.3\n# https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl\n# renovate: datasource=github-releases packageName=kubernetes/kubernetes\ntools += kubectl=v1.35.4\n# https://github.com/kubernetes-sigs/kind/releases\n# renovate: datasource=github-releases packageName=kubernetes-sigs/kind\ntools += kind=v0.31.0\n# https://www.vaultproject.io/downloads\n# renovate: datasource=github-releases packageName=hashicorp/vault\ntools += vault=v1.21.4\n# https://github.com/Azure/azure-workload-identity/releases\n# renovate: datasource=github-releases packageName=Azure/azure-workload-identity\ntools += azwi=v1.5.1\n# https://github.com/kyverno/kyverno/releases\n# renovate: datasource=github-releases packageName=kyverno/kyverno\ntools += kyverno=v1.17.1\n# https://github.com/mikefarah/yq/releases\n# renovate: datasource=github-releases packageName=mikefarah/yq\ntools += yq=v4.53.2\n# 
https://github.com/ko-build/ko/releases\n# renovate: datasource=github-releases packageName=ko-build/ko\ntools += ko=0.18.1\n# https://github.com/protocolbuffers/protobuf/releases\n# renovate: datasource=github-releases packageName=protocolbuffers/protobuf\ntools += protoc=v34.1\n# https://github.com/aquasecurity/trivy/releases\n# renovate: datasource=github-releases packageName=aquasecurity/trivy\ntools += trivy=v0.70.0\n# https://github.com/vmware-tanzu/carvel-ytt/releases\n# renovate: datasource=github-releases packageName=vmware-tanzu/carvel-ytt\ntools += ytt=v0.53.2\n# https://github.com/rclone/rclone/releases\n# renovate: datasource=github-releases packageName=rclone/rclone\ntools += rclone=v1.73.4\n# https://github.com/istio/istio/releases\n# renovate: datasource=github-releases packageName=istio/istio\ntools += istioctl=1.29.2\n\n### go packages\n# https://pkg.go.dev/sigs.k8s.io/controller-tools/cmd/controller-gen?tab=versions\n# renovate: datasource=go packageName=sigs.k8s.io/controller-tools\ntools += controller-gen=v0.20.1\n# https://pkg.go.dev/golang.org/x/tools/cmd/goimports?tab=versions\n# renovate: datasource=go packageName=golang.org/x/tools\ntools += goimports=v0.44.0\n# https://pkg.go.dev/github.com/google/go-licenses/v2?tab=versions\n# renovate: datasource=go packageName=github.com/inteon/go-licenses/v2\ntools += go-licenses=v2.0.0-20250821024731-e4be79958780\n# https://pkg.go.dev/gotest.tools/gotestsum?tab=versions\n# renovate: datasource=github-releases packageName=gotestyourself/gotestsum\ntools += gotestsum=v1.13.0\n# https://pkg.go.dev/sigs.k8s.io/kustomize/kustomize/v5?tab=versions\n# renovate: datasource=go packageName=sigs.k8s.io/kustomize/kustomize/v5\ntools += kustomize=v5.8.1\n# https://pkg.go.dev/github.com/itchyny/gojq?tab=versions\n# renovate: datasource=go packageName=github.com/itchyny/gojq\ntools += gojq=v0.12.19\n# https://pkg.go.dev/github.com/google/go-containerregistry/pkg/crane?tab=versions\n# renovate: datasource=go 
packageName=github.com/google/go-containerregistry\ntools += crane=v0.21.5\n# https://pkg.go.dev/google.golang.org/protobuf/cmd/protoc-gen-go?tab=versions\n# renovate: datasource=go packageName=google.golang.org/protobuf\ntools += protoc-gen-go=v1.36.11\n# https://pkg.go.dev/github.com/sigstore/cosign/v2/cmd/cosign?tab=versions\n# renovate: datasource=go packageName=github.com/sigstore/cosign/v2\ntools += cosign=v2.6.3\n# https://pkg.go.dev/github.com/cert-manager/boilersuite?tab=versions\n# renovate: datasource=go packageName=github.com/cert-manager/boilersuite\ntools += boilersuite=v0.2.0\n# https://pkg.go.dev/github.com/princjef/gomarkdoc/cmd/gomarkdoc?tab=versions\n# renovate: datasource=go packageName=github.com/princjef/gomarkdoc\ntools += gomarkdoc=v1.1.0\n# https://pkg.go.dev/oras.land/oras/cmd/oras?tab=versions\n# renovate: datasource=go packageName=oras.land/oras\ntools += oras=v1.3.1\n# https://pkg.go.dev/github.com/onsi/ginkgo/v2/ginkgo?tab=versions\n# The gingko version should be kept in sync with the version used in code.\n# If there is no go.mod file (which is only the case for the makefile-modules\n# repo), then we default to a version that we know exists. 
We have to do this\n# because otherwise the awk failure renders the whole makefile unusable.\ndetected_ginkgo_version := $(shell [[ -f go.mod ]] && awk '/ginkgo\\/v2/ {print $$2}' go.mod || echo \"v2.23.4\")\ntools += ginkgo=$(detected_ginkgo_version)\n# https://pkg.go.dev/github.com/cert-manager/klone?tab=versions\n# renovate: datasource=go packageName=github.com/cert-manager/klone\ntools += klone=v0.2.0\n# https://pkg.go.dev/github.com/goreleaser/goreleaser/v2?tab=versions\n# renovate: datasource=go packageName=github.com/goreleaser/goreleaser/v2\ntools += goreleaser=v2.15.3\n# https://pkg.go.dev/github.com/anchore/syft/cmd/syft?tab=versions\n# renovate: datasource=go packageName=github.com/anchore/syft\ntools += syft=v1.42.4\n# https://github.com/cert-manager/helm-tool/releases\n# renovate: datasource=github-releases packageName=cert-manager/helm-tool\ntools += helm-tool=v0.5.3\n# https://github.com/cert-manager/image-tool/releases\n# renovate: datasource=github-releases packageName=cert-manager/image-tool\ntools += image-tool=v0.1.0\n# https://github.com/cert-manager/cmctl/releases\n# renovate: datasource=github-releases packageName=cert-manager/cmctl\ntools += cmctl=v2.4.1\n# https://pkg.go.dev/github.com/cert-manager/release/cmd/cmrel?tab=versions\n# renovate: datasource=go packageName=github.com/cert-manager/release\ntools += cmrel=v1.12.15-0.20241121151736-e3cbe5171488\n# https://pkg.go.dev/github.com/golangci/golangci-lint/v2/cmd/golangci-lint?tab=versions\n# renovate: datasource=go packageName=github.com/golangci/golangci-lint/v2\ntools += golangci-lint=v2.11.4\n# https://pkg.go.dev/golang.org/x/vuln?tab=versions\n# renovate: datasource=go packageName=golang.org/x/vuln\ntools += govulncheck=v1.2.0\n# https://github.com/operator-framework/operator-sdk/releases\n# renovate: datasource=github-releases packageName=operator-framework/operator-sdk\ntools += operator-sdk=v1.42.2\n# https://pkg.go.dev/github.com/cli/cli/v2?tab=versions\n# renovate: datasource=go 
packageName=github.com/cli/cli/v2\ntools += gh=v2.90.0\n# https://github.com/redhat-openshift-ecosystem/openshift-preflight/releases\n# renovate: datasource=github-releases packageName=redhat-openshift-ecosystem/openshift-preflight\ntools += preflight=1.17.1\n# https://github.com/daixiang0/gci/releases\n# renovate: datasource=github-releases packageName=daixiang0/gci\ntools += gci=v0.14.0\n# https://github.com/google/yamlfmt/releases\n# renovate: datasource=github-releases packageName=google/yamlfmt\ntools += yamlfmt=v0.21.0\n# https://github.com/yannh/kubeconform/releases\n# renovate: datasource=github-releases packageName=yannh/kubeconform\ntools += kubeconform=v0.7.0\n\n# FIXME(erikgb): cert-manager needs the ability to override the version set here\n# https://pkg.go.dev/k8s.io/code-generator/cmd?tab=versions\n# renovate: datasource=go packageName=k8s.io/code-generator\nK8S_CODEGEN_VERSION ?= v0.35.4\ntools += client-gen=$(K8S_CODEGEN_VERSION)\ntools += deepcopy-gen=$(K8S_CODEGEN_VERSION)\ntools += informer-gen=$(K8S_CODEGEN_VERSION)\ntools += lister-gen=$(K8S_CODEGEN_VERSION)\ntools += applyconfiguration-gen=$(K8S_CODEGEN_VERSION)\ntools += defaulter-gen=$(K8S_CODEGEN_VERSION)\ntools += conversion-gen=$(K8S_CODEGEN_VERSION)\n# https://github.com/kubernetes/kube-openapi\n# renovate: datasource=go packageName=k8s.io/kube-openapi\ntools += openapi-gen=v0.0.0-20260414162039-ec9c827d403f\n\n# https://raw.githubusercontent.com/kubernetes-sigs/controller-tools/master/envtest-releases.yaml\n# FIXME: Find a way to configure Renovate to suggest upgrades\nKUBEBUILDER_ASSETS_VERSION := v1.35.0\ntools += etcd=$(KUBEBUILDER_ASSETS_VERSION)\ntools += kube-apiserver=$(KUBEBUILDER_ASSETS_VERSION)\n\n# Additional tools can be defined to reuse the tooling in this file\nADDITIONAL_TOOLS ?=\ntools += $(ADDITIONAL_TOOLS)\n\n# Print the go version which can be used in GH actions\n.PHONY: print-go-version\nprint-go-version:\n\t@echo result=$(VENDORED_GO_VERSION)\n\n# When switching 
branches which use different versions of the tools, we\n# need a way to re-trigger the symlinking from $(bin_dir)/downloaded to $(bin_dir)/tools.\n# This pattern rule creates a version stamp file that tracks the tool version.\n# If the version changes (or file doesn't exist), update the stamp file to trigger rebuild.\n$(bin_dir)/scratch/%_VERSION: FORCE | $(bin_dir)/scratch\n\t@test \"$($*_VERSION)\" == \"$(shell cat $@ 2>/dev/null)\" || echo $($*_VERSION) > $@\n\n# --silent = don't print output like progress meters\n# --show-error = but do print errors when they happen\n# --fail = exit with a nonzero error code without the response from the server when there's an HTTP error\n# --location = follow redirects from the server\n# --retry = the number of times to retry a failed attempt to connect\n# --retry-connrefused = retry even if the initial connection was refused\nCURL := curl --silent --show-error --fail --location --retry 10 --retry-connrefused\n\n# LN is expected to be an atomic action, meaning that two Make processes\n# can run the \"link $(DOWNLOAD_DIR)/tools/xxx@$(XXX_VERSION)_$(HOST_OS)_$(HOST_ARCH)\n# to $(bin_dir)/tools/xxx\" operation simultaneously without issues (both\n# will perform the action and the second time the link will be overwritten).\n#\n# -s = Create a symbolic link\n# -f = Force the creation of the link (replace existing links)\n# -n = If destination already exists, replace it, don't use it as a directory to create a new link inside\nLN := ln -fsn\n\n# Mapping of lowercase to uppercase letters for the uc (uppercase) function\nupper_map := a:A b:B c:C d:D e:E f:F g:G h:H i:I j:J k:K l:L m:M n:N o:O p:P q:Q r:R s:S t:T u:U v:V w:W x:X y:Y z:Z\n# Function to convert a string to uppercase (e.g., \"helm\" -> \"HELM\")\n# Works by iterating through upper_map and substituting each lowercase letter with uppercase\n# Used to create variable names like HELM_VERSION from tool names like \"helm\"\nuc = $(strip \\\n\t\t$(eval __upper := $1) 
\\\n\t\t$(foreach p,$(upper_map), \\\n\t\t\t$(eval __upper := $(subst $(word 1,$(subst :, ,$p)),$(word 2,$(subst :, ,$p)),$(__upper))) \\\n\t\t) \\\n\t)$(__upper)\n\ntool_names :=\n\n# for each item `xxx` in the tools variable:\n# - a $(XXX_VERSION) variable is generated\n#     -> this variable contains the version of the tool\n# - a $(NEEDS_XXX) variable is generated\n#     -> this variable contains the target name for the tool,\n#        which is the relative path of the binary, this target\n#        should be used when adding the tool as a dependency to\n#        your target, you can't use $(XXX) as a dependency because\n#        make does not support an absolute path as a dependency\n# - a $(XXX) variable is generated\n#     -> this variable contains the absolute path of the binary,\n#        the absolute path should be used when executing the binary\n#        in targets or in scripts, because it is agnostic to the\n#        working directory\n# - an unversioned target $(bin_dir)/tools/xxx is generated that\n#   creates a link to the corresponding versioned target:\n#   $(DOWNLOAD_DIR)/tools/xxx@$(XXX_VERSION)_$(HOST_OS)_$(HOST_ARCH)\ndefine tool_defs\ntool_names += $1\n\n$(call uc,$1)_VERSION ?= $2\nNEEDS_$(call uc,$1) := $$(bin_dir)/tools/$1\n$(call uc,$1) := $$(CURDIR)/$$(bin_dir)/tools/$1\n\n# Create symlink from $(bin_dir)/tools/$1 to the versioned binary in $(DOWNLOAD_DIR)\n$$(bin_dir)/tools/$1: $$(bin_dir)/scratch/$(call uc,$1)_VERSION | $$(DOWNLOAD_DIR)/tools/$1@$$($(call uc,$1)_VERSION)_$$(HOST_OS)_$$(HOST_ARCH) $$(bin_dir)/tools\n\t@# cd into tools dir and create relative symlink (e.g., ../downloaded/tools/helm@v4.0.1_darwin_arm64)\n\t@# patsubst converts absolute path to relative by replacing $(bin_dir) with ..\n\t@cd $$(dir $$@) && $$(LN) $$(patsubst $$(bin_dir)/%,../%,$$(word 1,$$|)) $$(notdir $$@)\n\t@touch $$@ # making sure the target of the symlink is newer than *_VERSION\nendef\n\n# For each tool in the tools list (e.g., \"helm=v4.0.1\"), split 
on \"=\" and call tool_defs\n# with the tool name as first arg and version as second arg\n$(foreach tool,$(tools),$(eval $(call tool_defs,$(word 1,$(subst =, ,$(tool))),$(word 2,$(subst =, ,$(tool))))))\n\n######\n# Go #\n######\n\n# $(NEEDS_GO) is a target that is set as an order-only prerequisite in\n# any target that calls $(GO), e.g.:\n#\n#     $(bin_dir)/tools/crane: $(NEEDS_GO)\n#         $(GO) build -o $(bin_dir)/tools/crane\n#\n# $(NEEDS_GO) is empty most of the time, except when running \"make vendor-go\"\n# or when \"make vendor-go\" was previously run, in which case $(NEEDS_GO) is set\n# to $(bin_dir)/tools/go, since $(bin_dir)/tools/go is a prerequisite of\n# any target depending on Go when \"make vendor-go\" was run.\n\n# Auto-detect if Go vendoring should be enabled:\n# - Set if \"vendor-go\" is in the make command goals, OR\n# - Set if $(bin_dir)/tools/go already exists (vendoring was previously run)\ndetected_vendoring := $(findstring vendor-go,$(MAKECMDGOALS))$(shell [ -f $(bin_dir)/tools/go ] && echo yes)\nexport VENDOR_GO ?= $(detected_vendoring)\n\nifeq ($(VENDOR_GO),)\n.PHONY: __require-go\nifneq ($(shell command -v go >/dev/null || echo notfound),)\n__require-go:\n\t@:$(error \"$(GO) (or run 'make vendor-go')\")\nendif\nGO := go\nNEEDS_GO = __require-go\nelse\nexport GOROOT := $(CURDIR)/$(bin_dir)/tools/goroot\nexport PATH := $(CURDIR)/$(bin_dir)/tools/goroot/bin:$(PATH)\nGO := $(CURDIR)/$(bin_dir)/tools/go\nNEEDS_GO := $(bin_dir)/tools/go\nMAKE := $(MAKE) vendor-go\nendif\n\n.PHONY: vendor-go\n## By default, this Makefile uses the system's Go. You can use a \"vendored\"\n## version of Go that will get downloaded by running this command once. To\n## disable vendoring, run \"make unvendor-go\". 
When vendoring is enabled,\n## you will want to set the following:\n##\n##     export PATH=\"$PWD/$(bin_dir)/tools:$PATH\"\n##     export GOROOT=\"$PWD/$(bin_dir)/tools/goroot\"\n## @category [shared] Tools\nvendor-go: $(bin_dir)/tools/go\n\n.PHONY: unvendor-go\nunvendor-go: $(bin_dir)/tools/go\n\trm -rf $(bin_dir)/tools/go $(bin_dir)/tools/goroot\n\n.PHONY: which-go\n## Print the version and path of go which will be used for building and\n## testing in Makefile commands. Vendored go will have a path in ./bin\n## @category [shared] Tools\nwhich-go: | $(NEEDS_GO)\n\t@$(GO) version\n\t@echo \"go binary used for above version information: $(GO)\"\n\n$(bin_dir)/tools/go: $(bin_dir)/scratch/VENDORED_GO_VERSION | $(bin_dir)/tools/goroot $(bin_dir)/tools\n\t@# Create symlink to the go binary inside the goroot\n\t@cd $(dir $@) && $(LN) ./goroot/bin/go $(notdir $@)\n\t@touch $@ # making sure the target of the symlink is newer than *_VERSION\n\n# The \"_\" in \"_bin\" prevents \"go mod tidy\" from trying to tidy the vendored goroot.\n$(bin_dir)/tools/goroot: $(bin_dir)/scratch/VENDORED_GO_VERSION | $(GOVENDOR_DIR)/go@$(VENDORED_GO_VERSION)_$(HOST_OS)_$(HOST_ARCH)/goroot $(bin_dir)/tools\n\t@# Create relative symlink from $(bin_dir)/tools/goroot to $(GOVENDOR_DIR)/...\n\t@# patsubst converts the absolute path to relative (e.g., ../../go_vendor/go@1.25.4_darwin_arm64/goroot)\n\t@cd $(dir $@) && $(LN) $(patsubst $(bin_dir)/%,../%,$(word 1,$|)) $(notdir $@)\n\t@touch $@ # making sure the target of the symlink is newer than *_VERSION\n\n# Extract the tar to the $(GOVENDOR_DIR) directory, this directory is not cached across CI runs.\n$(GOVENDOR_DIR)/go@$(VENDORED_GO_VERSION)_$(HOST_OS)_$(HOST_ARCH)/goroot: | $(DOWNLOAD_DIR)/tools/go@$(VENDORED_GO_VERSION)_$(HOST_OS)_$(HOST_ARCH).tar.gz\n\t@# 1. Use lock script to prevent concurrent extraction\n\t@# 2. Extract tar.gz to temp directory (creates \"go\" folder inside)\n\t@# 3. 
Rename the extracted \"go\" directory to final location\n\t@source $(lock_script) $@; \\\n\t\tmkdir -p $(outfile).dir; \\\n\t\ttar xzf $| -C $(outfile).dir; \\\n\t\tmv $(outfile).dir/go $(outfile); \\\n\t\trm -rf $(outfile).dir\n\n###################\n# go dependencies #\n###################\n\ngo_dependencies :=\ngo_dependencies += ginkgo=github.com/onsi/ginkgo/v2/ginkgo\ngo_dependencies += controller-gen=sigs.k8s.io/controller-tools/cmd/controller-gen\ngo_dependencies += goimports=golang.org/x/tools/cmd/goimports\n# FIXME: Switch back to github.com/google/go-licenses once\n# https://github.com/google/go-licenses/pull/327 is merged.\n# Remember to also update the Go package in the Renovate marker over the version (above).\ngo_dependencies += go-licenses=github.com/inteon/go-licenses/v2\ngo_dependencies += gotestsum=gotest.tools/gotestsum\ngo_dependencies += kustomize=sigs.k8s.io/kustomize/kustomize/v5\ngo_dependencies += gojq=github.com/itchyny/gojq/cmd/gojq\ngo_dependencies += crane=github.com/google/go-containerregistry/cmd/crane\ngo_dependencies += protoc-gen-go=google.golang.org/protobuf/cmd/protoc-gen-go\ngo_dependencies += cosign=github.com/sigstore/cosign/v2/cmd/cosign\ngo_dependencies += boilersuite=github.com/cert-manager/boilersuite\ngo_dependencies += gomarkdoc=github.com/princjef/gomarkdoc/cmd/gomarkdoc\ngo_dependencies += oras=oras.land/oras/cmd/oras\ngo_dependencies += klone=github.com/cert-manager/klone\ngo_dependencies += goreleaser=github.com/goreleaser/goreleaser/v2\ngo_dependencies += syft=github.com/anchore/syft/cmd/syft\ngo_dependencies += client-gen=k8s.io/code-generator/cmd/client-gen\ngo_dependencies += deepcopy-gen=k8s.io/code-generator/cmd/deepcopy-gen\ngo_dependencies += informer-gen=k8s.io/code-generator/cmd/informer-gen\ngo_dependencies += lister-gen=k8s.io/code-generator/cmd/lister-gen\ngo_dependencies += applyconfiguration-gen=k8s.io/code-generator/cmd/applyconfiguration-gen\ngo_dependencies += 
defaulter-gen=k8s.io/code-generator/cmd/defaulter-gen\ngo_dependencies += conversion-gen=k8s.io/code-generator/cmd/conversion-gen\ngo_dependencies += openapi-gen=k8s.io/kube-openapi/cmd/openapi-gen\ngo_dependencies += helm-tool=github.com/cert-manager/helm-tool\ngo_dependencies += image-tool=github.com/cert-manager/image-tool\ngo_dependencies += cmctl=github.com/cert-manager/cmctl/v2\ngo_dependencies += cmrel=github.com/cert-manager/release/cmd/cmrel\ngo_dependencies += golangci-lint=github.com/golangci/golangci-lint/v2/cmd/golangci-lint\ngo_dependencies += govulncheck=golang.org/x/vuln/cmd/govulncheck\ngo_dependencies += gh=github.com/cli/cli/v2/cmd/gh\ngo_dependencies += gci=github.com/daixiang0/gci\ngo_dependencies += yamlfmt=github.com/google/yamlfmt/cmd/yamlfmt\ngo_dependencies += kubeconform=github.com/yannh/kubeconform/cmd/kubeconform\n\n#################\n# go build tags #\n#################\n\ngo_tags :=\n\n# Additional Go dependencies can be defined to re-use the tooling in this file\nADDITIONAL_GO_DEPENDENCIES ?=\nADDITIONAL_GO_TAGS ?=\ngo_dependencies += $(ADDITIONAL_GO_DEPENDENCIES)\ngo_tags += $(ADDITIONAL_GO_TAGS)\n\ngo_tags_init = go_tags_$1 :=\n$(call for_each_kv,go_tags_init,$(go_dependencies))\n\ngo_tags_defs = go_tags_$1 += $2\n$(call for_each_kv,go_tags_defs,$(go_tags))\n\ngo_tool_names :=\n\n# Template for building Go-based tools from source using \"go install\"\ndefine go_dependency\ngo_tool_names += $1\n$$(DOWNLOAD_DIR)/tools/$1@$($(call uc,$1)_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $$(NEEDS_GO) $$(DOWNLOAD_DIR)/tools\n\t@# 1. Use lock script to prevent concurrent builds of the same tool\n\t@# 2. Install to temp dir using GOBIN, with GOWORK=off to ignore workspace files\n\t@# 3. 
Move the binary to final location\n\t@source $$(lock_script) $$@; \\\n\t\tmkdir -p $$(outfile).dir; \\\n\t\tGOWORK=off GOBIN=$$(outfile).dir $$(GO) install --tags \"$(strip $(go_tags_$1))\" $2@$($(call uc,$1)_VERSION); \\\n\t\tmv $$(outfile).dir/$1 $$(outfile); \\\n\t\trm -rf $$(outfile).dir\nendef\n$(call for_each_kv,go_dependency,$(go_dependencies))\n\n##################\n# File downloads #\n##################\n\ngo_linux_amd64_SHA256SUM=990e6b4bbba816dc3ee129eaeaf4b42f17c2800b88a2166c265ac1a200262282\ngo_linux_arm64_SHA256SUM=c958a1fe1b361391db163a485e21f5f228142d6f8b584f6bef89b26f66dc5b23\ngo_darwin_amd64_SHA256SUM=bc3f1500d9968c36d705442d90ba91addf9271665033748b82532682e90a7966\ngo_darwin_arm64_SHA256SUM=32af1522bf3e3ff3975864780a429cc0b41d190ec7bf90faa661d6d64566e7af\n\n.PRECIOUS: $(DOWNLOAD_DIR)/tools/go@$(VENDORED_GO_VERSION)_$(HOST_OS)_$(HOST_ARCH).tar.gz\n$(DOWNLOAD_DIR)/tools/go@$(VENDORED_GO_VERSION)_$(HOST_OS)_$(HOST_ARCH).tar.gz: | $(DOWNLOAD_DIR)/tools\n\t@source $(lock_script) $@; \\\n\t\t$(CURL) https://go.dev/dl/go$(VENDORED_GO_VERSION).$(HOST_OS)-$(HOST_ARCH).tar.gz -o $(outfile); \\\n\t\t$(checkhash_script) $(outfile) $(go_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM)\n\nhelm_linux_amd64_SHA256SUM=70b2c30a19da4db264dfd68c8a3664e05093a361cefd89572ffb36f8abfa3d09\nhelm_linux_arm64_SHA256SUM=13d03672be289045d2ff00e4e345d61de1c6f21c1257a45955a30e8ae036d8f1\nhelm_darwin_amd64_SHA256SUM=abf09c8503ad1d8ef76d3737a058c3456a998aae5f5966fce4bb3031aeb1654e\nhelm_darwin_arm64_SHA256SUM=7c2eca678e8001fa863cdf8cbf6ac1b3799f9404a89eb55c08260ef5732e658d\n\n.PRECIOUS: $(DOWNLOAD_DIR)/tools/helm@$(HELM_VERSION)_$(HOST_OS)_$(HOST_ARCH)\n$(DOWNLOAD_DIR)/tools/helm@$(HELM_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools\n\t@source $(lock_script) $@; \\\n\t\t$(CURL) https://get.helm.sh/helm-$(HELM_VERSION)-$(HOST_OS)-$(HOST_ARCH).tar.gz -o $(outfile).tar.gz; \\\n\t\t$(checkhash_script) $(outfile).tar.gz $(helm_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \\\n\t\ttar xfO 
$(outfile).tar.gz $(HOST_OS)-$(HOST_ARCH)/helm > $(outfile); \\\n\t\tchmod +x $(outfile); \\\n\t\trm -f $(outfile).tar.gz\n\nhelm-unittest_linux_amd64_SHA256SUM=9761f23d9509c98770c026e019e743b524b57010f4bc29175f78d2582ace0633\nhelm-unittest_linux_arm64_SHA256SUM=1e645d96b36582cd8b9fbd53240110267f14d80aa01137341251c60438bbe6b0\nhelm-unittest_darwin_amd64_SHA256SUM=46413a86ded6bfc70cd704ebac16f8d4a0f36712ae399a5d24e32bc44f96985f\nhelm-unittest_darwin_arm64_SHA256SUM=6a6b67b3f638f015e09c093b67c7609a07101b971a1a6d6a83d1a7f75861a4b2\n\n# helm-unittest uses \"macos\" instead of \"darwin\" in release filenames\nhelm_unittest_os := $(HOST_OS)\nifeq ($(HOST_OS),darwin)\nhelm_unittest_os := macos\nendif\n\n.PRECIOUS: $(DOWNLOAD_DIR)/tools/helm-unittest@$(HELM-UNITTEST_VERSION)_$(HOST_OS)_$(HOST_ARCH)\n$(DOWNLOAD_DIR)/tools/helm-unittest@$(HELM-UNITTEST_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools\n\t@source $(lock_script) $@; \\\n\t\t$(CURL) https://github.com/helm-unittest/helm-unittest/releases/download/$(HELM-UNITTEST_VERSION)/helm-unittest-$(helm_unittest_os)-$(HOST_ARCH)-$(HELM-UNITTEST_VERSION:v%=%).tgz -o $(outfile).tgz; \\\n\t\t$(checkhash_script) $(outfile).tgz $(helm-unittest_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \\\n\t\ttar xfO $(outfile).tgz untt > $(outfile); \\\n\t\tchmod +x $(outfile); \\\n\t\trm -f $(outfile).tgz\n\nkubectl_linux_amd64_SHA256SUM=b529430df69a688fd61b64ad2299edb5fd71cb58be2a4779dba624c7d3510efd\nkubectl_linux_arm64_SHA256SUM=6a5a4cc4e396d7626a7a693a3044b51c75520f81db30fe6816c2554e53be336f\nkubectl_darwin_amd64_SHA256SUM=dddb01bddb96f78e48e33105ccfa2feedff585a8b2e3b812f5d0f64c7403710a\nkubectl_darwin_arm64_SHA256SUM=ec644a2473b64b486987f695dfb1867963ce6d42d267b86e944585a546f92b5d\n\n.PRECIOUS: $(DOWNLOAD_DIR)/tools/kubectl@$(KUBECTL_VERSION)_$(HOST_OS)_$(HOST_ARCH)\n$(DOWNLOAD_DIR)/tools/kubectl@$(KUBECTL_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools\n\t@source $(lock_script) $@; \\\n\t\t$(CURL) 
https://dl.k8s.io/release/$(KUBECTL_VERSION)/bin/$(HOST_OS)/$(HOST_ARCH)/kubectl -o $(outfile); \\\n\t\t$(checkhash_script) $(outfile) $(kubectl_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \\\n\t\tchmod +x $(outfile)\n\nkind_linux_amd64_SHA256SUM=eb244cbafcc157dff60cf68693c14c9a75c4e6e6fedaf9cd71c58117cb93e3fa\nkind_linux_arm64_SHA256SUM=8e1014e87c34901cc422a1445866835d1e666f2a61301c27e722bdeab5a1f7e4\nkind_darwin_amd64_SHA256SUM=a8b3cf77b2ad77aec5bf710d1a2589d9117576132af812885cad41e9dede4d4e\nkind_darwin_arm64_SHA256SUM=88bf554fe9da6311c9f8c2d082613c002911a476f6b5090e9420b35d84e70c5c\n\n.PRECIOUS: $(DOWNLOAD_DIR)/tools/kind@$(KIND_VERSION)_$(HOST_OS)_$(HOST_ARCH)\n$(DOWNLOAD_DIR)/tools/kind@$(KIND_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools\n\t@source $(lock_script) $@; \\\n\t\t$(CURL) https://github.com/kubernetes-sigs/kind/releases/download/$(KIND_VERSION)/kind-$(HOST_OS)-$(HOST_ARCH) -o $(outfile); \\\n\t\t$(checkhash_script) $(outfile) $(kind_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \\\n\t\tchmod +x $(outfile)\n\nvault_linux_amd64_SHA256SUM=889b681990fe221b884b7932fa9c9dd0ee9811b9349554f1aa287ab63c9f3dae\nvault_linux_arm64_SHA256SUM=1104ef701aad16e104e2e7b4d2a02a6ec993237559343f3097ac63a00b42e85d\nvault_darwin_amd64_SHA256SUM=a667be3cf56dd0f21a23ba26b47028d1f51b3ca61e71b0e29ceafef1c2a1dc3a\nvault_darwin_arm64_SHA256SUM=c79012c1c8aedd682c68b5d9c89149030611c82da57f45383aef004b39a640d2\n\n.PRECIOUS: $(DOWNLOAD_DIR)/tools/vault@$(VAULT_VERSION)_$(HOST_OS)_$(HOST_ARCH)\n$(DOWNLOAD_DIR)/tools/vault@$(VAULT_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools\n\t@source $(lock_script) $@; \\\n\t\t$(CURL) https://releases.hashicorp.com/vault/$(VAULT_VERSION:v%=%)/vault_$(VAULT_VERSION:v%=%)_$(HOST_OS)_$(HOST_ARCH).zip -o $(outfile).zip; \\\n\t\t$(checkhash_script) $(outfile).zip $(vault_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \\\n\t\tunzip -p $(outfile).zip vault > $(outfile); \\\n\t\tchmod +x $(outfile); \\\n\t\trm -f 
$(outfile).zip\n\nazwi_linux_amd64_SHA256SUM=d816d24c865d86ca101219197b493e399d3f669e8e20e0aaffc5a09f0f4c0aaf\nazwi_linux_arm64_SHA256SUM=f74799439ec3d33d6f69dcaa237fbdde8501390f06ee6d6fb1edfb36f64e1fa6\nazwi_darwin_amd64_SHA256SUM=50dec4f29819a68827d695950a36b296aff501e81420787c16603d6394503c97\nazwi_darwin_arm64_SHA256SUM=f267f5fad691cb60d1983a3df5c9a67d83cba0ca0d87aa707a713d2ba4f47776\n\n.PRECIOUS: $(DOWNLOAD_DIR)/tools/azwi@$(AZWI_VERSION)_$(HOST_OS)_$(HOST_ARCH)\n$(DOWNLOAD_DIR)/tools/azwi@$(AZWI_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools\n\t@source $(lock_script) $@; \\\n\t\t$(CURL) https://github.com/Azure/azure-workload-identity/releases/download/$(AZWI_VERSION)/azwi-$(AZWI_VERSION)-$(HOST_OS)-$(HOST_ARCH).tar.gz -o $(outfile).tar.gz; \\\n\t\t$(checkhash_script) $(outfile).tar.gz $(azwi_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \\\n\t\ttar xfO $(outfile).tar.gz azwi > $(outfile) && chmod 775 $(outfile); \\\n\t\trm -f $(outfile).tar.gz\n\nkubebuilder_tools_linux_amd64_SHA256SUM=5716719def14a3fec3ed285e5e8c4280e6268854039b5073a96e8c0adafb1c02\nkubebuilder_tools_linux_arm64_SHA256SUM=5057fb45eecf246929da768b21d32434b8c96e22a78ef6cdfe912f1a67aae45a\nkubebuilder_tools_darwin_amd64_SHA256SUM=e733f72effc8a8076f2c8eb892de4aeb4bb54ea02082808ce3e51f80f2ff85e2\nkubebuilder_tools_darwin_arm64_SHA256SUM=3c6b1ebd745b82daed47605fb565f7c670c8a3344b57a377a914d013b6b9eef0\n\n.PRECIOUS: $(DOWNLOAD_DIR)/tools/kubebuilder_tools_$(KUBEBUILDER_ASSETS_VERSION)_$(HOST_OS)_$(HOST_ARCH).tar.gz\n$(DOWNLOAD_DIR)/tools/kubebuilder_tools_$(KUBEBUILDER_ASSETS_VERSION)_$(HOST_OS)_$(HOST_ARCH).tar.gz: | $(DOWNLOAD_DIR)/tools\n\t@source $(lock_script) $@; \\\n\t\t$(CURL) https://github.com/kubernetes-sigs/controller-tools/releases/download/envtest-$(KUBEBUILDER_ASSETS_VERSION)/envtest-$(KUBEBUILDER_ASSETS_VERSION)-$(HOST_OS)-$(HOST_ARCH).tar.gz -o $(outfile); \\\n\t\t$(checkhash_script) $(outfile) 
$(kubebuilder_tools_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM)\n\n$(DOWNLOAD_DIR)/tools/etcd@$(KUBEBUILDER_ASSETS_VERSION)_$(HOST_OS)_$(HOST_ARCH): $(DOWNLOAD_DIR)/tools/kubebuilder_tools_$(KUBEBUILDER_ASSETS_VERSION)_$(HOST_OS)_$(HOST_ARCH).tar.gz | $(DOWNLOAD_DIR)/tools\n\t@# Extract specific file from tarball using tar's -O flag (output to stdout)\n\t@source $(lock_script) $@; \\\n\t\ttar xfO $< controller-tools/envtest/etcd > $(outfile) && chmod 775 $(outfile)\n\n$(DOWNLOAD_DIR)/tools/kube-apiserver@$(KUBEBUILDER_ASSETS_VERSION)_$(HOST_OS)_$(HOST_ARCH): $(DOWNLOAD_DIR)/tools/kubebuilder_tools_$(KUBEBUILDER_ASSETS_VERSION)_$(HOST_OS)_$(HOST_ARCH).tar.gz | $(DOWNLOAD_DIR)/tools\n\t@# Extract specific file from tarball using tar's -O flag (output to stdout)\n\t@source $(lock_script) $@; \\\n\t\ttar xfO $< controller-tools/envtest/kube-apiserver > $(outfile) && chmod 775 $(outfile)\n\nkyverno_linux_amd64_SHA256SUM=d0c0f52e8fc8d66a3663b63942b131e5f91b63f7644b3e446546f79142d1b7a3\nkyverno_linux_arm64_SHA256SUM=6f6a66711ba8fc2bd54a28aa1755a62605d053a6a3a758186201ba1f56698ced\nkyverno_darwin_amd64_SHA256SUM=d221d8d93c622b68a2933f4e0accd61db4f41100336f1ddad141259742f70948\nkyverno_darwin_arm64_SHA256SUM=851d1fcc4427a317674cc1892af4f43dcd19983c94498a1a913b6b849f71ef8c\n\n.PRECIOUS: $(DOWNLOAD_DIR)/tools/kyverno@$(KYVERNO_VERSION)_$(HOST_OS)_$(HOST_ARCH)\n$(DOWNLOAD_DIR)/tools/kyverno@$(KYVERNO_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools\n\t@# Kyverno uses x86_64 instead of amd64 in download URLs, so translate the architecture\n\t$(eval ARCH := $(subst amd64,x86_64,$(HOST_ARCH)))\n\n\t@source $(lock_script) $@; \\\n\t\t$(CURL) https://github.com/kyverno/kyverno/releases/download/$(KYVERNO_VERSION)/kyverno-cli_$(KYVERNO_VERSION)_$(HOST_OS)_$(ARCH).tar.gz -o $(outfile).tar.gz; \\\n\t\t$(checkhash_script) $(outfile).tar.gz $(kyverno_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \\\n\t\ttar xfO $(outfile).tar.gz kyverno > $(outfile); \\\n\t\tchmod +x $(outfile); \\\n\t\trm -f 
$(outfile).tar.gz\n\nyq_linux_amd64_SHA256SUM=d56bf5c6819e8e696340c312bd70f849dc1678a7cda9c2ad63eebd906371d56b\nyq_linux_arm64_SHA256SUM=03061b2a50c7a498de2bbb92d7cb078ce433011f085a4994117c2726be4106ea\nyq_darwin_amd64_SHA256SUM=616b0a0f6a5b79d746f05a169c2b9bb40dee00c605ef165b9a1c1681bba738ac\nyq_darwin_arm64_SHA256SUM=541ba2287560df70f561955e2d7f7e1cd00cf2a15a884f6b5c87a4bfa887bc07\n\n.PRECIOUS: $(DOWNLOAD_DIR)/tools/yq@$(YQ_VERSION)_$(HOST_OS)_$(HOST_ARCH)\n$(DOWNLOAD_DIR)/tools/yq@$(YQ_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools\n\t@source $(lock_script) $@; \\\n\t\t$(CURL) https://github.com/mikefarah/yq/releases/download/$(YQ_VERSION)/yq_$(HOST_OS)_$(HOST_ARCH) -o $(outfile); \\\n\t\t$(checkhash_script) $(outfile) $(yq_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \\\n\t\tchmod +x $(outfile)\n\nko_linux_amd64_SHA256SUM=048ab11818089a43b7b74bc554494a79a3fd0d9822c061142e5cd3cf8b30cb27\nko_linux_arm64_SHA256SUM=9a26698876892128952fa3d038a4e99bea961d0d225865c60474b79e3db12e99\nko_darwin_amd64_SHA256SUM=0e0dd8fddbefebb8572ece4dca8f07a7472de862fedd7e9845fd9d651e0d5dbe\nko_darwin_arm64_SHA256SUM=752a639e0fbc013a35a43974b5ed87e7008bc2aee4952dfd2cc19f0013205492\n\n.PRECIOUS: $(DOWNLOAD_DIR)/tools/ko@$(KO_VERSION)_$(HOST_OS)_$(HOST_ARCH)\n$(DOWNLOAD_DIR)/tools/ko@$(KO_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools\n\t@# Ko uses capitalized OS names (Linux/Darwin) and x86_64 instead of amd64\n\t$(eval OS := $(subst linux,Linux,$(subst darwin,Darwin,$(HOST_OS))))\n\t$(eval ARCH := $(subst amd64,x86_64,$(HOST_ARCH)))\n\n\t@source $(lock_script) $@; \\\n\t\t$(CURL) https://github.com/ko-build/ko/releases/download/v$(KO_VERSION)/ko_$(KO_VERSION)_$(OS)_$(ARCH).tar.gz -o $(outfile).tar.gz; \\\n\t\t$(checkhash_script) $(outfile).tar.gz $(ko_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \\\n\t\ttar xfO $(outfile).tar.gz ko > $(outfile); \\\n\t\tchmod +x $(outfile); \\\n\t\trm -f 
$(outfile).tar.gz\n\nprotoc_linux_amd64_SHA256SUM=af27ea66cd26938fe48587804ca7d4817457a08350021a1c6e23a27ccc8c6904\nprotoc_linux_arm64_SHA256SUM=31c5e9e3c7bf013cf41fb97765ee255c140024a6b175b6cc9b64beddd7c23ba7\nprotoc_darwin_amd64_SHA256SUM=ab124429c1f49951f03b6c0c0e911fec04e2c7c20de5c935e0cde7353bbd016c\nprotoc_darwin_arm64_SHA256SUM=2c7e92b8b578916937df132b3032e2e8e6c170862ecf7a8333094a6f3d03650c\n\n.PRECIOUS: $(DOWNLOAD_DIR)/tools/protoc@$(PROTOC_VERSION)_$(HOST_OS)_$(HOST_ARCH)\n$(DOWNLOAD_DIR)/tools/protoc@$(PROTOC_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools\n\t@# Protoc uses different naming: darwin->osx, amd64->x86_64, arm64->aarch_64\n\t$(eval OS := $(subst darwin,osx,$(HOST_OS)))\n\t$(eval ARCH := $(subst arm64,aarch_64,$(subst amd64,x86_64,$(HOST_ARCH))))\n\n\t@source $(lock_script) $@; \\\n\t\t$(CURL) https://github.com/protocolbuffers/protobuf/releases/download/$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION:v%=%)-$(OS)-$(ARCH).zip -o $(outfile).zip; \\\n\t\t$(checkhash_script) $(outfile).zip $(protoc_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \\\n\t\tunzip -p $(outfile).zip bin/protoc > $(outfile); \\\n\t\tchmod +x $(outfile); \\\n\t\trm -f $(outfile).zip\n\ntrivy_linux_amd64_SHA256SUM=8b4376d5d6befe5c24d503f10ff136d9e0c49f9127a4279fd110b727929a5aa9\ntrivy_linux_arm64_SHA256SUM=2f6bb988b553a1bbac6bdd1ce890f5e412439564e17522b88a4541b4f364fc8d\ntrivy_darwin_amd64_SHA256SUM=52d531452b19e7593da29366007d02a810e1e0080d02f9cf6a1afb46c35aaa93\ntrivy_darwin_arm64_SHA256SUM=68e543c51dcc96e1c344053a4fde9660cf602c25565d9f09dc17dd41e13b838a\n\n.PRECIOUS: $(DOWNLOAD_DIR)/tools/trivy@$(TRIVY_VERSION)_$(HOST_OS)_$(HOST_ARCH)\n$(DOWNLOAD_DIR)/tools/trivy@$(TRIVY_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools\n\t@# Trivy uses unusual naming: Linux/macOS for OS, 64bit/ARM64 for architecture\n\t$(eval OS := $(subst linux,Linux,$(subst darwin,macOS,$(HOST_OS))))\n\t$(eval ARCH := $(subst amd64,64bit,$(subst arm64,ARM64,$(HOST_ARCH))))\n\n\t@source 
$(lock_script) $@; \\\n\t\t$(CURL) https://github.com/aquasecurity/trivy/releases/download/$(TRIVY_VERSION)/trivy_$(patsubst v%,%,$(TRIVY_VERSION))_$(OS)-$(ARCH).tar.gz -o $(outfile).tar.gz; \\\n\t\t$(checkhash_script) $(outfile).tar.gz $(trivy_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \\\n\t\ttar xfO $(outfile).tar.gz trivy > $(outfile); \\\n\t\tchmod +x $(outfile); \\\n\t\trm $(outfile).tar.gz\n\nytt_linux_amd64_SHA256SUM=18fe794d01c2539db39acb90994db0d8e51faa7892d0e749d74c29818017247a\nytt_linux_arm64_SHA256SUM=0e9e75b7a5f59161d2413e9d6163a1a13218f270daa1c525656195d1fcef28f6\nytt_darwin_amd64_SHA256SUM=cc51c3040b91bb0871967f9960cd9286bafd334ffd153a86914b883f3adad9ef\nytt_darwin_arm64_SHA256SUM=4cc85a5e954d651d547cdef1e673742d995a38b0840273a5897e5318185b4e18\n\n.PRECIOUS: $(DOWNLOAD_DIR)/tools/ytt@$(YTT_VERSION)_$(HOST_OS)_$(HOST_ARCH)\n$(DOWNLOAD_DIR)/tools/ytt@$(YTT_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools\n\t@source $(lock_script) $@; \\\n\t\t$(CURL) -sSfL https://github.com/vmware-tanzu/carvel-ytt/releases/download/$(YTT_VERSION)/ytt-$(HOST_OS)-$(HOST_ARCH) -o $(outfile); \\\n\t\t$(checkhash_script) $(outfile) $(ytt_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \\\n\t\tchmod +x $(outfile)\n\nrclone_linux_amd64_SHA256SUM=abc0e6e0f275a469d94645f7ef92c7c7673eed20b6558acec5ff48b74641213c\nrclone_linux_arm64_SHA256SUM=00c9e230f0004ab5e3b45c00edf7238ba5bff5fc7ea80f5a86a7da5568de6d1c\nrclone_darwin_amd64_SHA256SUM=4ef15279d857372f3ff84b967ad68fc1c3b113d631effb9c09a18e40f8a78fa7\nrclone_darwin_arm64_SHA256SUM=8cfffacc3ce732b1960645a2f7d2ce97c2ac9ba4f2221c13af6378c199a078f9\n\n.PRECIOUS: $(DOWNLOAD_DIR)/tools/rclone@$(RCLONE_VERSION)_$(HOST_OS)_$(HOST_ARCH)\n$(DOWNLOAD_DIR)/tools/rclone@$(RCLONE_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools\n\t@# Rclone uses \"osx\" instead of \"darwin\" in download URLs\n\t$(eval OS := $(subst darwin,osx,$(HOST_OS)))\n\n\t@source $(lock_script) $@; \\\n\t\t$(CURL) 
https://github.com/rclone/rclone/releases/download/$(RCLONE_VERSION)/rclone-$(RCLONE_VERSION)-$(OS)-$(HOST_ARCH).zip -o $(outfile).zip; \\\n\t\t$(checkhash_script) $(outfile).zip $(rclone_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \\\n\t\tunzip -p $(outfile).zip rclone-$(RCLONE_VERSION)-$(OS)-$(HOST_ARCH)/rclone > $(outfile); \\\n\t\tchmod +x $(outfile); \\\n\t\trm -f $(outfile).zip\n\nistioctl_linux_amd64_SHA256SUM=904bbf1b917dd0135aa55b99cbfa34edd0a188fdeeeef09bb995d8e8e3165112\nistioctl_linux_arm64_SHA256SUM=c4130d32359446fa5e4820c0543d06e2e424883c6890f0f8c59f3ac69dd4b44e\nistioctl_darwin_amd64_SHA256SUM=0bd51e88f8a2568892523752e12ce720793e4b9a9b25bdd4555d5932048e2bf1\nistioctl_darwin_arm64_SHA256SUM=dffa0ff011774cf65fbae5d53f84d54bd12b541a35cff68be60db1c6674f03b4\n\n.PRECIOUS: $(DOWNLOAD_DIR)/tools/istioctl@$(ISTIOCTL_VERSION)_$(HOST_OS)_$(HOST_ARCH)\n$(DOWNLOAD_DIR)/tools/istioctl@$(ISTIOCTL_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools\n\t@# Istio uses \"osx\" instead of \"darwin\" in download URLs\n\t$(eval OS := $(subst darwin,osx,$(HOST_OS)))\n\n\t@source $(lock_script) $@; \\\n\t\t$(CURL) https://github.com/istio/istio/releases/download/$(ISTIOCTL_VERSION)/istio-$(ISTIOCTL_VERSION)-$(OS)-$(HOST_ARCH).tar.gz -o $(outfile).tar.gz; \\\n\t\t$(checkhash_script) $(outfile).tar.gz $(istioctl_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \\\n\t\ttar xfO $(outfile).tar.gz istio-$(ISTIOCTL_VERSION)/bin/istioctl > $(outfile); \\\n\t\tchmod +x $(outfile); \\\n\t\trm $(outfile).tar.gz\n\npreflight_linux_amd64_SHA256SUM=15f58d0de7212ac948706515f824d0d2f42b94c11fa85cdb1bc08ad8993226ca\npreflight_linux_arm64_SHA256SUM=a05103b894ce9fd63f47bd56518b8f0b52850ef11e7ef8c21146ac1273d799ad\npreflight_darwin_amd64_SHA256SUM=f707d9ec7f564ba35dc4a7a73f20562c1f7d11035c93d56b6ae9679649de98e3\npreflight_darwin_arm64_SHA256SUM=6b9c2d3aa2b45303272ca29b7ae231d099d6a1f64142c918e01cb229aeee96a6\n\n.PRECIOUS: 
$(DOWNLOAD_DIR)/tools/preflight@$(PREFLIGHT_VERSION)_$(HOST_OS)_$(HOST_ARCH)\n$(DOWNLOAD_DIR)/tools/preflight@$(PREFLIGHT_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools\n\t@source $(lock_script) $@; \\\n\t\t$(CURL) https://github.com/redhat-openshift-ecosystem/openshift-preflight/releases/download/$(PREFLIGHT_VERSION)/preflight-$(HOST_OS)-$(HOST_ARCH) -o $(outfile); \\\n\t\t$(checkhash_script) $(outfile) $(preflight_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \\\n\t\tchmod +x $(outfile)\n\noperator-sdk_linux_amd64_SHA256SUM=8847c45ea994ac62b3cd134f77934df2a16a56a39a634eb988e0d1db99d1a413\noperator-sdk_linux_arm64_SHA256SUM=5fbb4c9f1eb3d8f6e9f870bfb48160842b9b541ce644d602282ef86578fedc1c\noperator-sdk_darwin_amd64_SHA256SUM=0293b988886b5a2a82b6c141c46293915f0c67cae43cabdb36a0ffdf8af042b6\noperator-sdk_darwin_arm64_SHA256SUM=8f7c19e35ce6ad4069502fcb66ea89548d0173ff8a02b253b0be4ad4909eeaf6\n\n.PRECIOUS: $(DOWNLOAD_DIR)/tools/operator-sdk@$(OPERATOR-SDK_VERSION)_$(HOST_OS)_$(HOST_ARCH)\n$(DOWNLOAD_DIR)/tools/operator-sdk@$(OPERATOR-SDK_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools\n\t@source $(lock_script) $@; \\\n\t\t$(CURL) https://github.com/operator-framework/operator-sdk/releases/download/$(OPERATOR-SDK_VERSION)/operator-sdk_$(HOST_OS)_$(HOST_ARCH) -o $(outfile); \\\n\t\t$(checkhash_script) $(outfile) $(operator-sdk_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \\\n\t\tchmod +x $(outfile)\n\n#################\n# Other Targets #\n#################\n\n# Although we \"vendor\" most tools in $(bin_dir)/tools, we still require some binaries\n# to be available on the system. The vendor-go MAKECMDGOALS trick prevents the\n# check for the presence of Go when 'make vendor-go' is run.\n\n# Gotcha warning: MAKECMDGOALS only contains what the _top level_ make invocation used, and doesn't look at target dependencies\n# i.e. 
if we have a target \"abc: vendor-go test\" and run \"make abc\", we'll get an error\n# about go being missing even though abc itself depends on vendor-go!\n# That means we need to pass vendor-go at the top level if go is not installed (i.e. \"make vendor-go abc\")\n\n# Check for required system tools by testing if each command exists\n# If a command is missing, echo its name. The && chains mean all tests run,\n# and \"missing\" will contain a space-separated list of any missing tools.\nmissing=$(shell (command -v curl >/dev/null || echo curl) \\\n             && (command -v sha256sum >/dev/null || command -v shasum >/dev/null || echo sha256sum) \\\n             && (command -v git >/dev/null || echo git) \\\n             && (command -v xargs >/dev/null || echo xargs) \\\n             && (command -v bash >/dev/null || echo bash))\nifneq ($(missing),)\n$(error Missing required tools: $(missing))\nendif\n\nnon_go_tool_names := $(filter-out $(go_tool_names),$(tool_names))\n\n.PHONY: non-go-tools\n## Download and setup all Non-Go tools\n## @category [shared] Tools\nnon-go-tools: $(non_go_tool_names:%=$(bin_dir)/tools/%)\n\n.PHONY: go-tools\n## Download and setup all Go tools\n## NOTE: this target is also used to learn the shas of\n## these tools (see scripts/learn_tools_shas.sh in the\n## Makefile modules repo)\n## @category [shared] Tools\ngo-tools: $(go_tool_names:%=$(bin_dir)/tools/%)\n\n.PHONY: tools\n## Download and setup all tools\n## @category [shared] Tools\ntools: non-go-tools go-tools\n"
  },
  {
    "path": "make/_shared/tools/util/checkhash.sh",
    "content": "#!/usr/bin/env bash\n\n# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -o errexit\nset -o nounset\nset -o pipefail\n\nSCRIPT_DIR=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" >/dev/null 2>&1 && pwd )\"\n\n# This script takes the hash of its first argument and verifies it against the\n# hex hash given in its second argument\n\nfunction usage_and_exit() {\n\techo \"usage: $0 <path-to-target> <expected-hash>\"\n\techo \"or: LEARN_FILE=<path-to-learn-file> $0 <path-to-target> <old-hash>\"\n\texit 1\n}\n\nHASH_TARGET=${1:-}\nEXPECTED_HASH=${2:-}\n\nif [[ -z $HASH_TARGET ]]; then\n\tusage_and_exit\nfi\n\nif [[ -z $EXPECTED_HASH ]]; then\n\tusage_and_exit\nfi\n\nSHASUM=$(\"${SCRIPT_DIR}/hash.sh\" \"$HASH_TARGET\")\n\nif [[ \"$SHASUM\" == \"$EXPECTED_HASH\" ]]; then\n\texit 0\nfi\n\n# When running 'make learn-sha-tools', we don't want this script to fail.\n# Instead we log what sha values are wrong, so the make.mk file can be updated.\n\nif [ \"${LEARN_FILE:-}\" != \"\" ]; then\n\techo \"s/$EXPECTED_HASH/$SHASUM/g\" >> \"${LEARN_FILE:-}\"\n\texit 0\nfi\n\necho \"invalid checksum for \\\"$HASH_TARGET\\\": wanted \\\"$EXPECTED_HASH\\\" but got \\\"$SHASUM\\\"\"\nexit 1\n"
  },
  {
    "path": "make/_shared/tools/util/hash.sh",
    "content": "#!/usr/bin/env bash\n\n# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -o errexit\nset -o nounset\nset -o pipefail\n\n# This script is a wrapper for outputting purely the sha256 hash of the input file,\n# ideally in a portable way.\n\ncase \"$(uname -s)\" in\n    Darwin*)    shasum -a 256 \"$1\";;\n    *)          sha256sum \"$1\" \nesac | cut -d\" \" -f1"
  },
  {
    "path": "make/_shared/tools/util/lock.sh",
    "content": "#!/usr/bin/env bash\n\n# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -o errexit\nset -o nounset\nset -o pipefail\n\n# This script is used to lock a file while it is being downloaded. It prevents\n# multiple processes from downloading the same file at the same time or from reading\n# a half-downloaded file.\n# We need this solution because we have recursive $(MAKE) calls in our makefile\n# which each will try to download a set of tools. To prevent them from all downloading\n# the same files, we re-use the same downloads folder for all $(MAKE) invocations and\n# use this script to deduplicate the download processes.\n\nfinalfile=\"$1\"\nlockfile=\"$finalfile.lock\"\n\n# On macOS, flock is not installed, we just skip locking in that case,\n# this means that running verify in parallel without downloading all\n# tools first will not work.\nflock_installed=$(command -v flock >/dev/null && echo \"yes\" || echo \"no\")\n\nif [[ \"$flock_installed\" == \"yes\" ]]; then\n  mkdir -p \"$(dirname \"$lockfile\")\"\n  touch \"$lockfile\"\n  exec {FD}<>\"$lockfile\"\n\n  # wait for the file to be unlocked\n  if ! 
flock -x $FD; then\n    echo \"Failed to obtain a lock for $lockfile\"\n    exit 1\n  fi\nfi\n\n# now that we have the lock, check if file is already there\nif [[ -e \"$finalfile\" ]]; then\n  exit 0\nfi\n\n# use a temporary file to prevent Make from thinking the file is ready\n# while in reality it is only a partial download\n# shellcheck disable=SC2034\noutfile=\"$finalfile.tmp\"\n\nfinish() {\n  rv=$?\n  if [[ $rv -eq 0 ]]; then\n    mv \"$outfile\" \"$finalfile\"\n    echo \"[info]: downloaded $finalfile\"\n  else\n    rm -rf \"$outfile\" || true\n    rm -rf \"$finalfile\" || true\n  fi\n  rm -rf \"$lockfile\" || true\n}\ntrap finish EXIT SIGINT\n"
  },
  {
    "path": "make/ark/00_mod.mk",
    "content": "build_names += ark\ngo_ark_main_dir := ./cmd/ark\ngo_ark_mod_dir := .\ngo_ark_ldflags := \\\n\t-X $(gomodule_name)/pkg/version.PreflightVersion=$(VERSION) \\\n\t-X $(gomodule_name)/pkg/version.Commit=$(GITCOMMIT) \\\n\t-X $(gomodule_name)/pkg/version.BuildDate=$(shell date \"+%F-%T-%Z\")\n\noci_ark_base_image_flavor := static\noci_ark_image_name := quay.io/jetstack/disco-agent\noci_ark_image_tag := $(VERSION)\noci_ark_image_name_development := jetstack.local/disco-agent\n\n# Annotations are the standardised set of annotations we set on every component we publish\noci_ark_build_args := \\\n\t--image-annotation=\"org.opencontainers.image.source\"=\"https://github.com/jetstack/jetstack-secure\" \\\n\t--image-annotation=\"org.opencontainers.image.vendor\"=\"CyberArk Software Ltd.\" \\\n\t--image-annotation=\"org.opencontainers.image.licenses\"=\"EULA - https://www.cyberark.com/contract-terms/\" \\\n\t--image-annotation=\"org.opencontainers.image.authors\"=\"CyberArk Software Ltd.\" \\\n\t--image-annotation=\"org.opencontainers.image.title\"=\"CyberArk Discovery and Context Agent\" \\\n\t--image-annotation=\"org.opencontainers.image.description\"=\"Gathers machine identity data from Kubernetes clusters.\" \\\n\t--image-annotation=\"org.opencontainers.image.url\"=\"https://www.cyberark.com/products/\" \\\n\t--image-annotation=\"org.opencontainers.image.documentation\"=\"https://docs.cyberark.com\" \\\n\t--image-annotation=\"org.opencontainers.image.version\"=\"$(VERSION)\" \\\n\t--image-annotation=\"org.opencontainers.image.revision\"=\"$(GITCOMMIT)\"\n\n\ndefine ark_helm_values_mutation_function\necho \"no mutations defined for this chart\"\nendef\n"
  },
  {
    "path": "make/ark/02_mod.mk",
    "content": "# Makefile targets for CyberArk Discovery and Context\n\n# The base OCI repository for all CyberArk Discovery and Context artifacts\nARK_OCI_BASE ?= quay.io/jetstack\n\n# The OCI repository (without tag) for the CyberArk Discovery and Context Agent Docker image\n# Can be overridden when calling `make ark-release` to push to a different repository.\nARK_IMAGE ?= $(ARK_OCI_BASE)/disco-agent\n\n# The OCI repository (without tag) for the CyberArk Discovery and Context Helm chart\n# Can be overridden when calling `make ark-release` to push to a different repository.\nARK_CHART ?= $(ARK_OCI_BASE)/charts/disco-agent\n\n# Used to output variables when running in GitHub Actions\nGITHUB_OUTPUT ?= /dev/stderr\n\n.PHONY: ark-release\n## Publish all release artifacts (image + helm chart)\n## @category CyberArk Discovery and Context\nark-release: oci_ark_image_digest_path := $(bin_dir)/scratch/image/oci-layout-ark.digests\nark-release: helm_digest_path := $(bin_dir)/scratch/helm/disco-agent-$(helm_chart_version).digests\nark-release:\n\t$(MAKE) oci-push-ark helm-chart-oci-push \\\n\t\toci_ark_image_name=\"$(ARK_IMAGE)\" \\\n\t\thelm_image_name=\"$(ARK_IMAGE)\" \\\n\t\thelm_image_tag=\"$(oci_ark_image_tag)\" \\\n\t\thelm_chart_source_dir=deploy/charts/disco-agent \\\n\t\thelm_chart_image_name=\"$(ARK_CHART)\"\n\n\t@echo \"ARK_IMAGE=$(ARK_IMAGE)\" >> \"$(GITHUB_OUTPUT)\"\n\t@echo \"ARK_IMAGE_TAG=$(oci_ark_image_tag)\" >> \"$(GITHUB_OUTPUT)\"\n\t@echo \"ARK_IMAGE_DIGEST=$$(head -1 $(oci_ark_image_digest_path))\" >> \"$(GITHUB_OUTPUT)\"\n\t@echo \"ARK_CHART=$(ARK_CHART)\" >> \"$(GITHUB_OUTPUT)\"\n\t@echo \"ARK_CHART_TAG=$(helm_chart_version)\" >> \"$(GITHUB_OUTPUT)\"\n\t@echo \"ARK_CHART_DIGEST=$$(head -1 $(helm_digest_path))\" >> \"$(GITHUB_OUTPUT)\"\n\n\t@echo \"Release complete!\"\n\n.PHONY: ark-test-e2e\n## Run a basic E2E test on a Kind cluster\n## See `hack/ark/e2e.sh` for the full test script.\n## @category CyberArk Discovery and Context\nark-test-e2e: 
$(NEEDS_KIND) $(NEEDS_KUBECTL) $(NEEDS_HELM)\n\tPATH=\"$(bin_dir)/tools:${PATH}\" ./hack/ark/test-e2e.sh\n\n.PHONY: ark-verify\n## Verify the Helm chart\n## @category CyberArk Discovery and Context\nark-verify:\n\tINSTALL_OPTIONS=\"--set acceptTerms=true\" $(MAKE) verify-helm-lint verify-helm-values verify-pod-security-standards verify-helm-kubeconform verify-helm-unittest \\\n\t\thelm_chart_source_dir=deploy/charts/disco-agent \\\n\t\thelm_chart_image_name=$(ARK_CHART)\n\nshared_verify_targets += ark-verify\n\n.PHONY: ark-generate\n## Generate Helm chart documentation and schema\n## @category CyberArk Discovery and Context\nark-generate:\n\t$(MAKE) generate-helm-docs generate-helm-schema \\\n\t\thelm_chart_source_dir=deploy/charts/disco-agent\n\nshared_generate_targets += ark-generate\n\n"
  },
  {
    "path": "make/connection_crd/main.go",
    "content": "package main\n\nimport (\n\t\"fmt\"\n\n\tcrd \"github.com/jetstack/venafi-connection-lib/config/crd/bases\"\n)\n\n// With this tool, we no longer have to use something like `helm template` to\n// pull the CRD manifest from the venafi-connection-lib project.\nfunc main() {\n\tfmt.Print(string(crd.VenafiConnectionCrd))\n}\n"
  },
  {
    "path": "make/extra_tools.mk",
    "content": "ADDITIONAL_TOOLS :=\nADDITIONAL_GO_DEPENDENCIES :=\n\nADDITIONAL_TOOLS += venctl=1.27.0\nADDITIONAL_TOOLS += step=0.28.2\n\n"
  },
  {
    "path": "make/ngts/00_mod.mk",
    "content": "build_names += ngts\ngo_ngts_main_dir := ./cmd/ark\ngo_ngts_mod_dir := .\ngo_ngts_ldflags := \\\n\t-X $(gomodule_name)/pkg/version.PreflightVersion=$(VERSION) \\\n\t-X $(gomodule_name)/pkg/version.Commit=$(GITCOMMIT) \\\n\t-X $(gomodule_name)/pkg/version.BuildDate=$(shell date \"+%F-%T-%Z\")\n\noci_ngts_base_image_flavor := static\noci_ngts_image_name := quay.io/jetstack/discovery-agent\noci_ngts_image_tag := $(VERSION)\noci_ngts_image_name_development := jetstack.local/discovery-agent\n\n# Annotations are the standardised set of annotations we set on every component we publish\noci_ngts_build_args := \\\n\t--image-annotation=\"org.opencontainers.image.source\"=\"https://github.com/jetstack/jetstack-secure\" \\\n\t--image-annotation=\"org.opencontainers.image.vendor\"=\"Palo Alto Networks\" \\\n\t--image-annotation=\"org.opencontainers.image.licenses\"=\"Apache-2.0\" \\\n\t--image-annotation=\"org.opencontainers.image.authors\"=\"Palo Alto Networks\" \\\n\t--image-annotation=\"org.opencontainers.image.title\"=\"Discovery Agent for NGTS\" \\\n\t--image-annotation=\"org.opencontainers.image.description\"=\"Gathers machine identity data from Kubernetes clusters for NGTS.\" \\\n\t--image-annotation=\"org.opencontainers.image.url\"=\"https://www.paloaltonetworks.com/\" \\\n\t--image-annotation=\"org.opencontainers.image.documentation\"=\"https://docs.paloaltonetworks.com/\" \\\n\t--image-annotation=\"org.opencontainers.image.version\"=\"$(VERSION)\" \\\n\t--image-annotation=\"org.opencontainers.image.revision\"=\"$(GITCOMMIT)\"\n\n\ndefine ngts_helm_values_mutation_function\necho \"no mutations defined for this chart\"\nendef\n"
  },
  {
    "path": "make/ngts/02_mod.mk",
    "content": "# Makefile targets for NGTS Discovery Agent\n\n# The base OCI repository for all NGTS Discovery Agent artifacts\nNGTS_OCI_BASE ?= quay.io/jetstack\n\n# The OCI repository (without tag) for the NGTS Discovery Agent Docker image\n# Can be overridden when calling `make ngts-release` to push to a different repository.\nNGTS_IMAGE ?= $(NGTS_OCI_BASE)/discovery-agent\n\n# The OCI repository (without tag) for the NGTS Discovery Agent Helm chart\n# Can be overridden when calling `make ngts-release` to push to a different repository.\nNGTS_CHART ?= $(NGTS_OCI_BASE)/charts/discovery-agent\n\n# Used to output variables when running in GitHub Actions\nGITHUB_OUTPUT ?= /dev/stderr\n\n.PHONY: ngts-release\n## Publish all release artifacts (image + helm chart)\n## @category NGTS Discovery Agent\nngts-release: oci_ngts_image_digest_path := $(bin_dir)/scratch/image/oci-layout-ngts.digests\nngts-release: helm_digest_path := $(bin_dir)/scratch/helm/discovery-agent-$(helm_chart_version).digests\nngts-release:\n\t$(MAKE) oci-push-ngts helm-chart-oci-push \\\n\t\toci_ngts_image_name=\"$(NGTS_IMAGE)\" \\\n\t\thelm_image_name=\"$(NGTS_IMAGE)\" \\\n\t\thelm_image_tag=\"$(oci_ngts_image_tag)\" \\\n\t\thelm_chart_source_dir=deploy/charts/discovery-agent \\\n\t\thelm_chart_image_name=\"$(NGTS_CHART)\"\n\n\t@echo \"NGTS_IMAGE=$(NGTS_IMAGE)\" >> \"$(GITHUB_OUTPUT)\"\n\t@echo \"NGTS_IMAGE_TAG=$(oci_ngts_image_tag)\" >> \"$(GITHUB_OUTPUT)\"\n\t@echo \"NGTS_IMAGE_DIGEST=$$(head -1 $(oci_ngts_image_digest_path))\" >> \"$(GITHUB_OUTPUT)\"\n\t@echo \"NGTS_CHART=$(NGTS_CHART)\" >> \"$(GITHUB_OUTPUT)\"\n\t@echo \"NGTS_CHART_TAG=$(helm_chart_version)\" >> \"$(GITHUB_OUTPUT)\"\n\t@echo \"NGTS_CHART_DIGEST=$$(head -1 $(helm_digest_path))\" >> \"$(GITHUB_OUTPUT)\"\n\n\t@echo \"Release complete!\"\n\n.PHONY: ngts-test-e2e\n## Run a basic E2E test on a Kind cluster\n## See `hack/ngts/e2e.sh` for the full test script.\n## @category NGTS Discovery Agent\nngts-test-e2e: $(NEEDS_KIND) 
$(NEEDS_KUBECTL) $(NEEDS_HELM) $(NEEDS_YQ)\n\tPATH=\"$(bin_dir)/tools:${PATH}\" ./hack/ngts/test-e2e.sh\n\n.PHONY: ngts-verify\n## Verify the Helm chart\n## @category NGTS Discovery Agent\nngts-verify:\n\tINSTALL_OPTIONS=\"--set-string config.tsgID=1234123412 --set config.clusterName=foo\" $(MAKE) verify-helm-lint verify-helm-values verify-pod-security-standards verify-helm-kubeconform verify-helm-unittest \\\n\t\thelm_chart_source_dir=deploy/charts/discovery-agent \\\n\t\thelm_chart_image_name=$(NGTS_CHART)\n\nshared_verify_targets += ngts-verify\n\n.PHONY: ngts-generate\n## Generate Helm chart documentation and schema\n## @category NGTS Discovery Agent\nngts-generate:\n\t$(MAKE) generate-helm-docs generate-helm-schema \\\n\t\thelm_chart_source_dir=deploy/charts/discovery-agent\n\nshared_generate_targets += ngts-generate\n"
  },
  {
    "path": "make/test-unit.mk",
    "content": ".PHONY: test-unit\n## Unit tests\n## @category Testing\ntest-unit: | $(NEEDS_GO) $(NEEDS_GOTESTSUM) $(ARTIFACTS) $(NEEDS_ETCD) $(NEEDS_KUBE-APISERVER)\n\tKUBEBUILDER_ASSETS=$(CURDIR)/$(bin_dir)/tools \\\n\t$(GOTESTSUM) \\\n\t\t--junitfile=$(ARTIFACTS)/junit-go-e2e.xml \\\n\t\t-- \\\n\t\t-coverprofile=$(ARTIFACTS)/filtered.cov \\\n\t\t./... \\\n\t\t-- \\\n\t\t-ldflags $(go_preflight_ldflags)\n\n\t$(GO) tool cover -func=$(ARTIFACTS)/filtered.cov\n\t$(GO) tool cover -html=$(ARTIFACTS)/filtered.cov -o=$(ARTIFACTS)/filtered.html\n"
  },
  {
    "path": "pkg/agent/config.go",
    "content": "package agent\n\nimport (\n\t\"crypto/x509\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/url\"\n\t\"os\"\n\t\"regexp\"\n\t\"time\"\n\n\t\"github.com/go-logr/logr\"\n\t\"github.com/hashicorp/go-multierror\"\n\t\"github.com/jetstack/venafi-connection-lib/http_client\"\n\t\"github.com/spf13/cobra\"\n\t\"gopkg.in/yaml.v3\"\n\t\"k8s.io/client-go/rest\"\n\n\t\"github.com/jetstack/preflight/api\"\n\t\"github.com/jetstack/preflight/pkg/client\"\n\t\"github.com/jetstack/preflight/pkg/datagatherer\"\n\t\"github.com/jetstack/preflight/pkg/datagatherer/k8sdiscovery\"\n\t\"github.com/jetstack/preflight/pkg/datagatherer/k8sdynamic\"\n\t\"github.com/jetstack/preflight/pkg/datagatherer/local\"\n\t\"github.com/jetstack/preflight/pkg/datagatherer/oidc\"\n\t\"github.com/jetstack/preflight/pkg/kubeconfig\"\n\t\"github.com/jetstack/preflight/pkg/logs\"\n\t\"github.com/jetstack/preflight/pkg/version\"\n)\n\n// Config defines the YAML configuration file that you can pass using\n// `--config-file` or `-c`.\ntype Config struct {\n\t// Deprecated: Schedule doesn't do anything. Use `period` instead.\n\tSchedule string        `yaml:\"schedule\"`\n\tPeriod   time.Duration `yaml:\"period\"`\n\n\t// Deprecated: Use `server` instead.\n\tEndpoint Endpoint `yaml:\"endpoint\"`\n\n\t// Server is the base URL for the Preflight server. It defaults to\n\t// https://preflight.jetstack.io in Jetstack Secure OAuth and Jetstack\n\t// Secure API Token modes, and https://api.venafi.cloud in Venafi Cloud Key\n\t// Pair Service Account mode. It is ignored in Venafi Cloud VenafiConnection\n\t// mode and in MachineHub mode.\n\tServer string `yaml:\"server\"`\n\n\t// OrganizationID is only used in Jetstack Secure OAuth and Jetstack Secure\n\t// API Token modes.\n\tOrganizationID string `yaml:\"organization_id\"`\n\n\t// ClusterID is the cluster that the agent is scanning. 
Only used in Jetstack Secure modes.\n\tClusterID string `yaml:\"cluster_id\"`\n\t// ClusterName is the name of the Kubernetes cluster where the agent is running.\n\tClusterName string `yaml:\"cluster_name\"`\n\t// ClusterDescription is a short description of the Kubernetes cluster where the\n\t// agent is running.\n\tClusterDescription string `yaml:\"cluster_description\"`\n\t// ClaimableCerts controls whether discovered certs can be claimed by other tenants.\n\t// true = certs are left unassigned, available for any tenant to claim.\n\t// false (default) = certs are owned by this cluster's tenant.\n\tClaimableCerts bool               `yaml:\"claimable_certs\"`\n\tDataGatherers  []DataGatherer     `yaml:\"data-gatherers\"`\n\tVenafiCloud    *VenafiCloudConfig `yaml:\"venafi-cloud,omitempty\"`\n\n\t// For testing purposes.\n\tInputPath string `yaml:\"input-path\"`\n\t// For testing purposes.\n\tOutputPath string `yaml:\"output-path\"`\n\n\t// Skips annotation keys that match the given set of regular expressions.\n\t// Example: \".*someprivateannotation.*\".\n\tExcludeAnnotationKeysRegex []string `yaml:\"exclude-annotation-keys-regex\"`\n\t// Skips label keys that match the given set of regular expressions.\n\tExcludeLabelKeysRegex []string `yaml:\"exclude-label-keys-regex\"`\n}\n\ntype Endpoint struct {\n\tProtocol string `yaml:\"protocol\"`\n\tHost     string `yaml:\"host\"`\n\tPath     string `yaml:\"path\"`\n}\n\ntype DataGatherer struct {\n\tKind     string `yaml:\"kind\"`\n\tName     string `yaml:\"name\"`\n\tDataPath string `yaml:\"data_path\"`\n\tConfig   datagatherer.Config\n}\n\ntype VenafiCloudConfig struct {\n\t// Deprecated: UploaderID is ignored by the backend and is not needed.\n\t// UploaderID is the upload ID that will be used when creating a cluster\n\t// connection. 
This field is ignored by the backend and is often arbitrarily\n\t// set to \"no\".\n\tUploaderID string `yaml:\"uploader_id,omitempty\"`\n\n\t// UploadPath is the endpoint path for the upload API. Only used in Venafi\n\t// Cloud Key Pair Service Account mode.\n\tUploadPath string `yaml:\"upload_path,omitempty\"`\n}\n\ntype AgentCmdFlags struct {\n\t// ConfigFilePath (--config-file, -c) is the path to the agent configuration\n\t// YAML file.\n\tConfigFilePath string\n\n\t// Period (--period, -p) is the time waited between scans. It takes\n\t// precedence over the config field `period`.\n\tPeriod time.Duration\n\n\t// VenafiCloudMode (--venafi-cloud) turns on the Venafi Cloud Key Pair\n\t// Service Account mode. Must be used in conjunction with\n\t// --credentials-file.\n\tVenafiCloudMode bool\n\n\t// MachineHubMode configures the agent to send data to CyberArk Machine Hub.\n\tMachineHubMode bool\n\n\t// ClientID (--client-id) is the clientID in case of Venafi Cloud Key Pair\n\t// Service Account mode.\n\tClientID string\n\n\t// PrivateKeyPath (--private-key-path) is the path for the service account\n\t// private key in case of Venafi Cloud Key Pair Service Account mode.\n\tPrivateKeyPath string\n\n\t// CredentialsPath (--credentials-file, -k) lets you specify the location of\n\t// the credentials file. This is used for the Jetstack Secure OAuth and\n\t// Venafi Cloud Key Pair Service Account modes. In Venafi Cloud Key Pair\n\t// Service Account mode, you also need to pass --venafi-cloud.\n\tCredentialsPath string\n\n\t// OneShot (--one-shot) is used for testing purposes. The agent will run\n\t// once and exit. It is often used in conjunction with --output-path and/or\n\t// --input-path.\n\tOneShot bool\n\n\t// OutputPath (--output-path) is used for testing purposes. 
In conjunction\n\t// with --one-shot, it allows you to write the data readings to a file\n\t// instead uploading them to the Venafi Cloud API.\n\tOutputPath string\n\n\t// InputPath (--input-path) is used for testing purposes. In conjunction\n\t// with --one-shot, it allows you to push manually crafted data readings (in\n\t// JSON format) to the Venafi Cloud API without the need to connect to a\n\t// Kubernetes cluster. See the jscp-testing-cli's README for more info:\n\t// https://gitlab.com/venafi/vaas/applications/tls-protect-for-k8s/cloud-services/-/tree/master/jscp-testing-cli\n\tInputPath string\n\n\t// BackoffMaxTime (--backoff-max-time) is the maximum time for which data\n\t// gatherers will retry after a failure.\n\tBackoffMaxTime time.Duration\n\n\t// StrictMode (--strict) causes the agent to fail at the first attempt.\n\tStrictMode bool\n\n\t// APIToken (--api-token) allows you to use the Jetstack Secure API Token\n\t// mode. Defaults to the value of the env var API_TOKEN.\n\tAPIToken string\n\n\t// VenConnName (--venafi-connection) is the name of the VenafiConnection\n\t// resource to use. Using this flag will enable Venafi Connection mode.\n\tVenConnName string\n\n\t// VenConnNS (--venafi-connection-namespace) is the namespace of the\n\t// VenafiConnection resource to use. It is only useful when the\n\t// VenafiConnection isn't in the same namespace as the agent.\n\t//\n\t// May be left empty to use the same namespace as the agent.\n\tVenConnNS string\n\n\t// InstallNS (--install-namespace) is the namespace in which the agent is\n\t// running in. Only needed when running the agent outside of Kubernetes.\n\t//\n\t// May be left empty when running in Kubernetes. 
In Kubernetes, the\n\t// namespace is read from the environment variable `POD_NAMESPACE`.\n\tInstallNS string\n\n\t// Profiling (--enable-pprof) enables the pprof server.\n\tProfiling bool\n\n\t// Prometheus (--enable-metrics) enables the Prometheus metrics server.\n\tPrometheus bool\n\n\t// NGTSMode (--ngts) turns on the NGTS mode. The agent will authenticate\n\t// using key pair authentication and send data to NGTS endpoints.\n\tNGTSMode bool\n\n\t// TSGID (--tsg-id) is the TSG (Tenant Service Group) ID for NGTS mode.\n\tTSGID string\n\n\t// NGTSServerURL (--ngts-server-url) is a hidden flag for developers to\n\t// override the NGTS server URL for testing purposes.\n\tNGTSServerURL string\n}\n\nfunc InitAgentCmdFlags(c *cobra.Command, cfg *AgentCmdFlags) {\n\tc.PersistentFlags().StringVarP(\n\t\t&cfg.ConfigFilePath,\n\t\t\"agent-config-file\",\n\t\t\"c\",\n\t\t\"./agent.yaml\",\n\t\t\"Config file location, default is `agent.yaml` in the current working directory.\",\n\t)\n\tc.PersistentFlags().DurationVarP(\n\t\t&cfg.Period,\n\t\t\"period\",\n\t\t\"p\",\n\t\t0,\n\t\t\"Override time between scans in the configuration file (given as XhYmZs).\",\n\t)\n\tc.PersistentFlags().StringVarP(\n\t\t&cfg.CredentialsPath,\n\t\t\"credentials-file\",\n\t\t\"k\",\n\t\t\"\",\n\t\tfmt.Sprintf(\"Location of the credentials file. For the %s and %s modes.\", JetstackSecureOAuth, VenafiCloudKeypair),\n\t)\n\tc.PersistentFlags().BoolVarP(\n\t\t&cfg.VenafiCloudMode,\n\t\t\"venafi-cloud\",\n\t\t\"\",\n\t\tfalse,\n\t\tfmt.Sprintf(\"Turns on the %s mode. The flag --credentials-file must also be passed.\", JetstackSecureOAuth),\n\t)\n\tif err := c.PersistentFlags().MarkHidden(\"venafi-cloud\"); err != nil {\n\t\tpanic(err)\n\t}\n\tc.PersistentFlags().StringVarP(\n\t\t&cfg.ClientID,\n\t\t\"client-id\",\n\t\t\"\",\n\t\t\"\",\n\t\tfmt.Sprintf(\"Turns on the %s mode. If you use this flag you don't need to use --venafi-cloud \"+\n\t\t\t\"as it will assume you are authenticating with Venafi Cloud. 
Using this removes the need to use a \"+\n\t\t\t\"credentials file.\", VenafiCloudKeypair),\n\t)\n\tc.PersistentFlags().StringVarP(\n\t\t&cfg.PrivateKeyPath,\n\t\t\"private-key-path\",\n\t\t\"\",\n\t\t\"\",\n\t\t\"To be used in conjunction with --client-id. The path to the private key file for the service account.\",\n\t)\n\tc.PersistentFlags().BoolVarP(\n\t\t&cfg.OneShot,\n\t\t\"one-shot\",\n\t\t\"\",\n\t\tfalse,\n\t\t\"For testing purposes. The agent will run once and exit. It is often used in conjunction with --output-path and/or --input-path.\",\n\t)\n\tc.PersistentFlags().StringVarP(\n\t\t&cfg.OutputPath,\n\t\t\"output-path\",\n\t\t\"\",\n\t\t\"\",\n\t\t\"For testing purposes. In conjunction with --one-shot, it allows you to write the data readings to a file instead of uploading to the server.\",\n\t)\n\tc.PersistentFlags().StringVarP(\n\t\t&cfg.InputPath,\n\t\t\"input-path\",\n\t\t\"\",\n\t\t\"\",\n\t\t\"For testing purposes. In conjunction with --one-shot, it allows you to push manually crafted data readings (in JSON format) to the Venafi Cloud API without the need to connect to a Kubernetes cluster.\",\n\t)\n\tc.PersistentFlags().DurationVarP(\n\t\t&cfg.BackoffMaxTime,\n\t\t\"backoff-max-time\",\n\t\t\"\",\n\t\t10*time.Minute,\n\t\t\"Max time for retrying failed data gatherers (given as XhYmZs).\",\n\t)\n\tc.PersistentFlags().BoolVarP(\n\t\t&cfg.StrictMode,\n\t\t\"strict\",\n\t\t\"\",\n\t\tfalse,\n\t\t\"Runs agent in strict mode. No retry attempts will be made for a missing data gatherer's data.\",\n\t)\n\tc.PersistentFlags().StringVar(\n\t\t&cfg.APIToken,\n\t\t\"api-token\",\n\t\tos.Getenv(\"API_TOKEN\"),\n\t\t\"Turns on the \"+string(JetstackSecureAPIToken)+\" mode. Defaults to the value of the env var API_TOKEN.\",\n\t)\n\tc.PersistentFlags().StringVar(\n\t\t&cfg.VenConnName,\n\t\t\"venafi-connection\",\n\t\t\"\",\n\t\t\"Turns on the \"+string(VenafiCloudVenafiConnection)+\" mode. 
\"+\n\t\t\t\"This flag configures the name of the VenafiConnection to be used.\",\n\t)\n\tc.PersistentFlags().StringVar(\n\t\t&cfg.VenConnNS,\n\t\t\"venafi-connection-namespace\",\n\t\t\"\",\n\t\t\"Namespace of the VenafiConnection to be used. It is only useful when the \"+\n\t\t\t\"VenafiConnection isn't in the same namespace as the agent. The field `allowReferencesFrom` \"+\n\t\t\t\"must be present on the cross-namespace VenafiConnection for the agent to use it.\",\n\t)\n\tc.PersistentFlags().StringVar(\n\t\t&cfg.InstallNS,\n\t\t\"install-namespace\",\n\t\t\"\",\n\t\t\"For testing purposes. Namespace in which the agent is running. \"+\n\t\t\t\"Only needed when running the agent outside of Kubernetes.\",\n\t)\n\tc.PersistentFlags().BoolVarP(\n\t\t&cfg.Profiling,\n\t\t\"enable-pprof\",\n\t\t\"\",\n\t\tfalse,\n\t\t\"Enables the pprof profiling endpoints on the agent server (port: 8081).\",\n\t)\n\tc.PersistentFlags().BoolVarP(\n\t\t&cfg.Prometheus,\n\t\t\"enable-metrics\",\n\t\t\"\",\n\t\tfalse,\n\t\t\"Enables Prometheus metrics server on the agent (port: 8081).\",\n\t)\n\n\tvar dummy bool\n\tc.PersistentFlags().BoolVar(\n\t\t&dummy,\n\t\t\"disable-compression\",\n\t\tfalse,\n\t\t\"Deprecated. No longer has an effect.\",\n\t)\n\tif err := c.PersistentFlags().MarkDeprecated(\"disable-compression\", \"no longer has an effect\"); err != nil {\n\t\tpanic(err)\n\t}\n\n\t// This is a hidden feature flag we use to build the \"Machine Hub\" feature\n\t// gradually without impacting customers. Once the feature is GA, we will\n\t// turn this flag \"on\" by default.\n\tc.PersistentFlags().BoolVar(\n\t\t&cfg.MachineHubMode,\n\t\t\"machine-hub\",\n\t\tfalse,\n\t\t\"Enables the MachineHub mode. The agent will push data to CyberArk MachineHub.\",\n\t)\n\tif err := c.PersistentFlags().MarkHidden(\"machine-hub\"); err != nil {\n\t\tpanic(err)\n\t}\n\n\tc.PersistentFlags().BoolVar(\n\t\t&cfg.NGTSMode,\n\t\t\"ngts\",\n\t\tfalse,\n\t\t\"Enables NGTS mode. 
The agent will authenticate using key pair authentication and send data to NGTS endpoints. \"+\n\t\t\t\"Must be used in conjunction with --tsg-id and --private-key-path. --client-id is optional if provided in the credentials secret.\",\n\t)\n\tc.PersistentFlags().StringVar(\n\t\t&cfg.TSGID,\n\t\t\"tsg-id\",\n\t\t\"\",\n\t\t\"The TSG (Tenant Service Group) ID for NGTS mode. Required when using --ngts.\",\n\t)\n\n\tngtsServerURLFlag := \"ngts-server-url\"\n\n\tc.PersistentFlags().StringVar(\n\t\t&cfg.NGTSServerURL,\n\t\tngtsServerURLFlag,\n\t\t\"\",\n\t\t\"Override the NGTS server URL for testing purposes. This flag is intended for agent development and should not need to be set.\",\n\t)\n\n\t// ngts-server-url is intended only for developers, so hide it from help\n\tif err := c.PersistentFlags().MarkHidden(ngtsServerURLFlag); err != nil {\n\t\tpanic(err)\n\t}\n\n}\n\n// OutputMode controls how the collected data is published.\n// Only one OutputMode may be provided.\ntype OutputMode string\n\nconst (\n\tJetstackSecureOAuth         OutputMode = \"Jetstack Secure OAuth\"\n\tJetstackSecureAPIToken      OutputMode = \"Jetstack Secure API Token\"\n\tVenafiCloudKeypair          OutputMode = \"Venafi Cloud Key Pair Service Account\"\n\tVenafiCloudVenafiConnection OutputMode = \"Venafi Cloud VenafiConnection\"\n\tLocalFile                   OutputMode = \"Local File\"\n\tMachineHub                  OutputMode = \"MachineHub\"\n\tNGTS                        OutputMode = \"NGTS\"\n)\n\n// The command-line flags and the config file and some environment variables are\n// combined into this struct by ValidateAndCombineConfig.\ntype CombinedConfig struct {\n\tDataGatherers  []DataGatherer\n\tPeriod         time.Duration\n\tBackoffMaxTime time.Duration\n\tInstallNS      string\n\tStrictMode     bool\n\tOneShot        bool\n\n\tOutputMode OutputMode\n\n\t// Only used in JetstackSecure modes.\n\tClusterID string\n\n\t// Used by JetstackSecureOAuth, JetstackSecureAPIToken, and\n\t// 
VenafiCloudKeypair. Ignored in VenafiCloudVenafiConnection mode.\n\tServer string\n\n\t// JetstackSecureOAuth and JetstackSecureAPIToken modes only.\n\tOrganizationID string\n\tEndpointPath   string // Deprecated.\n\n\t// VenafiCloudKeypair mode only.\n\tUploadPath string\n\n\t// ClusterName is the name of the Kubernetes cluster where the agent is\n\t// running.\n\tClusterName string\n\n\t// ClusterDescription is a short description of the Kubernetes cluster where\n\t// the agent is running.\n\tClusterDescription string\n\n\t// ClaimableCerts controls whether discovered certs can be claimed by other tenants.\n\t// true = certs are left unassigned, available for any tenant to claim.\n\t// false (default) = certs are owned by this cluster's tenant.\n\tClaimableCerts bool\n\n\t// VenafiCloudVenafiConnection mode only.\n\tVenConnName string\n\tVenConnNS   string\n\n\t// VenafiCloudKeypair and VenafiCloudVenafiConnection modes only.\n\tExcludeAnnotationKeysRegex []*regexp.Regexp\n\tExcludeLabelKeysRegex      []*regexp.Regexp\n\n\t// NGTS mode only.\n\tTSGID         string\n\tNGTSServerURL string\n\n\t// Only used for testing purposes.\n\tOutputPath string\n\tInputPath  string\n}\n\n// ValidateAndCombineConfig combines and validates the input configuration with\n// the flags passed to the agent and returns the final configuration as well as\n// the Venafi client to be used to upload data. Does not do any network call.\n// The logger can be changed for testing purposes. You do not need to call\n// ValidateDataGatherers as ValidateAndCombineConfig already does that.\n//\n// The error returned may be a multierror.Error. 
Use multierror.Prefix(err,\n// \"context:\") rather than fmt.Errorf(\"context: %w\", err) when wrapping the\n// error.\nfunc ValidateAndCombineConfig(log logr.Logger, cfg Config, flags AgentCmdFlags) (CombinedConfig, client.Client, error) {\n\tres := CombinedConfig{}\n\n\t{\n\t\tvar (\n\t\t\tmode          OutputMode\n\t\t\treason        string\n\t\t\tkeysAndValues []any\n\t\t)\n\t\tswitch {\n\t\tcase flags.NGTSMode:\n\t\t\tmode = NGTS\n\t\t\treason = \"--ngts was specified\"\n\t\t\tkeysAndValues = []any{\"ngts\", true}\n\t\tcase flags.VenafiCloudMode && flags.CredentialsPath != \"\":\n\t\t\tmode = VenafiCloudKeypair\n\t\t\treason = \"--venafi-cloud and --credentials-path were specified\"\n\t\t\tkeysAndValues = []any{\"credentialsPath\", flags.CredentialsPath}\n\t\tcase flags.ClientID != \"\" || flags.PrivateKeyPath != \"\":\n\t\t\tif flags.PrivateKeyPath == \"\" {\n\t\t\t\treturn CombinedConfig{}, nil, fmt.Errorf(\"if --client-id is specified, --private-key-path must also be specified\")\n\t\t\t}\n\t\t\tif flags.ClientID == \"\" {\n\t\t\t\treturn CombinedConfig{}, nil, fmt.Errorf(\"--private-key-path is specified, --client-id must also be specified\")\n\t\t\t}\n\n\t\t\tmode = VenafiCloudKeypair\n\t\t\treason = \"--client-id and --private-key-path were specified\"\n\t\t\tkeysAndValues = []any{\"clientID\", flags.ClientID, \"privateKeyPath\", flags.PrivateKeyPath}\n\t\tcase flags.VenConnName != \"\":\n\t\t\tmode = VenafiCloudVenafiConnection\n\t\t\treason = \"--venafi-connection was specified\"\n\t\t\tkeysAndValues = []any{\"venConnName\", flags.VenConnName}\n\t\tcase flags.APIToken != \"\":\n\t\t\tmode = JetstackSecureAPIToken\n\t\t\treason = \"--api-token was specified\"\n\t\tcase !flags.VenafiCloudMode && flags.CredentialsPath != \"\":\n\t\t\tmode = JetstackSecureOAuth\n\t\t\treason = \"--credentials-file was specified without --venafi-cloud\"\n\t\tcase flags.MachineHubMode:\n\t\t\tmode = MachineHub\n\t\t\treason = \"--machine-hub was specified\"\n\t\tcase 
flags.OutputPath != \"\":\n\t\t\tmode = LocalFile\n\t\t\treason = \"--output-path was specified\"\n\t\tcase cfg.OutputPath != \"\":\n\t\t\tmode = LocalFile\n\t\t\treason = \"output-path was specified in the config file\"\n\t\tdefault:\n\t\t\treturn CombinedConfig{}, nil, fmt.Errorf(\"no output mode specified. \" +\n\t\t\t\t\"To enable one of the output modes, you can:\\n\" +\n\t\t\t\t\" - Use --ngts with --tsg-id and --private-key-path to use the \" + string(NGTS) + \" mode (--client-id is optional if provided in the credentials secret).\\n\" +\n\t\t\t\t\" - Use (--venafi-cloud with --credentials-file) or (--client-id with --private-key-path) to use the \" + string(VenafiCloudKeypair) + \" mode.\\n\" +\n\t\t\t\t\" - Use --venafi-connection for the \" + string(VenafiCloudVenafiConnection) + \" mode.\\n\" +\n\t\t\t\t\" - Use --credentials-file alone if you want to use the \" + string(JetstackSecureOAuth) + \" mode.\\n\" +\n\t\t\t\t\" - Use --api-token if you want to use the \" + string(JetstackSecureAPIToken) + \" mode.\\n\" +\n\t\t\t\t\" - Use --machine-hub if you want to use the \" + string(MachineHub) + \" mode.\\n\" +\n\t\t\t\t\" - Use --output-path or output-path in the config file for \" + string(LocalFile) + \" mode.\")\n\t\t}\n\n\t\tkeysAndValues = append(keysAndValues, \"mode\", mode, \"reason\", reason)\n\t\tlog.V(logs.Debug).Info(\"Output mode selected\", keysAndValues...)\n\t\tres.OutputMode = mode\n\t}\n\n\tvar errs error\n\n\t// Validation of NGTS mode requirements.\n\tif res.OutputMode == NGTS {\n\t\tif flags.TSGID == \"\" {\n\t\t\terrs = multierror.Append(errs, fmt.Errorf(\"--tsg-id is required when using --ngts\"))\n\t\t}\n\t\tif flags.PrivateKeyPath == \"\" {\n\t\t\terrs = multierror.Append(errs, fmt.Errorf(\"--private-key-path is required when using --ngts\"))\n\t\t}\n\n\t\t// Error if MachineHub mode is also enabled\n\t\tif flags.MachineHubMode {\n\t\t\terrs = multierror.Append(errs, fmt.Errorf(\"--machine-hub cannot be used with --ngts. 
These are mutually exclusive modes.\"))\n\t\t}\n\n\t\t// Error if VenafiConnection mode flags are used\n\t\tif flags.VenConnName != \"\" {\n\t\t\terrs = multierror.Append(errs, fmt.Errorf(\"--venafi-connection cannot be used with --ngts. Use --client-id and --private-key-path instead.\"))\n\t\t}\n\n\t\t// Error if Jetstack Secure OAuth mode flags are used\n\t\tif !flags.VenafiCloudMode && flags.CredentialsPath != \"\" {\n\t\t\terrs = multierror.Append(errs, fmt.Errorf(\"--credentials-file (for Jetstack Secure OAuth) cannot be used with --ngts. Use --client-id and --private-key-path instead.\"))\n\t\t}\n\n\t\t// Error if API Token mode is used\n\t\tif flags.APIToken != \"\" {\n\t\t\terrs = multierror.Append(errs, fmt.Errorf(\"--api-token cannot be used with --ngts. Use --client-id and --private-key-path instead.\"))\n\t\t}\n\n\t\t// Error if --venafi-cloud is used with --ngts\n\t\tif flags.VenafiCloudMode {\n\t\t\terrs = multierror.Append(errs, fmt.Errorf(\"--venafi-cloud cannot be used with --ngts. These are different deployment targets.\"))\n\t\t}\n\n\t\t// Error if organization_id or cluster_id are set in config (these are for Jetstack Secure / CM-SaaS)\n\t\tif cfg.OrganizationID != \"\" {\n\t\t\terrs = multierror.Append(errs, fmt.Errorf(\"organization_id in config file is not supported in NGTS mode. This field is only for Jetstack Secure.\"))\n\t\t}\n\n\t\tif cfg.ClusterID != \"\" {\n\t\t\terrs = multierror.Append(errs, fmt.Errorf(\"cluster_id in config file is not supported in NGTS mode. Use cluster_name instead.\"))\n\t\t}\n\n\t\tres.TSGID = flags.TSGID\n\t\tres.NGTSServerURL = flags.NGTSServerURL\n\t}\n\n\t// Validation and defaulting of `server` and the deprecated `endpoint.path`.\n\t{\n\t\t// Only relevant if using TLSPK backends\n\t\thasEndpointField := cfg.Endpoint.Host != \"\" && cfg.Endpoint.Path != \"\"\n\t\thasServerField := cfg.Server != \"\"\n\t\tvar server string\n\t\tvar endpointPath string // Deprecated. 
Only used when the `endpoint` field is set.\n\t\tswitch {\n\t\tcase hasServerField && !hasEndpointField:\n\t\t\tserver = cfg.Server\n\t\tcase hasServerField && hasEndpointField:\n\t\t\t// The `server` field takes precedence over the deprecated\n\t\t\t// `endpoint` field.\n\t\t\tlog.Info(\"The `server` and `endpoint` fields are both set in the config; using the `server` field.\")\n\t\t\tserver = cfg.Server\n\t\tcase !hasServerField && hasEndpointField:\n\t\t\tlog.Info(\"Using deprecated Endpoint configuration. User Server instead.\")\n\t\t\tif cfg.Endpoint.Protocol == \"\" && cfg.Server == \"\" {\n\t\t\t\tcfg.Endpoint.Protocol = \"http\"\n\t\t\t}\n\t\t\tserver = fmt.Sprintf(\"%s://%s\", cfg.Endpoint.Protocol, cfg.Endpoint.Host)\n\t\t\tendpointPath = cfg.Endpoint.Path\n\t\tcase !hasServerField && !hasEndpointField:\n\t\t\tserver = \"https://preflight.jetstack.io\"\n\t\t\tif res.OutputMode == VenafiCloudKeypair {\n\t\t\t\t// The VenafiCloudVenafiConnection mode doesn't need a server.\n\t\t\t\tserver = client.VenafiCloudProdURL\n\t\t\t}\n\t\t\tif res.OutputMode == NGTS {\n\t\t\t\t// In NGTS mode, use NGTSServerURL if provided, otherwise we'll use a default\n\t\t\t\t// (which will be determined when creating the client)\n\t\t\t\tserver = res.NGTSServerURL\n\t\t\t}\n\t\t}\n\n\t\t// In NGTS mode: ignore the config-file server field entirely; use only\n\t\t// --ngts-server-url when provided (default URL is derived from TSG ID\n\t\t// at client construction time).\n\t\tif res.OutputMode == NGTS {\n\t\t\tif res.NGTSServerURL != \"\" {\n\t\t\t\tlog.Info(\"Using custom NGTS server URL (for testing)\", \"url\", res.NGTSServerURL)\n\t\t\t}\n\n\t\t\t// config-file server field has no impact in NGTS mode so warn about it\n\t\t\tif cfg.Server != \"\" {\n\t\t\t\tlog.Info(fmt.Sprintf(\"ignoring the server field in the config file. 
In %s mode, use --ngts-server-url for testing.\", NGTS))\n\t\t\t}\n\n\t\t\tserver = res.NGTSServerURL\n\t\t}\n\n\t\turl, urlErr := url.Parse(server)\n\t\tif server != \"\" && (urlErr != nil || url.Hostname() == \"\") {\n\t\t\terrs = multierror.Append(errs, fmt.Errorf(\"server %q is not a valid URL\", server))\n\t\t}\n\n\t\tif res.OutputMode == VenafiCloudVenafiConnection && server != \"\" {\n\t\t\tlog.Info(fmt.Sprintf(\"ignoring the server field specified in the config file. In %s mode, this field is not needed.\", VenafiCloudVenafiConnection))\n\t\t\tserver = \"\"\n\t\t}\n\n\t\tres.Server = server\n\t\tres.EndpointPath = endpointPath\n\t}\n\n\t// Validation of `venafi-cloud.upload_path`.\n\t{\n\t\tvar uploadPath string\n\t\tswitch res.OutputMode { // nolint:exhaustive\n\t\tcase VenafiCloudKeypair:\n\t\t\tif cfg.VenafiCloud == nil || cfg.VenafiCloud.UploadPath == \"\" {\n\t\t\t\terrs = multierror.Append(errs, fmt.Errorf(\"the venafi-cloud.upload_path field is required when using the %s mode\", res.OutputMode))\n\t\t\t\tbreak // Skip to the end of the switch statement.\n\t\t\t}\n\t\t\t_, urlErr := url.Parse(cfg.VenafiCloud.UploadPath)\n\t\t\tif urlErr != nil {\n\t\t\t\terrs = multierror.Append(errs, fmt.Errorf(\"upload_path is not a valid URL\"))\n\t\t\t\tbreak // Skip to the end of the switch statement.\n\t\t\t}\n\n\t\t\tuploadPath = cfg.VenafiCloud.UploadPath\n\t\tcase VenafiCloudVenafiConnection:\n\t\t\t// The venafi-cloud.upload_path was initially meant to let users\n\t\t\t// configure HTTP proxies, but it has never been used since HTTP\n\t\t\t// proxies don't rewrite paths. Thus, we've disabled the ability to\n\t\t\t// change this value with the new --venafi-connection flag, and this\n\t\t\t// field is simply ignored.\n\t\t\tif cfg.VenafiCloud != nil && cfg.VenafiCloud.UploadPath != \"\" {\n\t\t\t\tlog.Info(fmt.Sprintf(`ignoring the venafi-cloud.upload_path field in the config file. 
In %s mode, this field is not needed.`, res.OutputMode))\n\t\t\t}\n\t\t\tuploadPath = \"\"\n\t\tcase NGTS:\n\t\t\t// NGTS mode doesn't use the upload_path field\n\t\t\tif cfg.VenafiCloud != nil && cfg.VenafiCloud.UploadPath != \"\" {\n\t\t\t\tlog.Info(fmt.Sprintf(`ignoring the venafi-cloud.upload_path field in the config file. In %s mode, this field is not needed.`, res.OutputMode))\n\t\t\t}\n\t\t\tuploadPath = \"\"\n\t\t}\n\t\tres.UploadPath = uploadPath\n\t}\n\n\t// Validation of `uploader_id`.\n\t//\n\t// We found that `venafi-cloud.uploader_id` doesn't do anything in the\n\t// backend. Since the backend requires it for historical reasons (but cannot\n\t// be empty), we just ignore whatever the user has set in the config file,\n\t// and set it to an arbitrary value in the client since it doesn't matter.\n\t//\n\t// TODO(mael): Remove the arbitrary `/no` path parameter from the Agent once\n\t// https://venafi.atlassian.net/browse/VC-35385 is done.\n\t{\n\t\tif cfg.VenafiCloud != nil && cfg.VenafiCloud.UploaderID != \"\" {\n\t\t\tlog.Info(fmt.Sprintf(`ignoring the venafi-cloud.uploader_id field in the config file. This field is not needed in %s mode.`, res.OutputMode))\n\t\t}\n\t}\n\n\t// Validation of `cluster_name`, `cluster_id` and `organization_id`.\n\t{\n\t\tvar clusterName string    // Required by venafi cloud modes. 
Optional for MachineHub mode.\n\t\tvar clusterID string      // Required by the old jetstack-secure mode deprecated for venafi cloud modes.\n\t\tvar organizationID string // Only used by the old jetstack-secure mode.\n\t\tswitch res.OutputMode {   // nolint:exhaustive\n\t\tcase NGTS:\n\t\t\t// NGTS mode requires cluster_name\n\t\t\tif cfg.ClusterName == \"\" {\n\t\t\t\terrs = multierror.Append(errs, fmt.Errorf(\"cluster_name is required in %s mode\", res.OutputMode))\n\t\t\t}\n\t\t\tclusterName = cfg.ClusterName\n\t\t\t// cluster_id and organization_id were already validated to not be present in NGTS mode\n\t\tcase VenafiCloudKeypair, VenafiCloudVenafiConnection:\n\t\t\t// For backwards compatibility, use the agent config's `cluster_id` as\n\t\t\t// ClusterName if `cluster_name` is not set.\n\t\t\tif cfg.ClusterName == \"\" && cfg.ClusterID == \"\" {\n\t\t\t\terrs = multierror.Append(errs, fmt.Errorf(\"cluster_name or cluster_id is required in %s mode\", res.OutputMode))\n\t\t\t}\n\t\t\tif cfg.ClusterName != \"\" && cfg.ClusterID != \"\" {\n\t\t\t\tlog.Info(fmt.Sprintf(`Ignoring the cluster_id field in the config file. This field is not needed in %s mode.`, res.OutputMode))\n\t\t\t}\n\t\t\tclusterName = cfg.ClusterName\n\t\t\tif clusterName == \"\" {\n\t\t\t\tlog.Info(\"Using cluster_id as cluster_name for backwards compatibility\", \"clusterID\", cfg.ClusterID)\n\t\t\t\tclusterName = cfg.ClusterID\n\t\t\t}\n\t\t\tif cfg.OrganizationID != \"\" {\n\t\t\t\tlog.Info(fmt.Sprintf(`Ignoring the organization_id field in the config file. 
This field is not needed in %s mode.`, res.OutputMode))\n\t\t\t}\n\t\tcase JetstackSecureOAuth, JetstackSecureAPIToken:\n\t\t\tif cfg.OrganizationID == \"\" {\n\t\t\t\terrs = multierror.Append(errs, fmt.Errorf(\"organization_id is required\"))\n\t\t\t}\n\t\t\tif cfg.ClusterID == \"\" {\n\t\t\t\terrs = multierror.Append(errs, fmt.Errorf(\"cluster_id is required\"))\n\t\t\t}\n\t\t\torganizationID = cfg.OrganizationID\n\t\t\tclusterID = cfg.ClusterID\n\t\tcase MachineHub:\n\t\t\tclusterName = cfg.ClusterName\n\t\t\tif clusterName == \"\" {\n\t\t\t\tif arkUsername, found := os.LookupEnv(\"ARK_USERNAME\"); found {\n\t\t\t\t\tlog.Info(\"Using ARK_USERNAME environment variable as cluster name\", \"clusterName\", arkUsername)\n\t\t\t\t\tclusterName = arkUsername\n\t\t\t\t}\n\t\t\t}\n\t\t\tif cfg.OrganizationID != \"\" {\n\t\t\t\tlog.Info(fmt.Sprintf(`Ignoring the organization_id field in the config file. This field is not needed in %s mode.`, res.OutputMode))\n\t\t\t}\n\t\t\tif cfg.ClusterID != \"\" {\n\t\t\t\tlog.Info(fmt.Sprintf(`Ignoring the cluster_id field in the config file. This field is not needed in %s mode.`, res.OutputMode))\n\t\t\t}\n\t\t}\n\t\tres.OrganizationID = organizationID\n\t\tres.ClusterID = clusterID\n\t\tres.ClusterName = clusterName\n\t\tres.ClusterDescription = cfg.ClusterDescription\n\t\tres.ClaimableCerts = cfg.ClaimableCerts\n\t}\n\n\t// Validation of `data-gatherers`.\n\t{\n\t\tif dgErr := ValidateDataGatherers(cfg.DataGatherers); dgErr != nil {\n\t\t\terrs = multierror.Append(errs, dgErr)\n\t\t}\n\t\tres.DataGatherers = cfg.DataGatherers\n\t}\n\n\t// Validation of --period, -p, and the `period` field, as well as\n\t// --backoff-max-time, --one-shot, and --strict. 
The flag --period/-p takes\n\t// precedence over the config `period`.\n\t{\n\t\tvar period time.Duration\n\t\tswitch {\n\t\tcase flags.OneShot:\n\t\t\t// OneShot mode doesn't need a period, skipping validation.\n\t\tcase flags.Period == 0 && cfg.Period == 0:\n\t\t\terrs = multierror.Append(errs, fmt.Errorf(\"period must be set using --period or -p, or using the 'period' field in the config file\"))\n\t\tcase flags.Period == 0 && cfg.Period > 0:\n\t\t\tlog.Info(\"Using period from config\", \"period\", cfg.Period)\n\t\t\tperiod = cfg.Period\n\t\tcase flags.Period > 0 && cfg.Period == 0:\n\t\t\tperiod = flags.Period\n\t\tcase flags.Period > 0 && cfg.Period > 0:\n\t\t\t// The flag takes precedence.\n\t\t\tlog.Info(\"Both the 'period' field and --period are set. Using the value provided with --period.\")\n\t\t\tperiod = flags.Period\n\t\t}\n\t\tres.Period = period\n\t\tres.OneShot = flags.OneShot\n\t\tres.BackoffMaxTime = flags.BackoffMaxTime\n\t\tres.StrictMode = flags.StrictMode\n\t}\n\n\t// Validation of --install-namespace.\n\t{\n\t\tinstallNS := flags.InstallNS\n\t\tif installNS == \"\" {\n\t\t\tvar err error\n\t\t\tinstallNS, err = getInClusterNamespace()\n\t\t\tif err != nil {\n\t\t\t\tif res.OutputMode == VenafiCloudVenafiConnection {\n\t\t\t\t\terrs = multierror.Append(errs, fmt.Errorf(\"could not guess which namespace the agent is running in: %w\", err))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tres.InstallNS = installNS\n\t}\n\n\t// Validation of --venafi-connection and --venafi-connection-namespace.\n\tif res.OutputMode == VenafiCloudVenafiConnection {\n\t\tres.VenConnName = flags.VenConnName\n\t\tvenConnNS := flags.VenConnNS\n\t\tif flags.VenConnNS == \"\" {\n\t\t\tvenConnNS = res.InstallNS\n\t\t}\n\t\tres.VenConnNS = venConnNS\n\t}\n\n\t// Validation of --output-path, --input-path, `output-path`, and\n\t// `input-path`. 
The flags --output-path and --input-path take precedence.\n\t{\n\t\tres.InputPath = cfg.InputPath\n\t\tres.OutputPath = cfg.OutputPath\n\t\tif flags.OutputPath != \"\" {\n\t\t\tres.OutputPath = flags.OutputPath\n\t\t}\n\t\tif flags.InputPath != \"\" {\n\t\t\tres.InputPath = flags.InputPath\n\t\t}\n\t}\n\n\t// Validation of the config fields exclude_annotation_keys_regex and\n\t// exclude_label_keys_regex.\n\t{\n\t\tfor i, regex := range cfg.ExcludeAnnotationKeysRegex {\n\t\t\tr, err := regexp.Compile(regex)\n\t\t\tif err != nil {\n\t\t\t\terrs = multierror.Append(errs, fmt.Errorf(\"invalid exclude_annotation_keys_regex[%d]: %w\", i, err))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tres.ExcludeAnnotationKeysRegex = append(res.ExcludeAnnotationKeysRegex, r)\n\t\t}\n\t\tfor i, regex := range cfg.ExcludeLabelKeysRegex {\n\t\t\tr, err := regexp.Compile(regex)\n\t\t\tif err != nil {\n\t\t\t\terrs = multierror.Append(errs, fmt.Errorf(\"invalid exclude_label_keys_regex[%d]: %w\", i, err))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tres.ExcludeLabelKeysRegex = append(res.ExcludeLabelKeysRegex, r)\n\t\t}\n\t}\n\n\tif errs != nil {\n\t\treturn CombinedConfig{}, nil, errs\n\t}\n\n\toutputClient, err := validateCredsAndCreateClient(log, flags.CredentialsPath, flags.ClientID, flags.PrivateKeyPath, flags.APIToken, res)\n\tif err != nil {\n\t\treturn CombinedConfig{}, nil, multierror.Prefix(err, \"validating creds:\")\n\t}\n\n\treturn res, outputClient, nil\n}\n\n// Validation of --credentials-file/-k, --client-id, and --private-key-path,\n// --api-token, and creation of the client.\n//\n// The error returned may be a multierror.Error. 
Use multierror.Prefix(err,\n// \"context:\") rather than fmt.Errorf(\"context: %w\", err) when wrapping the\n// error.\nfunc validateCredsAndCreateClient(log logr.Logger, flagCredentialsPath, flagClientID, flagPrivateKeyPath, flagAPIToken string, cfg CombinedConfig) (client.Client, error) {\n\tvar errs error\n\n\tvar outputClient client.Client\n\tmetadata := &api.AgentMetadata{Version: version.PreflightVersion, ClusterID: cfg.ClusterID}\n\tswitch cfg.OutputMode {\n\tcase JetstackSecureOAuth:\n\t\t// Note that there are no command line flags to configure the\n\t\t// JetstackSecureOAuth mode.\n\t\tcredsBytes, err := readCredentialsFile(flagCredentialsPath)\n\t\tif err != nil {\n\t\t\terrs = multierror.Append(errs, multierror.Prefix(err, \"credentials file:\"))\n\t\t\tbreak // Don't continue with parsing if could not load the file.\n\t\t}\n\n\t\tcreds, err := client.ParseOAuthCredentials(credsBytes)\n\t\tif err != nil {\n\t\t\terrs = multierror.Append(errs, multierror.Prefix(err, \"credentials file:\"))\n\t\t\tbreak // Don't continue with the client if credentials file invalid.\n\t\t}\n\n\t\toutputClient, err = client.NewOAuthClient(metadata, creds, cfg.Server)\n\t\tif err != nil {\n\t\t\terrs = multierror.Append(errs, err)\n\t\t}\n\tcase VenafiCloudKeypair:\n\t\tvar creds *client.VenafiSvcAccountCredentials\n\n\t\tif flagClientID != \"\" && flagCredentialsPath != \"\" {\n\t\t\terrs = multierror.Append(errs, fmt.Errorf(\"--client-id and --credentials-file cannot be used simultaneously\"))\n\t\t\tbreak\n\t\t}\n\t\tif flagPrivateKeyPath != \"\" && flagCredentialsPath != \"\" {\n\t\t\terrs = multierror.Append(errs, fmt.Errorf(\"--private-key-path and --credentials-file cannot be used simultaneously\"))\n\t\t\tbreak\n\t\t}\n\t\tif flagClientID == \"\" && flagPrivateKeyPath == \"\" && flagCredentialsPath == \"\" {\n\t\t\terrs = multierror.Append(errs, fmt.Errorf(\"either --client-id and --private-key-path or --credentials-file must be 
provided\"))\n\t\t\tbreak\n\t\t}\n\n\t\tswitch {\n\t\tcase flagClientID != \"\" && flagPrivateKeyPath != \"\":\n\t\t\t// If --client-id and --private-key-path are passed, then\n\t\t\t// --credentials-file is ignored.\n\t\t\tcreds = &client.VenafiSvcAccountCredentials{\n\t\t\t\tClientID:       flagClientID,\n\t\t\t\tPrivateKeyFile: flagPrivateKeyPath,\n\t\t\t}\n\t\tcase flagCredentialsPath != \"\":\n\t\t\tcredsBytes, err := readCredentialsFile(flagCredentialsPath)\n\t\t\tif err != nil {\n\t\t\t\terrs = multierror.Append(errs, multierror.Prefix(err, \"credentials file:\"))\n\t\t\t\tbreak // Don't continue if couldn't read the creds file.\n\t\t\t}\n\t\t\tcreds, err = client.ParseVenafiCredentials(credsBytes)\n\t\t\tif err != nil {\n\t\t\t\terrs = multierror.Append(errs, multierror.Prefix(err, \"credentials file:\"))\n\t\t\t\tbreak // Don't continue with the client since creds is invalid.\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"programmer mistake: --client-id and --private-key-path or --credentials-file must have been provided\")\n\t\t}\n\n\t\t// The uploader ID isn't actually used in the backend, let's use an\n\t\t// arbitrary value.\n\t\tuploaderID := \"no\"\n\n\t\t// We don't do this for the VenafiCloudVenafiConnection mode because\n\t\t// the upload_path field is ignored in that mode.\n\t\tlog.Info(\"Loading upload_path from \\\"venafi-cloud\\\" configuration.\")\n\n\t\tvar err error\n\t\toutputClient, err = client.NewVenafiCloudClient(metadata, creds, cfg.Server, uploaderID, cfg.UploadPath)\n\t\tif err != nil {\n\t\t\terrs = multierror.Append(errs, err)\n\t\t}\n\tcase VenafiCloudVenafiConnection:\n\t\tvar restCfg *rest.Config\n\t\trestCfg, err := kubeconfig.LoadRESTConfig(\"\")\n\t\tif err != nil {\n\t\t\terrs = multierror.Append(errs, fmt.Errorf(\"loading kubeconfig: %w\", err))\n\t\t\tbreak // Don't continue with the client if kubeconfig wasn't loaded.\n\t\t}\n\n\t\toutputClient, err = client.NewVenConnClient(restCfg, metadata, cfg.InstallNS, 
cfg.VenConnName, cfg.VenConnNS, nil)\n\t\tif err != nil {\n\t\t\terrs = multierror.Append(errs, err)\n\t\t}\n\tcase JetstackSecureAPIToken:\n\t\tvar err error\n\t\toutputClient, err = client.NewAPITokenClient(metadata, flagAPIToken, cfg.Server)\n\t\tif err != nil {\n\t\t\terrs = multierror.Append(errs, err)\n\t\t}\n\tcase LocalFile:\n\t\toutputClient = client.NewFileClient(cfg.OutputPath)\n\tcase MachineHub:\n\t\tvar (\n\t\t\terr     error\n\t\t\trootCAs *x509.CertPool\n\t\t)\n\t\thttpClient := http_client.NewDefaultClient(version.UserAgent(), rootCAs)\n\t\toutputClient, err = client.NewCyberArk(httpClient)\n\t\tif err != nil {\n\t\t\terrs = multierror.Append(errs, err)\n\t\t}\n\tcase NGTS:\n\t\tvar creds *client.NGTSServiceAccountCredentials\n\n\t\tif flagPrivateKeyPath == \"\" {\n\t\t\terrs = multierror.Append(errs, fmt.Errorf(\"--private-key-path is required for NGTS mode\"))\n\t\t\tbreak\n\t\t}\n\n\t\tcreds = &client.NGTSServiceAccountCredentials{\n\t\t\tClientID:       flagClientID,\n\t\t\tPrivateKeyFile: flagPrivateKeyPath,\n\t\t}\n\n\t\t// rootCAs can be used in future to support custom CA certs, but for now will remain empty\n\t\tvar rootCAs *x509.CertPool\n\n\t\tvar err error\n\t\toutputClient, err = client.NewNGTSClient(metadata, creds, cfg.Server, cfg.TSGID, rootCAs)\n\t\tif err != nil {\n\t\t\terrs = multierror.Append(errs, err)\n\t\t}\n\tdefault:\n\t\tpanic(fmt.Errorf(\"programmer mistake: output mode not implemented: %s\", cfg.OutputMode))\n\t}\n\n\tif errs != nil {\n\t\treturn nil, fmt.Errorf(\"failed loading config using the %s mode: %w\", cfg.OutputMode, errs)\n\t}\n\n\treturn outputClient, nil\n}\n\n// Same as ValidateAndCombineConfig but just for validating the data gatherers.\n// This is separate because the `rbac` command only needs to validate the data\n// gatherers, nothing else.\n//\n// The error returned may be a multierror.Error. 
Use multierror.Prefix(err,\n// \"context:\") rather than fmt.Errorf(\"context: %w\", err) when wrapping the\n// error.\nfunc ValidateDataGatherers(dataGatherers []DataGatherer) error {\n\tvar err error\n\tfor i, v := range dataGatherers {\n\t\tif v.Kind == \"\" {\n\t\t\terr = multierror.Append(err, fmt.Errorf(\"datagatherer %d/%d is missing a kind\", i+1, len(dataGatherers)))\n\t\t}\n\t\tif v.Name == \"\" {\n\t\t\terr = multierror.Append(err, fmt.Errorf(\"datagatherer %d/%d is missing a name\", i+1, len(dataGatherers)))\n\t\t}\n\t}\n\n\treturn err\n}\n\n// Inspired by the controller-runtime project.\nfunc getInClusterNamespace() (string, error) {\n\tns := os.Getenv(\"POD_NAMESPACE\")\n\tif ns != \"\" {\n\t\treturn ns, nil\n\t}\n\n\treturn \"\", fmt.Errorf(\"POD_NAMESPACE env var not set, meaning that you are probably not running in cluster. Please use --install-namespace or POD_NAMESPACE to specify the namespace in which the agent is running.\")\n}\n\nfunc reMarshal(rawConfig any, config datagatherer.Config) error {\n\tbb, err := yaml.Marshal(rawConfig)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\terr = yaml.Unmarshal(bb, config)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\treturn nil\n}\n\n// UnmarshalYAML unmarshals a dataGatherer resolving the type according to Kind.\nfunc (dg *DataGatherer) UnmarshalYAML(unmarshal func(any) error) error {\n\taux := struct {\n\t\tKind      string `yaml:\"kind\"`\n\t\tName      string `yaml:\"name\"`\n\t\tDataPath  string `yaml:\"data-path,omitempty\"`\n\t\tRawConfig any    `yaml:\"config\"`\n\t}{}\n\terr := unmarshal(&aux)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdg.Kind = aux.Kind\n\tdg.Name = aux.Name\n\tdg.DataPath = aux.DataPath\n\n\tvar cfg datagatherer.Config\n\n\tswitch dg.Kind {\n\tcase \"k8s\":\n\t\tcfg = &k8sdynamic.ConfigDynamic{}\n\tcase \"k8s-dynamic\":\n\t\tcfg = &k8sdynamic.ConfigDynamic{}\n\tcase \"k8s-discovery\":\n\t\tcfg = &k8sdiscovery.ConfigDiscovery{}\n\tcase \"oidc\":\n\t\tcfg = 
&oidc.OIDCDiscovery{}\n\tcase \"local\":\n\t\tcfg = &local.Config{}\n\t// dummy dataGatherer is just used for testing\n\tcase \"dummy\":\n\t\tcfg = &dummyConfig{}\n\tdefault:\n\t\treturn fmt.Errorf(\"cannot parse data-gatherer configuration, kind %q is not supported\", dg.Kind)\n\t}\n\n\t// we encode aux.RawConfig, which is just a map of reflect.Values, into yaml and decode it again to the right type.\n\terr = reMarshal(aux.RawConfig, cfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdg.Config = cfg\n\n\treturn nil\n}\n\n// Dump generates a YAML string of the Config object\nfunc (c *Config) Dump() (string, error) {\n\td, err := yaml.Marshal(&c)\n\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to generate YAML dump of config: %w\", err)\n\t}\n\n\treturn string(d), nil\n}\n\n// ParseConfig only parses. It does not validate anything except for the data\n// gatherer types. To validate the config, use ValidateDataGatherers or\n// getConfiguration.\nfunc ParseConfig(data []byte) (Config, error) {\n\tvar config Config\n\n\terr := yaml.Unmarshal(data, &config)\n\tif err != nil {\n\t\treturn config, err\n\t}\n\n\treturn config, nil\n}\n\ntype credType string\n\nconst (\n\tCredOldJetstackSecureOAuth credType = \"CredOldJetstackSecureOAuth\"\n\tCredVenafiCloudKeypair     credType = \"CredVenafiCloudKeypair\"\n)\n\nfunc readCredentialsFile(path string) ([]byte, error) {\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to load credentials from file %s: %w\", path, err)\n\t}\n\tdefer file.Close()\n\n\tb, err := io.ReadAll(file)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read credentials file: %w\", err)\n\t}\n\n\treturn b, nil\n}\n"
  },
  {
    "path": "pkg/agent/config_test.go",
    "content": "package agent\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/go-logr/logr\"\n\t\"github.com/spf13/cobra\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"k8s.io/klog/v2\"\n\t\"k8s.io/klog/v2/ktesting\"\n\n\t\"github.com/jetstack/preflight/pkg/client\"\n\t\"github.com/jetstack/preflight/pkg/testutil\"\n)\n\nfunc Test_ValidateAndCombineConfig(t *testing.T) {\n\t// For common things like validating `server` and `data-gatherers`, we don't\n\t// need to test every auth mode. We just test them using the Jetstack Secure\n\t// OAuth mode.\n\tfakeCredsPath := withFile(t, `{\"user_id\":\"foo\",\"user_secret\":\"bar\",\"client_id\": \"baz\",\"client_secret\": \"foobar\",\"auth_server_domain\":\"bazbar\"}`)\n\n\tt.Run(\"In Venafi Connection mode, --install-namespace must be provided if POD_NAMESPACE is not set\", func(t *testing.T) {\n\t\t_, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tserver: https://api.venafi.eu\n\t\t\t\torganization_id: foo\n\t\t\t\tcluster_id: bar\n\t\t\t\tperiod: 5m\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--venafi-connection\", \"venafi-components\"))\n\t\tassert.EqualError(t, err, \"1 error occurred:\\n\\t* could not guess which namespace the agent is running in: POD_NAMESPACE env var not set, meaning that you are probably not running in cluster. 
Please use --install-namespace or POD_NAMESPACE to specify the namespace in which the agent is running.\\n\\n\")\n\t})\n\n\tt.Run(\"period must be given with either --period/-p or period field in config\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\t_, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tserver: https://api.venafi.eu\n\t\t\t\torganization_id: foo\n\t\t\t\tcluster_id: bar\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--credentials-file\", fakeCredsPath))\n\t\tassert.EqualError(t, err, \"1 error occurred:\\n\\t* period must be set using --period or -p, or using the 'period' field in the config file\\n\\n\")\n\n\t})\n\n\tt.Run(\"period can be provided using --period or -p\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\n\t\tgiven := withConfig(testutil.Undent(`\n\t\t\tserver: https://api.venafi.eu\n\t\t\torganization_id: foo\n\t\t\tcluster_id: bar\n\t\t`))\n\n\t\tgot, _, err := ValidateAndCombineConfig(discardLogs(), given, withCmdLineFlags(\"--period\", \"5m\", \"--credentials-file\", fakeCredsPath))\n\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, 5*time.Minute, got.Period)\n\n\t\tgot, _, err = ValidateAndCombineConfig(discardLogs(), given, withCmdLineFlags(\"-p\", \"3m\", \"--credentials-file\", fakeCredsPath))\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, 3*time.Minute, got.Period)\n\t})\n\n\tt.Run(\"period can be provided using the period field in config file\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tgot, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tserver: https://api.venafi.eu\n\t\t\t\tperiod: 7m\n\t\t\t\torganization_id: foo\n\t\t\t\tcluster_id: bar\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--credentials-file\", fakeCredsPath))\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, 7*time.Minute, got.Period)\n\t})\n\n\tt.Run(\"--period flag takes precedence over period field in 
config, shows warning\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tlog, gotLogs := recordLogs(t)\n\t\tgot, _, err := ValidateAndCombineConfig(log,\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tserver: https://api.venafi.eu\n\t\t\t\tperiod: 1111m\n\t\t\t\torganization_id: foo\n\t\t\t\tcluster_id: bar\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--period\", \"99m\", \"--credentials-file\", fakeCredsPath))\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, testutil.Undent(`\n\t\t\tINFO Output mode selected mode=\"Jetstack Secure OAuth\" reason=\"--credentials-file was specified without --venafi-cloud\"\n\t\t\tINFO Both the 'period' field and --period are set. Using the value provided with --period.\n\t\t`), gotLogs.String())\n\t\tassert.Equal(t, 99*time.Minute, got.Period)\n\t})\n\n\tt.Run(\"jetstack-secure-oauth-auth: server field is not required\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tgot, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tperiod: 1h\n\t\t\t\torganization_id: foo\n\t\t\t\tcluster_id: bar\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--credentials-file\", fakeCredsPath))\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, \"https://preflight.jetstack.io\", got.Server)\n\t})\n\n\tt.Run(\"venafi-cloud-keypair-auth: server field is not required\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tcredsPath := withFile(t, `{\"client_id\": \"foo\",\"private_key_file\": \"`+withFile(t, fakePrivKeyPEM)+`\"}`)\n\t\tgot, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tperiod: 1h\n\t\t\t\tcluster_id: bar\n\t\t\t\tvenafi-cloud:\n\t\t\t\t  upload_path: /foo/bar\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--venafi-cloud\", \"--credentials-file\", credsPath))\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, \"https://api.venafi.cloud\", got.Server)\n\t})\n\n\tt.Run(\"server URL must be valid\", func(t *testing.T) 
{\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\t_, _, gotErr := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tserver: \"something not a URL\"\n\t\t\t\tperiod: 1h\n\t\t\t\torganization_id: \"my_org\"\n\t\t\t\tcluster_id: \"my_cluster\"\n\t\t\t\tdata-gatherers:\n\t\t\t\t  - kind: dummy\n\t\t\t\t    name: dummy\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--credentials-file\", fakeCredsPath))\n\t\tassert.EqualError(t, gotErr, testutil.Undent(`\n\t\t\t1 error occurred:\n\t\t\t\t* server \"something not a URL\" is not a valid URL\n\n\t\t`))\n\t})\n\n\tt.Run(\"--strict is passed down\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tgot, _, gotErr := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tperiod: 1h\n\t\t\t\torganization_id: \"my_org\"\n\t\t\t\tcluster_id: \"my_cluster\"\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--strict\", \"--credentials-file\", fakeCredsPath))\n\t\trequire.NoError(t, gotErr)\n\t\tassert.Equal(t, true, got.StrictMode)\n\t})\n\n\tt.Run(\"--disable-compression is deprecated and doesn't do anything\", func(t *testing.T) {\n\t\tpath := withFile(t, `{\"user_id\":\"fpp2624799349@affectionate-hertz6.platform.jetstack.io\",\"user_secret\":\"foo\",\"client_id\": \"k3TrDbfLhCgnpAbOiiT2kIE1AbovKzjo\",\"client_secret\": \"f39w_3KT9Vp0VhzcPzvh-uVbudzqCFmHER3Huj0dvHgJwVrjxsoOQPIw_1SDiCfa\",\"auth_server_domain\":\"auth.jetstack.io\"}`)\n\t\tlog, b := recordLogs(t)\n\t\t_, _, err := ValidateAndCombineConfig(log,\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tserver: https://api.venafi.eu\n\t\t\t\tperiod: 1h\n\t\t\t\torganization_id: foo\n\t\t\t\tcluster_id: bar\n\t\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--disable-compression\", \"--credentials-file\", path, \"--install-namespace\", \"venafi\"))\n\t\trequire.NoError(t, err)\n\n\t\t// The log line printed by pflag is not captured by the log recorder.\n\t\tassert.Equal(t, testutil.Undent(`\n\t\t\tINFO Output mode selected 
mode=\"Jetstack Secure OAuth\" reason=\"--credentials-file was specified without --venafi-cloud\"\n\t\t\tINFO Using period from config period=\"1h0m0s\"\n\t\t`), b.String())\n\t})\n\n\tt.Run(\"error when no output mode specified\", func(t *testing.T) {\n\t\t_, cl, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tserver: https://api.venafi.eu\n\t\t\t\tperiod: 1h\n\t\t\t\torganization_id: foo\n\t\t\t\tcluster_id: bar\n\t\t\t`)),\n\t\t\twithoutCmdLineFlags(),\n\t\t)\n\t\tassert.EqualError(t, err, testutil.Undent(`\n\t\t\tno output mode specified. To enable one of the output modes, you can:\n\t\t\t - Use --ngts with --tsg-id and --private-key-path to use the NGTS mode (--client-id is optional if provided in the credentials secret).\n\t\t\t - Use (--venafi-cloud with --credentials-file) or (--client-id with --private-key-path) to use the Venafi Cloud Key Pair Service Account mode.\n\t\t\t - Use --venafi-connection for the Venafi Cloud VenafiConnection mode.\n\t\t\t - Use --credentials-file alone if you want to use the Jetstack Secure OAuth mode.\n\t\t\t - Use --api-token if you want to use the Jetstack Secure API Token mode.\n\t\t\t - Use --machine-hub if you want to use the MachineHub mode.\n\t\t\t - Use --output-path or output-path in the config file for Local File mode.`))\n\t\tassert.Nil(t, cl)\n\t})\n\n\tt.Run(\"jetstack-secure-oauth-auth: sample config\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\t// `client_id`, `client_secret`, and `auth_server_domain` are usually\n\t\t// injected at build time, but we can't do that in tests, so we need to\n\t\t// provide them in the credentials file.\n\t\tcredsPath := withFile(t, `{\"user_id\":\"fpp2624799349@affectionate-hertz6.platform.jetstack.io\",\"user_secret\":\"foo\",\"client_id\": \"k3TrDbfLhCgnpAbOiiT2kIE1AbovKzjo\",\"client_secret\": 
\"f39w_3KT9Vp0VhzcPzvh-uVbudzqCFmHER3Huj0dvHgJwVrjxsoOQPIw_1SDiCfa\",\"auth_server_domain\":\"auth.jetstack.io\"}`)\n\t\tgot, cl, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tperiod: 5m\n\t\t\t\tendpoint:\n\t\t\t\t  host: example.com\n\t\t\t\t  path: api/v1/data\n\t\t\t\tschedule: \"* * * * *\"\n\t\t\t\torganization_id: \"example\"\n\t\t\t\tcluster_id: \"example-cluster\"\n\t\t\t\tdata-gatherers:\n\t\t\t\t- name: d1\n\t\t\t\t  kind: dummy\n\t\t\t\t  config:\n\t\t\t\t    always-fail: false\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--credentials-file\", credsPath),\n\t\t)\n\t\texpect := CombinedConfig{\n\t\t\tOutputMode: \"Jetstack Secure OAuth\",\n\t\t\tClusterID:  \"example-cluster\",\n\t\t\tDataGatherers: []DataGatherer{{Kind: \"dummy\",\n\t\t\t\tName:   \"d1\",\n\t\t\t\tConfig: &dummyConfig{},\n\t\t\t}},\n\t\t\tPeriod:         5 * time.Minute,\n\t\t\tServer:         \"http://example.com\",\n\t\t\tOrganizationID: \"example\",\n\t\t\tEndpointPath:   \"api/v1/data\",\n\t\t\tBackoffMaxTime: 10 * time.Minute,\n\t\t\tInstallNS:      \"venafi\",\n\t\t}\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, expect, got)\n\t\tassert.IsType(t, &client.OAuthClient{}, cl)\n\t})\n\n\tt.Run(\"venafi-cloud-keypair-auth: extended config using --venafi-cloud and --credentials-file\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tprivKeyPath := withFile(t, fakePrivKeyPEM)\n\t\tcredsPath := withFile(t, `{\"client_id\": \"5bc7d07c-45da-11ef-a878-523f1e1d7de1\",\"private_key_file\": \"`+privKeyPath+`\"}`)\n\t\tgot, cl, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tserver: \"http://localhost:8080\"\n\t\t\t\tcluster_id: \"legacy cluster_id as cluster name\"\n\t\t\t\tperiod: 1h\n\t\t\t\tdata-gatherers:\n\t\t\t\t- name: d1\n\t\t\t\t  kind: dummy\n\t\t\t\t  config:\n\t\t\t\t    always-fail: false\n\t\t\t\tinput-path: \"/home\"\n\t\t\t\toutput-path: 
\"/nothome\"\n\t\t\t\tvenafi-cloud:\n\t\t\t\t  uploader_id: test-agent\n\t\t\t\t  upload_path: \"/testing/path\"\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--venafi-cloud\", \"--credentials-file\", credsPath, \"--backoff-max-time\", \"99m\"),\n\t\t)\n\t\texpect := CombinedConfig{\n\t\t\tServer: \"http://localhost:8080\",\n\t\t\tPeriod: time.Hour,\n\t\t\tDataGatherers: []DataGatherer{\n\t\t\t\t{Name: \"d1\", Kind: \"dummy\", Config: &dummyConfig{AlwaysFail: false}},\n\t\t\t},\n\t\t\tInputPath:      \"/home\",\n\t\t\tOutputPath:     \"/nothome\",\n\t\t\tUploadPath:     \"/testing/path\",\n\t\t\tOutputMode:     VenafiCloudKeypair,\n\t\t\tClusterName:    \"legacy cluster_id as cluster name\",\n\t\t\tBackoffMaxTime: 99 * time.Minute,\n\t\t\tInstallNS:      \"venafi\",\n\t\t}\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, expect, got)\n\t\tassert.IsType(t, &client.VenafiCloudClient{}, cl)\n\t})\n\n\tt.Run(\"venafi-cloud-keypair-auth: using --client-id and --private-key-path\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tprivKeyPath := withFile(t, fakePrivKeyPEM)\n\t\tgot, cl, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tserver: \"http://localhost:8080\"\n\t\t\t\tperiod: 1h\n\t\t\t\tcluster_id: \"the cluster name\"\n\t\t\t\tvenafi-cloud:\n\t\t\t\t  upload_path: \"/foo/bar\"\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--client-id\", \"5bc7d07c-45da-11ef-a878-523f1e1d7de1\", \"--private-key-path\", privKeyPath),\n\t\t)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, VenafiCloudKeypair, got.OutputMode)\n\t\tassert.IsType(t, &client.VenafiCloudClient{}, cl)\n\t})\n\n\tt.Run(\"jetstack-secure-oauth-auth: fail if organization_id or cluster_id is missing and --venafi-cloud not enabled\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tcredsPath := withFile(t, `{\"user_id\":\"fpp2624799349@affectionate-hertz6.platform.jetstack.io\",\"user_secret\":\"foo\",\"client_id\": 
\"k3TrDbfLhCgnpAbOiiT2kIE1AbovKzjo\",\"client_secret\": \"f39w_3KT9Vp0VhzcPzvh-uVbudzqCFmHER3Huj0dvHgJwVrjxsoOQPIw_1SDiCfa\",\"auth_server_domain\":\"auth.jetstack.io\"}`)\n\t\t_, _, err := ValidateAndCombineConfig(discardLogs(), withConfig(\"\"), withCmdLineFlags(\"--credentials-file\", credsPath))\n\t\tassert.EqualError(t, err, testutil.Undent(`\n\t\t\t3 errors occurred:\n\t\t\t\t* organization_id is required\n\t\t\t\t* cluster_id is required\n\t\t\t\t* period must be set using --period or -p, or using the 'period' field in the config file\n\n\t\t`))\n\t})\n\n\tt.Run(\"venafi-cloud-keypair-auth: authenticated if --client-id set\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tpath := withFile(t, fakePrivKeyPEM)\n\t\t_, cl, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tcluster_id: foo\n\t\t\t\tvenafi-cloud:\n\t\t\t\t  upload_path: /foo/bar\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--venafi-cloud\", \"--period\", \"1m\", \"--client-id\", \"test-client-id\", \"--private-key-path\", path))\n\t\trequire.NoError(t, err)\n\t\tassert.IsType(t, &client.VenafiCloudClient{}, cl)\n\t})\n\n\tt.Run(\"venafi-cloud-keypair-auth: valid 1: --client-id and --private-key-path\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tpath := withFile(t, fakePrivKeyPEM)\n\t\t_, cl, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tcluster_id: foo\n\t\t\t\tvenafi-cloud:\n\t\t\t\t  upload_path: /foo/bar\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--venafi-cloud\", \"--period\", \"1m\", \"--private-key-path\", path, \"--client-id\", \"test-client-id\"))\n\t\trequire.NoError(t, err)\n\t\tassert.IsType(t, &client.VenafiCloudClient{}, cl)\n\t})\n\n\tt.Run(\"venafi-cloud-keypair-auth: valid 2: --venafi-cloud and --credentials-file\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tcredsPath := withFile(t, fmt.Sprintf(`{\"client_id\": 
\"foo\",\"private_key_file\": \"%s\"}`, withFile(t, fakePrivKeyPEM)))\n\t\t_, cl, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tcluster_id: foo\n\t\t\t\tvenafi-cloud:\n\t\t\t\t  upload_path: /foo/bar\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--venafi-cloud\", \"--credentials-file\", credsPath, \"--period\", \"1m\"))\n\t\trequire.NoError(t, err)\n\t\tassert.IsType(t, &client.VenafiCloudClient{}, cl)\n\t})\n\n\tt.Run(\"venafi-cloud-keypair-auth: when --venafi-cloud is used, upload_path is required\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tcredsPath := withFile(t, fmt.Sprintf(`{\"client_id\": \"foo\",\"private_key_file\": \"%s\"}`, withFile(t, fakePrivKeyPEM)))\n\t\t_, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tserver: \"http://localhost:8080\"\n\t\t\t\tperiod: 1h\n\t\t\t\tvenafi-cloud:\n\t\t\t\t  uploader_id: test-agent\n\t\t\t\tcluster_id: \"the cluster name\"\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--venafi-cloud\", \"--credentials-file\", credsPath))\n\t\trequire.EqualError(t, err, \"1 error occurred:\\n\\t* the venafi-cloud.upload_path field is required when using the Venafi Cloud Key Pair Service Account mode\\n\\n\")\n\t})\n\n\tt.Run(\"jetstack-secure-oauth-auth: --credential-file alone means jetstack-secure oauth auth\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\t// `client_id`, `client_secret`, and `auth_server_domain` are usually\n\t\t// injected at build time, but we can't do that in tests, so we need to\n\t\t// provide them in the credentials file.\n\t\tpath := withFile(t, `{\"user_id\":\"fpp2624799349@affectionate-hertz6.platform.jetstack.io\",\"user_secret\":\"foo\",\"client_id\": \"k3TrDbfLhCgnpAbOiiT2kIE1AbovKzjo\",\"client_secret\": \"f39w_3KT9Vp0VhzcPzvh-uVbudzqCFmHER3Huj0dvHgJwVrjxsoOQPIw_1SDiCfa\",\"auth_server_domain\":\"auth.jetstack.io\"}`)\n\t\tgot, cl, err := 
ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tserver: https://api.venafi.eu\n\t\t\t\tperiod: 1h\n\t\t\t\torganization_id: foo\n\t\t\t\tcluster_id: bar\n\t\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--credentials-file\", path))\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, CombinedConfig{Server: \"https://api.venafi.eu\", Period: time.Hour, OrganizationID: \"foo\", ClusterID: \"bar\", OutputMode: JetstackSecureOAuth, BackoffMaxTime: 10 * time.Minute, InstallNS: \"venafi\"}, got)\n\t\tassert.IsType(t, &client.OAuthClient{}, cl)\n\t})\n\n\tt.Run(\"jetstack-secure-oauth-auth: --credential-file used but file is missing\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tgot, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tserver: https://api.venafi.eu\n\t\t\t\tperiod: 1h\n\t\t\t\torganization_id: foo\n\t\t\t\tcluster_id: bar\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--credentials-file\", \"credentials.json\"))\n\t\tassert.EqualError(t, err, testutil.Undent(`\n\t\t\tvalidating creds: failed loading config using the Jetstack Secure OAuth mode: 1 error occurred:\n\t\t\t\t* credentials file: failed to load credentials from file credentials.json: open credentials.json: no such file or directory\n\n\t\t`))\n\t\tassert.Equal(t, CombinedConfig{}, got)\n\t})\n\n\tt.Run(\"jetstack-secure-oauth-auth: shows helpful err messages\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tcredsPath := withFile(t, `{\"user_id\":\"\"}`)\n\t\t_, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tserver: https://api.venafi.eu\n\t\t\t\tperiod: 1h\n\t\t\t\torganization_id: foo\n\t\t\t\tcluster_id: bar\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--credentials-file\", credsPath))\n\t\tassert.EqualError(t, err, testutil.Undent(`\n\t\t\tvalidating creds: failed loading config using the Jetstack Secure OAuth mode: 2 errors occurred:\n\t\t\t\t* 
credentials file: user_id cannot be empty\n\t\t\t\t* credentials file: user_secret cannot be empty\n\n\t\t\t`))\n\t})\n\n\tt.Run(\"venafi-cloud-keypair-auth: --client-id cannot be used alone, it needs --private-key-path\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tgot, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tserver: https://api.venafi.eu\n\t\t\t\tperiod: 1h\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--client-id\", \"test-client-id\"))\n\t\tassert.EqualError(t, err, \"if --client-id is specified, --private-key-path must also be specified\")\n\t\tassert.Equal(t, CombinedConfig{}, got)\n\t})\n\n\tt.Run(\"venafi-cloud-keypair-auth: --private-key-path cannot be used alone, it needs --client-id\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tgot, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tserver: https://api.venafi.eu\n\t\t\t\tperiod: 1h\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--private-key-path\", \"foo\"))\n\t\tassert.EqualError(t, err, \"--private-key-path is specified, --client-id must also be specified\")\n\t\tassert.Equal(t, CombinedConfig{}, got)\n\t})\n\n\t// When --client-id is used, --venafi-cloud is implied.\n\tt.Run(\"venafi-cloud-keypair-auth: valid --client-id and --private-key-path\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tpath := withFile(t, fakePrivKeyPEM)\n\t\tgot, cl, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tserver: https://api.venafi.eu\n\t\t\t\tperiod: 1h\n\t\t\t\tcluster_id: legacy cluster_id as cluster name\n\t\t\t\tvenafi-cloud:\n\t\t\t\t  upload_path: /foo/bar\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--client-id\", \"5bc7d07c-45da-11ef-a878-523f1e1d7de1\", \"--private-key-path\", path))\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, CombinedConfig{Server: \"https://api.venafi.eu\", Period: time.Hour, OutputMode: 
VenafiCloudKeypair, ClusterName: \"legacy cluster_id as cluster name\", UploadPath: \"/foo/bar\", BackoffMaxTime: 10 * time.Minute, InstallNS: \"venafi\"}, got)\n\t\tassert.IsType(t, &client.VenafiCloudClient{}, cl)\n\t})\n\n\t// --credentials-file + --venafi-cloud can be used instead of\n\t// --client-id and --private-key-path. Unfortunately, --credentials-file\n\t// can't contain the private key material, just a path to it, so you\n\t// still need to have the private key file somewhere one the filesystem.\n\tt.Run(\"venafi-cloud-keypair-auth: valid --venafi-cloud + --credential-file + private key stored to disk\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tprivKeyPath := withFile(t, fakePrivKeyPEM)\n\t\tcredsPath := withFile(t, fmt.Sprintf(`{\"client_id\": \"5bc7d07c-45da-11ef-a878-523f1e1d7de1\",\"private_key_file\": \"%s\"}`, privKeyPath))\n\t\tgot, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tserver: https://api.venafi.eu\n\t\t\t\tperiod: 1h\n\t\t\t\tcluster_id: legacy cluster_id as cluster name\n\t\t\t\tvenafi-cloud:\n\t\t\t\t  upload_path: /foo/bar\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--venafi-cloud\", \"--credentials-file\", credsPath))\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, CombinedConfig{Server: \"https://api.venafi.eu\", Period: time.Hour, OutputMode: VenafiCloudKeypair, ClusterName: \"legacy cluster_id as cluster name\", UploadPath: \"/foo/bar\", BackoffMaxTime: 10 * time.Minute, InstallNS: \"venafi\"}, got)\n\t})\n\n\tt.Run(\"venafi-cloud-keypair-auth: venafi-cloud.upload_path field is required\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tprivKeyPath := withFile(t, fakePrivKeyPEM)\n\t\tcredsPath := withFile(t, fmt.Sprintf(`{\"client_id\": \"5bc7d07c-45da-11ef-a878-523f1e1d7de1\",\"private_key_file\": \"%s\"}`, privKeyPath))\n\t\t_, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tserver: 
https://api.venafi.eu\n\t\t\t\tperiod: 1h\n\t\t\t\tcluster_id: the cluster name\n\t\t\t\tvenafi-cloud:\n\t\t\t\t  upload_path: \"\"        # <-- Cannot be left empty\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--venafi-cloud\", \"--credentials-file\", credsPath))\n\t\trequire.EqualError(t, err, testutil.Undent(`\n\t\t\t1 error occurred:\n\t\t\t\t* the venafi-cloud.upload_path field is required when using the Venafi Cloud Key Pair Service Account mode\n\n\t\t`))\n\t})\n\n\tt.Run(\"venafi-cloud-keypair-auth: --private-key-file can be passed with --credential-file\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tprivKeyPath := withFile(t, fakePrivKeyPEM)\n\t\tcredsPath := withFile(t, `{\"client_id\": \"5bc7d07c-45da-11ef-a878-523f1e1d7de1\"}`)\n\t\tgot, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tserver: https://api.venafi.eu\n\t\t\t\tperiod: 1h\n\t\t\t\tcluster_id: the cluster name\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--venafi-cloud\", \"--credentials-file\", credsPath, \"--private-key-path\", privKeyPath))\n\t\trequire.EqualError(t, err, testutil.Undent(`\n\t\t\t1 error occurred:\n\t\t\t\t* the venafi-cloud.upload_path field is required when using the Venafi Cloud Key Pair Service Account mode\n\n\t\t`))\n\t\tassert.Equal(t, CombinedConfig{}, got)\n\t})\n\n\tt.Run(\"venafi-cloud-keypair-auth: config.venafi-cloud\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tprivKeyPath := withFile(t, fakePrivKeyPEM)\n\t\tcredsPath := withFile(t, `{\"client_id\": \"5bc7d07c-45da-11ef-a878-523f1e1d7de1\"}`)\n\t\tgot, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\t\tserver: https://api.venafi.eu\n\t\t\t\t\tperiod: 1h\n\t\t\t\t\tvenafi-cloud:\n\t\t\t\t\t  uploader_id: test-agent\n\t\t\t\t\t  upload_path: /testing/path\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--venafi-cloud\", \"--credentials-file\", credsPath, \"--private-key-path\", 
privKeyPath))\n\t\trequire.EqualError(t, err, testutil.Undent(`\n\t\t\t1 error occurred:\n\t\t\t\t* cluster_name or cluster_id is required in Venafi Cloud Key Pair Service Account mode\n\n\t\t`))\n\t\tassert.Equal(t, CombinedConfig{}, got)\n\t})\n\n\tt.Run(\"venafi-cloud-workload-identity-auth: valid --venafi-connection\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tt.Setenv(\"KUBECONFIG\", withFile(t, fakeKubeconfig))\n\t\tlog, gotLogs := recordLogs(t)\n\t\tgot, cl, err := ValidateAndCombineConfig(log,\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tserver: http://should-be-ignored\n\t\t\t\tperiod: 1h\n\t\t\t\tcluster_id: legacy cluster_id as cluster name\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--venafi-connection\", \"venafi-components\"))\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, testutil.Undent(`\n\t\t\tINFO Output mode selected venConnName=\"venafi-components\" mode=\"Venafi Cloud VenafiConnection\" reason=\"--venafi-connection was specified\"\n\t\t\tINFO ignoring the server field specified in the config file. 
In Venafi Cloud VenafiConnection mode, this field is not needed.\n\t\t\tINFO Using cluster_id as cluster_name for backwards compatibility clusterID=\"legacy cluster_id as cluster name\"\n\t\t\tINFO Using period from config period=\"1h0m0s\"\n\t\t`), gotLogs.String())\n\t\tassert.Equal(t, CombinedConfig{\n\t\t\tPeriod:         1 * time.Hour,\n\t\t\tClusterName:    \"legacy cluster_id as cluster name\",\n\t\t\tOutputMode:     VenafiCloudVenafiConnection,\n\t\t\tVenConnName:    \"venafi-components\",\n\t\t\tVenConnNS:      \"venafi\",\n\t\t\tInstallNS:      \"venafi\",\n\t\t\tBackoffMaxTime: 10 * time.Minute,\n\t\t}, got)\n\t\tassert.IsType(t, &client.VenConnClient{}, cl)\n\t})\n\n\tt.Run(\"venafi-cloud-workload-identity-auth: warning about server, venafi-cloud.uploader_id, and venafi-cloud.upload_path being skipped\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tt.Setenv(\"KUBECONFIG\", withFile(t, fakeKubeconfig))\n\t\tlog, gotLogs := recordLogs(t)\n\t\tgot, gotCl, err := ValidateAndCombineConfig(log,\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tserver: https://api.venafi.eu\n\t\t\t\tperiod: 1h\n\t\t\t\tcluster_name: cluster-1\n\t\t\t\tcluster_id: should-be-ignored-and-logged-as-ignored\n\t\t\t\tvenafi-cloud:\n\t\t\t\t  uploader_id: id\n\t\t\t\t  upload_path: /path\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--venafi-connection\", \"venafi-components\"),\n\t\t)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, testutil.Undent(`\n\t\t\tINFO Output mode selected venConnName=\"venafi-components\" mode=\"Venafi Cloud VenafiConnection\" reason=\"--venafi-connection was specified\"\n\t\t\tINFO ignoring the server field specified in the config file. In Venafi Cloud VenafiConnection mode, this field is not needed.\n\t\t\tINFO ignoring the venafi-cloud.upload_path field in the config file. In Venafi Cloud VenafiConnection mode, this field is not needed.\n\t\t\tINFO ignoring the venafi-cloud.uploader_id field in the config file. 
This field is not needed in Venafi Cloud VenafiConnection mode.\n\t\t\tINFO Ignoring the cluster_id field in the config file. This field is not needed in Venafi Cloud VenafiConnection mode.\n\t\t\tINFO Using period from config period=\"1h0m0s\"\n\t\t`), gotLogs.String())\n\t\tassert.Equal(t, VenafiCloudVenafiConnection, got.OutputMode)\n\t\tassert.IsType(t, &client.VenConnClient{}, gotCl)\n\t})\n\n\tt.Run(\"venafi-cloud-workload-identity-auth: server field can be left empty in venconn mode\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tt.Setenv(\"KUBECONFIG\", withFile(t, fakeKubeconfig))\n\t\tgot, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tserver: \"\"\n\t\t\t\tperiod: 1h\n\t\t\t\tcluster_id: foo\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--venafi-connection\", \"venafi-components\"))\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, VenafiCloudVenafiConnection, got.OutputMode)\n\t})\n\n\tconst arkUsername = \"cluster-1-region-1-cloud-1@cyberark.cloud.123456\"\n\n\tt.Run(\"--machine-hub selects MachineHub mode\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tt.Setenv(\"KUBECONFIG\", withFile(t, fakeKubeconfig))\n\t\tt.Setenv(\"ARK_SUBDOMAIN\", \"tlspk\")\n\t\tt.Setenv(\"ARK_USERNAME\", arkUsername)\n\t\tt.Setenv(\"ARK_SECRET\", \"test-secret\")\n\t\tgot, cl, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(\"\"),\n\t\t\twithCmdLineFlags(\"--period\", \"1m\", \"--machine-hub\"))\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, MachineHub, got.OutputMode)\n\t\tassert.Equal(t, arkUsername, got.ClusterName,\n\t\t\t\"the ClusterName should default to the ARK_USERNAME value if the cluster_name in the config file is empty\")\n\t\tassert.IsType(t, &client.CyberArkClient{}, cl)\n\t})\n\n\tt.Run(\"--machine-hub with cluster_name override\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tt.Setenv(\"KUBECONFIG\", withFile(t, 
fakeKubeconfig))\n\t\tt.Setenv(\"ARK_SUBDOMAIN\", \"tlspk\")\n\t\tt.Setenv(\"ARK_USERNAME\", arkUsername)\n\t\tt.Setenv(\"ARK_SECRET\", \"test-secret\")\n\t\tgot, cl, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tcluster_name: override-cluster-name\n            `)),\n\t\t\twithCmdLineFlags(\"--period\", \"1m\", \"--machine-hub\"))\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, MachineHub, got.OutputMode)\n\t\tassert.Equal(t, \"override-cluster-name\", got.ClusterName,\n\t\t\t\"the cluster_name in the config file should be used if not empty, even if ARK_USERNAME is set\")\n\t\tassert.IsType(t, &client.CyberArkClient{}, cl)\n\t})\n\n\tt.Run(\"--machine-hub without required environment variables\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tt.Setenv(\"KUBECONFIG\", withFile(t, fakeKubeconfig))\n\t\tt.Setenv(\"ARK_SUBDOMAIN\", \"\")\n\t\tt.Setenv(\"ARK_USERNAME\", \"\")\n\t\tt.Setenv(\"ARK_SECRET\", \"\")\n\t\tgot, cl, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(\"\"),\n\t\t\twithCmdLineFlags(\"--period\", \"1m\", \"--machine-hub\"))\n\t\tassert.Equal(t, CombinedConfig{}, got)\n\t\tassert.Nil(t, cl)\n\t\tassert.EqualError(t, err, testutil.Undent(`\n\t\t\tvalidating creds: failed loading config using the MachineHub mode: 1 error occurred:\n\t\t\t\t* missing environment variables: ARK_SUBDOMAIN, ARK_USERNAME, ARK_SECRET\n\n\t   `))\n\t})\n\n\tt.Run(\"argument: --output-file selects local file mode\", func(t *testing.T) {\n\t\tlog, gotLog := recordLogs(t)\n\t\tgot, outputClient, err := ValidateAndCombineConfig(log,\n\t\t\twithConfig(\"\"),\n\t\t\twithCmdLineFlags(\"--period\", \"1m\", \"--output-path\", \"/foo/bar/baz\"))\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, LocalFile, got.OutputMode)\n\t\tassert.Equal(t, testutil.Undent(`\n\t\t\tINFO Output mode selected mode=\"Local File\" reason=\"--output-path was specified\"\n\t\t`), gotLog.String())\n\t\tassert.IsType(t, 
&client.FileClient{}, outputClient)\n\t})\n\n\tt.Run(\"config: output-path selects local file mode\", func(t *testing.T) {\n\t\tlog, gotLog := recordLogs(t)\n\t\tgot, outputClient, err := ValidateAndCombineConfig(log,\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\toutput-path: /foo/bar/baz\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--period=1h\"))\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, LocalFile, got.OutputMode)\n\t\tassert.Equal(t, testutil.Undent(`\n\t\t\tINFO Output mode selected mode=\"Local File\" reason=\"output-path was specified in the config file\"\n\t\t`), gotLog.String())\n\t\tassert.IsType(t, &client.FileClient{}, outputClient)\n\t})\n\n\t// When --input-path is supplied, the data is being read from a local file\n\t// and the agent is probably running outside the cluster and has no access\n\t// to a cluster, so the environment variables which are required for\n\t// generating events attached to the Agent pod should not be required:\n\t// POD_NAME, POD_NAMESPACE, POD_UID, KUBECONFIG, etc.\n\t// This test deliberately does not set those environment variables.\n\t//\n\t// TODO(wallrj): Some other config settings like cluster_id, organization_id\n\t// should also not be required in this situation. 
We'll fix those in the\n\t// future.\n\tt.Run(\"--input-path requires no Kubernetes config\", func(t *testing.T) {\n\t\texpectedInputPath := \"/foo/bar/baz\"\n\t\tgot, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tcluster_id: should-not-be-required\n\t\t\t\torganization_id: should-not-be-required\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\n\t\t\t\t\"--one-shot\",\n\t\t\t\t\"--input-path\", expectedInputPath,\n\t\t\t\t\"--output-path\", \"/dev/null\",\n\t\t\t),\n\t\t)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, expectedInputPath, got.InputPath)\n\t})\n}\n\nfunc Test_ValidateAndCombineConfig_VenafiCloudKeyPair(t *testing.T) {\n\tt.Run(\"server, uploader_id, and cluster name are correctly passed\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\n\t\tctx, cancel := context.WithCancel(t.Context())\n\t\tdefer cancel()\n\t\tlog := ktesting.NewLogger(t, ktesting.NewConfig(ktesting.Verbosity(10)))\n\t\tctx = klog.NewContext(ctx, log)\n\n\t\tsrv, cert, setVenafiCloudAssert := testutil.FakeVenafiCloud(t)\n\t\tsetVenafiCloudAssert(func(t testing.TB, gotReq *http.Request) {\n\t\t\t// Only care about /v1/tlspk/upload/clusterdata/:uploader_id?name=\n\t\t\tif gotReq.URL.Path == \"/v1/oauth/token/serviceaccount\" {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.Equal(t, srv.URL, \"https://\"+gotReq.Host)\n\t\t\tassert.Equal(t, \"test cluster name\", gotReq.URL.Query().Get(\"name\"))\n\t\t\tassert.Equal(t, \"/v1/tlspk/upload/clusterdata/no\", gotReq.URL.Path)\n\t\t})\n\n\t\tprivKeyPath := withFile(t, fakePrivKeyPEM)\n\t\tgot, cl, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tserver: `+srv.URL+`\n\t\t\t\tperiod: 1h\n\t\t\t\tcluster_id: \"test cluster name\"\n\t\t\t\tvenafi-cloud:\n\t\t\t\t  uploader_id: no\n\t\t\t\t  upload_path: /v1/tlspk/upload/clusterdata\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--client-id\", \"5bc7d07c-45da-11ef-a878-523f1e1d7de1\", \"--private-key-path\", 
privKeyPath),\n\t\t)\n\t\trequire.NoError(t, err)\n\t\ttestutil.TrustCA(t, cl, cert)\n\t\tassert.Equal(t, VenafiCloudKeypair, got.OutputMode)\n\n\t\terr = cl.PostDataReadingsWithOptions(ctx, nil, client.Options{ClusterName: \"test cluster name\"})\n\t\trequire.NoError(t, err)\n\t})\n}\n\n// Slower test cases due to envtest. That's why they are separated from the\n// other tests.\nfunc Test_ValidateAndCombineConfig_VenafiConnection(t *testing.T) {\n\t_, cfg, kcl := testutil.WithEnvtest(t)\n\tt.Setenv(\"KUBECONFIG\", testutil.WithKubeconfig(t, cfg))\n\tsrv, cert, setVenafiCloudAssert := testutil.FakeVenafiCloud(t)\n\tfor _, obj := range testutil.Parse(\n\t\ttestutil.VenConnRBAC + testutil.Undent(`\n\t\t\t---\n\t\t\tapiVersion: jetstack.io/v1alpha1\n\t\t\tkind: VenafiConnection\n\t\t\tmetadata:\n\t\t\t  name: venafi-components\n\t\t\t  namespace: venafi\n\t\t\tspec:\n\t\t\t  vcp:\n\t\t\t    url: \"`+srv.URL+`\"\n\t\t\t    accessToken:\n\t\t\t      - secret:\n\t\t\t          name: accesstoken\n\t\t\t          fields: [accesstoken]\n\t\t\t---\n\t\t\tapiVersion: v1\n\t\t\tkind: Secret\n\t\t\tmetadata:\n\t\t\t  name: accesstoken\n\t\t\t  namespace: venafi\n\t\t\tstringData:\n\t\t\t  accesstoken: VALID_ACCESS_TOKEN\n\t\t\t---\n\t\t\tapiVersion: rbac.authorization.k8s.io/v1\n\t\t\tkind: Role\n\t\t\tmetadata:\n\t\t\t  name: venafi-connection-accesstoken-reader\n\t\t\t  namespace: venafi\n\t\t\trules:\n\t\t\t- apiGroups: [\"\"]\n\t\t\t  resources: [\"secrets\"]\n\t\t\t  verbs: [\"get\"]\n\t\t\t  resourceNames: [\"accesstoken\"]\n\t\t\t---\n\t\t\tapiVersion: rbac.authorization.k8s.io/v1\n\t\t\tkind: RoleBinding\n\t\t\tmetadata:\n\t\t\t  name: venafi-connection-accesstoken-reader\n\t\t\t  namespace: venafi\n\t\t\troleRef:\n\t\t\t  apiGroup: rbac.authorization.k8s.io\n\t\t\t  kind: Role\n\t\t\t  name: venafi-connection-accesstoken-reader\n\t\t\tsubjects:\n\t\t\t- kind: ServiceAccount\n\t\t\t  name: venafi-connection\n\t\t\t  namespace: venafi\n\t\t`)) {\n\t\trequire.NoError(t, 
kcl.Create(t.Context(), obj))\n\t}\n\n\tt.Run(\"err when cluster_id field is empty\", func(t *testing.T) {\n\t\texpected := srv.URL\n\t\tsetVenafiCloudAssert(func(t testing.TB, gotReq *http.Request) {\n\t\t\tassert.Equal(t, expected, \"https://\"+gotReq.Host)\n\t\t})\n\n\t\t_, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\tConfig{Server: \"http://should-be-ignored\", Period: 1 * time.Hour},\n\t\t\tAgentCmdFlags{VenConnName: \"venafi-components\", InstallNS: \"venafi\"})\n\t\tassert.EqualError(t, err, \"1 error occurred:\\n\\t* cluster_name or cluster_id is required in Venafi Cloud VenafiConnection mode\\n\\n\")\n\t})\n\n\tt.Run(\"the server field is ignored when VenafiConnection is used\", func(t *testing.T) {\n\t\tctx, cancel := context.WithCancel(t.Context())\n\t\tdefer cancel()\n\t\tlog := ktesting.NewLogger(t, ktesting.NewConfig(ktesting.Verbosity(10)))\n\t\tctx = klog.NewContext(ctx, log)\n\n\t\texpected := srv.URL\n\t\tsetVenafiCloudAssert(func(t testing.TB, gotReq *http.Request) {\n\t\t\tassert.Equal(t, expected, \"https://\"+gotReq.Host)\n\t\t})\n\n\t\tcfg, cl, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tserver: http://this-url-should-be-ignored\n\t\t\t\tperiod: 1h\n\t\t\t\tcluster_id: test cluster name\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--venafi-connection\", \"venafi-components\", \"--install-namespace\", \"venafi\"))\n\t\trequire.NoError(t, err)\n\n\t\ttestutil.VenConnStartWatching(ctx, t, cl)\n\t\ttestutil.TrustCA(t, cl, cert)\n\n\t\t// TODO(mael): the client should keep track of the cluster name, we\n\t\t// shouldn't need to pass it as an option to\n\t\t// PostDataReadingsWithOptions.\n\t\terr = cl.PostDataReadingsWithOptions(ctx, nil, client.Options{ClusterName: cfg.ClusterName})\n\t\trequire.NoError(t, err)\n\t})\n}\n\nfunc Test_ParseConfig(t *testing.T) {\n\tt.Run(\"happy\", func(t *testing.T) {\n\t\tcfg, err := ParseConfig([]byte(testutil.Undent(`\n\t\t\tserver: 
https://api.venafi.eu\n\t\t\tperiod: 1h\n\t\t\torganization_id: foo\n\t\t\tcluster_id: bar\n\t\t`)))\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t,\n\t\t\tConfig{Server: \"https://api.venafi.eu\", Period: 1 * time.Hour, OrganizationID: \"foo\", ClusterID: \"bar\"},\n\t\t\tcfg)\n\t})\n\n\tt.Run(\"unknown data gatherer kind\", func(t *testing.T) {\n\t\t_, err := ParseConfig([]byte(testutil.Undent(`\n\t\t\tendpoint:\n\t\t\t  host: example.com\n\t\t\t  path: /api/v1/data\n\t\t\tschedule: \"* * * * *\"\n\t\t\tdata-gatherers:\n\t\t\t  - kind: \"foo\"\n\t\t`)))\n\t\tassert.EqualError(t, err, `cannot parse data-gatherer configuration, kind \"foo\" is not supported`)\n\t})\n\n\tt.Run(\"validates incorrect schema\", func(t *testing.T) {\n\t\t_, gotErr := ParseConfig([]byte(`data-gatherers: \"things\"`))\n\t\tassert.EqualError(t, gotErr, \"yaml: unmarshal errors:\\n  line 1: cannot unmarshal !!str `things` into []agent.DataGatherer\")\n\t})\n\n\tt.Run(\"does not show an error when user provides an unknown field\", func(t *testing.T) {\n\t\t_, gotErr := ParseConfig([]byte(`some-unknown-field: foo`))\n\t\tassert.NoError(t, gotErr)\n\t})\n\n\t// The only validation that ParseConfig does it to check if the `kind` is\n\t// known. The rest of the validation is done in ValidateDataGatherers and\n\t// ValidateAndCombineConfig.\n\tt.Run(\"validates that the kind is known\", func(t *testing.T) {\n\t\t_, gotErr := ParseConfig([]byte(testutil.Undent(`\n\t\t\tdata-gatherers:\n\t\t\t- kind: unknown`,\n\t\t)))\n\t\tassert.EqualError(t, gotErr, `cannot parse data-gatherer configuration, kind \"unknown\" is not supported`)\n\t})\n\n\t// ParseConfig only checks the data-gatherer kind. 
The rest of the\n\t// validation is done in ValidateDataGatherers and ValidateAndCombineConfig.\n\tt.Run(\"does not check for missing name\", func(t *testing.T) {\n\t\t_, gotErr := ParseConfig([]byte(testutil.Undent(`\n\t\t\tendpoint:\n\t\t\t  host: example.com\n\t\t\t  path: /api/v1/data\n\t\t\tschedule: \"* * * * *\"\n\t\t\torganization_id: \"example\"\n\t\t\tcluster_id: \"example-cluster\"\n\t\t\tdata-gatherers:\n\t\t\t  - kind: dummy\n\t\t`)))\n\t\tassert.NoError(t, gotErr)\n\t})\n\tt.Run(\"does not check correct server URL\", func(t *testing.T) {\n\t\t_, gotErr := ParseConfig([]byte(testutil.Undent(`\n\t\t\tserver: https://api.venafi.eu\n\t\t`)))\n\t\tassert.NoError(t, gotErr)\n\t})\n}\n\nfunc Test_ValidateDataGatherers(t *testing.T) {\n\tt.Run(\"happy\", func(t *testing.T) {\n\t\terr := ValidateDataGatherers(withConfig(testutil.Undent(`\n\t\t\tdata-gatherers:\n\t\t\t- kind: \"k8s\"\n\t\t\t  name: \"k8s/secrets\"\n\t\t\t- kind: \"k8s-discovery\"\n\t\t\t  name: \"k8s-discovery\"\n\t\t\t- kind: \"k8s-dynamic\"\n\t\t\t  name: \"k8s/secrets\"\n\t\t\t- kind: \"local\"\n\t\t\t  name: \"local\"\n\t\t\t- kind: \"dummy\"\n\t\t\t  name: \"dummy\"\n\t\t`)).DataGatherers)\n\t\trequire.NoError(t, err)\n\t})\n\n\tt.Run(\"missing name\", func(t *testing.T) {\n\t\tgotErr := ValidateDataGatherers(withConfig(testutil.Undent(`\n\t\t\tdata-gatherers:\n\t\t\t  - kind: dummy\n\t\t`)).DataGatherers)\n\t\tassert.EqualError(t, gotErr, \"1 error occurred:\\n\\t* datagatherer 1/1 is missing a name\\n\\n\")\n\t})\n\n\t// For context, the custom UnmarshalYAML in ParseConfig already validates\n\t// the kind. 
That's why ValidateDataGatherers panics: because it would be a\n\t// programmer mistake.\n\tt.Run(\"missing kind triggers a panic\", func(t *testing.T) {\n\t\tassert.PanicsWithError(t, `cannot parse data-gatherer configuration, kind \"unknown\" is not supported`, func() {\n\t\t\t_ = ValidateDataGatherers(withConfig(testutil.Undent(`\n\t\t\t\tdata-gatherers:\n\t\t\t\t- kind: unknown\n\t\t\t`)).DataGatherers)\n\t\t})\n\t})\n}\n\nfunc withFile(t testing.TB, content string) string {\n\tt.Helper()\n\n\tf, err := os.CreateTemp(t.TempDir(), \"file\")\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create temporary file: %v\", err)\n\t}\n\tdefer f.Close()\n\n\t_, err = f.WriteString(content)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to write to temporary file: %v\", err)\n\t}\n\n\treturn f.Name()\n}\n\nfunc recordLogs(t *testing.T) (logr.Logger, ktesting.Buffer) {\n\tlog := ktesting.NewLogger(t, ktesting.NewConfig(ktesting.BufferLogs(true)))\n\ttestingLogger, ok := log.GetSink().(ktesting.Underlier)\n\trequire.True(t, ok)\n\treturn log, testingLogger.GetBuffer()\n}\n\nfunc discardLogs() logr.Logger {\n\treturn logr.Discard()\n}\n\n// Shortcut for ParseConfig.\nfunc withConfig(s string) Config {\n\tcfg, err := ParseConfig([]byte(s))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn cfg\n}\n\nfunc withCmdLineFlags(flags ...string) AgentCmdFlags {\n\tparsed := withoutCmdLineFlags()\n\tagentCmd := &cobra.Command{}\n\tInitAgentCmdFlags(agentCmd, &parsed)\n\terr := agentCmd.ParseFlags(flags)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn parsed\n}\n\nfunc withoutCmdLineFlags() AgentCmdFlags {\n\treturn AgentCmdFlags{}\n}\n\nconst fakeKubeconfig = `\napiVersion: v1\nclusters:\n- cluster:\n    certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCVENDQWUyZ0F3SUJBZ0lJVGpXZTMvWXhJbXN3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TkRBM01UVXhOREUxTVRSYUZ3MHpOREEzTVRNeE5ESXdNVFJhTUJVeApFekFSQmdOVkJBTVRDbXQxWW1WeWJtVjBaWE13Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLCkFvSUJBUUMweVhZSmIyT0JRb0NrYXYySWw1NjNRM0t3RFpGSmluNFRFSkJJbWt6MnpJVU56cHIvV09MY01jdjYKVG9IaTl1c1oyL005dktMcnhYRE1FcFNJaTR4c1psZ3BDN2Erb3hqNW80MVdqRy9rdzhmcVc2MTRUV2ZEekRkWQppRkNKOC9PdmpKdFY2elREZ04vUGtWRytKQWJIOTdnVkc5NXRzRHBIazN3Nk12WkdYK3lqdnhXblV1enlpdFIzCkNLNkhYcE82Y0xBVzJva1FWZHYrZEFUSDFrZVpZZHpMOFp0U0txcUo2QWlRTUtEMG1FbXZPWDNBRk4vUUNQdXkKTVdDUXVkQ1RaQ0t1a1gwRzllakd3NGE1RC9CZnVmYmtWd1g3Vmo3OGJjQ0NId3JJMFZNOHVzYnJzcEs5eGtsVwpodjRXOGVaQ21KZWlMajFLVUhSbTdRVlFYVHNoQWdNQkFBR2pXVEJYTUE0R0ExVWREd0VCL3dRRUF3SUNwREFQCkJnTlZIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWREZ1FXQkJTckNJaE44czZpMmRIMEpwQWU3dFdPL2p2clJqQVYKQmdOVkhSRUVEakFNZ2dwcmRXSmxjbTVsZEdWek1BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQ0pQd2x1OFVhRgo5UnIvUG5QSDNtL0w2amhlcE5Kak5vNThFSWlEMWpjc1Y3R04zZUpha0h1b3g1MGRmR2gvMFFMZEwreUluamFtCkw0Y0R6RnVYeDhCL0ZXQlMwdnYvaG5WQ1JadER4bjB1OW92WC9iblNJdHpBOHNKMHA4cU1YeEFmbkxuZDI0TksKNFZXZmFXTThjbitQeUoybnJ3MHo2YmtYYnZZMGxEV2ZRakorOUJxU3IyeUZYZWM4eXljSzZ6aHlXeHJMV1p1OAoyQngrYjJML1JETDg2T3FXSkthRmljNGlWeDBoK2xDYlBIQmNwazhQOVFvSjZodThhdXdiWjZlMkwxbmZSdWFjCjB3Z1F5OEMzNVExMTdla0dOcjZKMUlrRlE5OGorYTNBTVQ2Z05KclZGZEJOOGlMcjlhMDZJQnRBb04wV2s0bysKL2F5akJBc3hONHo5Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K\n    server: https://127.0.0.1:58453\n  name: fake\ncontexts:\n- context:\n    cluster: fake\n    user: fake\n  name: fake\ncurrent-context: fake\nkind: Config\npreferences: {}\nusers:\n- name: fake\n  user:\n    client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURLVENDQWhHZ0F3SUJBZ0lJV1JQVy9Nblo0VnN3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TkRBM01UVXhOREUxTVRSYUZ3MHlOVEEzTVRVeE5ESXdNVFZhTUR3eApIekFkQmdOVkJBb1RGbXQxWW1WaFpHMDZZMngxYzNSbGNpMWhaRzFwYm5NeEdUQVhCZ05WQkFNVEVHdDFZbVZ5CmJtVjBaWE10WVdSdGFXNHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDcGpIRW4KY2w3QlVURlJLdTVUeU54TmxEdWxHYittalNLcHdsd2FGa0ZyYUZPMXU0MVRVOE9FalZhNDlheHp1SHZYNTZpWgpLMEJCbkJ5aFdYeGVKNE1CTzRWdXk2K09zYVBHWUgxcDZIcGpmUTBwVW5QODFndTgzMloyWmRaazhmZkJVb0pjCjI4b25Mbjd0UERVdjhHVk9WbndZRzE4RGFDWFFjVGR3VjFNYVFKZCtsNGpveHQ5S0J6aDhZUUhZanJMdnl4RncKd2dPbTNITk5GQ3J3Zno2Wis2bi95bHliaTA3amNHVi9nMTVHaVl6azJNWW5EbFBYUHVQYzY0MVp0NWdBcGFwSgpUbUdsaW95Ym85bUVtZmRFbnd0aDJDSTZTdkx6eXlveTJidlhEVktNRzhZTzE5N25kRUd6TE95T1lYT1RMYUNkCnhaWVVCdlNadkxSK1pzMGpBZ01CQUFHalZqQlVNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUsKQmdnckJnRUZCUWNEQWpBTUJnTlZIUk1CQWY4RUFqQUFNQjhHQTFVZEl3UVlNQmFBRktzSWlFM3l6cUxaMGZRbQprQjd1MVk3K08rdEdNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUUExeXpDdE55Rmp6SHlNZ0FFTVpXalR4OWxWClk2MHRpeTFvYjUvL0thR0MvWmhSbW94NmZ0Sy94dFJDRlptRVYxZ1ZzaXNLc0g2L0YwTEZHRys4V0lrNzVoZXkKVGtoRXUvRVpBdEpRMUNoSmFWMTg4QzNvMmtmSkZOOFlVRlRyS0k3K1NNb0RCTmJJU0VPV3FsZFRiVDdWdkVzNQpsWTRKcS9rU2xnNnNZcWNCRDYzY2pFOHpKU3Y4aDUra3J0d2JVRW90Y0ptN0IvNnpMZksxNWQ5WXBEb0F1anl0CjlVcTVROEhaSGRqWlZ1OWgvNmYvbVMvZkRyek9weDhNOTdPblU1T0MvY2dTNGtUNVhkdVo3SVB3TDJVMkZsTlIKVUdvZ0RndmxDQkFaMDV4WXh4Z2xjNlNYK3JrcURUK3VhWHNtR2dBU21oUjR4OXFkRzA1R2JIdXhoZkJhCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K\n    client-key-data: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBcVl4eEozSmV3VkV4VVNydVU4amNUWlE3cFJtL3BvMGlxY0pjR2haQmEyaFR0YnVOClUxUERoSTFXdVBXc2M3aDcxK2VvbVN0QVFad2NvVmw4WGllREFUdUZic3V2anJHanhtQjlhZWg2WTMwTktWSnoKL05ZTHZOOW1kbVhXWlBIM3dWS0NYTnZLSnk1KzdUdzFML0JsVGxaOEdCdGZBMmdsMEhFM2NGZFRHa0NYZnBlSQo2TWJmU2djNGZHRUIySTZ5NzhzUmNNSURwdHh6VFJRcThIOCttZnVwLzhwY200dE80M0JsZjROZVJvbU01TmpHCkp3NVQxejdqM091TldiZVlBS1dxU1U1aHBZcU1tNlBaaEpuM1JKOExZZGdpT2tyeTg4c3FNdG03MXcxU2pCdkcKRHRmZTUzUkJzeXpzam1Gemt5MmduY1dXRkFiMG1ieTBmbWJOSXdJREFRQUJBb0lCQUY2dHkzNWdzcU0zYU5mUApwbmpwSUlTOTh6UzJGVHkzY1pUa3NUUHNHNm9UL3pMcndmYTNQdVpsV3ZrOFQ0bnJpbFM5eTN1RkdJUEszbjRICmo1aXdiY3FoWjFqQXE0OStpVnM5Qkt2QW81K3M5RTJQK3E5RkJCYjdsYWNtSlR3SGx2ZkEwSVYwUXdYd1EvYk0KZVZNRTVqMkJ0Qmh1S0hlcGovdy9UTnNTR0pqK2NlNmN2aXVVb2NXWGsxWDl2c1RDaUdtMVdnVkZGQVphVGpMTgpDcEU1dHFpdnpvbEZVbXZIbmVYNTZTOEdFWk01NFA5MFk1enJ3NHBGa0Vud1VMRlBLa1U0cUU0eWVPNVFsWUhCClQ0NklIOVNPcUU5T0pLL3JCSGVzQU45TWNrMTdKblF6Sy95bXh6eHhhcGdPMnk0bVBTcjJaaGk0SENMRHRQV2QKc0ZtRzc2RUNnWUVBeHhQTTJYVFV2bXV5ckZmUVgxblJTSW9jMGhxZFY0MnFaRFlkMzZWVWc1UUVMM0Y4S01aUwptSkNsWlJXYW9IY0NFVUdXakFTWEJaMW9hOHlOMVhSNURTV3ZJMmV5TjE1dnh3NFg1SjV5QzUvY0F4ZW00dUk3CnkzM0VWWktXZXpFQTVVeUFtNlF6ei9lR1R6QkZyNUlxYkJDUitTUldudHRXUHdJTUhkK0VoeEVDZ1lFQTJnY3QKT2h1U0xJeDZZbTFTRHVVT0pSdmtFZFlCazJPQWxRbk5kOVJoaWIxdVlVbjhPTkhYdHBsY2FHZEl3bFdkaEJlcwo4M1F4dXA4MEFydEFtM2FHMXZ6RlZ6Q05KeHA4ZGFxWlFsZk94YlJReUQ0cjdtT2Z5aENFY2VibHAxMkZKRTBQCmNhOFl2TkFuTTdkbnlTSFd0aUo2THFQWDVuMXlRSC9JY1NIaEdQTUNnWUVBa0ZDZFBzSy8rcTZ1SHR1bDFZbVIKK3FrTWpZNzNvdUd5dE9TNk1VZDBCZEtHV2pKRmxIVjRxTnFxMjZXV3ExNjZZL0lOQmNIS0RTcjM2TFduMkNhUQpIbVRFR3NGd1kwMFZjTktacFlUckhkd3NMUjIzUUdCS2dwRFFoRXc0eEdOWXgrRDJsbDJwcGNoRldDQ2hVODU4CjdFdnkxZzV1c01oR05IVHlmYkZzTEZFQ2dZRUF6QXJOVzhVenZuZFZqY25MY3Q4UXBzLzhXR2pVbnJBUFJPdWcKbTlWcDF2TXVXdVJYcElGV0JMQnYxOUZaT1czUWRTK0hEMndkb2c2ZUtUUS9HWDhLWUNhOU5JVGVoTXIzMFZMdwpEVE9KOG1KMiszK2JzNFVPcEpkaXJBb3Z3THI0QUdvUjJ3M0g4K1JGMjlOMzBMYlhieXJDOStVa0I3UTgrWG5kCkIydHljdHNDZ1lCZkxqUTNRUnpQN1Z5Y1VGNkFTYUNYVTJkcE5l
ckVUbGFpdldIb1FFWVo3NHEyMkFTeFcrMlEKWmtZTEM1RVNGMnZwUU5kZUZhZlRyRm9zR3pLQ1dwYXBUL2QwUC9qaG83TEF1TTJQZEcxSXFoNElRU3FUM3VqNwp4Sm9WUzhIbEg1Ri9sQzZzczZQSm1GWlpsanhFL1FVTDlucDNLYTVCRjFXdXZiZVp0Q2I5Mnc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=\n`\n\nfunc Test_ValidateAndCombineConfig_NGTS(t *testing.T) {\n\tt.Run(\"ngts: valid configuration with all required flags\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tprivKeyPath := withFile(t, fakePrivKeyPEM)\n\t\tgot, cl, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tperiod: 1h\n\t\t\t\tcluster_name: test-cluster\n\t\t\t\tcluster_description: Test NGTS cluster\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--ngts\", \"--tsg-id\", \"test-tsg-123\", \"--client-id\", \"test-client-id\", \"--private-key-path\", privKeyPath))\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, NGTS, got.OutputMode)\n\t\tassert.Equal(t, \"test-tsg-123\", got.TSGID)\n\t\tassert.Equal(t, \"test-cluster\", got.ClusterName)\n\t\tassert.Equal(t, \"Test NGTS cluster\", got.ClusterDescription)\n\t\tassert.Equal(t, false, got.ClaimableCerts)\n\t\tassert.IsType(t, &client.NGTSClient{}, cl)\n\t})\n\n\tt.Run(\"ngts: claimable_certs flows from config into CombinedConfig\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tprivKeyPath := withFile(t, fakePrivKeyPEM)\n\t\tgot, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tperiod: 1h\n\t\t\t\tcluster_name: test-cluster\n\t\t\t\tclaimable_certs: true\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--ngts\", \"--tsg-id\", \"test-tsg-123\", \"--client-id\", \"test-client-id\", \"--private-key-path\", privKeyPath))\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, true, got.ClaimableCerts)\n\t})\n\n\tt.Run(\"ngts: valid configuration with custom server URL\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tprivKeyPath := withFile(t, fakePrivKeyPEM)\n\t\tgot, cl, err := 
ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tperiod: 1h\n\t\t\t\tcluster_name: test-cluster\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--ngts\", \"--tsg-id\", \"test-tsg-123\", \"--client-id\", \"test-client-id\", \"--private-key-path\", privKeyPath, \"--ngts-server-url\", \"https://ngts.test.example.com\"))\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, NGTS, got.OutputMode)\n\t\tassert.Equal(t, \"https://ngts.test.example.com\", got.NGTSServerURL)\n\t\tassert.IsType(t, &client.NGTSClient{}, cl)\n\t})\n\n\tt.Run(\"ngts: missing --ngts flag should not trigger NGTS mode\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tprivKeyPath := withFile(t, fakePrivKeyPEM)\n\t\t_, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tperiod: 1h\n\t\t\t\tcluster_name: test-cluster\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--tsg-id\", \"test-tsg-123\", \"--client-id\", \"test-client-id\", \"--private-key-path\", privKeyPath))\n\t\t// Should select VenafiCloudKeypair mode instead when --ngts is not specified\n\t\trequire.Error(t, err)\n\t\tassert.Contains(t, err.Error(), \"venafi-cloud.upload_path\")\n\t})\n\n\tt.Run(\"ngts: missing --tsg-id should error\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tprivKeyPath := withFile(t, fakePrivKeyPEM)\n\t\t_, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tperiod: 1h\n\t\t\t\tcluster_name: test-cluster\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--ngts\", \"--client-id\", \"test-client-id\", \"--private-key-path\", privKeyPath))\n\t\trequire.Error(t, err)\n\t\tassert.Contains(t, err.Error(), \"--tsg-id is required when using --ngts\")\n\t})\n\n\tt.Run(\"ngts: missing --client-id should error\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tprivKeyPath := withFile(t, fakePrivKeyPEM)\n\t\t_, _, err := 
ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tperiod: 1h\n\t\t\t\tcluster_name: test-cluster\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--ngts\", \"--tsg-id\", \"test-tsg-123\", \"--private-key-path\", privKeyPath))\n\t\trequire.Error(t, err)\n\t\tassert.Contains(t, err.Error(), \"client_id cannot be empty\")\n\t})\n\n\tt.Run(\"ngts: missing --private-key-path should error\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\t_, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tperiod: 1h\n\t\t\t\tcluster_name: test-cluster\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--ngts\", \"--tsg-id\", \"test-tsg-123\", \"--client-id\", \"test-client-id\"))\n\t\trequire.Error(t, err)\n\t\tassert.Contains(t, err.Error(), \"--private-key-path is required when using --ngts\")\n\t})\n\n\tt.Run(\"ngts: missing cluster_name should error\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tprivKeyPath := withFile(t, fakePrivKeyPEM)\n\t\t_, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tperiod: 1h\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--ngts\", \"--tsg-id\", \"test-tsg-123\", \"--client-id\", \"test-client-id\", \"--private-key-path\", privKeyPath))\n\t\trequire.Error(t, err)\n\t\tassert.Contains(t, err.Error(), \"cluster_name is required\")\n\t})\n\n\tt.Run(\"ngts: cannot be used with --machine-hub\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tprivKeyPath := withFile(t, fakePrivKeyPEM)\n\t\t_, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tperiod: 1h\n\t\t\t\tcluster_name: test-cluster\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--ngts\", \"--machine-hub\", \"--tsg-id\", \"test-tsg-123\", \"--client-id\", \"test-client-id\", \"--private-key-path\", privKeyPath))\n\t\trequire.Error(t, err)\n\t\tassert.Contains(t, err.Error(), \"--machine-hub cannot be used 
with --ngts\")\n\t})\n\n\tt.Run(\"ngts: cannot be used with --venafi-connection\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tprivKeyPath := withFile(t, fakePrivKeyPEM)\n\t\t_, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tperiod: 1h\n\t\t\t\tcluster_name: test-cluster\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--ngts\", \"--venafi-connection\", \"my-conn\", \"--tsg-id\", \"test-tsg-123\", \"--client-id\", \"test-client-id\", \"--private-key-path\", privKeyPath))\n\t\trequire.Error(t, err)\n\t\tassert.Contains(t, err.Error(), \"--venafi-connection cannot be used with --ngts\")\n\t})\n\n\tt.Run(\"ngts: cannot be used with --venafi-cloud\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tprivKeyPath := withFile(t, fakePrivKeyPEM)\n\t\t_, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tperiod: 1h\n\t\t\t\tcluster_name: test-cluster\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--ngts\", \"--venafi-cloud\", \"--tsg-id\", \"test-tsg-123\", \"--client-id\", \"test-client-id\", \"--private-key-path\", privKeyPath))\n\t\trequire.Error(t, err)\n\t\tassert.Contains(t, err.Error(), \"--venafi-cloud cannot be used with --ngts\")\n\t})\n\n\tt.Run(\"ngts: cannot be used with --api-token\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tprivKeyPath := withFile(t, fakePrivKeyPEM)\n\t\t_, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tperiod: 1h\n\t\t\t\tcluster_name: test-cluster\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--ngts\", \"--api-token\", \"test-token\", \"--tsg-id\", \"test-tsg-123\", \"--client-id\", \"test-client-id\", \"--private-key-path\", privKeyPath))\n\t\trequire.Error(t, err)\n\t\tassert.Contains(t, err.Error(), \"--api-token cannot be used with --ngts\")\n\t})\n\n\tt.Run(\"ngts: organization_id in config should error\", func(t *testing.T) 
{\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tprivKeyPath := withFile(t, fakePrivKeyPEM)\n\t\t_, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tperiod: 1h\n\t\t\t\tcluster_name: test-cluster\n\t\t\t\torganization_id: my-org\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--ngts\", \"--tsg-id\", \"test-tsg-123\", \"--client-id\", \"test-client-id\", \"--private-key-path\", privKeyPath))\n\t\trequire.Error(t, err)\n\t\tassert.Contains(t, err.Error(), \"organization_id in config file is not supported in NGTS mode\")\n\t})\n\n\tt.Run(\"ngts: cluster_id in config should error\", func(t *testing.T) {\n\t\tt.Setenv(\"POD_NAMESPACE\", \"venafi\")\n\t\tprivKeyPath := withFile(t, fakePrivKeyPEM)\n\t\t_, _, err := ValidateAndCombineConfig(discardLogs(),\n\t\t\twithConfig(testutil.Undent(`\n\t\t\t\tperiod: 1h\n\t\t\t\tcluster_name: test-cluster\n\t\t\t\tcluster_id: my-cluster-id\n\t\t\t`)),\n\t\t\twithCmdLineFlags(\"--ngts\", \"--tsg-id\", \"test-tsg-123\", \"--client-id\", \"test-client-id\", \"--private-key-path\", privKeyPath))\n\t\trequire.Error(t, err)\n\t\tassert.Contains(t, err.Error(), \"cluster_id in config file is not supported in NGTS mode\")\n\t})\n}\n\nconst fakePrivKeyPEM = `-----BEGIN PRIVATE KEY-----\nMHcCAQEEIFptpPXOvEWDrYkiMhyEH1+FB1GwtwX2tyXH4KtBO6g7oAoGCCqGSM49\nAwEHoUQDQgAE/BsIwagYc4YUjSSFyqcStj2qliAkdVGlMoJbMuXupzQ9Qs4TX5Pl\ndFjz6J/j6Gu4fLPqXmM61Hj6kiuRHx5eHQ==\n-----END PRIVATE KEY-----\n`\n"
  },
  {
    "path": "pkg/agent/dummy_data_gatherer.go",
    "content": "package agent\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com/jetstack/preflight/pkg/datagatherer\"\n)\n\ntype dummyConfig struct {\n\tAlwaysFail        bool `yaml:\"always-fail\"`\n\tFailedAttempts    int  `yaml:\"failed-attempts\"`\n\twantOnCreationErr bool\n}\n\nfunc (c *dummyConfig) NewDataGatherer(ctx context.Context) (datagatherer.DataGatherer, error) {\n\tif c.wantOnCreationErr {\n\t\treturn nil, fmt.Errorf(\"an error\")\n\t}\n\treturn &dummyDataGatherer{\n\t\tAlwaysFail:     c.AlwaysFail,\n\t\tFailedAttempts: c.FailedAttempts,\n\t}, nil\n}\n\ntype dummyDataGatherer struct {\n\tAlwaysFail     bool\n\tattemptNumber  int\n\tFailedAttempts int\n}\n\nfunc (g *dummyDataGatherer) Run(ctx context.Context) error {\n\t// no async functionality, see Fetch\n\treturn nil\n}\n\nfunc (g *dummyDataGatherer) WaitForCacheSync(ctx context.Context) error {\n\t// no async functionality, see Fetch\n\treturn nil\n}\n\nfunc (c *dummyDataGatherer) Fetch(ctx context.Context) (any, int, error) {\n\tvar err error\n\tif c.attemptNumber < c.FailedAttempts {\n\t\terr = fmt.Errorf(\"First %d attempts will fail\", c.FailedAttempts)\n\t}\n\tif c.AlwaysFail {\n\t\terr = fmt.Errorf(\"This data gatherer will always fail\")\n\t}\n\tc.attemptNumber++\n\treturn nil, -1, err\n}\n"
  },
  {
    "path": "pkg/agent/metrics.go",
    "content": "package agent\n\nimport (\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nvar (\n\tmetricPayloadSize = prometheus.NewGaugeVec(\n\t\tprometheus.GaugeOpts{\n\t\t\tNamespace: \"jscp\",\n\t\t\tSubsystem: \"agent\",\n\t\t\tName:      \"data_readings_upload_size\",\n\t\t\tHelp:      \"Data readings upload size (in bytes) sent by the jscp in-cluster agent.\",\n\t\t}, []string{\"organization\", \"cluster\"})\n)\n"
  },
  {
    "path": "pkg/agent/run.go",
    "content": "package agent\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/pprof\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/cenkalti/backoff/v5\"\n\t\"github.com/go-logr/logr\"\n\t\"github.com/hashicorp/go-multierror\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/promhttp\"\n\t\"github.com/spf13/cobra\"\n\t\"golang.org/x/sync/errgroup\"\n\tcorev1 \"k8s.io/api/core/v1\"\n\tv1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/types\"\n\t\"k8s.io/client-go/kubernetes\"\n\tclientgocorev1 \"k8s.io/client-go/kubernetes/typed/core/v1\"\n\t\"k8s.io/client-go/tools/record\"\n\t\"k8s.io/klog/v2\"\n\t\"sigs.k8s.io/controller-runtime/pkg/manager\"\n\n\t\"github.com/jetstack/preflight/api\"\n\t\"github.com/jetstack/preflight/internal/envelope\"\n\t\"github.com/jetstack/preflight/internal/envelope/keyfetch\"\n\t\"github.com/jetstack/preflight/internal/envelope/rsa\"\n\t\"github.com/jetstack/preflight/pkg/client\"\n\t\"github.com/jetstack/preflight/pkg/datagatherer\"\n\t\"github.com/jetstack/preflight/pkg/datagatherer/k8sdynamic\"\n\t\"github.com/jetstack/preflight/pkg/kubeconfig\"\n\t\"github.com/jetstack/preflight/pkg/logs\"\n\t\"github.com/jetstack/preflight/pkg/version\"\n)\n\nvar Flags AgentCmdFlags\n\n// schema version of the data sent by the agent.\n// The new default version is v2.\n// In v2 the agent posts data readings using api.gathereredResources\n// Any requests without a schema version set will be interpreted\n// as using v1 by the backend. 
In v1 the agent sends\n// raw resource data of unstructuredList\nconst schemaVersion string = \"v2.0.0\"\n\n// Run starts the agent process\nfunc Run(cmd *cobra.Command, args []string) (returnErr error) {\n\tbaseCtx, cancel := context.WithCancel(cmd.Context())\n\tdefer cancel()\n\tlog := klog.FromContext(baseCtx).WithName(\"Run\")\n\n\tlog.Info(\"Starting\", \"version\", version.PreflightVersion, \"commit\", version.Commit)\n\n\tfile, err := os.Open(Flags.ConfigFilePath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to load config file for agent from: %s\", Flags.ConfigFilePath)\n\t}\n\tdefer file.Close()\n\n\tb, err := io.ReadAll(file)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to read config file: %s\", err)\n\t}\n\n\tcfg, err := ParseConfig(b)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to parse config file: %s\", err)\n\t}\n\n\tconfig, preflightClient, err := ValidateAndCombineConfig(log, cfg, Flags)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"While evaluating configuration: %v\", err)\n\t}\n\n\tgroup, gctx := errgroup.WithContext(baseCtx)\n\tdefer func() {\n\t\tcancel()\n\t\tif groupErr := group.Wait(); groupErr != nil {\n\t\t\treturnErr = multierror.Append(\n\t\t\t\treturnErr,\n\t\t\t\tfmt.Errorf(\"failed to wait for controller-runtime component to stop: %v\", groupErr),\n\t\t\t)\n\t\t}\n\t}()\n\n\t{\n\t\tserver := http.NewServeMux()\n\t\tconst serverAddress = \":8081\"\n\t\tlog := log.WithName(\"APIServer\").WithValues(\"addr\", serverAddress)\n\n\t\tif Flags.Profiling {\n\t\t\tlog.Info(\"Profiling endpoints enabled\", \"path\", \"/debug/pprof\")\n\t\t\tserver.HandleFunc(\"/debug/pprof/\", pprof.Index)\n\t\t\tserver.HandleFunc(\"/debug/pprof/cmdline\", pprof.Cmdline)\n\t\t\tserver.HandleFunc(\"/debug/pprof/profile\", pprof.Profile)\n\t\t\tserver.HandleFunc(\"/debug/pprof/symbol\", pprof.Symbol)\n\t\t\tserver.HandleFunc(\"/debug/pprof/trace\", pprof.Trace)\n\t\t}\n\n\t\tif Flags.Prometheus {\n\t\t\tlog.Info(\"Metrics endpoints enabled\", 
\"path\", \"/metrics\")\n\t\t\tprometheus.MustRegister(metricPayloadSize)\n\t\t\tserver.Handle(\"/metrics\", promhttp.Handler())\n\t\t}\n\n\t\t// Health check endpoint. Since we haven't figured a good way of knowning\n\t\t// what \"ready\" means for the agent, we just return 200 OK unconditionally.\n\t\t// The goal is to satisfy some Kubernetes distributions, like OpenShift,\n\t\t// that require a liveness and health probe to be present for each pod.\n\t\tlog.Info(\"Healthz endpoints enabled\", \"path\", \"/healthz\")\n\t\tserver.HandleFunc(\"/healthz\", func(w http.ResponseWriter, r *http.Request) {\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t})\n\t\tlog.Info(\"Readyz endpoints enabled\", \"path\", \"/readyz\")\n\t\tserver.HandleFunc(\"/readyz\", func(w http.ResponseWriter, r *http.Request) {\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t})\n\n\t\tgroup.Go(func() error {\n\t\t\tlistenCtx := klog.NewContext(gctx, log)\n\t\t\terr := listenAndServe(\n\t\t\t\tlistenCtx,\n\t\t\t\t&http.Server{\n\t\t\t\t\tAddr:    serverAddress,\n\t\t\t\t\tHandler: server,\n\t\t\t\t\tBaseContext: func(_ net.Listener) context.Context {\n\t\t\t\t\t\treturn listenCtx\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"APIServer: %s\", err)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\n\t_, isVenConn := preflightClient.(*client.VenConnClient)\n\tif isVenConn {\n\t\tgroup.Go(func() error {\n\t\t\terr := preflightClient.(manager.Runnable).Start(gctx)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to start a controller-runtime component: %v\", err)\n\t\t\t}\n\n\t\t\t// The agent must stop if the controller-runtime component stops.\n\t\t\tcancel()\n\t\t\treturn nil\n\t\t})\n\t}\n\n\t// To help users notice issues with the agent, we show the error messages in\n\t// the agent pod's events.\n\teventf, err := newEventf(log)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create event recorder: %v\", err)\n\t}\n\n\t// Check if secret encryption is enabled 
via environment variable\n\t// When enabled, secret data will be kept for encryption instead of being redacted\n\tencryptSecrets := strings.ToLower(os.Getenv(\"ARK_SEND_SECRET_VALUES\")) == \"true\"\n\n\tvar encryptor envelope.Encryptor\n\n\tif encryptSecrets {\n\t\tencryptor, err = loadEncryptor(gctx, preflightClient)\n\t\tif err != nil {\n\t\t\tlog.Error(err, \"Failed to set up encryptor for secrets, secret data will not be sent\")\n\t\t\tencryptSecrets = false\n\t\t}\n\t}\n\n\tdataGatherers := map[string]datagatherer.DataGatherer{}\n\n\t// load datagatherer config and boot each one\n\tfor _, dgConfig := range config.DataGatherers {\n\t\tkind := dgConfig.Kind\n\t\tif dgConfig.DataPath != \"\" {\n\t\t\tkind = \"local\"\n\t\t\treturn fmt.Errorf(\"running data gatherer %s of type %s as Local, data-path override present: %s\", dgConfig.Name, dgConfig.Kind, dgConfig.DataPath)\n\t\t}\n\n\t\tnewDg, err := dgConfig.Config.NewDataGatherer(gctx)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to instantiate %q data gatherer  %q: %v\", kind, dgConfig.Name, err)\n\t\t}\n\n\t\tdynDg, isDynamicGatherer := newDg.(*k8sdynamic.DataGathererDynamic)\n\t\tif isDynamicGatherer {\n\t\t\tdynDg.ExcludeAnnotKeys = config.ExcludeAnnotationKeysRegex\n\t\t\tdynDg.ExcludeLabelKeys = config.ExcludeLabelKeysRegex\n\n\t\t\tgvr := dynDg.GVR()\n\n\t\t\tif encryptSecrets && gvr.Resource == \"secrets\" && gvr.Group == \"\" {\n\t\t\t\tlog.Info(\"Secret encryption enabled for datagatherer\")\n\t\t\t\tdynDg.Encryptor = encryptor\n\t\t\t}\n\t\t}\n\n\t\tlog.V(logs.Debug).Info(\"Starting DataGatherer\", \"name\", dgConfig.Name)\n\n\t\t// start the data gatherers and wait for the cache sync\n\t\tgroup.Go(func() error {\n\t\t\t// Most implementations of `DataGatherer.Run` return immediately.\n\t\t\t// Only the Dynamic DataGatherer starts an informer which runs and\n\t\t\t// blocks until the supplied channel is closed.\n\t\t\t// For this reason, we must allow these errgroup Go routines to 
exit\n\t\t\t// without cancelling the other Go routines in the group.\n\t\t\tif err := newDg.Run(gctx); err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to start %q data gatherer %q: %v\", kind, dgConfig.Name, err)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\n\t\t// regardless of success, this dataGatherers has been given a\n\t\t// chance to sync its cache and we will now continue as normal. We\n\t\t// assume at the informers will either recover or the log messages\n\t\t// above will help operators correct the issue.\n\t\tdataGatherers[dgConfig.Name] = newDg\n\t}\n\n\t// Wait for 5 seconds for all informers to sync. If they fail to sync\n\t// we continue (as we have no way to know if they will recover or not).\n\t//\n\t// bootCtx is a context with a timeout to allow the informer 5\n\t// seconds to perform an initial sync. It may fail, and that's fine\n\t// too, it will backoff and retry of its own accord. Initial boot\n\t// will only be delayed by a max of 5 seconds.\n\tbootCtx, bootCancel := context.WithTimeout(gctx, 5*time.Second)\n\tdefer bootCancel()\n\n\tvar timedoutDGs []string\n\tfor _, dgConfig := range config.DataGatherers {\n\t\tdg := dataGatherers[dgConfig.Name]\n\t\t// wait for the informer to complete an initial sync, we do this to\n\t\t// attempt to have an initial set of data for the first upload of\n\t\t// the run.\n\t\tif err := dg.WaitForCacheSync(bootCtx); err != nil {\n\t\t\t// log sync failure, this might recover in future\n\t\t\tif errors.Is(err, k8sdynamic.ErrCacheSyncTimeout) {\n\t\t\t\ttimedoutDGs = append(timedoutDGs, dgConfig.Name)\n\t\t\t} else {\n\t\t\t\tlog.V(logs.Info).Info(\"Failed to sync cache for datagatherer\", \"kind\", dgConfig.Kind, \"name\", dgConfig.Name, \"error\", err)\n\t\t\t}\n\t\t}\n\t}\n\tif len(timedoutDGs) > 0 {\n\t\tlog.V(logs.Info).Info(\"Skipping datagatherers for CRDs that can't be found in Kubernetes\", \"datagatherers\", timedoutDGs)\n\t}\n\t// begin the datagathering loop, periodically sending data to the\n\t// 
configured output using data in datagatherer caches or refreshing from\n\t// APIs each cycle depending on datagatherer implementation.\n\t// If any of the go routines exit (with nil or error) the main context will\n\t// be cancelled, which will cause this blocking loop to exit\n\t// instead of waiting for the time period.\n\tfor {\n\t\tif err := gatherAndOutputData(gctx, eventf, config, preflightClient, dataGatherers); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif config.OneShot {\n\t\t\tbreak\n\t\t}\n\n\t\tselect {\n\t\tcase <-gctx.Done():\n\t\t\treturn nil\n\t\tcase <-time.After(config.Period):\n\t\t}\n\t}\n\treturn nil\n}\n\n// loadEncryptor sets up an encryptor for encrypting secrets. For now, it just loads a hardcoded public key\nfunc loadEncryptor(ctx context.Context, preflightClient client.Client) (envelope.Encryptor, error) {\n\tcyberarkClient, ok := preflightClient.(*client.CyberArkClient)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"secret encryption is only supported for CyberArk clients\")\n\t}\n\n\tcfg, err := cyberarkClient.Config()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get CyberArk client config: %w\", err)\n\t}\n\n\tfetcher, err := keyfetch.NewClient(ctx, cyberarkClient.DiscoveryClient(), cfg, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create key fetcher for secret encryption: %w\", err)\n\t}\n\n\tencryptor, err := rsa.NewEncryptor(fetcher)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create encryptor for secret encryption: %w\", err)\n\t}\n\n\treturn encryptor, nil\n}\n\n// Creates an event recorder for the agent's Pod object. Expects the env var\n// POD_NAME to contain the pod name. 
Note that the RBAC rule allowing sending\n// events is attached to the pod's service account, not the impersonated service\n// account (venafi-connection).\nfunc newEventf(log logr.Logger) (Eventf, error) {\n\tpodName := os.Getenv(\"POD_NAME\")\n\tpodNode := os.Getenv(\"POD_NODE\")\n\tpodUID := os.Getenv(\"POD_UID\")\n\tpodNamespace := os.Getenv(\"POD_NAMESPACE\")\n\tif podName == \"\" || podNode == \"\" || podUID == \"\" || podNamespace == \"\" {\n\t\tlog.Info(\n\t\t\t\"Pod event recorder disabled\",\n\t\t\t\"reason\", \"The agent does not appear to be running in a Kubernetes cluster.\",\n\t\t\t\"detail\", \"When running in a Kubernetes cluster the following environment variables must be set: POD_NAME, POD_NODE, POD_UID, POD_NAMESPACE\",\n\t\t)\n\t\treturn func(eventType, reason, msg string, args ...any) {}, nil\n\t}\n\trestcfg, err := kubeconfig.LoadRESTConfig(\"\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to load kubeconfig: %v\", err)\n\t}\n\tscheme := runtime.NewScheme()\n\t_ = corev1.AddToScheme(scheme)\n\n\tvar eventf Eventf\n\n\teventClient, err := kubernetes.NewForConfig(restcfg)\n\tif err != nil {\n\t\treturn eventf, fmt.Errorf(\"failed to create event client: %v\", err)\n\t}\n\tbroadcaster := record.NewBroadcaster()\n\tbroadcaster.StartRecordingToSink(&clientgocorev1.EventSinkImpl{Interface: eventClient.CoreV1().Events(podNamespace)})\n\teventRec := broadcaster.NewRecorder(scheme, corev1.EventSource{Component: \"venafi-kubernetes-agent\", Host: podNode})\n\teventf = func(eventType, reason, msg string, args ...any) {\n\t\teventRec.Eventf(&corev1.Pod{ObjectMeta: v1.ObjectMeta{Name: podName, Namespace: podNamespace, UID: types.UID(podUID)}}, eventType, reason, msg, args...)\n\n\t}\n\n\treturn eventf, nil\n}\n\n// Like Printf but for sending events to the agent's Pod object.\ntype Eventf func(eventType, reason, msg string, args ...any)\n\nfunc gatherAndOutputData(ctx context.Context, eventf Eventf, config CombinedConfig, preflightClient 
client.Client, dataGatherers map[string]datagatherer.DataGatherer) error {\n\tlog := klog.FromContext(ctx).WithName(\"gatherAndOutputData\")\n\tvar readings []*api.DataReading\n\n\tif config.InputPath != \"\" {\n\t\tlog.V(logs.Debug).Info(\"Reading data from local file\", \"inputPath\", config.InputPath)\n\t\tdata, err := os.ReadFile(config.InputPath)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to read local data file: %s\", err)\n\t\t}\n\t\terr = json.Unmarshal(data, &readings)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal local data file: %s\", err)\n\t\t}\n\t} else {\n\t\tvar err error\n\t\treadings, err = gatherData(ctx, config, dataGatherers)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t{\n\t\tgroup, ctx := errgroup.WithContext(ctx)\n\n\t\tbackOff := backoff.NewExponentialBackOff()\n\t\tbackOff.InitialInterval = 30 * time.Second\n\t\tbackOff.MaxInterval = 3 * time.Minute\n\n\t\tnotificationFunc := backoff.Notify(func(err error, t time.Duration) {\n\t\t\teventf(\"Warning\", \"PushingErr\", \"retrying in %v after error: %s\", t, err)\n\t\t\tlog.Error(err, \"Warning: PushingErr: will retry\", \"retry_after\", t)\n\t\t})\n\n\t\tpost := func() (any, error) {\n\t\t\tpostCtx, cancel := context.WithTimeout(ctx, config.BackoffMaxTime)\n\t\t\tdefer cancel()\n\n\t\t\treturn struct{}{}, postData(postCtx, config, preflightClient, readings)\n\t\t}\n\n\t\tgroup.Go(func() error {\n\t\t\t_, err := backoff.Retry(ctx, post, backoff.WithBackOff(backOff), backoff.WithNotify(notificationFunc), backoff.WithMaxElapsedTime(config.BackoffMaxTime))\n\t\t\treturn err\n\t\t})\n\n\t\tgroupErr := group.Wait()\n\t\tif groupErr != nil {\n\t\t\treturn fmt.Errorf(\"got a fatal error from one or more upload actions: %s\", groupErr)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc gatherData(ctx context.Context, config CombinedConfig, dataGatherers map[string]datagatherer.DataGatherer) ([]*api.DataReading, error) {\n\tlog := 
klog.FromContext(ctx).WithName(\"gatherData\")\n\n\tvar readings []*api.DataReading\n\n\tvar dgError *multierror.Error\n\tfor k, dg := range dataGatherers {\n\t\tdgData, count, err := dg.Fetch(ctx)\n\t\tif err != nil {\n\t\t\tdgError = multierror.Append(dgError, fmt.Errorf(\"error in datagatherer %s: %w\", k, err))\n\n\t\t\tcontinue\n\t\t}\n\t\t{\n\t\t\t// Not all datagatherers return a count.\n\t\t\t// If `count == -1` it means that the datagatherer does not support returning a count.\n\t\t\tlog := log\n\t\t\tif count >= 0 {\n\t\t\t\tlog = log.WithValues(\"count\", count)\n\t\t\t}\n\t\t\tlog.V(logs.Debug).Info(\"Successfully gathered\", \"name\", k)\n\t\t}\n\t\treadings = append(readings, &api.DataReading{\n\t\t\tClusterID:     config.ClusterID,\n\t\t\tDataGatherer:  k,\n\t\t\tTimestamp:     api.Time{Time: time.Now()},\n\t\t\tData:          dgData,\n\t\t\tSchemaVersion: schemaVersion,\n\t\t})\n\t}\n\n\tif dgError != nil {\n\t\tdgError.ErrorFormat = func(es []error) string {\n\t\t\tpoints := make([]string, len(es))\n\t\t\tfor i, err := range es {\n\t\t\t\tpoints[i] = fmt.Sprintf(\"* %s\", err)\n\t\t\t}\n\t\t\treturn fmt.Sprintf(\n\t\t\t\t\"The following %d data gatherer(s) have failed:\\n\\t%s\",\n\t\t\t\tlen(es), strings.Join(points, \"\\n\\t\"))\n\t\t}\n\t}\n\n\tif config.StrictMode && dgError.ErrorOrNil() != nil {\n\t\treturn nil, fmt.Errorf(\"halting datagathering in strict mode due to error: %s\", dgError.ErrorOrNil())\n\t}\n\n\treturn readings, nil\n}\n\nfunc postData(ctx context.Context, config CombinedConfig, preflightClient client.Client, readings []*api.DataReading) error {\n\tlog := klog.FromContext(ctx).WithName(\"postData\")\n\terr := preflightClient.PostDataReadingsWithOptions(ctx, readings, client.Options{\n\t\tClusterName:        config.ClusterName,\n\t\tClusterDescription: config.ClusterDescription,\n\t\tClaimableCerts:     config.ClaimableCerts,\n\t\t// orgID and clusterID are not required for Venafi Cloud auth\n\t\tOrgID:     
config.OrganizationID,\n\t\tClusterID: config.ClusterID,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"post to server failed: %+v\", err)\n\t}\n\tlog.Info(\"Data sent successfully\")\n\treturn nil\n}\n\n// listenAndServe starts the supplied HTTP server and stops it gracefully when\n// the supplied context is cancelled.\n// It returns when the graceful server shutdown is complete or when the server\n// exits with an error.\n// If the server fails to start, it returns the server error.\n// If the server fails to shutdown gracefully, it returns the shutdown error.\n// The server is given 3 seconds to shutdown gracefully before it is stopped\n// forcefully.\nfunc listenAndServe(ctx context.Context, server *http.Server) error {\n\tlog := klog.FromContext(ctx).WithName(\"ListenAndServe\")\n\n\tlog.V(logs.Debug).Info(\"Starting\")\n\n\tlistenCTX, listenCancelCause := context.WithCancelCause(context.WithoutCancel(ctx))\n\tgo func() {\n\t\terr := server.ListenAndServe()\n\t\tlistenCancelCause(fmt.Errorf(\"ListenAndServe: %s\", err))\n\t}()\n\n\tselect {\n\tcase <-listenCTX.Done():\n\t\tlog.V(logs.Debug).Info(\"Shutdown skipped\", \"reason\", \"Server already stopped\")\n\t\treturn context.Cause(listenCTX)\n\n\tcase <-ctx.Done():\n\t\tlog.V(logs.Debug).Info(\"Shutting down\")\n\t}\n\n\tshutdownCTX, shutdownCancel := context.WithTimeout(context.WithoutCancel(ctx), time.Second*3)\n\tshutdownErr := server.Shutdown(shutdownCTX)\n\tshutdownCancel()\n\tif shutdownErr != nil {\n\t\tshutdownErr = fmt.Errorf(\"Shutdown: %s\", shutdownErr)\n\t}\n\n\tcloseErr := server.Close()\n\tif closeErr != nil {\n\t\tcloseErr = fmt.Errorf(\"Close: %s\", closeErr)\n\t}\n\n\tlog.V(logs.Debug).Info(\"Shutdown complete\")\n\n\treturn errors.Join(shutdownErr, closeErr)\n}\n"
  },
  {
    "path": "pkg/client/client.go",
    "content": "package client\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/jetstack/preflight/api\"\n)\n\ntype (\n\t// Options is the struct describing additional information pertinent to an agent that isn't a data reading\n\t// These fields will then be uploaded together with data readings.\n\tOptions struct {\n\t\t// Only used with Jetstack Secure.\n\t\tOrgID string\n\n\t\t// Only used with Jetstack Secure.\n\t\tClusterID string\n\n\t\t// Used for Venafi Cloud and MachineHub mode.\n\t\tClusterName string\n\n\t\t// Used for Venafi Cloud and MachineHub mode.\n\t\tClusterDescription string\n\n\t\t// ClaimableCerts controls whether discovered certs can be claimed by other tenants.\n\t\t// true = certs are left unassigned, available for any tenant to claim.\n\t\t// false (default) = certs are owned by this cluster's tenant.\n\t\tClaimableCerts bool\n\t}\n\n\t// The Client interface describes types that perform requests against the Jetstack Secure backend.\n\tClient interface {\n\t\tPostDataReadingsWithOptions(ctx context.Context, readings []*api.DataReading, options Options) error\n\t}\n\n\t// The Credentials interface describes methods for credential types to implement for verification.\n\tCredentials interface {\n\t\tIsClientSet() (ok bool, why string)\n\t\tValidate() error\n\t}\n)\n\nfunc fullURL(baseURL, path string) string {\n\tbase := baseURL\n\tfor strings.HasSuffix(base, \"/\") {\n\t\tbase = strings.TrimSuffix(base, \"/\")\n\t}\n\tfor strings.HasPrefix(path, \"/\") {\n\t\tpath = strings.TrimPrefix(path, \"/\")\n\t}\n\treturn fmt.Sprintf(\"%s/%s\", base, path)\n}\n"
  },
  {
    "path": "pkg/client/client_api_token.go",
    "content": "package client\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"path/filepath\"\n\t\"time\"\n\n\t\"k8s.io/client-go/transport\"\n\t\"k8s.io/klog/v2\"\n\n\t\"github.com/jetstack/preflight/api\"\n\t\"github.com/jetstack/preflight/pkg/version\"\n)\n\ntype (\n\t// The APITokenClient type is a Client implementation used to upload data readings to the Jetstack Secure platform\n\t// using API tokens as its authentication method.\n\tAPITokenClient struct {\n\t\tapiToken      string\n\t\tbaseURL       string\n\t\tagentMetadata *api.AgentMetadata\n\t\tclient        *http.Client\n\t}\n)\n\n// NewAPITokenClient returns a new instance of the APITokenClient type that will perform HTTP requests using\n// the provided API token for authentication.\nfunc NewAPITokenClient(agentMetadata *api.AgentMetadata, apiToken, baseURL string) (*APITokenClient, error) {\n\tif baseURL == \"\" {\n\t\treturn nil, fmt.Errorf(\"cannot create APITokenClient: baseURL cannot be empty\")\n\t}\n\n\treturn &APITokenClient{\n\t\tapiToken:      apiToken,\n\t\tagentMetadata: agentMetadata,\n\t\tbaseURL:       baseURL,\n\t\tclient: &http.Client{\n\t\t\tTimeout:   time.Minute,\n\t\t\tTransport: transport.DebugWrappers(http.DefaultTransport),\n\t\t},\n\t}, nil\n}\n\n// PostDataReadingsWithOptions uploads the slice of api.DataReading to the Jetstack Secure backend to be processed for later\n// viewing in the user-interface.\nfunc (c *APITokenClient) PostDataReadingsWithOptions(ctx context.Context, readings []*api.DataReading, opts Options) error {\n\treturn c.postDataReadings(ctx, opts.OrgID, opts.ClusterID, readings)\n}\n\n// PostDataReadings uploads the slice of api.DataReading to the Jetstack Secure backend to be processed for later\n// viewing in the user-interface.\nfunc (c *APITokenClient) postDataReadings(ctx context.Context, orgID, clusterID string, readings []*api.DataReading) error {\n\tpayload := 
api.DataReadingsPost{\n\t\tAgentMetadata:  c.agentMetadata,\n\t\tDataGatherTime: time.Now().UTC(),\n\t\tDataReadings:   readings,\n\t}\n\tdata, err := json.Marshal(payload)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tklog.FromContext(ctx).V(2).Info(\n\t\t\"uploading data readings\",\n\t\t\"url\", filepath.Join(\"/api/v1/org\", orgID, \"datareadings\", clusterID),\n\t\t\"cluster_id\", clusterID,\n\t\t\"data_readings_count\", len(readings),\n\t\t\"data_size_bytes\", len(data),\n\t)\n\n\tres, err := c.post(ctx, filepath.Join(\"/api/v1/org\", orgID, \"datareadings\", clusterID), bytes.NewBuffer(data))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\n\tif code := res.StatusCode; code < 200 || code >= 300 {\n\t\terrorContent := \"\"\n\t\tbody, err := io.ReadAll(res.Body)\n\t\tif err == nil {\n\t\t\terrorContent = string(body)\n\t\t}\n\n\t\treturn fmt.Errorf(\"received response with status code %d. Body: [%s]\", code, errorContent)\n\t}\n\n\treturn nil\n}\n\n// Post performs an HTTP POST request.\nfunc (c *APITokenClient) post(ctx context.Context, path string, body io.Reader) (*http.Response, error) {\n\treq, err := http.NewRequestWithContext(ctx, http.MethodPost, fullURL(c.baseURL, path), body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", c.apiToken))\n\tversion.SetUserAgent(req)\n\n\treturn c.client.Do(req)\n}\n"
  },
  {
    "path": "pkg/client/client_cyberark.go",
    "content": "package client\n\nimport (\n\t\"context\"\n\t\"crypto/x509\"\n\t\"encoding/base64\"\n\t\"encoding/pem\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"slices\"\n\n\t\"github.com/go-logr/logr\"\n\tcorev1 \"k8s.io/api/core/v1\"\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/util/sets\"\n\t\"k8s.io/klog/v2\"\n\n\t\"github.com/jetstack/preflight/api\"\n\t\"github.com/jetstack/preflight/internal/cyberark\"\n\t\"github.com/jetstack/preflight/internal/cyberark/dataupload\"\n\t\"github.com/jetstack/preflight/internal/cyberark/servicediscovery\"\n\t\"github.com/jetstack/preflight/pkg/logs\"\n\t\"github.com/jetstack/preflight/pkg/version\"\n)\n\n// CyberArkClient is a client for publishing data readings to CyberArk's discoverycontext API.\ntype CyberArkClient struct {\n\tconfigLoader cyberark.ClientConfigLoader\n\thttpClient   *http.Client\n\n\tdiscoveryClient *servicediscovery.Client\n}\n\nvar _ Client = &CyberArkClient{}\n\n// NewCyberArk initializes a CyberArk client using configuration from environment variables.\n// It requires an HTTP client to be provided, which will be used for making requests.\n// The environment variables ARK_SUBDOMAIN, ARK_USERNAME, and ARK_SECRET must be set for authentication.\n// Sending secrets is controlled by the ARK_SEND_SECRETS environment variable (defaults to \"false\").\n// If sending secrets is enabled, the hardcoded public key will be loaded and an encryptor will be created.\n// If the configuration is invalid or missing, an error is returned.\nfunc NewCyberArk(httpClient *http.Client) (*CyberArkClient, error) {\n\tconfigLoader := cyberark.LoadClientConfigFromEnvironment\n\n\tcfg, err := configLoader()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &CyberArkClient{\n\t\tconfigLoader:    configLoader,\n\t\thttpClient:      httpClient,\n\t\tdiscoveryClient: servicediscovery.New(httpClient, cfg.Subdomain),\n\t}, nil\n}\n\n// 
PostDataReadingsWithOptions uploads data readings to CyberArk.\n// It converts the supplied data readings into a snapshot format expected by CyberArk.\n// Deleted resources are excluded from the snapshot because they are not needed by CyberArk.\n// It then minimizes the snapshot to avoid uploading unnecessary data.\n// It initializes a data upload client with the configured HTTP client and credentials,\n// then uploads a snapshot.\n// The supplied Options are not used by this publisher.\nfunc (o *CyberArkClient) PostDataReadingsWithOptions(ctx context.Context, readings []*api.DataReading, opts Options) error {\n\tlog := klog.FromContext(ctx)\n\n\tcfg, err := o.configLoader()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to load config: %w\", err)\n\t}\n\n\tserviceMap, tenantUUID, err := o.discoveryClient.DiscoverServices(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsnapshot := baseSnapshotFromOptions(opts)\n\n\tif err := convertDataReadings(defaultExtractorFunctions, readings, &snapshot); err != nil {\n\t\treturn fmt.Errorf(\"while converting data readings: %s\", err)\n\t}\n\n\t// Minimize the snapshot to reduce size and improve privacy\n\tminimizeSnapshot(log.V(logs.Debug), &snapshot)\n\n\tdatauploadClient, err := cyberark.NewDatauploadClient(ctx, o.httpClient, serviceMap, tenantUUID, cfg)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"while initializing data upload client: %s\", err)\n\t}\n\n\terr = datauploadClient.PutSnapshot(ctx, snapshot)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"while uploading snapshot: %s\", err)\n\t}\n\treturn nil\n}\n\nfunc (o *CyberArkClient) DiscoveryClient() *servicediscovery.Client {\n\treturn o.discoveryClient\n}\n\nfunc (o *CyberArkClient) Config() (cyberark.ClientConfig, error) {\n\treturn o.configLoader()\n}\n\n// baseSnapshotFromOptions creates a base snapshot with common fields from the provided options.\n// This includes the cluster name, description, and agent version.\n// Other fields like ClusterID and K8SVersion 
need to be populated separately.\nfunc baseSnapshotFromOptions(opts Options) dataupload.Snapshot {\n\treturn dataupload.Snapshot{\n\t\tClusterName:        opts.ClusterName,\n\t\tClusterDescription: opts.ClusterDescription,\n\t\tAgentVersion:       version.PreflightVersion,\n\t}\n}\n\n// extractOIDCFromReading converts the opaque data from a OIDCDiscoveryData\n// data reading to allow access to the OIDC fields within.\nfunc extractOIDCFromReading(reading *api.DataReading, target *dataupload.Snapshot) error {\n\tif reading == nil {\n\t\treturn fmt.Errorf(\"programmer mistake: the DataReading must not be nil\")\n\t}\n\tdata, ok := reading.Data.(*api.OIDCDiscoveryData)\n\tif !ok {\n\t\treturn fmt.Errorf(\n\t\t\t\"programmer mistake: the DataReading must have data type *api.OIDCDiscoveryData. \"+\n\t\t\t\t\"This DataReading (%s) has data type %T\", reading.DataGatherer, reading.Data)\n\t}\n\ttarget.OIDCConfig = data.OIDCConfig\n\ttarget.OIDCConfigError = data.OIDCConfigError\n\ttarget.JWKS = data.JWKS\n\ttarget.JWKSError = data.JWKSError\n\treturn nil\n}\n\n// extractClusterIDAndServerVersionFromReading converts the opaque data from a DiscoveryData\n// data reading to allow access to the Kubernetes version fields within.\nfunc extractClusterIDAndServerVersionFromReading(reading *api.DataReading, target *dataupload.Snapshot) error {\n\tif reading == nil {\n\t\treturn fmt.Errorf(\"programmer mistake: the DataReading must not be nil\")\n\t}\n\tdata, ok := reading.Data.(*api.DiscoveryData)\n\tif !ok {\n\t\treturn fmt.Errorf(\n\t\t\t\"programmer mistake: the DataReading must have data type *api.DiscoveryData. 
\"+\n\t\t\t\t\"This DataReading (%s) has data type %T\", reading.DataGatherer, reading.Data)\n\t}\n\ttarget.ClusterID = data.ClusterID\n\tif data.ServerVersion != nil {\n\t\ttarget.K8SVersion = data.ServerVersion.GitVersion\n\t}\n\treturn nil\n}\n\n// extractResourceListFromReading converts the opaque data from a DynamicData\n// data reading to runtime.Object resources, to allow access to the metadata and\n// other kubernetes API fields.\n// Deleted resources are skipped because the CyberArk Discovery and Context service\n// does not need to see resources that no longer exist.\nfunc extractResourceListFromReading(reading *api.DataReading, target *[]runtime.Object) error {\n\tif reading == nil {\n\t\treturn fmt.Errorf(\"programmer mistake: the DataReading must not be nil\")\n\t}\n\tdata, ok := reading.Data.(*api.DynamicData)\n\tif !ok {\n\t\treturn fmt.Errorf(\n\t\t\t\"programmer mistake: the DataReading must have data type *api.DynamicData. \"+\n\t\t\t\t\"This DataReading (%s) has data type %T\", reading.DataGatherer, reading.Data)\n\t}\n\tresources := make([]runtime.Object, 0, len(data.Items))\n\tfor i, item := range data.Items {\n\t\tif !item.DeletedAt.IsZero() {\n\t\t\tcontinue\n\t\t}\n\t\tif resource, ok := item.Resource.(runtime.Object); ok {\n\t\t\tresources = append(resources, resource)\n\t\t} else {\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"programmer mistake: the DynamicData items must have Resource type runtime.Object. 
\"+\n\t\t\t\t\t\"This item (%d) has Resource type %T\", i, item.Resource)\n\t\t}\n\t}\n\t*target = resources\n\treturn nil\n}\n\n// defaultExtractorFunctions maps data gatherer names to functions that extract\n// their data from DataReadings into the appropriate fields of a Snapshot.\n// Each function takes a DataReading and a pointer to a Snapshot,\n// and populates the relevant field(s) of the Snapshot based on the DataReading's data.\n// Deleted resources are excluded from the snapshot because they are not needed by CyberArk.\nvar defaultExtractorFunctions = map[string]func(*api.DataReading, *dataupload.Snapshot) error{\n\t\"ark/oidc\":      extractOIDCFromReading,\n\t\"ark/discovery\": extractClusterIDAndServerVersionFromReading,\n\t\"ark/secrets\": func(r *api.DataReading, s *dataupload.Snapshot) error {\n\t\treturn extractResourceListFromReading(r, &s.Secrets)\n\t},\n\t\"ark/serviceaccounts\": func(r *api.DataReading, s *dataupload.Snapshot) error {\n\t\treturn extractResourceListFromReading(r, &s.ServiceAccounts)\n\t},\n\t\"ark/roles\": func(r *api.DataReading, s *dataupload.Snapshot) error {\n\t\treturn extractResourceListFromReading(r, &s.Roles)\n\t},\n\t\"ark/clusterroles\": func(r *api.DataReading, s *dataupload.Snapshot) error {\n\t\treturn extractResourceListFromReading(r, &s.ClusterRoles)\n\t},\n\t\"ark/rolebindings\": func(r *api.DataReading, s *dataupload.Snapshot) error {\n\t\treturn extractResourceListFromReading(r, &s.RoleBindings)\n\t},\n\t\"ark/clusterrolebindings\": func(r *api.DataReading, s *dataupload.Snapshot) error {\n\t\treturn extractResourceListFromReading(r, &s.ClusterRoleBindings)\n\t},\n\t\"ark/jobs\": func(r *api.DataReading, s *dataupload.Snapshot) error {\n\t\treturn extractResourceListFromReading(r, &s.Jobs)\n\t},\n\t\"ark/cronjobs\": func(r *api.DataReading, s *dataupload.Snapshot) error {\n\t\treturn extractResourceListFromReading(r, &s.CronJobs)\n\t},\n\t\"ark/deployments\": func(r *api.DataReading, s *dataupload.Snapshot) 
error {\n\t\treturn extractResourceListFromReading(r, &s.Deployments)\n\t},\n\t\"ark/statefulsets\": func(r *api.DataReading, s *dataupload.Snapshot) error {\n\t\treturn extractResourceListFromReading(r, &s.Statefulsets)\n\t},\n\t\"ark/daemonsets\": func(r *api.DataReading, s *dataupload.Snapshot) error {\n\t\treturn extractResourceListFromReading(r, &s.Daemonsets)\n\t},\n\t\"ark/pods\": func(r *api.DataReading, s *dataupload.Snapshot) error {\n\t\treturn extractResourceListFromReading(r, &s.Pods)\n\t},\n\t\"ark/configmaps\": func(r *api.DataReading, s *dataupload.Snapshot) error {\n\t\treturn extractResourceListFromReading(r, &s.ConfigMaps)\n\t},\n\t\"ark/esoexternalsecrets\": func(r *api.DataReading, s *dataupload.Snapshot) error {\n\t\treturn extractResourceListFromReading(r, &s.ExternalSecrets)\n\t},\n\t\"ark/esosecretstores\": func(r *api.DataReading, s *dataupload.Snapshot) error {\n\t\treturn extractResourceListFromReading(r, &s.SecretStores)\n\t},\n\t\"ark/esoclusterexternalsecrets\": func(r *api.DataReading, s *dataupload.Snapshot) error {\n\t\treturn extractResourceListFromReading(r, &s.ClusterExternalSecrets)\n\t},\n\t\"ark/esoclustersecretstores\": func(r *api.DataReading, s *dataupload.Snapshot) error {\n\t\treturn extractResourceListFromReading(r, &s.ClusterSecretStores)\n\t},\n}\n\n// convertDataReadings processes a list of DataReadings using the provided\n// extractor functions to populate the fields of the target snapshot.\n// It ensures that all expected data gatherers are handled and that there are\n// no unhandled data gatherers. 
If any discrepancies are found, or if any\n// extractor function returns an error, it returns an error.\n// The extractorFunctions map should contain functions for each expected\n// DataGatherer name, which will be called with the corresponding DataReading\n// and the target snapshot to populate the relevant fields.\n// Deleted resources are excluded from the snapshot because they are not needed by CyberArk.\nfunc convertDataReadings(\n\textractorFunctions map[string]func(*api.DataReading, *dataupload.Snapshot) error,\n\treadings []*api.DataReading,\n\ttarget *dataupload.Snapshot,\n) error {\n\texpectedDataGatherers := sets.KeySet(extractorFunctions)\n\tunhandledDataGatherers := sets.New[string]()\n\tmissingDataGatherers := expectedDataGatherers.Clone()\n\tfor _, reading := range readings {\n\t\tdataGathererName := reading.DataGatherer\n\t\textractFunc, found := extractorFunctions[dataGathererName]\n\t\tif !found {\n\t\t\tunhandledDataGatherers.Insert(dataGathererName)\n\t\t\tcontinue\n\t\t}\n\t\tmissingDataGatherers.Delete(dataGathererName)\n\t\t// Call the extractor function to populate the relevant field in the target snapshot.\n\t\tif err := extractFunc(reading, target); err != nil {\n\t\t\treturn fmt.Errorf(\"while extracting data reading %s: %s\", dataGathererName, err)\n\t\t}\n\t}\n\tif missingDataGatherers.Len() > 0 || unhandledDataGatherers.Len() > 0 {\n\t\treturn fmt.Errorf(\n\t\t\t\"unexpected data gatherers, missing: %v, unhandled: %v\",\n\t\t\tsets.List(missingDataGatherers),\n\t\t\tsets.List(unhandledDataGatherers),\n\t\t)\n\t}\n\treturn nil\n}\n\n// minimizeSnapshot reduces the size of the snapshot by removing unnecessary data.\n//\n// This reduces the bandwidth used when uploading the snapshot to CyberArk,\n// it reduces the storage used by CyberArk to store the snapshot, and\n// it provides better privacy for the cluster being scanned; only the necessary\n// data is included in the snapshot.\n//\n// This is a best-effort attempt to minimize the 
snapshot size. If an error occurs\n// during analysis of a secret, the error is logged and the secret is kept in the\n// snapshot (i.e., not excluded). Errors do not prevent the snapshot from being uploaded.\n//\n// It performs the following minimization steps:\n//\n//  1. Removal of non-clientauth TLS secrets: It filters out TLS secrets that do\n//     not contain a client certificate. This is done to avoid uploading large\n//     TLS secrets that are not relevant for the CyberArk Discovery and Context\n//     service.\n//\n// TODO(wallrj): Remove more from the snapshot as we learn more about what\n// resources the Discovery and Context service require.\nfunc minimizeSnapshot(log logr.Logger, snapshot *dataupload.Snapshot) {\n\toriginalSecretCount := len(snapshot.Secrets)\n\tfilteredSecrets := make([]runtime.Object, 0, originalSecretCount)\n\tfor _, secret := range snapshot.Secrets {\n\t\tif isExcludableSecret(log, secret) {\n\t\t\tcontinue\n\t\t}\n\t\tfilteredSecrets = append(filteredSecrets, secret)\n\t}\n\tsnapshot.Secrets = filteredSecrets\n\tlog.Info(\"Minimized snapshot\", \"originalSecretCount\", originalSecretCount, \"filteredSecretCount\", len(snapshot.Secrets))\n}\n\n// isExcludableSecret filters out TLS secrets that are definitely of no interest\n// to CyberArk's Discovery and Context service, specifically TLS secrets that do\n// not contain a client certificate.\n//\n// The Secret is kept if there is any doubt or if there is a problem decoding\n// its contents.\n//\n// Secrets are obtained by a DynamicClient, so they have type\n// *unstructured.Unstructured.\nfunc isExcludableSecret(log logr.Logger, obj runtime.Object) bool {\n\t// Fast path: type assertion and kind/type checks\n\tunstructuredObj, ok := obj.(*unstructured.Unstructured)\n\tif !ok {\n\t\tlog.Info(\"Object is not a Unstructured\", \"type\", fmt.Sprintf(\"%T\", obj))\n\t\treturn false\n\t}\n\tif unstructuredObj.GetKind() != \"Secret\" || unstructuredObj.GetAPIVersion() != \"v1\" 
{\n\t\treturn false\n\t}\n\n\tlog = log.WithValues(\"namespace\", unstructuredObj.GetNamespace(), \"name\", unstructuredObj.GetName())\n\tdataMap, found, err := unstructured.NestedMap(unstructuredObj.Object, \"data\")\n\tif err != nil || !found {\n\t\tlog.Info(\"Secret data missing or not a map\")\n\t\treturn false\n\t}\n\n\tsecretType, found, err := unstructured.NestedString(unstructuredObj.Object, \"type\")\n\tif err != nil || !found {\n\t\tlog.Info(\"Secret object has no type\")\n\t\treturn false\n\t}\n\n\tif corev1.SecretType(secretType) != corev1.SecretTypeTLS {\n\t\tlog.Info(\"Secrets of this type are never excluded\", \"type\", secretType)\n\t\treturn false\n\t}\n\n\treturn isExcludableTLSSecret(log, dataMap)\n}\n\n// isExcludableTLSSecret checks if a TLS Secret contains a client certificate.\n// It returns true if the Secret is a TLS Secret and its tls.crt does not\n// contain a client certificate.\nfunc isExcludableTLSSecret(log logr.Logger, dataMap map[string]any) bool {\n\ttlsCrtRaw, found := dataMap[corev1.TLSCertKey]\n\tif !found {\n\t\tlog.Info(\"TLS Secret does not contain tls.crt key\")\n\t\treturn true\n\t}\n\n\t// Decode base64 if necessary (K8s secrets store data as base64-encoded strings)\n\tvar tlsCrtBytes []byte\n\tswitch v := tlsCrtRaw.(type) {\n\tcase string:\n\t\tdecoded, err := base64.StdEncoding.DecodeString(v)\n\t\tif err != nil {\n\t\t\tlog.Info(\"Failed to decode tls.crt base64\", \"error\", err.Error())\n\t\t\treturn true\n\t\t}\n\t\ttlsCrtBytes = decoded\n\tcase []byte:\n\t\ttlsCrtBytes = v\n\tdefault:\n\t\tlog.Info(\"tls.crt is not a string or byte slice\", \"type\", fmt.Sprintf(\"%T\", v))\n\t\treturn true\n\t}\n\n\t// Parse PEM certificate chain\n\thasClientCert := searchPEM(tlsCrtBytes, func(block *pem.Block) bool {\n\t\tif block.Type != \"CERTIFICATE\" || len(block.Bytes) == 0 {\n\t\t\treturn false\n\t\t}\n\t\tcert, err := x509.ParseCertificate(block.Bytes)\n\t\tif err != nil {\n\t\t\tlog.Info(\"Failed to parse PEM block as 
X.509 certificate\", \"error\", err.Error())\n\t\t\treturn false\n\t\t}\n\t\t// Check if the certificate has the ClientAuth EKU\n\t\treturn isClientCertificate(cert)\n\t})\n\treturn !hasClientCert\n}\n\n// searchPEM parses the given PEM data and applies the visitor function to each\n// PEM block found. If the visitor function returns true for any block, the search\n// stops and searchPEM returns true. If no blocks cause the visitor to return true,\n// searchPEM returns false.\nfunc searchPEM(data []byte, visitor func(*pem.Block) bool) bool {\n\tif visitor == nil {\n\t\treturn false\n\t}\n\t// Parse the PEM encoded certificate chain\n\tvar block *pem.Block\n\trest := data\n\tfor {\n\t\tblock, rest = pem.Decode(rest)\n\t\tif block == nil {\n\t\t\tbreak\n\t\t}\n\t\tif visitor(block) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// isClientCertificate checks if the given certificate is a client certificate\n// by checking if it has the ClientAuth EKU.\nfunc isClientCertificate(cert *x509.Certificate) bool {\n\tif cert == nil {\n\t\treturn false\n\t}\n\t// Skip CA certificates\n\tif cert.IsCA {\n\t\treturn false\n\t}\n\t// Check if the certificate has the ClientAuth EKU\n\treturn slices.Contains(cert.ExtKeyUsage, x509.ExtKeyUsageClientAuth)\n}\n"
  },
  {
    "path": "pkg/client/client_cyberark_convertdatareadings_test.go",
    "content": "package client\n\nimport (\n\t\"crypto/ecdsa\"\n\t\"crypto/elliptic\"\n\t\"crypto/rand\"\n\t\"crypto/x509\"\n\t\"crypto/x509/pkix\"\n\t\"encoding/base64\"\n\t\"encoding/pem\"\n\t\"math/big\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\tcorev1 \"k8s.io/api/core/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/version\"\n\t\"k8s.io/klog/v2/ktesting\"\n\n\t\"github.com/jetstack/preflight/api\"\n\t\"github.com/jetstack/preflight/internal/cyberark/dataupload\"\n\tpreflightversion \"github.com/jetstack/preflight/pkg/version\"\n)\n\n// TestBaseSnapshotFromOptions tests the baseSnapshotFromOptions function.\nfunc TestBaseSnapshotFromOptions(t *testing.T) {\n\ttype testCase struct {\n\t\tname    string\n\t\toptions Options\n\t\twant    dataupload.Snapshot\n\t}\n\ttests := []testCase{\n\t\t{\n\t\t\tname: \"ClusterName and ClusterDescription are used, OrgID and ClusterID\",\n\t\t\toptions: Options{\n\t\t\t\tOrgID:              \"unused-org-id\",\n\t\t\t\tClusterID:          \"unused-cluster-id\",\n\t\t\t\tClusterName:        \"some-cluster-name\",\n\t\t\t\tClusterDescription: \"some-cluster-description\",\n\t\t\t},\n\t\t\twant: dataupload.Snapshot{\n\t\t\t\tClusterName:        \"some-cluster-name\",\n\t\t\t\tClusterDescription: \"some-cluster-description\",\n\t\t\t\tAgentVersion:       preflightversion.PreflightVersion,\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tgot := baseSnapshotFromOptions(tc.options)\n\t\t\trequire.Equal(t, tc.want, got)\n\t\t})\n\t}\n}\n\n// TestExtractServerVersionFromReading tests the extractServerVersionFromReading function.\nfunc TestExtractServerVersionFromReading(t *testing.T) {\n\ttype testCase struct {\n\t\tname             string\n\t\treading          
*api.DataReading\n\t\texpectedSnapshot dataupload.Snapshot\n\t\texpectError      string\n\t}\n\ttests := []testCase{\n\t\t{\n\t\t\tname:        \"nil reading\",\n\t\t\texpectError: `programmer mistake: the DataReading must not be nil`,\n\t\t},\n\t\t{\n\t\t\tname: \"nil data\",\n\t\t\treading: &api.DataReading{\n\t\t\t\tDataGatherer: \"ark/discovery\",\n\t\t\t\tData:         nil,\n\t\t\t},\n\t\t\texpectError: `programmer mistake: the DataReading must have data type *api.DiscoveryData. This DataReading (ark/discovery) has data type <nil>`,\n\t\t},\n\t\t{\n\t\t\tname: \"wrong data type\",\n\t\t\treading: &api.DataReading{\n\t\t\t\tDataGatherer: \"ark/discovery\",\n\t\t\t\tData:         &api.DynamicData{},\n\t\t\t},\n\t\t\texpectError: `programmer mistake: the DataReading must have data type *api.DiscoveryData. This DataReading (ark/discovery) has data type *api.DynamicData`,\n\t\t},\n\t\t{\n\t\t\tname: \"nil server version\",\n\t\t\treading: &api.DataReading{\n\t\t\t\tDataGatherer: \"ark/discovery\",\n\t\t\t\tData:         &api.DiscoveryData{},\n\t\t\t},\n\t\t\texpectedSnapshot: dataupload.Snapshot{},\n\t\t},\n\t\t{\n\t\t\tname: \"happy path\",\n\t\t\treading: &api.DataReading{\n\t\t\t\tDataGatherer: \"ark/discovery\",\n\t\t\t\tData: &api.DiscoveryData{\n\t\t\t\t\tClusterID: \"success-cluster-id\",\n\t\t\t\t\tServerVersion: &version.Info{\n\t\t\t\t\t\tGitVersion: \"v1.21.0\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSnapshot: dataupload.Snapshot{\n\t\t\t\tClusterID:  \"success-cluster-id\",\n\t\t\t\tK8SVersion: \"v1.21.0\",\n\t\t\t},\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tvar snapshot dataupload.Snapshot\n\t\t\terr := extractClusterIDAndServerVersionFromReading(test.reading, &snapshot)\n\t\t\tif test.expectError != \"\" {\n\t\t\t\tassert.EqualError(t, err, test.expectError)\n\t\t\t\tassert.Equal(t, dataupload.Snapshot{}, snapshot)\n\t\t\t\treturn\n\t\t\t}\n\t\t\trequire.NoError(t, 
err)\n\t\t\tassert.Equal(t, test.expectedSnapshot, snapshot)\n\t\t})\n\t}\n}\n\n// TestExtractOIDCFromReading tests the extractOIDCFromReading function.\nfunc TestExtractOIDCFromReading(t *testing.T) {\n\ttype testCase struct {\n\t\tname             string\n\t\treading          *api.DataReading\n\t\texpectedSnapshot dataupload.Snapshot\n\t\texpectError      string\n\t}\n\ttests := []testCase{\n\t\t{\n\t\t\tname:        \"nil reading\",\n\t\t\texpectError: `programmer mistake: the DataReading must not be nil`,\n\t\t},\n\t\t{\n\t\t\tname: \"nil data\",\n\t\t\treading: &api.DataReading{\n\t\t\t\tDataGatherer: \"ark/oidc\",\n\t\t\t\tData:         nil,\n\t\t\t},\n\t\t\texpectError: `programmer mistake: the DataReading must have data type *api.OIDCDiscoveryData. This DataReading (ark/oidc) has data type <nil>`,\n\t\t},\n\t\t{\n\t\t\tname: \"wrong data type\",\n\t\t\treading: &api.DataReading{\n\t\t\t\tDataGatherer: \"ark/oidc\",\n\t\t\t\tData:         &api.DiscoveryData{},\n\t\t\t},\n\t\t\texpectError: `programmer mistake: the DataReading must have data type *api.OIDCDiscoveryData. 
This DataReading (ark/oidc) has data type *api.DiscoveryData`,\n\t\t},\n\t\t{\n\t\t\tname: \"happy path\",\n\t\t\treading: &api.DataReading{\n\t\t\t\tDataGatherer: \"ark/oidc\",\n\t\t\t\tData: &api.OIDCDiscoveryData{\n\t\t\t\t\tOIDCConfig:      map[string]any{\"issuer\": \"https://example.com\"},\n\t\t\t\t\tOIDCConfigError: \"oidc-err\",\n\t\t\t\t\tJWKS:            map[string]any{\"keys\": []any{}},\n\t\t\t\t\tJWKSError:       \"jwks-err\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSnapshot: dataupload.Snapshot{\n\t\t\t\tOIDCConfig:      map[string]any{\"issuer\": \"https://example.com\"},\n\t\t\t\tOIDCConfigError: \"oidc-err\",\n\t\t\t\tJWKS:            map[string]any{\"keys\": []any{}},\n\t\t\t\tJWKSError:       \"jwks-err\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tvar snapshot dataupload.Snapshot\n\t\t\terr := extractOIDCFromReading(test.reading, &snapshot)\n\t\t\tif test.expectError != \"\" {\n\t\t\t\tassert.EqualError(t, err, test.expectError)\n\t\t\t\tassert.Equal(t, dataupload.Snapshot{}, snapshot)\n\t\t\t\treturn\n\t\t\t}\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, test.expectedSnapshot, snapshot)\n\t\t})\n\t}\n}\n\n// TestExtractResourceListFromReading tests the extractResourceListFromReading function.\nfunc TestExtractResourceListFromReading(t *testing.T) {\n\ttype testCase struct {\n\t\tname             string\n\t\treading          *api.DataReading\n\t\texpectedNumItems int\n\t\texpectError      string\n\t}\n\ttests := []testCase{\n\t\t{\n\t\t\tname:        \"nil reading\",\n\t\t\texpectError: `programmer mistake: the DataReading must not be nil`,\n\t\t},\n\t\t{\n\t\t\tname: \"nil data\",\n\t\t\treading: &api.DataReading{\n\t\t\t\tDataGatherer: \"ark/namespaces\",\n\t\t\t\tData:         nil,\n\t\t\t},\n\t\t\texpectError: `programmer mistake: the DataReading must have data type *api.DynamicData. 
` +\n\t\t\t\t`This DataReading (ark/namespaces) has data type <nil>`,\n\t\t},\n\t\t{\n\t\t\tname: \"wrong data type\",\n\t\t\treading: &api.DataReading{\n\t\t\t\tDataGatherer: \"ark/namespaces\",\n\t\t\t\tData:         &api.DiscoveryData{},\n\t\t\t},\n\t\t\texpectError: `programmer mistake: the DataReading must have data type *api.DynamicData. ` +\n\t\t\t\t`This DataReading (ark/namespaces) has data type *api.DiscoveryData`,\n\t\t},\n\t\t{\n\t\t\tname: \"nil items\",\n\t\t\treading: &api.DataReading{\n\t\t\t\tDataGatherer: \"ark/namespaces\",\n\t\t\t\tData:         &api.DynamicData{},\n\t\t\t},\n\t\t\texpectedNumItems: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"empty items\",\n\t\t\treading: &api.DataReading{\n\t\t\t\tDataGatherer: \"ark/namespaces\",\n\t\t\t\tData: &api.DynamicData{\n\t\t\t\t\tItems: []*api.GatheredResource{},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedNumItems: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"wrong item resource type\",\n\t\t\treading: &api.DataReading{\n\t\t\t\tDataGatherer: \"ark/namespaces\",\n\t\t\t\tData: &api.DynamicData{\n\t\t\t\t\tItems: []*api.GatheredResource{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tResource: &api.DiscoveryData{},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectError: `programmer mistake: the DynamicData items must have Resource type runtime.Object. 
` +\n\t\t\t\t`This item (0) has Resource type *api.DiscoveryData`,\n\t\t},\n\t\t{\n\t\t\tname: \"happy path\",\n\t\t\treading: &api.DataReading{\n\t\t\t\tDataGatherer: \"ark/namespaces\",\n\t\t\t\tData: &api.DynamicData{\n\t\t\t\t\tItems: []*api.GatheredResource{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tResource: &unstructured.Unstructured{\n\t\t\t\t\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\t\t\t\t\"kind\": \"Namespace\",\n\t\t\t\t\t\t\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\t\t\t\t\t\t\"name\": \"default\",\n\t\t\t\t\t\t\t\t\t\t\"uid\":  \"uid-default\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tResource: &unstructured.Unstructured{\n\t\t\t\t\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\t\t\t\t\"kind\": \"Namespace\",\n\t\t\t\t\t\t\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\t\t\t\t\t\t\"name\": \"kube-system\",\n\t\t\t\t\t\t\t\t\t\t\"uid\":  \"uid-kube-system\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// Deleted resource should be ignored\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tDeletedAt: api.Time{Time: time.Now()},\n\t\t\t\t\t\t\tResource: &unstructured.Unstructured{\n\t\t\t\t\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\t\t\t\t\"kind\": \"Namespace\",\n\t\t\t\t\t\t\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\t\t\t\t\t\t\"name\": \"kube-system\",\n\t\t\t\t\t\t\t\t\t\t\"uid\":  \"uid-kube-system\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedNumItems: 2,\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tvar resources []runtime.Object\n\t\t\terr := extractResourceListFromReading(test.reading, &resources)\n\t\t\tif test.expectError != \"\" {\n\t\t\t\tassert.EqualError(t, err, test.expectError)\n\t\t\t\tassert.Nil(t, resources)\n\t\t\t\treturn\n\t\t\t}\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NotNil(t, resources)\n\t\t\tassert.Len(t, 
resources, test.expectedNumItems)\n\t\t})\n\t}\n}\n\n// TestConvertDataReadings_ConfigMaps tests that configmaps are correctly converted.\nfunc TestConvertDataReadings_ConfigMaps(t *testing.T) {\n\textractorFunctions := map[string]func(*api.DataReading, *dataupload.Snapshot) error{\n\t\t\"ark/discovery\": extractClusterIDAndServerVersionFromReading,\n\t\t\"ark/configmaps\": func(reading *api.DataReading, snapshot *dataupload.Snapshot) error {\n\t\t\treturn extractResourceListFromReading(reading, &snapshot.ConfigMaps)\n\t\t},\n\t}\n\n\treadings := []*api.DataReading{\n\t\t{\n\t\t\tDataGatherer: \"ark/discovery\",\n\t\t\tData: &api.DiscoveryData{\n\t\t\t\tClusterID: \"test-cluster-id\",\n\t\t\t\tServerVersion: &version.Info{\n\t\t\t\t\tGitVersion: \"v1.21.0\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tDataGatherer: \"ark/configmaps\",\n\t\t\tData: &api.DynamicData{\n\t\t\t\tItems: []*api.GatheredResource{\n\t\t\t\t\t{\n\t\t\t\t\t\tResource: &unstructured.Unstructured{\n\t\t\t\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\t\t\t\t\t\"kind\":       \"ConfigMap\",\n\t\t\t\t\t\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"name\":      \"conjur-connect\",\n\t\t\t\t\t\t\t\t\t\"namespace\": \"conjur\",\n\t\t\t\t\t\t\t\t\t\"labels\": map[string]any{\n\t\t\t\t\t\t\t\t\t\t\"conjur.org/name\": \"conjur-connect-configmap\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"data\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"config.yaml\": \"some-config-data\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tResource: &unstructured.Unstructured{\n\t\t\t\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\t\t\t\t\t\"kind\":       \"ConfigMap\",\n\t\t\t\t\t\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"name\":      \"another-configmap\",\n\t\t\t\t\t\t\t\t\t\"namespace\": \"default\",\n\t\t\t\t\t\t\t\t\t\"labels\": 
map[string]any{\n\t\t\t\t\t\t\t\t\t\t\"conjur.org/name\": \"conjur-connect-configmap\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"data\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"setting\": \"value\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t// Deleted configmap should be ignored\n\t\t\t\t\t{\n\t\t\t\t\t\tDeletedAt: api.Time{Time: time.Now()},\n\t\t\t\t\t\tResource: &unstructured.Unstructured{\n\t\t\t\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\t\t\t\t\t\"kind\":       \"ConfigMap\",\n\t\t\t\t\t\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"name\":      \"deleted-configmap\",\n\t\t\t\t\t\t\t\t\t\"namespace\": \"default\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tvar snapshot dataupload.Snapshot\n\terr := convertDataReadings(extractorFunctions, readings, &snapshot)\n\trequire.NoError(t, err)\n\n\t// Verify the snapshot contains the expected data\n\tassert.Equal(t, \"test-cluster-id\", snapshot.ClusterID)\n\tassert.Equal(t, \"v1.21.0\", snapshot.K8SVersion)\n\trequire.Len(t, snapshot.ConfigMaps, 2, \"should have 2 configmaps (deleted one should be excluded)\")\n\n\t// Verify the first configmap\n\tcm1, ok := snapshot.ConfigMaps[0].(*unstructured.Unstructured)\n\trequire.True(t, ok, \"configmap should be unstructured\")\n\tassert.Equal(t, \"ConfigMap\", cm1.GetKind())\n\tassert.Equal(t, \"conjur-connect\", cm1.GetName())\n\tassert.Equal(t, \"conjur\", cm1.GetNamespace())\n\n\t// Verify the second configmap\n\tcm2, ok := snapshot.ConfigMaps[1].(*unstructured.Unstructured)\n\trequire.True(t, ok, \"configmap should be unstructured\")\n\tassert.Equal(t, \"ConfigMap\", cm2.GetKind())\n\tassert.Equal(t, \"another-configmap\", cm2.GetName())\n\tassert.Equal(t, \"default\", cm2.GetNamespace())\n}\n\n// TestConvertDataReadings_ExternalSecrets tests that externalsecrets are correctly converted.\nfunc 
TestConvertDataReadings_ExternalSecrets(t *testing.T) {\n\textractorFunctions := map[string]func(*api.DataReading, *dataupload.Snapshot) error{\n\t\t\"ark/discovery\": extractClusterIDAndServerVersionFromReading,\n\t\t\"ark/esoexternalsecrets\": func(reading *api.DataReading, snapshot *dataupload.Snapshot) error {\n\t\t\treturn extractResourceListFromReading(reading, &snapshot.ExternalSecrets)\n\t\t},\n\t}\n\n\treadings := []*api.DataReading{\n\t\t{\n\t\t\tDataGatherer: \"ark/discovery\",\n\t\t\tData: &api.DiscoveryData{\n\t\t\t\tClusterID: \"test-cluster-id\",\n\t\t\t\tServerVersion: &version.Info{\n\t\t\t\t\tGitVersion: \"v1.21.0\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tDataGatherer: \"ark/esoexternalsecrets\",\n\t\t\tData: &api.DynamicData{\n\t\t\t\tItems: []*api.GatheredResource{\n\t\t\t\t\t{\n\t\t\t\t\t\tResource: &unstructured.Unstructured{\n\t\t\t\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\t\t\t\"apiVersion\": \"external-secrets.io/v1\",\n\t\t\t\t\t\t\t\t\"kind\":       \"ExternalSecret\",\n\t\t\t\t\t\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"name\":      \"my-external-secret\",\n\t\t\t\t\t\t\t\t\t\"namespace\": \"default\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"spec\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"refreshInterval\": \"1h\",\n\t\t\t\t\t\t\t\t\t\"secretStoreRef\": map[string]any{\n\t\t\t\t\t\t\t\t\t\t\"name\": \"my-secret-store\",\n\t\t\t\t\t\t\t\t\t\t\"kind\": \"SecretStore\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tResource: &unstructured.Unstructured{\n\t\t\t\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\t\t\t\"apiVersion\": \"external-secrets.io/v1\",\n\t\t\t\t\t\t\t\t\"kind\":       \"ExternalSecret\",\n\t\t\t\t\t\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"name\":      \"another-external-secret\",\n\t\t\t\t\t\t\t\t\t\"namespace\": \"production\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"spec\": 
map[string]any{\n\t\t\t\t\t\t\t\t\t\"refreshInterval\": \"30m\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t// Deleted externalsecret should be ignored\n\t\t\t\t\t{\n\t\t\t\t\t\tDeletedAt: api.Time{Time: time.Now()},\n\t\t\t\t\t\tResource: &unstructured.Unstructured{\n\t\t\t\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\t\t\t\"apiVersion\": \"external-secrets.io/v1\",\n\t\t\t\t\t\t\t\t\"kind\":       \"ExternalSecret\",\n\t\t\t\t\t\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"name\":      \"deleted-external-secret\",\n\t\t\t\t\t\t\t\t\t\"namespace\": \"default\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tvar snapshot dataupload.Snapshot\n\terr := convertDataReadings(extractorFunctions, readings, &snapshot)\n\trequire.NoError(t, err)\n\n\t// Verify the snapshot contains the expected data\n\tassert.Equal(t, \"test-cluster-id\", snapshot.ClusterID)\n\tassert.Equal(t, \"v1.21.0\", snapshot.K8SVersion)\n\trequire.Len(t, snapshot.ExternalSecrets, 2, \"should have 2 externalsecrets (deleted one should be excluded)\")\n\n\t// Verify the first externalsecret\n\tes1, ok := snapshot.ExternalSecrets[0].(*unstructured.Unstructured)\n\trequire.True(t, ok, \"externalsecret should be unstructured\")\n\tassert.Equal(t, \"ExternalSecret\", es1.GetKind())\n\tassert.Equal(t, \"my-external-secret\", es1.GetName())\n\tassert.Equal(t, \"default\", es1.GetNamespace())\n\n\t// Verify the second externalsecret\n\tes2, ok := snapshot.ExternalSecrets[1].(*unstructured.Unstructured)\n\trequire.True(t, ok, \"externalsecret should be unstructured\")\n\tassert.Equal(t, \"ExternalSecret\", es2.GetKind())\n\tassert.Equal(t, \"another-external-secret\", es2.GetName())\n\tassert.Equal(t, \"production\", es2.GetNamespace())\n}\n\n// TestConvertDataReadings_SecretStores tests that secretstores are correctly converted.\nfunc TestConvertDataReadings_SecretStores(t *testing.T) 
{\n\textractorFunctions := map[string]func(*api.DataReading, *dataupload.Snapshot) error{\n\t\t\"ark/discovery\": extractClusterIDAndServerVersionFromReading,\n\t\t\"ark/esosecretstores\": func(reading *api.DataReading, snapshot *dataupload.Snapshot) error {\n\t\t\treturn extractResourceListFromReading(reading, &snapshot.SecretStores)\n\t\t},\n\t}\n\n\treadings := []*api.DataReading{\n\t\t{\n\t\t\tDataGatherer: \"ark/discovery\",\n\t\t\tData: &api.DiscoveryData{\n\t\t\t\tClusterID: \"test-cluster-id\",\n\t\t\t\tServerVersion: &version.Info{\n\t\t\t\t\tGitVersion: \"v1.21.0\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tDataGatherer: \"ark/esosecretstores\",\n\t\t\tData: &api.DynamicData{\n\t\t\t\tItems: []*api.GatheredResource{\n\t\t\t\t\t{\n\t\t\t\t\t\tResource: &unstructured.Unstructured{\n\t\t\t\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\t\t\t\"apiVersion\": \"external-secrets.io/v1\",\n\t\t\t\t\t\t\t\t\"kind\":       \"SecretStore\",\n\t\t\t\t\t\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"name\":      \"my-secret-store\",\n\t\t\t\t\t\t\t\t\t\"namespace\": \"default\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"spec\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"provider\": map[string]any{\n\t\t\t\t\t\t\t\t\t\t\"fake\": map[string]any{},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tResource: &unstructured.Unstructured{\n\t\t\t\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\t\t\t\"apiVersion\": \"external-secrets.io/v1\",\n\t\t\t\t\t\t\t\t\"kind\":       \"SecretStore\",\n\t\t\t\t\t\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"name\":      \"aws-secret-store\",\n\t\t\t\t\t\t\t\t\t\"namespace\": \"production\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"spec\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"provider\": map[string]any{\n\t\t\t\t\t\t\t\t\t\t\"aws\": map[string]any{},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t// Deleted 
secretstore should be ignored\n\t\t\t\t\t{\n\t\t\t\t\t\tDeletedAt: api.Time{Time: time.Now()},\n\t\t\t\t\t\tResource: &unstructured.Unstructured{\n\t\t\t\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\t\t\t\"apiVersion\": \"external-secrets.io/v1\",\n\t\t\t\t\t\t\t\t\"kind\":       \"SecretStore\",\n\t\t\t\t\t\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"name\":      \"deleted-secret-store\",\n\t\t\t\t\t\t\t\t\t\"namespace\": \"default\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tvar snapshot dataupload.Snapshot\n\terr := convertDataReadings(extractorFunctions, readings, &snapshot)\n\trequire.NoError(t, err)\n\n\t// Verify the snapshot contains the expected data\n\tassert.Equal(t, \"test-cluster-id\", snapshot.ClusterID)\n\tassert.Equal(t, \"v1.21.0\", snapshot.K8SVersion)\n\trequire.Len(t, snapshot.SecretStores, 2, \"should have 2 secretstores (deleted one should be excluded)\")\n\n\t// Verify the first secretstore\n\tss1, ok := snapshot.SecretStores[0].(*unstructured.Unstructured)\n\trequire.True(t, ok, \"secretstore should be unstructured\")\n\tassert.Equal(t, \"SecretStore\", ss1.GetKind())\n\tassert.Equal(t, \"my-secret-store\", ss1.GetName())\n\tassert.Equal(t, \"default\", ss1.GetNamespace())\n\n\t// Verify the second secretstore\n\tss2, ok := snapshot.SecretStores[1].(*unstructured.Unstructured)\n\trequire.True(t, ok, \"secretstore should be unstructured\")\n\tassert.Equal(t, \"SecretStore\", ss2.GetKind())\n\tassert.Equal(t, \"aws-secret-store\", ss2.GetName())\n\tassert.Equal(t, \"production\", ss2.GetNamespace())\n}\n\n// TestConvertDataReadings_ClusterExternalSecrets tests that clusterexternalsecrets are correctly converted.\nfunc TestConvertDataReadings_ClusterExternalSecrets(t *testing.T) {\n\textractorFunctions := map[string]func(*api.DataReading, *dataupload.Snapshot) error{\n\t\t\"ark/discovery\": 
extractClusterIDAndServerVersionFromReading,\n\t\t\"ark/esoclusterexternalsecrets\": func(reading *api.DataReading, snapshot *dataupload.Snapshot) error {\n\t\t\treturn extractResourceListFromReading(reading, &snapshot.ClusterExternalSecrets)\n\t\t},\n\t}\n\n\treadings := []*api.DataReading{\n\t\t{\n\t\t\tDataGatherer: \"ark/discovery\",\n\t\t\tData: &api.DiscoveryData{\n\t\t\t\tClusterID: \"test-cluster-id\",\n\t\t\t\tServerVersion: &version.Info{\n\t\t\t\t\tGitVersion: \"v1.21.0\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tDataGatherer: \"ark/esoclusterexternalsecrets\",\n\t\t\tData: &api.DynamicData{\n\t\t\t\tItems: []*api.GatheredResource{\n\t\t\t\t\t{\n\t\t\t\t\t\tResource: &unstructured.Unstructured{\n\t\t\t\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\t\t\t\"apiVersion\": \"external-secrets.io/v1\",\n\t\t\t\t\t\t\t\t\"kind\":       \"ClusterExternalSecret\",\n\t\t\t\t\t\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"name\": \"my-cluster-external-secret\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"spec\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"externalSecretSpec\": map[string]any{\n\t\t\t\t\t\t\t\t\t\t\"secretStoreRef\": map[string]any{\n\t\t\t\t\t\t\t\t\t\t\t\"name\": \"my-cluster-secret-store\",\n\t\t\t\t\t\t\t\t\t\t\t\"kind\": \"ClusterSecretStore\",\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tResource: &unstructured.Unstructured{\n\t\t\t\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\t\t\t\"apiVersion\": \"external-secrets.io/v1\",\n\t\t\t\t\t\t\t\t\"kind\":       \"ClusterExternalSecret\",\n\t\t\t\t\t\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"name\": \"aws-cluster-external-secret\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"spec\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"externalSecretSpec\": map[string]any{\n\t\t\t\t\t\t\t\t\t\t\"secretStoreRef\": map[string]any{\n\t\t\t\t\t\t\t\t\t\t\t\"name\": 
\"aws-cluster-secret-store\",\n\t\t\t\t\t\t\t\t\t\t\t\"kind\": \"ClusterSecretStore\",\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t// Deleted clusterexternalsecret should be ignored\n\t\t\t\t\t{\n\t\t\t\t\t\tDeletedAt: api.Time{Time: time.Now()},\n\t\t\t\t\t\tResource: &unstructured.Unstructured{\n\t\t\t\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\t\t\t\"apiVersion\": \"external-secrets.io/v1\",\n\t\t\t\t\t\t\t\t\"kind\":       \"ClusterExternalSecret\",\n\t\t\t\t\t\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"name\": \"deleted-cluster-external-secret\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tvar snapshot dataupload.Snapshot\n\terr := convertDataReadings(extractorFunctions, readings, &snapshot)\n\trequire.NoError(t, err)\n\n\t// Verify the snapshot contains the expected data\n\tassert.Equal(t, \"test-cluster-id\", snapshot.ClusterID)\n\tassert.Equal(t, \"v1.21.0\", snapshot.K8SVersion)\n\trequire.Len(t, snapshot.ClusterExternalSecrets, 2, \"should have 2 clusterexternalsecrets (deleted one should be excluded)\")\n\n\t// Verify the first clusterexternalsecret\n\tces1, ok := snapshot.ClusterExternalSecrets[0].(*unstructured.Unstructured)\n\trequire.True(t, ok, \"clusterexternalsecret should be unstructured\")\n\tassert.Equal(t, \"ClusterExternalSecret\", ces1.GetKind())\n\tassert.Equal(t, \"my-cluster-external-secret\", ces1.GetName())\n\n\t// Verify the second clusterexternalsecret\n\tces2, ok := snapshot.ClusterExternalSecrets[1].(*unstructured.Unstructured)\n\trequire.True(t, ok, \"clusterexternalsecret should be unstructured\")\n\tassert.Equal(t, \"ClusterExternalSecret\", ces2.GetKind())\n\tassert.Equal(t, \"aws-cluster-external-secret\", ces2.GetName())\n}\n\n// TestConvertDataReadings_ClusterSecretStores tests that clustersecretstores are correctly converted.\nfunc 
TestConvertDataReadings_ClusterSecretStores(t *testing.T) {\n\textractorFunctions := map[string]func(*api.DataReading, *dataupload.Snapshot) error{\n\t\t\"ark/discovery\": extractClusterIDAndServerVersionFromReading,\n\t\t\"ark/esoclustersecretstores\": func(reading *api.DataReading, snapshot *dataupload.Snapshot) error {\n\t\t\treturn extractResourceListFromReading(reading, &snapshot.ClusterSecretStores)\n\t\t},\n\t}\n\n\treadings := []*api.DataReading{\n\t\t{\n\t\t\tDataGatherer: \"ark/discovery\",\n\t\t\tData: &api.DiscoveryData{\n\t\t\t\tClusterID: \"test-cluster-id\",\n\t\t\t\tServerVersion: &version.Info{\n\t\t\t\t\tGitVersion: \"v1.21.0\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tDataGatherer: \"ark/esoclustersecretstores\",\n\t\t\tData: &api.DynamicData{\n\t\t\t\tItems: []*api.GatheredResource{\n\t\t\t\t\t{\n\t\t\t\t\t\tResource: &unstructured.Unstructured{\n\t\t\t\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\t\t\t\"apiVersion\": \"external-secrets.io/v1\",\n\t\t\t\t\t\t\t\t\"kind\":       \"ClusterSecretStore\",\n\t\t\t\t\t\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"name\": \"my-cluster-secret-store\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"spec\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"provider\": map[string]any{\n\t\t\t\t\t\t\t\t\t\t\"fake\": map[string]any{},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tResource: &unstructured.Unstructured{\n\t\t\t\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\t\t\t\"apiVersion\": \"external-secrets.io/v1\",\n\t\t\t\t\t\t\t\t\"kind\":       \"ClusterSecretStore\",\n\t\t\t\t\t\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"name\": \"aws-cluster-secret-store\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"spec\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"provider\": map[string]any{\n\t\t\t\t\t\t\t\t\t\t\"aws\": map[string]any{},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t// Deleted 
clustersecretstore should be ignored\n\t\t\t\t\t{\n\t\t\t\t\t\tDeletedAt: api.Time{Time: time.Now()},\n\t\t\t\t\t\tResource: &unstructured.Unstructured{\n\t\t\t\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\t\t\t\"apiVersion\": \"external-secrets.io/v1\",\n\t\t\t\t\t\t\t\t\"kind\":       \"ClusterSecretStore\",\n\t\t\t\t\t\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"name\": \"deleted-cluster-secret-store\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tvar snapshot dataupload.Snapshot\n\terr := convertDataReadings(extractorFunctions, readings, &snapshot)\n\trequire.NoError(t, err)\n\n\t// Verify the snapshot contains the expected data\n\tassert.Equal(t, \"test-cluster-id\", snapshot.ClusterID)\n\tassert.Equal(t, \"v1.21.0\", snapshot.K8SVersion)\n\trequire.Len(t, snapshot.ClusterSecretStores, 2, \"should have 2 clustersecretstores (deleted one should be excluded)\")\n\n\t// Verify the first clustersecretstore\n\tcss1, ok := snapshot.ClusterSecretStores[0].(*unstructured.Unstructured)\n\trequire.True(t, ok, \"clustersecretstore should be unstructured\")\n\tassert.Equal(t, \"ClusterSecretStore\", css1.GetKind())\n\tassert.Equal(t, \"my-cluster-secret-store\", css1.GetName())\n\n\t// Verify the second clustersecretstore\n\tcss2, ok := snapshot.ClusterSecretStores[1].(*unstructured.Unstructured)\n\trequire.True(t, ok, \"clustersecretstore should be unstructured\")\n\tassert.Equal(t, \"ClusterSecretStore\", css2.GetKind())\n\tassert.Equal(t, \"aws-cluster-secret-store\", css2.GetName())\n}\n\n// TestConvertDataReadings_ServiceAccounts tests that serviceaccounts are correctly converted.\nfunc TestConvertDataReadings_ServiceAccounts(t *testing.T) {\n\textractorFunctions := map[string]func(*api.DataReading, *dataupload.Snapshot) error{\n\t\t\"ark/discovery\": extractClusterIDAndServerVersionFromReading,\n\t\t\"ark/serviceaccounts\": func(reading *api.DataReading, snapshot *dataupload.Snapshot) error 
{\n\t\t\treturn extractResourceListFromReading(reading, &snapshot.ServiceAccounts)\n\t\t},\n\t}\n\n\treadings := []*api.DataReading{\n\t\t{\n\t\t\tDataGatherer: \"ark/discovery\",\n\t\t\tData: &api.DiscoveryData{\n\t\t\t\tClusterID: \"test-cluster-id\",\n\t\t\t\tServerVersion: &version.Info{\n\t\t\t\t\tGitVersion: \"v1.22.0\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tDataGatherer: \"ark/serviceaccounts\",\n\t\t\tData: &api.DynamicData{\n\t\t\t\tItems: []*api.GatheredResource{\n\t\t\t\t\t{\n\t\t\t\t\t\tResource: &unstructured.Unstructured{\n\t\t\t\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\t\t\t\t\t\"kind\":       \"ServiceAccount\",\n\t\t\t\t\t\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"name\":      \"default\",\n\t\t\t\t\t\t\t\t\t\"namespace\": \"default\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tResource: &unstructured.Unstructured{\n\t\t\t\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\t\t\t\t\t\"kind\":       \"ServiceAccount\",\n\t\t\t\t\t\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"name\":      \"app-sa\",\n\t\t\t\t\t\t\t\t\t\"namespace\": \"production\",\n\t\t\t\t\t\t\t\t\t\"labels\": map[string]any{\n\t\t\t\t\t\t\t\t\t\t\"app\": \"myapp\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tvar snapshot dataupload.Snapshot\n\terr := convertDataReadings(extractorFunctions, readings, &snapshot)\n\trequire.NoError(t, err)\n\n\tassert.Equal(t, \"test-cluster-id\", snapshot.ClusterID)\n\tassert.Equal(t, \"v1.22.0\", snapshot.K8SVersion)\n\trequire.Len(t, snapshot.ServiceAccounts, 2)\n\n\tsa1, ok := snapshot.ServiceAccounts[0].(*unstructured.Unstructured)\n\trequire.True(t, ok)\n\tassert.Equal(t, \"ServiceAccount\", sa1.GetKind())\n\tassert.Equal(t, \"default\", sa1.GetName())\n}\n\n// TestConvertDataReadings_Roles tests that roles are 
correctly converted.\nfunc TestConvertDataReadings_Roles(t *testing.T) {\n\textractorFunctions := map[string]func(*api.DataReading, *dataupload.Snapshot) error{\n\t\t\"ark/discovery\": extractClusterIDAndServerVersionFromReading,\n\t\t\"ark/roles\": func(reading *api.DataReading, snapshot *dataupload.Snapshot) error {\n\t\t\treturn extractResourceListFromReading(reading, &snapshot.Roles)\n\t\t},\n\t}\n\n\treadings := []*api.DataReading{\n\t\t{\n\t\t\tDataGatherer: \"ark/discovery\",\n\t\t\tData: &api.DiscoveryData{\n\t\t\t\tClusterID: \"rbac-cluster\",\n\t\t\t\tServerVersion: &version.Info{\n\t\t\t\t\tGitVersion: \"v1.23.0\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tDataGatherer: \"ark/roles\",\n\t\t\tData: &api.DynamicData{\n\t\t\t\tItems: []*api.GatheredResource{\n\t\t\t\t\t{\n\t\t\t\t\t\tResource: &unstructured.Unstructured{\n\t\t\t\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\t\t\t\"apiVersion\": \"rbac.authorization.k8s.io/v1\",\n\t\t\t\t\t\t\t\t\"kind\":       \"Role\",\n\t\t\t\t\t\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"name\":      \"pod-reader\",\n\t\t\t\t\t\t\t\t\t\"namespace\": \"default\",\n\t\t\t\t\t\t\t\t\t\"labels\": map[string]any{\n\t\t\t\t\t\t\t\t\t\t\"rbac.authorization.k8s.io/aggregate-to-view\": \"true\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"rules\": []any{\n\t\t\t\t\t\t\t\t\tmap[string]any{\n\t\t\t\t\t\t\t\t\t\t\"apiGroups\": []any{\"\"},\n\t\t\t\t\t\t\t\t\t\t\"resources\": []any{\"pods\"},\n\t\t\t\t\t\t\t\t\t\t\"verbs\":     []any{\"get\", \"list\"},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t// Deleted role should be excluded\n\t\t\t\t\t{\n\t\t\t\t\t\tDeletedAt: api.Time{Time: time.Now()},\n\t\t\t\t\t\tResource: &unstructured.Unstructured{\n\t\t\t\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\t\t\t\"apiVersion\": \"rbac.authorization.k8s.io/v1\",\n\t\t\t\t\t\t\t\t\"kind\":       \"Role\",\n\t\t\t\t\t\t\t\t\"metadata\": 
map[string]any{\n\t\t\t\t\t\t\t\t\t\"name\":      \"deleted-role\",\n\t\t\t\t\t\t\t\t\t\"namespace\": \"default\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tvar snapshot dataupload.Snapshot\n\terr := convertDataReadings(extractorFunctions, readings, &snapshot)\n\trequire.NoError(t, err)\n\n\tassert.Equal(t, \"rbac-cluster\", snapshot.ClusterID)\n\trequire.Len(t, snapshot.Roles, 1, \"deleted role should be excluded\")\n\n\trole, ok := snapshot.Roles[0].(*unstructured.Unstructured)\n\trequire.True(t, ok)\n\tassert.Equal(t, \"Role\", role.GetKind())\n\tassert.Equal(t, \"pod-reader\", role.GetName())\n}\n\n// TestConvertDataReadings_MultipleResources tests conversion with multiple resource types.\nfunc TestConvertDataReadings_MultipleResources(t *testing.T) {\n\textractorFunctions := map[string]func(*api.DataReading, *dataupload.Snapshot) error{\n\t\t\"ark/discovery\": extractClusterIDAndServerVersionFromReading,\n\t\t\"ark/configmaps\": func(reading *api.DataReading, snapshot *dataupload.Snapshot) error {\n\t\t\treturn extractResourceListFromReading(reading, &snapshot.ConfigMaps)\n\t\t},\n\t\t\"ark/serviceaccounts\": func(reading *api.DataReading, snapshot *dataupload.Snapshot) error {\n\t\t\treturn extractResourceListFromReading(reading, &snapshot.ServiceAccounts)\n\t\t},\n\t\t\"ark/deployments\": func(reading *api.DataReading, snapshot *dataupload.Snapshot) error {\n\t\t\treturn extractResourceListFromReading(reading, &snapshot.Deployments)\n\t\t},\n\t}\n\n\treadings := []*api.DataReading{\n\t\t{\n\t\t\tDataGatherer: \"ark/discovery\",\n\t\t\tData: &api.DiscoveryData{\n\t\t\t\tClusterID: \"multi-resource-cluster\",\n\t\t\t\tServerVersion: &version.Info{\n\t\t\t\t\tGitVersion: \"v1.24.0\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tDataGatherer: \"ark/configmaps\",\n\t\t\tData: &api.DynamicData{\n\t\t\t\tItems: []*api.GatheredResource{\n\t\t\t\t\t{\n\t\t\t\t\t\tResource: 
&unstructured.Unstructured{\n\t\t\t\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\t\t\t\t\t\"kind\":       \"ConfigMap\",\n\t\t\t\t\t\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"name\":      \"app-config\",\n\t\t\t\t\t\t\t\t\t\"namespace\": \"default\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tDataGatherer: \"ark/serviceaccounts\",\n\t\t\tData: &api.DynamicData{\n\t\t\t\tItems: []*api.GatheredResource{\n\t\t\t\t\t{\n\t\t\t\t\t\tResource: &unstructured.Unstructured{\n\t\t\t\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\t\t\t\t\t\"kind\":       \"ServiceAccount\",\n\t\t\t\t\t\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"name\":      \"app-sa\",\n\t\t\t\t\t\t\t\t\t\"namespace\": \"default\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tDataGatherer: \"ark/deployments\",\n\t\t\tData: &api.DynamicData{\n\t\t\t\tItems: []*api.GatheredResource{\n\t\t\t\t\t{\n\t\t\t\t\t\tResource: &unstructured.Unstructured{\n\t\t\t\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\t\t\t\"apiVersion\": \"apps/v1\",\n\t\t\t\t\t\t\t\t\"kind\":       \"Deployment\",\n\t\t\t\t\t\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\t\t\t\t\t\"name\":      \"web-app\",\n\t\t\t\t\t\t\t\t\t\"namespace\": \"default\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tvar snapshot dataupload.Snapshot\n\terr := convertDataReadings(extractorFunctions, readings, &snapshot)\n\trequire.NoError(t, err)\n\n\t// Verify all resources are present\n\tassert.Equal(t, \"multi-resource-cluster\", snapshot.ClusterID)\n\tassert.Equal(t, \"v1.24.0\", snapshot.K8SVersion)\n\trequire.Len(t, snapshot.ConfigMaps, 1)\n\trequire.Len(t, snapshot.ServiceAccounts, 1)\n\trequire.Len(t, snapshot.Deployments, 1)\n\n\t// Verify each resource type\n\tcm, ok := 
snapshot.ConfigMaps[0].(*unstructured.Unstructured)\n\trequire.True(t, ok)\n\tassert.Equal(t, \"app-config\", cm.GetName())\n\n\tsa, ok := snapshot.ServiceAccounts[0].(*unstructured.Unstructured)\n\trequire.True(t, ok)\n\tassert.Equal(t, \"app-sa\", sa.GetName())\n\n\tdeploy, ok := snapshot.Deployments[0].(*unstructured.Unstructured)\n\trequire.True(t, ok)\n\tassert.Equal(t, \"web-app\", deploy.GetName())\n}\n\n// TestConvertDataReadings tests the convertDataReadings function.\nfunc TestConvertDataReadings(t *testing.T) {\n\tsimpleExtractorFunctions := map[string]func(*api.DataReading, *dataupload.Snapshot) error{\n\t\t\"ark/discovery\": extractClusterIDAndServerVersionFromReading,\n\t\t\"ark/secrets\": func(reading *api.DataReading, snapshot *dataupload.Snapshot) error {\n\t\t\treturn extractResourceListFromReading(reading, &snapshot.Secrets)\n\t\t},\n\t}\n\tsimpleReadings := []*api.DataReading{\n\t\t{\n\t\t\tDataGatherer: \"ark/discovery\",\n\t\t\tData: &api.DiscoveryData{\n\t\t\t\tClusterID: \"success-cluster-id\",\n\t\t\t\tServerVersion: &version.Info{\n\t\t\t\t\tGitVersion: \"v1.21.0\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tDataGatherer: \"ark/secrets\",\n\t\t\tData: &api.DynamicData{\n\t\t\t\tItems: []*api.GatheredResource{\n\t\t\t\t\t{\n\t\t\t\t\t\tResource: &corev1.Secret{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"app-1\",\n\t\t\t\t\t\t\t\tNamespace: \"team-1\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t// Deleted secret should be ignored\n\t\t\t\t\t{\n\t\t\t\t\t\tDeletedAt: api.Time{Time: time.Now()},\n\t\t\t\t\t\tResource: &corev1.Secret{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"deleted-1\",\n\t\t\t\t\t\t\t\tNamespace: \"team-1\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\ttype testCase struct {\n\t\tname               string\n\t\textractorFunctions map[string]func(*api.DataReading, *dataupload.Snapshot) 
error\n\t\treadings           []*api.DataReading\n\t\texpectedSnapshot   dataupload.Snapshot\n\t\texpectError        string\n\t}\n\ttests := []testCase{\n\t\t{\n\t\t\tname:               \"no extractor functions\",\n\t\t\treadings:           simpleReadings,\n\t\t\textractorFunctions: map[string]func(*api.DataReading, *dataupload.Snapshot) error{},\n\t\t\texpectError:        `unexpected data gatherers, missing: [], unhandled: [ark/discovery ark/secrets]`,\n\t\t},\n\t\t{\n\t\t\tname:               \"nil extractor functions\",\n\t\t\treadings:           simpleReadings,\n\t\t\textractorFunctions: nil,\n\t\t\texpectError:        `unexpected data gatherers, missing: [], unhandled: [ark/discovery ark/secrets]`,\n\t\t},\n\t\t{\n\t\t\tname:               \"empty readings\",\n\t\t\textractorFunctions: simpleExtractorFunctions,\n\t\t\treadings:           []*api.DataReading{},\n\t\t\texpectError:        `unexpected data gatherers, missing: [ark/discovery ark/secrets], unhandled: []`,\n\t\t},\n\t\t{\n\t\t\tname:               \"nil readings\",\n\t\t\textractorFunctions: simpleExtractorFunctions,\n\t\t\treadings:           nil,\n\t\t\texpectError:        `unexpected data gatherers, missing: [ark/discovery ark/secrets], unhandled: []`,\n\t\t},\n\t\t{\n\t\t\tname:               \"extractor function error\",\n\t\t\textractorFunctions: simpleExtractorFunctions,\n\t\t\treadings: []*api.DataReading{\n\t\t\t\t{\n\t\t\t\t\tDataGatherer: \"ark/discovery\",\n\t\t\t\t\tData:         &api.DynamicData{},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectError: `while extracting data reading ark/discovery: programmer mistake: the DataReading must have data type *api.DiscoveryData. 
This DataReading (ark/discovery) has data type *api.DynamicData`,\n\t\t},\n\t\t{\n\t\t\tname:               \"happy path\",\n\t\t\textractorFunctions: simpleExtractorFunctions,\n\t\t\treadings:           simpleReadings,\n\t\t\texpectedSnapshot: dataupload.Snapshot{\n\t\t\t\tClusterID:  \"success-cluster-id\",\n\t\t\t\tK8SVersion: \"v1.21.0\",\n\t\t\t\tSecrets: []runtime.Object{\n\t\t\t\t\t&corev1.Secret{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName:      \"app-1\",\n\t\t\t\t\t\t\tNamespace: \"team-1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tvar snapshot dataupload.Snapshot\n\t\t\terr := convertDataReadings(test.extractorFunctions, test.readings, &snapshot)\n\t\t\tif test.expectError != \"\" {\n\t\t\t\tassert.EqualError(t, err, test.expectError)\n\t\t\t\tassert.Equal(t, dataupload.Snapshot{}, snapshot)\n\t\t\t\treturn\n\t\t\t}\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, test.expectedSnapshot, snapshot)\n\t\t})\n\t}\n\n}\n\n// TestMinimizeSnapshot tests the minimizeSnapshot function.\n// It creates a snapshot with various secrets and service accounts, runs\n// minimizeSnapshot on it, and checks that the resulting snapshot only contains\n// the expected secrets and service accounts.\nfunc TestMinimizeSnapshot(t *testing.T) {\n\tsecretWithClientCert := newTLSSecret(\"tls-secret-with-client\", sampleCertificateChain(t, x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth))\n\tsecretWithoutClientCert := newTLSSecret(\"tls-secret-without-client\", sampleCertificateChain(t, x509.ExtKeyUsageServerAuth))\n\topaqueSecret := newOpaqueSecret(\"opaque-secret\")\n\tserviceAccount := &unstructured.Unstructured{\n\t\tObject: map[string]any{\n\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\"kind\":       \"ServiceAccount\",\n\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\"name\":      \"my-service-account\",\n\t\t\t\t\"namespace\": 
\"default\",\n\t\t\t},\n\t\t},\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\tinputSnapshot    dataupload.Snapshot\n\t\texpectedSnapshot dataupload.Snapshot\n\t}\n\ttests := []testCase{\n\t\t{\n\t\t\tname: \"empty snapshot\",\n\t\t\tinputSnapshot: dataupload.Snapshot{\n\t\t\t\tAgentVersion:    \"v1.0.0\",\n\t\t\t\tClusterID:       \"cluster-1\",\n\t\t\t\tK8SVersion:      \"v1.21.0\",\n\t\t\t\tSecrets:         []runtime.Object{},\n\t\t\t\tServiceAccounts: []runtime.Object{},\n\t\t\t\tRoles:           []runtime.Object{},\n\t\t\t},\n\t\t\texpectedSnapshot: dataupload.Snapshot{\n\t\t\t\tAgentVersion:    \"v1.0.0\",\n\t\t\t\tClusterID:       \"cluster-1\",\n\t\t\t\tK8SVersion:      \"v1.21.0\",\n\t\t\t\tSecrets:         []runtime.Object{},\n\t\t\t\tServiceAccounts: []runtime.Object{},\n\t\t\t\tRoles:           []runtime.Object{},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"snapshot with various secrets and service accounts\",\n\t\t\tinputSnapshot: dataupload.Snapshot{\n\t\t\t\tAgentVersion: \"v1.0.0\",\n\t\t\t\tClusterID:    \"cluster-1\",\n\t\t\t\tK8SVersion:   \"v1.21.0\",\n\t\t\t\tSecrets: []runtime.Object{\n\t\t\t\t\tsecretWithClientCert,\n\t\t\t\t\tsecretWithoutClientCert,\n\t\t\t\t\topaqueSecret,\n\t\t\t\t},\n\t\t\t\tServiceAccounts: []runtime.Object{\n\t\t\t\t\tserviceAccount,\n\t\t\t\t},\n\t\t\t\tRoles: []runtime.Object{},\n\t\t\t},\n\t\t\texpectedSnapshot: dataupload.Snapshot{\n\t\t\t\tAgentVersion: \"v1.0.0\",\n\t\t\t\tClusterID:    \"cluster-1\",\n\t\t\t\tK8SVersion:   \"v1.21.0\",\n\t\t\t\tSecrets: []runtime.Object{\n\t\t\t\t\tsecretWithClientCert,\n\t\t\t\t\topaqueSecret,\n\t\t\t\t},\n\t\t\t\tServiceAccounts: []runtime.Object{\n\t\t\t\t\tserviceAccount,\n\t\t\t\t},\n\t\t\t\tRoles: []runtime.Object{},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tlog := ktesting.NewLogger(t, ktesting.DefaultConfig)\n\t\t\tminimizeSnapshot(log, &test.inputSnapshot)\n\t\t\tassert.Equal(t, 
test.expectedSnapshot, test.inputSnapshot)\n\t\t})\n\t}\n}\n\n// TestIsExcludableSecret tests the isExcludableSecret function.\nfunc TestIsExcludableSecret(t *testing.T) {\n\ttype testCase struct {\n\t\tname    string\n\t\tsecret  runtime.Object\n\t\texclude bool\n\t}\n\n\ttests := []testCase{\n\t\t{\n\t\t\tname:    \"TLS secret with client cert in tls.crt\",\n\t\t\tsecret:  newTLSSecret(\"tls-secret-with-client\", sampleCertificateChain(t, x509.ExtKeyUsageClientAuth)),\n\t\t\texclude: false,\n\t\t},\n\t\t{\n\t\t\tname:    \"TLS secret with non-client cert in tls.crt\",\n\t\t\tsecret:  newTLSSecret(\"tls-secret-without-client\", sampleCertificateChain(t, x509.ExtKeyUsageServerAuth)),\n\t\t\texclude: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Non-unstructured\",\n\t\t\tsecret: &corev1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"non-unstructured-secret\",\n\t\t\t\t\tNamespace: \"default\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texclude: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Non-secret\",\n\t\t\tsecret: &unstructured.Unstructured{\n\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\"apiVersion\": \"cert-manager/v1\",\n\t\t\t\t\t\"kind\":       \"Certificate\",\n\t\t\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\t\t\"name\":      \"non-secret\",\n\t\t\t\t\t\t\"namespace\": \"default\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texclude: false,\n\t\t},\n\t\t{\n\t\t\tname:    \"Non-TLS secret\",\n\t\t\tsecret:  newOpaqueSecret(\"non-tls-secret\"),\n\t\t\texclude: false,\n\t\t},\n\t\t{\n\t\t\tname:    \"TLS secret without tls.crt\",\n\t\t\tsecret:  newTLSSecret(\"tls-secret-with-no-cert\", nil),\n\t\t\texclude: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"TLS secret with empty tls.crt\",\n\t\t\tsecret:  newTLSSecret(\"tls-secret-with-empty-cert\", \"\"),\n\t\t\texclude: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"TLS secret with invalid base64 in tls.crt\",\n\t\t\tsecret:  newTLSSecret(\"tls-secret-with-invalid-cert\", \"invalid-base64\"),\n\t\t\texclude: 
true,\n\t\t},\n\t\t{\n\t\t\tname:    \"TLS secret with invalid PEM in tls.crt\",\n\t\t\tsecret:  newTLSSecret(\"tls-secret-with-invalid-pem\", base64.StdEncoding.EncodeToString([]byte(\"invalid-pem\"))),\n\t\t\texclude: true,\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tlog := ktesting.NewLogger(t, ktesting.DefaultConfig)\n\t\t\texcluded := isExcludableSecret(log, tc.secret)\n\t\t\tassert.Equal(t, tc.exclude, excluded, \"case: %s\", tc.name)\n\t\t})\n\t}\n}\n\n// newTLSSecret creates a Kubernetes TLS secret with the given name and certificate data.\n// If crt is nil, the secret will not contain a \"tls.crt\" entry.\nfunc newTLSSecret(name string, crt any) *unstructured.Unstructured {\n\tdata := map[string]any{\"tls.key\": \"dummy-key\"}\n\tif crt != nil {\n\t\tdata[\"tls.crt\"] = crt\n\t}\n\treturn &unstructured.Unstructured{\n\t\tObject: map[string]any{\n\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\"kind\":       \"Secret\",\n\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\"name\":      name,\n\t\t\t\t\"namespace\": \"default\",\n\t\t\t},\n\t\t\t\"type\": \"kubernetes.io/tls\",\n\t\t\t\"data\": data,\n\t\t},\n\t}\n}\n\n// newOpaqueSecret creates a Kubernetes Opaque secret with the given name.\nfunc newOpaqueSecret(name string) *unstructured.Unstructured {\n\treturn &unstructured.Unstructured{\n\t\tObject: map[string]any{\n\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\"kind\":       \"Secret\",\n\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\"name\":      name,\n\t\t\t\t\"namespace\": \"default\",\n\t\t\t},\n\t\t\t\"type\": \"Opaque\",\n\t\t\t\"data\": map[string]any{\n\t\t\t\t\"key\": \"value\",\n\t\t\t},\n\t\t},\n\t}\n}\n\n// sampleCertificateChain returns a PEM encoded sample certificate chain for testing purposes.\n// The leaf certificate is signed by a self-signed CA certificate.\n// Uses an elliptic curve key for the CA and leaf certificates for speed.\n// The returned string is base64 encoded to match how TLS 
certificates\n// are typically provided in Kubernetes secrets.\nfunc sampleCertificateChain(t testing.TB, usages ...x509.ExtKeyUsage) string {\n\tt.Helper()\n\n\tcaPrivKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)\n\trequire.NoError(t, err)\n\n\tcaTemplate := x509.Certificate{\n\t\tSerialNumber: big.NewInt(1),\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{\"Test CA\"},\n\t\t\tCommonName:   \"Test CA\",\n\t\t},\n\t\tNotBefore:             time.Now(),\n\t\tNotAfter:              time.Now().Add(24 * time.Hour),\n\t\tKeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageCRLSign,\n\t\tExtKeyUsage:           []x509.ExtKeyUsage{},\n\t\tBasicConstraintsValid: true,\n\t\tIsCA:                  true,\n\t}\n\n\tcaCertDER, err := x509.CreateCertificate(rand.Reader, &caTemplate, &caTemplate, &caPrivKey.PublicKey, caPrivKey)\n\trequire.NoError(t, err)\n\n\tcaCertPEM := pem.EncodeToMemory(&pem.Block{\n\t\tType:  \"CERTIFICATE\",\n\t\tBytes: caCertDER,\n\t})\n\n\tclientPrivKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)\n\trequire.NoError(t, err)\n\tclientTemplate := x509.Certificate{\n\t\tSerialNumber: big.NewInt(2),\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{\"Test Organization\"},\n\t\t\tCommonName:   \"example.com\",\n\t\t},\n\t\tNotBefore:   time.Now(),\n\t\tNotAfter:    time.Now().Add(24 * time.Hour),\n\t\tKeyUsage:    x509.KeyUsageDigitalSignature,\n\t\tExtKeyUsage: usages,\n\t}\n\n\tclientCertDER, err := x509.CreateCertificate(rand.Reader, &clientTemplate, &caTemplate, &clientPrivKey.PublicKey, caPrivKey)\n\trequire.NoError(t, err)\n\n\tclientCertPEM := pem.EncodeToMemory(&pem.Block{\n\t\tType:  \"CERTIFICATE\",\n\t\tBytes: clientCertDER,\n\t})\n\n\treturn base64.StdEncoding.EncodeToString(append(clientCertPEM, caCertPEM...))\n}\n"
  },
  {
    "path": "pkg/client/client_cyberark_test.go",
    "content": "package client_test\n\nimport (\n\t\"crypto/x509\"\n\t\"errors\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/jetstack/venafi-connection-lib/http_client\"\n\t\"github.com/stretchr/testify/require\"\n\tk8sversion \"k8s.io/apimachinery/pkg/version\"\n\t\"k8s.io/klog/v2\"\n\t\"k8s.io/klog/v2/ktesting\"\n\n\t\"github.com/jetstack/preflight/api\"\n\t\"github.com/jetstack/preflight/internal/cyberark\"\n\t\"github.com/jetstack/preflight/internal/cyberark/servicediscovery\"\n\t\"github.com/jetstack/preflight/pkg/client\"\n\t\"github.com/jetstack/preflight/pkg/testutil\"\n\t\"github.com/jetstack/preflight/pkg/version\"\n\n\t_ \"k8s.io/klog/v2/ktesting/init\"\n)\n\n// TestCyberArkClient_PostDataReadingsWithOptions_MockAPI demonstrates that the\n// dataupload code works with the mock CyberArk APIs.\n// The environment variables are chosen to match those expected by the mock\n// server.\nfunc TestCyberArkClient_PostDataReadingsWithOptions_MockAPI(t *testing.T) {\n\tt.Setenv(\"ARK_SUBDOMAIN\", servicediscovery.MockDiscoverySubdomain)\n\tt.Setenv(\"ARK_USERNAME\", \"test@example.com\")\n\tt.Setenv(\"ARK_SECRET\", \"somepassword\")\n\tt.Run(\"success\", func(t *testing.T) {\n\t\tlogger := ktesting.NewLogger(t, ktesting.DefaultConfig)\n\t\tctx := klog.NewContext(t.Context(), logger)\n\n\t\thttpClient := testutil.FakeCyberArk(t)\n\n\t\tc, err := client.NewCyberArk(httpClient)\n\t\trequire.NoError(t, err)\n\n\t\treadings := fakeReadings()\n\t\terr = c.PostDataReadingsWithOptions(ctx, readings, client.Options{})\n\t\trequire.NoError(t, err)\n\t})\n}\n\n// TestCyberArkClient_PostDataReadingsWithOptions_RealAPI demonstrates that the\n// dataupload code works with the real CyberArk APIs.\n//\n// To enable verbose request logging:\n//\n//\tgo test ./internal/cyberark/dataupload/... 
\\\n//\t  -v -count 1 -run TestCyberArkClient_PostDataReadingsWithOptions_RealAPI -args -testing.v 6\nfunc TestCyberArkClient_PostDataReadingsWithOptions_RealAPI(t *testing.T) {\n\tif strings.ToLower(os.Getenv(\"ARK_LIVE_TEST\")) != \"true\" {\n\t\tt.Skip(\"set ARK_LIVE_TEST=true to run this test against the live service\")\n\t\treturn\n\t}\n\n\tt.Run(\"success\", func(t *testing.T) {\n\t\tlogger := ktesting.NewLogger(t, ktesting.DefaultConfig)\n\t\tctx := klog.NewContext(t.Context(), logger)\n\n\t\tvar rootCAs *x509.CertPool\n\t\thttpClient := http_client.NewDefaultClient(version.UserAgent(), rootCAs)\n\n\t\tc, err := client.NewCyberArk(httpClient)\n\t\tif err != nil {\n\t\t\tif errors.Is(err, cyberark.ErrMissingEnvironmentVariables) {\n\t\t\t\tt.Skipf(\"Skipping: %s\", err)\n\t\t\t}\n\t\t\trequire.NoError(t, err)\n\t\t}\n\t\treadings := fakeReadings()\n\t\terr = c.PostDataReadingsWithOptions(ctx, readings, client.Options{})\n\t\trequire.NoError(t, err)\n\t})\n}\n\n// defaultDynamicDatagathererNames is the list of dynamic datagatherers that\n// are included in the defaultExtractorFunctions map in client_cyberark.go.\n// This is used by fakeReadings to generate empty readings for all the\n// dynamic datagatherers.\nvar defaultDynamicDatagathererNames = []string{\n\t\"ark/secrets\",\n\t\"ark/serviceaccounts\",\n\t\"ark/configmaps\",\n\t\"ark/esoexternalsecrets\",\n\t\"ark/esosecretstores\",\n\t\"ark/esoclusterexternalsecrets\",\n\t\"ark/esoclustersecretstores\",\n\t\"ark/roles\",\n\t\"ark/clusterroles\",\n\t\"ark/rolebindings\",\n\t\"ark/clusterrolebindings\",\n\t\"ark/jobs\",\n\t\"ark/cronjobs\",\n\t\"ark/deployments\",\n\t\"ark/statefulsets\",\n\t\"ark/daemonsets\",\n\t\"ark/pods\",\n}\n\n// fakeReadings returns a set of fake readings that includes a discovery reading\n// and empty readings for all the default dynamic datagatherers.\nfunc fakeReadings() []*api.DataReading {\n\treadings := make([]*api.DataReading, len(defaultDynamicDatagathererNames))\n\n\tfor i, 
name := range defaultDynamicDatagathererNames {\n\t\treadings[i] = &api.DataReading{\n\t\t\tDataGatherer: name,\n\t\t\tData:         &api.DynamicData{},\n\t\t}\n\t}\n\n\treturn append([]*api.DataReading{\n\t\t{\n\t\t\tDataGatherer: \"ark/oidc\",\n\t\t\tData: &api.OIDCDiscoveryData{\n\t\t\t\tOIDCConfigError: \"Failed to fetch /.well-known/openid-configuration: 404 Not Found\",\n\t\t\t\tJWKSError:       \"Failed to fetch /openid/v1/jwks: 404 Not Found\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tDataGatherer: \"ark/discovery\",\n\t\t\tData: &api.DiscoveryData{\n\t\t\t\tClusterID: \"ffffffff-ffff-ffff-ffff-ffffffffffff\",\n\t\t\t\tServerVersion: &k8sversion.Info{\n\t\t\t\t\tGitVersion: \"v1.21.0\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}, readings...)\n}\n"
  },
  {
    "path": "pkg/client/client_file.go",
    "content": "package client\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"k8s.io/klog/v2\"\n\n\t\"github.com/jetstack/preflight/api\"\n)\n\n// FileClient writes the supplied readings to a file, in JSON format.\ntype FileClient struct {\n\tpath string\n}\n\nfunc NewFileClient(path string) Client {\n\treturn &FileClient{\n\t\tpath: path,\n\t}\n}\n\nfunc (o *FileClient) PostDataReadingsWithOptions(ctx context.Context, readings []*api.DataReading, _ Options) error {\n\tlog := klog.FromContext(ctx)\n\tdata, err := json.MarshalIndent(readings, \"\", \"  \")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal JSON: %s\", err)\n\t}\n\terr = os.WriteFile(o.path, data, 0644)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to write file: %s\", err)\n\t}\n\tlog.Info(\"Data saved to local file\", \"outputPath\", o.path)\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/client/client_file_test.go",
    "content": "package client\n\nimport (\n\t\"encoding/json\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"k8s.io/klog/v2\"\n\t\"k8s.io/klog/v2/ktesting\"\n\n\t\"github.com/jetstack/preflight/api\"\n)\n\nfunc TestFileClient_PostDataReadingsWithOptions(t *testing.T) {\n\ttype testCase struct {\n\t\tname          string\n\t\tpath          string\n\t\treadings      []*api.DataReading\n\t\texpectedJSON  string\n\t\texpectedError string\n\t}\n\ttests := []testCase{\n\t\t{\n\t\t\tname:         \"success\",\n\t\t\tpath:         \"{tmp}/data.json\",\n\t\t\treadings:     []*api.DataReading{},\n\t\t\texpectedJSON: \"[]\",\n\t\t},\n\t\t{\n\t\t\tname:         \"success-overwrite\",\n\t\t\tpath:         \"{tmp}/exists.json\",\n\t\t\treadings:     []*api.DataReading{},\n\t\t\texpectedJSON: \"[]\",\n\t\t},\n\t\t{\n\t\t\tname: \"json-marshal-error\",\n\t\t\tpath: \"{tmp}/data.json\",\n\t\t\treadings: []*api.DataReading{\n\t\t\t\t{\n\t\t\t\t\tData: json.RawMessage(\"x\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedError: \"failed to marshal JSON: json: error calling MarshalJSON for type json.RawMessage: invalid character 'x' looking for beginning of value\",\n\t\t\texpectedJSON:  \"[]\",\n\t\t},\n\t\t{\n\t\t\tname:          \"no-such-file-or-directory\",\n\t\t\tpath:          \"{tmp}/no-such-folder/data.json\",\n\t\t\treadings:      []*api.DataReading{},\n\t\t\texpectedError: \"failed to write file: open {tmp}/no-such-folder/data.json: no such file or directory\",\n\t\t\texpectedJSON:  \"[]\",\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tlog := ktesting.NewLogger(t, ktesting.DefaultConfig)\n\t\t\tctx := klog.NewContext(t.Context(), log)\n\t\t\ttmpDir := t.TempDir()\n\t\t\trequire.NoError(t, os.WriteFile(tmpDir+\"/exists.json\", []byte(\"existing-content\"), 0644))\n\n\t\t\tpath := strings.ReplaceAll(tc.path, \"{tmp}\", tmpDir)\n\t\t\texpectedError := 
strings.ReplaceAll(tc.expectedError, \"{tmp}\", tmpDir)\n\n\t\t\tc := NewFileClient(path)\n\t\t\terr := c.PostDataReadingsWithOptions(ctx, tc.readings, Options{})\n\n\t\t\tif expectedError != \"\" {\n\t\t\t\tassert.EqualError(t, err, expectedError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.FileExists(t, path)\n\t\t\tactualJSON, err := os.ReadFile(path)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.JSONEq(t, tc.expectedJSON, string(actualJSON))\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/client/client_ngts.go",
    "content": "package client\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto\"\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"encoding/base64\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/golang-jwt/jwt/v4\"\n\t\"github.com/google/uuid\"\n\t\"github.com/microcosm-cc/bluemonday\"\n\t\"k8s.io/client-go/transport\"\n\t\"k8s.io/klog/v2\"\n\n\t\"github.com/jetstack/preflight/api\"\n\t\"github.com/jetstack/preflight/pkg/version\"\n)\n\n// NGTSClient is a Client implementation for uploading data readings to NGTS\n// using service account keypair authentication. It follows the Private Key JWT\n// authentication pattern (RFC 7521 + RFC 7523).\ntype NGTSClient struct {\n\tcredentials   *NGTSServiceAccountCredentials\n\taccessToken   *ngtsAccessToken\n\tbaseURL       *url.URL\n\tagentMetadata *api.AgentMetadata\n\n\ttsgID         string\n\tprivateKey    crypto.PrivateKey\n\tjwtSigningAlg jwt.SigningMethod\n\tlock          sync.RWMutex\n\n\t// Made public for testing purposes.\n\tClient *http.Client\n}\n\n// NGTSServiceAccountCredentials holds the service account authentication credentials for NGTS.\ntype NGTSServiceAccountCredentials struct {\n\t// ClientID is the service account client ID\n\tClientID string `json:\"client_id,omitempty\"`\n\t// PrivateKeyFile is the path to the private key file paired to\n\t// the public key in the service account\n\tPrivateKeyFile string `json:\"private_key_file,omitempty\"`\n}\n\n// ngtsAccessToken stores an NGTS access token and its expiration time.\ntype ngtsAccessToken struct {\n\taccessToken    string\n\texpirationTime time.Time\n}\n\n// ngtsAccessTokenResponse represents the JSON response from the NGTS token endpoint.\ntype ngtsAccessTokenResponse struct {\n\tAccessToken string `json:\"access_token\"` // base 64 encoded token\n\tType        string `json:\"token_type\"`   // always \"bearer\"\n\tExpiresIn   int64  
`json:\"expires_in\"`   // number of seconds after which the access token will expire\n}\n\nconst (\n\t// ngtsProdURLFormat is the format used for constructing a URL for the production environment.\n\t// The TSG ID is part of the URL.\n\tngtsProdURLFormat = \"https://%s.ngts.paloaltonetworks.com\"\n\n\t// ngtsUploadEndpoint matches the \"new\" CM-SaaS upload endpoint\n\t// Note that \"no\" is always passed to this endpoint in other paths (e.g. in the venafi-connection client and in the venafi-kubernetes-agent chart)\n\t// so we copy that behavior here.\n\tngtsUploadEndpoint = \"v1/tlspk/upload/clusterdata/no\"\n\n\t// ngtsAccessTokenEndpoint matches the CM-SaaS token endpoint\n\tngtsAccessTokenEndpoint = accessTokenEndpoint\n\n\t// ngtsRequiredGrantType matches the CM-SaaS required grant type for JWTs\n\tngtsRequiredGrantType = requiredGrantType\n)\n\n// NewNGTSClient creates a new NGTS client that authenticates using keypair authentication\n// and uploads data to NGTS endpoints. The baseURL parameter can override the default\n// NGTS server URL for testing purposes.\nfunc NewNGTSClient(agentMetadata *api.AgentMetadata, credentials *NGTSServiceAccountCredentials, baseURL string, tsgID string, rootCAs *x509.CertPool) (*NGTSClient, error) {\n\t// Load ClientID from file if not provided directly\n\tif err := credentials.LoadClientIDIfNeeded(); err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot create NGTSClient: %w\", err)\n\t}\n\n\tif err := credentials.Validate(); err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot create NGTSClient: %w\", err)\n\t}\n\n\t// NB: There may be more validation which can be done here, e.g. 
see\n\t// https://pan.dev/scm/api/tenancy/delete-tenancy-v-1-tenant-service-groups-tsg-id/\n\t// > Possible values: >= 10 characters and <= 10 characters, Value must match regular expression ^1[0-9]+$\n\t// For now, leaving this check simple\n\tif tsgID == \"\" {\n\t\treturn nil, fmt.Errorf(\"cannot create NGTSClient: tsgID cannot be empty\")\n\t}\n\n\tprivateKey, jwtSigningAlg, err := parsePrivateKeyAndExtractSigningMethod(credentials.PrivateKeyFile)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"while parsing private key file: %w\", err)\n\t}\n\n\tactualBaseURL := baseURL\n\n\t// Create prod NGTS URL if no explicit URL provided\n\tif actualBaseURL == \"\" {\n\t\tactualBaseURL = fmt.Sprintf(ngtsProdURLFormat, tsgID)\n\t}\n\n\tparsedBaseURL, err := url.Parse(actualBaseURL)\n\tif err != nil {\n\t\textra := \"\"\n\n\t\t// A possible failure mode would be an incorrectly formatted TSG ID, so warn about that specifically\n\t\t// if we tried to create a prod URL\n\t\tif baseURL == \"\" {\n\t\t\textra = fmt.Sprintf(\" (possibly malformed TSG ID %q?)\", tsgID)\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"invalid NGTS base URL %q: %s%s\", baseURL, err, extra)\n\t}\n\n\t// Create HTTP transport that honors proxy settings and custom CA certs\n\ttr := http.DefaultTransport.(*http.Transport).Clone()\n\tif rootCAs != nil {\n\t\tif tr.TLSClientConfig == nil {\n\t\t\ttr.TLSClientConfig = &tls.Config{}\n\t\t}\n\t\ttr.TLSClientConfig.RootCAs = rootCAs\n\t}\n\n\treturn &NGTSClient{\n\t\tagentMetadata: agentMetadata,\n\t\tcredentials:   credentials,\n\t\tbaseURL:       parsedBaseURL,\n\t\ttsgID:         tsgID,\n\t\taccessToken:   &ngtsAccessToken{},\n\t\tClient: &http.Client{\n\t\t\tTimeout:   time.Minute,\n\t\t\tTransport: transport.DebugWrappers(tr),\n\t\t},\n\t\tprivateKey:    privateKey,\n\t\tjwtSigningAlg: jwtSigningAlg,\n\t}, nil\n}\n\n// LoadClientIDIfNeeded attempts to load the ClientID from a file if it is not already set.\n// It looks for a \"clientID\" file in the same 
directory as the PrivateKeyFile.\n// For compatibility with the venafi-kubernetes-agent chart, it also supports \"clientId\" (lowercase 'd').\n// If both files exist, \"clientID\" takes precedence.\n// This allows the ClientID to be provided either as a direct value or via a Kubernetes secret.\nfunc (c *NGTSServiceAccountCredentials) LoadClientIDIfNeeded() error {\n\tif c == nil {\n\t\treturn fmt.Errorf(\"credentials are nil\")\n\t}\n\n\t// If ClientID is already set via helm values / CLI args, nothing to do\n\tif c.ClientID != \"\" {\n\t\tklog.V(2).Info(\"Using clientID from config.clientID helm value\")\n\t\treturn nil\n\t}\n\n\t// We'd preferably have NGTSServiceAccountCredentials.CredentialPath but we didn't want to make another change\n\t// to existing CLI flags; so we depend on PrivateKeyFile and assume clientID is in the same directory.\n\n\t// If PrivateKeyFile is not set, we can't determine where to look for the clientID file\n\tif c.PrivateKeyFile == \"\" {\n\t\treturn nil // This is actually a fatal error but will be caught by Validate() later\n\t}\n\n\tbaseDir := path.Dir(c.PrivateKeyFile)\n\n\t// Try to load ClientID from a file in the same directory as the private key\n\t// Try \"clientID\" first (takes precedence), then \"clientId\" for backward compatibility\n\tclientIDPath := baseDir + \"/clientID\"\n\tclientIDBytes, err := os.ReadFile(clientIDPath)\n\tif err != nil {\n\t\t// Try the alternative \"clientId\" (lowercase 'd') for compatibility with venafi-kubernetes-agent\n\t\tclientIDPath = baseDir + \"/clientId\"\n\t\tclientIDBytes, err = os.ReadFile(clientIDPath)\n\t\tif err != nil {\n\t\t\t// If neither file exists, that's okay - we'll let Validate() catch the empty ClientID error later\n\t\t\tklog.V(2).Info(\"Could not read clientID from file\", \"path\", clientIDPath, \"error\", err)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// Trim whitespace from the clientID\n\tc.ClientID = strings.TrimSpace(string(clientIDBytes))\n\tklog.V(2).Info(\"Loaded 
clientID from file\", \"path\", clientIDPath)\n\n\treturn nil\n}\n\n// Validate checks that the NGTS service account credentials are valid.\nfunc (c *NGTSServiceAccountCredentials) Validate() error {\n\tif c == nil {\n\t\treturn fmt.Errorf(\"credentials are nil\")\n\t}\n\n\tif c.ClientID == \"\" {\n\t\treturn fmt.Errorf(\"client_id cannot be empty\")\n\t}\n\n\tif c.PrivateKeyFile == \"\" {\n\t\treturn fmt.Errorf(\"NGTS private key file location cannot be empty\")\n\t}\n\n\treturn nil\n}\n\n// PostDataReadingsWithOptions uploads data readings to the NGTS backend.\n// The TSG ID is included in the upload path to identify the tenant service group.\nfunc (c *NGTSClient) PostDataReadingsWithOptions(ctx context.Context, readings []*api.DataReading, opts Options) error {\n\tpayload := api.DataReadingsPost{\n\t\tAgentMetadata:  c.agentMetadata,\n\t\tDataGatherTime: time.Now().UTC(),\n\t\tDataReadings:   readings,\n\t}\n\tdata, err := json.Marshal(payload)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tuploadURL := c.baseURL.JoinPath(ngtsUploadEndpoint)\n\n\t// Add cluster name and description as query parameters\n\tquery := uploadURL.Query()\n\tstripHTML := bluemonday.StrictPolicy()\n\tif opts.ClusterName != \"\" {\n\t\tquery.Add(\"name\", stripHTML.Sanitize(opts.ClusterName))\n\t}\n\n\tif opts.ClusterDescription != \"\" {\n\t\tquery.Add(\"description\", base64.RawURLEncoding.EncodeToString([]byte(stripHTML.Sanitize(opts.ClusterDescription))))\n\t}\n\n\tif opts.ClaimableCerts {\n\t\t// The TLSPK backend reads \"certOwnership=unassigned\" — this is the backend contract.\n\t\tquery.Add(\"certOwnership\", \"unassigned\")\n\t}\n\n\tuploadURL.RawQuery = query.Encode()\n\n\tklog.FromContext(ctx).V(2).Info(\n\t\t\"uploading data readings to NGTS\",\n\t\t\"url\", uploadURL.String(),\n\t\t\"cluster_name\", opts.ClusterName,\n\t\t\"data_readings_count\", len(readings),\n\t\t\"data_size_bytes\", len(data),\n\t)\n\n\tres, err := c.post(ctx, uploadURL.String(), 
bytes.NewBuffer(data))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to upload data to NGTS: %w\", err)\n\t}\n\tdefer res.Body.Close()\n\n\tif code := res.StatusCode; code < 200 || code >= 300 {\n\t\terrorContent := \"\"\n\t\tbody, err := io.ReadAll(res.Body)\n\t\tif err == nil {\n\t\t\terrorContent = string(body)\n\t\t}\n\t\treturn fmt.Errorf(\"NGTS upload failed with status code %d. Body: [%s]\", code, errorContent)\n\t}\n\n\treturn nil\n}\n\n// post performs an HTTP POST request to NGTS with authentication.\nfunc (c *NGTSClient) post(ctx context.Context, url string, body io.Reader) (*http.Response, error) {\n\ttoken, err := c.getValidAccessToken(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequestWithContext(ctx, http.MethodPost, url, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tversion.SetUserAgent(req)\n\n\tif len(token.accessToken) > 0 {\n\t\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", token.accessToken))\n\t}\n\n\treturn c.Client.Do(req)\n}\n\n// getValidAccessToken returns a valid access token. 
It will fetch a new access\n// token from the auth server if the current token does not exist or has expired.\nfunc (c *NGTSClient) getValidAccessToken(ctx context.Context) (*ngtsAccessToken, error) {\n\tc.lock.RLock()\n\tneedsUpdate := c.accessToken == nil || time.Now().Add(time.Minute).After(c.accessToken.expirationTime)\n\tc.lock.RUnlock()\n\n\tif needsUpdate {\n\t\terr := c.updateAccessToken(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tc.lock.RLock()\n\ttoken := c.accessToken\n\tc.lock.RUnlock()\n\n\treturn token, nil\n}\n\n// updateAccessToken fetches a new access token from the NGTS auth server using JWT authentication.\nfunc (c *NGTSClient) updateAccessToken(ctx context.Context) error {\n\tjwtToken, err := c.generateAndSignJwtToken()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to generate JWT token for NGTS authentication: %w\", err)\n\t}\n\n\tvalues := url.Values{}\n\tvalues.Set(\"grant_type\", ngtsRequiredGrantType)\n\tvalues.Set(\"assertion\", jwtToken)\n\n\ttokenURL := c.baseURL.JoinPath(ngtsAccessTokenEndpoint).String()\n\n\tencoded := values.Encode()\n\trequest, err := http.NewRequestWithContext(ctx, http.MethodPost, tokenURL, strings.NewReader(encoded))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trequest.Header.Add(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\trequest.Header.Add(\"Content-Length\", strconv.Itoa(len(encoded)))\n\tversion.SetUserAgent(request)\n\n\tnow := time.Now()\n\taccessToken := ngtsAccessTokenResponse{}\n\terr = c.sendHTTPRequest(request, &accessToken)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to obtain NGTS access token: %w\", err)\n\t}\n\n\tc.lock.Lock()\n\tc.accessToken = &ngtsAccessToken{\n\t\taccessToken:    accessToken.AccessToken,\n\t\texpirationTime: now.Add(time.Duration(accessToken.ExpiresIn) * time.Second),\n\t}\n\tc.lock.Unlock()\n\treturn nil\n}\n\n// sendHTTPRequest executes an HTTP request and unmarshals the JSON response.\nfunc (c *NGTSClient) 
sendHTTPRequest(request *http.Request, responseObject any) error {\n\tresponse, err := c.Client.Do(request)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer response.Body.Close()\n\n\tif response.StatusCode != http.StatusOK && response.StatusCode != http.StatusCreated {\n\t\tbody, _ := io.ReadAll(response.Body)\n\t\treturn fmt.Errorf(\"NGTS API request failed. Request %s, status code: %d, body: [%s]\", request.URL, response.StatusCode, body)\n\t}\n\n\tbody, err := io.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = json.Unmarshal(body, responseObject); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// generateAndSignJwtToken creates a JWT token signed with the service account's private key\n// for authenticating to NGTS.\nfunc (c *NGTSClient) generateAndSignJwtToken() (string, error) {\n\t// backend still expects \"api.venafi.cloud/v1/oauth/token/serviceaccount\" for audience, so force that for now\n\tvenafiCloudProdURL, err := url.Parse(VenafiCloudProdURL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tclaims := make(jwt.MapClaims)\n\tclaims[\"sub\"] = c.credentials.ClientID\n\tclaims[\"iss\"] = c.credentials.ClientID\n\tclaims[\"iat\"] = time.Now().Unix()\n\tclaims[\"exp\"] = time.Now().Add(time.Minute).Unix()\n\tclaims[\"aud\"] = path.Join(venafiCloudProdURL.Host, ngtsAccessTokenEndpoint)\n\tclaims[\"jti\"] = uuid.New().String()\n\n\ttoken, err := jwt.NewWithClaims(c.jwtSigningAlg, claims).SignedString(c.privateKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn token, nil\n}\n"
  },
  {
    "path": "pkg/client/client_ngts_test.go",
    "content": "package client\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/jetstack/preflight/api\"\n)\n\nconst fakePrivKeyPEM = `-----BEGIN PRIVATE KEY-----\nMHcCAQEEIFptpPXOvEWDrYkiMhyEH1+FB1GwtwX2tyXH4KtBO6g7oAoGCCqGSM49\nAwEHoUQDQgAE/BsIwagYc4YUjSSFyqcStj2qliAkdVGlMoJbMuXupzQ9Qs4TX5Pl\ndFjz6J/j6Gu4fLPqXmM61Hj6kiuRHx5eHQ==\n-----END PRIVATE KEY-----\n`\n\nfunc withFile(t testing.TB, content string) string {\n\tt.Helper()\n\n\tf, err := os.CreateTemp(t.TempDir(), \"file\")\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create temporary file: %v\", err)\n\t}\n\tdefer f.Close()\n\n\t_, err = f.WriteString(content)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to write to temporary file: %v\", err)\n\t}\n\n\treturn f.Name()\n}\n\nfunc TestNewNGTSClient(t *testing.T) {\n\t// Create a temporary key file\n\tkeyFile := withFile(t, fakePrivKeyPEM)\n\n\ttests := []struct {\n\t\tname        string\n\t\tcredentials *NGTSServiceAccountCredentials\n\t\tbaseURL     string\n\t\ttsgID       string\n\t\twantErr     bool\n\t\terrContains string\n\t}{\n\t\t{\n\t\t\tname: \"valid credentials and tsg id\",\n\t\t\tcredentials: &NGTSServiceAccountCredentials{\n\t\t\t\tClientID:       \"test-client-id\",\n\t\t\t\tPrivateKeyFile: keyFile,\n\t\t\t},\n\t\t\tbaseURL: \"https://test.ngts.example.com\",\n\t\t\ttsgID:   \"test-tsg-id\",\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"missing tsg id\",\n\t\t\tcredentials: &NGTSServiceAccountCredentials{\n\t\t\t\tClientID:       \"test-client-id\",\n\t\t\t\tPrivateKeyFile: keyFile,\n\t\t\t},\n\t\t\tbaseURL:     \"https://test.ngts.example.com\",\n\t\t\ttsgID:       \"\",\n\t\t\twantErr:     true,\n\t\t\terrContains: \"tsgID cannot be empty\",\n\t\t},\n\t\t{\n\t\t\tname: \"missing clientID without file\",\n\t\t\tcredentials: &NGTSServiceAccountCredentials{\n\t\t\t\tClientID:      
 \"\",\n\t\t\t\tPrivateKeyFile: keyFile,\n\t\t\t},\n\t\t\tbaseURL:     \"https://test.ngts.example.com\",\n\t\t\ttsgID:       \"test-tsg-id\",\n\t\t\twantErr:     true,\n\t\t\terrContains: \"client_id cannot be empty\",\n\t\t},\n\t\t{\n\t\t\tname: \"default URL when empty\",\n\t\t\tcredentials: &NGTSServiceAccountCredentials{\n\t\t\t\tClientID:       \"test-client-id\",\n\t\t\t\tPrivateKeyFile: keyFile,\n\t\t\t},\n\t\t\tbaseURL: \"\",\n\t\t\ttsgID:   \"test-tsg-id\",\n\t\t\twantErr: false,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tmetadata := &api.AgentMetadata{\n\t\t\t\tVersion:   \"test-version\",\n\t\t\t\tClusterID: \"test-cluster\",\n\t\t\t}\n\n\t\t\tclient, err := NewNGTSClient(metadata, tt.credentials, tt.baseURL, tt.tsgID, nil)\n\n\t\t\tif tt.wantErr {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tif tt.errContains != \"\" {\n\t\t\t\t\tassert.Contains(t, err.Error(), tt.errContains)\n\t\t\t\t}\n\t\t\t\tassert.Nil(t, client)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.NotNil(t, client)\n\t\t\tassert.Equal(t, tt.tsgID, client.tsgID)\n\t\t\tif tt.baseURL != \"\" {\n\t\t\t\tassert.Equal(t, tt.baseURL, client.baseURL.String())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.Equal(t, fmt.Sprintf(ngtsProdURLFormat, tt.tsgID), client.baseURL.String())\n\t\t})\n\t}\n}\n\nfunc TestNGTSClient_LoadClientIDFromFile(t *testing.T) {\n\t// Create a temporary directory for the secret files\n\ttmpDir := t.TempDir()\n\n\t// Create the private key file\n\tkeyFile := tmpDir + \"/privatekey.pem\"\n\terr := os.WriteFile(keyFile, []byte(fakePrivKeyPEM), 0o600)\n\trequire.NoError(t, err)\n\n\t// Create the clientID file in the same directory\n\tclientIDFile := tmpDir + \"/clientID\"\n\terr = os.WriteFile(clientIDFile, []byte(\"test-client-from-file\\n\"), 0o600)\n\trequire.NoError(t, err)\n\n\ttests := []struct {\n\t\tname        string\n\t\tcredentials *NGTSServiceAccountCredentials\n\t\twantErr     
bool\n\t\twantClient  string\n\t}{\n\t\t{\n\t\t\tname: \"load clientID from file\",\n\t\t\tcredentials: &NGTSServiceAccountCredentials{\n\t\t\t\tClientID:       \"\", // Empty - should be loaded from file\n\t\t\t\tPrivateKeyFile: keyFile,\n\t\t\t},\n\t\t\twantErr:    false,\n\t\t\twantClient: \"test-client-from-file\",\n\t\t},\n\t\t{\n\t\t\tname: \"explicit clientID takes precedence\",\n\t\t\tcredentials: &NGTSServiceAccountCredentials{\n\t\t\t\tClientID:       \"explicit-client-id\",\n\t\t\t\tPrivateKeyFile: keyFile,\n\t\t\t},\n\t\t\twantErr:    false,\n\t\t\twantClient: \"explicit-client-id\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tmetadata := &api.AgentMetadata{\n\t\t\t\tVersion:   \"test-version\",\n\t\t\t\tClusterID: \"test-cluster\",\n\t\t\t}\n\n\t\t\tclient, err := NewNGTSClient(metadata, tt.credentials, \"https://test.example.com\", \"test-tsg\", nil)\n\n\t\t\tif tt.wantErr {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.NotNil(t, client)\n\t\t\tassert.Equal(t, tt.wantClient, client.credentials.ClientID)\n\t\t})\n\t}\n}\n\nfunc TestNGTSClient_LoadClientIDFromFileAlternativeNames(t *testing.T) {\n\ttests := []struct {\n\t\tname           string\n\t\tsetupFiles     func(tmpDir string) string // returns keyFile path\n\t\twantClientID   string\n\t\twantErr        bool\n\t\twantErrContain string\n\t}{\n\t\t{\n\t\t\t// Note: venafi-kubernetes-agent didn't support storing the client ID in the secret, but\n\t\t\t// we don't want users moving to discovery-agent to be caught out by such a trivial mistake.\n\t\t\tname: \"load from clientId (lowercase d) for venafi-kubernetes-agent compatibility\",\n\t\t\tsetupFiles: func(tmpDir string) string {\n\t\t\t\tkeyFile := tmpDir + \"/privatekey.pem\"\n\t\t\t\terr := os.WriteFile(keyFile, []byte(fakePrivKeyPEM), 0o600)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\t// Create clientId file (lowercase 
'd')\n\t\t\t\tclientIdFile := tmpDir + \"/clientId\"\n\t\t\t\terr = os.WriteFile(clientIdFile, []byte(\"test-client-from-clientId\\n\"), 0o600)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\treturn keyFile\n\t\t\t},\n\t\t\twantClientID: \"test-client-from-clientId\",\n\t\t\twantErr:      false,\n\t\t},\n\t\t{\n\t\t\tname: \"load from clientID (uppercase D)\",\n\t\t\tsetupFiles: func(tmpDir string) string {\n\t\t\t\tkeyFile := tmpDir + \"/privatekey.pem\"\n\t\t\t\terr := os.WriteFile(keyFile, []byte(fakePrivKeyPEM), 0o600)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\t// Create only clientID file (uppercase 'D')\n\t\t\t\tclientIDFile := tmpDir + \"/clientID\"\n\t\t\t\terr = os.WriteFile(clientIDFile, []byte(\"from-clientID\"), 0o600)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\treturn keyFile\n\t\t\t},\n\t\t\twantClientID: \"from-clientID\",\n\t\t\twantErr:      false,\n\t\t},\n\t\t{\n\t\t\tname: \"error when no clientID file exists\",\n\t\t\tsetupFiles: func(tmpDir string) string {\n\t\t\t\tkeyFile := tmpDir + \"/privatekey.pem\"\n\t\t\t\terr := os.WriteFile(keyFile, []byte(fakePrivKeyPEM), 0o600)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\t// Don't create any clientID file\n\t\t\t\treturn keyFile\n\t\t\t},\n\t\t\twantErr:        true,\n\t\t\twantErrContain: \"client_id cannot be empty\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\ttmpDir := t.TempDir()\n\t\t\tkeyFile := tt.setupFiles(tmpDir)\n\n\t\t\tcredentials := &NGTSServiceAccountCredentials{\n\t\t\t\tClientID:       \"\", // Empty - should be loaded from file\n\t\t\t\tPrivateKeyFile: keyFile,\n\t\t\t}\n\n\t\t\tmetadata := &api.AgentMetadata{\n\t\t\t\tVersion:   \"test-version\",\n\t\t\t\tClusterID: \"test-cluster\",\n\t\t\t}\n\n\t\t\tclient, err := NewNGTSClient(metadata, credentials, \"https://test.example.com\", \"test-tsg\", nil)\n\n\t\t\tif tt.wantErr {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tif tt.wantErrContain != \"\" {\n\t\t\t\t\tassert.Contains(t, err.Error(), 
tt.wantErrContain)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.NotNil(t, client)\n\t\t\tassert.Equal(t, tt.wantClientID, client.credentials.ClientID)\n\t\t})\n\t}\n}\n\nfunc TestNGTSClient_PostDataReadingsWithOptions(t *testing.T) {\n\tkeyFile := withFile(t, fakePrivKeyPEM)\n\n\t// Create a test server that simulates NGTS backend\n\tvar receivedRequest *http.Request\n\tvar receivedBody []byte\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\treceivedRequest = r\n\n\t\t// First request is for access token\n\t\tif r.URL.Path == ngtsAccessTokenEndpoint {\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t_ = json.NewEncoder(w).Encode(ngtsAccessTokenResponse{\n\t\t\t\tAccessToken: \"test-access-token\",\n\t\t\t\tType:        \"bearer\",\n\t\t\t\tExpiresIn:   3600,\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\t// Second request is for data upload\n\t\tbody := make([]byte, r.ContentLength)\n\t\t_, _ = r.Body.Read(body)\n\t\treceivedBody = body\n\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_, _ = w.Write([]byte(`{\"status\": \"success\"}`))\n\t}))\n\tdefer server.Close()\n\n\tcredentials := &NGTSServiceAccountCredentials{\n\t\tClientID:       \"test-client-id\",\n\t\tPrivateKeyFile: keyFile,\n\t}\n\n\tmetadata := &api.AgentMetadata{\n\t\tVersion:   \"test-version\",\n\t\tClusterID: \"test-cluster\",\n\t}\n\n\ttsgID := \"test-tsg-123\"\n\tclient, err := NewNGTSClient(metadata, credentials, server.URL, tsgID, nil)\n\trequire.NoError(t, err)\n\n\t// Test data upload\n\treadings := []*api.DataReading{\n\t\t{\n\t\t\tDataGatherer: \"test-gatherer\",\n\t\t\tTimestamp:    api.Time{},\n\t\t\tData:         &api.DynamicData{},\n\t\t},\n\t}\n\n\topts := Options{\n\t\tClusterName:        \"test-cluster\",\n\t\tClusterDescription: \"Test cluster description\",\n\t}\n\n\terr = client.PostDataReadingsWithOptions(t.Context(), readings, opts)\n\trequire.NoError(t, err)\n\n\t// Verify the upload request\n\tassert.NotNil(t, 
receivedRequest)\n\tassert.Equal(t, \"/\"+ngtsUploadEndpoint, receivedRequest.URL.Path)\n\tassert.Contains(t, receivedRequest.URL.RawQuery, \"name=test-cluster\")\n\tassert.Equal(t, \"Bearer test-access-token\", receivedRequest.Header.Get(\"Authorization\"))\n\t// certOwnership not set — must NOT appear in query\n\tassert.NotContains(t, receivedRequest.URL.RawQuery, \"certOwnership\")\n\n\t// Verify the payload\n\tvar payload api.DataReadingsPost\n\terr = json.Unmarshal(receivedBody, &payload)\n\trequire.NoError(t, err)\n\tassert.Equal(t, 1, len(payload.DataReadings))\n\n\t// Verify claimableCerts=true is included when set\n\tt.Run(\"claimableCerts: true sends certOwnership=unassigned to backend\", func(t *testing.T) {\n\t\toptsUnassigned := Options{\n\t\t\tClusterName:    \"test-cluster\",\n\t\t\tClaimableCerts: true,\n\t\t}\n\t\terr = client.PostDataReadingsWithOptions(t.Context(), readings, optsUnassigned)\n\t\trequire.NoError(t, err)\n\t\tassert.Contains(t, receivedRequest.URL.RawQuery, \"certOwnership=unassigned\")\n\t})\n}\n\nfunc TestNGTSClient_AuthenticationFlow(t *testing.T) {\n\tkeyFile := withFile(t, fakePrivKeyPEM)\n\n\tauthCallCount := 0\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path == ngtsAccessTokenEndpoint {\n\t\t\tauthCallCount++\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t_ = json.NewEncoder(w).Encode(ngtsAccessTokenResponse{\n\t\t\t\tAccessToken: \"test-access-token\",\n\t\t\t\tType:        \"bearer\",\n\t\t\t\tExpiresIn:   3600,\n\t\t\t})\n\t\t\treturn\n\t\t}\n\t\tw.WriteHeader(http.StatusOK)\n\t}))\n\tdefer server.Close()\n\n\tcredentials := &NGTSServiceAccountCredentials{\n\t\tClientID:       \"test-client-id\",\n\t\tPrivateKeyFile: keyFile,\n\t}\n\n\tmetadata := &api.AgentMetadata{\n\t\tVersion:   \"test-version\",\n\t\tClusterID: \"test-cluster\",\n\t}\n\n\tclient, err := NewNGTSClient(metadata, credentials, server.URL, \"test-tsg\", nil)\n\trequire.NoError(t, err)\n\n\t// 
Make multiple requests - should only authenticate once\n\treadings := []*api.DataReading{{DataGatherer: \"test\", Data: &api.DynamicData{}}}\n\topts := Options{ClusterName: \"test\"}\n\n\tfor range 3 {\n\t\terr = client.PostDataReadingsWithOptions(t.Context(), readings, opts)\n\t\trequire.NoError(t, err)\n\t}\n\n\t// Should only authenticate once since token is cached\n\tassert.Equal(t, 1, authCallCount)\n}\n\nfunc TestNGTSClient_ErrorHandling(t *testing.T) {\n\tkeyFile := withFile(t, fakePrivKeyPEM)\n\n\ttests := []struct {\n\t\tname           string\n\t\tserverHandler  http.HandlerFunc\n\t\texpectedErrMsg string\n\t}{\n\t\t{\n\t\t\tname: \"authentication failure\",\n\t\t\tserverHandler: func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tif r.URL.Path == ngtsAccessTokenEndpoint {\n\t\t\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t\t\t_, _ = w.Write([]byte(`{\"error\": \"invalid_client\"}`))\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t},\n\t\t\texpectedErrMsg: \"failed to obtain NGTS access token\",\n\t\t},\n\t\t{\n\t\t\tname: \"upload failure\",\n\t\t\tserverHandler: func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tif r.URL.Path == ngtsAccessTokenEndpoint {\n\t\t\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t\t\t_ = json.NewEncoder(w).Encode(ngtsAccessTokenResponse{\n\t\t\t\t\t\tAccessToken: \"test-token\",\n\t\t\t\t\t\tType:        \"bearer\",\n\t\t\t\t\t\tExpiresIn:   3600,\n\t\t\t\t\t})\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\t_, _ = w.Write([]byte(`{\"error\": \"internal server error\"}`))\n\t\t\t},\n\t\t\texpectedErrMsg: \"NGTS upload failed with status code 500\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tserver := httptest.NewServer(tt.serverHandler)\n\t\t\tdefer server.Close()\n\n\t\t\tcredentials := &NGTSServiceAccountCredentials{\n\t\t\t\tClientID:       \"test-client-id\",\n\t\t\t\tPrivateKeyFile: 
keyFile,\n\t\t\t}\n\n\t\t\tmetadata := &api.AgentMetadata{Version: \"test\", ClusterID: \"test\"}\n\t\t\tclient, err := NewNGTSClient(metadata, credentials, server.URL, \"test-tsg\", nil)\n\t\t\trequire.NoError(t, err)\n\n\t\t\treadings := []*api.DataReading{{DataGatherer: \"test\", Data: &api.DynamicData{}}}\n\t\t\topts := Options{ClusterName: \"test\"}\n\n\t\t\terr = client.PostDataReadingsWithOptions(t.Context(), readings, opts)\n\t\t\trequire.Error(t, err)\n\t\t\tassert.Contains(t, err.Error(), tt.expectedErrMsg)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/client/client_oauth.go",
    "content": "package client\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/hashicorp/go-multierror\"\n\t\"k8s.io/client-go/transport\"\n\t\"k8s.io/klog/v2\"\n\n\t\"github.com/jetstack/preflight/api\"\n\t\"github.com/jetstack/preflight/pkg/version\"\n)\n\ntype (\n\t// The OAuthClient type is a Client implementation used to upload data readings to the Jetstack Secure platform\n\t// using OAuth as its authentication method.\n\tOAuthClient struct {\n\t\tcredentials   *OAuthCredentials\n\t\taccessToken   *accessToken\n\t\tbaseURL       string\n\t\tagentMetadata *api.AgentMetadata\n\t\tclient        *http.Client\n\t}\n\n\taccessToken struct {\n\t\tbearer         string\n\t\texpirationDate time.Time\n\t}\n\n\t// OAuthCredentials defines the format of the credentials.json file.\n\tOAuthCredentials struct {\n\t\t// UserID is the ID or email for the user or service account.\n\t\tUserID string `json:\"user_id\"`\n\t\t// UserSecret is the secret for the user or service account.\n\t\tUserSecret string `json:\"user_secret\"`\n\t\t// The following fields are optional as the default behaviour\n\t\t// is to use the equivalent variables defined at package level\n\t\t// and injected at build time.\n\t\t// ClientID is the oauth2 client ID.\n\t\tClientID string `json:\"client_id,omitempty\"`\n\t\t// ClientSecret is the oauth2 client secret.\n\t\tClientSecret string `json:\"client_secret,omitempty\"`\n\t\t// AuthServerDomain is the domain for the auth server.\n\t\tAuthServerDomain string `json:\"auth_server_domain,omitempty\"`\n\t}\n)\n\nvar (\n\t// ClientID is the auth0 client identifier (injected at build time)\n\tClientID string\n\n\t// ClientSecret is the auth0 client secret (injected at build time)\n\tClientSecret string\n\n\t// AuthServerDomain is the auth0 domain (injected at build time)\n\tAuthServerDomain string\n)\n\nfunc (t *accessToken) 
needsRenew() bool {\n\treturn t.bearer == \"\" || time.Now().After(t.expirationDate)\n}\n\n// NewOAuthClient returns a new instance of the OAuthClient type that will perform HTTP requests using OAuth to provide\n// authentication tokens to the backend API.\nfunc NewOAuthClient(agentMetadata *api.AgentMetadata, credentials *OAuthCredentials, baseURL string) (*OAuthClient, error) {\n\tif err := credentials.Validate(); err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot create OAuthClient: %v\", err)\n\t}\n\tif baseURL == \"\" {\n\t\treturn nil, fmt.Errorf(\"programmer mistake: cannot create APITokenClient: baseURL cannot be empty, should have been checked by the caller\")\n\t}\n\n\tok, _ := credentials.IsClientSet()\n\tif !ok {\n\t\tcredentials.ClientID = ClientID\n\t\tcredentials.ClientSecret = ClientSecret\n\t\tcredentials.AuthServerDomain = AuthServerDomain\n\t}\n\n\tok, why := credentials.IsClientSet()\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"%s\", why)\n\t}\n\n\treturn &OAuthClient{\n\t\tagentMetadata: agentMetadata,\n\t\tcredentials:   credentials,\n\t\tbaseURL:       baseURL,\n\t\taccessToken:   &accessToken{},\n\t\tclient: &http.Client{\n\t\t\tTimeout:   time.Minute,\n\t\t\tTransport: transport.DebugWrappers(http.DefaultTransport),\n\t\t},\n\t}, nil\n}\n\nfunc (c *OAuthClient) PostDataReadingsWithOptions(ctx context.Context, readings []*api.DataReading, opts Options) error {\n\treturn c.postDataReadings(ctx, opts.OrgID, opts.ClusterID, readings)\n}\n\n// PostDataReadings uploads the slice of api.DataReading to the Jetstack Secure backend to be processed for later\n// viewing in the user-interface.\nfunc (c *OAuthClient) postDataReadings(ctx context.Context, orgID, clusterID string, readings []*api.DataReading) error {\n\tpayload := api.DataReadingsPost{\n\t\tAgentMetadata:  c.agentMetadata,\n\t\tDataGatherTime: time.Now().UTC(),\n\t\tDataReadings:   readings,\n\t}\n\tdata, err := json.Marshal(payload)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\tklog.FromContext(ctx).V(2).Info(\n\t\t\"uploading data readings\",\n\t\t\"url\", filepath.Join(\"/api/v1/org\", orgID, \"datareadings\", clusterID),\n\t\t\"cluster_id\", clusterID,\n\t\t\"data_readings_count\", len(readings),\n\t\t\"data_size_bytes\", len(data),\n\t)\n\n\tres, err := c.post(ctx, filepath.Join(\"/api/v1/org\", orgID, \"datareadings\", clusterID), bytes.NewBuffer(data))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\n\tif code := res.StatusCode; code < 200 || code >= 300 {\n\t\terrorContent := \"\"\n\t\tbody, err := io.ReadAll(res.Body)\n\t\tif err == nil {\n\t\t\terrorContent = string(body)\n\t\t}\n\n\t\treturn fmt.Errorf(\"received response with status code %d. Body: [%s]\", code, errorContent)\n\t}\n\n\treturn nil\n}\n\n// Post performs an HTTP POST request.\nfunc (c *OAuthClient) post(ctx context.Context, path string, body io.Reader) (*http.Response, error) {\n\ttoken, err := c.getValidAccessToken(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequestWithContext(ctx, http.MethodPost, fullURL(c.baseURL, path), body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\n\tversion.SetUserAgent(req)\n\n\tif len(token.bearer) > 0 {\n\t\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", token.bearer))\n\t}\n\n\treturn c.client.Do(req)\n}\n\n// getValidAccessToken returns a valid access token. 
It will fetch a new access\n// token from the auth server in case the current access token does not exist\n// or it is expired.\nfunc (c *OAuthClient) getValidAccessToken(ctx context.Context) (*accessToken, error) {\n\tif c.accessToken.needsRenew() {\n\t\terr := c.renewAccessToken(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn c.accessToken, nil\n}\n\nfunc (c *OAuthClient) renewAccessToken(ctx context.Context) error {\n\ttokenURL := fmt.Sprintf(\"https://%s/oauth/token\", c.credentials.AuthServerDomain)\n\taudience := \"https://preflight.jetstack.io/api/v1\"\n\tpayload := url.Values{}\n\tpayload.Set(\"grant_type\", \"password\")\n\tpayload.Set(\"client_id\", c.credentials.ClientID)\n\tpayload.Set(\"client_secret\", c.credentials.ClientSecret)\n\tpayload.Set(\"audience\", audience)\n\tpayload.Set(\"username\", c.credentials.UserID)\n\tpayload.Set(\"password\", c.credentials.UserSecret)\n\treq, err := http.NewRequestWithContext(ctx, http.MethodPost, tokenURL, strings.NewReader(payload.Encode()))\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Add(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\tversion.SetUserAgent(req)\n\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbody, err := io.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer res.Body.Close()\n\n\tif status := res.StatusCode; status < 200 || status >= 300 {\n\t\treturn fmt.Errorf(\"auth server did not provide an access token: (status %d) %s.\", status, string(body))\n\t}\n\n\tresponse := struct {\n\t\tBearer    string `json:\"access_token\"`\n\t\tExpiresIn uint   `json:\"expires_in\"`\n\t}{}\n\n\terr = json.Unmarshal(body, &response)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif response.ExpiresIn == 0 {\n\t\treturn fmt.Errorf(\"got wrong expiration for access token\")\n\t}\n\n\tc.accessToken.bearer = response.Bearer\n\tc.accessToken.expirationDate = time.Now().Add(time.Duration(response.ExpiresIn) * 
time.Second)\n\n\treturn nil\n}\n\n// Performs validations. Since it may return a multierror.Error, remember to use\n// multierror.Prefix(err, \"context: \") rather than fmt.Errorf(\"context: %w\",\n// err) when wrapping the error.\nfunc ParseOAuthCredentials(data []byte) (*OAuthCredentials, error) {\n\tvar credentials OAuthCredentials\n\n\terr := json.Unmarshal(data, &credentials)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = credentials.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &credentials, nil\n}\n\n// IsClientSet returns whether the client credentials are set or not. `why` is\n// only returned when `ok` is false.\nfunc (c *OAuthCredentials) IsClientSet() (ok bool, why string) {\n\tif c.ClientID == \"\" {\n\t\treturn false, \"ClientID is empty\"\n\t}\n\tif c.ClientSecret == \"\" {\n\t\treturn false, \"ClientSecret is empty\"\n\t}\n\tif c.AuthServerDomain == \"\" {\n\t\treturn false, \"AuthServerDomain is empty\"\n\t}\n\n\treturn true, \"\"\n}\n\nfunc (c *OAuthCredentials) Validate() error {\n\tvar result *multierror.Error\n\n\tif c == nil {\n\t\treturn fmt.Errorf(\"credentials are nil\")\n\t}\n\n\tif c.UserID == \"\" {\n\t\tresult = multierror.Append(result, fmt.Errorf(\"user_id cannot be empty\"))\n\t}\n\n\tif c.UserSecret == \"\" {\n\t\tresult = multierror.Append(result, fmt.Errorf(\"user_secret cannot be empty\"))\n\t}\n\n\treturn result.ErrorOrNil()\n}\n"
  },
  {
    "path": "pkg/client/client_venafi_cloud.go",
    "content": "package client\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto\"\n\t\"encoding/base64\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/golang-jwt/jwt/v4\"\n\t\"github.com/google/uuid\"\n\t\"github.com/hashicorp/go-multierror\"\n\t\"github.com/microcosm-cc/bluemonday\"\n\t\"k8s.io/client-go/transport\"\n\t\"k8s.io/klog/v2\"\n\n\t\"github.com/jetstack/preflight/api\"\n\t\"github.com/jetstack/preflight/pkg/version\"\n)\n\ntype (\n\t// The VenafiCloudClient type is a Client implementation used to upload data readings to the Venafi Cloud platform\n\t// using service account authentication as its authentication method.\n\t//\n\t// This form of authentication follows the Private Key JWT standard found at https://oauth.net/private-key-jwt,\n\t// which is a combination of two RFCs:\n\t// * RFC 7521 (Assertion Framework)\n\t// * RFC 7523 (JWT Profile for Client Authentication)\n\tVenafiCloudClient struct {\n\t\tcredentials   *VenafiSvcAccountCredentials\n\t\taccessToken   *venafiCloudAccessToken\n\t\tbaseURL       string\n\t\tagentMetadata *api.AgentMetadata\n\n\t\tuploaderID    string\n\t\tuploadPath    string\n\t\tprivateKey    crypto.PrivateKey\n\t\tjwtSigningAlg jwt.SigningMethod\n\t\tlock          sync.RWMutex\n\n\t\t// Made public for testing purposes.\n\t\tClient *http.Client\n\t}\n\n\tVenafiSvcAccountCredentials struct {\n\t\t// ClientID is the service account client ID\n\t\tClientID string `json:\"client_id,omitempty\"`\n\t\t// PrivateKeyFile is the path to the private key file paired to\n\t\t// the public key in the service account\n\t\tPrivateKeyFile string `json:\"private_key_file,omitempty\"`\n\t}\n\n\tvenafiCloudAccessToken struct {\n\t\taccessToken    string\n\t\texpirationTime time.Time\n\t}\n\n\taccessTokenInformation struct {\n\t\tAccessToken string `json:\"access_token\"` // base 64 encoded token\n\t\tType   
     string `json:\"token_type\"`   // always be “bearer” for now\n\t\tExpiresIn   int64  `json:\"expires_in\"`   // number of seconds after which the access token will expire\n\t}\n)\n\nconst (\n\t// URL for the venafi-cloud backend services\n\tVenafiCloudProdURL               = \"https://api.venafi.cloud\"\n\tdefaultVenafiCloudUploadEndpoint = \"v1/tlspk/uploads\"\n\taccessTokenEndpoint              = \"/v1/oauth/token/serviceaccount\"\n\trequiredGrantType                = \"urn:ietf:params:oauth:grant-type:jwt-bearer\"\n)\n\n// NewVenafiCloudClient returns a new instance of the VenafiCloudClient type that will perform HTTP requests using a bearer token\n// to authenticate to the backend API.\nfunc NewVenafiCloudClient(agentMetadata *api.AgentMetadata, credentials *VenafiSvcAccountCredentials, baseURL string, uploaderID string, uploadPath string) (*VenafiCloudClient, error) {\n\tif err := credentials.Validate(); err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot create VenafiCloudClient: %w\", err)\n\t}\n\tprivateKey, jwtSigningAlg, err := parsePrivateKeyAndExtractSigningMethod(credentials.PrivateKeyFile)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"while parsing private key file: %w\", err)\n\t}\n\tif baseURL == \"\" {\n\t\treturn nil, fmt.Errorf(\"cannot create VenafiCloudClient: baseURL cannot be empty\")\n\t}\n\n\tok, why := credentials.IsClientSet()\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"%s\", why)\n\t}\n\n\tif uploadPath == \"\" {\n\t\t// if the uploadPath is not given, use default upload path\n\t\tuploadPath = defaultVenafiCloudUploadEndpoint\n\t}\n\n\treturn &VenafiCloudClient{\n\t\tagentMetadata: agentMetadata,\n\t\tcredentials:   credentials,\n\t\tbaseURL:       baseURL,\n\t\taccessToken:   &venafiCloudAccessToken{},\n\t\tClient: &http.Client{\n\t\t\tTimeout:   time.Minute,\n\t\t\tTransport: transport.DebugWrappers(http.DefaultTransport),\n\t\t},\n\t\tuploaderID:    uploaderID,\n\t\tuploadPath:    uploadPath,\n\t\tprivateKey:    
privateKey,\n\t\tjwtSigningAlg: jwtSigningAlg,\n\t}, nil\n}\n\n// ParseVenafiCredentials reads credentials into a VenafiSvcAccountCredentials struct. Performs validations.\nfunc ParseVenafiCredentials(data []byte) (*VenafiSvcAccountCredentials, error) {\n\tvar credentials VenafiSvcAccountCredentials\n\n\terr := json.Unmarshal(data, &credentials)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = credentials.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &credentials, nil\n}\n\nfunc (c *VenafiSvcAccountCredentials) Validate() error {\n\tvar result *multierror.Error\n\n\tif c == nil {\n\t\treturn fmt.Errorf(\"credentials are nil\")\n\t}\n\n\tif c.ClientID == \"\" {\n\t\tresult = multierror.Append(result, fmt.Errorf(\"client_id cannot be empty\"))\n\t}\n\n\tif c.PrivateKeyFile == \"\" {\n\t\tresult = multierror.Append(result, fmt.Errorf(\"private_key_file cannot be empty\"))\n\t}\n\n\treturn result.ErrorOrNil()\n}\n\n// IsClientSet returns whether the client credentials are set or not. 
`why` is\n// only returned when `ok` is false.\nfunc (c *VenafiSvcAccountCredentials) IsClientSet() (ok bool, why string) {\n\tif c.ClientID == \"\" {\n\t\treturn false, \"ClientID is empty\"\n\t}\n\tif c.PrivateKeyFile == \"\" {\n\t\treturn false, \"PrivateKeyFile is empty\"\n\t}\n\n\treturn true, \"\"\n}\n\n// PostDataReadingsWithOptions uploads the slice of api.DataReading to the Venafi Cloud backend to be processed.\n// The Options are then passed as URL params in the request\nfunc (c *VenafiCloudClient) PostDataReadingsWithOptions(ctx context.Context, readings []*api.DataReading, opts Options) error {\n\tpayload := api.DataReadingsPost{\n\t\tAgentMetadata:  c.agentMetadata,\n\t\tDataGatherTime: time.Now().UTC(),\n\t\tDataReadings:   readings,\n\t}\n\tdata, err := json.Marshal(payload)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !strings.HasSuffix(c.uploadPath, \"/\") {\n\t\tc.uploadPath = fmt.Sprintf(\"%s/\", c.uploadPath)\n\t}\n\n\tvenafiCloudUploadURL, err := url.Parse(filepath.Join(c.uploadPath, c.uploaderID))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// validate options and send them as URL params\n\tquery := venafiCloudUploadURL.Query()\n\tstripHTML := bluemonday.StrictPolicy()\n\tif opts.ClusterName != \"\" {\n\t\tquery.Add(\"name\", stripHTML.Sanitize(opts.ClusterName))\n\t}\n\tif opts.ClusterDescription != \"\" {\n\t\tquery.Add(\"description\", base64.RawURLEncoding.EncodeToString([]byte(stripHTML.Sanitize(opts.ClusterDescription))))\n\t}\n\tvenafiCloudUploadURL.RawQuery = query.Encode()\n\n\tklog.FromContext(ctx).V(2).Info(\n\t\t\"uploading data readings\",\n\t\t\"url\", venafiCloudUploadURL.String(),\n\t\t\"cluster_name\", opts.ClusterName,\n\t\t\"data_readings_count\", len(readings),\n\t\t\"data_size_bytes\", len(data),\n\t)\n\n\tres, err := c.post(ctx, venafiCloudUploadURL.String(), bytes.NewBuffer(data))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\n\tif code := res.StatusCode; code < 200 || code >= 300 
{\n\t\terrorContent := \"\"\n\t\tbody, err := io.ReadAll(res.Body)\n\t\tif err == nil {\n\t\t\terrorContent = string(body)\n\t\t}\n\t\treturn fmt.Errorf(\"received response with status code %d. Body: [%s]\", code, errorContent)\n\t}\n\n\treturn nil\n}\n\n// Post performs an HTTP POST request.\nfunc (c *VenafiCloudClient) post(ctx context.Context, path string, body io.Reader) (*http.Response, error) {\n\ttoken, err := c.getValidAccessToken(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequestWithContext(ctx, http.MethodPost, fullURL(c.baseURL, path), body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tversion.SetUserAgent(req)\n\n\tif len(token.accessToken) > 0 {\n\t\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", token.accessToken))\n\t}\n\n\treturn c.Client.Do(req)\n}\n\n// getValidAccessToken returns a valid access token. It will fetch a new access\n// token from the auth server in case the current access token does not exist\n// or it is expired.\nfunc (c *VenafiCloudClient) getValidAccessToken(ctx context.Context) (*venafiCloudAccessToken, error) {\n\tc.lock.RLock()\n\tneedsUpdate := c.accessToken == nil || time.Now().Add(time.Minute).After(c.accessToken.expirationTime)\n\tc.lock.RUnlock()\n\n\tif needsUpdate {\n\t\terr := c.updateAccessToken(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tc.lock.RLock()\n\ttoken := c.accessToken\n\tc.lock.RUnlock()\n\n\treturn token, nil\n}\n\nfunc (c *VenafiCloudClient) updateAccessToken(ctx context.Context) error {\n\tjwtToken, err := c.generateAndSignJwtToken()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvalues := url.Values{}\n\tvalues.Set(\"grant_type\", requiredGrantType)\n\tvalues.Set(\"assertion\", jwtToken)\n\n\ttokenURL := fullURL(c.baseURL, accessTokenEndpoint)\n\n\tencoded := values.Encode()\n\trequest, err := http.NewRequestWithContext(ctx, 
http.MethodPost, tokenURL, strings.NewReader(encoded))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trequest.Header.Add(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\trequest.Header.Add(\"Content-Length\", strconv.Itoa(len(encoded)))\n\tversion.SetUserAgent(request)\n\n\tnow := time.Now()\n\taccessToken := accessTokenInformation{}\n\terr = c.sendHTTPRequest(request, &accessToken)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.lock.Lock()\n\tc.accessToken = &venafiCloudAccessToken{\n\t\taccessToken:    accessToken.AccessToken,\n\t\texpirationTime: now.Add(time.Duration(accessToken.ExpiresIn) * time.Second),\n\t}\n\tc.lock.Unlock()\n\treturn nil\n}\n\nfunc (c *VenafiCloudClient) sendHTTPRequest(request *http.Request, responseObject any) error {\n\tresponse, err := c.Client.Do(request)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer response.Body.Close()\n\n\tif response.StatusCode != http.StatusOK && response.StatusCode != http.StatusCreated {\n\t\tbody, _ := io.ReadAll(response.Body)\n\t\treturn fmt.Errorf(\"failed to execute http request to the Control Plane. 
Request %s, status code: %d, body: [%s]\", request.URL, response.StatusCode, body)\n\t}\n\n\tbody, err := io.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = json.Unmarshal(body, responseObject); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *VenafiCloudClient) generateAndSignJwtToken() (string, error) {\n\tprodURL, err := url.Parse(VenafiCloudProdURL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tclaims := make(jwt.MapClaims)\n\tclaims[\"sub\"] = c.credentials.ClientID\n\tclaims[\"iss\"] = c.credentials.ClientID\n\tclaims[\"iat\"] = time.Now().Unix()\n\tclaims[\"exp\"] = time.Now().Add(time.Minute).Unix()\n\tclaims[\"aud\"] = path.Join(prodURL.Host, accessTokenEndpoint)\n\tclaims[\"jti\"] = uuid.New().String()\n\n\ttoken, err := jwt.NewWithClaims(c.jwtSigningAlg, claims).SignedString(c.privateKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn token, nil\n}\n"
  },
  {
    "path": "pkg/client/client_venconn.go",
    "content": "package client\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/x509\"\n\t\"encoding/base64\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"time\"\n\n\tvenapi \"github.com/jetstack/venafi-connection-lib/api/v1alpha1\"\n\t\"github.com/jetstack/venafi-connection-lib/chain/sources/venafi\"\n\t\"github.com/jetstack/venafi-connection-lib/venafi_client\"\n\tcorev1 \"k8s.io/api/core/v1\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/types\"\n\t\"k8s.io/client-go/rest\"\n\t\"k8s.io/client-go/transport\"\n\t\"k8s.io/klog/v2\"\n\t\"sigs.k8s.io/controller-runtime/pkg/client/apiutil\"\n\n\t\"github.com/jetstack/preflight/api\"\n\t\"github.com/jetstack/preflight/pkg/version\"\n)\n\ntype VenConnClient struct {\n\tagentMetadata *api.AgentMetadata\n\tconnHandler   venafi_client.ConnectionHandler\n\tinstallNS     string // Namespace in which the agent is running in.\n\tvenConnName   string // Name of the VenafiConnection resource to use.\n\tvenConnNS     string // Namespace of the VenafiConnection resource to use.\n\n\t// Used to make HTTP requests to Venafi Cloud. This field is public for\n\t// testing purposes so that we can configure trusted CAs; there should be a\n\t// way to do that without messing with the client directly (e.g., a flag to\n\t// pass a custom CA?), but it's not there yet.\n\tClient *http.Client\n}\n\n// NewVenConnClient lets you make requests to the Venafi Cloud backend using the\n// given VenafiConnection resource.\n//\n// You need to call Start to start watching the VenafiConnection resource. If\n// you don't, the client will be unable to find the VenafiConnection that you\n// are referring to as its client-go cache will remain empty.\n//\n// The http.Client is used for Venafi and Vault, not for Kubernetes. The\n// `installNS` is the namespace in which the agent is running in and cannot be\n// empty. `venConnName` and `venConnNS` must not be empty either. 
The passed\n// `restcfg` is not mutated. `trustedCAs` is only used for connecting to Venafi\n// Cloud and Vault and can be left nil.\nfunc NewVenConnClient(restcfg *rest.Config, agentMetadata *api.AgentMetadata, installNS, venConnName, venConnNS string, trustedCAs *x509.CertPool) (*VenConnClient, error) {\n\tif installNS == \"\" {\n\t\treturn nil, errors.New(\"programmer mistake: installNS must be provided\")\n\t}\n\tif venConnName == \"\" {\n\t\treturn nil, errors.New(\"programmer mistake: venConnName must be provided\")\n\t}\n\tif venConnNS == \"\" {\n\t\treturn nil, errors.New(\"programmer mistake: venConnNS must be provided\")\n\t}\n\n\trestcfg = rest.CopyConfig(restcfg)\n\trestcfg.Impersonate = rest.ImpersonationConfig{\n\t\tUserName: fmt.Sprintf(\"system:serviceaccount:%s:venafi-connection\", installNS),\n\t}\n\n\t// TLS-related configuration such as root CAs and client certs are contained\n\t// in the restcfg; let's create an http.Client that uses them.\n\thttpCl, err := rest.HTTPClientFor(restcfg)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"while turning the REST config into an HTTP client: %w\", err)\n\t}\n\n\trestMapper, err := apiutil.NewDynamicRESTMapper(restcfg, httpCl)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"while creating the REST mapper: %w\", err)\n\t}\n\n\t// This Kubernetes client only needs to be able to read and write the\n\t// VenafiConnection resources and read Secret resources.\n\tscheme := runtime.NewScheme()\n\t_ = venapi.AddToScheme(scheme)\n\t_ = corev1.AddToScheme(scheme)\n\n\tvar unusedTPPDefaultClientID string\n\n\thandler, err := venafi_client.NewConnectionHandler(\n\t\tversion.UserAgent(),\n\t\t\"venafi-kubernetes-agent.jetstack.io\",\n\t\t\"VenafiKubernetesAgent\",\n\t\tunusedTPPDefaultClientID,\n\t\trestcfg,\n\t\tscheme,\n\t\trestMapper,\n\t\ttrustedCAs,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvcpClient := &http.Client{}\n\ttr := http.DefaultTransport.(*http.Transport).Clone()\n\tif trustedCAs != nil 
{\n\t\ttr.TLSClientConfig.RootCAs = trustedCAs\n\t}\n\tvcpClient.Transport = transport.DebugWrappers(tr)\n\n\treturn &VenConnClient{\n\t\tagentMetadata: agentMetadata,\n\t\tconnHandler:   handler,\n\t\tinstallNS:     installNS,\n\t\tvenConnName:   venConnName,\n\t\tvenConnNS:     venConnNS,\n\t\tClient:        vcpClient,\n\t}, nil\n}\n\n// Start starts watching VenafiConnections. This function will return soon after\n// the context is closed, or if an error occurs.\nfunc (c *VenConnClient) Start(ctx context.Context) error {\n\treturn c.connHandler.CacheRunnable().Start(ctx)\n}\n\n// `opts.ClusterName` and `opts.ClusterDescription` are the only values used\n// from the Options struct. OrgID and ClusterID are not used in Venafi Cloud.\nfunc (c *VenConnClient) PostDataReadingsWithOptions(ctx context.Context, readings []*api.DataReading, opts Options) error {\n\tif opts.ClusterName == \"\" {\n\t\treturn fmt.Errorf(\"programmer mistake: the cluster name (aka `cluster_id` in the config file) cannot be left empty\")\n\t}\n\n\t_, details, err := c.connHandler.Get(ctx, c.installNS, venafi.Scope{}, types.NamespacedName{Name: c.venConnName, Namespace: c.venConnNS})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"while loading the VenafiConnection %s/%s: %w\", c.venConnNS, c.venConnName, err)\n\t}\n\tif details.TPP != nil {\n\t\treturn fmt.Errorf(`VenafiConnection %s/%s: the agent cannot be used with TPP`, c.venConnNS, c.venConnName)\n\t}\n\tif details.VCP != nil && details.VCP.APIKey != \"\" {\n\t\t// Although it is technically possible to use an API key, we have\n\t\t// decided to not allow it as it isn't recommended and will eventually\n\t\t// be phased out.\n\t\treturn fmt.Errorf(`VenafiConnection %s/%s: the agent cannot be used with an API key`, c.venConnNS, c.venConnName)\n\t}\n\tif details.VCP == nil || details.VCP.AccessToken == \"\" {\n\t\treturn fmt.Errorf(`programmer mistake: VenafiConnection %s/%s: TPPAccessToken is empty in the token returned by connHandler.Get: %v`, 
c.venConnNS, c.venConnName, details)\n\t}\n\n\tpayload := api.DataReadingsPost{\n\t\tAgentMetadata:  c.agentMetadata,\n\t\tDataGatherTime: time.Now().UTC(),\n\t\tDataReadings:   readings,\n\t}\n\tdata, err := json.Marshal(payload)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tklog.FromContext(ctx).V(2).Info(\n\t\t\"uploading data readings\",\n\t\t\"url\", fullURL(details.VCP.URL, \"/v1/tlspk/upload/clusterdata/no\"),\n\t\t\"cluster_name\", opts.ClusterName,\n\t\t\"data_readings_count\", len(readings),\n\t\t\"data_size_bytes\", len(data),\n\t)\n\n\t// The path parameter \"no\" is a dummy parameter to make the Venafi Cloud\n\t// backend happy. This parameter, named `uploaderID` in the backend, is not\n\t// actually used by the backend.\n\treq, err := http.NewRequestWithContext(ctx, http.MethodPost, fullURL(details.VCP.URL, \"/v1/tlspk/upload/clusterdata/no\"), bytes.NewReader(data))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", details.VCP.AccessToken))\n\tversion.SetUserAgent(req)\n\n\tq := req.URL.Query()\n\tq.Set(\"name\", opts.ClusterName)\n\tif opts.ClusterDescription != \"\" {\n\t\tq.Set(\"description\", base64.RawURLEncoding.EncodeToString([]byte(opts.ClusterDescription)))\n\t}\n\treq.URL.RawQuery = q.Encode()\n\n\tres, err := c.Client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\n\tif code := res.StatusCode; code < 200 || code >= 300 {\n\t\terrorContent := \"\"\n\t\tbody, err := io.ReadAll(res.Body)\n\t\tif err == nil {\n\t\t\terrorContent = string(body)\n\t\t}\n\n\t\treturn fmt.Errorf(\"received response with status code %d. Body: [%s]\", code, errorContent)\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/client/client_venconn_test.go",
    "content": "package client_test\n\nimport (\n\t\"context\"\n\t\"crypto/x509\"\n\t\"net/http\"\n\t\"regexp\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/jetstack/venafi-connection-lib/api/v1alpha1\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"k8s.io/apimachinery/pkg/types\"\n\t\"k8s.io/client-go/rest\"\n\t\"k8s.io/klog/v2\"\n\t\"k8s.io/klog/v2/ktesting\"\n\tctrlruntime \"sigs.k8s.io/controller-runtime/pkg/client\"\n\n\t\"github.com/jetstack/preflight/api\"\n\t\"github.com/jetstack/preflight/pkg/client\"\n\t\"github.com/jetstack/preflight/pkg/testutil\"\n)\n\n// These are using envtest (slow) rather than a fake clientset (fast) because\n// controller-runtime's fake clientset doesn't support server-side apply [1] and\n// also because we want to create serviceaccount tokens, which isn't supported\n// by the fake clientset either.\n//\n// The goal is to test the following behaviors:\n//\n//   - VenafiConnection's `accessToken` works as expected with a fake Venafi\n//     Cloud server.\n//   - VenafiConnection's `apiKey` and `tpp` can't be used by the user.\n//   - NewVenConnClient's `trustedCAs` works as expected.\n//\n// [1] https://github.com/kubernetes-sigs/controller-runtime/issues/2341\nfunc TestVenConnClient_PostDataReadingsWithOptions(t *testing.T) {\n\tctx, cancel := context.WithCancel(t.Context())\n\tdefer cancel()\n\tlog := ktesting.NewLogger(t, ktesting.NewConfig(ktesting.Verbosity(10)))\n\tctx = klog.NewContext(ctx, log)\n\t_, restconf, kclient := testutil.WithEnvtest(t)\n\tfor _, obj := range testutil.Parse(testutil.VenConnRBAC) {\n\t\trequire.NoError(t, kclient.Create(ctx, obj))\n\t}\n\tt.Parallel()\n\n\tt.Run(\"valid accessToken\", run_TestVenConnClient_PostDataReadingsWithOptions(ctx, restconf, kclient, testcase{\n\t\tgiven: testutil.Undent(`\n\t\t\tapiVersion: jetstack.io/v1alpha1\n\t\t\tkind: VenafiConnection\n\t\t\tmetadata:\n\t\t\t  name: venafi-components\n\t\t\t  namespace: 
TEST_NAMESPACE\n\t\t\tspec:\n\t\t\t  vcp:\n\t\t\t    url: FAKE_VENAFI_CLOUD_URL\n\t\t\t    accessToken:\n\t\t\t      - secret:\n\t\t\t          name: accesstoken\n\t\t\t          fields: [accesstoken]\n\t\t\t  allowReferencesFrom:\n\t\t\t    matchExpressions:\n\t\t\t      - {key: kubernetes.io/metadata.name, operator: In, values: [venafi]}\n\t\t\t---\n\t\t\tapiVersion: v1\n\t\t\tkind: Secret\n\t\t\tmetadata:\n\t\t\t  name: accesstoken\n\t\t\t  namespace: TEST_NAMESPACE\n\t\t\tstringData:\n\t\t\t  accesstoken: VALID_ACCESS_TOKEN\n\t\t\t---\n\t\t\tapiVersion: rbac.authorization.k8s.io/v1\n\t\t\tkind: Role\n\t\t\tmetadata:\n\t\t\t  name: venafi-connection-accesstoken-reader\n\t\t\t  namespace: TEST_NAMESPACE\n\t\t\trules:\n\t\t\t- apiGroups: [\"\"]\n\t\t\t  resources: [\"secrets\"]\n\t\t\t  verbs: [\"get\"]\n\t\t\t  resourceNames: [\"accesstoken\"]\n\t\t\t---\n\t\t\tapiVersion: rbac.authorization.k8s.io/v1\n\t\t\tkind: RoleBinding\n\t\t\tmetadata:\n\t\t\t  name: venafi-connection-accesstoken-reader\n\t\t\t  namespace: TEST_NAMESPACE\n\t\t\troleRef:\n\t\t\t  apiGroup: rbac.authorization.k8s.io\n\t\t\t  kind: Role\n\t\t\t  name: venafi-connection-accesstoken-reader\n\t\t\tsubjects:\n\t\t\t- kind: ServiceAccount\n\t\t\t  name: venafi-connection\n\t\t\t  namespace: venafi\n\t\t`),\n\t\texpectReadyCondMsg: \"Generated a new token\",\n\t}))\n\tt.Run(\"error when the apiKey field is used\", run_TestVenConnClient_PostDataReadingsWithOptions(ctx, restconf, kclient, testcase{\n\t\t// Why isn't it possible to use the 'apiKey' field? 
Although the\n\t\t// Kubernetes Discovery endpoint works with an API key, we have decided\n\t\t// to not support it because it isn't recommended.\n\t\tgiven: testutil.Undent(`\n\t\t\tapiVersion: jetstack.io/v1alpha1\n\t\t\tkind: VenafiConnection\n\t\t\tmetadata:\n\t\t\t  name: venafi-components\n\t\t\t  namespace: TEST_NAMESPACE\n\t\t\tspec:\n\t\t\t  vcp:\n\t\t\t    url: FAKE_VENAFI_CLOUD_URL\n\t\t\t    apiKey:\n\t\t\t      - secret:\n\t\t\t          name: apikey\n\t\t\t          fields: [apikey]\n\t\t\t  allowReferencesFrom:\n\t\t\t    matchExpressions:\n\t\t\t      - {key: kubernetes.io/metadata.name, operator: In, values: [venafi]}\n\t\t\t---\n\t\t\tapiVersion: v1\n\t\t\tkind: Secret\n\t\t\tmetadata:\n\t\t\t  name: apikey\n\t\t\t  namespace: TEST_NAMESPACE\n\t\t\tstringData:\n\t\t\t  apikey: VALID_API_KEY\n\t\t\t---\n\t\t\tapiVersion: rbac.authorization.k8s.io/v1\n\t\t\tkind: Role\n\t\t\tmetadata:\n\t\t\t  name: venafi-connection-apikey-reader\n\t\t\t  namespace: TEST_NAMESPACE\n\t\t\trules:\n\t\t\t- apiGroups: [\"\"]\n\t\t\t  resources: [\"secrets\"]\n\t\t\t  verbs: [\"get\"]\n\t\t\t  resourceNames: [\"apikey\"]\n\t\t\t---\n\t\t\tapiVersion: rbac.authorization.k8s.io/v1\n\t\t\tkind: RoleBinding\n\t\t\tmetadata:\n\t\t\t  name: venafi-connection-apikey-reader\n\t\t\t  namespace: TEST_NAMESPACE\n\t\t\troleRef:\n\t\t\t  apiGroup: rbac.authorization.k8s.io\n\t\t\t  kind: Role\n\t\t\t  name: venafi-connection-apikey-reader\n\t\t\tsubjects:\n\t\t\t- kind: ServiceAccount\n\t\t\t  name: venafi-connection\n\t\t\t  namespace: venafi\n\t\t`),\n\t\t// PostDataReadingsWithOptions failed, but Get succeeded; that's why the\n\t\t// condition says the VenafiConnection is ready.\n\t\texpectReadyCondMsg: \"Generated a new token\",\n\t\texpectErr:          \"VenafiConnection error-when-the-apikey-field-is-used/venafi-components: the agent cannot be used with an API key\",\n\t}))\n\tt.Run(\"error when the tpp field is used\", run_TestVenConnClient_PostDataReadingsWithOptions(ctx, 
restconf, kclient, testcase{\n\t\t// IMPORTANT: The user may think they can use 'tpp', spend time\n\t\t// debugging and making the venafi connection work, and then find out\n\t\t// that it doesn't work. The reason is because as of now, we don't first\n\t\t// check if the user has used the 'tpp' field before running Get.\n\t\tgiven: testutil.Undent(`\n\t\t\tapiVersion: jetstack.io/v1alpha1\n\t\t\tkind: VenafiConnection\n\t\t\tmetadata:\n\t\t\t  name: venafi-components\n\t\t\t  namespace: TEST_NAMESPACE\n\t\t\tspec:\n\t\t\t  tpp:\n\t\t\t    url: FAKE_TPP_URL\n\t\t\t    accessToken:\n\t\t\t      - secret:\n\t\t\t          name: accesstoken\n\t\t\t          fields: [accesstoken]\n\t\t\t  allowReferencesFrom:\n\t\t\t    matchExpressions:\n\t\t\t      - {key: kubernetes.io/metadata.name, operator: In, values: [venafi]}\n\t\t\t---\n\t\t\tapiVersion: v1\n\t\t\tkind: Secret\n\t\t\tmetadata:\n\t\t\t  name: accesstoken\n\t\t\t  namespace: TEST_NAMESPACE\n\t\t\tstringData:\n\t\t\t  accesstoken: VALID_ACCESS_TOKEN\n\t\t\t---\n\t\t\tapiVersion: rbac.authorization.k8s.io/v1\n\t\t\tkind: Role\n\t\t\tmetadata:\n\t\t\t  name: venafi-connection-accesstoken-reader\n\t\t\t  namespace: TEST_NAMESPACE\n\t\t\trules:\n\t\t\t- apiGroups: [\"\"]\n\t\t\t  resources: [\"secrets\"]\n\t\t\t  verbs: [\"get\"]\n\t\t\t  resourceNames: [\"accesstoken\"]\n\t\t\t---\n\t\t\tapiVersion: rbac.authorization.k8s.io/v1\n\t\t\tkind: RoleBinding\n\t\t\tmetadata:\n\t\t\t  name: venafi-connection-accesstoken-reader\n\t\t\t  namespace: TEST_NAMESPACE\n\t\t\troleRef:\n\t\t\t  apiGroup: rbac.authorization.k8s.io\n\t\t\t  kind: Role\n\t\t\t  name: venafi-connection-accesstoken-reader\n\t\t\tsubjects:\n\t\t\t- kind: ServiceAccount\n\t\t\t  name: venafi-connection\n\t\t\t  namespace: venafi\n\t\t`),\n\t\texpectReadyCondMsg: \"Generated a new token\",\n\t\texpectErr:          \"VenafiConnection error-when-the-tpp-field-is-used/venafi-components: the agent cannot be used with TPP\",\n\t}))\n}\n\ntype testcase struct 
{\n\tgiven              string\n\texpectErr          string\n\texpectReadyCondMsg string\n}\n\n// All tests share the same envtest (i.e., the same apiserver and etcd process),\n// so each test needs to be contained in its own Kubernetes namespace.\nfunc run_TestVenConnClient_PostDataReadingsWithOptions(ctx context.Context, restcfg *rest.Config, kclient ctrlruntime.WithWatch, test testcase) func(t *testing.T) {\n\treturn func(t *testing.T) {\n\t\tt.Helper()\n\t\tlog := ktesting.NewLogger(t, ktesting.NewConfig(ktesting.Verbosity(10)))\n\t\tctx := klog.NewContext(ctx, log)\n\t\tfakeVenafiCloud, certCloud, fakeVenafiAssert := testutil.FakeVenafiCloud(t)\n\t\tfakeTPP, certTPP := testutil.FakeTPP(t)\n\t\tfakeVenafiAssert(func(t testing.TB, r *http.Request) {\n\t\t\tif r.URL.Path == \"/v1/useraccounts\" {\n\t\t\t\treturn // We only care about /v1/tlspk/upload/clusterdata.\n\t\t\t}\n\t\t\t// Let's make sure we didn't forget to add the arbitrary \"/no\"\n\t\t\t// (uploader_id) path segment to /v1/tlspk/upload/clusterdata.\n\t\t\tassert.Equal(t, \"/v1/tlspk/upload/clusterdata/no\", r.URL.Path)\n\t\t})\n\n\t\tcertPool := x509.NewCertPool()\n\t\tcertPool.AddCert(certCloud)\n\t\tcertPool.AddCert(certTPP)\n\n\t\tcl, err := client.NewVenConnClient(\n\t\t\trestcfg,\n\t\t\t&api.AgentMetadata{ClusterID: \"no\"},\n\t\t\t\"venafi\",               // Namespace in which the Agent is running.\n\t\t\t\"venafi-components\",    // Name of the VenafiConnection.\n\t\t\ttestNameToNamespace(t), // Namespace of the VenafiConnection.\n\t\t\tcertPool,\n\t\t)\n\t\trequire.NoError(t, err)\n\n\t\ttestutil.VenConnStartWatching(ctx, t, cl)\n\n\t\ttest.given = strings.ReplaceAll(test.given, \"FAKE_VENAFI_CLOUD_URL\", fakeVenafiCloud.URL)\n\t\ttest.given = strings.ReplaceAll(test.given, \"FAKE_TPP_URL\", fakeTPP.URL)\n\t\ttest.given = strings.ReplaceAll(test.given, \"TEST_NAMESPACE\", testNameToNamespace(t))\n\n\t\tvar givenObjs []ctrlruntime.Object\n\t\tgivenObjs = append(givenObjs, 
testutil.Parse(testutil.Undent(`\n\t\t\tapiVersion: v1\n\t\t\tkind: Namespace\n\t\t\tmetadata:\n\t\t\t  name: `+testNameToNamespace(t)))...)\n\t\tgivenObjs = append(givenObjs, testutil.Parse(test.given)...)\n\t\tfor _, obj := range givenObjs {\n\t\t\trequire.NoError(t, kclient.Create(ctx, obj))\n\t\t}\n\t\terr = cl.PostDataReadingsWithOptions(ctx, []*api.DataReading{}, client.Options{ClusterName: \"test cluster name\"})\n\t\tif test.expectErr != \"\" {\n\t\t\tassert.EqualError(t, err, test.expectErr)\n\t\t} else {\n\t\t\trequire.NoError(t, err)\n\t\t}\n\n\t\tgot := v1alpha1.VenafiConnection{}\n\t\terr = kclient.Get(ctx, types.NamespacedName{Name: \"venafi-components\", Namespace: testNameToNamespace(t)}, &got)\n\t\trequire.NoError(t, err)\n\t\trequire.Len(t, got.Status.Conditions, 1)\n\t\tassert.Equal(t, test.expectReadyCondMsg, got.Status.Conditions[0].Message)\n\t}\n}\n\n// Because we want valid namespaces for each of the tests, this func converts a\n// test name into a valid Kubernetes namespace (i.e., a DNS label as per RFC\n// 1123, including trimming to 63 chars).\n//\n// For example, the test name:\n//\n//\tTest/sub test has special chars ':\"-;@# and is also super super super super long!\n//\n// will be converted to:\n//\n//\tsub-test-has-special-chars-and-is-also-super-super-super-super-\n//\n// Only the last part of the test name is used.\n//\n// nolint:dupword\nfunc testNameToNamespace(t testing.TB) string {\n\tregex := regexp.MustCompile(\"[^a-zA-Z0-9-]\")\n\n\t// Only keep the part after the last slash.\n\tparts := strings.Split(t.Name(), \"/\")\n\tif len(parts) == 0 {\n\t\treturn \"\"\n\t}\n\n\ts := parts[len(parts)-1]\n\ts = strings.ToLower(s)\n\ts = strings.ReplaceAll(s, \"_\", \"-\")\n\ts = regex.ReplaceAllString(s, \"\")\n\ts = strings.TrimLeft(s, \"-\")\n\ts = strings.TrimRight(s, \"-\")\n\treturn s\n}\n"
  },
  {
    "path": "pkg/client/util.go",
    "content": "package client\n\nimport (\n\t\"crypto\"\n\t\"crypto/ecdsa\"\n\t\"crypto/ed25519\"\n\t\"crypto/rsa\"\n\t\"crypto/x509\"\n\t\"encoding/pem\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/golang-jwt/jwt/v4\"\n)\n\n// parsePrivateKeyFromPEMFile reads and parses a PEM-encoded private key file.\nfunc parsePrivateKeyFromPEMFile(privateKeyFilePath string) (crypto.PrivateKey, error) {\n\tpkBytes, err := os.ReadFile(privateKeyFilePath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to fetch private key %q: %s\",\n\t\t\tprivateKeyFilePath, err)\n\t}\n\n\tder, _ := pem.Decode(pkBytes)\n\tif der == nil {\n\t\treturn nil, fmt.Errorf(\"while decoding the PEM-encoded private key %v, its content were: %s\", privateKeyFilePath, string(pkBytes))\n\t}\n\n\tif key, err := x509.ParsePKCS1PrivateKey(der.Bytes); err == nil {\n\t\treturn key, nil\n\t}\n\tif key, err := x509.ParsePKCS8PrivateKey(der.Bytes); err == nil {\n\t\tswitch key := key.(type) {\n\t\tcase *rsa.PrivateKey, *ecdsa.PrivateKey, ed25519.PrivateKey:\n\t\t\treturn key, nil\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"found unknown private key type in PKCS#8 wrapping: %T\", key)\n\t\t}\n\t}\n\tif key, err := x509.ParseECPrivateKey(der.Bytes); err == nil {\n\t\treturn key, nil\n\t}\n\treturn nil, fmt.Errorf(\"while parsing EC private: %w\", err)\n}\n\n// parsePrivateKeyAndExtractSigningMethod parses a private key file and determines\n// the appropriate JWT signing method based on the key type and size.\nfunc parsePrivateKeyAndExtractSigningMethod(privateKeyFile string) (crypto.PrivateKey, jwt.SigningMethod, error) {\n\tprivateKey, err := parsePrivateKeyFromPEMFile(privateKeyFile)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar signingMethod jwt.SigningMethod\n\tswitch key := privateKey.(type) {\n\tcase *rsa.PrivateKey:\n\t\tbitLen := key.N.BitLen()\n\t\tswitch bitLen {\n\t\tcase 2048:\n\t\t\tsigningMethod = jwt.SigningMethodRS256\n\t\tcase 3072:\n\t\t\tsigningMethod = 
jwt.SigningMethodRS384\n\t\tcase 4096:\n\t\t\tsigningMethod = jwt.SigningMethodRS512\n\t\tdefault:\n\t\t\tsigningMethod = jwt.SigningMethodRS256\n\t\t}\n\n\tcase *ecdsa.PrivateKey:\n\t\tbitLen := key.Curve.Params().BitSize\n\t\tswitch bitLen {\n\t\tcase 256:\n\t\t\tsigningMethod = jwt.SigningMethodES256\n\t\tcase 384:\n\t\t\tsigningMethod = jwt.SigningMethodES384\n\t\tcase 521:\n\t\t\tsigningMethod = jwt.SigningMethodES512\n\t\tdefault:\n\t\t\tsigningMethod = jwt.SigningMethodES256\n\t\t}\n\n\tcase ed25519.PrivateKey:\n\t\tsigningMethod = jwt.SigningMethodEdDSA\n\n\tdefault:\n\t\terr = fmt.Errorf(\"unsupported private key type\")\n\t}\n\treturn privateKey, signingMethod, err\n}\n"
  },
  {
    "path": "pkg/datagatherer/datagatherer.go",
    "content": "// Package datagatherer provides the DataGatherer interface.\npackage datagatherer\n\nimport \"context\"\n\n// Config is the configuration of a DataGatherer.\ntype Config interface {\n\t// NewDataGatherer constructs a DataGatherer with a specific configuration.\n\tNewDataGatherer(ctx context.Context) (DataGatherer, error)\n}\n\n// DataGatherer is the interface for Data Gatherers. Data Gatherers are in charge of fetching data from a certain cloud provider API or Kubernetes component.\ntype DataGatherer interface {\n\t// Fetch retrieves data.\n\t// count is the number of items that were discovered. A negative count means the number\n\t// of items was indeterminate.\n\tFetch(ctx context.Context) (data any, count int, err error)\n\t// Run starts the data gatherer's informers for resource collection.\n\t// Returns error if the data gatherer informer wasn't initialized.\n\tRun(ctx context.Context) error\n\t// WaitForCacheSync waits for the data gatherer's informers cache to sync.\n\tWaitForCacheSync(ctx context.Context) error\n}\n"
  },
  {
    "path": "pkg/datagatherer/k8sdiscovery/discovery.go",
    "content": "package k8sdiscovery\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/discovery\"\n\n\t\"github.com/jetstack/preflight/api\"\n\t\"github.com/jetstack/preflight/pkg/datagatherer\"\n\t\"github.com/jetstack/preflight/pkg/kubeconfig\"\n)\n\n// ConfigDiscovery contains the configuration for the k8s-discovery data-gatherer\ntype ConfigDiscovery struct {\n\t// KubeConfigPath is the path to the kubeconfig file. If empty, will assume it runs in-cluster.\n\tKubeConfigPath string `yaml:\"kubeconfig\"`\n}\n\n// UnmarshalYAML unmarshals the Config resolving GroupVersionResource.\nfunc (c *ConfigDiscovery) UnmarshalYAML(unmarshal func(any) error) error {\n\taux := struct {\n\t\tKubeConfigPath string `yaml:\"kubeconfig\"`\n\t}{}\n\terr := unmarshal(&aux)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.KubeConfigPath = aux.KubeConfigPath\n\n\treturn nil\n}\n\n// NewDataGatherer constructs a new instance of the generic K8s data-gatherer for the provided\n// GroupVersionResource.\n// It gets the UID of the 'kube-system' namespace to use as the cluster ID, once at startup.\n// The UID is assumed to be stable for the lifetime of the cluster.\n// - https://github.com/kubernetes/kubernetes/issues/77487#issuecomment-489786023\nfunc (c *ConfigDiscovery) NewDataGatherer(ctx context.Context) (datagatherer.DataGatherer, error) {\n\tcl, err := kubeconfig.NewDiscoveryClient(c.KubeConfigPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcs, err := kubeconfig.NewClientSet(c.KubeConfigPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"while creating new clientset: %s\", err)\n\t}\n\tkubesystemNS, err := cs.CoreV1().Namespaces().Get(ctx, \"kube-system\", metav1.GetOptions{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"while getting the kube-system namespace: %s\", err)\n\t}\n\treturn &DataGathererDiscovery{\n\t\tcl:        cl,\n\t\tclusterID: string(kubesystemNS.UID),\n\t}, nil\n}\n\n// DataGathererDiscovery 
stores the config for a k8s-discovery datagatherer\ntype DataGathererDiscovery struct {\n\t// The 'discovery' client used for fetching data.\n\tcl *discovery.DiscoveryClient\n\t// The cluster ID, derived from the UID of the 'kube-system' namespace.\n\tclusterID string\n}\n\nfunc (g *DataGathererDiscovery) Run(ctx context.Context) error {\n\t// no async functionality, see Fetch\n\treturn nil\n}\n\nfunc (g *DataGathererDiscovery) WaitForCacheSync(ctx context.Context) error {\n\t// no async functionality, see Fetch\n\treturn nil\n}\n\n// Fetch will fetch discovery data from the apiserver, or return an error\nfunc (g *DataGathererDiscovery) Fetch(ctx context.Context) (any, int, error) {\n\tdata, err := g.cl.ServerVersion()\n\tif err != nil {\n\t\treturn nil, -1, fmt.Errorf(\"failed to get server version: %v\", err)\n\t}\n\tresponse := &api.DiscoveryData{\n\t\tClusterID:     g.clusterID,\n\t\tServerVersion: data,\n\t}\n\treturn response, 1, nil\n}\n"
  },
  {
    "path": "pkg/datagatherer/k8sdynamic/cache.go",
    "content": "package k8sdynamic\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/go-logr/logr\"\n\t\"github.com/pmylund/go-cache\"\n\t\"k8s.io/apimachinery/pkg/types\"\n\n\t\"github.com/jetstack/preflight/api\"\n)\n\n// time interface, this is used to fetch the current time\n// whenever a k8s resource is deleted\ntype timeInterface interface {\n\tnow() time.Time\n}\n\nvar clock timeInterface = &realTime{}\n\ntype realTime struct {\n}\n\nfunc (*realTime) now() time.Time {\n\treturn time.Now()\n}\n\ntype cacheResource interface {\n\tGetUID() types.UID\n\tGetNamespace() string\n}\n\nfunc logCacheUpdateFailure(log logr.Logger, obj any, operation string) {\n\t// We use WithCallStackHelper to ensure the correct caller line numbers in the log messages\n\thelper, log := log.WithCallStackHelper()\n\thelper()\n\terr := fmt.Errorf(\"not a cacheResource type: %T missing metadata/uid field\", obj)\n\tlog.Error(err, \"Cache update failure\", \"operation\", operation)\n}\n\n// onAdd handles the informer creation events, adding the created runtime.Object\n// to the data gatherer's cache. 
The cache key is the uid of the object\nfunc onAdd(log logr.Logger, obj any, dgCache *cache.Cache) {\n\titem, ok := obj.(cacheResource)\n\tif ok {\n\t\tcacheObject := &api.GatheredResource{\n\t\t\tResource: obj,\n\t\t}\n\t\tdgCache.Set(string(item.GetUID()), cacheObject, cache.DefaultExpiration)\n\t\treturn\n\t}\n\tlogCacheUpdateFailure(log, obj, \"add\")\n}\n\n// onUpdate handles the informer update events, replacing the old object with the new one\n// if it's present in the data gatherer's cache, (if the object isn't present, it gets added).\n// The cache key is the uid of the object\nfunc onUpdate(log logr.Logger, oldObj, newObj any, dgCache *cache.Cache) {\n\titem, ok := oldObj.(cacheResource)\n\tif ok {\n\t\tcacheObject := updateCacheGatheredResource(string(item.GetUID()), newObj, dgCache)\n\t\tdgCache.Set(string(item.GetUID()), cacheObject, cache.DefaultExpiration)\n\t\treturn\n\t}\n\tlogCacheUpdateFailure(log, oldObj, \"update\")\n}\n\n// onDelete handles the informer deletion events, updating the object's properties with the deletion\n// time of the object (but not removing the object from the cache).\n// The cache key is the uid of the object\nfunc onDelete(log logr.Logger, obj any, dgCache *cache.Cache) {\n\titem, ok := obj.(cacheResource)\n\tif ok {\n\t\tcacheObject := updateCacheGatheredResource(string(item.GetUID()), obj, dgCache)\n\t\tcacheObject.DeletedAt = api.Time{Time: clock.now()}\n\t\tdgCache.Set(string(item.GetUID()), cacheObject, cache.DefaultExpiration)\n\t\treturn\n\t}\n\tlogCacheUpdateFailure(log, obj, \"delete\")\n}\n\n// creates a new updated instance of a cache object, with the resource\n// argument. 
If the object is present in the cache it fetches the object's\n// properties.\nfunc updateCacheGatheredResource(cacheKey string, resource any, dgCache *cache.Cache) *api.GatheredResource {\n\t// updated cache object\n\tcacheObject := &api.GatheredResource{\n\t\tResource: resource,\n\t}\n\t// update the object's properties, if it's already in the cache\n\tif o, ok := dgCache.Get(cacheKey); ok {\n\t\tdeletedAt := o.(*api.GatheredResource).DeletedAt\n\t\tif !deletedAt.IsZero() {\n\t\t\tcacheObject.DeletedAt = deletedAt\n\t\t}\n\t}\n\treturn cacheObject\n}\n"
  },
  {
    "path": "pkg/datagatherer/k8sdynamic/cache_test.go",
    "content": "package k8sdynamic\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/go-logr/logr\"\n\t\"github.com/pmylund/go-cache\"\n\t\"github.com/stretchr/testify/require\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/klog/v2/ktesting\"\n\n\t\"github.com/jetstack/preflight/api\"\n)\n\nfunc makeGatheredResource(obj runtime.Object, deletedAt api.Time) *api.GatheredResource {\n\treturn &api.GatheredResource{\n\t\tResource:  obj,\n\t\tDeletedAt: deletedAt,\n\t}\n}\n\nfunc TestOnAddCache(t *testing.T) {\n\ttcs := map[string]struct {\n\t\tinputObjects []runtime.Object\n\t\teventObjects []runtime.Object\n\t\teventFunc    func(log logr.Logger, old, obj any, dgCache *cache.Cache)\n\t\texpected     []*api.GatheredResource\n\t}{\n\t\t\"add all objects\": {\n\t\t\tinputObjects: []runtime.Object{\n\t\t\t\tgetObject(\"foobar/v1\", \"Foo\", \"testfoo\", \"testns\", false),\n\t\t\t\tgetObject(\"v1\", \"Service\", \"testservice\", \"testns\", false),\n\t\t\t\tgetObject(\"foobar/v1\", \"NotFoo\", \"notfoo\", \"testns\", false),\n\t\t\t},\n\t\t\texpected: []*api.GatheredResource{\n\t\t\t\tmakeGatheredResource(getObject(\"foobar/v1\", \"Foo\", \"testfoo\", \"testns\", false), api.Time{}),\n\t\t\t\tmakeGatheredResource(getObject(\"v1\", \"Service\", \"testservice\", \"testns\", false), api.Time{}),\n\t\t\t\tmakeGatheredResource(getObject(\"foobar/v1\", \"NotFoo\", \"notfoo\", \"testns\", false), api.Time{}),\n\t\t\t},\n\t\t},\n\t\t\"delete all objects. 
All objects should have the deletedAt flag\": {\n\t\t\tinputObjects: []runtime.Object{\n\t\t\t\tgetObject(\"foobar/v1\", \"Foo\", \"testfoo\", \"testns\", false),\n\t\t\t\tgetObject(\"v1\", \"Service\", \"testservice\", \"testns\", false),\n\t\t\t\tgetObject(\"foobar/v1\", \"NotFoo\", \"notfoo\", \"testns\", false),\n\t\t\t},\n\t\t\t// objects to delete\n\t\t\teventObjects: []runtime.Object{\n\t\t\t\tgetObject(\"foobar/v1\", \"Foo\", \"testfoo\", \"testns\", false),\n\t\t\t\tgetObject(\"v1\", \"Service\", \"testservice\", \"testns\", false),\n\t\t\t\tgetObject(\"foobar/v1\", \"NotFoo\", \"notfoo\", \"testns\", false),\n\t\t\t},\n\t\t\teventFunc: func(log logr.Logger, oldObj, newObj any, dgCache *cache.Cache) {\n\t\t\t\tonDelete(log, oldObj, dgCache)\n\t\t\t},\n\t\t\texpected: []*api.GatheredResource{\n\t\t\t\tmakeGatheredResource(\n\t\t\t\t\tgetObject(\"foobar/v1\", \"Foo\", \"testfoo\", \"testns\", false),\n\t\t\t\t\tapi.Time{Time: clock.now()},\n\t\t\t\t),\n\t\t\t\tmakeGatheredResource(\n\t\t\t\t\tgetObject(\"v1\", \"Service\", \"testservice\", \"testns\", false),\n\t\t\t\t\tapi.Time{Time: clock.now()},\n\t\t\t\t),\n\t\t\t\tmakeGatheredResource(\n\t\t\t\t\tgetObject(\"foobar/v1\", \"NotFoo\", \"notfoo\", \"testns\", false),\n\t\t\t\t\tapi.Time{Time: clock.now()},\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t\t\"update all objects' namespace\": {\n\t\t\tinputObjects: []runtime.Object{\n\t\t\t\tgetObject(\"foobar/v1\", \"Foo\", \"testfoo\", \"testns\", false),\n\t\t\t\tgetObject(\"v1\", \"Service\", \"testservice\", \"testns\", false),\n\t\t\t\tgetObject(\"foobar/v1\", \"NotFoo\", \"notfoo\", \"testns\", false),\n\t\t\t},\n\t\t\t// objects to update\n\t\t\teventObjects: []runtime.Object{\n\t\t\t\tgetObject(\"foobar/v1\", \"Foo\", \"testfoo\", \"testns1\", false),\n\t\t\t\tgetObject(\"v1\", \"Service\", \"testservice\", \"testns1\", false),\n\t\t\t\tgetObject(\"foobar/v1\", \"NotFoo\", \"notfoo\", \"testns1\", false),\n\t\t\t},\n\t\t\teventFunc: onUpdate,\n\t\t\texpected: 
[]*api.GatheredResource{\n\t\t\t\tmakeGatheredResource(\n\t\t\t\t\tgetObject(\"foobar/v1\", \"Foo\", \"testfoo\", \"testns1\", false),\n\t\t\t\t\tapi.Time{},\n\t\t\t\t),\n\t\t\t\tmakeGatheredResource(\n\t\t\t\t\tgetObject(\"v1\", \"Service\", \"testservice\", \"testns1\", false),\n\t\t\t\t\tapi.Time{},\n\t\t\t\t),\n\t\t\t\tmakeGatheredResource(\n\t\t\t\t\tgetObject(\"foobar/v1\", \"NotFoo\", \"notfoo\", \"testns1\", false),\n\t\t\t\t\tapi.Time{},\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, tc := range tcs {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tlog := ktesting.NewLogger(t, ktesting.NewConfig(ktesting.Verbosity(10)))\n\t\t\tdgCache := cache.New(5*time.Minute, 30*time.Second)\n\t\t\t// adding initial objects to the cache\n\t\t\tfor _, obj := range tc.inputObjects {\n\t\t\t\tonAdd(log, obj, dgCache)\n\t\t\t}\n\n\t\t\t// Testing event function on set of objects\n\t\t\tfor _, obj := range tc.eventObjects {\n\t\t\t\tif tc.eventFunc != nil {\n\t\t\t\t\ttc.eventFunc(log, obj, obj, dgCache)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// items back from the cache\n\t\t\tlist := []*api.GatheredResource{}\n\t\t\tfor _, item := range dgCache.Items() {\n\t\t\t\tcacheObject := item.Object.(*api.GatheredResource)\n\t\t\t\tlist = append(list, cacheObject)\n\t\t\t}\n\n\t\t\t// sorting list of results by name\n\t\t\tsortGatheredResources(list)\n\t\t\t// sorting list of expected results by name\n\t\t\tsortGatheredResources(tc.expected)\n\n\t\t\tif len(list) != len(tc.expected) {\n\t\t\t\tt.Errorf(\"unexpected number of return items found. 
exp:%+v act:%+v\", tc.expected, list)\n\t\t\t}\n\n\t\t\trequire.Equal(t, tc.expected, list)\n\t\t})\n\t}\n}\n\n// TestNoneCache demonstrates that the cache helpers do not crash if passed a\n// non-cachable object, but log an error with a reference to the object type.\nfunc TestNoneCache(t *testing.T) {\n\tlog := ktesting.NewLogger(t, ktesting.NewConfig(ktesting.Verbosity(10)))\n\n\ttype notCachable struct{}\n\tonAdd(log, &notCachable{}, nil)\n\tonUpdate(log, &notCachable{}, nil, nil)\n\tonDelete(log, &notCachable{}, nil)\n}\n"
  },
  {
    "path": "pkg/datagatherer/k8sdynamic/dynamic.go",
    "content": "package k8sdynamic\n\n// The venafi-kubernetes-agent has a requirement that **all** resources should\n// be uploaded, even short-lived secrets, which are created and deleted\n// in-between data uploads. A cache was added to the datagatherer code, to\n// satisfy this requirement. The cache stores all resources for 5 minutes. And\n// the informer event handlers (onAdd, onUpdate, onDelete) update the cache\n// accordingly. The onDelete handler does not remove the object from the cache,\n// but instead marks the object as deleted by setting the DeletedAt field on the\n// GatheredResource. This ensures that deleted resources are still present in\n// the cache for the duration of the cache expiry time.\n//\n// The cache expiry is hard coded to 5 minutes, which is longer than the\n// venafi-kubernetes-agent default upload interval of 1 minute. This means that\n// even if a resource is created and deleted in-between data gatherer runs, it\n// will still be present in the cache when the data gatherer runs.\n//\n// TODO(wallrj): When the agent is deployed as CyberArk disco-agent, the deleted\n// items are currently discarded before upload. 
If this remains the case, then the cache is unnecessary\n// and should be disabled to save memory.\n// If, in the future, the CyberArk Discovery and Context service does want to\n// see deleted items, the \"deleted resource reporting mechanism\" will need to be\n// redesigned, so that deleted items are retained for the duration of the upload\n// interval.\n//\n// TODO(wallrj): When the agent is deployed as CyberArk disco-agent, the upload\n// interval is 12 hours by default, so the 5 minute cache expiry is not\n// sufficient.\n//\n// TODO(wallrj): The shared informer is configured to refresh all relist all\n// resources every 1 minute, which will cause unnecessary load on the apiserver.\n// We need to look back at the Git history and understand whether this was done\n// for good reason or due to some misunderstanding.\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"slices\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/pmylund/go-cache\"\n\tadmissionregistrationv1 \"k8s.io/api/admissionregistration/v1\"\n\tappsv1 \"k8s.io/api/apps/v1\"\n\tbatchv1 \"k8s.io/api/batch/v1\"\n\tcorev1 \"k8s.io/api/core/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured\"\n\t\"k8s.io/apimachinery/pkg/fields\"\n\t\"k8s.io/apimachinery/pkg/labels\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\t\"k8s.io/client-go/dynamic\"\n\t\"k8s.io/client-go/dynamic/dynamicinformer\"\n\t\"k8s.io/client-go/informers\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/kubernetes/scheme\"\n\tk8scache \"k8s.io/client-go/tools/cache\"\n\t\"k8s.io/klog/v2\"\n\n\t\"github.com/jetstack/preflight/api\"\n\t\"github.com/jetstack/preflight/internal/envelope\"\n\t\"github.com/jetstack/preflight/pkg/datagatherer\"\n\t\"github.com/jetstack/preflight/pkg/kubeconfig\"\n\t\"github.com/jetstack/preflight/pkg/logs\"\n)\n\n// ConfigDynamic contains the configuration for the 
data-gatherer.\ntype ConfigDynamic struct {\n\t// KubeConfigPath is the path to the kubeconfig file. If empty, will assume it runs in-cluster.\n\tKubeConfigPath string `yaml:\"kubeconfig\"`\n\t// GroupVersionResource identifies the resource type to gather.\n\tGroupVersionResource schema.GroupVersionResource\n\t// ExcludeNamespaces is a list of namespaces to exclude.\n\tExcludeNamespaces []string `yaml:\"exclude-namespaces\"`\n\t// IncludeNamespaces is a list of namespaces to include.\n\tIncludeNamespaces []string `yaml:\"include-namespaces\"`\n\t// FieldSelectors is a list of field selectors to use when listing this resource\n\tFieldSelectors []string `yaml:\"field-selectors\"`\n\t// LabelSelectors is a list of label selectors to use when listing this resource\n\tLabelSelectors []string `yaml:\"label-selectors\"`\n}\n\n// UnmarshalYAML unmarshals the ConfigDynamic resolving GroupVersionResource.\nfunc (c *ConfigDynamic) UnmarshalYAML(unmarshal func(any) error) error {\n\taux := struct {\n\t\tKubeConfigPath string `yaml:\"kubeconfig\"`\n\t\tResourceType   struct {\n\t\t\tGroup    string `yaml:\"group\"`\n\t\t\tVersion  string `yaml:\"version\"`\n\t\t\tResource string `yaml:\"resource\"`\n\t\t} `yaml:\"resource-type\"`\n\t\tExcludeNamespaces []string `yaml:\"exclude-namespaces\"`\n\t\tIncludeNamespaces []string `yaml:\"include-namespaces\"`\n\t\tFieldSelectors    []string `yaml:\"field-selectors\"`\n\t\tLabelSelectors    []string `yaml:\"label-selectors\"`\n\t}{}\n\terr := unmarshal(&aux)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.KubeConfigPath = aux.KubeConfigPath\n\tc.GroupVersionResource.Group = aux.ResourceType.Group\n\tc.GroupVersionResource.Version = aux.ResourceType.Version\n\tc.GroupVersionResource.Resource = aux.ResourceType.Resource\n\tc.ExcludeNamespaces = aux.ExcludeNamespaces\n\tc.IncludeNamespaces = aux.IncludeNamespaces\n\tc.FieldSelectors = aux.FieldSelectors\n\tc.LabelSelectors = aux.LabelSelectors\n\n\treturn nil\n}\n\n// validate validates the 
configuration.\nfunc (c *ConfigDynamic) validate() error {\n\tvar errs []string\n\tif len(c.ExcludeNamespaces) > 0 && len(c.IncludeNamespaces) > 0 {\n\t\terrs = append(errs, \"cannot set excluded and included namespaces\")\n\t}\n\n\tif c.GroupVersionResource.Resource == \"\" {\n\t\terrs = append(errs, \"invalid configuration: GroupVersionResource.Resource cannot be empty\")\n\t}\n\n\tfor i, fieldSelectorString := range c.FieldSelectors {\n\t\tif fieldSelectorString == \"\" {\n\t\t\terrs = append(errs, fmt.Sprintf(\"invalid field selector %d: must not be empty\", i))\n\t\t}\n\t\t_, err := fields.ParseSelector(fieldSelectorString)\n\t\tif err != nil {\n\t\t\terrs = append(errs, fmt.Sprintf(\"invalid field selector %d: %s\", i, err))\n\t\t}\n\t}\n\n\tfor i, labelSelectorString := range c.LabelSelectors {\n\t\tif labelSelectorString == \"\" {\n\t\t\terrs = append(errs, fmt.Sprintf(\"invalid label selector %d: must not be empty\", i))\n\t\t}\n\t\t_, err := labels.Parse(labelSelectorString)\n\t\tif err != nil {\n\t\t\terrs = append(errs, fmt.Sprintf(\"invalid label selector %d: %s\", i, err))\n\t\t}\n\t}\n\n\tif len(errs) > 0 {\n\t\treturn errors.New(strings.Join(errs, \", \"))\n\t}\n\n\treturn nil\n}\n\n// sharedInformerFunc creates a SharedIndexInformer given a SharedInformerFactory\ntype sharedInformerFunc func(informers.SharedInformerFactory) k8scache.SharedIndexInformer\n\n// kubernetesNativeResources map of the native kubernetes resources, linking each resource to a sharedInformerFunc for that resource.\n// secrets are still treated as unstructured rather than corev1.Secret, for a faster unmarshaling\nvar kubernetesNativeResources = map[schema.GroupVersionResource]sharedInformerFunc{\n\tcorev1.SchemeGroupVersion.WithResource(\"pods\"): func(sharedFactory informers.SharedInformerFactory) k8scache.SharedIndexInformer {\n\t\treturn sharedFactory.Core().V1().Pods().Informer()\n\t},\n\tcorev1.SchemeGroupVersion.WithResource(\"nodes\"): func(sharedFactory 
informers.SharedInformerFactory) k8scache.SharedIndexInformer {\n\t\treturn sharedFactory.Core().V1().Nodes().Informer()\n\t},\n\tcorev1.SchemeGroupVersion.WithResource(\"services\"): func(sharedFactory informers.SharedInformerFactory) k8scache.SharedIndexInformer {\n\t\treturn sharedFactory.Core().V1().Services().Informer()\n\t},\n\tcorev1.SchemeGroupVersion.WithResource(\"configmaps\"): func(sharedFactory informers.SharedInformerFactory) k8scache.SharedIndexInformer {\n\t\treturn sharedFactory.Core().V1().ConfigMaps().Informer()\n\t},\n\tappsv1.SchemeGroupVersion.WithResource(\"deployments\"): func(sharedFactory informers.SharedInformerFactory) k8scache.SharedIndexInformer {\n\t\treturn sharedFactory.Apps().V1().Deployments().Informer()\n\t},\n\tappsv1.SchemeGroupVersion.WithResource(\"daemonsets\"): func(sharedFactory informers.SharedInformerFactory) k8scache.SharedIndexInformer {\n\t\treturn sharedFactory.Apps().V1().DaemonSets().Informer()\n\t},\n\tappsv1.SchemeGroupVersion.WithResource(\"statefulsets\"): func(sharedFactory informers.SharedInformerFactory) k8scache.SharedIndexInformer {\n\t\treturn sharedFactory.Apps().V1().StatefulSets().Informer()\n\t},\n\tappsv1.SchemeGroupVersion.WithResource(\"replicasets\"): func(sharedFactory informers.SharedInformerFactory) k8scache.SharedIndexInformer {\n\t\treturn sharedFactory.Apps().V1().ReplicaSets().Informer()\n\t},\n\tadmissionregistrationv1.SchemeGroupVersion.WithResource(\"validatingwebhookconfigurations\"): func(sharedFactory informers.SharedInformerFactory) k8scache.SharedIndexInformer {\n\t\treturn sharedFactory.Admissionregistration().V1().ValidatingWebhookConfigurations().Informer()\n\t},\n\tadmissionregistrationv1.SchemeGroupVersion.WithResource(\"mutatingwebhookconfigurations\"): 
func(sharedFactory informers.SharedInformerFactory) k8scache.SharedIndexInformer {\n\t\treturn sharedFactory.Admissionregistration().V1().MutatingWebhookConfigurations().Informer()\n\t},\n\tbatchv1.SchemeGroupVersion.WithResource(\"jobs\"): func(sharedFactory informers.SharedInformerFactory) k8scache.SharedIndexInformer {\n\t\treturn sharedFactory.Batch().V1().Jobs().Informer()\n\t},\n}\n\n// NewDataGatherer constructs a new instance of the generic K8s data-gatherer for the provided\nfunc (c *ConfigDynamic) NewDataGatherer(ctx context.Context) (datagatherer.DataGatherer, error) {\n\tif isNativeResource(c.GroupVersionResource) {\n\t\tclientset, err := kubeconfig.NewClientSet(c.KubeConfigPath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn c.newDataGathererWithClient(ctx, nil, clientset)\n\t} else {\n\t\tcl, err := kubeconfig.NewDynamicClient(c.KubeConfigPath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn c.newDataGathererWithClient(ctx, cl, nil)\n\t}\n}\n\nfunc (c *ConfigDynamic) newDataGathererWithClient(ctx context.Context, cl dynamic.Interface, clientset kubernetes.Interface) (datagatherer.DataGatherer, error) {\n\tlog := klog.FromContext(ctx)\n\tif err := c.validate(); err != nil {\n\t\treturn nil, err\n\t}\n\t// init shared informer for selected namespaces\n\tfieldSelector := generateExcludedNamespacesFieldSelector(c.ExcludeNamespaces)\n\n\t// Add any custom field selectors to the excluded namespaces selector\n\t// The selectors have already been validated, so it is safe to use\n\t// ParseSelectorOrDie here.\n\tfor _, fieldSelectorString := range c.FieldSelectors {\n\t\tfieldSelector = fields.AndSelectors(fieldSelector, fields.ParseSelectorOrDie(fieldSelectorString))\n\t}\n\n\t// Add any custom label selectors\n\t// The selectors have already been validated, so Parse is expected to\n\t// succeed; any parse error is treated as a programming error.\n\tlabelSelector := labels.Everything()\n\tfor _, labelSelectorString := range 
c.LabelSelectors {\n\t\tselector, err := labels.Parse(labelSelectorString)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"PROGRAMMING ERROR: should have been caught in validation: \"+\n\t\t\t\t\"failed to parse validated label selector %q: %v\", labelSelectorString, err))\n\t\t}\n\t\treqs, _ := selector.Requirements()\n\t\tlabelSelector = labelSelector.Add(reqs...)\n\t}\n\n\t// init cache to store gathered resources\n\tdgCache := cache.New(5*time.Minute, 30*time.Second)\n\n\tnewDataGatherer := &DataGathererDynamic{\n\t\tgroupVersionResource: c.GroupVersionResource,\n\t\tfieldSelector:        fieldSelector.String(),\n\t\tlabelSelector:        labelSelector.String(),\n\t\tnamespaces:           c.IncludeNamespaces,\n\t\tcache:                dgCache,\n\t}\n\n\t// In order to reduce memory usage that might come from using Dynamic Informers\n\t// * https://github.com/kyverno/kyverno/issues/1832#issuecomment-968782166\n\t// * https://github.com/kubernetes/client-go/issues/832\n\t// * https://github.com/kubernetes/client-go/issues/871\n\t// we use SharedIndexInformer for known resources, these informers have less of an impact on the\n\t// memory usage. 
Dynamic datagatheres will use them for some of the native resources instead of\n\t// dynamic informers.\n\n\tif informerFunc, ok := kubernetesNativeResources[c.GroupVersionResource]; ok {\n\t\tfactory := informers.NewSharedInformerFactoryWithOptions(clientset,\n\t\t\t// TODO(wallrj): This causes all resources to be relisted every 1\n\t\t\t// minute which will cause unnecessary load on the apiserver.\n\t\t\t60*time.Second,\n\t\t\tinformers.WithNamespace(metav1.NamespaceAll),\n\t\t\tinformers.WithTweakListOptions(func(options *metav1.ListOptions) {\n\t\t\t\toptions.FieldSelector = fieldSelector.String()\n\t\t\t\toptions.LabelSelector = labelSelector.String()\n\t\t\t}),\n\t\t)\n\t\tnewDataGatherer.informer = informerFunc(factory)\n\t} else {\n\t\tfactory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(\n\t\t\tcl,\n\t\t\t// TODO(wallrj): This causes all resources to be relisted every 1\n\t\t\t// minute which will cause unnecessary load on the apiserver.\n\t\t\t60*time.Second,\n\t\t\tmetav1.NamespaceAll,\n\t\t\tfunc(options *metav1.ListOptions) {\n\t\t\t\toptions.FieldSelector = fieldSelector.String()\n\t\t\t\toptions.LabelSelector = labelSelector.String()\n\t\t\t},\n\t\t)\n\t\tnewDataGatherer.informer = factory.ForResource(c.GroupVersionResource).Informer()\n\t}\n\n\tregistration, err := newDataGatherer.informer.AddEventHandlerWithOptions(k8scache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj any) {\n\t\t\tonAdd(log, obj, dgCache)\n\t\t},\n\t\tUpdateFunc: func(oldObj, newObj any) {\n\t\t\tonUpdate(log, oldObj, newObj, dgCache)\n\t\t},\n\t\tDeleteFunc: func(obj any) {\n\t\t\tonDelete(log, obj, dgCache)\n\t\t},\n\t}, k8scache.HandlerOptions{\n\t\tLogger: &log,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnewDataGatherer.registration = registration\n\n\treturn newDataGatherer, nil\n}\n\n// DataGathererDynamic is a generic gatherer for Kubernetes. 
It knows how to request\n// a list of generic resources from the Kubernetes apiserver.\n// It does not deserialize the objects into structured data, instead utilising\n// the Kubernetes `Unstructured` type for data handling.\n// This is to allow us to support arbitrary CRDs and resources that Preflight\n// does not have registered as part of its `runtime.Scheme`.\ntype DataGathererDynamic struct {\n\t// groupVersionResource is the name of the API group, version and resource\n\t// that should be fetched by this data gatherer.\n\tgroupVersionResource schema.GroupVersionResource\n\t// namespace, if specified, limits the namespace of the resources returned.\n\t// This field *must* be omitted when the groupVersionResource refers to a\n\t// non-namespaced resource.\n\tnamespaces []string\n\t// fieldSelector is a field selector string used to filter resources\n\t// returned by the Kubernetes API.\n\t// https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/\n\tfieldSelector string\n\t// labelSelector is a label selector string used to filter resources\n\t// returned by the Kubernetes API.\n\tlabelSelector string\n\t// cache holds all resources watched by the data gatherer, default object expiry time 5 minutes\n\t// 30 seconds purge time https://pkg.go.dev/github.com/patrickmn/go-cache\n\tcache *cache.Cache\n\t// informer watches the events around the targeted resource and updates the cache\n\tinformer     k8scache.SharedIndexInformer\n\tregistration k8scache.ResourceEventHandlerRegistration\n\n\tExcludeAnnotKeys []*regexp.Regexp\n\tExcludeLabelKeys []*regexp.Regexp\n\n\t// Encryptor, if non-nil, will be used to envelope encrypt Secret data.\n\t// If nil, Secret data will be redacted.\n\tEncryptor envelope.Encryptor\n}\n\nfunc (g *DataGathererDynamic) GVR() schema.GroupVersionResource {\n\treturn g.groupVersionResource\n}\n\n// Run starts the dynamic data gatherer's informers for resource collection.\n// Returns error if the data gatherer 
informer wasn't initialized, Run blocks\n// until the stopCh is closed.\nfunc (g *DataGathererDynamic) Run(ctx context.Context) error {\n\tlog := klog.FromContext(ctx)\n\tif g.informer == nil {\n\t\treturn fmt.Errorf(\"informer was not initialized, impossible to start\")\n\t}\n\n\t// attach WatchErrorHandler, it needs to be set before starting an informer\n\terr := g.informer.SetWatchErrorHandler(func(r *k8scache.Reflector, err error) {\n\t\tif strings.Contains(fmt.Sprintf(\"%s\", err), \"the server could not find the requested resource\") {\n\t\t\tlog.V(logs.Debug).Info(\"Server missing resource for datagatherer\", \"groupVersionResource\", g.groupVersionResource)\n\t\t} else {\n\t\t\tlog.Info(\"datagatherer informer has failed and is backing off\", \"groupVersionResource\", g.groupVersionResource, \"reason\", err)\n\t\t}\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to SetWatchErrorHandler on informer: %s\", err)\n\t}\n\n\t// start shared informer\n\tg.informer.RunWithContext(ctx)\n\n\treturn nil\n}\n\nvar ErrCacheSyncTimeout = fmt.Errorf(\"timed out waiting for Kubernetes cache to sync\")\n\n// WaitForCacheSync waits for the data gatherer's informers cache to sync before\n// collecting the resources. 
Use errors.Is(err, ErrCacheSyncTimeout) to check if\n// the cache sync failed.\nfunc (g *DataGathererDynamic) WaitForCacheSync(ctx context.Context) error {\n\t// Don't use WaitForNamedCacheSync, since we don't want to log extra messages.\n\tif !k8scache.WaitForCacheSync(ctx.Done(), g.registration.HasSynced) {\n\t\treturn ErrCacheSyncTimeout\n\t}\n\n\treturn nil\n}\n\n// Fetch will fetch the requested data from the apiserver, or return an error\n// if fetching the data fails.\nfunc (g *DataGathererDynamic) Fetch(ctx context.Context) (any, int, error) {\n\tif g.groupVersionResource.String() == \"\" {\n\t\treturn nil, -1, fmt.Errorf(\"resource type must be specified\")\n\t}\n\n\tvar items = []*api.GatheredResource{}\n\n\tfetchNamespaces := g.namespaces\n\tif len(fetchNamespaces) == 0 {\n\t\t// then they must have been looking for all namespaces\n\t\tfetchNamespaces = []string{metav1.NamespaceAll}\n\t}\n\n\t// delete expired items from the cache\n\tg.cache.DeleteExpired()\n\tfor _, item := range g.cache.Items() {\n\t\t// filter cache items by namespace\n\t\tcacheObject := item.Object.(*api.GatheredResource)\n\t\tif resource, ok := cacheObject.Resource.(cacheResource); ok {\n\t\t\tnamespace := resource.GetNamespace()\n\t\t\tif isIncludedNamespace(namespace, fetchNamespaces) {\n\t\t\t\titems = append(items, cacheObject)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\treturn nil, -1, fmt.Errorf(\"failed to parse cached resource\")\n\t}\n\n\t// Redact Secret data (which may include encrypting it if enabled)\n\terr := g.redactList(ctx, items)\n\tif err != nil {\n\t\treturn nil, -1, err\n\t}\n\n\treturn &api.DynamicData{\n\t\tItems: items,\n\t}, len(items), nil\n}\n\n// redactList removes sensitive and superfluous data from the supplied resource list.\n// All resources have superfluous managed-data fields removed.\n// All resources have sensitive labels and annotations removed.\n// Secret and Route are processed as special cases. 
For these\n// resources there is an allow-list of fields that should be retained.\n// For Secret resources, the `data` is redacted, to prevent private keys or sensitive\n// data being collected; only the tls.crt and ca.crt data keys are retained.\n// However, if keepSecretData is true (i.e., encryption is enabled), secret data is NOT redacted\n// so it can be encrypted later in the upload pipeline.\n// For Route resources, only the fields related to CA certificate and policy are retained.\n// TODO(wallrj): A short coming of the current allow-list implementation is that\n// you have to specify absolute fields paths. It is not currently possible to\n// select all metadata with: `{metadata}`. This means that the metadata for\n// Secret and Route has fewer fields than the metadata for all other resources.\nfunc (g *DataGathererDynamic) redactList(ctx context.Context, list []*api.GatheredResource) error {\n\tsecretSelectedFields := slices.Clone(SecretSelectedFields)\n\n\tif g.Encryptor != nil {\n\t\tsecretSelectedFields = append(secretSelectedFields, FieldPath{\"_encryptedData\"})\n\t}\n\n\tfor i := range list {\n\t\tif item, ok := list[i].Resource.(*unstructured.Unstructured); ok {\n\t\t\t// Determine the kind of items in case this is a generic 'mixed' list.\n\t\t\tgvks, _, err := scheme.Scheme.ObjectKinds(item)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tresource := item\n\n\t\t\t// Redact item if it is a Secret or a Route.\n\t\t\tfor _, gvk := range gvks {\n\t\t\t\t// secret object\n\t\t\t\tif gvk.Kind == \"Secret\" && (gvk.Group == \"core\" || gvk.Group == \"\") {\n\t\t\t\t\t// Note: We must redact data field in all cases!\n\t\t\t\t\t// If encryption is enabled, we encrypt the data and preserve it, but we still need to redact later.\n\t\t\t\t\t// If encryption is enabled and _fails_, we MUST still redact the data field to avoid leaking sensitive information.\n\t\t\t\t\tif g.Encryptor != nil {\n\t\t\t\t\t\terr := g.encryptDataField(ctx, 
resource)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t// WARNING: We CAN NOT return an error here, as that would leak the secret data\n\t\t\t\t\t\t\tlog := klog.FromContext(ctx).WithName(\"encryptDataField\")\n\t\t\t\t\t\t\tlog.Error(err, \"failed to encrypt secret data field; no encrypted secret data will be sent for object\", \"secretName\", resource.GetName())\n\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t// Redact to only selected fields\n\t\t\t\t\tif err := Select(secretSelectedFields, resource); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t} else if gvk.Kind == \"Route\" && gvk.Group == \"route.openshift.io\" {\n\t\t\t\t\t// route object\n\t\t\t\t\tif err := Select(RouteSelectedFields, resource); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// remove managedFields from all resources\n\t\t\tRedact(RedactFields, resource)\n\n\t\t\tRemoveUnstructuredKeys(g.ExcludeAnnotKeys, resource, \"metadata\", \"annotations\")\n\t\t\tRemoveUnstructuredKeys(g.ExcludeLabelKeys, resource, \"metadata\", \"labels\")\n\n\t\t\tcontinue\n\t\t}\n\n\t\t// objectMeta interface is used to give resources from sharedIndexInformers, (core.Pod|apps.Deployment), a common interface\n\t\t// with access to the metav1.Object\n\t\ttype objectMeta interface{ GetObjectMeta() metav1.Object }\n\t\t// all objects fetched from sharedIndexInformers is now redacted\n\t\t// removing the managedFields and `kubectl.kubernetes.io/last-applied-configuration` annotation\n\t\tif item, ok := list[i].Resource.(objectMeta); ok {\n\t\t\titem.GetObjectMeta().SetManagedFields(nil)\n\t\t\tdelete(item.GetObjectMeta().GetAnnotations(), \"kubectl.kubernetes.io/last-applied-configuration\")\n\n\t\t\tRemoveTypedKeys(g.ExcludeAnnotKeys, item.GetObjectMeta().GetAnnotations())\n\t\t\tRemoveTypedKeys(g.ExcludeLabelKeys, item.GetObjectMeta().GetLabels())\n\n\t\t\tresource := item.(runtime.Object)\n\t\t\tgvks, _, err := scheme.Scheme.ObjectKinds(resource)\n\t\t\tif err != nil 
{\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// During the internal marshal/unmarshal the runtime.Object the metav1.TypeMeta seems to be lost\n\t\t\t// this section reassigns the TypeMeta to the resource\n\t\t\tfor _, gvk := range gvks {\n\t\t\t\tif len(gvk.Kind) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif len(gvk.Version) == 0 || gvk.Version == runtime.APIVersionInternal {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tresource.GetObjectKind().SetGroupVersionKind(gvk)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\t}\n\treturn nil\n}\n\nconst encryptedDataFieldName = \"_encryptedData\"\n\nvar encryptedDataField = FieldPath{encryptedDataFieldName}\n\n// encryptDataField encrypts the `data` field of the given secret and stores the encrypted data\n// in a new field with the name of [encryptedDataFieldName]. The original `data` field is left unchanged, on the\n// assumption that it will be redacted after the encryption step.\n// This function does not check that the given resource is actually a Secret; that is the caller's responsibility.\nfunc (g *DataGathererDynamic) encryptDataField(ctx context.Context, secret *unstructured.Unstructured) error {\n\tif g.Encryptor == nil {\n\t\treturn nil\n\t}\n\n\tplaintextDataRaw, found, err := unstructured.NestedFieldNoCopy(secret.Object, \"data\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error retrieving secret data field during redaction for encryption: %w\", err)\n\t}\n\n\tif !found {\n\t\treturn fmt.Errorf(\"no data field found on secret\")\n\t}\n\n\tplaintextDataTyped, ok := plaintextDataRaw.(map[string]any)\n\tif !ok {\n\t\treturn fmt.Errorf(\"secret data field is not of expected map type for encryption\")\n\t}\n\n\t// we want to encrypt the JSON representation of the data field\n\tplaintextData, err := json.Marshal(plaintextDataTyped)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal secret data field for encryption: %w\", err)\n\t}\n\n\tencryptedData, err := g.Encryptor.Encrypt(ctx, plaintextData)\n\tif err 
!= nil {\n\t\treturn fmt.Errorf(\"failed to encrypt secret data during redaction: %w\", err)\n\t}\n\n\terr = unstructured.SetNestedField(secret.Object, encryptedData.ToMap(), encryptedDataField...)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to set %s field on secret resource during redaction: %w\", encryptedDataFieldName, err)\n\t}\n\n\treturn nil\n}\n\n// Meant for typed clientset objects.\nfunc RemoveTypedKeys(excludeAnnotKeys []*regexp.Regexp, m map[string]string) {\n\tfor key := range m {\n\t\tfor _, excludeAnnotKey := range excludeAnnotKeys {\n\t\t\tif excludeAnnotKey.MatchString(key) {\n\t\t\t\tdelete(m, key)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Meant for unstructured clientset objects. Removes the keys from the field\n// given as input. For example, let's say we have the following object:\n//\n//\t{\n//\t  \"metadata\": {\n//\t    \"annotations\": {\n//\t      \"key1\": \"value1\",\n//\t      \"key2\": \"value2\"\n//\t    }\n//\t  }\n//\t}\n//\n// Then, the following call:\n//\n//\tRemoveUnstructuredKeys(\"^key1$\", obj, \"metadata\", \"annotations\")\n//\n// Will result in:\n//\n//\t{\n//\t  \"metadata\": {\n//\t    \"annotations\": {\"key2\": \"value2\"}\n//\t  }\n//\t}\n//\n// If the given path doesn't exist or leads to a non-map object, nothing\n// happens. The leaf object must either be a map[string]interface{} (that's\n// what's returned by the unstructured clientset) or a map[string]string (that's\n// what's returned by the typed clientset).\nfunc RemoveUnstructuredKeys(excludeKeys []*regexp.Regexp, obj *unstructured.Unstructured, path ...string) {\n\tannotsRaw, ok, err := unstructured.NestedFieldNoCopy(obj.Object, path...)\n\tif err != nil {\n\t\treturn\n\t}\n\tif !ok {\n\t\treturn\n\t}\n\n\t// The field may be nil since yaml.Unmarshal's omitempty might not be set\n\t// on this struct field.\n\tif annotsRaw == nil {\n\t\treturn\n\t}\n\n\t// The only possible type in an unstructured.Unstructured object is\n\t// map[string]interface{}. 
That's because the yaml.Unmarshal func is used\n\t// with an empty map[string]interface{} object, which means all nested\n\t// objects will be unmarshalled to a map[string]interface{}.\n\tannots, ok := annotsRaw.(map[string]any)\n\tif !ok {\n\t\treturn\n\t}\n\n\tfor key := range annots {\n\t\tfor _, excludeAnnotKey := range excludeKeys {\n\t\t\tif excludeAnnotKey.MatchString(key) {\n\t\t\t\tdelete(annots, key)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// generateExcludedNamespacesFieldSelector creates a field selector string from\n// a list of namespaces to exclude.\nfunc generateExcludedNamespacesFieldSelector(excludeNamespaces []string) fields.Selector {\n\tvar selectors []fields.Selector\n\tfor _, excludeNamespace := range excludeNamespaces {\n\t\tif excludeNamespace == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tselectors = append(selectors, fields.OneTermNotEqualSelector(\"metadata.namespace\", excludeNamespace))\n\t}\n\treturn fields.AndSelectors(selectors...)\n}\n\nfunc isIncludedNamespace(namespace string, namespaces []string) bool {\n\tif namespaces[0] == metav1.NamespaceAll {\n\t\treturn true\n\t}\n\treturn slices.Contains(namespaces, namespace)\n}\n\nfunc isNativeResource(gvr schema.GroupVersionResource) bool {\n\t_, ok := kubernetesNativeResources[gvr]\n\treturn ok\n}\n"
  },
  {
    "path": "pkg/datagatherer/k8sdynamic/dynamic_test.go",
    "content": "package k8sdynamic\n\nimport (\n\t\"context\"\n\t\"crypto/rand\"\n\tstdrsa \"crypto/rsa\"\n\t\"encoding/base64\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"slices\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/lestrrat-go/jwx/v3/jwa\"\n\t\"github.com/lestrrat-go/jwx/v3/jwe\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gopkg.in/yaml.v2\"\n\tcorev1 \"k8s.io/api/core/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\t\"k8s.io/client-go/dynamic/dynamicinformer\"\n\t\"k8s.io/client-go/dynamic/fake\"\n\t\"k8s.io/client-go/informers\"\n\tfakeclientset \"k8s.io/client-go/kubernetes/fake\"\n\tk8scache \"k8s.io/client-go/tools/cache\"\n\n\t\"github.com/jetstack/preflight/api\"\n\t\"github.com/jetstack/preflight/internal/envelope\"\n\t\"github.com/jetstack/preflight/internal/envelope/keyfetch\"\n\t\"github.com/jetstack/preflight/internal/envelope/rsa\"\n)\n\nfunc getObject(version, kind, name, namespace string, withManagedFields bool) *unstructured.Unstructured {\n\tmetadata := map[string]any{\n\t\t\"name\":      name,\n\t\t\"namespace\": namespace,\n\t\t\"uid\":       fmt.Sprintf(\"%s1\", name),\n\t}\n\n\tif withManagedFields {\n\t\t// []metav1.FieldsV1{} can't be deep copied by fake client so using\n\t\t// string as example value\n\t\tmetadata[\"managedFields\"] = \"set\"\n\t}\n\n\tobject := map[string]any{\n\t\t\"apiVersion\": version,\n\t\t\"kind\":       kind,\n\t\t\"metadata\":   metadata,\n\t}\n\n\treturn &unstructured.Unstructured{\n\t\tObject: object,\n\t}\n}\n\nfunc getObjectAnnot(version, kind, name, namespace string, annotations, labels map[string]any) *unstructured.Unstructured {\n\tobj := getObject(version, kind, name, namespace, false)\n\n\tmetadata, _ := obj.Object[\"metadata\"].(map[string]any)\n\tif 
annotations == nil {\n\t\tannotations = make(map[string]any)\n\t}\n\tmetadata[\"annotations\"] = annotations\n\tmetadata[\"labels\"] = labels\n\n\treturn obj\n}\n\nfunc getSecret(name, namespace string, data map[string]any, isTLS bool, withLastApplied bool) *unstructured.Unstructured {\n\tobject := getObject(\"v1\", \"Secret\", name, namespace, false)\n\n\tif data != nil {\n\t\tobject.Object[\"data\"] = data\n\t}\n\n\tobject.Object[\"type\"] = \"Opaque\"\n\tif isTLS {\n\t\tobject.Object[\"type\"] = \"kubernetes.io/tls\"\n\t}\n\n\tmetadata, _ := object.Object[\"metadata\"].(map[string]any)\n\tannotations := make(map[string]any)\n\n\t// if we're creating a 'raw' secret as scraped that was applied by kubectl\n\tif withLastApplied {\n\t\tjsonData, _ := json.Marshal(data)\n\t\tannotations[\"kubectl.kubernetes.io/last-applied-configuration\"] = string(jsonData)\n\t}\n\n\tmetadata[\"annotations\"] = annotations\n\n\treturn object\n}\n\nfunc sortResourcesByName(list []*unstructured.Unstructured) {\n\tslices.SortStableFunc(list, func(a, b *unstructured.Unstructured) int {\n\t\treturn strings.Compare(a.GetName(), b.GetName())\n\t})\n}\n\nfunc sortGatheredResources(list []*api.GatheredResource) {\n\ttype namer interface {\n\t\tGetName() string\n\t}\n\n\tslices.SortStableFunc(list, func(a, b *api.GatheredResource) int {\n\t\taNamer, ok := a.Resource.(namer)\n\t\tif !ok {\n\t\t\tpanic(\"got unexpected resource type\")\n\t\t}\n\n\t\tbNamer, ok := b.Resource.(namer)\n\t\tif !ok {\n\t\t\tpanic(\"got unexpected resource type\")\n\t\t}\n\n\t\treturn strings.Compare(aNamer.GetName(), bNamer.GetName())\n\t})\n\n}\n\nfunc TestNewDataGathererWithClientAndDynamicInformer(t *testing.T) {\n\tctx := t.Context()\n\n\tconfig := ConfigDynamic{\n\t\tExcludeNamespaces:    []string{\"kube-system\"},\n\t\tGroupVersionResource: schema.GroupVersionResource{Group: \"foobar\", Version: \"v1\", Resource: \"foos\"},\n\t\tFieldSelectors: 
[]string{\n\t\t\t\"type!=kubernetes.io/service-account-token\",\n\t\t\t\"type!=kubernetes.io/dockercfg\",\n\t\t},\n\t\tLabelSelectors: []string{\n\t\t\t\"conjur.org/name=conjur-connect-configmap\",\n\t\t\t\"app=my-app\",\n\t\t},\n\t}\n\tcl := fake.NewSimpleDynamicClient(runtime.NewScheme())\n\tdg, err := config.newDataGathererWithClient(ctx, cl, nil)\n\n\tif err != nil {\n\t\tt.Errorf(\"expected no error but got: %v\", err)\n\t}\n\n\texpected := &DataGathererDynamic{\n\t\tgroupVersionResource: config.GroupVersionResource,\n\t\t// it's important that the namespaces are set as the IncludeNamespaces\n\t\t// during initialization\n\t\tnamespaces:    config.IncludeNamespaces,\n\t\tfieldSelector: \"metadata.namespace!=kube-system,type!=kubernetes.io/service-account-token,type!=kubernetes.io/dockercfg\",\n\t\tlabelSelector: \"app=my-app,conjur.org/name=conjur-connect-configmap\",\n\t}\n\n\tgatherer := dg.(*DataGathererDynamic)\n\t// test gatherer's fields\n\tif !reflect.DeepEqual(gatherer.groupVersionResource, expected.groupVersionResource) {\n\t\tt.Errorf(\"expected %v, got %v\", expected, dg)\n\t}\n\tif !reflect.DeepEqual(gatherer.namespaces, expected.namespaces) {\n\t\tt.Errorf(\"expected %v, got %v\", expected, dg)\n\t}\n\tif gatherer.cache == nil {\n\t\tt.Errorf(\"unexpected cache value: %v\", nil)\n\t}\n\tif gatherer.informer == nil {\n\t\tt.Errorf(\"unexpected resource informer value: %v\", nil)\n\t}\n\tif gatherer.registration == nil {\n\t\tt.Errorf(\"unexpected resource event handler registration value: %v\", nil)\n\t}\n\tif !reflect.DeepEqual(gatherer.fieldSelector, expected.fieldSelector) {\n\t\tt.Errorf(\"expected %v, got %v\", expected.fieldSelector, gatherer.fieldSelector)\n\t}\n\tif !reflect.DeepEqual(gatherer.labelSelector, expected.labelSelector) {\n\t\tt.Errorf(\"expected %v, got %v\", expected.labelSelector, gatherer.labelSelector)\n\t}\n}\n\nfunc TestNewDataGathererWithClientAndSharedIndexInformer(t *testing.T) {\n\tctx := t.Context()\n\tconfig := 
ConfigDynamic{\n\t\tIncludeNamespaces:    []string{\"a\"},\n\t\tGroupVersionResource: schema.GroupVersionResource{Group: \"\", Version: \"v1\", Resource: \"pods\"},\n\t\tLabelSelectors: []string{\n\t\t\t\"app=my-app\",\n\t\t\t\"version=v1\",\n\t\t},\n\t}\n\tclientset := fakeclientset.NewSimpleClientset()\n\tdg, err := config.newDataGathererWithClient(ctx, nil, clientset)\n\tif err != nil {\n\t\tt.Errorf(\"expected no error but got: %v\", err)\n\t}\n\n\texpected := &DataGathererDynamic{\n\t\tgroupVersionResource: config.GroupVersionResource,\n\t\t// it's important that the namespaces are set as the IncludeNamespaces\n\t\t// during initialization\n\t\tnamespaces:    config.IncludeNamespaces,\n\t\tlabelSelector: \"app=my-app,version=v1\",\n\t}\n\n\tgatherer := dg.(*DataGathererDynamic)\n\t// test gatherer's fields\n\tif !reflect.DeepEqual(gatherer.groupVersionResource, expected.groupVersionResource) {\n\t\tt.Errorf(\"expected %v, got %v\", expected, dg)\n\t}\n\tif !reflect.DeepEqual(gatherer.namespaces, expected.namespaces) {\n\t\tt.Errorf(\"expected %v, got %v\", expected, dg)\n\t}\n\tif gatherer.cache == nil {\n\t\tt.Errorf(\"unexpected cache value: %v\", nil)\n\t}\n\tif gatherer.informer == nil {\n\t\tt.Errorf(\"unexpected resource informer value: %v\", nil)\n\t}\n\tif gatherer.registration == nil {\n\t\tt.Errorf(\"unexpected event handler registration value: %v\", nil)\n\t}\n\tif !reflect.DeepEqual(gatherer.labelSelector, expected.labelSelector) {\n\t\tt.Errorf(\"expected %v, got %v\", expected.labelSelector, gatherer.labelSelector)\n\t}\n}\n\nfunc TestUnmarshalDynamicConfig(t *testing.T) {\n\ttextCfg := `\nkubeconfig: \"/home/someone/.kube/config\"\nresource-type:\n  group: \"g\"\n  version: \"v\"\n  resource: \"r\"\nexclude-namespaces:\n- kube-system\n- my-namespace\n# this config is invalid, but the validation is tested elsewhere\n# include-namespaces is here just to ensure that they are loaded\n# from the config file\ninclude-namespaces:\n- 
default\nfield-selectors:\n- type!=kubernetes.io/service-account-token\nlabel-selectors:\n- conjur.org/name=conjur-connect-configmap\n- app=my-app\n`\n\n\texpectedGVR := schema.GroupVersionResource{\n\t\tGroup:    \"g\",\n\t\tVersion:  \"v\",\n\t\tResource: \"r\",\n\t}\n\n\texpectedExcludeNamespaces := []string{\n\t\t\"kube-system\",\n\t\t\"my-namespace\",\n\t}\n\n\texpectedIncludeNamespaces := []string{\"default\"}\n\n\texpectedFieldSelectors := []string{\n\t\t\"type!=kubernetes.io/service-account-token\",\n\t}\n\n\texpectedLabelSelectors := []string{\n\t\t\"conjur.org/name=conjur-connect-configmap\",\n\t\t\"app=my-app\",\n\t}\n\n\tcfg := ConfigDynamic{}\n\terr := yaml.Unmarshal([]byte(textCfg), &cfg)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %+v\", err)\n\t}\n\n\tif got, want := cfg.KubeConfigPath, \"/home/someone/.kube/config\"; got != want {\n\t\tt.Errorf(\"KubeConfigPath does not match: got=%q; want=%q\", got, want)\n\t}\n\n\tif got, want := cfg.GroupVersionResource, expectedGVR; !reflect.DeepEqual(got, want) {\n\t\tt.Errorf(\"GroupVersionResource does not match: got=%+v want=%+v\", got, want)\n\t}\n\n\tif got, want := cfg.ExcludeNamespaces, expectedExcludeNamespaces; !reflect.DeepEqual(got, want) {\n\t\tt.Errorf(\"ExcludeNamespaces does not match: got=%+v want=%+v\", got, want)\n\t}\n\tif got, want := cfg.IncludeNamespaces, expectedIncludeNamespaces; !reflect.DeepEqual(got, want) {\n\t\tt.Errorf(\"IncludeNamespaces does not match: got=%+v want=%+v\", got, want)\n\t}\n\tif got, want := cfg.FieldSelectors, expectedFieldSelectors; !reflect.DeepEqual(got, want) {\n\t\tt.Errorf(\"FieldSelectors does not match: got=%+v want=%+v\", got, want)\n\t}\n\tif got, want := cfg.LabelSelectors, expectedLabelSelectors; !reflect.DeepEqual(got, want) {\n\t\tt.Errorf(\"LabelSelectors does not match: got=%+v want=%+v\", got, want)\n\t}\n}\n\nfunc TestConfigDynamicValidate(t *testing.T) {\n\ttests := []struct {\n\t\tConfig        ConfigDynamic\n\t\tExpectedError 
string\n\t}{\n\t\t{\n\t\t\tConfig: ConfigDynamic{\n\t\t\t\tGroupVersionResource: schema.GroupVersionResource{\n\t\t\t\t\tGroup:    \"\",\n\t\t\t\t\tVersion:  \"\",\n\t\t\t\t\tResource: \"\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpectedError: \"invalid configuration: GroupVersionResource.Resource cannot be empty\",\n\t\t},\n\t\t{\n\t\t\tConfig: ConfigDynamic{\n\t\t\t\tIncludeNamespaces: []string{\"a\"},\n\t\t\t\tExcludeNamespaces: []string{\"b\"},\n\t\t\t},\n\t\t\tExpectedError: \"cannot set excluded and included namespaces\",\n\t\t},\n\t\t{\n\t\t\tConfig: ConfigDynamic{\n\t\t\t\tGroupVersionResource: schema.GroupVersionResource{\n\t\t\t\t\tGroup:    \"\",\n\t\t\t\t\tVersion:  \"v1\",\n\t\t\t\t\tResource: \"secrets\",\n\t\t\t\t},\n\t\t\t\tFieldSelectors: []string{\"\"},\n\t\t\t},\n\t\t\tExpectedError: \"invalid field selector 0: must not be empty\",\n\t\t},\n\t\t{\n\t\t\tConfig: ConfigDynamic{\n\t\t\t\tGroupVersionResource: schema.GroupVersionResource{\n\t\t\t\t\tGroup:    \"\",\n\t\t\t\t\tVersion:  \"v1\",\n\t\t\t\t\tResource: \"secrets\",\n\t\t\t\t},\n\t\t\t\tFieldSelectors: []string{\"foo\"},\n\t\t\t},\n\t\t\tExpectedError: \"invalid field selector 0: invalid selector: 'foo'; can't understand 'foo'\",\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\terr := test.Config.validate()\n\t\tif err == nil && test.ExpectedError != \"\" {\n\t\t\tt.Errorf(\"expected error: %q, got: nil\", test.ExpectedError)\n\t\t}\n\t\tif err != nil && !strings.Contains(err.Error(), test.ExpectedError) {\n\t\t\tt.Errorf(\"expected %s, got %s\", test.ExpectedError, err.Error())\n\t\t}\n\t}\n}\n\nfunc TestGenerateExcludedNamespacesFieldSelector(t *testing.T) {\n\ttests := []struct {\n\t\tExcludeNamespaces     []string\n\t\tExpectedFieldSelector string\n\t}{\n\t\t{\n\t\t\tExcludeNamespaces: []string{\n\t\t\t\t\"\",\n\t\t\t},\n\t\t\tExpectedFieldSelector: \"\",\n\t\t},\n\t\t{\n\t\t\tExcludeNamespaces: []string{\n\t\t\t\t\"kube-system\",\n\t\t\t},\n\t\t\tExpectedFieldSelector: 
\"metadata.namespace!=kube-system\",\n\t\t},\n\t\t{\n\t\t\tExcludeNamespaces: []string{\n\t\t\t\t\"kube-system\",\n\t\t\t\t\"my-namespace\",\n\t\t\t},\n\t\t\tExpectedFieldSelector: \"metadata.namespace!=kube-system,metadata.namespace!=my-namespace\",\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tfieldSelector := generateExcludedNamespacesFieldSelector(test.ExcludeNamespaces).String()\n\t\tif fieldSelector != test.ExpectedFieldSelector {\n\t\t\tt.Errorf(\"ExpectedFieldSelector does not match: got=%+v want=%+v\", fieldSelector, test.ExpectedFieldSelector)\n\t\t}\n\t}\n}\n\n// fake time for testing\ntype fakeTime struct {\n}\n\nfunc (f *fakeTime) now() time.Time {\n\t//2021-03-16T18:22:15+00:00\n\treturn time.Unix(1615918935, 0)\n}\n\nfunc init() {\n\tclock = &fakeTime{}\n}\n\ntype failEncryptor struct{}\n\nfunc (fe *failEncryptor) Encrypt(_ context.Context, plaintext []byte) (*envelope.EncryptedData, error) {\n\treturn nil, fmt.Errorf(\"encryption failed\")\n}\n\nfunc TestDynamicGatherer_Fetch(t *testing.T) {\n\tprivKey, err := stdrsa.GenerateKey(rand.Reader, 2048)\n\trequire.NoError(t, err)\n\n\tkeyID := \"test-key-id\"\n\n\tfetcher := keyfetch.NewFakeClientWithKey(keyID, privKey.Public().(*stdrsa.PublicKey))\n\tencryptor, err := rsa.NewEncryptor(fetcher)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create encryptor: %v\", err)\n\t}\n\n\t// start a k8s client\n\t// init the datagatherer's informer with the client\n\t// add/delete resources watched by the data gatherer\n\t// check the expected result\n\ttests := map[string]struct {\n\t\tconfig            ConfigDynamic\n\t\texcludeAnnotsKeys []string\n\t\texcludeLabelKeys  []string\n\t\taddObjects        []*unstructured.Unstructured\n\t\tdeleteObjects     map[string]string\n\t\tupdateObjects     map[string]runtime.Object\n\t\texpected          []*api.GatheredResource\n\n\t\tencryptor               envelope.Encryptor\n\t\texpectEncryptionFailure bool\n\n\t\terr bool\n\t}{\n\t\t\"fetches the default namespace\": 
{\n\t\t\taddObjects: []*unstructured.Unstructured{\n\t\t\t\tgetObject(\"v1\", \"Namespace\", \"default\", \"\", false),\n\t\t\t},\n\t\t\tconfig: ConfigDynamic{\n\t\t\t\tIncludeNamespaces:    []string{\"\"},\n\t\t\t\tGroupVersionResource: schema.GroupVersionResource{Group: \"\", Version: \"v1\", Resource: \"namespaces\"},\n\t\t\t},\n\t\t\texpected: []*api.GatheredResource{\n\t\t\t\t{\n\t\t\t\t\tResource: &unstructured.Unstructured{\n\t\t\t\t\t\tObject: map[string]any{\n\t\t\t\t\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\t\t\t\t\"kind\":       \"Namespace\",\n\t\t\t\t\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\t\t\t\t\"name\": \"default\",\n\t\t\t\t\t\t\t\t\"uid\":  \"default1\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"only a Foo should be returned if GVR selects foos\": {\n\t\t\taddObjects: []*unstructured.Unstructured{\n\t\t\t\tgetObject(\"foobar/v1\", \"Foo\", \"testfoo\", \"testns\", false),\n\t\t\t\tgetObject(\"v1\", \"Service\", \"testservice\", \"testns\", false),\n\t\t\t\tgetObject(\"foobar/v1\", \"NotFoo\", \"notfoo\", \"testns\", false),\n\t\t\t},\n\t\t\tconfig: ConfigDynamic{\n\t\t\t\tIncludeNamespaces:    []string{\"testns\"},\n\t\t\t\tGroupVersionResource: schema.GroupVersionResource{Group: \"foobar\", Version: \"v1\", Resource: \"foos\"},\n\t\t\t},\n\t\t\texpected: []*api.GatheredResource{\n\t\t\t\t{\n\t\t\t\t\tResource: getObject(\"foobar/v1\", \"Foo\", \"testfoo\", \"testns\", false),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"delete a Foo resource from the testns, the cache should have a Foo with deletedAt set to now()\": {\n\t\t\taddObjects: []*unstructured.Unstructured{\n\t\t\t\tgetObject(\"foobar/v1\", \"Foo\", \"testfoo\", \"testns\", false),\n\t\t\t\tgetObject(\"v1\", \"Service\", \"testservice\", \"testns\", false),\n\t\t\t\tgetObject(\"foobar/v1\", \"NotFoo\", \"notfoo\", \"testns\", false),\n\t\t\t},\n\t\t\tdeleteObjects: map[string]string{\n\t\t\t\t\"testns\": \"testfoo\",\n\t\t\t},\n\t\t\tconfig: 
ConfigDynamic{\n\t\t\t\tIncludeNamespaces:    []string{\"testns\"},\n\t\t\t\tGroupVersionResource: schema.GroupVersionResource{Group: \"foobar\", Version: \"v1\", Resource: \"foos\"},\n\t\t\t},\n\t\t\texpected: []*api.GatheredResource{\n\t\t\t\t{\n\t\t\t\t\tResource:  getObject(\"foobar/v1\", \"Foo\", \"testfoo\", \"testns\", false),\n\t\t\t\t\tDeletedAt: api.Time{Time: clock.now()},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"only Foos in the specified namespace should be returned\": {\n\t\t\tconfig: ConfigDynamic{\n\t\t\t\tIncludeNamespaces:    []string{\"testns\"},\n\t\t\t\tGroupVersionResource: schema.GroupVersionResource{Group: \"foobar\", Version: \"v1\", Resource: \"foos\"},\n\t\t\t},\n\t\t\taddObjects: []*unstructured.Unstructured{\n\t\t\t\tgetObject(\"foobar/v1\", \"Foo\", \"testfoo\", \"testns\", false),\n\t\t\t\tgetObject(\"foobar/v1\", \"Foo\", \"testfoo\", \"nottestns\", false),\n\t\t\t},\n\t\t\texpected: []*api.GatheredResource{\n\t\t\t\t{\n\t\t\t\t\tResource: getObject(\"foobar/v1\", \"Foo\", \"testfoo\", \"testns\", false),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"Foos in different namespaces should be returned if no namespace field is set\": {\n\t\t\tconfig: ConfigDynamic{\n\t\t\t\tIncludeNamespaces:    []string{\"\"},\n\t\t\t\tGroupVersionResource: schema.GroupVersionResource{Group: \"foobar\", Version: \"v1\", Resource: \"foos\"},\n\t\t\t},\n\t\t\taddObjects: []*unstructured.Unstructured{\n\t\t\t\tgetObject(\"foobar/v1\", \"Foo\", \"testfoo1\", \"testns1\", false),\n\t\t\t\tgetObject(\"foobar/v1\", \"Foo\", \"testfoo2\", \"testns2\", false),\n\t\t\t},\n\t\t\texpected: []*api.GatheredResource{\n\t\t\t\t{\n\t\t\t\t\tResource: getObject(\"foobar/v1\", \"Foo\", \"testfoo1\", \"testns1\", false),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tResource: getObject(\"foobar/v1\", \"Foo\", \"testfoo2\", \"testns2\", false),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"DeleteFoos in different namespaces should be returned if no namespace field is set\": {\n\t\t\tconfig: 
ConfigDynamic{\n\t\t\t\tIncludeNamespaces:    []string{\"\"},\n\t\t\t\tGroupVersionResource: schema.GroupVersionResource{Group: \"foobar\", Version: \"v1\", Resource: \"foos\"},\n\t\t\t},\n\t\t\taddObjects: []*unstructured.Unstructured{\n\t\t\t\tgetObject(\"foobar/v1\", \"Foo\", \"testfoo1\", \"testns1\", false),\n\t\t\t\tgetObject(\"foobar/v1\", \"Foo\", \"testfoo2\", \"testns2\", false),\n\t\t\t},\n\t\t\texpected: []*api.GatheredResource{\n\t\t\t\t{\n\t\t\t\t\tResource: getObject(\"foobar/v1\", \"Foo\", \"testfoo1\", \"testns1\", false),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tResource: getObject(\"foobar/v1\", \"Foo\", \"testfoo2\", \"testns2\", false),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"Delete all Foo resources, all the fetched resources should have a deletedAt field set to now()\": {\n\t\t\tconfig: ConfigDynamic{\n\t\t\t\tIncludeNamespaces:    []string{\"\"},\n\t\t\t\tGroupVersionResource: schema.GroupVersionResource{Group: \"foobar\", Version: \"v1\", Resource: \"foos\"},\n\t\t\t},\n\t\t\tdeleteObjects: map[string]string{\n\t\t\t\t\"testns1\": \"testfoo1\",\n\t\t\t\t\"testns2\": \"testfoo2\",\n\t\t\t},\n\t\t\taddObjects: []*unstructured.Unstructured{\n\t\t\t\tgetObject(\"foobar/v1\", \"Foo\", \"testfoo1\", \"testns1\", false),\n\t\t\t\tgetObject(\"foobar/v1\", \"Foo\", \"testfoo2\", \"testns2\", false),\n\t\t\t},\n\t\t\texpected: []*api.GatheredResource{\n\t\t\t\t{\n\t\t\t\t\tResource:  getObject(\"foobar/v1\", \"Foo\", \"testfoo1\", \"testns1\", false),\n\t\t\t\t\tDeletedAt: api.Time{Time: clock.now()},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tResource:  getObject(\"foobar/v1\", \"Foo\", \"testfoo2\", \"testns2\", false),\n\t\t\t\t\tDeletedAt: api.Time{Time: clock.now()},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"Update all Foo resources, all the fetched resources should have been updated\": {\n\t\t\tconfig: ConfigDynamic{\n\t\t\t\tIncludeNamespaces:    []string{\"\"},\n\t\t\t\tGroupVersionResource: schema.GroupVersionResource{Group: \"foobar\", Version: \"v1\", Resource: 
\"foos\"},\n\t\t\t},\n\t\t\tupdateObjects: map[string]runtime.Object{\n\t\t\t\t\"testns1\": getObject(\"foobar/v1\", \"Foo\", \"testfoo1\", \"testns1\", false),\n\t\t\t\t\"testns2\": getObject(\"foobar/v1\", \"Foo\", \"testfoo2\", \"testns2\", false),\n\t\t\t},\n\t\t\taddObjects: []*unstructured.Unstructured{\n\t\t\t\tgetObject(\"foobar/v1\", \"Foo\", \"testfoo1\", \"testns1\", false),\n\t\t\t\tgetObject(\"foobar/v1\", \"Foo\", \"testfoo2\", \"testns2\", false),\n\t\t\t},\n\t\t\texpected: []*api.GatheredResource{\n\t\t\t\t{\n\t\t\t\t\tResource: getObject(\"foobar/v1\", \"Foo\", \"testfoo1\", \"testns1\", false),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tResource: getObject(\"foobar/v1\", \"Foo\", \"testfoo2\", \"testns2\", false),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"Secret resources should have data removed\": {\n\t\t\tconfig: ConfigDynamic{\n\t\t\t\tIncludeNamespaces:    []string{\"\"},\n\t\t\t\tGroupVersionResource: schema.GroupVersionResource{Group: \"\", Version: \"v1\", Resource: \"secrets\"},\n\t\t\t},\n\t\t\taddObjects: []*unstructured.Unstructured{\n\t\t\t\tgetSecret(\"testsecret\", \"testns1\", map[string]any{\n\t\t\t\t\t\"secretKey\": \"secretValue\",\n\t\t\t\t}, false, true),\n\t\t\t\tgetSecret(\"anothertestsecret\", \"testns2\", map[string]any{\n\t\t\t\t\t\"secretNumber\": \"12345\",\n\t\t\t\t}, false, true),\n\t\t\t},\n\t\t\texpected: []*api.GatheredResource{\n\t\t\t\t{\n\t\t\t\t\tResource: getSecret(\"testsecret\", \"testns1\", nil, false, false),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tResource: getSecret(\"anothertestsecret\", \"testns2\", nil, false, false),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"Secret of type kubernetes.io/tls should have crts and not keys\": {\n\t\t\tconfig: ConfigDynamic{\n\t\t\t\tIncludeNamespaces:    []string{\"\"},\n\t\t\t\tGroupVersionResource: schema.GroupVersionResource{Group: \"\", Version: \"v1\", Resource: \"secrets\"},\n\t\t\t},\n\t\t\taddObjects: []*unstructured.Unstructured{\n\t\t\t\tgetSecret(\"testsecret\", \"testns1\", 
map[string]any{\n\t\t\t\t\t\"tls.key\": \"secretValue\",\n\t\t\t\t\t\"tls.crt\": \"value\",\n\t\t\t\t\t\"ca.crt\":  \"value\",\n\t\t\t\t}, true, true),\n\t\t\t\tgetSecret(\"anothertestsecret\", \"testns2\", map[string]any{\n\t\t\t\t\t\"example.key\": \"secretValue\",\n\t\t\t\t\t\"example.crt\": \"value\",\n\t\t\t\t}, true, true),\n\t\t\t},\n\t\t\texpected: []*api.GatheredResource{\n\t\t\t\t{\n\t\t\t\t\t// only tls.crt and ca.cert remain\n\t\t\t\t\tResource: getSecret(\"testsecret\", \"testns1\", map[string]any{\n\t\t\t\t\t\t\"tls.crt\": \"value\",\n\t\t\t\t\t\t\"ca.crt\":  \"value\",\n\t\t\t\t\t}, true, false),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t// all other keys removed\n\t\t\t\t\tResource: getSecret(\"anothertestsecret\", \"testns2\", nil, true, false),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"Secret resources should have encrypted data when encryption is enabled\": {\n\t\t\tconfig: ConfigDynamic{\n\t\t\t\tIncludeNamespaces:    []string{\"\"},\n\t\t\t\tGroupVersionResource: schema.GroupVersionResource{Group: \"\", Version: \"v1\", Resource: \"secrets\"},\n\t\t\t},\n\t\t\taddObjects: []*unstructured.Unstructured{\n\t\t\t\tgetSecret(\"testsecret\", \"testns1\", map[string]any{\n\t\t\t\t\t\"secretKey\": \"secretValue\",\n\t\t\t\t}, false, true),\n\t\t\t\tgetSecret(\"anothertestsecret\", \"testns2\", map[string]any{\n\t\t\t\t\t\"secretNumber\": \"12345\",\n\t\t\t\t}, false, true),\n\t\t\t},\n\t\t\tencryptor: encryptor,\n\t\t\texpected: []*api.GatheredResource{\n\t\t\t\t{\n\t\t\t\t\tResource: getSecret(\"testsecret\", \"testns1\", nil, false, false),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tResource: getSecret(\"anothertestsecret\", \"testns2\", nil, false, false),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"Secret resources should have encrypted data when encryption is enabled with some data fields preserved\": {\n\t\t\tconfig: ConfigDynamic{\n\t\t\t\tIncludeNamespaces:    []string{\"\"},\n\t\t\t\tGroupVersionResource: schema.GroupVersionResource{Group: \"\", Version: \"v1\", Resource: 
\"secrets\"},\n\t\t\t},\n\t\t\taddObjects: []*unstructured.Unstructured{\n\t\t\t\tgetSecret(\"testsecret-notpreserved\", \"testns1\", map[string]any{\n\t\t\t\t\t\"secretKey\": \"secretValue\",\n\t\t\t\t}, false, true),\n\t\t\t\tgetSecret(\"testsecret-preserved\", \"testns1\", map[string]any{\n\t\t\t\t\t\"tls.key\": \"secretValue\",\n\t\t\t\t\t\"tls.crt\": \"value\",\n\t\t\t\t\t\"ca.crt\":  \"value\",\n\t\t\t\t}, true, true),\n\t\t\t},\n\t\t\tencryptor: encryptor,\n\t\t\texpected: []*api.GatheredResource{\n\t\t\t\t{\n\t\t\t\t\t// only tls.crt and ca.cert remain, although tls.key will be present in encrypted data\n\t\t\t\t\tResource: getSecret(\"testsecret-preserved\", \"testns1\", map[string]any{\n\t\t\t\t\t\t\"tls.crt\": \"value\",\n\t\t\t\t\t\t\"ca.crt\":  \"value\",\n\t\t\t\t\t}, true, false),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tResource: getSecret(\"testsecret-notpreserved\", \"testns1\", nil, false, false),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"Secret resources should still be redacted if encryption fails\": {\n\t\t\tconfig: ConfigDynamic{\n\t\t\t\tIncludeNamespaces:    []string{\"\"},\n\t\t\t\tGroupVersionResource: schema.GroupVersionResource{Group: \"\", Version: \"v1\", Resource: \"secrets\"},\n\t\t\t},\n\t\t\taddObjects: []*unstructured.Unstructured{\n\t\t\t\tgetSecret(\"testsecret\", \"testns1\", map[string]any{\n\t\t\t\t\t\"secretKey\": \"secretValue\",\n\t\t\t\t}, false, true),\n\t\t\t},\n\t\t\tencryptor:               &failEncryptor{},\n\t\t\texpectEncryptionFailure: true,\n\t\t\texpected: []*api.GatheredResource{\n\t\t\t\t{\n\t\t\t\t\tResource: getSecret(\"testsecret\", \"testns1\", nil, false, false),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"excluded annotations are removed for unstructured-based gatherers such as secrets\": {\n\t\t\tconfig: ConfigDynamic{GroupVersionResource: schema.GroupVersionResource{Group: \"\", Version: \"v1\", Resource: \"secrets\"}},\n\n\t\t\t// To give a realistic regex in this test case, let's use the\n\t\t\t// example of the Kapp 
project that uses four annotations that all\n\t\t\t// start with `kapp.k14s.io/original*`. These annotations are\n\t\t\t// similar to `kubectl.kubernetes.io/last-applied-configuration` in\n\t\t\t// that they may contain sensitive information. From [1], they may\n\t\t\t// look like this:\n\t\t\t//\n\t\t\t//  kapp.k14s.io/original: |\n\t\t\t//    {\"apiVersion\":\"v1\",\"kind\":\"Secret\",\"spec\":{\"data\": {\"password\": \"cGFzc3dvcmQ=\",\"username\": \"bXl1c2VybmFtZQ==\"}}}\n\t\t\t//  kapp.k14s.io/original-diff: |\n\t\t\t//    - type: test\n\t\t\t//      path: /data\n\t\t\t//      value:\n\t\t\t//      password: cygpcGVyUzNjcmV0UEBhc3N3b3JkIQ==\n\t\t\t//      username: bXl1c2VybmFtZQ==\n\t\t\t//\n\t\t\t//  [1]: https://github.com/carvel-dev/kapp/issues/90#issuecomment-602074356\n\t\t\t//\n\t\t\t// The regular expression could be:\n\t\t\texcludeAnnotsKeys: []string{`^kapp\\.k14s\\.io/original.*`},\n\n\t\t\t// A somewhat realistic example of labels that would need to be\n\t\t\t// excluded would be when a company declares ownership using\n\t\t\t// sensitive identifiers (e.g., employee IDs), and the company\n\t\t\t// doesn't want these IDs to be exposed. 
Let's imagine these\n\t\t\t// employee IDs look like this:\n\t\t\t//\n\t\t\t//  company.com/employee-id: 12345\n\t\t\t//\n\t\t\t// The regular expression would then be:\n\t\t\texcludeLabelKeys: []string{`^company\\.com/employee-id$`},\n\n\t\t\taddObjects: []*unstructured.Unstructured{getObjectAnnot(\"v1\", \"Secret\", \"s0\", \"n1\",\n\t\t\t\tmap[string]any{\"kapp.k14s.io/original\": \"foo\", \"kapp.k14s.io/original-diff\": \"bar\", \"normal\": \"true\"},\n\t\t\t\tmap[string]any{`company.com/employee-id`: \"12345\", \"prod\": \"true\"},\n\t\t\t)},\n\t\t\texpected: []*api.GatheredResource{{Resource: getObjectAnnot(\"v1\", \"Secret\", \"s0\", \"n1\",\n\t\t\t\tmap[string]any{\"normal\": \"true\"},\n\t\t\t\tmap[string]any{\"prod\": \"true\"},\n\t\t\t)}},\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tvar wg sync.WaitGroup\n\t\t\tctx := t.Context()\n\n\t\t\tgvrToListKind := map[schema.GroupVersionResource]string{\n\t\t\t\t{Group: \"foobar\", Version: \"v1\", Resource: \"foos\"}:      \"UnstructuredList\",\n\t\t\t\t{Group: \"apps\", Version: \"v1\", Resource: \"deployments\"}: \"UnstructuredList\",\n\t\t\t\t{Group: \"\", Version: \"v1\", Resource: \"secrets\"}:         \"UnstructuredList\",\n\t\t\t\t{Group: \"\", Version: \"v1\", Resource: \"namespaces\"}:      \"UnstructuredList\",\n\t\t\t}\n\n\t\t\taddObjs := make([]runtime.Object, len(tc.addObjects))\n\t\t\tfor i, obj := range tc.addObjects {\n\t\t\t\taddObjs[i] = obj\n\t\t\t}\n\t\t\tcl := fake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, addObjs...)\n\n\t\t\t// init the datagatherer's informer with the client\n\t\t\tdg, err := tc.config.newDataGathererWithClient(ctx, cl, nil)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"unexpected error: %+v\", err)\n\t\t\t}\n\n\t\t\t// initializing test informer, this informer will update the waitGroup making sure all the\n\t\t\t// update and delete events have all been capture by the informers, the 100 
mills sleep is\n\t\t\t// just to make sure dg informer is caught up. This allows us to wait until the waitGroup is\n\t\t\t// done before doing the dg.Fetch.\n\t\t\tfactory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(cl, 10*time.Minute, metav1.NamespaceAll, nil)\n\t\t\tresourceInformer := factory.ForResource(tc.config.GroupVersionResource)\n\t\t\ttestInformer := resourceInformer.Informer()\n\t\t\t_, err = testInformer.AddEventHandler(k8scache.ResourceEventHandlerFuncs{\n\t\t\t\tDeleteFunc: func(obj any) {\n\t\t\t\t\tdefer wg.Done()\n\t\t\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\t\t},\n\t\t\t\tUpdateFunc: func(oldObj, newObj any) {\n\t\t\t\t\tdefer wg.Done()\n\t\t\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\t\t},\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\t\t\t// start test Informer\n\t\t\tfactory.Start(ctx.Done())\n\t\t\tk8scache.WaitForCacheSync(ctx.Done(), testInformer.HasSynced)\n\n\t\t\tdgd := dg.(*DataGathererDynamic)\n\t\t\tfor _, key := range tc.excludeAnnotsKeys {\n\t\t\t\tdgd.ExcludeAnnotKeys = append(dgd.ExcludeAnnotKeys, regexp.MustCompile(key))\n\t\t\t}\n\t\t\tfor _, key := range tc.excludeLabelKeys {\n\t\t\t\tdgd.ExcludeLabelKeys = append(dgd.ExcludeLabelKeys, regexp.MustCompile(key))\n\t\t\t}\n\n\t\t\tif tc.encryptor != nil {\n\t\t\t\tdgd.Encryptor = tc.encryptor\n\t\t\t}\n\n\t\t\t// start data gatherer informer\n\t\t\tdynamiDg := dg\n\t\t\tgo func() {\n\t\t\t\tif err = dynamiDg.Run(ctx); err != nil {\n\t\t\t\t\tt.Errorf(\"unexpected client error: %+v\", err)\n\t\t\t\t}\n\t\t\t}()\n\t\t\terr = dynamiDg.WaitForCacheSync(ctx)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"unexpected client error: %+v\", err)\n\t\t\t}\n\n\t\t\t// deletes all the objects set to be deleted, to trigger\n\t\t\t// a delete event in the informers. 
Add 1 to wg making \"sure\" (https://github.com/kubernetes/kubernetes/issues/95372)\n\t\t\t// the informers cache are sync\n\t\t\tfor ns, delete := range tc.deleteObjects {\n\t\t\t\twg.Add(1)\n\t\t\t\tdeletePolicy := metav1.DeletePropagationForeground\n\t\t\t\tdeleteOptions := metav1.DeleteOptions{\n\t\t\t\t\tPropagationPolicy: &deletePolicy,\n\t\t\t\t}\n\t\t\t\terr := cl.Resource(tc.config.GroupVersionResource).Namespace(ns).Delete(ctx, delete, deleteOptions)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"unexpected client delete error: %+v\", err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor ns, update := range tc.updateObjects {\n\t\t\t\twg.Add(1)\n\t\t\t\tnewObj := update.(*unstructured.Unstructured)\n\t\t\t\t_, err := cl.Resource(tc.config.GroupVersionResource).Namespace(ns).Update(ctx, newObj, metav1.UpdateOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"unexpected client update error: %+v\", err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// wait for all the events to occur, else timeut in 30 seconds\n\t\t\tif waitTimeout(&wg, 30*time.Second) {\n\t\t\t\tt.Fatalf(\"unexpected timeout\")\n\t\t\t}\n\t\t\tres, expectCount, err := dynamiDg.Fetch(ctx)\n\t\t\tif err != nil && !tc.err {\n\t\t\t\tt.Errorf(\"expected no error but got: %v\", err)\n\t\t\t}\n\t\t\tif err == nil && tc.err {\n\t\t\t\tt.Errorf(\"expected to get an error but didn't get one\")\n\t\t\t}\n\n\t\t\tif tc.expected != nil {\n\t\t\t\tdata, ok := res.(*api.DynamicData)\n\t\t\t\tif !ok {\n\t\t\t\t\tt.Errorf(\"expected result be *api.DynamicData but wasn't\")\n\t\t\t\t}\n\n\t\t\t\tlist := data.Items\n\t\t\t\t// sorting list of results by name\n\t\t\t\tsortGatheredResources(list)\n\t\t\t\t// sorting list of expected results by name\n\t\t\t\tsortGatheredResources(tc.expected)\n\n\t\t\t\t// check lengths of lists first before we iterate to compare items\n\t\t\t\tassert.Len(t, list, expectCount, \"unexpected number of resources returned\")\n\n\t\t\t\tfor i, item := range list {\n\t\t\t\t\tgot, ok := 
item.Resource.(*unstructured.Unstructured)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tt.Errorf(\"expected resource to be of type unstructured.Unstructured but got %T\", item.Resource)\n\t\t\t\t\t}\n\n\t\t\t\t\texpected, ok := tc.expected[i].Resource.(*unstructured.Unstructured)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tt.Errorf(\"expected resource to be of type unstructured.Unstructured but got %T\", tc.expected[i].Resource)\n\t\t\t\t\t}\n\n\t\t\t\t\t// If encryption is enabled, validate the encrypted data\n\t\t\t\t\tif tc.encryptor != nil {\n\t\t\t\t\t\tif tc.expectEncryptionFailure {\n\t\t\t\t\t\t\t_, found, err := unstructured.NestedFieldNoCopy(got.Object, encryptedDataFieldName)\n\t\t\t\t\t\t\trequire.NoError(t, err, \"error checking %s field\", encryptedDataFieldName)\n\t\t\t\t\t\t\trequire.False(t, found, \"expected %s field to not exist when encryption fails\", encryptedDataFieldName)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tsortResourcesByName(tc.addObjects)\n\t\t\t\t\t\t\tcompareEncryptedData(t, privKey, got, tc.addObjects[i])\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tassert.Equal(t, expected, got)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc compareEncryptedData(t *testing.T, privKey *stdrsa.PrivateKey, got *unstructured.Unstructured, original *unstructured.Unstructured) {\n\tt.Helper()\n\n\t// Check that encrypted data field exists\n\tencryptedDataRaw, found, err := unstructured.NestedFieldNoCopy(got.Object, encryptedDataFieldName)\n\trequire.NoError(t, err, \"error retrieving %s field\", encryptedDataFieldName)\n\trequire.True(t, found, \"expected %s field to exist when encryption is enabled\", encryptedDataFieldName)\n\n\t// Convert to map and validate structure\n\tencryptedDataMap, ok := encryptedDataRaw.(map[string]any)\n\trequire.True(t, ok, \"expected %s to be a map[string]any\", encryptedDataFieldName)\n\n\t// Check type field\n\ttypeField, ok := encryptedDataMap[\"type\"].(string)\n\trequire.True(t, ok, \"expected type field to be a string\")\n\tassert.Equal(t, 
rsa.EncryptionType, typeField, \"expected type to be %s\", rsa.EncryptionType)\n\n\t// Check data field exists and is valid\n\tdataFieldRaw, ok := encryptedDataMap[\"data\"]\n\trequire.True(t, ok, \"expected data field to exist\")\n\n\tdataField, ok := dataFieldRaw.(string)\n\trequire.True(t, ok, \"expected data field to be a JSON string\")\n\n\tjweBytes, err := base64.StdEncoding.DecodeString(dataField)\n\trequire.NoError(t, err, \"data field should be valid base64 string\")\n\n\trequire.NotEmpty(t, jweBytes, \"expected data field to be non-empty\")\n\n\t// Verify JWE can be parsed\n\t_, err = jwe.Parse(jweBytes)\n\trequire.NoError(t, err, \"data should be a valid JWE\")\n\n\tplaintext, err := jwe.Decrypt(jweBytes, jwe.WithKey(jwa.RSA_OAEP_256(), privKey), jwe.WithContext(t.Context()))\n\trequire.NoError(t, err, \"failed to decrypt JWE\")\n\n\t// Verify decrypted plaintext matches expected resource data\n\texpectedData, found, err := unstructured.NestedMap(original.Object, \"data\")\n\trequire.True(t, found, \"expected data field to exist in original resource\")\n\trequire.NoError(t, err, \"error retrieving data field from original resource\")\n\n\tvar decryptedDataMap map[string]any\n\terr = json.Unmarshal(plaintext, &decryptedDataMap)\n\trequire.NoError(t, err, \"failed to unmarshal decrypted plaintext\")\n\n\tassert.Equal(t, expectedData, decryptedDataMap, \"decrypted data does not match original data\")\n\n\t// Remove encrypted data so that simple comparison works for other fields\n\tunstructured.RemoveNestedField(got.Object, encryptedDataFieldName)\n}\n\nfunc TestDynamicGathererNativeResources_Fetch(t *testing.T) {\n\t// start a k8s client\n\t// init the datagatherer's informer with the client\n\t// add/delete resources watched by the data gatherer\n\t// check the expected result\n\tpodGVR := schema.GroupVersionResource{Group: corev1.SchemeGroupVersion.Group, Version: corev1.SchemeGroupVersion.Version, Resource: \"pods\"}\n\ttests := map[string]struct 
{\n\t\tconfig            ConfigDynamic\n\t\texcludeAnnotsKeys []string\n\t\texcludeLabelKeys  []string\n\t\taddObjects        []runtime.Object\n\t\tdeleteObjects     map[string]string\n\t\tupdateObjects     map[string]runtime.Object\n\t\texpected          []*api.GatheredResource\n\t\terr               bool\n\t}{\n\t\t\"only a Pod should be returned if GVR selects pods\": {\n\t\t\taddObjects: []runtime.Object{\n\t\t\t\tgetObject(\"foobar/v1\", \"Foo\", \"testfoo\", \"testns\", false),\n\t\t\t\tgetObject(\"v1\", \"Service\", \"testservice\", \"testns\", false),\n\t\t\t\tgetObject(\"foobar/v1\", \"NotFoo\", \"notfoo\", \"testns\", false),\n\t\t\t\t&corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: \"Pod\", APIVersion: \"v1\"}, ObjectMeta: metav1.ObjectMeta{Name: \"testpod1\", Namespace: \"testns\", UID: \"uid-testpod1\"}},\n\t\t\t},\n\t\t\tconfig: ConfigDynamic{\n\t\t\t\tIncludeNamespaces:    []string{\"testns\"},\n\t\t\t\tGroupVersionResource: podGVR,\n\t\t\t},\n\t\t\texpected: []*api.GatheredResource{\n\t\t\t\t{\n\t\t\t\t\tResource: &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: \"Pod\", APIVersion: \"v1\"}, ObjectMeta: metav1.ObjectMeta{Name: \"testpod1\", Namespace: \"testns\", UID: \"uid-testpod1\"}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"delete a Pod resource from the testns, the cache should have a Pod with deletedAt set to now()\": {\n\t\t\taddObjects: []runtime.Object{\n\t\t\t\t&corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: \"Pod\", APIVersion: \"v1\"}, ObjectMeta: metav1.ObjectMeta{Name: \"testfoo\", Namespace: \"testns\", UID: \"uid-testfoo1\"}},\n\t\t\t\tgetObject(\"v1\", \"Service\", \"testservice\", \"testns\", false),\n\t\t\t\tgetObject(\"foobar/v1\", \"NotFoo\", \"notfoo\", \"testns\", false),\n\t\t\t},\n\t\t\tdeleteObjects: map[string]string{\n\t\t\t\t\"testns\": \"testfoo\",\n\t\t\t},\n\t\t\tconfig: ConfigDynamic{\n\t\t\t\tIncludeNamespaces:    []string{\"testns\"},\n\t\t\t\tGroupVersionResource: podGVR,\n\t\t\t},\n\t\t\texpected: 
[]*api.GatheredResource{\n\t\t\t\t{\n\t\t\t\t\tResource:  &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: \"Pod\", APIVersion: \"v1\"}, ObjectMeta: metav1.ObjectMeta{Name: \"testfoo\", Namespace: \"testns\", UID: \"uid-testfoo1\"}},\n\t\t\t\t\tDeletedAt: api.Time{Time: clock.now()},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"Pods in different namespaces should be returned if no namespace field is set\": {\n\t\t\tconfig: ConfigDynamic{\n\t\t\t\tIncludeNamespaces:    []string{\"\"},\n\t\t\t\tGroupVersionResource: podGVR,\n\t\t\t},\n\t\t\taddObjects: []runtime.Object{\n\t\t\t\t&corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: \"Pod\", APIVersion: \"v1\"}, ObjectMeta: metav1.ObjectMeta{Name: \"testpod1\", Namespace: \"testns\", UID: \"uid-testpod1\"}},\n\t\t\t\t&corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: \"Pod\", APIVersion: \"v1\"}, ObjectMeta: metav1.ObjectMeta{Name: \"testpod2\", Namespace: \"testns2\", UID: \"uid-testpod2\"}},\n\t\t\t},\n\t\t\texpected: []*api.GatheredResource{\n\t\t\t\t{\n\t\t\t\t\tResource: &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: \"Pod\", APIVersion: \"v1\"}, ObjectMeta: metav1.ObjectMeta{Name: \"testpod1\", Namespace: \"testns\", UID: \"uid-testpod1\"}},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tResource: &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: \"Pod\", APIVersion: \"v1\"}, ObjectMeta: metav1.ObjectMeta{Name: \"testpod2\", Namespace: \"testns2\", UID: \"uid-testpod2\"}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"Delete Pods in different namespaces should be returned if no namespace field is set\": {\n\t\t\tconfig: ConfigDynamic{\n\t\t\t\tIncludeNamespaces:    []string{\"\"},\n\t\t\t\tGroupVersionResource: podGVR,\n\t\t\t},\n\t\t\taddObjects: []runtime.Object{\n\t\t\t\t&corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: \"Pod\", APIVersion: \"v1\"}, ObjectMeta: metav1.ObjectMeta{Name: \"testpod1\", Namespace: \"testns\", UID: \"uid-testpod1\"}},\n\t\t\t\t&corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: \"Pod\", APIVersion: \"v1\"}, ObjectMeta: metav1.ObjectMeta{Name: 
\"testpod2\", Namespace: \"testns2\", UID: \"uid-testpod2\"}},\n\t\t\t},\n\t\t\texpected: []*api.GatheredResource{\n\t\t\t\t{\n\t\t\t\t\tResource: &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: \"Pod\", APIVersion: \"v1\"}, ObjectMeta: metav1.ObjectMeta{Name: \"testpod1\", Namespace: \"testns\", UID: \"uid-testpod1\"}},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tResource: &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: \"Pod\", APIVersion: \"v1\"}, ObjectMeta: metav1.ObjectMeta{Name: \"testpod2\", Namespace: \"testns2\", UID: \"uid-testpod2\"}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"Delete all Pod resources, all the fetched resources should have a deletedAt field set to now()\": {\n\t\t\tconfig: ConfigDynamic{\n\t\t\t\tIncludeNamespaces:    []string{\"\"},\n\t\t\t\tGroupVersionResource: podGVR,\n\t\t\t},\n\t\t\tdeleteObjects: map[string]string{\n\t\t\t\t\"testns1\": \"testpod1\",\n\t\t\t\t\"testns2\": \"testpod2\",\n\t\t\t},\n\t\t\taddObjects: []runtime.Object{\n\t\t\t\t&corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: \"Pod\", APIVersion: \"v1\"}, ObjectMeta: metav1.ObjectMeta{Name: \"testpod1\", Namespace: \"testns1\", UID: \"uid-testpod1\"}},\n\t\t\t\t&corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: \"Pod\", APIVersion: \"v1\"}, ObjectMeta: metav1.ObjectMeta{Name: \"testpod2\", Namespace: \"testns2\", UID: \"uid-testpod2\"}},\n\t\t\t},\n\t\t\texpected: []*api.GatheredResource{\n\t\t\t\t{\n\t\t\t\t\tResource:  &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: \"Pod\", APIVersion: \"v1\"}, ObjectMeta: metav1.ObjectMeta{Name: \"testpod1\", Namespace: \"testns1\", UID: \"uid-testpod1\"}},\n\t\t\t\t\tDeletedAt: api.Time{Time: clock.now()},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tResource:  &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: \"Pod\", APIVersion: \"v1\"}, ObjectMeta: metav1.ObjectMeta{Name: \"testpod2\", Namespace: \"testns2\", UID: \"uid-testpod2\"}},\n\t\t\t\t\tDeletedAt: api.Time{Time: clock.now()},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"Update all Pods resources, all the fetched resources should 
have been updated\": {\n\t\t\tconfig: ConfigDynamic{\n\t\t\t\tIncludeNamespaces:    []string{\"\"},\n\t\t\t\tGroupVersionResource: podGVR,\n\t\t\t},\n\t\t\tupdateObjects: map[string]runtime.Object{\n\t\t\t\t\"testns1\": &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: \"Pod\", APIVersion: \"v1\"}, ObjectMeta: metav1.ObjectMeta{Name: \"testpod1\", Namespace: \"testns1\", UID: \"uid-testpod1\", Labels: map[string]string{\"foo\": \"newlabel\"}}},\n\t\t\t\t\"testns2\": &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: \"Pod\", APIVersion: \"v1\"}, ObjectMeta: metav1.ObjectMeta{Name: \"testpod2\", Namespace: \"testns2\", UID: \"uid-testpod2\", Labels: map[string]string{\"foo\": \"newlabel\"}}},\n\t\t\t},\n\t\t\taddObjects: []runtime.Object{\n\t\t\t\t&corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: \"Pod\", APIVersion: \"v1\"}, ObjectMeta: metav1.ObjectMeta{Name: \"testpod1\", Namespace: \"testns1\", UID: \"uid-testpod1\"}},\n\t\t\t\t&corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: \"Pod\", APIVersion: \"v1\"}, ObjectMeta: metav1.ObjectMeta{Name: \"testpod2\", Namespace: \"testns2\", UID: \"uid-testpod2\"}},\n\t\t\t},\n\t\t\texpected: []*api.GatheredResource{\n\t\t\t\t{\n\t\t\t\t\tResource: &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: \"Pod\", APIVersion: \"v1\"}, ObjectMeta: metav1.ObjectMeta{Name: \"testpod1\", Namespace: \"testns1\", UID: \"uid-testpod1\", Labels: map[string]string{\"foo\": \"newlabel\"}}},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tResource: &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: \"Pod\", APIVersion: \"v1\"}, ObjectMeta: metav1.ObjectMeta{Name: \"testpod2\", Namespace: \"testns2\", UID: \"uid-testpod2\", Labels: map[string]string{\"foo\": \"newlabel\"}}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"only Pods in the specified namespace should be returned\": {\n\t\t\tconfig: ConfigDynamic{\n\t\t\t\tIncludeNamespaces:    []string{\"testns\"},\n\t\t\t\tGroupVersionResource: podGVR,\n\t\t\t},\n\t\t\taddObjects: []runtime.Object{\n\t\t\t\t&corev1.Pod{\n\t\t\t\t\tTypeMeta: 
metav1.TypeMeta{\n\t\t\t\t\t\tKind:       \"Pod\",\n\t\t\t\t\t\tAPIVersion: \"v1\",\n\t\t\t\t\t},\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      \"testfoo1\",\n\t\t\t\t\t\tNamespace: \"testns\",\n\t\t\t\t\t\tUID:       \"uid-testfoo1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t&corev1.Pod{\n\t\t\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\t\t\tKind:       \"Pod\",\n\t\t\t\t\t\tAPIVersion: \"v1\",\n\t\t\t\t\t},\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      \"testfoo1\",\n\t\t\t\t\t\tNamespace: \"nottestns\",\n\t\t\t\t\t\tUID:       \"uid-testfoo2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: []*api.GatheredResource{\n\t\t\t\t{\n\t\t\t\t\tResource: &corev1.Pod{\n\t\t\t\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\t\t\t\tKind:       \"Pod\",\n\t\t\t\t\t\t\tAPIVersion: \"v1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName:      \"testfoo1\",\n\t\t\t\t\t\t\tNamespace: \"testns\",\n\t\t\t\t\t\t\tUID:       \"uid-testfoo1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Pod is the only native resource that we test out of lack of time\n\t\t// (would require a lot of changes to the testing func). 
Ideally we\n\t\t// should test all native resources such as Service, Deployment,\n\t\t// Ingress, Namespace, and so on.\n\t\t\"excluded annotations are removed for typed resources gatherers such as pods\": {\n\t\t\tconfig:            ConfigDynamic{GroupVersionResource: podGVR},\n\t\t\texcludeAnnotsKeys: []string{\"secret\"},\n\t\t\texcludeLabelKeys:  []string{\"secret\"},\n\t\t\taddObjects: []runtime.Object{\n\t\t\t\t&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: \"p0\", UID: \"p0\", Namespace: \"n1\", Annotations: map[string]string{\"normal-annot\": \"bar\"}}},\n\t\t\t\t&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: \"p1\", UID: \"p1\", Namespace: \"n1\", Labels: map[string]string{\"normal-label\": \"bar\"}}},\n\t\t\t\t&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: \"p2\", UID: \"p2\", Namespace: \"n1\", Annotations: map[string]string{\"super-secret-annot\": \"bar\"}}},\n\t\t\t\t&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: \"p3\", UID: \"p3\", Namespace: \"n1\", Labels: map[string]string{\"super-secret-label\": \"bar\"}}},\n\t\t\t},\n\t\t\texpected: []*api.GatheredResource{\n\t\t\t\t{Resource: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: \"p0\", UID: \"p0\", Namespace: \"n1\", Annotations: map[string]string{\"normal-annot\": \"bar\"}}, TypeMeta: metav1.TypeMeta{Kind: \"Pod\", APIVersion: \"v1\"}}},\n\t\t\t\t{Resource: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: \"p1\", UID: \"p1\", Namespace: \"n1\", Labels: map[string]string{\"normal-label\": \"bar\"}}, TypeMeta: metav1.TypeMeta{Kind: \"Pod\", APIVersion: \"v1\"}}},\n\t\t\t\t{Resource: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: \"p2\", UID: \"p2\", Namespace: \"n1\", Annotations: map[string]string{}}, TypeMeta: metav1.TypeMeta{Kind: \"Pod\", APIVersion: \"v1\"}}},\n\t\t\t\t{Resource: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: \"p3\", UID: \"p3\", Namespace: \"n1\", Labels: map[string]string{}}, TypeMeta: metav1.TypeMeta{Kind: \"Pod\", APIVersion: \"v1\"}}},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor 
name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tvar wg sync.WaitGroup\n\t\t\tctx := t.Context()\n\n\t\t\tclientset := fakeclientset.NewSimpleClientset(tc.addObjects...)\n\n\t\t\t// init the datagatherer's informer with the client\n\t\t\tdg, err := tc.config.newDataGathererWithClient(ctx, nil, clientset)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"unexpected error: %+v\", err)\n\t\t\t}\n\n\t\t\t// initializing test informer, this informer will capture all the events\n\t\t\t// that occur in the test case and only allow the dg.Fetch to be performed\n\t\t\t// after all the events have been triggered\n\t\t\tfactory := informers.NewSharedInformerFactoryWithOptions(clientset,\n\t\t\t\t10*time.Minute,\n\t\t\t\tinformers.WithNamespace(metav1.NamespaceAll),\n\t\t\t\tinformers.WithTweakListOptions(func(options *metav1.ListOptions) {}))\n\t\t\ttestInformer := factory.Core().V1().Pods().Informer()\n\t\t\t_, err = testInformer.AddEventHandler(k8scache.ResourceEventHandlerFuncs{\n\t\t\t\tDeleteFunc: func(obj any) {\n\t\t\t\t\tdefer wg.Done()\n\t\t\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\t\t},\n\t\t\t\tUpdateFunc: func(oldObj, newObj any) {\n\t\t\t\t\tdefer wg.Done()\n\t\t\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\t\t},\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// start test Informer\n\t\t\tfactory.Start(ctx.Done())\n\t\t\tk8scache.WaitForCacheSync(ctx.Done(), testInformer.HasSynced)\n\t\t\tdgd := dg.(*DataGathererDynamic)\n\t\t\tfor _, key := range tc.excludeAnnotsKeys {\n\t\t\t\tdgd.ExcludeAnnotKeys = append(dgd.ExcludeAnnotKeys, regexp.MustCompile(key))\n\t\t\t}\n\t\t\tfor _, key := range tc.excludeLabelKeys {\n\t\t\t\tdgd.ExcludeLabelKeys = append(dgd.ExcludeLabelKeys, regexp.MustCompile(key))\n\t\t\t}\n\n\t\t\t// start data gatherer informer\n\t\t\tdynamiDg := dg\n\t\t\tgo func() {\n\t\t\t\tif err = dynamiDg.Run(ctx); err != nil {\n\t\t\t\t\tt.Errorf(\"unexpected client error: %+v\", err)\n\t\t\t\t}\n\t\t\t}()\n\t\t\terr = 
dynamiDg.WaitForCacheSync(ctx)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"unexpected client error: %+v\", err)\n\t\t\t}\n\n\t\t\t// deletes all the objects set to be deleted, to trigger\n\t\t\t// a delete event in the informers. Add 1 to wg\n\t\t\tfor ns, delete := range tc.deleteObjects {\n\t\t\t\twg.Add(1)\n\t\t\t\tdeletePolicy := metav1.DeletePropagationForeground\n\t\t\t\tdeleteOptions := metav1.DeleteOptions{\n\t\t\t\t\tPropagationPolicy: &deletePolicy,\n\t\t\t\t}\n\t\t\t\terr := clientset.CoreV1().Pods(ns).Delete(ctx, delete, deleteOptions)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"unexpected client delete error: %+v\", err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor ns, update := range tc.updateObjects {\n\t\t\t\twg.Add(1)\n\t\t\t\tnewObj := update.(*corev1.Pod)\n\t\t\t\t_, err := clientset.CoreV1().Pods(ns).Update(ctx, newObj, metav1.UpdateOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"unexpected client update error: %+v\", err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// wait for all the events to occur, else timeout in 30 seconds\n\t\t\tif waitTimeout(&wg, 5*time.Second) {\n\t\t\t\tt.Fatalf(\"unexpected timeout\")\n\t\t\t}\n\t\t\trawRes, count, err := dynamiDg.Fetch(ctx)\n\t\t\tif tc.err {\n\t\t\t\trequire.Error(t, err)\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t}\n\n\t\t\tif tc.expected != nil {\n\t\t\t\tres, ok := rawRes.(*api.DynamicData)\n\t\t\t\trequire.Truef(t, ok, \"expected result be an *api.DynamicData but wasn't\")\n\t\t\t\tactual := res.Items\n\n\t\t\t\t// sorting list of results by name\n\t\t\t\tsortGatheredResources(actual)\n\t\t\t\t// sorting list of expected results by name\n\t\t\t\tsortGatheredResources(tc.expected)\n\n\t\t\t\tassert.Equal(t, tc.expected, actual)\n\t\t\t\tassert.Len(t, actual, count)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// waitTimeout waits for the waitgroup for the specified max timeout.\n// Returns true if waiting timed out.\nfunc waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {\n\tc := make(chan 
struct{})\n\tgo func() {\n\t\tdefer close(c)\n\t\twg.Wait()\n\t}()\n\tselect {\n\tcase <-c:\n\t\treturn false\n\tcase <-time.After(timeout):\n\t\treturn true\n\t}\n}\n\nfunc TestRemoveUnstructuredKeys(t *testing.T) {\n\tt.Run(\"remove single key\", run_TestRemoveUnstructuredKeys(tc_RemoveUnstructuredKeys{\n\t\tgivenPath:    []string{\"metadata\", \"annotations\"},\n\t\tgivenExclude: []string{\"^toexclude$\"},\n\t\tgivenObj: map[string]any{\n\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\"annotations\": map[string]any{\n\t\t\t\t\t\"toexclude\": \"foo\",\n\t\t\t\t\t\"tokeep\":    \"bar\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\texpectObj: map[string]any{\n\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\"annotations\": map[string]any{\n\t\t\t\t\t\"tokeep\": \"bar\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}))\n\n\tt.Run(\"remove keys using multiple regexes\", run_TestRemoveUnstructuredKeys(tc_RemoveUnstructuredKeys{\n\t\tgivenPath:    []string{\"metadata\", \"annotations\"},\n\t\tgivenExclude: []string{\"^toexclude1$\", \"^toexclude2$\"},\n\t\tgivenObj: map[string]any{\n\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\"annotations\": map[string]any{\n\t\t\t\t\t\"toexclude1\": \"foo\",\n\t\t\t\t\t\"toexclude2\": \"bar\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\texpectObj: map[string]any{\n\t\t\t\"metadata\": map[string]any{\"annotations\": map[string]any{}},\n\t\t},\n\t}))\n\n\tt.Run(\"remove multiple keys with a single regex\", run_TestRemoveUnstructuredKeys(tc_RemoveUnstructuredKeys{\n\t\tgivenPath:    []string{\"metadata\", \"annotations\"},\n\t\tgivenExclude: []string{\"toexclude.*\"},\n\t\tgivenObj: map[string]any{\n\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\"annotations\": map[string]any{\n\t\t\t\t\t\"toexclude1\": \"foo\",\n\t\t\t\t\t\"toexclude2\": \"bar\",\n\t\t\t\t\t\"tokeep\":     \"baz\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\texpectObj: map[string]any{\n\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\"annotations\": map[string]any{\n\t\t\t\t\t\"tokeep\": 
\"baz\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}))\n\n\tt.Run(\"with no regex, the object is untouched\", run_TestRemoveUnstructuredKeys(tc_RemoveUnstructuredKeys{\n\t\tgivenPath:    []string{\"metadata\", \"annotations\"},\n\t\tgivenExclude: []string{},\n\t\tgivenObj: map[string]any{\n\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\"annotations\": map[string]any{\n\t\t\t\t\t\"tokeep1\": \"foo\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\texpectObj: map[string]any{\n\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\"annotations\": map[string]any{\n\t\t\t\t\t\"tokeep1\": \"foo\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}))\n\n\t// The \"leaf\" field is the field that is at the end of the path. For\n\t// example, \"annotations\" is the leaf field in metadata.annotations.\n\tt.Run(\"works when the leaf field is not found\", run_TestRemoveUnstructuredKeys(tc_RemoveUnstructuredKeys{\n\t\tgivenPath:    []string{\"metadata\", \"annotations\"},\n\t\tgivenExclude: []string{},\n\n\t\tgivenObj:  map[string]any{\"metadata\": map[string]any{}},\n\t\texpectObj: map[string]any{\"metadata\": map[string]any{}},\n\t}))\n\n\tt.Run(\"works when the leaf field is nil\", run_TestRemoveUnstructuredKeys(tc_RemoveUnstructuredKeys{\n\t\tgivenPath:    []string{\"metadata\", \"annotations\"},\n\t\tgivenExclude: []string{},\n\t\tgivenObj:     map[string]any{\"metadata\": map[string]any{\"annotations\": nil}},\n\t\texpectObj:    map[string]any{\"metadata\": map[string]any{\"annotations\": nil}},\n\t}))\n\n\tt.Run(\"works when leaf field is unexpectedly not nil and not a known map\", run_TestRemoveUnstructuredKeys(tc_RemoveUnstructuredKeys{\n\t\tgivenPath: []string{\"metadata\", \"annotations\"},\n\t\tgivenObj:  map[string]any{\"metadata\": map[string]any{\"annotations\": 42}},\n\t\texpectObj: map[string]any{\"metadata\": map[string]any{\"annotations\": 42}},\n\t}))\n\n\t// The \"intermediate\" field is the field that is not at the end of the path.\n\t// For example, \"metadata\" is the intermediate field in\n\t// 
metadata.annotations.\n\tt.Run(\"works when the intermediate field doesn't exist\", run_TestRemoveUnstructuredKeys(tc_RemoveUnstructuredKeys{\n\t\tgivenPath: []string{\"metadata\", \"annotations\"},\n\t\tgivenObj:  map[string]any{},\n\t\texpectObj: map[string]any{},\n\t}))\n\n\tt.Run(\"works when the intermediate field is nil\", run_TestRemoveUnstructuredKeys(tc_RemoveUnstructuredKeys{\n\t\tgivenPath: []string{\"metadata\", \"annotations\"},\n\t\tgivenObj:  map[string]any{\"metadata\": nil},\n\t\texpectObj: map[string]any{\"metadata\": nil},\n\t}))\n\n\tt.Run(\"works when the intermediate field is unexpectedly not nil and not a map\", run_TestRemoveUnstructuredKeys(tc_RemoveUnstructuredKeys{\n\t\tgivenPath: []string{\"metadata\", \"annotations\"},\n\t\tgivenObj:  map[string]any{\"metadata\": 42},\n\t\texpectObj: map[string]any{\"metadata\": 42},\n\t}))\n}\n\ntype tc_RemoveUnstructuredKeys struct {\n\tgivenExclude []string\n\tgivenObj     map[string]any\n\tgivenPath    []string\n\texpectObj    map[string]any\n}\n\nfunc run_TestRemoveUnstructuredKeys(tc tc_RemoveUnstructuredKeys) func(*testing.T) {\n\treturn func(t *testing.T) {\n\t\tt.Helper()\n\t\tRemoveUnstructuredKeys(toRegexps(tc.givenExclude), &unstructured.Unstructured{Object: tc.givenObj}, tc.givenPath...)\n\t\tassert.Equal(t, tc.expectObj, tc.givenObj)\n\t}\n}\n\nfunc TestRemoveTypedKeys(t *testing.T) {\n\tt.Run(\"remove single key\", run_TestRemoveTypedKeys(tc_TestRemoveTypedKeys{\n\t\tgivenExclude: []string{\"^toexclude$\"},\n\t\tgiven:        map[string]string{\"toexclude\": \"foo\", \"tokeep\": \"bar\"},\n\t\texpected:     map[string]string{\"tokeep\": \"bar\"},\n\t}))\n\n\tt.Run(\"remove keys using multiple regexes\", run_TestRemoveTypedKeys(tc_TestRemoveTypedKeys{\n\t\tgivenExclude: []string{\"^toexclude1$\", \"^toexclude2$\"},\n\t\tgiven:        map[string]string{\"toexclude1\": \"foo\", \"toexclude2\": \"bar\", \"tokeep\": \"baz\"},\n\t\texpected:     map[string]string{\"tokeep\": 
\"baz\"},\n\t}))\n\n\tt.Run(\"remove multiple keys with a single regex\", run_TestRemoveTypedKeys(tc_TestRemoveTypedKeys{\n\t\tgivenExclude: []string{\"^toexclude.*\"},\n\t\tgiven:        map[string]string{\"toexclude1\": \"foo\", \"toexclude2\": \"bar\", \"tokeep\": \"baz\"},\n\t\texpected:     map[string]string{\"tokeep\": \"baz\"},\n\t}))\n\n\tt.Run(\"with no regex, the object is untouched\", run_TestRemoveTypedKeys(tc_TestRemoveTypedKeys{\n\t\tgivenExclude: []string{},\n\t\tgiven:        map[string]string{\"tokeep1\": \"foo\", \"tokeep2\": \"bar\"},\n\t\texpected:     map[string]string{\"tokeep1\": \"foo\", \"tokeep2\": \"bar\"},\n\t}))\n\n\tt.Run(\"works when the map is nil\", run_TestRemoveTypedKeys(tc_TestRemoveTypedKeys{\n\t\tgivenExclude: []string{\"^toexclude$\"},\n\t\tgiven:        nil,\n\t\texpected:     nil,\n\t}))\n}\n\ntype tc_TestRemoveTypedKeys struct {\n\tgivenExclude []string\n\tgiven        map[string]string\n\texpected     map[string]string\n}\n\nfunc run_TestRemoveTypedKeys(tc tc_TestRemoveTypedKeys) func(t *testing.T) {\n\treturn func(t *testing.T) {\n\t\tt.Helper()\n\t\tRemoveTypedKeys(toRegexps(tc.givenExclude), tc.given)\n\t\tassert.Equal(t, tc.expected, tc.given)\n\t}\n}\n\nfunc toRegexps(keys []string) []*regexp.Regexp {\n\tvar regexps []*regexp.Regexp\n\tfor _, key := range keys {\n\t\tregexps = append(regexps, regexp.MustCompile(key))\n\t}\n\treturn regexps\n}\n\n// TestValidate_LabelSelectors tests validation of label selectors\nfunc TestValidate_LabelSelectors(t *testing.T) {\n\ttests := []struct {\n\t\tname           string\n\t\tlabelSelectors []string\n\t\texpectError    bool\n\t\terrorContains  string\n\t}{\n\t\t{\n\t\t\tname:           \"valid simple label selector\",\n\t\t\tlabelSelectors: []string{\"app=myapp\"},\n\t\t\texpectError:    false,\n\t\t},\n\t\t{\n\t\t\tname:           \"valid label selector with dot notation\",\n\t\t\tlabelSelectors: []string{\"conjur.org/name=conjur-connect-configmap\"},\n\t\t\texpectError:    
false,\n\t\t},\n\t\t{\n\t\t\tname:           \"valid negative label selector\",\n\t\t\tlabelSelectors: []string{\"app!=test\"},\n\t\t\texpectError:    false,\n\t\t},\n\t\t{\n\t\t\tname:           \"valid multiple label selectors\",\n\t\t\tlabelSelectors: []string{\"app=myapp\", \"environment=production\"},\n\t\t\texpectError:    false,\n\t\t},\n\t\t{\n\t\t\tname:           \"valid label existence check\",\n\t\t\tlabelSelectors: []string{\"app\"},\n\t\t\texpectError:    false,\n\t\t},\n\t\t{\n\t\t\tname:           \"valid label non-existence check\",\n\t\t\tlabelSelectors: []string{\"!app\"},\n\t\t\texpectError:    false,\n\t\t},\n\t\t{\n\t\t\tname:           \"valid set-based selector\",\n\t\t\tlabelSelectors: []string{\"environment in (production, staging)\"},\n\t\t\texpectError:    false,\n\t\t},\n\t\t{\n\t\t\tname:           \"valid negative set-based selector\",\n\t\t\tlabelSelectors: []string{\"environment notin (dev, test)\"},\n\t\t\texpectError:    false,\n\t\t},\n\t\t{\n\t\t\tname:           \"empty label selector\",\n\t\t\tlabelSelectors: []string{\"\"},\n\t\t\texpectError:    true,\n\t\t\terrorContains:  \"must not be empty\",\n\t\t},\n\t\t{\n\t\t\tname:           \"invalid label selector syntax\",\n\t\t\tlabelSelectors: []string{\"invalid===syntax\"},\n\t\t\texpectError:    true,\n\t\t\terrorContains:  \"invalid label selector\",\n\t\t},\n\t\t{\n\t\t\tname:           \"multiple selectors with one invalid\",\n\t\t\tlabelSelectors: []string{\"app=valid\", \"invalid===\"},\n\t\t\texpectError:    true,\n\t\t\terrorContains:  \"invalid label selector 1\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconfig := &ConfigDynamic{\n\t\t\t\tGroupVersionResource: schema.GroupVersionResource{\n\t\t\t\t\tVersion:  \"v1\",\n\t\t\t\t\tResource: \"configmaps\",\n\t\t\t\t},\n\t\t\t\tLabelSelectors: tt.labelSelectors,\n\t\t\t}\n\n\t\t\terr := config.validate()\n\t\t\tif tt.expectError {\n\t\t\t\trequire.Error(t, 
err)\n\t\t\t\tif tt.errorContains != \"\" {\n\t\t\t\t\tassert.Contains(t, err.Error(), tt.errorContains)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestValidate_FieldSelectors tests validation of field selectors.\nfunc TestValidate_FieldSelectors(t *testing.T) {\n\ttests := []struct {\n\t\tname           string\n\t\tfieldSelectors []string\n\t\texpectError    bool\n\t\terrorContains  string\n\t}{\n\t\t{\n\t\t\tname:           \"valid field selector\",\n\t\t\tfieldSelectors: []string{\"metadata.name=test\"},\n\t\t\texpectError:    false,\n\t\t},\n\t\t{\n\t\t\tname:           \"valid negative field selector\",\n\t\t\tfieldSelectors: []string{\"type!=kubernetes.io/dockercfg\"},\n\t\t\texpectError:    false,\n\t\t},\n\t\t{\n\t\t\tname:           \"multiple valid field selectors\",\n\t\t\tfieldSelectors: []string{\"metadata.namespace=default\", \"type!=Opaque\"},\n\t\t\texpectError:    false,\n\t\t},\n\t\t{\n\t\t\tname:           \"empty field selector\",\n\t\t\tfieldSelectors: []string{\"\"},\n\t\t\texpectError:    true,\n\t\t\terrorContains:  \"must not be empty\",\n\t\t},\n\t\t{\n\t\t\tname:           \"invalid field selector syntax\",\n\t\t\tfieldSelectors: []string{\"invalid===field\"},\n\t\t\texpectError:    true,\n\t\t\terrorContains:  \"invalid field selector\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconfig := &ConfigDynamic{\n\t\t\t\tGroupVersionResource: schema.GroupVersionResource{\n\t\t\t\t\tVersion:  \"v1\",\n\t\t\t\t\tResource: \"secrets\",\n\t\t\t\t},\n\t\t\t\tFieldSelectors: tt.fieldSelectors,\n\t\t\t}\n\n\t\t\terr := config.validate()\n\t\t\tif tt.expectError {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tif tt.errorContains != \"\" {\n\t\t\t\t\tassert.Contains(t, err.Error(), tt.errorContains)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestValidate_CombinedSelectors tests validation with both field and label 
selectors.\nfunc TestValidate_CombinedSelectors(t *testing.T) {\n\ttests := []struct {\n\t\tname           string\n\t\tfieldSelectors []string\n\t\tlabelSelectors []string\n\t\texpectError    bool\n\t\terrorContains  string\n\t}{\n\t\t{\n\t\t\tname:           \"valid field and label selectors\",\n\t\t\tfieldSelectors: []string{\"type!=kubernetes.io/dockercfg\"},\n\t\t\tlabelSelectors: []string{\"app=myapp\"},\n\t\t\texpectError:    false,\n\t\t},\n\t\t{\n\t\t\tname:           \"invalid field selector with valid label selector\",\n\t\t\tfieldSelectors: []string{\"invalid===\"},\n\t\t\tlabelSelectors: []string{\"app=myapp\"},\n\t\t\texpectError:    true,\n\t\t\terrorContains:  \"invalid field selector\",\n\t\t},\n\t\t{\n\t\t\tname:           \"valid field selector with invalid label selector\",\n\t\t\tfieldSelectors: []string{\"type!=Opaque\"},\n\t\t\tlabelSelectors: []string{\"invalid===\"},\n\t\t\texpectError:    true,\n\t\t\terrorContains:  \"invalid label selector\",\n\t\t},\n\t\t{\n\t\t\tname:           \"both selectors invalid\",\n\t\t\tfieldSelectors: []string{\"bad===field\"},\n\t\t\tlabelSelectors: []string{\"bad===label\"},\n\t\t\texpectError:    true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconfig := &ConfigDynamic{\n\t\t\t\tGroupVersionResource: schema.GroupVersionResource{\n\t\t\t\t\tVersion:  \"v1\",\n\t\t\t\t\tResource: \"configmaps\",\n\t\t\t\t},\n\t\t\t\tFieldSelectors: tt.fieldSelectors,\n\t\t\t\tLabelSelectors: tt.labelSelectors,\n\t\t\t}\n\n\t\t\terr := config.validate()\n\t\t\tif tt.expectError {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tif tt.errorContains != \"\" {\n\t\t\t\t\tassert.Contains(t, err.Error(), tt.errorContains)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/datagatherer/k8sdynamic/fieldfilter.go",
    "content": "package k8sdynamic\n\nimport (\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured\"\n)\n\n// SecretSelectedFields is the list of fields sent from Secret objects to the\n// backend.\n// The `data` is redacted, to prevent private keys or sensitive data being\n// collected. Only the following non-sensitive keys are retained: tls.crt,\n// ca.crt. These keys are assumed to always contain public TLS certificates.\n// The `conjur-map` key is also retained, as it is used to map Secrets to\n// Conjur variables, and is not considered sensitive.\n// See https://docs.cyberark.com/conjur-open-source/latest/en/content/integrations/k8s-ocp/cjr-secrets-provider-lp.htm\nvar SecretSelectedFields = []FieldPath{\n\t{\"kind\"},\n\t{\"apiVersion\"},\n\t{\"metadata\", \"annotations\"},\n\t{\"metadata\", \"labels\"},\n\t{\"metadata\", \"name\"},\n\t{\"metadata\", \"namespace\"},\n\t{\"metadata\", \"ownerReferences\"},\n\t{\"metadata\", \"selfLink\"},\n\t{\"metadata\", \"uid\"},\n\t{\"metadata\", \"creationTimestamp\"},\n\t{\"metadata\", \"deletionTimestamp\"},\n\t{\"metadata\", \"resourceVersion\"},\n\t{\"immutable\"},\n\t{\"type\"},\n\t{\"data\", \"tls.crt\"},\n\t{\"data\", \"ca.crt\"},\n\t{\"data\", \"conjur-map\"},\n}\n\n// RouteSelectedFields is the list of fields sent from OpenShift Route objects to the\n// backend.\n// The Route resource is redacted because it may contain private keys for TLS.\n//\n// TODO(wallrj): Find out if the `.tls.key` field is the only one that may\n// contain sensitive data and if so, that field could be redacted instead\n// selecting everything else, for consistency with Ingress or any of the other\n// resources that are collected. 
Or alternatively add a comment to explain why\n// for Route, the set of fields is allow-listed while for Ingress, all fields\n// are collected.\n// https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/network_apis/route-route-openshift-io-v1#spec-tls-3\nvar RouteSelectedFields = []FieldPath{\n\t{\"kind\"},\n\t{\"apiVersion\"},\n\t{\"metadata\", \"annotations\"},\n\t{\"metadata\", \"name\"},\n\t{\"metadata\", \"namespace\"},\n\t{\"metadata\", \"ownerReferences\"},\n\t{\"metadata\", \"selfLink\"},\n\t{\"metadata\", \"uid\"},\n\t{\"metadata\", \"creationTimestamp\"},\n\t{\"metadata\", \"deletionTimestamp\"},\n\t{\"metadata\", \"resourceVersion\"},\n\n\t{\"spec\", \"host\"},\n\t{\"spec\", \"to\", \"kind\"},\n\t{\"spec\", \"to\", \"name\"},\n\t{\"spec\", \"to\", \"weight\"},\n\t{\"spec\", \"tls\", \"termination\"},\n\t{\"spec\", \"tls\", \"certificate\"},\n\t{\"spec\", \"tls\", \"caCertificate\"},\n\t{\"spec\", \"tls\", \"destinationCACertificate\"},\n\t{\"spec\", \"tls\", \"insecureEdgeTerminationPolicy\"},\n\t{\"spec\", \"wildcardPolicy\"},\n\t{\"status\"},\n}\n\n// RedactFields are removed from all objects\nvar RedactFields = []FieldPath{\n\t{\"metadata\", \"managedFields\"},\n\t{\"metadata\", \"annotations\", \"kubectl.kubernetes.io/last-applied-configuration\"},\n}\n\ntype FieldPath []string\n\n// Select removes all but the supplied fields from the resource\nfunc Select(fields []FieldPath, resource *unstructured.Unstructured) error {\n\tnewResource := unstructured.Unstructured{\n\t\tObject: map[string]any{},\n\t}\n\n\tfor _, field := range fields {\n\t\tvalue, found, err := unstructured.NestedFieldNoCopy(resource.Object, field...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !found {\n\t\t\tcontinue\n\t\t}\n\t\tif err := unstructured.SetNestedField(newResource.Object, value, field...); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tresource.Object = newResource.Object\n\n\treturn nil\n}\n\n// Redact removes the supplied fields 
from the resource\nfunc Redact(fields []FieldPath, resource *unstructured.Unstructured) {\n\tfor _, field := range fields {\n\t\tunstructured.RemoveNestedField(resource.Object, field...)\n\t}\n}\n"
  },
  {
    "path": "pkg/datagatherer/k8sdynamic/fieldfilter_test.go",
    "content": "package k8sdynamic\n\nimport (\n\t\"encoding/json\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured\"\n\n\t\"github.com/jetstack/preflight/pkg/testutil\"\n)\n\nfunc TestSelect(t *testing.T) {\n\tt.Run(\"secret\", run_TestSelect(\n\t\tmap[string]any{\n\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\"kind\":       \"Secret\",\n\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\"name\":      \"example\",\n\t\t\t\t\"namespace\": \"example\",\n\t\t\t\t\"annotations\": map[string]any{\n\t\t\t\t\t\"kubectl.kubernetes.io/last-applied-configuration\": \"secret\",\n\t\t\t\t},\n\t\t\t\t\"labels\": map[string]any{\n\t\t\t\t\t\"foo\": \"bar\",\n\t\t\t\t},\n\t\t\t\t\"resourceVersion\":   \"fake-resource-version\",\n\t\t\t\t\"creationTimestamp\": \"2025-08-15T00:00:01Z\",\n\t\t\t\t\"deletionTimestamp\": \"2025-08-15T00:00:02Z\",\n\t\t\t\t// Examples of fields which are dropped\n\t\t\t\t\"deletionGracePeriodSeconds\": 10,\n\t\t\t\t\"finalizers\":                 []string{\"example.com/fake-finalizer\"},\n\t\t\t\t\"generation\":                 11,\n\t\t\t},\n\t\t\t\"type\": \"kubernetes.io/tls\",\n\t\t\t\"data\": map[string]any{\n\t\t\t\t\"tls.crt\":    \"cert data\",\n\t\t\t\t\"tls.key\":    \"secret\",\n\t\t\t\t\"extra\":      \"should be removed\",\n\t\t\t\t\"conjur-map\": \"should be kept\",\n\t\t\t},\n\t\t},\n\t\tSecretSelectedFields,\n\t\tmap[string]any{\n\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\"kind\":       \"Secret\",\n\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\"name\":      \"example\",\n\t\t\t\t\"namespace\": \"example\",\n\t\t\t\t\"annotations\": map[string]any{\n\t\t\t\t\t// The \"last-applied-configuration\" isn't ignored in\n\t\t\t\t\t// \"Select\". 
\"Redact\" removes it.\n\t\t\t\t\t\"kubectl.kubernetes.io/last-applied-configuration\": \"secret\",\n\t\t\t\t},\n\t\t\t\t\"labels\": map[string]any{\n\t\t\t\t\t\"foo\": \"bar\",\n\t\t\t\t},\n\t\t\t\t\"resourceVersion\":   \"fake-resource-version\",\n\t\t\t\t\"creationTimestamp\": \"2025-08-15T00:00:01Z\",\n\t\t\t\t\"deletionTimestamp\": \"2025-08-15T00:00:02Z\",\n\t\t\t},\n\t\t\t\"type\": \"kubernetes.io/tls\",\n\t\t\t\"data\": map[string]any{\n\t\t\t\t// The \"tls.key\" is ignored.\n\t\t\t\t\"tls.crt\":    \"cert data\",\n\t\t\t\t\"conjur-map\": \"should be kept\",\n\t\t\t},\n\t\t},\n\t))\n\n\t// Confirm select function preserves immutability\n\tt.Run(\"secret-immutable\", run_TestSelect(\n\t\tmap[string]any{\n\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\"kind\":       \"Secret\",\n\t\t\t\"immutable\":  true,\n\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\"name\":      \"with-immutable\",\n\t\t\t\t\"namespace\": \"example\",\n\t\t\t},\n\t\t\t\"type\": \"Opaque\",\n\t\t},\n\t\tSecretSelectedFields,\n\t\tmap[string]any{\n\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\"kind\":       \"Secret\",\n\t\t\t\"immutable\":  true,\n\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\"name\":      \"with-immutable\",\n\t\t\t\t\"namespace\": \"example\",\n\t\t\t},\n\t\t\t\"type\": \"Opaque\",\n\t\t},\n\t))\n\n\tt.Run(\"secret-immutable-false\", run_TestSelect(\n\t\tmap[string]any{\n\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\"kind\":       \"Secret\",\n\t\t\t\"immutable\":  false,\n\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\"name\":      \"with-immutable-false\",\n\t\t\t\t\"namespace\": \"example\",\n\t\t\t},\n\t\t\t\"type\": \"Opaque\",\n\t\t},\n\t\tSecretSelectedFields,\n\t\tmap[string]any{\n\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\"kind\":       \"Secret\",\n\t\t\t\"immutable\":  false,\n\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\"name\":      \"with-immutable-false\",\n\t\t\t\t\"namespace\": \"example\",\n\t\t\t},\n\t\t\t\"type\": \"Opaque\",\n\t\t},\n\t))\n\n\tt.Run(\"secret-immutable-absent\", 
run_TestSelect(\n\t\tmap[string]any{\n\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\"kind\":       \"Secret\",\n\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\"name\":      \"immutable-absent\",\n\t\t\t\t\"namespace\": \"example\",\n\t\t\t},\n\t\t\t\"type\": \"Opaque\",\n\t\t},\n\t\tSecretSelectedFields,\n\t\tmap[string]any{\n\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\"kind\":       \"Secret\",\n\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\"name\":      \"immutable-absent\",\n\t\t\t\t\"namespace\": \"example\",\n\t\t\t},\n\t\t\t\"type\": \"Opaque\",\n\t\t},\n\t))\n\n\tt.Run(\"route\", run_TestSelect(\n\t\tmap[string]any{\n\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\"kind\":       \"Route\",\n\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\"name\": \"example\",\n\t\t\t\t\"annotations\": map[string]any{\n\t\t\t\t\t\"kubectl.kubernetes.io/last-applied-configuration\": \"secret\",\n\t\t\t\t},\n\t\t\t\t\"labels\": map[string]any{\n\t\t\t\t\t\"foo\": \"bar\",\n\t\t\t\t},\n\t\t\t\t\"resourceVersion\":   \"fake-resource-version\",\n\t\t\t\t\"creationTimestamp\": \"2025-08-15T00:00:01Z\",\n\t\t\t\t\"deletionTimestamp\": \"2025-08-15T00:00:02Z\",\n\t\t\t\t// Examples of fields which are dropped\n\t\t\t\t\"deletionGracePeriodSeconds\": 10,\n\t\t\t\t\"finalizers\":                 []string{\"example.com/fake-finalizer\"},\n\t\t\t\t\"generation\":                 11,\n\t\t\t},\n\t\t\t\"spec\": map[string]any{\n\t\t\t\t\"host\": \"www.example.com\",\n\t\t\t\t\"to\": map[string]any{\n\t\t\t\t\t\"kind\": \"Service\",\n\t\t\t\t\t\"name\": \"frontend\",\n\t\t\t\t},\n\t\t\t\t\"tls\": map[string]any{\n\t\t\t\t\t\"termination\":              \"reencrypt\",\n\t\t\t\t\t\"key\":                      \"secret\",\n\t\t\t\t\t\"certificate\":              \"cert data\",\n\t\t\t\t\t\"caCertificate\":            \"caCert data\",\n\t\t\t\t\t\"destinationCACertificate\": \"destinationCaCert data\",\n\t\t\t\t},\n\t\t\t},\n\t\t}, RouteSelectedFields,\n\t\tmap[string]any{\n\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\"kind\":  
     \"Route\",\n\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\"name\": \"example\",\n\t\t\t\t\"annotations\": map[string]any{\n\t\t\t\t\t// The \"last-applied-configuration\" isn't ignored in\n\t\t\t\t\t// \"Select\". \"Redact\" removes it.\n\t\t\t\t\t\"kubectl.kubernetes.io/last-applied-configuration\": \"secret\",\n\t\t\t\t},\n\t\t\t\t\"resourceVersion\":   \"fake-resource-version\",\n\t\t\t\t\"creationTimestamp\": \"2025-08-15T00:00:01Z\",\n\t\t\t\t\"deletionTimestamp\": \"2025-08-15T00:00:02Z\",\n\t\t\t},\n\t\t\t\"spec\": map[string]any{\n\t\t\t\t\"host\": \"www.example.com\",\n\t\t\t\t\"to\": map[string]any{\n\t\t\t\t\t\"kind\": \"Service\",\n\t\t\t\t\t\"name\": \"frontend\",\n\t\t\t\t},\n\t\t\t\t\"tls\": map[string]any{\n\t\t\t\t\t\"termination\": \"reencrypt\",\n\t\t\t\t\t// The \"key\" field is ignored.\n\t\t\t\t\t\"certificate\":              \"cert data\",\n\t\t\t\t\t\"caCertificate\":            \"caCert data\",\n\t\t\t\t\t\"destinationCACertificate\": \"destinationCaCert data\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t))\n}\n\nfunc run_TestSelect(given map[string]any, givenSelect []FieldPath, expect map[string]any) func(*testing.T) {\n\treturn func(t *testing.T) {\n\t\tt.Helper()\n\t\tgivenPtr := unstructured.Unstructured{Object: given}\n\t\terr := Select(givenSelect, &givenPtr)\n\t\trequire.NoError(t, err)\n\n\t\tassert.Equal(t, expect, givenPtr.Object)\n\t}\n}\n\nfunc TestSelectMissingSelectedField(t *testing.T) {\n\tresource := &unstructured.Unstructured{\n\t\tObject: map[string]any{\n\t\t\t\"kind\": \"Secret\",\n\t\t},\n\t}\n\n\tfieldsToSelect := []FieldPath{\n\t\t{\"kind\"}, // required for unstructured unmarshal\n\t\t{\"missing\"},\n\t}\n\n\terr := Select(fieldsToSelect, resource)\n\trequire.NoError(t, err)\n\tbytes, err := json.MarshalIndent(resource, \"\", \"    \")\n\trequire.NoError(t, err)\n\n\texpectedJSON := testutil.Undent(`\n\t\t{\n\t\t    \"kind\": \"Secret\"\n\t\t}`)\n\tassert.Equal(t, expectedJSON, string(bytes))\n}\n\nfunc 
TestRedactSecret(t *testing.T) {\n\tresource := &unstructured.Unstructured{\n\t\tObject: map[string]any{\n\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\"kind\":       \"Secret\",\n\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\"name\":      \"example\",\n\t\t\t\t\"namespace\": \"example\",\n\t\t\t\t\"annotations\": map[string]any{\n\t\t\t\t\t\"kubectl.kubernetes.io/last-applied-configuration\": \"secret\",\n\t\t\t\t},\n\t\t\t\t\"managedFields\": nil,\n\t\t\t},\n\t\t\t\"type\": \"kubernetes.io/tls\",\n\t\t\t\"data\": map[string]any{\n\t\t\t\t\"tls.crt\": \"cert data\",\n\t\t\t\t\"tls.key\": \"secret\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfieldsToRedact := []FieldPath{\n\t\t{\"metadata\", \"managedFields\"},\n\t\t{\"metadata\", \"annotations\", \"kubectl.kubernetes.io/last-applied-configuration\"},\n\t\t{\"data\", \"tls.key\"},\n\t}\n\n\tRedact(fieldsToRedact, resource)\n\n\tbytes, err := json.MarshalIndent(resource, \"\", \"    \")\n\trequire.NoError(t, err)\n\texpectedJSON := testutil.Undent(`\n\t\t{\n\t\t    \"apiVersion\": \"v1\",\n\t\t    \"data\": {\n\t\t        \"tls.crt\": \"cert data\"\n\t\t    },\n\t\t    \"kind\": \"Secret\",\n\t\t    \"metadata\": {\n\t\t        \"annotations\": {},\n\t\t        \"name\": \"example\",\n\t\t        \"namespace\": \"example\"\n\t\t    },\n\t\t    \"type\": \"kubernetes.io/tls\"\n\t\t}`)\n\tassert.Equal(t, expectedJSON, string(bytes))\n}\n\nfunc TestRedactPod(t *testing.T) {\n\tresource := &unstructured.Unstructured{\n\t\tObject: map[string]any{\n\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\"kind\":       \"Pod\",\n\t\t\t\"metadata\": map[string]any{\n\t\t\t\t\"name\":          \"example\",\n\t\t\t\t\"namespace\":     \"example\",\n\t\t\t\t\"managedFields\": []any{},\n\t\t\t},\n\t\t\t\"spec\": map[string]any{\n\t\t\t\t\"serviceAccountName\": \"example\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfieldsToRedact := []FieldPath{\n\t\t{\"metadata\", \"managedFields\"},\n\t}\n\n\tRedact(fieldsToRedact, resource)\n\n\tbytes, err := json.MarshalIndent(resource, \"\", 
\"    \")\n\trequire.NoError(t, err)\n\texpectedJSON := testutil.Undent(`\n\t\t{\n\t\t    \"apiVersion\": \"v1\",\n\t\t    \"kind\": \"Pod\",\n\t\t    \"metadata\": {\n\t\t        \"name\": \"example\",\n\t\t        \"namespace\": \"example\"\n\t\t    },\n\t\t    \"spec\": {\n\t\t        \"serviceAccountName\": \"example\"\n\t\t    }\n\t\t}`)\n\tassert.Equal(t, expectedJSON, string(bytes))\n}\n\nfunc TestRedactMissingField(t *testing.T) {\n\tresource := &unstructured.Unstructured{\n\t\tObject: map[string]any{\n\t\t\t\"kind\": \"Secret\",\n\t\t},\n\t}\n\n\tfieldsToRedact := []FieldPath{\n\t\t{\"missing\"},\n\t}\n\n\tRedact(fieldsToRedact, resource)\n\tbytes, err := json.MarshalIndent(resource, \"\", \"    \")\n\trequire.NoError(t, err)\n\n\texpectedJSON := testutil.Undent(`\n\t\t{\n\t\t    \"kind\": \"Secret\"\n\t\t}`)\n\tassert.Equal(t, expectedJSON, string(bytes))\n}\n"
  },
  {
    "path": "pkg/datagatherer/local/local.go",
    "content": "package local\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/jetstack/preflight/pkg/datagatherer\"\n)\n\n// Config is the configuration for a local DataGatherer.\ntype Config struct {\n\t// DataPath is the path to file containing the data to load.\n\tDataPath string `yaml:\"data-path\"`\n}\n\n// validate validates the configuration.\nfunc (c *Config) validate() error {\n\tif c.DataPath == \"\" {\n\t\treturn fmt.Errorf(\"invalid configuration: DataPath cannot be empty\")\n\t}\n\treturn nil\n}\n\n// DataGatherer is a data-gatherer that loads data from a local file.\ntype DataGatherer struct {\n\tdataPath string\n}\n\n// NewDataGatherer returns a new DataGatherer.\nfunc (c *Config) NewDataGatherer(ctx context.Context) (datagatherer.DataGatherer, error) {\n\tif err := c.validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &DataGatherer{\n\t\tdataPath: c.DataPath,\n\t}, nil\n}\n\nfunc (g *DataGatherer) Run(ctx context.Context) error {\n\t// no async functionality, see Fetch\n\treturn nil\n}\n\nfunc (g *DataGatherer) WaitForCacheSync(ctx context.Context) error {\n\t// no async functionality, see Fetch\n\treturn nil\n}\n\n// Fetch loads and returns the data from the LocalDatagatherer's dataPath\nfunc (g *DataGatherer) Fetch(ctx context.Context) (any, int, error) {\n\tdataBytes, err := os.ReadFile(g.dataPath)\n\tif err != nil {\n\t\treturn nil, -1, err\n\t}\n\treturn dataBytes, -1, nil\n}\n"
  },
  {
    "path": "pkg/datagatherer/oidc/oidc.go",
    "content": "package oidc\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/url\"\n\t\"strings\"\n\n\tapierrors \"k8s.io/apimachinery/pkg/api/errors\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/rest\"\n\t\"k8s.io/klog/v2\"\n\n\t\"github.com/jetstack/preflight/api\"\n\t\"github.com/jetstack/preflight/pkg/datagatherer\"\n\t\"github.com/jetstack/preflight/pkg/kubeconfig\"\n)\n\n// OIDCDiscovery contains the configuration for the oidc data-gatherer.\ntype OIDCDiscovery struct {\n\t// KubeConfigPath is the path to the kubeconfig file. If empty, will assume it runs in-cluster.\n\tKubeConfigPath string `yaml:\"kubeconfig\"`\n}\n\n// UnmarshalYAML unmarshals the Config resolving GroupVersionResource.\nfunc (c *OIDCDiscovery) UnmarshalYAML(unmarshal func(any) error) error {\n\taux := struct {\n\t\tKubeConfigPath string `yaml:\"kubeconfig\"`\n\t}{}\n\terr := unmarshal(&aux)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.KubeConfigPath = aux.KubeConfigPath\n\n\treturn nil\n}\n\nfunc (c *OIDCDiscovery) NewDataGatherer(ctx context.Context) (datagatherer.DataGatherer, error) {\n\tcl, err := kubeconfig.NewDiscoveryClient(c.KubeConfigPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &DataGathererOIDC{\n\t\tcl: cl.RESTClient(),\n\t}, nil\n}\n\n// DataGathererOIDC stores the config for an oidc datagatherer.\ntype DataGathererOIDC struct {\n\tcl rest.Interface\n}\n\nvar _ datagatherer.DataGatherer = &DataGathererOIDC{}\n\nfunc (g *DataGathererOIDC) Run(ctx context.Context) error {\n\treturn nil\n}\n\nfunc (g *DataGathererOIDC) WaitForCacheSync(ctx context.Context) error {\n\t// no async functionality, see Fetch\n\treturn nil\n}\n\n// Fetch will fetch the OIDC discovery document and JWKS from the cluster API server.\nfunc (g *DataGathererOIDC) Fetch(ctx context.Context) (any, int, error) {\n\toidcResponse, oidcErr := g.fetchOIDCConfig(ctx)\n\tjwksResponse, jwksErr := g.fetchJWKS(ctx)\n\n\terrToString := func(err error) 
string {\n\t\tif err != nil {\n\t\t\treturn err.Error()\n\t\t}\n\t\treturn \"\"\n\t}\n\n\tif oidcErr != nil {\n\t\tklog.FromContext(ctx).V(4).Error(oidcErr, \"Failed to fetch OIDC configuration\")\n\t}\n\tif jwksErr != nil {\n\t\tklog.FromContext(ctx).V(4).Error(jwksErr, \"Failed to fetch JWKS\")\n\t}\n\n\treturn &api.OIDCDiscoveryData{\n\t\tOIDCConfig:      oidcResponse,\n\t\tOIDCConfigError: errToString(oidcErr),\n\t\tJWKS:            jwksResponse,\n\t\tJWKSError:       errToString(jwksErr),\n\t}, 1 /* we have 1 result, so return 1 as count */, nil\n}\n\nfunc (g *DataGathererOIDC) fetchOIDCConfig(ctx context.Context) (map[string]any, error) {\n\t// Fetch the OIDC discovery document from the well-known endpoint.\n\tresult := g.cl.Get().AbsPath(\"/.well-known/openid-configuration\").Do(ctx)\n\tif err := result.Error(); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get /.well-known/openid-configuration: %s\", k8sErrorMessage(err))\n\t}\n\n\tbytes, _ := result.Raw() // we already checked result.Error(), so there is no error here\n\tvar oidcResponse map[string]any\n\tif err := json.Unmarshal(bytes, &oidcResponse); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal OIDC discovery document: %v (raw: %q)\", err, stringFirstN(string(bytes), 80))\n\t}\n\n\treturn oidcResponse, nil\n}\n\nfunc (g *DataGathererOIDC) fetchJWKS(ctx context.Context) (map[string]any, error) {\n\t// Fetch the JWKS from the default /openid/v1/jwks endpoint.\n\t// We are not using the jwks_uri from the OIDC config because:\n\t//  - on hybrid OpenShift clusters, we saw it pointed to a non-existent URL\n\t//  - on fully private AWS EKS clusters, the URL is still public and might not\n\t//    be reachable from within the cluster (https://github.com/aws/containers-roadmap/issues/2038)\n\t// So we are using the default path instead, which we think should work in most cases.\n\tresult := g.cl.Get().AbsPath(\"/openid/v1/jwks\").Do(ctx)\n\tif err := result.Error(); err != nil 
{\n\t\treturn nil, fmt.Errorf(\"failed to get /openid/v1/jwks: %s\", k8sErrorMessage(err))\n\t}\n\n\tbytes, _ := result.Raw() // we already checked result.Error(), so there is no error here\n\tvar jwksResponse map[string]any\n\tif err := json.Unmarshal(bytes, &jwksResponse); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal JWKS response: %v (raw: %q)\", err, stringFirstN(string(bytes), 80))\n\t}\n\n\treturn jwksResponse, nil\n}\n\nfunc stringFirstN(s string, n int) string {\n\tif len(s) <= n {\n\t\treturn s\n\t}\n\treturn s[:n]\n}\n\n// based on https://github.com/kubernetes/kubectl/blob/a64ceaeab69eed1f11a9e1bd91cf2c1446de811c/pkg/cmd/util/helpers.go#L244\nfunc k8sErrorMessage(err error) string {\n\tif status, isStatus := err.(apierrors.APIStatus); isStatus {\n\t\tswitch s := status.Status(); {\n\t\tcase s.Reason == metav1.StatusReasonUnauthorized:\n\t\t\treturn fmt.Sprintf(\"error: You must be logged in to the server (%s)\", s.Message)\n\t\tcase len(s.Reason) > 0:\n\t\t\treturn fmt.Sprintf(\"Error from server (%s): %s\", s.Reason, err.Error())\n\t\tdefault:\n\t\t\treturn fmt.Sprintf(\"Error from server: %s\", err.Error())\n\t\t}\n\t}\n\n\tif apierrors.IsUnexpectedObjectError(err) {\n\t\treturn fmt.Sprintf(\"Server returned an unexpected response: %s\", err.Error())\n\t}\n\n\tif t, isURL := err.(*url.Error); isURL {\n\t\tif strings.Contains(t.Err.Error(), \"connection refused\") {\n\t\t\thost := t.URL\n\t\t\tif server, err := url.Parse(t.URL); err == nil {\n\t\t\t\thost = server.Host\n\t\t\t}\n\t\t\treturn fmt.Sprintf(\"The connection to the server %s was refused - did you specify the right host or port?\", host)\n\t\t}\n\t\treturn fmt.Sprintf(\"Unable to connect to the server: %v\", t.Err)\n\t}\n\n\treturn fmt.Sprintf(\"error: %v\", err)\n}\n"
  },
  {
    "path": "pkg/datagatherer/oidc/oidc_test.go",
    "content": "package oidc\n\nimport (\n\t\"bytes\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n\t\"k8s.io/client-go/discovery\"\n\t\"k8s.io/client-go/rest\"\n\n\t\"github.com/jetstack/preflight/api\"\n)\n\nfunc makeRESTClient(t *testing.T, ts *httptest.Server) rest.Interface {\n\tt.Helper()\n\tu, err := url.Parse(ts.URL)\n\tif err != nil {\n\t\tt.Fatalf(\"parse server url: %v\", err)\n\t}\n\n\tcfg := &rest.Config{\n\t\tHost: u.Host,\n\t}\n\n\tdiscoveryClient, err := discovery.NewDiscoveryClientForConfigAndClient(cfg, ts.Client())\n\tif err != nil {\n\t\tt.Fatalf(\"new discovery client: %v\", err)\n\t}\n\n\treturn discoveryClient.RESTClient()\n}\n\nfunc TestFetch_Success(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tswitch r.URL.Path {\n\t\tcase \"/.well-known/openid-configuration\":\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t_, _ = w.Write([]byte(`{\"issuer\":\"https://example\"}`))\n\t\tcase \"/openid/v1/jwks\":\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t_, _ = w.Write([]byte(`{\"keys\":[]}`))\n\t\tdefault:\n\t\t\thttp.NotFound(w, r)\n\t\t}\n\t}))\n\tdefer ts.Close()\n\n\trc := makeRESTClient(t, ts)\n\tg := &DataGathererOIDC{cl: rc}\n\n\tanyRes, count, err := g.Fetch(t.Context())\n\trequire.NoError(t, err)\n\trequire.Equal(t, 1, count)\n\n\tres, ok := anyRes.(*api.OIDCDiscoveryData)\n\trequire.True(t, ok, \"unexpected result type\")\n\n\trequire.NotNil(t, res.OIDCConfig)\n\trequire.Equal(t, \"https://example\", res.OIDCConfig[\"issuer\"].(string))\n\trequire.Empty(t, res.OIDCConfigError)\n\n\trequire.NotNil(t, res.JWKS)\n\t_, ok = res.JWKS[\"keys\"].([]any)\n\trequire.True(t, ok, \"unexpected result type\")\n\trequire.Empty(t, res.JWKSError)\n}\n\nfunc TestFetch_Errors(t *testing.T) {\n\ttests := []struct {\n\t\tname                        
string\n\t\topenidConfigurationResponse func(w http.ResponseWriter, r *http.Request)\n\t\tjwksResponse                func(w http.ResponseWriter, r *http.Request)\n\t\texpOIDCConfigError          string\n\t\texpJWKSError                string\n\t}{\n\t\t{\n\t\t\tname: \"5xx errors\",\n\t\t\topenidConfigurationResponse: func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\thttp.Error(w, \"boom\", http.StatusInternalServerError)\n\t\t\t},\n\t\t\tjwksResponse: func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\thttp.Error(w, \"boom\", http.StatusInternalServerError)\n\t\t\t},\n\t\t\texpOIDCConfigError: `failed to get /.well-known/openid-configuration: Error from server (InternalError): an error on the server (\"boom\") has prevented the request from succeeding`,\n\t\t\texpJWKSError:       `failed to get /openid/v1/jwks: Error from server (InternalError): an error on the server (\"boom\") has prevented the request from succeeding`,\n\t\t},\n\t\t{\n\t\t\tname: \"malformed JSON\",\n\t\t\topenidConfigurationResponse: func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t\t_, _ = w.Write([]byte(`}{`))\n\t\t\t},\n\t\t\tjwksResponse: func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t\t_, _ = w.Write([]byte(`}`))\n\t\t\t\t_, _ = w.Write(bytes.Repeat([]byte{'0'}, 5000))\n\t\t\t},\n\t\t\texpOIDCConfigError: `failed to unmarshal OIDC discovery document: invalid character '}' looking for beginning of value (raw: \"}{\")`,\n\t\t\texpJWKSError:       `failed to unmarshal JWKS response: invalid character '}' looking for beginning of value (raw: \"}0000000000000000000000000000000000000000000000000000000000000000000000000000000\")`,\n\t\t},\n\t\t{\n\t\t\tname: \"Forbidden error (no body)\",\n\t\t\topenidConfigurationResponse: func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\thttp.Error(w, \"forbidden\", 
http.StatusForbidden)\n\t\t\t},\n\t\t\tjwksResponse: func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\thttp.Error(w, \"forbidden\", http.StatusForbidden)\n\t\t\t},\n\t\t\texpOIDCConfigError: \"failed to get /.well-known/openid-configuration: Error from server (Forbidden): forbidden\",\n\t\t\texpJWKSError:       \"failed to get /openid/v1/jwks: Error from server (Forbidden): forbidden\",\n\t\t},\n\t\t{\n\t\t\tname: \"Forbidden error (*metav1.Status body)\",\n\t\t\topenidConfigurationResponse: func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t\tw.WriteHeader(http.StatusForbidden)\n\t\t\t\t_, _ = w.Write([]byte(`{\n\t\t\t\t\t\"kind\":\"Status\",\n\t\t\t\t\t\"apiVersion\":\"v1\",\n\t\t\t\t\t\"metadata\":{},\n\t\t\t\t\t\"status\":\"Failure\",\n\t\t\t\t\t\"message\":\"forbidden: User \\\"system:serviceaccount:default:test\\\" cannot get path \\\"/.well-known/openid-configuration\\\"\",\n\t\t\t\t\t\"reason\":\"Forbidden\",\n\t\t\t\t\t\"details\":{},\n\t\t\t\t\t\"code\":403\n\t\t\t\t}`))\n\t\t\t},\n\t\t\tjwksResponse: func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t\tw.WriteHeader(http.StatusForbidden)\n\t\t\t\t_, _ = w.Write([]byte(`{\n\t\t\t\t\t\"kind\":\"Status\",\n\t\t\t\t\t\"apiVersion\":\"v1\",\n\t\t\t\t\t\"metadata\":{},\n\t\t\t\t\t\"status\":\"Failure\",\n\t\t\t\t\t\"message\":\"forbidden: User \\\"system:serviceaccount:default:test\\\" cannot get path \\\"/openid/v1/jwks\\\"\",\n\t\t\t\t\t\"reason\":\"Forbidden\",\n\t\t\t\t\t\"details\":{},\n\t\t\t\t\t\"code\":403\n\t\t\t\t}`))\n\t\t\t},\n\t\t\texpOIDCConfigError: `failed to get /.well-known/openid-configuration: Error from server (Forbidden): forbidden: User \"system:serviceaccount:default:test\" cannot get path \"/.well-known/openid-configuration\"`,\n\t\t\texpJWKSError:       `failed to get /openid/v1/jwks: Error from server (Forbidden): forbidden: User 
\"system:serviceaccount:default:test\" cannot get path \"/openid/v1/jwks\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"Unauthorized error (*metav1.Status body)\",\n\t\t\topenidConfigurationResponse: func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t\tw.WriteHeader(http.StatusForbidden)\n\t\t\t\t_, _ = w.Write([]byte(`{\n\t\t\t\t\t\"kind\": \"Status\",\n\t\t\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\t\t\"metadata\": {},\n\t\t\t\t\t\"status\": \"Failure\",\n\t\t\t\t\t\"message\": \"Unauthorized\",\n\t\t\t\t\t\"reason\": \"Unauthorized\",\n\t\t\t\t\t\"code\": 401\n\t\t\t\t}`))\n\t\t\t},\n\t\t\tjwksResponse: func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t\tw.WriteHeader(http.StatusForbidden)\n\t\t\t\t_, _ = w.Write([]byte(`{\n\t\t\t\t\t\"kind\": \"Status\",\n\t\t\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\t\t\"metadata\": {},\n\t\t\t\t\t\"status\": \"Failure\",\n\t\t\t\t\t\"message\": \"Unauthorized\",\n\t\t\t\t\t\"reason\": \"Unauthorized\",\n\t\t\t\t\t\"code\": 401\n\t\t\t\t}`))\n\t\t\t},\n\t\t\texpOIDCConfigError: `failed to get /.well-known/openid-configuration: error: You must be logged in to the server (Unauthorized)`,\n\t\t\texpJWKSError:       `failed to get /openid/v1/jwks: error: You must be logged in to the server (Unauthorized)`,\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tswitch r.URL.Path {\n\t\t\t\tcase \"/.well-known/openid-configuration\":\n\t\t\t\t\ttc.openidConfigurationResponse(w, r)\n\t\t\t\t\treturn\n\t\t\t\tcase \"/openid/v1/jwks\":\n\t\t\t\t\ttc.jwksResponse(w, r)\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t\tt.Fatalf(\"unexpected request path: %s\", r.URL.Path)\n\t\t\t\t}\n\t\t\t}))\n\t\t\tdefer ts.Close()\n\n\t\t\trc := makeRESTClient(t, ts)\n\t\t\tg := &DataGathererOIDC{cl: 
rc}\n\n\t\t\tanyRes, count, err := g.Fetch(t.Context())\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, 1, count)\n\n\t\t\tres, ok := anyRes.(*api.OIDCDiscoveryData)\n\t\t\trequire.True(t, ok, \"unexpected result type\")\n\n\t\t\trequire.Nil(t, res.OIDCConfig)\n\t\t\trequire.NotEmpty(t, res.OIDCConfigError)\n\t\t\trequire.Equal(t, tc.expOIDCConfigError, res.OIDCConfigError)\n\n\t\t\trequire.Nil(t, res.JWKS)\n\t\t\trequire.NotEmpty(t, res.JWKSError)\n\t\t\trequire.Equal(t, tc.expJWKSError, res.JWKSError)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/echo/echo.go",
    "content": "package echo\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/http\"\n\n\t\"github.com/fatih/color\"\n\t\"github.com/spf13/cobra\"\n\n\t\"github.com/jetstack/preflight/api\"\n)\n\nvar EchoListen string\n\nvar Compact bool\n\nfunc Echo(cmd *cobra.Command, args []string) error {\n\thttp.HandleFunc(\"/\", echoHandler)\n\tfmt.Println(\"Listening to requests at \", EchoListen)\n\treturn http.ListenAndServe(EchoListen, nil)\n}\n\nfunc echoHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != http.MethodPost {\n\t\twriteError(w, fmt.Sprintf(\"invalid method. Expected POST, received %s\", r.Method), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// decode all data, however only datareadings are printed below\n\tvar payload api.DataReadingsPost\n\terr := json.NewDecoder(r.Body).Decode(&payload)\n\tif err != nil {\n\t\twriteError(w, fmt.Sprintf(\"decoding body: %+v\", err), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// print the data sent to the echo server to the console\n\n\tif Compact {\n\t\tfmt.Printf(\"-- %s %s -> created %d\\n\", r.Method, r.URL.Path, http.StatusCreated)\n\t\tfmt.Printf(\"received %d readings:\\n\", len(payload.DataReadings))\n\t\tfor _, r := range payload.DataReadings {\n\t\t\tfmt.Printf(\"%+v\\n\", r)\n\t\t}\n\t} else {\n\t\tcolor.Green(\"-- %s %s -> created %d\\n\", r.Method, r.URL.Path, http.StatusCreated)\n\t\tfmt.Printf(\"received %d readings:\\n\", len(payload.DataReadings))\n\n\t\tfor i, r := range payload.DataReadings {\n\t\t\tc := color.New(color.FgYellow)\n\t\t\tif i%2 == 0 {\n\t\t\t\tc = color.New(color.FgCyan)\n\t\t\t}\n\n\t\t\tc.Printf(\"%v:\\n%s\\n\", i, prettyPrint(r))\n\t\t}\n\n\t\tcolor.Green(\"-----\")\n\t}\n\n\t// return successful response to the agent\n\tfmt.Fprintf(w, `{ \"status\": \"ok\" }`)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n}\n\nfunc writeError(w http.ResponseWriter, err string, code int) {\n\tfmt.Printf(\"-- error %d -> %s\\n\", code, 
err)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\thttp.Error(w, fmt.Sprintf(`{ \"error\": \"%s\", \"code\": %d }`, err, code), code)\n}\n\nfunc prettyPrint(reading *api.DataReading) string {\n\treturn fmt.Sprintf(`ClusterID: %s\nData gatherer: %s\nTimestamp: %s\nSchemaVersion: %s\nData: %+v`,\n\t\treading.ClusterID, reading.DataGatherer, reading.Timestamp, reading.SchemaVersion, reading.Data)\n}\n"
  },
  {
    "path": "pkg/echo/echo_test.go",
    "content": "package echo\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n\t\"time\"\n\n\t\"k8s.io/apimachinery/pkg/version\"\n\n\t\"github.com/jetstack/preflight/api\"\n)\n\ntype testInput struct {\n\tdescription string\n\tdata        *api.DataReadingsPost\n\texp         int\n\tmethod      string\n}\n\nfunc TestEchoServerRequestResponse(t *testing.T) {\n\t// create sample data in same format that would be generated by the agent\n\tsampleUploadCases := []testInput{\n\t\t{\n\t\t\tdescription: \"correct request input should return status code 200\",\n\t\t\tdata: &api.DataReadingsPost{\n\t\t\t\tAgentMetadata: &api.AgentMetadata{\n\t\t\t\t\tVersion:   \"test suite\",\n\t\t\t\t\tClusterID: \"test_suite_cluster\",\n\t\t\t\t},\n\t\t\t\tDataGatherTime: time.Now(),\n\t\t\t\tDataReadings: []*api.DataReading{\n\t\t\t\t\t{\n\t\t\t\t\t\tClusterID:    \"test_suite_cluster\",\n\t\t\t\t\t\tDataGatherer: \"dummy\",\n\t\t\t\t\t\tTimestamp:    api.Time{Time: time.Now()},\n\t\t\t\t\t\tData: &api.DiscoveryData{\n\t\t\t\t\t\t\tServerVersion: &version.Info{\n\t\t\t\t\t\t\t\tGitVersion: \"v1.20.0\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tSchemaVersion: \"2.0.0\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texp:    http.StatusOK,\n\t\t\tmethod: \"POST\",\n\t\t},\n\t\t{\n\t\t\tdescription: \"sending GET request should return status code 400\",\n\t\t\tmethod:      \"GET\",\n\t\t\tdata:        nil,\n\t\t\texp:         http.StatusBadRequest,\n\t\t},\n\t}\n\n\tfor _, sampleUpload := range sampleUploadCases {\n\t\t// generate the JSON representation of the data to be sent to the echo server\n\t\trequestBodyJSON, err := json.Marshal(sampleUpload.data)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"[%s]\\nfailed to generate JSON request body to post: %s\", sampleUpload.description, err)\n\t\t}\n\n\t\t// generate a request to test the handler containing the JSON data as a body\n\t\treq, err := http.NewRequestWithContext(t.Context(), 
sampleUpload.method, \"http://example.com/api/v1/datareadings\", bytes.NewBuffer(requestBodyJSON))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"[%s]\\nfailed to generate request to test echo server: %s\", sampleUpload.description, err)\n\t\t}\n\n\t\t// create recorder to save the response\n\t\trr := httptest.NewRecorder()\n\n\t\t// perform the request with the handler\n\t\techoHandler(rr, req)\n\n\t\t// Check the response from the echo handler is the expected one\n\t\tresponse := rr.Result()\n\t\tif response.StatusCode != sampleUpload.exp {\n\t\t\tt.Fatalf(\"[%s]\\necho server responded with an unexpected code: %d\", sampleUpload.description, response.StatusCode)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/kubeconfig/client.go",
    "content": "package kubeconfig\n\nimport (\n\t\"k8s.io/client-go/discovery\"\n\t\"k8s.io/client-go/dynamic\"\n\t\"k8s.io/client-go/kubernetes\"\n)\n\n// NewDynamicClient creates a new 'dynamic' clientset using the provided kubeconfig.\n// If kubeconfigPath is not set/empty, it will attempt to load configuration using\n// the default loading rules.\nfunc NewDynamicClient(kubeconfigPath string) (dynamic.Interface, error) {\n\tcfg, err := LoadRESTConfig(kubeconfigPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcl, err := dynamic.NewForConfig(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cl, nil\n}\n\n// NewDiscoveryClient creates a new 'discovery' client using the provided\n// kubeconfig.  If kubeconfigPath is not set/empty, it will attempt to load\n// configuration using the default loading rules.\nfunc NewDiscoveryClient(kubeconfigPath string) (*discovery.DiscoveryClient, error) {\n\tcfg, err := LoadRESTConfig(kubeconfigPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdiscoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn discoveryClient, nil\n}\n\n// NewClientSet creates a new kubernetes clientset using the provided kubeconfig.\n// If kubeconfigPath is not set/empty, it will attempt to load configuration using\n// the default loading rules.\nfunc NewClientSet(kubeconfigPath string) (kubernetes.Interface, error) {\n\tcfg, err := LoadRESTConfig(kubeconfigPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclientset, err := kubernetes.NewForConfig(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn clientset, nil\n}\n"
  },
  {
    "path": "pkg/kubeconfig/client_test.go",
    "content": "package kubeconfig\n\nimport (\n\t\"os\"\n\t\"testing\"\n\n\tclientcmdapi \"k8s.io/client-go/tools/clientcmd/api\"\n\tclientcmdlatest \"k8s.io/client-go/tools/clientcmd/api/latest\"\n)\n\n// These tests do not currently validate the created dynamic client uses the\n// KUBECONFIG file that we create, however it _does_ exercise enough of the\n// code path to show that the function is correctly selecting which file to\n// load and returning it.\n\nfunc TestNewDynamicClient_ExplicitKubeconfig(t *testing.T) {\n\tkc := createValidTestConfig()\n\tpath := writeConfigToFile(t, kc)\n\t_, err := NewDynamicClient(path)\n\tif err != nil {\n\t\tt.Error(\"failed to create client: \", err)\n\t}\n}\n\nfunc TestNewDynamicClient_InferredKubeconfig(t *testing.T) {\n\tkc := createValidTestConfig()\n\tpath := writeConfigToFile(t, kc)\n\tcleanupFn := temporarilySetEnv(\"KUBECONFIG\", path)\n\tdefer cleanupFn()\n\t_, err := NewDynamicClient(\"\")\n\tif err != nil {\n\t\tt.Error(\"failed to create client: \", err)\n\t}\n}\n\nfunc TestNewDiscoveryClient_ExplicitKubeconfig(t *testing.T) {\n\tkc := createValidTestConfig()\n\tpath := writeConfigToFile(t, kc)\n\t_, err := NewDiscoveryClient(path)\n\tif err != nil {\n\t\tt.Error(\"failed to create client: \", err)\n\t}\n}\n\nfunc TestNewDiscoveryClient_InferredKubeconfig(t *testing.T) {\n\tkc := createValidTestConfig()\n\tpath := writeConfigToFile(t, kc)\n\tcleanupFn := temporarilySetEnv(\"KUBECONFIG\", path)\n\tdefer cleanupFn()\n\t_, err := NewDiscoveryClient(\"\")\n\tif err != nil {\n\t\tt.Error(\"failed to create client: \", err)\n\t}\n}\n\nfunc writeConfigToFile(t *testing.T, cfg clientcmdapi.Config) string {\n\tf, err := os.CreateTemp(t.TempDir(), \"testcase-*\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer f.Close()\n\tif err := clientcmdlatest.Codec.Encode(&cfg, f); err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn f.Name()\n}\n\nfunc createValidTestConfig() clientcmdapi.Config {\n\tconst (\n\t\tserver = 
\"https://example.com:8080\"\n\t\ttoken  = \"the-token\"\n\t)\n\n\tconfig := clientcmdapi.NewConfig()\n\tconfig.Clusters[\"clean\"] = &clientcmdapi.Cluster{\n\t\tServer: server,\n\t}\n\tconfig.AuthInfos[\"clean\"] = &clientcmdapi.AuthInfo{\n\t\tToken: token,\n\t}\n\tconfig.Contexts[\"clean\"] = &clientcmdapi.Context{\n\t\tCluster:  \"clean\",\n\t\tAuthInfo: \"clean\",\n\t}\n\tconfig.CurrentContext = \"clean\"\n\n\treturn *config\n}\n\nfunc temporarilySetEnv(key, value string) func() {\n\told := os.Getenv(key)\n\tos.Setenv(key, value)\n\treturn func() {\n\t\tos.Setenv(key, old)\n\t}\n}\n"
  },
  {
    "path": "pkg/kubeconfig/kubeconfig.go",
    "content": "package kubeconfig\n\nimport (\n\t\"k8s.io/client-go/rest\"\n\t\"k8s.io/client-go/tools/clientcmd\"\n)\n\n// LoadRESTConfig loads the kube config from the provided path. If the path is\n// empty, the kube config will be loaded from KUBECONFIG, and if KUBECONFIG\n// isn't set, the in-cluster config will be used.\nfunc LoadRESTConfig(path string) (*rest.Config, error) {\n\tloadingrules := clientcmd.NewDefaultClientConfigLoadingRules()\n\n\t// If the kubeconfig path is provided, use that file and fail if it does\n\t// not exist.\n\t// If the kubeconfig path is not provided, use the default loading rules\n\t// so we read the regular KUBECONFIG variable or create a non-interactive\n\t// client for agents running in cluster\n\tloadingrules.ExplicitPath = path\n\n\tcfg, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(\n\t\tloadingrules,\n\t\t&clientcmd.ConfigOverrides{},\n\t).ClientConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cfg, nil\n}\n"
  },
  {
    "path": "pkg/logs/logs.go",
    "content": "package logs\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"log/slog\"\n\t\"strings\"\n\n\t\"github.com/spf13/pflag\"\n\t\"k8s.io/apimachinery/pkg/util/runtime\"\n\t\"k8s.io/apimachinery/pkg/util/sets\"\n\t\"k8s.io/component-base/featuregate\"\n\t\"k8s.io/component-base/logs\"\n\tlogsapi \"k8s.io/component-base/logs/api/v1\"\n\t\"k8s.io/klog/v2\"\n\tctrlruntimelog \"sigs.k8s.io/controller-runtime/pkg/log\"\n\n\t_ \"k8s.io/component-base/logs/json/register\"\n)\n\n// venafi-kubernetes-agent follows [Kubernetes Logging Conventions] and writes\n// logs in [Kubernetes text logging format] by default. It does not support\n// named levels (aka. severity), instead it uses arbitrary levels. Errors and\n// warnings are logged to stderr and Info messages to stdout, because that is\n// how some cloud logging systems (notably Google Cloud Logs Explorer) assign a\n// severity (INFO or ERROR) in the UI. The agent's and vcert's logs are written\n// logged as Info messages with level=0.\n//\n// Further reading:\n//  - [Kubernetes logging conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)\n//  - [Kubernetes text logging format](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#text-logging-format)\n//  - [Why not named levels, like Info/Warning/Error?](https://github.com/go-logr/logr?tab=readme-ov-file#why-not-named-levels-like-infowarningerror)\n//  - [GKE logs best practices](https://cloud.google.com/kubernetes-engine/docs/concepts/about-logs#best_practices)\n//  - [Structured Logging KEP](https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/1602-structured-logging/README.md)\n//  - [Examples of using k8s.io/component-base/logs](https://github.com/kubernetes/kubernetes/tree/master/staging/src/k8s.io/component-base/logs/example),\n//    upon which this code was based.\n\nvar (\n\n\t// All but the essential logging 
flags will be hidden to avoid overwhelming\n\t// the user. The hidden flags can still be used. For example if a user does\n\t// not like the split-stream behavior, a Venafi field engineer can\n\t// instruct them to patch --log-json-split-stream=false on to the Deployment\n\t// arguments.\n\tvisibleFlagNames = sets.New[string](\"v\", \"vmodule\", \"logging-format\")\n\t// This default logging configuration will be updated with values from the\n\t// logging flags, even those that are hidden.\n\tconfiguration = logsapi.NewLoggingConfiguration()\n\t// Logging features will be added to this feature gate, but the\n\t// feature-gates flag will be hidden from the user.\n\tfeatures = featuregate.NewFeatureGate()\n)\n\nconst (\n\t// Standard log verbosity levels.\n\t// Use these instead of integers in venafi-kubernetes-agent code.\n\tInfo  = 0\n\tDebug = 1\n\tTrace = 2\n)\n\nfunc init() {\n\truntime.Must(logsapi.AddFeatureGates(features))\n\t// Turn on ALPHA options to enable the split-stream logging options.\n\truntime.Must(features.OverrideDefault(logsapi.LoggingAlphaOptions, true))\n}\n\n// AddFlags adds log related flags to the supplied flag set.\n//\n// The split-stream options are enabled by default, so that errors are logged to\n// stderr and info to stdout, allowing cloud logging systems to assign a\n// severity INFO or ERROR to the messages.\nfunc AddFlags(fs *pflag.FlagSet) {\n\tvar tfs pflag.FlagSet\n\tlogsapi.AddFlags(configuration, &tfs)\n\tfeatures.AddFlag(&tfs)\n\ttfs.VisitAll(func(f *pflag.Flag) {\n\t\tif !visibleFlagNames.Has(f.Name) {\n\t\t\t_ = tfs.MarkHidden(f.Name)\n\t\t}\n\n\t\t// The original usage string includes details about how\n\t\t// JSON logging is only available when BETA logging features are\n\t\t// enabled, but that's not relevant here because the feature is enabled\n\t\t// by default.\n\t\tif f.Name == \"logging-format\" {\n\t\t\tf.Usage = `Sets the log format. 
Permitted formats: \"json\", \"text\".`\n\t\t}\n\t\tif f.Name == \"log-text-split-stream\" {\n\t\t\tf.DefValue = \"true\"\n\t\t\truntime.Must(f.Value.Set(\"true\"))\n\t\t}\n\t\tif f.Name == \"log-json-split-stream\" {\n\t\t\tf.DefValue = \"true\"\n\t\t\truntime.Must(f.Value.Set(\"true\"))\n\t\t}\n\n\t\t// Since `--v` (which is the long form of `-v`) isn't the standard in\n\t\t// our projects (it only exists in cert-manager, webhook, and such),\n\t\t// let's rename it to the more common `--log-level`, which appears in\n\t\t// openshift-routes, csi-driver, trust-manager, and approver-policy.\n\t\t// More details at:\n\t\t// https://github.com/jetstack/jetstack-secure/pull/596#issuecomment-2421708181\n\t\tif f.Name == \"v\" {\n\t\t\tf.Name = \"log-level\"\n\t\t\tf.Shorthand = \"v\"\n\t\t\tf.Usage = fmt.Sprintf(\"%s. 0=Info, 1=Debug, 2=Trace. Use 6-9 for increasingly verbose HTTP request logging. (default: 0)\", f.Usage)\n\t\t}\n\t})\n\tfs.AddFlagSet(&tfs)\n}\n\n// Initialize uses k8s.io/component-base/logs, to configure the following global\n// loggers: log, slog, and klog. All are configured to write in the same format.\nfunc Initialize() error {\n\t// This configures the global logger in klog *and* slog, if compiled with Go\n\t// >= 1.21.\n\tlogs.InitLogs()\n\tif err := logsapi.ValidateAndApply(configuration, features); err != nil {\n\t\treturn fmt.Errorf(\"Error in logging configuration: %s\", err)\n\t}\n\n\t// Thanks to logs.InitLogs, slog.Default now uses klog as its backend. 
Thus,\n\t// the client-go library, which relies on klog.Info, has the same logger as\n\t// the agent, which still uses log.Printf.\n\tslog := slog.Default()\n\n\t// Let's make sure the VCert library, which is the only library we import to\n\t// be using the global log.Default, also uses the common slog logger.\n\tvcertLog := log.Default()\n\tvcertLog.SetOutput(LogToSlogWriter{Slog: slog, Source: \"vcert\"})\n\n\t// The venafi-connection-lib client uses various controller-runtime packages\n\t// which emit log messages. Make sure those log messages are not discarded.\n\tctrlruntimelog.SetLogger(klog.Background().WithValues(\"source\", \"controller-runtime\"))\n\n\treturn nil\n}\n\ntype LogToSlogWriter struct {\n\tSlog   *slog.Logger\n\tSource string\n}\n\nfunc (w LogToSlogWriter) Write(p []byte) (n int, err error) {\n\t// log.Printf writes a newline at the end of the message, so we need to trim\n\t// it.\n\tp = bytes.TrimSuffix(p, []byte(\"\\n\"))\n\n\tmessage := string(p)\n\tif strings.Contains(message, \"error\") ||\n\t\tstrings.Contains(message, \"failed\") {\n\t\tw.Slog.With(\"source\", w.Source).Error(message)\n\t} else {\n\t\tw.Slog.With(\"source\", w.Source).Info(message)\n\t}\n\treturn len(p), nil\n}\n"
  },
  {
    "path": "pkg/logs/logs_test.go",
    "content": "package logs_test\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"log/slog\"\n\t\"os\"\n\t\"os/exec\"\n\t\"regexp\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/spf13/pflag\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"k8s.io/klog/v2\"\n\n\t\"github.com/jetstack/preflight/pkg/logs\"\n\n\t_ \"github.com/Venafi/vcert/v5\"\n)\n\n// TestLogs demonstrates how the logging flags affect the logging output.\n//\n// The test executes itself with as a sub-process to avoid mutating the global\n// logging configuration.\n//\n// Inspired by:\n// - https://stackoverflow.com/a/67945462\n// - https://go.dev/src/flag/flag_test.go (TestExitCode)\nfunc TestLogs(t *testing.T) {\n\tif flags, found := os.LookupEnv(\"GO_CHILD_FLAG\"); found {\n\t\tif _, found := os.LookupEnv(\"GO_CHILD_SKIP_INITIALIZE\"); !found {\n\t\t\tfs := pflag.NewFlagSet(\"test-logs\", pflag.ContinueOnError)\n\t\t\tfs.SetOutput(io.Discard)\n\t\t\tlogs.AddFlags(fs)\n\t\t\tif err := fs.Parse(strings.Split(flags, \" \")); err != nil {\n\t\t\t\texitCode := 0\n\t\t\t\tif errors.Is(err, pflag.ErrHelp) {\n\t\t\t\t\tfmt.Fprint(os.Stdout, fs.FlagUsages())\n\t\t\t\t\tos.Exit(exitCode)\n\t\t\t\t} else {\n\t\t\t\t\texitCode := 1\n\t\t\t\t\tklog.ErrorS(err, \"Exiting due to error\", \"exit-code\", exitCode)\n\t\t\t\t\tklog.FlushAndExit(time.Second, exitCode)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := logs.Initialize(); err != nil {\n\t\t\t\texitCode := 1\n\t\t\t\tklog.ErrorS(err, \"Exiting due to error\", \"exit-code\", exitCode)\n\t\t\t\tklog.FlushAndExit(time.Second, exitCode)\n\t\t\t}\n\t\t}\n\n\t\tlog.Print(\"log Print\")\n\t\tslog.Info(\"slog Info\")\n\t\tslog.Warn(\"slog Warn\")\n\t\tslog.Error(\"slog Error\")\n\t\tklog.Info(\"klog Info\")\n\t\tklog.Warning(\"klog Warning\")\n\t\tklog.ErrorS(errors.New(\"fake-error\"), \"klog Error\")\n\t\tklog.InfoS(\"klog InfoS\", \"key\", \"value\")\n\t\tlogger := 
klog.FromContext(t.Context()).WithName(\"foo\")\n\t\tlogger.V(3).Info(\"Contextual Info Level 3\", \"key\", \"value\")\n\t\tlogger.Error(errors.New(\"fake-error\"), \"Contextual error\", \"key\", \"value\")\n\n\t\tklog.FlushAndExit(time.Second, 0)\n\t}\n\n\ttests := []struct {\n\t\tname          string\n\t\tflags         string\n\t\tskipIntialize bool\n\t\texpectError   bool\n\t\texpectStdout  string\n\t\texpectStderr  string\n\t}{\n\t\t{\n\t\t\tname:  \"help\",\n\t\t\tflags: \"-h\",\n\t\t\texpectStdout: `\n  -v, --log-level Level         number for the log level verbosity. 0=Info, 1=Debug, 2=Trace. Use 6-9 for increasingly verbose HTTP request logging. (default: 0)\n      --logging-format string   Sets the log format. Permitted formats: \"json\", \"text\". (default \"text\")\n      --vmodule pattern=N,...   comma-separated list of pattern=N settings for file-filtered logging (only works for text log format)\n`,\n\t\t},\n\t\t{\n\t\t\tname:        \"unrecognized-flag\",\n\t\t\tflags:       \"--foo\",\n\t\t\texpectError: true,\n\t\t\texpectStderr: `\nE0000 00:00:00.000000   00000 logs_test.go:000] \"Exiting due to error\" err=\"unknown flag: --foo\" exit-code=1\n`,\n\t\t},\n\t\t{\n\t\t\tname:        \"v-long-form-not-available\",\n\t\t\tflags:       \"--v=3\",\n\t\t\texpectError: true,\n\t\t\texpectStderr: `\nE0000 00:00:00.000000   00000 logs_test.go:000] \"Exiting due to error\" err=\"unknown flag: --v\" exit-code=1\n`,\n\t\t},\n\t\t{\n\t\t\tname:        \"logging-format-unrecognized\",\n\t\t\tflags:       \"--logging-format=foo\",\n\t\t\texpectError: true,\n\t\t\texpectStderr: `\nE0000 00:00:00.000000   00000 logs_test.go:000] \"Exiting due to error\" err=\"Error in logging configuration: format: Invalid value: \\\"foo\\\": Unsupported log format\" exit-code=1\n`,\n\t\t},\n\t\t{\n\t\t\tname:          \"original-defaults\",\n\t\t\tflags:         \"\",\n\t\t\tskipIntialize: true,\n\t\t\texpectStderr: `\n0000/00/00 00:00:00 log Print\n0000/00/00 00:00:00 INFO slog 
Info\n0000/00/00 00:00:00 WARN slog Warn\n0000/00/00 00:00:00 ERROR slog Error\nI0000 00:00:00.000000   00000 logs_test.go:000] klog Info\nW0000 00:00:00.000000   00000 logs_test.go:000] klog Warning\nE0000 00:00:00.000000   00000 logs_test.go:000] \"klog Error\" err=\"fake-error\"\nI0000 00:00:00.000000   00000 logs_test.go:000] \"klog InfoS\" key=\"value\"\nE0000 00:00:00.000000   00000 logs_test.go:000] \"Contextual error\" err=\"fake-error\" logger=\"foo\" key=\"value\"\n`,\n\t\t},\n\t\t{\n\t\t\tname:  \"modified-defaults\",\n\t\t\tflags: \"\",\n\t\t\texpectStdout: `\nI0000 00:00:00.000000   00000 logs.go:000] \"log Print\" source=\"vcert\"\nI0000 00:00:00.000000   00000 logs_test.go:000] \"slog Info\"\nI0000 00:00:00.000000   00000 logs_test.go:000] klog Info\nI0000 00:00:00.000000   00000 logs_test.go:000] \"klog InfoS\" key=\"value\"\n`,\n\t\t\texpectStderr: `\nW0000 00:00:00.000000   00000 logs_test.go:000] \"slog Warn\"\nE0000 00:00:00.000000   00000 logs_test.go:000] \"slog Error\"\nW0000 00:00:00.000000   00000 logs_test.go:000] klog Warning\nE0000 00:00:00.000000   00000 logs_test.go:000] \"klog Error\" err=\"fake-error\"\nE0000 00:00:00.000000   00000 logs_test.go:000] \"Contextual error\" err=\"fake-error\" logger=\"foo\" key=\"value\"\n`,\n\t\t},\n\t\t{\n\t\t\tname:  \"logging-format-json\",\n\t\t\tflags: \"--logging-format=json\",\n\t\t\texpectStdout: `\n{\"ts\":0000000000000.000,\"caller\":\"logs/logs.go:000\",\"msg\":\"log Print\",\"source\":\"vcert\",\"v\":0}\n{\"ts\":0000000000000.000,\"caller\":\"logs/logs_test.go:000\",\"msg\":\"slog Info\",\"v\":0}\n{\"ts\":0000000000000.000,\"caller\":\"logs/logs_test.go:000\",\"msg\":\"slog Warn\",\"v\":0}\n{\"ts\":0000000000000.000,\"caller\":\"logs/logs_test.go:000\",\"msg\":\"klog Info\",\"v\":0}\n{\"ts\":0000000000000.000,\"caller\":\"logs/logs_test.go:000\",\"msg\":\"klog Warning\",\"v\":0}\n{\"ts\":0000000000000.000,\"caller\":\"logs/logs_test.go:000\",\"msg\":\"klog 
InfoS\",\"v\":0,\"key\":\"value\"}\n`,\n\t\t\texpectStderr: `\n{\"ts\":0000000000000.000,\"caller\":\"logs/logs_test.go:000\",\"msg\":\"slog Error\"}\n{\"ts\":0000000000000.000,\"caller\":\"logs/logs_test.go:000\",\"msg\":\"klog Error\",\"err\":\"fake-error\"}\n{\"ts\":0000000000000.000,\"logger\":\"foo\",\"caller\":\"logs/logs_test.go:000\",\"msg\":\"Contextual error\",\"key\":\"value\",\"err\":\"fake-error\"}\n`,\n\t\t},\n\t\t{\n\t\t\tname:  \"log-json-split-stream-false\",\n\t\t\tflags: \"--logging-format=json --log-json-split-stream=false\",\n\t\t\texpectStderr: `\n{\"ts\":0000000000000.000,\"caller\":\"logs/logs.go:000\",\"msg\":\"log Print\",\"source\":\"vcert\",\"v\":0}\n{\"ts\":0000000000000.000,\"caller\":\"logs/logs_test.go:000\",\"msg\":\"slog Info\",\"v\":0}\n{\"ts\":0000000000000.000,\"caller\":\"logs/logs_test.go:000\",\"msg\":\"slog Warn\",\"v\":0}\n{\"ts\":0000000000000.000,\"caller\":\"logs/logs_test.go:000\",\"msg\":\"slog Error\"}\n{\"ts\":0000000000000.000,\"caller\":\"logs/logs_test.go:000\",\"msg\":\"klog Info\",\"v\":0}\n{\"ts\":0000000000000.000,\"caller\":\"logs/logs_test.go:000\",\"msg\":\"klog Warning\",\"v\":0}\n{\"ts\":0000000000000.000,\"caller\":\"logs/logs_test.go:000\",\"msg\":\"klog Error\",\"err\":\"fake-error\"}\n{\"ts\":0000000000000.000,\"caller\":\"logs/logs_test.go:000\",\"msg\":\"klog InfoS\",\"v\":0,\"key\":\"value\"}\n{\"ts\":0000000000000.000,\"logger\":\"foo\",\"caller\":\"logs/logs_test.go:000\",\"msg\":\"Contextual error\",\"key\":\"value\",\"err\":\"fake-error\"}\n`,\n\t\t},\n\t\t{\n\t\t\tname:  \"logging-format-text\",\n\t\t\tflags: \"--logging-format=text\",\n\t\t\texpectStdout: `\nI0000 00:00:00.000000   00000 logs.go:000] \"log Print\" source=\"vcert\"\nI0000 00:00:00.000000   00000 logs_test.go:000] \"slog Info\"\nI0000 00:00:00.000000   00000 logs_test.go:000] klog Info\nI0000 00:00:00.000000   00000 logs_test.go:000] \"klog InfoS\" key=\"value\"\n`,\n\t\t\texpectStderr: `\nW0000 00:00:00.000000   00000 
logs_test.go:000] \"slog Warn\"\nE0000 00:00:00.000000   00000 logs_test.go:000] \"slog Error\"\nW0000 00:00:00.000000   00000 logs_test.go:000] klog Warning\nE0000 00:00:00.000000   00000 logs_test.go:000] \"klog Error\" err=\"fake-error\"\nE0000 00:00:00.000000   00000 logs_test.go:000] \"Contextual error\" err=\"fake-error\" logger=\"foo\" key=\"value\"\n`,\n\t\t},\n\t\t{\n\t\t\tname:  \"log-text-split-stream-false\",\n\t\t\tflags: \"--logging-format=text --log-text-split-stream=false\",\n\t\t\texpectStderr: `\nI0000 00:00:00.000000   00000 logs.go:000] \"log Print\" source=\"vcert\"\nI0000 00:00:00.000000   00000 logs_test.go:000] \"slog Info\"\nW0000 00:00:00.000000   00000 logs_test.go:000] \"slog Warn\"\nE0000 00:00:00.000000   00000 logs_test.go:000] \"slog Error\"\nI0000 00:00:00.000000   00000 logs_test.go:000] klog Info\nW0000 00:00:00.000000   00000 logs_test.go:000] klog Warning\nE0000 00:00:00.000000   00000 logs_test.go:000] \"klog Error\" err=\"fake-error\"\nI0000 00:00:00.000000   00000 logs_test.go:000] \"klog InfoS\" key=\"value\"\nE0000 00:00:00.000000   00000 logs_test.go:000] \"Contextual error\" err=\"fake-error\" logger=\"foo\" key=\"value\"\n`,\n\t\t},\n\t\t{\n\t\t\tname:  \"v-level-3\",\n\t\t\tflags: \"-v=3\",\n\t\t\texpectStdout: `\nI0000 00:00:00.000000   00000 logs.go:000] \"log Print\" source=\"vcert\"\nI0000 00:00:00.000000   00000 logs_test.go:000] \"slog Info\"\nI0000 00:00:00.000000   00000 logs_test.go:000] klog Info\nI0000 00:00:00.000000   00000 logs_test.go:000] \"klog InfoS\" key=\"value\"\nI0000 00:00:00.000000   00000 logs_test.go:000] \"Contextual Info Level 3\" logger=\"foo\" key=\"value\"\n`,\n\t\t\texpectStderr: `\nW0000 00:00:00.000000   00000 logs_test.go:000] \"slog Warn\"\nE0000 00:00:00.000000   00000 logs_test.go:000] \"slog Error\"\nW0000 00:00:00.000000   00000 logs_test.go:000] klog Warning\nE0000 00:00:00.000000   00000 logs_test.go:000] \"klog Error\" err=\"fake-error\"\nE0000 00:00:00.000000   00000 
logs_test.go:000] \"Contextual error\" err=\"fake-error\" logger=\"foo\" key=\"value\"\n`,\n\t\t},\n\t\t{\n\t\t\tname:  \"log-level-3\",\n\t\t\tflags: \"--log-level=3\",\n\t\t\texpectStdout: `\nI0000 00:00:00.000000   00000 logs.go:000] \"log Print\" source=\"vcert\"\nI0000 00:00:00.000000   00000 logs_test.go:000] \"slog Info\"\nI0000 00:00:00.000000   00000 logs_test.go:000] klog Info\nI0000 00:00:00.000000   00000 logs_test.go:000] \"klog InfoS\" key=\"value\"\nI0000 00:00:00.000000   00000 logs_test.go:000] \"Contextual Info Level 3\" logger=\"foo\" key=\"value\"\n`,\n\t\t\texpectStderr: `\nW0000 00:00:00.000000   00000 logs_test.go:000] \"slog Warn\"\nE0000 00:00:00.000000   00000 logs_test.go:000] \"slog Error\"\nW0000 00:00:00.000000   00000 logs_test.go:000] klog Warning\nE0000 00:00:00.000000   00000 logs_test.go:000] \"klog Error\" err=\"fake-error\"\nE0000 00:00:00.000000   00000 logs_test.go:000] \"Contextual error\" err=\"fake-error\" logger=\"foo\" key=\"value\"\n`,\n\t\t},\n\t\t{\n\t\t\tname:  \"vmodule-level-3\",\n\t\t\tflags: \"--vmodule=logs_test=3\",\n\t\t\texpectStdout: `\nI0000 00:00:00.000000   00000 logs.go:000] \"log Print\" source=\"vcert\"\nI0000 00:00:00.000000   00000 logs_test.go:000] \"slog Info\"\nI0000 00:00:00.000000   00000 logs_test.go:000] klog Info\nI0000 00:00:00.000000   00000 logs_test.go:000] \"klog InfoS\" key=\"value\"\nI0000 00:00:00.000000   00000 logs_test.go:000] \"Contextual Info Level 3\" logger=\"foo\" key=\"value\"\n`,\n\t\t\texpectStderr: `\nW0000 00:00:00.000000   00000 logs_test.go:000] \"slog Warn\"\nE0000 00:00:00.000000   00000 logs_test.go:000] \"slog Error\"\nW0000 00:00:00.000000   00000 logs_test.go:000] klog Warning\nE0000 00:00:00.000000   00000 logs_test.go:000] \"klog Error\" err=\"fake-error\"\nE0000 00:00:00.000000   00000 logs_test.go:000] \"Contextual error\" err=\"fake-error\" logger=\"foo\" key=\"value\"\n`,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) 
{\n\t\t\tctx := t.Context()\n\t\t\tcmd := exec.CommandContext(ctx, os.Args[0], \"-test.run=^TestLogs$\", \"-test.v\")\n\t\t\tvar (\n\t\t\t\tstdout bytes.Buffer\n\t\t\t\tstderr bytes.Buffer\n\t\t\t)\n\t\t\tcmd.Stdout = &stdout\n\t\t\tcmd.Stderr = &stderr\n\t\t\tcmd.Env = append(\n\t\t\t\tos.Environ(),\n\t\t\t\t\"GO_CHILD_FLAG=\"+test.flags,\n\t\t\t)\n\t\t\tif test.skipIntialize {\n\t\t\t\tcmd.Env = append(\n\t\t\t\t\tcmd.Env,\n\t\t\t\t\t\"GO_CHILD_SKIP_INITIALIZE=true\",\n\t\t\t\t)\n\t\t\t}\n\t\t\terr := cmd.Run()\n\n\t\t\tt.Logf(\"FLAGS\\n%s\\n\", test.flags)\n\t\t\t// Remove the standard output generated by `-test.v`\n\t\t\tstdoutStr := strings.TrimPrefix(stdout.String(), \"=== RUN   TestLogs\\n\")\n\t\t\tstderrStr := stderr.String()\n\t\t\tt.Logf(\"STDOUT\\n%s\\n\", stdoutStr)\n\t\t\tt.Logf(\"STDERR\\n%s\\n\", stderrStr)\n\t\t\tif test.expectError {\n\t\t\t\tvar target *exec.ExitError\n\t\t\t\trequire.ErrorAs(t, err, &target)\n\t\t\t\trequire.Equal(t, 1, target.ExitCode(), \"Flag parsing failures should always result in exit code 1\")\n\t\t\t\tt.Logf(\"ERROR: %v\", err)\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t}\n\n\t\t\t// This trick helps with the readability of the table test: we can\n\t\t\t// have the first \"expected\" log line at the same level as the other\n\t\t\t// lines.\n\t\t\ttest.expectStdout = strings.TrimPrefix(test.expectStdout, \"\\n\")\n\t\t\ttest.expectStderr = strings.TrimPrefix(test.expectStderr, \"\\n\")\n\n\t\t\trequire.Equal(t, test.expectStdout, replaceWithStaticTimestamps(stdoutStr), \"stdout doesn't match\")\n\t\t\trequire.Equal(t, test.expectStderr, replaceWithStaticTimestamps(stderrStr), \"stderr doesn't match\")\n\t\t})\n\t}\n}\n\nvar (\n\ttimestampRegexpStdLog = regexp.MustCompile(`\\d{4}/\\d{2}/\\d{2} \\d{2}:\\d{2}:\\d{2}`)\n\ttimestampRegexpKlog   = regexp.MustCompile(`\\d{4} \\d{2}:\\d{2}:\\d{2}\\.\\d{6} +\\d+`)\n\ttimestampRegexpJSON   = regexp.MustCompile(`\"ts\":\\d+\\.?\\d*`)\n\tfileAndLineRegexpJSON = 
regexp.MustCompile(`\"caller\":\"([^\"]+).go:\\d+\"`)\n\tfileAndLineRegexpKlog = regexp.MustCompile(` ([^:]+).go:\\d+`)\n)\n\n// Replaces the klog and JSON timestamps with a static timestamp to make it\n// easier to assert the logs. It also replaces the line number with 000 as it\n// often changes.\n//\n//\tI1018 15:12:57.953433   22183 logs.go:000] log\n//\t{\"ts\":1729258473588.828,\"caller\":\"log/log.go:000\",\"msg\":\"log Print\",\"v\":0}\n//\t2024/10/18 15:40:50 log Print\n//\n// to the fixed:\n//\n//\tI0000 00:00:00.000000   00000 logs.go:000] log\n//\t{\"ts\":0000000000000.000,\"caller\":\"log/log.go:000\",\"msg\":\"log Print\",\"v\":0}\n//\t0000/00/00 00:00:00 log Print\nfunc replaceWithStaticTimestamps(input string) string {\n\tinput = timestampRegexpKlog.ReplaceAllString(input, \"0000 00:00:00.000000   00000\")\n\tinput = timestampRegexpJSON.ReplaceAllString(input, `\"ts\":0000000000000.000`)\n\tinput = timestampRegexpStdLog.ReplaceAllString(input, \"0000/00/00 00:00:00\")\n\tinput = fileAndLineRegexpJSON.ReplaceAllString(input, `\"caller\":\"$1.go:000\"`)\n\tinput = fileAndLineRegexpKlog.ReplaceAllString(input, \" $1.go:000\")\n\treturn input\n}\n\nfunc Test_replaceWithStaticTimestamps(t *testing.T) {\n\ttests := []struct {\n\t\tname     string\n\t\tinput    string\n\t\texpected string\n\t}{\n\t\t{\n\t\t\tname:     \"klog\",\n\t\t\tinput:    `I1018 15:20:42.861239    2386 logs_test.go:13] \"Contextual Info Level 3\" logger=\"foo\" key=\"value\"`,\n\t\t\texpected: `I0000 00:00:00.000000   00000 logs_test.go:000] \"Contextual Info Level 3\" logger=\"foo\" key=\"value\"`,\n\t\t},\n\t\t{\n\t\t\tname:     \"json-with-nanoseconds\",\n\t\t\tinput:    `{\"ts\":1729270111728.125,\"caller\":\"logs/logs_test.go:000\",\"msg\":\"slog Warn\",\"v\":0}`,\n\t\t\texpected: `{\"ts\":0000000000000.000,\"caller\":\"logs/logs_test.go:000\",\"msg\":\"slog Warn\",\"v\":0}`,\n\t\t},\n\t\t{\n\t\t\tname:     \"json-might-not-have-nanoseconds\",\n\t\t\tinput:    
`{\"ts\":1729270111728,\"caller\":\"logs/logs_test.go:000\",\"msg\":\"slog Info\",\"v\":0}`,\n\t\t\texpected: `{\"ts\":0000000000000.000,\"caller\":\"logs/logs_test.go:000\",\"msg\":\"slog Info\",\"v\":0}`,\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tassert.Equal(t, test.expected, replaceWithStaticTimestamps(test.input))\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/permissions/generate.go",
    "content": "package permissions\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\trbac \"k8s.io/api/rbac/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"sigs.k8s.io/yaml\"\n\n\t\"github.com/jetstack/preflight/pkg/agent\"\n\t\"github.com/jetstack/preflight/pkg/datagatherer/k8sdynamic\"\n)\n\n// AgentRBACManifests is a wrapper around the various RBAC structs needed to grant the agent fine-grained permissions as per its dg configs\ntype AgentRBACManifests struct {\n\t// ClusterRoles is a list of roles for resources the agent will collect\n\tClusterRoles []rbac.ClusterRole\n\t// ClusterRoleBindings is a list of crbs for resources which have no include/exclude ns configured\n\tClusterRoleBindings []rbac.ClusterRoleBinding\n\t// RoleBindings is a list of namespaced bindings to grant permissions when include/exclude ns set\n\tRoleBindings []rbac.RoleBinding\n}\n\nconst agentNamespace = \"jetstack-secure\"\nconst agentSubjectName = \"agent\"\n\nfunc GenerateAgentRBACManifests(dataGatherers []agent.DataGatherer) AgentRBACManifests {\n\t// create a new AgentRBACManifest struct\n\tvar AgentRBACManifests AgentRBACManifests\n\n\tfor _, dg := range dataGatherers {\n\t\tif dg.Kind != \"k8s-dynamic\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tdyConfig := dg.Config.(*k8sdynamic.ConfigDynamic)\n\t\tmetadataName := fmt.Sprintf(\"%s-agent-%s-reader\", agentNamespace, dyConfig.GroupVersionResource.Resource)\n\n\t\tAgentRBACManifests.ClusterRoles = append(AgentRBACManifests.ClusterRoles, rbac.ClusterRole{\n\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\tKind:       \"ClusterRole\",\n\t\t\t\tAPIVersion: \"rbac.authorization.k8s.io/v1\",\n\t\t\t},\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: metadataName,\n\t\t\t},\n\t\t\tRules: []rbac.PolicyRule{\n\t\t\t\t{\n\t\t\t\t\tVerbs:     []string{\"get\", \"list\", \"watch\"},\n\t\t\t\t\tAPIGroups: []string{dyConfig.GroupVersionResource.Group},\n\t\t\t\t\tResources: 
[]string{dyConfig.GroupVersionResource.Resource},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\t\t// if dyConfig.IncludeNamespaces has more than 0 items in it\n\t\t//   then, for each namespace create a rbac.RoleBinding in that namespace\n\t\tif len(dyConfig.IncludeNamespaces) != 0 {\n\t\t\tfor _, ns := range dyConfig.IncludeNamespaces {\n\t\t\t\tAgentRBACManifests.RoleBindings = append(AgentRBACManifests.RoleBindings, rbac.RoleBinding{\n\t\t\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\t\t\tKind:       \"RoleBinding\",\n\t\t\t\t\t\tAPIVersion: \"rbac.authorization.k8s.io/v1\",\n\t\t\t\t\t},\n\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      metadataName,\n\t\t\t\t\t\tNamespace: ns,\n\t\t\t\t\t},\n\n\t\t\t\t\tSubjects: []rbac.Subject{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKind:      \"ServiceAccount\",\n\t\t\t\t\t\t\tName:      agentSubjectName,\n\t\t\t\t\t\t\tNamespace: agentNamespace,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\n\t\t\t\t\tRoleRef: rbac.RoleRef{\n\t\t\t\t\t\tKind:     \"ClusterRole\",\n\t\t\t\t\t\tName:     metadataName,\n\t\t\t\t\t\tAPIGroup: \"rbac.authorization.k8s.io\",\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t}\n\t\t} else {\n\t\t\t// only do this if the dg does not have IncludeNamespaces set\n\t\t\tAgentRBACManifests.ClusterRoleBindings = append(AgentRBACManifests.ClusterRoleBindings, rbac.ClusterRoleBinding{\n\t\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\t\tKind:       \"ClusterRoleBinding\",\n\t\t\t\t\tAPIVersion: \"rbac.authorization.k8s.io/v1\",\n\t\t\t\t},\n\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: metadataName,\n\t\t\t\t},\n\n\t\t\t\tSubjects: []rbac.Subject{\n\t\t\t\t\t{\n\t\t\t\t\t\tKind:      \"ServiceAccount\",\n\t\t\t\t\t\tName:      agentSubjectName,\n\t\t\t\t\t\tNamespace: agentNamespace,\n\t\t\t\t\t},\n\t\t\t\t},\n\n\t\t\t\tRoleRef: rbac.RoleRef{\n\t\t\t\t\tKind:     \"ClusterRole\",\n\t\t\t\t\tName:     metadataName,\n\t\t\t\t\tAPIGroup: \"rbac.authorization.k8s.io\",\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\n\t}\n\n\treturn 
AgentRBACManifests\n}\n\nfunc createClusterRoleString(clusterRoles []rbac.ClusterRole) string {\n\tvar builder strings.Builder\n\tfor _, cb := range clusterRoles {\n\t\tdata, err := yaml.Marshal(cb)\n\t\tif err != nil {\n\t\t\tfmt.Print(\"Cluster Role fails to marshal\")\n\t\t}\n\n\t\tbuilder.WriteString(\"\\n\")\n\t\tbuilder.Write(data)\n\t\tbuilder.WriteString(\"---\")\n\t}\n\n\treturn builder.String()\n}\nfunc createRoleBindingString(roleBindings []rbac.RoleBinding) string {\n\tvar builder strings.Builder\n\tfor _, cb := range roleBindings {\n\t\tdata, err := yaml.Marshal(cb)\n\t\tif err != nil {\n\t\t\tfmt.Print(\"Role Binding fails to marshal\")\n\t\t}\n\n\t\tbuilder.WriteString(\"\\n\")\n\t\tbuilder.Write(data)\n\t\tbuilder.WriteString(\"---\")\n\t}\n\n\treturn builder.String()\n}\nfunc createClusterRoleBindingString(clusterRoleBindings []rbac.ClusterRoleBinding) string {\n\tvar builder strings.Builder\n\tfor _, cb := range clusterRoleBindings {\n\t\tdata, err := yaml.Marshal(cb)\n\t\tif err != nil {\n\t\t\tfmt.Print(\"Cluster Role Binding fails to marshal\")\n\t\t}\n\n\t\tbuilder.WriteString(\"\\n\")\n\t\tbuilder.Write(data)\n\t\tbuilder.WriteString(\"---\")\n\t}\n\n\treturn builder.String()\n}\n\nfunc GenerateFullManifest(dataGatherers []agent.DataGatherer) string {\n\tagentRBACManifestsStruct := GenerateAgentRBACManifests(dataGatherers)\n\tagentCLR := createClusterRoleString(agentRBACManifestsStruct.ClusterRoles)\n\tagentCLRB := createClusterRoleBindingString(agentRBACManifestsStruct.ClusterRoleBindings)\n\tagentRB := createRoleBindingString(agentRBACManifestsStruct.RoleBindings)\n\n\tout := fmt.Sprintf(`%s%s%s`, agentCLR, agentCLRB, agentRB)\n\tout = strings.TrimPrefix(out, \"\\n\")\n\tout = strings.TrimSpace(out)\n\tout = strings.ReplaceAll(out, \"\\n  creationTimestamp: null\", \"\")\n\n\treturn out\n\n}\n"
  },
  {
    "path": "pkg/permissions/generate_test.go",
    "content": "package permissions\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n\trbac \"k8s.io/api/rbac/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\n\t\"github.com/jetstack/preflight/pkg/agent\"\n\t\"github.com/jetstack/preflight/pkg/datagatherer/k8sdynamic\"\n)\n\nfunc TestGenerateAgentRBACManifestsString(t *testing.T) {\n\ttestCases := []struct {\n\t\tdescription           string\n\t\tdataGatherers         []agent.DataGatherer\n\t\texpectedRBACManifests string\n\t}{\n\t\t{\n\t\t\tdescription: \"Generate ClusterRole and ClusterRoleBinding for simple pod dg use case\",\n\t\t\tdataGatherers: []agent.DataGatherer{\n\t\t\t\t{\n\t\t\t\t\tName: \"k8s/pods\",\n\t\t\t\t\tKind: \"k8s-dynamic\",\n\t\t\t\t\tConfig: &k8sdynamic.ConfigDynamic{\n\t\t\t\t\t\tGroupVersionResource: schema.GroupVersionResource{\n\t\t\t\t\t\t\tVersion:  \"v1\",\n\t\t\t\t\t\t\tResource: \"pods\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedRBACManifests: `apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: jetstack-secure-agent-pods-reader\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - pods\n  verbs:\n  - get\n  - list\n  - watch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: jetstack-secure-agent-pods-reader\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: jetstack-secure-agent-pods-reader\nsubjects:\n- kind: ServiceAccount\n  name: agent\n  namespace: jetstack-secure\n---`,\n\t\t},\n\t\t{\n\t\t\tdescription: \"Generate ClusterRole and RoleBinding for simple pod dg with include namespace \\\"foobar\\\"\",\n\t\t\tdataGatherers: []agent.DataGatherer{\n\t\t\t\t{\n\t\t\t\t\tName: \"k8s/pods\",\n\t\t\t\t\tKind: \"k8s-dynamic\",\n\t\t\t\t\tConfig: &k8sdynamic.ConfigDynamic{\n\t\t\t\t\t\tIncludeNamespaces: []string{\"foobar\"},\n\t\t\t\t\t\tGroupVersionResource: 
schema.GroupVersionResource{\n\t\t\t\t\t\t\tVersion:  \"v1\",\n\t\t\t\t\t\t\tResource: \"pods\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedRBACManifests: `apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: jetstack-secure-agent-pods-reader\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - pods\n  verbs:\n  - get\n  - list\n  - watch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: jetstack-secure-agent-pods-reader\n  namespace: foobar\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: jetstack-secure-agent-pods-reader\nsubjects:\n- kind: ServiceAccount\n  name: agent\n  namespace: jetstack-secure\n---`,\n\t\t},\n\t\t{\n\t\t\tdescription: \"Generate multiple ClusterRoles and ClusterRoleBindings for simple pod and nodes dg use case\",\n\t\t\tdataGatherers: []agent.DataGatherer{\n\t\t\t\t{\n\t\t\t\t\tName: \"k8s/pods\",\n\t\t\t\t\tKind: \"k8s-dynamic\",\n\t\t\t\t\tConfig: &k8sdynamic.ConfigDynamic{\n\t\t\t\t\t\tGroupVersionResource: schema.GroupVersionResource{\n\t\t\t\t\t\t\tVersion:  \"v1\",\n\t\t\t\t\t\t\tResource: \"pods\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"k8s/nodes\",\n\t\t\t\t\tKind: \"k8s-dynamic\",\n\t\t\t\t\tConfig: &k8sdynamic.ConfigDynamic{\n\t\t\t\t\t\tGroupVersionResource: schema.GroupVersionResource{\n\t\t\t\t\t\t\tVersion:  \"v1\",\n\t\t\t\t\t\t\tResource: \"nodes\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedRBACManifests: `apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: jetstack-secure-agent-pods-reader\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - pods\n  verbs:\n  - get\n  - list\n  - watch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: jetstack-secure-agent-nodes-reader\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - nodes\n  verbs:\n  - get\n  - list\n  - watch\n---\napiVersion: 
rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: jetstack-secure-agent-pods-reader\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: jetstack-secure-agent-pods-reader\nsubjects:\n- kind: ServiceAccount\n  name: agent\n  namespace: jetstack-secure\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: jetstack-secure-agent-nodes-reader\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: jetstack-secure-agent-nodes-reader\nsubjects:\n- kind: ServiceAccount\n  name: agent\n  namespace: jetstack-secure\n---`,\n\t\t},\n\t}\n\n\tfor _, input := range testCases {\n\t\tgot := GenerateFullManifest(input.dataGatherers)\n\t\tif input.expectedRBACManifests != got {\n\t\t\tt.Errorf(\"value mismatch, \\n**********expected:******************************\\n%s\\n**********got:******************************\\n%s\", input.expectedRBACManifests, got)\n\t\t}\n\t}\n}\n\nfunc TestGenerateAgentRBACManifests(t *testing.T) {\n\ttestCases := []struct {\n\t\tdescription                string\n\t\tdataGatherers              []agent.DataGatherer\n\t\texpectedAgentRBACManifests AgentRBACManifests\n\t}{\n\t\t{\n\t\t\tdescription: \"Generate ClusterRole and ClusterRoleBinding for simple pod dg use case\",\n\t\t\tdataGatherers: []agent.DataGatherer{\n\t\t\t\t{\n\t\t\t\t\tName: \"k8s/pods\",\n\t\t\t\t\tKind: \"k8s-dynamic\",\n\t\t\t\t\tConfig: &k8sdynamic.ConfigDynamic{\n\t\t\t\t\t\tGroupVersionResource: schema.GroupVersionResource{\n\t\t\t\t\t\t\tVersion:  \"v1\",\n\t\t\t\t\t\t\tResource: \"pods\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedAgentRBACManifests: AgentRBACManifests{\n\t\t\t\tClusterRoles: []rbac.ClusterRole{\n\t\t\t\t\t{\n\t\t\t\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\t\t\t\tKind:       \"ClusterRole\",\n\t\t\t\t\t\t\tAPIVersion: \"rbac.authorization.k8s.io/v1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: 
\"jetstack-secure-agent-pods-reader\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tRules: []rbac.PolicyRule{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tVerbs:     []string{\"get\", \"list\", \"watch\"},\n\t\t\t\t\t\t\t\tAPIGroups: []string{\"\"},\n\t\t\t\t\t\t\t\tResources: []string{\"pods\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tClusterRoleBindings: []rbac.ClusterRoleBinding{\n\t\t\t\t\t{\n\t\t\t\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\t\t\t\tKind:       \"ClusterRoleBinding\",\n\t\t\t\t\t\t\tAPIVersion: \"rbac.authorization.k8s.io/v1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"jetstack-secure-agent-pods-reader\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tSubjects: []rbac.Subject{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKind:      \"ServiceAccount\",\n\t\t\t\t\t\t\t\tName:      \"agent\",\n\t\t\t\t\t\t\t\tNamespace: \"jetstack-secure\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tRoleRef: rbac.RoleRef{\n\t\t\t\t\t\t\tKind:     \"ClusterRole\",\n\t\t\t\t\t\t\tName:     \"jetstack-secure-agent-pods-reader\",\n\t\t\t\t\t\t\tAPIGroup: \"rbac.authorization.k8s.io\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdescription: \"Generate RBAC config for simple pod dg use case where only two namespace are included\",\n\t\t\tdataGatherers: []agent.DataGatherer{\n\t\t\t\t{\n\t\t\t\t\tName: \"k8s/pods\",\n\t\t\t\t\tKind: \"k8s-dynamic\",\n\t\t\t\t\tConfig: &k8sdynamic.ConfigDynamic{\n\t\t\t\t\t\tGroupVersionResource: schema.GroupVersionResource{\n\t\t\t\t\t\t\tVersion:  \"v1\",\n\t\t\t\t\t\t\tResource: \"pods\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tIncludeNamespaces: []string{\"example\", \"foobar\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedAgentRBACManifests: AgentRBACManifests{\n\t\t\t\tClusterRoles: []rbac.ClusterRole{\n\t\t\t\t\t{\n\t\t\t\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\t\t\t\tKind:       \"ClusterRole\",\n\t\t\t\t\t\t\tAPIVersion: 
\"rbac.authorization.k8s.io/v1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"jetstack-secure-agent-pods-reader\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tRules: []rbac.PolicyRule{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tVerbs:     []string{\"get\", \"list\", \"watch\"},\n\t\t\t\t\t\t\t\tAPIGroups: []string{\"\"},\n\t\t\t\t\t\t\t\tResources: []string{\"pods\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tRoleBindings: []rbac.RoleBinding{\n\t\t\t\t\t{\n\t\t\t\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\t\t\t\tKind:       \"RoleBinding\",\n\t\t\t\t\t\t\tAPIVersion: \"rbac.authorization.k8s.io/v1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName:      \"jetstack-secure-agent-pods-reader\",\n\t\t\t\t\t\t\tNamespace: \"example\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tSubjects: []rbac.Subject{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKind:      \"ServiceAccount\",\n\t\t\t\t\t\t\t\tName:      \"agent\",\n\t\t\t\t\t\t\t\tNamespace: \"jetstack-secure\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tRoleRef: rbac.RoleRef{\n\t\t\t\t\t\t\tKind:     \"ClusterRole\",\n\t\t\t\t\t\t\tName:     \"jetstack-secure-agent-pods-reader\",\n\t\t\t\t\t\t\tAPIGroup: \"rbac.authorization.k8s.io\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\t\t\t\tKind:       \"RoleBinding\",\n\t\t\t\t\t\t\tAPIVersion: \"rbac.authorization.k8s.io/v1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName:      \"jetstack-secure-agent-pods-reader\",\n\t\t\t\t\t\t\tNamespace: \"foobar\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tSubjects: []rbac.Subject{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKind:      \"ServiceAccount\",\n\t\t\t\t\t\t\t\tName:      \"agent\",\n\t\t\t\t\t\t\t\tNamespace: \"jetstack-secure\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tRoleRef: rbac.RoleRef{\n\t\t\t\t\t\t\tKind:     \"ClusterRole\",\n\t\t\t\t\t\t\tName:     
\"jetstack-secure-agent-pods-reader\",\n\t\t\t\t\t\t\tAPIGroup: \"rbac.authorization.k8s.io\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, input := range testCases {\n\t\tgot := GenerateAgentRBACManifests(input.dataGatherers)\n\n\t\trequire.Equal(t, input.expectedAgentRBACManifests, got)\n\t}\n}\n"
  },
  {
    "path": "pkg/testutil/envtest.go",
    "content": "package testutil\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com/jetstack/venafi-connection-lib/api/v1alpha1\"\n\t\"github.com/stretchr/testify/require\"\n\tcorev1 \"k8s.io/api/core/v1\"\n\trbacv1 \"k8s.io/api/rbac/v1\"\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/util/yaml\"\n\t\"k8s.io/client-go/rest\"\n\t\"k8s.io/client-go/tools/clientcmd\"\n\tclientcmdapi \"k8s.io/client-go/tools/clientcmd/api\"\n\tctrlruntime \"sigs.k8s.io/controller-runtime/pkg/client\"\n\t\"sigs.k8s.io/controller-runtime/pkg/envtest\"\n\n\t\"github.com/jetstack/preflight/internal/cyberark/dataupload\"\n\t\"github.com/jetstack/preflight/internal/cyberark/identity\"\n\t\"github.com/jetstack/preflight/internal/cyberark/servicediscovery\"\n\t\"github.com/jetstack/preflight/pkg/client\"\n)\n\n// To see the API server logs, set:\n//\n//\texport KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT=true\nfunc WithEnvtest(t testing.TB) (_ *envtest.Environment, _ *rest.Config, kclient ctrlruntime.WithWatch) {\n\tt.Helper()\n\n\t// If KUBEBUILDER_ASSETS isn't set, show a warning to the user.\n\tif os.Getenv(\"KUBEBUILDER_ASSETS\") == \"\" {\n\t\tt.Fatalf(\"KUBEBUILDER_ASSETS isn't set. 
You can run this test using `make test`.\\n\" +\n\t\t\t\"But if you prefer not to use `make`, run these two commands first:\\n\" +\n\t\t\t\"    make _bin/tools/{kube-apiserver,etcd}\\n\" +\n\t\t\t\"    export KUBEBUILDER_ASSETS=$PWD/_bin/tools\")\n\t}\n\tenvtest := &envtest.Environment{\n\t\tErrorIfCRDPathMissing: true,\n\t\tCRDDirectoryPaths:     []string{\"../../deploy/charts/venafi-kubernetes-agent/crd_bases/jetstack.io_venaficonnections.yaml\"},\n\t}\n\n\trestconf, err := envtest.Start()\n\tt.Cleanup(func() {\n\t\tt.Log(\"Waiting for envtest to exit\")\n\t\te := envtest.Stop()\n\t\trequire.NoError(t, e)\n\t})\n\trequire.NoError(t, err)\n\n\tsch := runtime.NewScheme()\n\t_ = v1alpha1.AddToScheme(sch)\n\t_ = corev1.AddToScheme(sch)\n\t_ = rbacv1.AddToScheme(sch)\n\n\tkclient, err = ctrlruntime.NewWithWatch(restconf, ctrlruntime.Options{Scheme: sch})\n\trequire.NoError(t, err)\n\n\treturn envtest, restconf, kclient\n}\n\n// Copied from https://github.com/kubernetes/client-go/issues/711#issuecomment-1666075787.\nfunc WithKubeconfig(t testing.TB, restCfg *rest.Config) string {\n\tt.Helper()\n\n\tclusters := make(map[string]*clientcmdapi.Cluster)\n\tclusters[\"default-cluster\"] = &clientcmdapi.Cluster{\n\t\tServer:                   restCfg.Host,\n\t\tCertificateAuthorityData: restCfg.CAData,\n\t}\n\tcontexts := make(map[string]*clientcmdapi.Context)\n\tcontexts[\"default-context\"] = &clientcmdapi.Context{\n\t\tCluster:  \"default-cluster\",\n\t\tAuthInfo: \"default-user\",\n\t}\n\tauthinfos := make(map[string]*clientcmdapi.AuthInfo)\n\tauthinfos[\"default-user\"] = &clientcmdapi.AuthInfo{\n\t\tClientCertificateData: restCfg.CertData,\n\t\tClientKeyData:         restCfg.KeyData,\n\t}\n\tclientConfig := clientcmdapi.Config{\n\t\tKind:           \"Config\",\n\t\tAPIVersion:     \"v1\",\n\t\tClusters:       clusters,\n\t\tContexts:       contexts,\n\t\tCurrentContext: \"default-context\",\n\t\tAuthInfos:      authinfos,\n\t}\n\n\td := t.TempDir()\n\tkubeconfig, _ := 
os.CreateTemp(d, \"kubeconfig\")\n\tdefer kubeconfig.Close()\n\n\terr := clientcmd.WriteToFile(clientConfig, kubeconfig.Name())\n\trequire.NoError(t, err)\n\n\treturn kubeconfig.Name()\n}\n\n// Tests calling to VenConnClient.PostDataReadingsWithOptions must call this\n// function to start the VenafiConnection watcher. If you don't call this, the\n// test will stall.\nfunc VenConnStartWatching(ctx context.Context, t *testing.T, cl client.Client) {\n\tt.Helper()\n\n\trequire.IsType(t, &client.VenConnClient{}, cl)\n\n\t// This `cancel` is important because the below func `Start(ctx)` needs to\n\t// be stopped before the apiserver is stopped. Otherwise, the test fail with\n\t// the message \"timeout waiting for process kube-apiserver to stop\". See:\n\t// https://github.com/jetstack/venafi-connection-lib/pull/158#issuecomment-1949002322\n\t// https://github.com/kubernetes-sigs/controller-runtime/issues/1571#issuecomment-945535598\n\tctx, cancel := context.WithCancel(ctx)\n\tgo func() {\n\t\terr := cl.(*client.VenConnClient).Start(ctx)\n\t\trequire.NoError(t, err)\n\t}()\n\tt.Cleanup(cancel)\n}\n\n// Works with VenafiCloudClient and VenConnClient. Allows you to trust a given\n// CA.\nfunc TrustCA(t *testing.T, cl client.Client, cert *x509.Certificate) {\n\tt.Helper()\n\n\tvar httpClient *http.Client\n\tswitch c := cl.(type) {\n\tcase *client.VenafiCloudClient:\n\t\thttpClient = c.Client\n\tcase *client.VenConnClient:\n\t\thttpClient = c.Client\n\tdefault:\n\t\tt.Fatalf(\"unsupported client type: %T\", cl)\n\t}\n\n\tpool := x509.NewCertPool()\n\tpool.AddCert(cert)\n\n\tif httpClient.Transport == nil {\n\t\thttpClient.Transport = http.DefaultTransport\n\t}\n\tif httpClient.Transport.(*http.Transport).TLSClientConfig == nil {\n\t\thttpClient.Transport.(*http.Transport).TLSClientConfig = &tls.Config{}\n\t}\n\thttpClient.Transport.(*http.Transport).TLSClientConfig.RootCAs = pool\n}\n\n// Parses the YAML manifest. 
Useful for inlining YAML manifests in Go test\n// files, to be used in conjunction with `undent`.\nfunc Parse(yamlmanifest string) []ctrlruntime.Object {\n\tdec := yaml.NewYAMLOrJSONDecoder(strings.NewReader(yamlmanifest), 4096)\n\tvar objs []ctrlruntime.Object\n\tfor {\n\t\tobj := &unstructured.Unstructured{}\n\t\terr := dec.Decode(obj)\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tobjs = append(objs, obj)\n\t}\n\treturn objs\n}\n\ntype AssertRequest func(t testing.TB, r *http.Request)\n\nfunc FakeVenafiCloud(t *testing.T) (_ *httptest.Server, _ *x509.Certificate, setAssert func(AssertRequest)) {\n\tt.Helper()\n\n\tassertFn := func(_ testing.TB, _ *http.Request) {}\n\tassertFnMu := sync.Mutex{}\n\tsetAssert = func(setAssert AssertRequest) {\n\t\tassertFnMu.Lock()\n\t\tdefer assertFnMu.Unlock()\n\t\tassertFn = setAssert\n\t}\n\n\tserver := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tt.Logf(\"fake api.venafi.cloud received request: %s %s\", r.Method, r.URL.Path)\n\n\t\tassertFnMu.Lock()\n\t\tdefer assertFnMu.Unlock()\n\t\tassertFn(t, r)\n\n\t\tif r.URL.Path == \"/v1/oauth2/v2.0/756db001-280e-11ee-84fb-991f3177e2d0/token\" {\n\t\t\t_, _ = w.Write([]byte(`{\"access_token\":\"VALID_ACCESS_TOKEN\",\"expires_in\":900,\"token_type\":\"bearer\"}`))\n\t\t\treturn\n\t\t} else if r.URL.Path == \"/v1/oauth/token/serviceaccount\" {\n\t\t\t_, _ = w.Write([]byte(`{\"access_token\":\"VALID_ACCESS_TOKEN\",\"expires_in\":900,\"token_type\":\"bearer\"}`))\n\t\t\treturn\n\t\t}\n\n\t\taccessToken := strings.TrimPrefix(r.Header.Get(\"Authorization\"), \"Bearer \")\n\t\tapiKey := r.Header.Get(\"Tppl-Api-Key\")\n\t\tif accessToken != \"VALID_ACCESS_TOKEN\" && apiKey != \"VALID_API_KEY\" {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t_, _ = w.Write([]byte(`{\"error\":\"expected header 'Authorization: Bearer VALID_ACCESS_TOKEN' or 'tppl-api-key: VALID_API_KEY', but got Authorization=` + 
r.Header.Get(\"Authorization\") + ` and tppl-api-key=` + r.Header.Get(\"Tppl-Api-Key\")))\n\t\t\treturn\n\t\t}\n\t\tswitch r.URL.Path {\n\t\tcase \"/v1/tlspk/upload/clusterdata/no\":\n\t\t\tif r.URL.Query().Get(\"name\") != \"test cluster name\" {\n\t\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\t\t_, _ = w.Write([]byte(`{\"error\":\"unexpected name query param in the test server: ` + r.URL.Query().Get(\"name\") + `, expected: 'test cluster name'\"}`))\n\t\t\t\treturn\n\t\t\t}\n\t\t\t_, _ = w.Write([]byte(`{\"status\":\"ok\",\"organization\":\"756db001-280e-11ee-84fb-991f3177e2d0\"}`))\n\t\tcase \"/v1/useraccounts\":\n\t\t\t_, _ = w.Write([]byte(`{\"user\": {\"username\": \"user\",\"id\": \"76a126f0-280e-11ee-84fb-991f3177e2d0\"}}`))\n\t\tdefault:\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t_, _ = w.Write([]byte(`{\"error\":\"unexpected path in the test server\",\"path\":\"` + r.URL.Path + `\"}`))\n\t\t}\n\t}))\n\tt.Cleanup(server.Close)\n\n\tcert, err := x509.ParseCertificate(server.TLS.Certificates[0].Certificate[0])\n\trequire.NoError(t, err)\n\n\treturn server, cert, setAssert\n}\n\nfunc FakeTPP(t testing.TB) (*httptest.Server, *x509.Certificate) {\n\tt.Helper()\n\n\tserver := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tt.Logf(\"fake tpp.example.com received request: %s %s\", r.Method, r.URL.Path)\n\n\t\taccessToken := strings.TrimPrefix(r.Header.Get(\"Authorization\"), \"Bearer \")\n\n\t\tswitch r.URL.Path {\n\t\tcase \"/vedsdk/Identity/Self\":\n\t\t\tif accessToken != \"VALID_ACCESS_TOKEN\" {\n\t\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t_, _ = w.Write([]byte(`{\"Identities\":[{\"Name\":\"TEST\"}]}`))\n\t\tcase \"/vedsdk/certificates/checkpolicy\":\n\t\t\t_, _ = w.Write([]byte(`{\"Policy\":{\"Subject\":{\"Organization\":{\"Value\": \"test-org\"}}}}`))\n\t\tdefault:\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t_, _ = 
w.Write([]byte(`{\"error\":\"unexpected path in the test server\",\"path\":\"` + r.URL.Path + `\"}`))\n\t\t}\n\t}))\n\tt.Cleanup(server.Close)\n\n\tcert, err := x509.ParseCertificate(server.TLS.Certificates[0].Certificate[0])\n\trequire.NoError(t, err)\n\n\treturn server, cert\n}\n\n// FakeCyberArk returns an HTTP client that will route requests to mock CyberArk\n// Service Discovery, Identity and Discovery and Context APIs. This is useful\n// for testing code that uses all those APIs, such as\n// `cyberark.NewDatauploadClient`.\n//\n// The environment variable `ARK_DISCOVERY_API` is set to the URL of the mock\n// Service Discovery API, for the supplied `testing.TB` so that the client under\n// test will use the mock Service Discovery API.\n//\n// The returned HTTP client has a transport which logs requests and responses\n// depending on log level of the logger supplied in the context.\nfunc FakeCyberArk(t testing.TB) *http.Client {\n\tt.Helper()\n\n\tidentityAPI, _ := identity.MockIdentityServer(t)\n\tdiscoveryContextAPI, _ := dataupload.MockDataUploadServer(t)\n\thttpClient := servicediscovery.MockDiscoveryServer(t, servicediscovery.Services{\n\t\tIdentity: servicediscovery.ServiceEndpoint{\n\t\t\tAPI: identityAPI,\n\t\t},\n\t\tDiscoveryContext: servicediscovery.ServiceEndpoint{\n\t\t\tAPI: discoveryContextAPI,\n\t\t},\n\t})\n\treturn httpClient\n}\n\n// Generated using:\n//\n//\thelm template ./deploy/charts/venafi-kubernetes-agent -n venafi --set crds.venafiConnection.include=true --show-only templates/venafi-connection-rbac.yaml | grep -ivE '(helm|\\/version)'\n//\n// TODO(mael): Once we get the Makefile modules setup, we should generate this\n// based on the Helm chart rather than having it hardcoded here. 
Ticket:\n// https://venafi.atlassian.net/browse/VC-36331\nconst VenConnRBAC = `\napiVersion: v1\nkind: Namespace\nmetadata:\n  name: venafi\n---\n# Source: venafi-kubernetes-agent/templates/venafi-connection-rbac.yaml\n# The 'venafi-connection' service account is used by multiple\n# controllers. When configuring which resources a VenafiConnection\n# can access, the RBAC rules you create manually must point to this SA.\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: venafi-connection\n  namespace: \"venafi\"\n  labels:\n    app.kubernetes.io/name: \"venafi-connection\"\n    app.kubernetes.io/instance: release-name\n---\n# Source: venafi-kubernetes-agent/templates/venafi-connection-rbac.yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: venafi-connection-role\n  labels:\n    app.kubernetes.io/name: \"venafi-connection\"\n    app.kubernetes.io/instance: release-name\nrules:\n- apiGroups: [ \"\" ]\n  resources: [ \"namespaces\" ]\n  verbs: [ \"get\", \"list\", \"watch\" ]\n\n- apiGroups: [ \"jetstack.io\" ]\n  resources: [ \"venaficonnections\" ]\n  verbs: [ \"get\", \"list\", \"watch\" ]\n\n- apiGroups: [ \"jetstack.io\" ]\n  resources: [ \"venaficonnections/status\" ]\n  verbs: [ \"get\", \"patch\" ]\n---\n# Source: venafi-kubernetes-agent/templates/venafi-connection-rbac.yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: venafi-connection-rolebinding\n  labels:\n    app.kubernetes.io/name: \"venafi-connection\"\n    app.kubernetes.io/instance: release-name\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: venafi-connection-role\nsubjects:\n- kind: ServiceAccount\n  name: venafi-connection\n  namespace: \"venafi\"\n`\n"
  },
  {
    "path": "pkg/testutil/undent.go",
    "content": "package testutil\n\nimport (\n\t\"fmt\"\n)\n\n// Undent removes leading indentation/white-space from given string and returns\n// it as a string. Useful for inlining YAML manifests in Go code. Inline YAML\n// manifests in the Go test files makes it easier to read the test case as\n// opposed to reading verbose-y Go structs.\n//\n// This was copied from https://github.com/jimeh/Undent/blob/main/Undent.go, all\n// credit goes to the author, Jim Myhrberg.\n//\n// For code readability purposes, it is possible to start the literal string\n// with \"\\n\", in which case, the first line is ignored. For example, in the\n// following example, name and labels have the same indentation level but aren't\n// aligned due to the leading '`':\n//\n//\tUndent(\n//\t`    name: foo\n//\t      labels:\n//\t        foo: bar`)\n//\n// Instead, you can write a well-aligned text like this:\n//\n//\tUndent(`\n//\t    name: foo\n//\t    labels:\n//\t       foo: bar`)\n//\n// For code readability purposes, it is also possible to not have the correct\n// number of indentations in the last line. For example:\n//\n//\tUndent(`\n//\t    foo\n//\t    bar\n//\t`)\n//\n// For code readability purposes, you can also omit the indentations for empty\n// lines. For example:\n//\n//\tUndent(`\n//\t    foo     <---- 4 spaces\n//\t            <---- no indentation here\n//\t    bar     <---- 4 spaces\n//\t`)\nfunc Undent(s string) string {\n\tif len(s) == 0 {\n\t\treturn \"\"\n\t}\n\n\t// indentsPerLine is the minimal indent level that we have found up to now.\n\t// For example, \"\\t\\t\" corresponds to an indentation of 2, and \"   \" an\n\t// indentation of 3.\n\tindentsPerLine := 99999999999\n\tindentedLinesCnt := 0\n\n\t// lineOffsets tells you where the beginning of each line is in terms of\n\t// offset. Example:\n\t//  \"\\tfoo\\n\\tbar\\n\"       ->   [0, 5]\n\t//   0       5\n\tvar lineOffsets []int\n\n\t// For code readability purposes, users can leave the first line empty.\n\tif s[0] != '\\n' {\n\t\tlineOffsets = append(lineOffsets, 0)\n\t}\n\n\tcurLineIndent := 0 // Number of tabs or spaces in the current line.\n\tfor pos := range s {\n\t\tif s[pos] == '\\n' {\n\t\t\tif pos+1 < len(s) {\n\t\t\t\tlineOffsets = append(lineOffsets, pos+1)\n\t\t\t}\n\t\t\tcurLineIndent = 0\n\t\t\tcontinue\n\t\t}\n\n\t\t// Skip to the next line if we are already beyond the minimal indent\n\t\t// level that we have found so far. The rest of this line will be kept\n\t\t// as-is.\n\t\tif curLineIndent >= indentsPerLine {\n\t\t\tcontinue\n\t\t}\n\n\t\t// The minimal indent level that we have found so far in previous lines\n\t\t// might not be the smallest indent level. Once we hit the first\n\t\t// non-indent char, let's check whether it is the new minimal indent\n\t\t// level.\n\t\tif s[pos] != ' ' && s[pos] != '\\t' {\n\t\t\tif curLineIndent != 0 {\n\t\t\t\tindentedLinesCnt++\n\t\t\t}\n\t\t\tindentsPerLine = curLineIndent\n\t\t\tcontinue\n\t\t}\n\n\t\tcurLineIndent++\n\t}\n\n\t// Extract each line without indentation.\n\tout := make([]byte, 0, len(s)-(indentsPerLine*indentedLinesCnt))\n\n\tfor line := range lineOffsets {\n\t\tfirst := lineOffsets[line]\n\n\t\t// Index of the last character of the line. It is often the '\\n'\n\t\t// character, except for the last line.\n\t\tvar last int\n\t\tif line == len(lineOffsets)-1 {\n\t\t\tlast = len(s) - 1\n\t\t} else {\n\t\t\tlast = lineOffsets[line+1] - 1\n\t\t}\n\n\t\tvar lineStr string\n\t\tswitch {\n\t\t// Case 0: if the first line is empty, let's skip it.\n\t\tcase line == 0 && first == last:\n\t\t\tlineStr = \"\"\n\n\t\t// Case 1: we want the user to be able to omit some tabs or spaces in\n\t\t// the last line for readability purposes.\n\t\tcase line == len(lineOffsets)-1 && s[last] != '\\n' && isIndent(s[first:last+1]):\n\t\t\tlineStr = \"\"\n\n\t\t// Case 2: we want the user to be able to omit the indentations for\n\t\t// empty lines for readability purposes.\n\t\tcase first == last:\n\t\t\tlineStr = \"\\n\"\n\n\t\t// Case 3: error when a line doesn't contain the correct indentation\n\t\t// level.\n\t\tcase first+indentsPerLine > last:\n\t\t\tpanic(fmt.Sprintf(\"line %d has an incorrect indent level: %q\", line, s[first:last]))\n\n\t\t// Case 4: at this point, the indent level is correct, so let's remove\n\t\t// the indentation and keep the rest.\n\t\tcase first+indentsPerLine <= last:\n\t\t\tlineStr = s[first+indentsPerLine : last+1]\n\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"unexpected case: first: %d, last: %d, indentsPerLine: %d, line: %q\", first, last, indentsPerLine, s[first:last]))\n\t\t}\n\t\tout = append(out, lineStr...)\n\t}\n\n\treturn string(out)\n}\n\n// isIndent returns true if the given string is only made of spaces or a\n// tabs.\nfunc isIndent(s string) bool {\n\tfor _, r := range s {\n\t\tif r != ' ' && r != '\\t' {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n"
  },
  {
    "path": "pkg/testutil/undent_test.go",
    "content": "package testutil\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\n// This is a test for the testing func \"Undent\". I wasn't confident with\n// Undent's behavior, so I wrote this test to verify it.\nfunc Test_Undent(t *testing.T) {\n\tt.Run(\"empty string\", runTest_Undent(``, ``))\n\n\tt.Run(\"if last line has the same indent as other lines and, it is ignored\", runTest_Undent(`\n\t\tfoo\n\t\tbar\n\t\t`, \"foo\\nbar\\n\"))\n\n\tt.Run(\"you can un-indent the last line to make the Go code more readable\", runTest_Undent(`\n\t\tfoo\n\t\tbar\n\t`, \"foo\\nbar\\n\"))\n\n\tt.Run(\"last line may not be an empty line\", runTest_Undent(`\n\t\tfoo\n\t\tbar`, \"foo\\nbar\"))\n\n\tt.Run(\"1 empty line is preserved\", runTest_Undent(\"\\t\\tfoo\\n\\t\\t\\n\\t\\tbar\\n\", \"foo\\n\\nbar\\n\"))\n\n\tt.Run(\"2 empty lines are preserved\", runTest_Undent(\"\\t\\tfoo\\n\\t\\t\\n\\t\\t\\n\\t\\tbar\\n\", \"foo\\n\\n\\nbar\\n\"))\n\n\tt.Run(\"you can also omit the tabs or spaces for empty lines\", runTest_Undent(`\n\t\tfoo\n\n\t\tbar\n\t`, \"foo\\n\\nbar\\n\"))\n\tt.Run(\"bug fix: last char is not omitted\", runTest_Undent(\"\\t\\t{\\n\\t\\t    \\\"kind\\\": \\\"Secret\\\"\\n\\t\\t}\", \"{\\n    \\\"kind\\\": \\\"Secret\\\"\\n}\"))\n}\n\nfunc runTest_Undent(given, expected string) func(t *testing.T) {\n\treturn func(t *testing.T) {\n\t\tt.Helper()\n\t\tgot := Undent(given)\n\t\tassert.Equal(t, expected, got)\n\t}\n}\n"
  },
  {
    "path": "pkg/version/version.go",
    "content": "package version\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n)\n\n// These variables are injected at build time.\n\n// PreflightVersion hosts the version of the app.\nvar PreflightVersion = \"development\"\n\n// Commit is the commit hash of the build\nvar Commit string\n\n// BuildDate is the date it was built\nvar BuildDate string\n\n// GoVersion is the go version that was used to compile this\nvar GoVersion string\n\n// UserAgent returns a standard user agent for use with all HTTP requests. This is implemented in one place so\n// it's uniform across the Kubernetes Agent.\n//\n// TODO(wallrj): The prefix \"Mozilla/5.0\" is currently required by the CyberArk inventory API. Remove the prefix when CyberArk relax the API security settings.\nfunc UserAgent() string {\n\treturn fmt.Sprintf(\"Mozilla/5.0 venafi-kubernetes-agent/%s\", PreflightVersion)\n}\n\n// SetUserAgent augments an http.Request with a standard user agent.\nfunc SetUserAgent(req *http.Request) {\n\treq.Header.Set(\"User-Agent\", UserAgent())\n}\n"
  }
]