Repository: jetstack/preflight Branch: master Commit: 9db49f9f73b5 Files: 256 Total size: 1.4 MB Directory structure: gitextract_2mttmn_y/ ├── .envrc.template ├── .github/ │ ├── ISSUE_TEMPLATE/ │ │ └── bug_report.md │ ├── actions/ │ │ └── repo_access/ │ │ └── action.yaml │ ├── chainguard/ │ │ └── make-self-upgrade.sts.yaml │ ├── renovate.json5 │ └── workflows/ │ ├── govulncheck.yaml │ ├── make-self-upgrade.yaml │ ├── release.yml │ └── tests.yaml ├── .gitignore ├── .golangci.yaml ├── CONTRIBUTING.md ├── LICENSE ├── LICENSES ├── Makefile ├── OWNERS ├── OWNERS_ALIASES ├── README.md ├── RELEASE.md ├── agent.yaml ├── api/ │ ├── agent.go │ ├── common.go │ ├── datareading.go │ └── datareading_test.go ├── cmd/ │ ├── agent.go │ ├── agent_test.go │ ├── ark/ │ │ └── main.go │ ├── echo.go │ ├── helpers.go │ ├── root.go │ └── version.go ├── deploy/ │ └── charts/ │ ├── disco-agent/ │ │ ├── .helmignore │ │ ├── Chart.yaml │ │ ├── README.md │ │ ├── templates/ │ │ │ ├── NOTES.txt │ │ │ ├── _helpers.tpl │ │ │ ├── configmap.yaml │ │ │ ├── deployment.yaml │ │ │ ├── poddisruptionbudget.yaml │ │ │ ├── podmonitor.yaml │ │ │ ├── rbac.yaml │ │ │ └── serviceaccount.yaml │ │ ├── tests/ │ │ │ ├── README.md │ │ │ ├── __snapshot__/ │ │ │ │ └── configmap_test.yaml.snap │ │ │ └── configmap_test.yaml │ │ ├── values.linter.exceptions │ │ ├── values.schema.json │ │ └── values.yaml │ ├── discovery-agent/ │ │ ├── .helmignore │ │ ├── Chart.yaml │ │ ├── README.md │ │ ├── templates/ │ │ │ ├── NOTES.txt │ │ │ ├── _helpers.tpl │ │ │ ├── configmap.yaml │ │ │ ├── deployment.yaml │ │ │ ├── poddisruptionbudget.yaml │ │ │ ├── podmonitor.yaml │ │ │ ├── rbac.yaml │ │ │ └── serviceaccount.yaml │ │ ├── tests/ │ │ │ ├── configmap_test.yaml │ │ │ ├── deployment_test.yaml │ │ │ ├── poddisruptionbudget_test.yaml │ │ │ ├── podmonitor_test.yaml │ │ │ ├── rbac_test.yaml │ │ │ └── serviceaccount_test.yaml │ │ ├── values.linter.exceptions │ │ ├── values.schema.json │ │ └── values.yaml │ └── venafi-kubernetes-agent/ │ ├── 
.helmignore │ ├── Chart.yaml │ ├── README.md │ ├── crd_bases/ │ │ ├── crd.footer.yaml │ │ ├── crd.header-without-validations.yaml │ │ ├── crd.header.yaml │ │ └── jetstack.io_venaficonnections.yaml │ ├── templates/ │ │ ├── NOTES.txt │ │ ├── _helpers.tpl │ │ ├── _venafi-connection.tpl │ │ ├── configmap.yaml │ │ ├── deployment.yaml │ │ ├── poddisruptionbudget.yaml │ │ ├── podmonitor.yaml │ │ ├── rbac.yaml │ │ ├── serviceaccount.yaml │ │ ├── venafi-connection-crd.without-validations.yaml │ │ ├── venafi-connection-crd.yaml │ │ ├── venafi-connection-rbac.yaml │ │ └── venafi-rbac.yaml │ ├── tests/ │ │ ├── __snapshot__/ │ │ │ └── configmap_test.yaml.snap │ │ ├── configmap_test.yaml │ │ ├── deployment_test.yaml │ │ └── values/ │ │ └── custom-volumes.yaml │ ├── values.linter.exceptions │ ├── values.schema.json │ └── values.yaml ├── docs/ │ └── datagatherers/ │ ├── k8s-discovery.md │ ├── k8s-dynamic.md │ └── local.md ├── examples/ │ ├── cert-manager-agent.yaml │ ├── echo/ │ │ ├── example.json │ │ └── example2.json │ ├── localfile/ │ │ ├── config.yaml │ │ └── input.json │ ├── machinehub/ │ │ ├── config.yaml │ │ └── input.json │ ├── machinehub.yaml │ ├── one-shot-oidc.yaml │ └── one-shot-secret.yaml ├── go.mod ├── go.sum ├── hack/ │ ├── ark/ │ │ ├── cluster-external-secret.yaml │ │ ├── cluster-secret-store.yaml │ │ ├── conjur-connect-configmap.yaml │ │ ├── external-secret.yaml │ │ ├── secret-store.yaml │ │ └── test-e2e.sh │ ├── e2e/ │ │ ├── application-team-1.yaml │ │ ├── test.sh │ │ ├── values.venafi-kubernetes-agent.yaml │ │ └── venafi-components.yaml │ └── ngts/ │ ├── custom_ca.yaml │ └── test-e2e.sh ├── internal/ │ ├── cyberark/ │ │ ├── api/ │ │ │ ├── telemetry.go │ │ │ └── telemetry_test.go │ │ ├── client.go │ │ ├── client_test.go │ │ ├── dataupload/ │ │ │ ├── dataupload.go │ │ │ ├── dataupload_test.go │ │ │ └── mock.go │ │ ├── identity/ │ │ │ ├── advance_authentication_test.go │ │ │ ├── authenticated_http_client.go │ │ │ ├── cmd/ │ │ │ │ └── testidentity/ │ │ │ │ └── 
main.go │ │ │ ├── identity.go │ │ │ ├── identity_test.go │ │ │ ├── mock.go │ │ │ ├── start_authentication_test.go │ │ │ └── testdata/ │ │ │ ├── advance_authentication_failure.json │ │ │ ├── advance_authentication_success.json │ │ │ ├── start_authentication_bad_user_session_id.json │ │ │ ├── start_authentication_failure.json │ │ │ ├── start_authentication_success.json │ │ │ ├── start_authentication_success_multiple_challenges.json │ │ │ ├── start_authentication_success_multiple_mechanisms.json │ │ │ └── start_authentication_success_no_up_mechanism.json │ │ ├── servicediscovery/ │ │ │ ├── discovery.go │ │ │ ├── discovery_test.go │ │ │ ├── mock.go │ │ │ └── testdata/ │ │ │ ├── README.md │ │ │ └── discovery_success.json.template │ │ └── testing/ │ │ └── testing.go │ └── envelope/ │ ├── doc.go │ ├── keyfetch/ │ │ ├── client.go │ │ ├── client_test.go │ │ ├── doc.go │ │ ├── fake.go │ │ └── fake_test.go │ ├── rsa/ │ │ ├── doc.go │ │ ├── encryptor.go │ │ ├── encryptor_test.go │ │ ├── keys.go │ │ └── keys_test.go │ └── types.go ├── klone.yaml ├── main.go ├── make/ │ ├── 00_mod.mk │ ├── 02_mod.mk │ ├── _shared/ │ │ ├── generate-verify/ │ │ │ ├── 00_mod.mk │ │ │ ├── 02_mod.mk │ │ │ └── util/ │ │ │ └── verify.sh │ │ ├── go/ │ │ │ ├── .golangci.override.yaml │ │ │ ├── 01_mod.mk │ │ │ ├── README.md │ │ │ └── base/ │ │ │ └── .github/ │ │ │ └── workflows/ │ │ │ └── govulncheck.yaml │ │ ├── helm/ │ │ │ ├── 01_mod.mk │ │ │ ├── crd.template.footer.yaml │ │ │ ├── crd.template.header.yaml │ │ │ ├── crds.mk │ │ │ ├── crds_dir.README.md │ │ │ ├── deploy.mk │ │ │ └── helm.mk │ │ ├── help/ │ │ │ ├── 01_mod.mk │ │ │ └── help.sh │ │ ├── kind/ │ │ │ ├── 00_kind_image_versions.mk │ │ │ ├── 00_mod.mk │ │ │ ├── 01_mod.mk │ │ │ ├── kind-image-preload.mk │ │ │ └── kind.mk │ │ ├── klone/ │ │ │ └── 01_mod.mk │ │ ├── licenses/ │ │ │ ├── 00_mod.mk │ │ │ ├── 01_mod.mk │ │ │ └── licenses.tmpl │ │ ├── oci-build/ │ │ │ ├── 00_mod.mk │ │ │ └── 01_mod.mk │ │ ├── oci-publish/ │ │ │ ├── 00_mod.mk │ │ │ ├── 
01_mod.mk │ │ │ └── image-exists.sh │ │ ├── repository-base/ │ │ │ ├── 01_mod.mk │ │ │ ├── base/ │ │ │ │ ├── .github/ │ │ │ │ │ ├── chainguard/ │ │ │ │ │ │ └── make-self-upgrade.sts.yaml │ │ │ │ │ └── workflows/ │ │ │ │ │ └── make-self-upgrade.yaml │ │ │ │ ├── Makefile │ │ │ │ └── OWNERS_ALIASES │ │ │ └── renovate-bootstrap-config.json5 │ │ └── tools/ │ │ ├── 00_mod.mk │ │ └── util/ │ │ ├── checkhash.sh │ │ ├── hash.sh │ │ └── lock.sh │ ├── ark/ │ │ ├── 00_mod.mk │ │ └── 02_mod.mk │ ├── connection_crd/ │ │ └── main.go │ ├── extra_tools.mk │ ├── ngts/ │ │ ├── 00_mod.mk │ │ └── 02_mod.mk │ └── test-unit.mk └── pkg/ ├── agent/ │ ├── config.go │ ├── config_test.go │ ├── dummy_data_gatherer.go │ ├── metrics.go │ └── run.go ├── client/ │ ├── client.go │ ├── client_api_token.go │ ├── client_cyberark.go │ ├── client_cyberark_convertdatareadings_test.go │ ├── client_cyberark_test.go │ ├── client_file.go │ ├── client_file_test.go │ ├── client_ngts.go │ ├── client_ngts_test.go │ ├── client_oauth.go │ ├── client_venafi_cloud.go │ ├── client_venconn.go │ ├── client_venconn_test.go │ └── util.go ├── datagatherer/ │ ├── datagatherer.go │ ├── k8sdiscovery/ │ │ └── discovery.go │ ├── k8sdynamic/ │ │ ├── cache.go │ │ ├── cache_test.go │ │ ├── dynamic.go │ │ ├── dynamic_test.go │ │ ├── fieldfilter.go │ │ └── fieldfilter_test.go │ ├── local/ │ │ └── local.go │ └── oidc/ │ ├── oidc.go │ └── oidc_test.go ├── echo/ │ ├── echo.go │ └── echo_test.go ├── kubeconfig/ │ ├── client.go │ ├── client_test.go │ └── kubeconfig.go ├── logs/ │ ├── logs.go │ └── logs_test.go ├── permissions/ │ ├── generate.go │ └── generate_test.go ├── testutil/ │ ├── envtest.go │ ├── undent.go │ └── undent_test.go └── version/ └── version.go ================================================ FILE CONTENTS ================================================ ================================================ FILE: .envrc.template ================================================ # Example .envrc file for use with direnv. 
# Copy this file to .envrc and edit the values as required. # Do not check in your .envrc file to source control as it may contain secrets. # The following variables are required by the E2E test script: ./hack/e2e/test.sh. export VEN_API_KEY= # your Venafi Cloud API key with full permissions export VEN_API_KEY_PULL= # your Venafi Cloud API key with pull-only permissions export VEN_ZONE= # the Venafi Cloud zone to use for certificate requests export VEN_VCP_REGION= # the Venafi Cloud region to use (us or eu) export VEN_API_HOST= # the Venafi Cloud API host (usually api.venafi.cloud or api.venafi.eu) export OCI_BASE= # the base URL for the OCI registry where the Agent chart and image will be pushed export CLOUDSDK_CORE_PROJECT= # the GCP project ID where a GKE cluster will be created. export CLOUDSDK_COMPUTE_ZONE= # the GCP zone where a GKE cluster will be created. E.g. europe-west2-b export CLUSTER_NAME= # the name of the GKE cluster which will be created. E.g. cluster-1 # The following variables are required for CyberArk / MachineHub integration tests. export ARK_SUBDOMAIN= # your CyberArk tenant subdomain e.g. tlskp-test export ARK_USERNAME= # your CyberArk username export ARK_SECRET= # your CyberArk password # OPTIONAL: the URL for the CyberArk Discovery API if not using the production environment export ARK_DISCOVERY_API=https://platform-discovery.integration-cyberark.cloud/ ================================================ FILE: .github/ISSUE_TEMPLATE/bug_report.md ================================================ --- name: Bug report about: Issue for something that isn't working as expected title: '' labels: '' assignees: '' --- **What happened?** What is the current bug behavior? Give all the context you can, provide relevant logs and/or screenshots. **What should have happened?** Describe what you expected to happen. **Possible fixes** This section is optional and should include possible solutions to explore and discuss further. 
================================================ FILE: .github/actions/repo_access/action.yaml ================================================ name: 'Setup repo access' description: 'Sets up authentication to GitHub repos' inputs: DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB: required: true description: "DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB secret" outputs: {} runs: using: "composite" steps: - name: Configure jetstack/venafi-connection-lib repo pull access shell: bash run: | mkdir ~/.ssh chmod 700 ~/.ssh echo "${{ inputs.DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB }}" > ~/.ssh/venafi_connection_lib_id chmod 600 ~/.ssh/venafi_connection_lib_id cat <<EOT >> ~/.ssh/config Host venafi-connection-lib.github.com HostName github.com IdentityFile ~/.ssh/venafi_connection_lib_id IdentitiesOnly yes EOT cat <<EOT >> ~/.gitconfig [url "git@venafi-connection-lib.github.com:jetstack/venafi-connection-lib"] insteadOf = https://github.com/jetstack/venafi-connection-lib EOT echo "GOPRIVATE=github.com/jetstack/venafi-connection-lib" >> $GITHUB_ENV ================================================ FILE: .github/chainguard/make-self-upgrade.sts.yaml ================================================ # THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. # Edit https://github.com/cert-manager/makefile-modules/blob/main/modules/repository-base/base/.github/chainguard/make-self-upgrade.sts.yaml instead. 
issuer: https://token.actions.githubusercontent.com subject_pattern: ^repo:jetstack/jetstack-secure:ref:refs/heads/(main|master)$ permissions: contents: write pull_requests: write workflows: write ================================================ FILE: .github/renovate.json5 ================================================ { $schema: 'https://docs.renovatebot.com/renovate-schema.json', extends: [ 'github>cert-manager/makefile-modules:renovate-config.json5', ], } ================================================ FILE: .github/workflows/govulncheck.yaml ================================================ # This file is MANUALLY maintained, but was originally based on the makefile-modules govulncheck workflow. See the original: # https://github.com/cert-manager/makefile-modules/blob/main/modules/go/base/.github/workflows/govulncheck.yaml # This file is separated from the upstream file so we can add additional auth for pulling # private dependencies. Govulncheck doesn't seem to be able to support skipping private # dependencies. # Run govulncheck at midnight every night on the main branch, # to alert us to recent vulnerabilities which affect the Go code in this # project. name: govulncheck on: workflow_dispatch: {} schedule: - cron: '0 0 * * *' permissions: contents: read jobs: govulncheck: runs-on: ubuntu-latest if: github.repository == 'jetstack/jetstack-secure' steps: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 # Adding `fetch-depth: 0` makes sure tags are also fetched. We need # the tags so `git describe` returns a valid version. # see https://github.com/actions/checkout/issues/701 for extra info about this option with: { fetch-depth: 0 } # NOTE: This step is the change from the upstream workflow. # We need credentials to pull the private dependency. 
- uses: ./.github/actions/repo_access with: DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB: ${{ secrets.DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB }} - id: go-version run: | make print-go-version >> "$GITHUB_OUTPUT" - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 with: go-version: ${{ steps.go-version.outputs.result }} - run: make verify-govulncheck ================================================ FILE: .github/workflows/make-self-upgrade.yaml ================================================ # THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. # Edit https://github.com/cert-manager/makefile-modules/blob/main/modules/repository-base/base/.github/workflows/make-self-upgrade.yaml instead. name: make-self-upgrade concurrency: make-self-upgrade on: workflow_dispatch: {} schedule: - cron: '0 0 * * *' permissions: contents: read jobs: self_upgrade: runs-on: ubuntu-latest if: github.repository == 'jetstack/jetstack-secure' permissions: id-token: write env: SOURCE_BRANCH: "${{ github.ref_name }}" SELF_UPGRADE_BRANCH: "self-upgrade-${{ github.ref_name }}" steps: - name: Fail if branch is not head of branch. if: ${{ !startsWith(github.ref, 'refs/heads/') && env.SOURCE_BRANCH != '' && env.SELF_UPGRADE_BRANCH != '' }} run: | echo "This workflow should not be run on a non-branch-head." exit 1 - name: Octo STS Token Exchange uses: octo-sts/action@f603d3be9d8dd9871a265776e625a27b00effe05 # v1.1.1 id: octo-sts with: scope: 'jetstack/jetstack-secure' identity: make-self-upgrade - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 # Adding `fetch-depth: 0` makes sure tags are also fetched. We need # the tags so `git describe` returns a valid version. 
# see https://github.com/actions/checkout/issues/701 for extra info about this option with: fetch-depth: 0 token: ${{ steps.octo-sts.outputs.token }} - id: go-version run: | make print-go-version >> "$GITHUB_OUTPUT" - uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0 with: go-version: ${{ steps.go-version.outputs.result }} - run: | git checkout -B "$SELF_UPGRADE_BRANCH" - run: | make -j upgrade-klone make -j generate - id: is-up-to-date shell: bash run: | git_status=$(git status -s) is_up_to_date="true" if [ -n "$git_status" ]; then is_up_to_date="false" echo "The following changes will be committed:" echo "$git_status" fi echo "result=$is_up_to_date" >> "$GITHUB_OUTPUT" - if: ${{ steps.is-up-to-date.outputs.result != 'true' }} run: | git config --global user.name "cert-manager-bot" git config --global user.email "cert-manager-bot@users.noreply.github.com" git add -A && git commit -m "BOT: run 'make upgrade-klone' and 'make generate'" --signoff git push -f origin "$SELF_UPGRADE_BRANCH" - if: ${{ steps.is-up-to-date.outputs.result != 'true' }} uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0 with: github-token: ${{ steps.octo-sts.outputs.token }} script: | const { repo, owner } = context.repo; const pulls = await github.rest.pulls.list({ owner: owner, repo: repo, head: owner + ':' + process.env.SELF_UPGRADE_BRANCH, base: process.env.SOURCE_BRANCH, state: 'open', }); if (pulls.data.length < 1) { const result = await github.rest.pulls.create({ title: '[CI] Merge ' + process.env.SELF_UPGRADE_BRANCH + ' into ' + process.env.SOURCE_BRANCH, owner: owner, repo: repo, head: process.env.SELF_UPGRADE_BRANCH, base: process.env.SOURCE_BRANCH, body: [ 'This PR is auto-generated to bump the Makefile modules.', ].join('\n'), }); await github.rest.issues.addLabels({ owner, repo, issue_number: result.data.number, labels: ['ok-to-test', 'skip-review', 'release-note-none', 'kind/cleanup'] }); } 
================================================ FILE: .github/workflows/release.yml ================================================ name: release on: push: tags: - "v*" env: VERSION: ${{ github.ref_name }} jobs: build_and_push: runs-on: ubuntu-latest permissions: contents: read # needed for checkout id-token: write # needed for keyless signing & google auth steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - uses: ./.github/actions/repo_access with: DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB: ${{ secrets.DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB }} - id: go-version run: | make print-go-version >> "$GITHUB_OUTPUT" - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 with: registry: quay.io username: ${{ secrets.QUAY_USERNAME }} password: ${{ secrets.QUAY_PASSWORD }} - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: go-version: ${{ steps.go-version.outputs.result }} - id: release run: make release ark-release ngts-release outputs: RELEASE_OCI_PREFLIGHT_IMAGE: ${{ steps.release.outputs.RELEASE_OCI_PREFLIGHT_IMAGE }} RELEASE_OCI_PREFLIGHT_TAG: ${{ steps.release.outputs.RELEASE_OCI_PREFLIGHT_TAG }} RELEASE_HELM_CHART_IMAGE: ${{ steps.release.outputs.RELEASE_HELM_CHART_IMAGE }} RELEASE_HELM_CHART_VERSION: ${{ steps.release.outputs.RELEASE_HELM_CHART_VERSION }} ARK_IMAGE: ${{ steps.release.outputs.ARK_IMAGE }} ARK_IMAGE_TAG: ${{ steps.release.outputs.ARK_IMAGE_TAG }} ARK_IMAGE_DIGEST: ${{ steps.release.outputs.ARK_IMAGE_DIGEST }} ARK_CHART: ${{ steps.release.outputs.ARK_CHART }} ARK_CHART_TAG: ${{ steps.release.outputs.ARK_CHART_TAG }} ARK_CHART_DIGEST: ${{ steps.release.outputs.ARK_CHART_DIGEST }} NGTS_IMAGE: ${{ steps.release.outputs.NGTS_IMAGE }} NGTS_IMAGE_TAG: ${{ steps.release.outputs.NGTS_IMAGE_TAG }} NGTS_IMAGE_DIGEST: ${{ steps.release.outputs.NGTS_IMAGE_DIGEST }} NGTS_CHART: ${{ steps.release.outputs.NGTS_CHART }} NGTS_CHART_TAG: ${{ steps.release.outputs.NGTS_CHART_TAG }} 
NGTS_CHART_DIGEST: ${{ steps.release.outputs.NGTS_CHART_DIGEST }} github_release: runs-on: ubuntu-latest needs: build_and_push permissions: contents: write # needed for creating a PR pull-requests: write # needed for creating a PR steps: - run: | touch .notes-file echo "OCI_PREFLIGHT_IMAGE: ${{ needs.build_and_push.outputs.RELEASE_OCI_PREFLIGHT_IMAGE }}" >> .notes-file echo "OCI_PREFLIGHT_TAG: ${{ needs.build_and_push.outputs.RELEASE_OCI_PREFLIGHT_TAG }}" >> .notes-file echo "HELM_CHART_IMAGE: ${{ needs.build_and_push.outputs.RELEASE_HELM_CHART_IMAGE }}" >> .notes-file echo "HELM_CHART_VERSION: ${{ needs.build_and_push.outputs.RELEASE_HELM_CHART_VERSION }}" >> .notes-file echo "ARK_IMAGE: ${{ needs.build_and_push.outputs.ARK_IMAGE }}" >> .notes-file echo "ARK_IMAGE_TAG: ${{ needs.build_and_push.outputs.ARK_IMAGE_TAG }}" >> .notes-file echo "ARK_IMAGE_DIGEST: ${{ needs.build_and_push.outputs.ARK_IMAGE_DIGEST }}" >> .notes-file echo "ARK_CHART: ${{ needs.build_and_push.outputs.ARK_CHART }}" >> .notes-file echo "ARK_CHART_TAG: ${{ needs.build_and_push.outputs.ARK_CHART_TAG }}" >> .notes-file echo "ARK_CHART_DIGEST: ${{ needs.build_and_push.outputs.ARK_CHART_DIGEST }}" >> .notes-file echo "NGTS_IMAGE: ${{ needs.build_and_push.outputs.NGTS_IMAGE }}" >> .notes-file echo "NGTS_IMAGE_TAG: ${{ needs.build_and_push.outputs.NGTS_IMAGE_TAG }}" >> .notes-file echo "NGTS_IMAGE_DIGEST: ${{ needs.build_and_push.outputs.NGTS_IMAGE_DIGEST }}" >> .notes-file echo "NGTS_CHART: ${{ needs.build_and_push.outputs.NGTS_CHART }}" >> .notes-file echo "NGTS_CHART_TAG: ${{ needs.build_and_push.outputs.NGTS_CHART_TAG }}" >> .notes-file echo "NGTS_CHART_DIGEST: ${{ needs.build_and_push.outputs.NGTS_CHART_DIGEST }}" >> .notes-file - env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | gh release create "$VERSION" \ --repo="$GITHUB_REPOSITORY" \ --title="${VERSION}" \ --draft \ --verify-tag \ --notes-file .notes-file ================================================ FILE: 
.github/workflows/tests.yaml ================================================ name: tests on: push: branches: [master] pull_request: {} jobs: verify: runs-on: ubuntu-latest timeout-minutes: 15 steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 # Adding `fetch-depth: 0` makes sure tags are also fetched. We need # the tags so `git describe` returns a valid version. # see https://github.com/actions/checkout/issues/701 for extra info about this option with: { fetch-depth: 0 } - uses: ./.github/actions/repo_access with: DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB: ${{ secrets.DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB }} - id: go-version run: | make print-go-version >> "$GITHUB_OUTPUT" - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: go-version: ${{ steps.go-version.outputs.result }} - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 with: path: _bin/downloaded key: downloaded-${{ runner.os }}-${{ hashFiles('klone.yaml') }}-verify - run: make -j verify test: runs-on: ubuntu-latest timeout-minutes: 15 permissions: contents: read # needed for checkout id-token: write # needed for google auth steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 # Adding `fetch-depth: 0` makes sure tags are also fetched. We need # the tags so `git describe` returns a valid version. 
# see https://github.com/actions/checkout/issues/701 for extra info about this option with: { fetch-depth: 0 } - uses: ./.github/actions/repo_access with: DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB: ${{ secrets.DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB }} - id: go-version run: | make print-go-version >> "$GITHUB_OUTPUT" - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: go-version: ${{ steps.go-version.outputs.result }} - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 with: path: _bin/downloaded key: downloaded-${{ runner.os }}-${{ hashFiles('klone.yaml') }}-test-unit # NB: helm unit tests will be run by "make verify", so we don't run it here - run: make -j test-unit env: # These environment variables are required to run the CyberArk client integration tests ARK_DISCOVERY_API: https://platform-discovery.integration-cyberark.cloud/ ARK_SUBDOMAIN: ${{ secrets.ARK_SUBDOMAIN }} ARK_USERNAME: ${{ secrets.ARK_USERNAME }} ARK_SECRET: ${{ secrets.ARK_SECRET }} ark-test-e2e: # TEMPORARY: require an explicit label to test disco-agent until the test environment fixes a recurring issue # where the e2e fails with a 400 error relating to "conflicting tagging values" # The test is flaky, not broken and re-running eventually makes it pass - but that delays progress on # other unrelated work. if: contains(github.event.pull_request.labels.*.name, 'test-ark') runs-on: ubuntu-latest steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 # Adding `fetch-depth: 0` makes sure tags are also fetched. We need # the tags so `git describe` returns a valid version. 
# see https://github.com/actions/checkout/issues/701 for extra info about this option with: { fetch-depth: 0 } - uses: ./.github/actions/repo_access with: DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB: ${{ secrets.DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB }} - id: go-version run: | make print-go-version >> "$GITHUB_OUTPUT" - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: go-version: ${{ steps.go-version.outputs.result }} - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 with: path: _bin/downloaded key: downloaded-${{ runner.os }}-${{ hashFiles('klone.yaml') }}-test-unit - run: make -j ark-test-e2e env: OCI_BASE: ${{ secrets.ARK_OCI_BASE }} # These environment variables are required to connect to CyberArk Disco APIs ARK_DISCOVERY_API: https://platform-discovery.integration-cyberark.cloud/ ARK_SUBDOMAIN: ${{ secrets.ARK_SUBDOMAIN }} ARK_USERNAME: ${{ secrets.ARK_USERNAME }} ARK_SECRET: ${{ secrets.ARK_SECRET }} ngts-test-e2e: # TEMPORARY: require an explicit label to test NGTS until we have a stable test environment if: contains(github.event.pull_request.labels.*.name, 'test-ngts') runs-on: ubuntu-latest steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 # Adding `fetch-depth: 0` makes sure tags are also fetched. We need # the tags so `git describe` returns a valid version. 
# see https://github.com/actions/checkout/issues/701 for extra info about this option with: { fetch-depth: 0 } - uses: ./.github/actions/repo_access with: DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB: ${{ secrets.DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB }} - id: go-version run: | make print-go-version >> "$GITHUB_OUTPUT" - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: go-version: ${{ steps.go-version.outputs.result }} - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 with: path: _bin/downloaded key: downloaded-${{ runner.os }}-${{ hashFiles('klone.yaml') }}-test-unit - run: make -j ngts-test-e2e env: OCI_BASE: ${{ secrets.NGTS_OCI_BASE }} NGTS_CLIENT_ID: ${{ secrets.NGTS_CLIENT_ID }} NGTS_PRIVATE_KEY: ${{ secrets.NGTS_PRIVATE_KEY }} NGTS_TSG_ID: ${{ secrets.NGTS_TSG_ID }} test-e2e: if: contains(github.event.pull_request.labels.*.name, 'test-e2e') runs-on: ubuntu-latest steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 # Adding `fetch-depth: 0` makes sure tags are also fetched. We need # the tags so `git describe` returns a valid version. 
# see https://github.com/actions/checkout/issues/701 for extra info about this option with: { fetch-depth: 0 } - uses: ./.github/actions/repo_access with: DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB: ${{ secrets.DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB }} - name: Authenticate to Google Cloud uses: google-github-actions/auth@7c6bc770dae815cd3e89ee6cdf493a5fab2cc093 # v3.0.0 with: credentials_json: '${{ secrets.GCP_SA_KEY }}' - name: Set up gcloud uses: google-github-actions/setup-gcloud@aa5489c8933f4cc7a4f7d45035b3b1440c9c10db # v3.0.1 with: install_components: "gke-gcloud-auth-plugin" project_id: machineidentitysecurity-jsci-e - name: Configure Docker for Google Artifact Registry run: gcloud auth configure-docker europe-west1-docker.pkg.dev - id: go-version run: | make print-go-version >> "$GITHUB_OUTPUT" - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: go-version: ${{ steps.go-version.outputs.result }} - name: Generate timestamp for cluster name id: timestamp # Give the step an ID to reference its output run: | # Generate a timestamp in the format YYMMDD-HHMMSS. # Extracting from PR name would require sanitization due to GKE cluster naming constraints TIMESTAMP=$(date +'%y%m%d-%H%M%S') CLUSTER_NAME="test-secretless-${TIMESTAMP}" echo "Generated cluster name: ${CLUSTER_NAME}" echo "cluster_name=${CLUSTER_NAME}" >> $GITHUB_OUTPUT - run: | make helm-plugins make -j test-e2e-gke # The VEN_API_KEY_PULL secret is set to my API key (Mladen) for glow.in.the.dark tenant. 
env: VEN_API_KEY: ${{ secrets.VEN_API_KEY_PULL }} VEN_API_KEY_PULL: ${{ secrets.VEN_API_KEY_PULL }} OCI_BASE: europe-west1-docker.pkg.dev/machineidentitysecurity-jsci-e/js-agent-ci-repo VEN_API_HOST: api.venafi.cloud VEN_ZONE: k8s-agent-CI\Default VEN_VCP_REGION: us CLOUDSDK_CORE_PROJECT: machineidentitysecurity-jsci-e CLOUDSDK_COMPUTE_ZONE: europe-west1-b CLUSTER_NAME: ${{ steps.timestamp.outputs.cluster_name }} - name: Delete GKE Cluster # 'always()' - Run this step regardless of success or failure. # '!contains(...)' - AND only run if the list of PR labels DOES NOT contain 'keep-e2e-cluster'. # NOTE: You will have to delete the test cluster manually when finished with debugging or incur costs. if: always() && !contains(github.event.pull_request.labels.*.name, 'keep-e2e-cluster') run: | echo "Label 'keep-e2e-cluster' not found. Cleaning up GKE cluster ${{ steps.timestamp.outputs.cluster_name }}" gcloud container clusters delete ${{ steps.timestamp.outputs.cluster_name }} \ --project=machineidentitysecurity-jsci-e \ --zone=europe-west1-b \ --quiet ================================================ FILE: .gitignore ================================================ /preflight /preflight.yaml /builds /bundles /output credentials.json .terraform terraform.tfstate terraform.tfstate.backup bom.xml predicate.json *.pem *.pub *.tgz _bin .envrc ================================================ FILE: .golangci.yaml ================================================ version: "2" linters: default: none exclusions: generated: lax presets: [comments, common-false-positives, legacy, std-error-handling] rules: - linters: - errchkjson - forbidigo - gosec - musttag - nilerr - unparam text: .* paths: [third_party, builtin$, examples$] warn-unused: true settings: staticcheck: checks: ["all", "-ST1000", "-ST1001", "-ST1003", "-ST1005", "-ST1012", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-QF1001", "-QF1003", "-QF1008"] enable: - asasalint - asciicheck - bidichk - bodyclose - 
canonicalheader - contextcheck - copyloopvar - decorder - dogsled - dupword - durationcheck - errcheck - errchkjson - errname - exhaustive - exptostd - forbidigo - ginkgolinter - gocheckcompilerdirectives - gochecksumtype - gocritic - goheader - goprintffuncname - gosec - gosmopolitan - govet - grouper - importas - ineffassign - interfacebloat - intrange - loggercheck - makezero - mirror - misspell - modernize - musttag - nakedret - nilerr - nilnil - noctx - nosprintfhostport - predeclared - promlinter - protogetter - reassign - sloglint - staticcheck - tagalign - testableexamples - unconvert - unparam - unused - usestdlibvars - usetesting - wastedassign formatters: enable: [gci, gofmt] settings: gci: sections: - standard # Standard section: captures all standard packages. - default # Default section: contains all imports that could not be matched to another section type. - localmodule # Local module section: contains all local packages. This section is not present unless explicitly enabled. - blank # Blank section: contains all blank imports. This section is not present unless explicitly enabled. - dot # Dot section: contains all dot imports. This section is not present unless explicitly enabled. custom-order: true exclusions: generated: lax paths: [third_party, builtin$, examples$] ================================================ FILE: CONTRIBUTING.md ================================================ # Contributing to Discovery Agent Thank you for your interest in contributing! This document provides guidelines and instructions for contributing. 
Note that this repository holds two separate components: - disco-agent: For CyberArk DisCo - venafi-kubernetes-agent: For TLSPK / Certificate Manager SaaS ## Table of Contents - [Getting Started](#getting-started) - [Development Environment](#development-environment) - [Making Changes](#making-changes) - [Testing](#testing) - [Submitting a Pull Request](#submitting-a-pull-request) - [Code Review Process](#code-review-process) - [Additional Resources](#additional-resources) ### Prerequisites Before you begin, ensure you have the following installed: - [Go](https://golang.org/doc/install) (version specified in `go.mod`) - [Make](https://www.gnu.org/software/make/) - [Git](https://git-scm.com/) - [Docker](https://docs.docker.com/get-docker/) (for building container images) To check which Go version will be used: ```bash make which-go ``` It's also possible to use a vendored version of Go, via `make vendor-go`. ### Repository Tooling Most of the setup logic for provisioning tooling and for handling builds and testing is defined in Makefile logic. Specifically, `the make/_shared` directory contains shared Makefile logic derived from the cert-manager [makefile-modules](https://github.com/cert-manager/makefile-modules/) project. ### Setting Up Your Development Environment 1. **Fork the repository** on GitHub 2. **Clone your fork:** ```bash git clone git@github.com:YOUR-USERNAME/jetstack-secure.git cd jetstack-secure ``` 3. **Add the upstream remote:** ```bash git remote add upstream git@github.com:jetstack/jetstack-secure.git ``` 4. **Run initial verification:** ```bash make verify ``` This ensures your environment is set up correctly. 
## Development Environment ### Local Execution To build and run the agent locally: ```bash go run main.go agent --agent-config-file ./path/to/agent/config/file.yaml -p 0h1m0s ``` Example configuration files are available: - [agent.yaml](./agent.yaml) - [examples/one-shot-secret.yaml](./examples/one-shot-secret.yaml) - [examples/cert-manager-agent.yaml](./examples/cert-manager-agent.yaml) You can also run a local echo server to monitor agent requests: ```bash go run main.go echo ``` ### Useful Make Targets - `make help` - Show all available make targets - `make verify` - Run all verification checks (linting, formatting, etc.) - `make test-unit` - Run unit tests - `make test-helm` - Run Helm chart tests - `make generate` - Generate code, documentation, and other artifacts - `make oci-build-preflight` - Build container image - `make clean` - Clean all temporary files ## Making Changes ### Creating a Branch Always create a new branch for your changes: ```bash git checkout -b feature/your-feature-name ``` Use descriptive branch names: - `feature/` for new features - `fix/` for bug fixes - `docs/` for documentation changes - `refactor/` for refactoring ### Code Style This project follows standard Go conventions: - Run `make verify-golangci-lint` to check your code - Run `make fix-golangci-lint` to automatically fix some issues - Ensure all code is formatted with `gofmt` - Follow the [Effective Go](https://golang.org/doc/effective_go) guidelines - Most of the conventions are enforced by linters, and violations will prevent code being merged ### Committing Changes 1. **Stage your changes:** ```bash git add . ``` 2. **Run verification before committing:** ```bash make verify ``` 3. 
**Commit with a descriptive message:** ```bash git commit -m "Brief description of your changes" ``` Write clear commit messages: - Use the imperative mood ("Add feature" not "Added feature") - Keep the first line under 72 characters - Add additional context in the body if needed ## Testing ### Running Tests Locally Before submitting a PR, ensure all tests pass: ```bash # Run unit tests make test-unit # Run Helm tests make test-helm # Run all verification checks make verify ``` ### End-to-End Tests E2E tests run automatically in CI when you add specific labels to your PR: - Add the `test-e2e` label to trigger GKE-based E2E tests - Add the `keep-e2e-cluster` label if you need to keep the cluster for debugging (remember to delete it manually afterward to avoid costs) The E2E test script is located at [hack/e2e/test.sh](./hack/e2e/test.sh). ### Writing Tests - Add unit tests for all new functionality - Place tests in `*_test.go` files alongside the code they test - Use the [testify](https://github.com/stretchr/testify) library for assertions - Aim for meaningful test coverage, not just high percentages ## Submitting a Pull Request 1. **Push your branch to your fork:** ```bash git push origin feature/your-feature-name ``` 2. **Create a Pull Request** on GitHub from your fork to the `master` branch of `jetstack/jetstack-secure` 3. **Fill out the PR description** with: - Clear description of the changes - Related issue numbers (if applicable) - Testing instructions - Any breaking changes or special considerations 4. 
**Ensure CI passes:** - All tests must pass - Code must pass verification / linting checks - No merge conflicts ## Code Review Process ### For All Contributors - PRs require approval before merging - Keep PRs focused and reasonably sized - Update your branch if `master` has moved forward: ```bash git fetch upstream git rebase upstream/master git push --force-with-lease origin feature/your-feature-name ``` ### For CyberArk Contributors **Contributors from inside CyberArk should reach out to the cert-manager team for reviews for PRs which are passing CI.** The cert-manager team maintains this project and will provide code reviews and guidance for merging changes. ## Additional Resources - [Project Documentation](https://docs.cyberark.com/mis-saas/vaas/k8s-components/c-tlspk-agent-overview/) - [Issue Tracker](https://github.com/jetstack/jetstack-secure/issues) - [Release Process](./RELEASE.md) - [cert-manager Community](https://cert-manager.io/docs/contributing/) ## Getting Help If you need help or have questions: 1. Check existing [issues](https://github.com/jetstack/jetstack-secure/issues) and [documentation](https://docs.cyberark.com/mis-saas/vaas/k8s-components/c-tlspk-agent-overview/) 2. Open a new issue with the `question` label 3. For CyberArk contributors, reach out to the cert-manager team ## License By contributing, you agree that your contributions will be licensed under the license in the LICENSE file in the root directory of this repository. ================================================ FILE: LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. 
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: LICENSES ================================================ This LICENSES file is generated by the `licenses` module in makefile-modules[0]. The licenses below the "---" are determined by the go-licenses tool[1]. The aim of this file is to collect the licenses of all dependencies, and provide a single source of truth for licenses used by this project. ## For Developers If CI reports that this file is out of date, you should be careful to check that the new licenses are acceptable for this project before running `make generate-go-licenses` to update this file. Acceptable licenses are those allowlisted by the CNCF[2]. You MUST NOT add any new dependencies whose licenses are not allowlisted by the CNCF, or which do not have an explicit license exception[3]. ## For Users If this file was included in a release artifact, it is a snapshot of the licenses of all dependencies at the time of the release. 
You can retrieve the actual license text by following these steps: 1. Find the dependency name in this file 2. Go to the source code repository of this project, and go to the tag corresponding to this release. 3. Find the exact version of the dependency in the `go.mod` file 4. Search for the dependency at the correct version in the [Go package index](https://pkg.go.dev/). ## Links [0]: https://github.com/cert-manager/makefile-modules/ [1]: https://github.com/google/go-licenses [2]: https://github.com/cncf/foundation/blob/db4179134ebe7fa00b140a050c19147db808b6fa/policies-guidance/allowed-third-party-license-policy.md#cncf-allowlist-license-policy [3]: https://github.com/cncf/foundation/blob/db4179134ebe7fa00b140a050c19147db808b6fa/license-exceptions/README.md --- cel.dev/expr,Apache-2.0 github.com/Khan/genqlient/graphql,MIT github.com/Venafi/vcert/v5,Apache-2.0 github.com/antlr4-go/antlr/v4,BSD-3-Clause github.com/aymerick/douceur,MIT github.com/beorn7/perks/quantile,MIT github.com/blang/semver/v4,MIT github.com/cenkalti/backoff/v5,MIT github.com/cespare/xxhash/v2,MIT github.com/davecgh/go-spew/spew,ISC github.com/emicklei/go-restful/v3,MIT github.com/evanphx/json-patch/v5,BSD-3-Clause github.com/fatih/color,MIT github.com/fsnotify/fsnotify,BSD-3-Clause github.com/fxamacker/cbor/v2,MIT github.com/go-http-utils/headers,MIT github.com/go-logr/logr,Apache-2.0 github.com/go-logr/zapr,Apache-2.0 github.com/go-openapi/jsonpointer,Apache-2.0 github.com/go-openapi/jsonreference,Apache-2.0 github.com/go-openapi/swag,Apache-2.0 github.com/go418/concurrentcache,Apache-2.0 github.com/go418/concurrentcache/logger,Apache-2.0 github.com/gogo/protobuf,BSD-3-Clause github.com/golang-jwt/jwt/v4,MIT github.com/golang-jwt/jwt/v5,MIT github.com/google/btree,Apache-2.0 github.com/google/cel-go,Apache-2.0 github.com/google/cel-go,BSD-3-Clause github.com/google/gnostic-models,Apache-2.0 github.com/google/uuid,BSD-3-Clause github.com/gorilla/css/scanner,BSD-3-Clause 
github.com/gorilla/websocket,BSD-2-Clause github.com/hashicorp/errwrap,MPL-2.0 github.com/hashicorp/go-multierror,MPL-2.0 github.com/josharian/intern,MIT github.com/json-iterator/go,MIT github.com/lestrrat-go/blackmagic,MIT github.com/lestrrat-go/httpcc,MIT github.com/lestrrat-go/httprc/v3,MIT github.com/lestrrat-go/jwx/v3,MIT github.com/lestrrat-go/option/v2,MIT github.com/mailru/easyjson,MIT github.com/mattn/go-colorable,MIT github.com/mattn/go-isatty,MIT github.com/microcosm-cc/bluemonday,BSD-3-Clause github.com/modern-go/concurrent,Apache-2.0 github.com/modern-go/reflect2,Apache-2.0 github.com/munnerz/goautoneg,BSD-3-Clause github.com/pkg/errors,BSD-2-Clause github.com/pmezard/go-difflib/difflib,BSD-3-Clause github.com/pmylund/go-cache,MIT github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil,BSD-3-Clause github.com/prometheus/client_golang/prometheus,Apache-2.0 github.com/prometheus/client_model/go,Apache-2.0 github.com/prometheus/common,Apache-2.0 github.com/prometheus/procfs,Apache-2.0 github.com/sosodev/duration,MIT github.com/spf13/cobra,Apache-2.0 github.com/spf13/pflag,BSD-3-Clause github.com/stoewer/go-strcase,MIT github.com/stretchr/testify,MIT github.com/vektah/gqlparser/v2,MIT github.com/x448/float16,MIT github.com/youmark/pkcs8,MIT go.opentelemetry.io/otel,Apache-2.0 go.opentelemetry.io/otel/trace,Apache-2.0 go.uber.org/multierr,MIT go.uber.org/zap,MIT go.yaml.in/yaml/v2,Apache-2.0 go.yaml.in/yaml/v3,MIT golang.org/x/crypto,BSD-3-Clause golang.org/x/exp,BSD-3-Clause golang.org/x/net,BSD-3-Clause golang.org/x/oauth2,BSD-3-Clause golang.org/x/sync,BSD-3-Clause golang.org/x/sys,BSD-3-Clause golang.org/x/term,BSD-3-Clause golang.org/x/text,BSD-3-Clause golang.org/x/time/rate,BSD-3-Clause gomodules.xyz/jsonpatch/v2,Apache-2.0 google.golang.org/genproto/googleapis/api/expr/v1alpha1,Apache-2.0 google.golang.org/genproto/googleapis/rpc/status,Apache-2.0 google.golang.org/protobuf,BSD-3-Clause 
gopkg.in/evanphx/json-patch.v4,BSD-3-Clause gopkg.in/inf.v0,BSD-3-Clause gopkg.in/ini.v1,Apache-2.0 gopkg.in/yaml.v2,Apache-2.0 gopkg.in/yaml.v3,MIT k8s.io/api,Apache-2.0 k8s.io/apiextensions-apiserver/pkg,Apache-2.0 k8s.io/apimachinery/pkg,Apache-2.0 k8s.io/apimachinery/third_party/forked/golang,BSD-3-Clause k8s.io/apiserver/pkg,Apache-2.0 k8s.io/client-go,Apache-2.0 k8s.io/component-base,Apache-2.0 k8s.io/klog/v2,Apache-2.0 k8s.io/kube-openapi/pkg,Apache-2.0 k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json,BSD-3-Clause k8s.io/kube-openapi/pkg/internal/third_party/govalidator,MIT k8s.io/kube-openapi/pkg/validation/errors,Apache-2.0 k8s.io/kube-openapi/pkg/validation/spec,Apache-2.0 k8s.io/kube-openapi/pkg/validation/strfmt,Apache-2.0 k8s.io/kube-openapi/pkg/validation/validate,Apache-2.0 k8s.io/utils,Apache-2.0 k8s.io/utils/internal/third_party/forked/golang,BSD-3-Clause sigs.k8s.io/controller-runtime/pkg,Apache-2.0 sigs.k8s.io/json,Apache-2.0 sigs.k8s.io/json,BSD-3-Clause sigs.k8s.io/randfill,Apache-2.0 sigs.k8s.io/structured-merge-diff/v6,Apache-2.0 sigs.k8s.io/yaml,MIT sigs.k8s.io/yaml,Apache-2.0 sigs.k8s.io/yaml,BSD-3-Clause ================================================ FILE: Makefile ================================================ # Copyright 2023 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
# Edit https://github.com/cert-manager/makefile-modules/blob/main/modules/repository-base/base/Makefile instead. # NOTE FOR DEVELOPERS: "How do the Makefiles work and how can I extend them?" # # Shared Makefile logic lives in the make/_shared/ directory. The source of truth for these files # lies outside of this repository, eg. in the cert-manager/makefile-modules repository. # # Logic specific to this repository must be defined in the make/00_mod.mk and make/02_mod.mk files: # - The make/00_mod.mk file is included first and contains variable definitions needed by # the shared Makefile logic. # - The make/02_mod.mk file is included later, it can make use of most of the shared targets # defined in the make/_shared/ directory (all targets defined in 00_mod.mk and 01_mod.mk). # This file should be used to define targets specific to this repository. ################################## # Some modules build their dependencies from variables, we want these to be # evaluated at the last possible moment. For this we use second expansion to # re-evaluate the generate and verify targets a second time. # # See https://www.gnu.org/software/make/manual/html_node/Secondary-Expansion.html .SECONDEXPANSION: # For details on some of these "prelude" settings, see: # https://clarkgrubb.com/makefile-style-guide MAKEFLAGS += --warn-undefined-variables --no-builtin-rules SHELL := /usr/bin/env bash # The `--norc` option prevents "PS1: unbound" errors. # If Bash thinks it is being run with its standard input connected to a network # connection (such as via SSH or via Docker), it reads and executes commands # from ~/.bashrc, regardless of whether it thinks it is in interactive mode. # Bash does not set PS1 in non-interactive environments. But on Ubuntu 24.04 the # default /etc/bash.bashrc file assumes that PS1 is set. 
# # See https://www.gnu.org/software/bash/manual/bash.html#Invoked-by-remote-shell-daemon .SHELLFLAGS := --norc -uo pipefail -c .DEFAULT_GOAL := help .DELETE_ON_ERROR: .SUFFIXES: FORCE: noop: # do nothing # Set empty value for MAKECMDGOALS to prevent the "warning: undefined variable 'MAKECMDGOALS'" # warning from happening when running make without arguments MAKECMDGOALS ?= ################################## # Host OS and architecture setup # ################################## # The reason we don't use "go env GOOS" or "go env GOARCH" is that the "go" # binary may not be available in the PATH yet when the Makefiles are # evaluated. HOST_OS and HOST_ARCH only support Linux, *BSD and macOS (M1 # and Intel). host_os := $(shell uname -s | tr A-Z a-z) host_arch := $(shell uname -m) HOST_OS ?= $(host_os) HOST_ARCH ?= $(host_arch) ifeq (x86_64, $(HOST_ARCH)) HOST_ARCH = amd64 else ifeq (aarch64, $(HOST_ARCH)) # linux reports the arm64 arch as aarch64 HOST_ARCH = arm64 endif ################################## # Git and versioning information # ################################## git_version := $(shell git describe --tags --always --match='v*' --abbrev=14 --dirty) VERSION ?= $(git_version) IS_PRERELEASE := $(shell git describe --tags --always --match='v*' --abbrev=0 | grep -q '-' && echo true || echo false) GITCOMMIT := $(shell git rev-parse HEAD) GITEPOCH := $(shell git show -s --format=%ct HEAD) ################################## # Global variables and dirs # ################################## bin_dir := _bin # The ARTIFACTS environment variable is set by the CI system to a directory # where artifacts should be placed. These artifacts are then uploaded to a # storage bucket by the CI system (https://docs.prow.k8s.io/docs/components/pod-utilities/). # An example of such an artifact is a jUnit XML file containing test results. # If the ARTIFACTS environment variable is not set, we default to a local # directory in the _bin directory. 
ARTIFACTS ?= $(bin_dir)/artifacts $(bin_dir) $(ARTIFACTS) $(bin_dir)/scratch: mkdir -p $@ .PHONY: clean ## Clean all temporary files ## @category [shared] Tools clean: rm -rf $(bin_dir) ################################## # Include all the Makefiles # ################################## -include make/00_mod.mk -include make/_shared/*/00_mod.mk -include make/_shared/*/01_mod.mk -include make/02_mod.mk -include make/_shared/*/02_mod.mk ================================================ FILE: OWNERS ================================================ approvers: - j-fuentes - wwwil - charlieegan3 - akvilemar - james-w - tfadeyi reviewers: - j-fuentes - wwwil - charlieegan3 ================================================ FILE: OWNERS_ALIASES ================================================ # THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. # Edit https://github.com/cert-manager/makefile-modules/blob/main/modules/repository-base/base/OWNERS_ALIASES instead. aliases: cm-maintainers: - munnerz - joshvanl - wallrj - jakexks - maelvls - sgtcodfish - inteon - thatsmrtalbot - erikgb - hjoshi123 ================================================ FILE: README.md ================================================ # Discovery Agent [![tests](https://github.com/jetstack/jetstack-secure/actions/workflows/tests.yaml/badge.svg?branch=master&event=push)](https://github.com/jetstack/jetstack-secure/actions/workflows/tests.yaml) [![Go Reference](https://pkg.go.dev/badge/github.com/jetstack/jetstack-secure.svg)](https://pkg.go.dev/github.com/jetstack/jetstack-secure) [![Go Report Card](https://goreportcard.com/badge/github.com/jetstack/jetstack-secure)](https://goreportcard.com/report/github.com/jetstack/jetstack-secure) "The agent" manages your machine identities across Cloud Native Kubernetes and OpenShift environments and builds a detailed view of the enterprise security posture. 
## Installation Please [review the documentation](https://docs.cyberark.com/mis-saas/vaas/k8s-components/c-tlspk-agent-overview/) for the agent. Detailed installation instructions are available for a variety of methods. ## Local Execution To build and run a version from master: ```bash go run main.go agent --agent-config-file ./path/to/agent/config/file.yaml -p 0h1m0s ``` You can configure the agent to perform one data gathering loop and output the data to a local file: ```bash go run . agent \ --agent-config-file examples/one-shot-secret.yaml \ --one-shot \ --output-path output.json ``` > Some examples of agent configuration files: > > - [./agent.yaml](./agent.yaml). > - [./examples/one-shot-secret.yaml](./examples/one-shot-secret.yaml). > - [./examples/cert-manager-agent.yaml](./examples/cert-manager-agent.yaml). You might also want to run a local echo server to monitor requests sent by the agent: ```bash go run main.go echo ``` ## Metrics The agent exposes its metrics through a Prometheus server, on port 8081. The Prometheus server is disabled by default but can be enabled by passing the `--enable-metrics` flag to the agent binary. If you deploy the agent using the venafi-kubernetes-agent Helm chart, the metrics server will be enabled by default, on port 8081. If you use the Prometheus Operator, you can use `--set metrics.podmonitor.enabled=true` to deploy a `PodMonitor` resource, which will add the venafi-kubernetes-agent metrics to your Prometheus server. The following metrics are collected: - Go collector: via the [default registry](https://github.com/prometheus/client_golang/blob/34e02e282dc4a3cb55ca6441b489ec182e654d59/prometheus/registry.go#L60-L63) in Prometheus `client_golang`. - Process collector: via the [default registry](https://github.com/prometheus/client_golang/blob/34e02e282dc4a3cb55ca6441b489ec182e654d59/prometheus/registry.go#L60-L63) in Prometheus `client_golang`. 
- Agent metrics: `data_readings_upload_size`: Data readings upload size (in bytes) sent by the in-cluster agent. ## End to end testing An end to end test script is available at [./hack/e2e/test.sh](./hack/e2e/test.sh). It is configured to run in CI in the tests.yaml GitHub Actions workflow. To run the script you will need to add the `test-e2e` label to the PR. The script creates a cluster in GKE and cleans up after itself unless the `keep-e2e-cluster` label is set on the PR. Adding that label will leave the cluster running for further debugging, but it will incur costs, so manually delete the cluster when done. ================================================ FILE: RELEASE.md ================================================ # Release Process > [!NOTE] > Before starting a release let the docs team know that a release is about to be created so that documentation can be prepared in advance. > This is not necessary for pre-releases. The release process is semi-automated. ### Step 1: Git Tag and GitHub Release > [!NOTE] > > Upon pushing the tag, a GitHub Action will do the following: > > - Build and publish the container image: `quay.io/jetstack/venafi-agent`, > - Build and publish the Helm chart: `oci://quay.io/jetstack/charts/venafi-kubernetes-agent`, > - Build and publish the container image: `quay.io/jetstack/disco-agent`, > - Build and publish the Helm chart: `oci://quay.io/jetstack/charts/disco-agent`, > - Build and publish the container image: `quay.io/jetstack/discovery-agent`, > - Build and publish the Helm chart: `oci://quay.io/jetstack/charts/discovery-agent`, > - Create a draft GitHub release, 1. Run govulncheck; it's the best indicator that a dependency needs to be upgraded. ```bash make verify-govulncheck ``` Any failures should be treated extremely seriously and patched before release unless you can be absolutely confident it's a false positive. 2. 
Consider upgrading Go dependencies using `go-mod-upgrade`: ```bash go install github.com/oligot/go-mod-upgrade@latest go-mod-upgrade make generate ``` Once complete, you'll need to create a PR to merge the changes. 3. Open the [tests GitHub Actions workflow][tests-workflow] and verify that it succeeds on the master branch. 4. Create a tag for the new release: ```sh export VERSION=v1.1.0 git tag --annotate --message="Release ${VERSION}" "${VERSION}" git push origin "${VERSION}" ``` This triggers a [release action](https://github.com/jetstack/jetstack-secure/actions/workflows/release.yml). 5. Wait until the release action finishes. 6. Navigate to the [GitHub Releases](https://github.com/jetstack/jetstack-secure/releases) page and select the draft release to edit. 1. Click on “Generate release notes” to automatically compile the changelog. 2. Review and refine the generated notes to ensure they’re clear and useful for end users. 3. Remove any irrelevant entries, such as “update deps,” “update CI,” “update docs,” or similar internal changes that do not impact user functionality. 7. Publish the release. 8. Inform the `#venafi-kubernetes-agent` channel on Slack that a new version of the Discovery Agent has been released! Consider also messaging the DisCo team at CyberArk (ask in the cert-manager team Slack channel if you don't know who to message). 9. Inform the docs team of the new release so they can update the documentation. 
[tests-workflow]: https://github.com/jetstack/jetstack-secure/actions/workflows/tests.yaml?query=branch%3Amaster ## Release Artifact Information For context, the new tag will create the following images: | Image | Automation | | -------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | | `quay.io/jetstack/venafi-agent` | Automatically built by the [release action](.github/workflows/release.yml) on Git tag pushes | | `quay.io/jetstack/disco-agent` | Automatically built by the [release action](.github/workflows/release.yml) on Git tag pushes | | `quay.io/jetstack/discovery-agent` | Automatically built by the [release action](.github/workflows/release.yml) on Git tag pushes | | `registry.venafi.cloud/venafi-agent/venafi-agent` | Automatically mirrored by Harbor Replication rule | | `private-registry.venafi.cloud/venafi-agent/venafi-agent` | Automatically mirrored by Harbor Replication rule | | `private-registry.venafi.eu/venafi-agent/venafi-agent` | Automatically mirrored by Harbor Replication rule | | `registry.ngts.paloaltonetworks.com/disco-agent/disco-agent` | Automatically mirrored by Harbor Replication rule | | `registry.ngts.paloaltonetworks.com/discovery-agent/discovery-agent` | Automatically mirrored by Harbor Replication rule | and the following OCI Helm charts: | Helm Chart | Automation | | -------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | | `oci://quay.io/jetstack/charts/venafi-kubernetes-agent` | Automatically built by the [release action](.github/workflows/release.yml) on Git tag pushes | | `oci://quay.io/jetstack/charts/disco-agent` | Automatically built by the [release action](.github/workflows/release.yml) on Git tag pushes | | `oci://quay.io/jetstack/charts/discovery-agent` | Automatically built by the [release 
action](.github/workflows/release.yml) on Git tag pushes | | `oci://registry.venafi.cloud/charts/venafi-kubernetes-agent` | Automatically mirrored by Harbor Replication rule | | `oci://private-registry.venafi.cloud/charts/venafi-kubernetes-agent` | Automatically mirrored by Harbor Replication rule | | `oci://private-registry.venafi.eu/charts/venafi-kubernetes-agent` | Automatically mirrored by Harbor Replication rule | | `oci://registry.ngts.paloaltonetworks.com/charts/disco-agent` | Automatically mirrored by Harbor Replication rule | | `oci://registry.ngts.paloaltonetworks.com/charts/discovery-agent` | Automatically mirrored by Harbor Replication rule | ### Replication Flows TODO: These flows are helpful illustrations but describe a process whose source of truth is defined elsewhere. Instead, we should document the replication process where it's defined, in enterprise-builds. Replication flow for the venafi-kubernetes-agent Helm chart: ```text v1.1.0 (Git tag in the jetstack-secure repo) └── oci://quay.io/jetstack/charts/venafi-kubernetes-agent --version 1.1.0 (GitHub Actions in the jetstack-secure repo) └── oci://eu.gcr.io/jetstack-secure-enterprise/charts/venafi-kubernetes-agent (Enterprise Builds's GitHub Actions) ├── oci://registry.venafi.cloud/charts/venafi-kubernetes-agent --version 1.1.0 (Harbor Replication) └── oci://private-registry.venafi.cloud/charts/venafi-kubernetes-agent --version 1.1.0 (Harbor Replication) └── oci://private-registry.venafi.eu/charts/venafi-kubernetes-agent --version 1.1.0 (Harbor Replication) ``` Replication flow for the venafi-kubernetes-agent container image: ```text v1.1.0 (Git tag in the jetstack-secure repo) └── quay.io/jetstack/venafi-agent:v1.1.0 (GitHub Actions in the jetstack-secure repo) └── eu.gcr.io/jetstack-secure-enterprise/venafi-agent:v1.1.0 (Enterprise Builds's GitHub Actions) ├── registry.venafi.cloud/venafi-agent/venafi-agent:v1.1.0 (Harbor Replication) ├── 
private-registry.venafi.cloud/venafi-agent/venafi-agent:v1.1.0 (Harbor Replication) └── private-registry.venafi.eu/venafi-agent/venafi-agent:v1.1.0 (Harbor Replication) ``` [public-img-and-chart-replication.tf]: https://gitlab.com/venafi/vaas/delivery/harbor/-/blob/3d114f54092eb44a1deb0edc7c4e8a2d4f855aa2/public-registry/module/subsystems/tlspk/replication.tf [private-img-and-chart-replication.tf]: https://gitlab.com/venafi/vaas/delivery/harbor/-/blob/3d114f54092eb44a1deb0edc7c4e8a2d4f855aa2/private-registry/module/subsystems/tlspk/replication.tf [release_enterprise_builds.yaml]: https://github.com/jetstack/enterprise-builds/actions/workflows/release_enterprise_builds.yaml ## Step 2: Testing When a release is complete, consider installing it into a cluster and testing it. TODO: provide guidance on doing those tests. ================================================ FILE: agent.yaml ================================================ server: "https://platform.jetstack.io" organization_id: "my-organization" cluster_id: "my_cluster" period: "0h1m0s" data-gatherers: - kind: "dummy" name: "dummy" config: failed-attempts: 5 - kind: "dummy" name: "dummy-fail" config: always-fail: true venafi-cloud: uploader_id: "example-id" upload_path: "/example/endpoint/path" ================================================ FILE: api/agent.go ================================================ package api // AgentMetadata is metadata about the agent. type AgentMetadata struct { Version string `json:"version"` // ClusterID is the name of the cluster or host where the agent is running. // It may send data for other clusters in its datareadings. ClusterID string `json:"cluster_id"` } ================================================ FILE: api/common.go ================================================ // Package api provides types for Preflight reports and some common helpers. package api import ( "encoding/json" "time" ) // TimeFormat defines the format used for timestamps across all this API. 
const TimeFormat = time.RFC3339

// Time is a wrapper around time.Time that overrides how it is marshaled into JSON
// (see MarshalJSON below; the TimeFormat constant fixes the wire format).
type Time struct {
	time.Time
}

// String returns a string representation of the timestamp in the TimeFormat
// (RFC3339) layout.
func (t Time) String() string {
	return t.Format(TimeFormat)
}

// MarshalJSON marshals the timestamp with RFC3339 format, producing a quoted
// JSON string such as "2021-03-29T00:00:00Z".
func (t Time) MarshalJSON() ([]byte, error) {
	str := t.String()
	jsonStr, err := json.Marshal(str)
	if err != nil {
		return nil, err
	}
	return jsonStr, nil
}



================================================
FILE: api/datareading.go
================================================
package api

import (
	"bytes"
	"encoding/json"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/version"
)

// DataReadingsPost is the payload in the upload request.
type DataReadingsPost struct {
	// AgentMetadata identifies the agent (version, cluster) that produced
	// this upload.
	AgentMetadata *AgentMetadata `json:"agent_metadata"`
	// DataGatherTime represents the time that the data readings were gathered
	DataGatherTime time.Time `json:"data_gather_time"`
	// DataReadings is the set of readings gathered in this cycle, one per
	// data gatherer.
	DataReadings []*DataReading `json:"data_readings"`
}

// DataReading is the output of a DataGatherer.
type DataReading struct {
	// ClusterID is optional as it can be inferred from the agent
	// token when using basic authentication.
	ClusterID string `json:"cluster_id,omitempty"`
	// DataGatherer is the name of the gatherer that produced this reading.
	DataGatherer string `json:"data-gatherer"`
	// Timestamp records when the reading was taken (RFC3339 on the wire).
	Timestamp Time `json:"timestamp"`
	// Data holds the gatherer-specific payload. After UnmarshalJSON it is one
	// of *OIDCDiscoveryData, *DiscoveryData or *DynamicData.
	Data any `json:"data"`
	// SchemaVersion is the version of the reading's data schema.
	SchemaVersion string `json:"schema_version"`
}

// UnmarshalJSON implements the json.Unmarshaler interface for DataReading.
// The function attempts to decode the Data field into known types in a prioritized order.
// Empty data is considered an error, because there is no way to discriminate between data types.
// TODO(wallrj): Add a discriminator field to DataReading to avoid this complex logic.
// E.g. "data_type": "discovery"|"dynamic"
func (o *DataReading) UnmarshalJSON(data []byte) error {
	// tmp mirrors DataReading but defers decoding of Data so the
	// concrete type can be probed afterwards.
	var tmp struct {
		ClusterID     string          `json:"cluster_id,omitempty"`
		DataGatherer  string          `json:"data-gatherer"`
		Timestamp     Time            `json:"timestamp"`
		Data          json.RawMessage `json:"data"`
		SchemaVersion string          `json:"schema_version"`
	}
	// Decode the top-level fields of DataReading; unknown top-level
	// fields are rejected (strict decoding).
	if err := jsonUnmarshalStrict(data, &tmp); err != nil {
		return fmt.Errorf("failed to parse DataReading: %s", err)
	}
	// Assign top-level fields to the DataReading object
	o.ClusterID = tmp.ClusterID
	o.DataGatherer = tmp.DataGatherer
	o.Timestamp = tmp.Timestamp
	o.SchemaVersion = tmp.SchemaVersion
	// Return an error if data is empty: absent, JSON null, or a literal
	// "{}" object. An empty object would otherwise strictly decode into
	// any of the candidate types, making the result ambiguous.
	if len(tmp.Data) == 0 || bytes.Equal(tmp.Data, []byte("null")) || bytes.Equal(tmp.Data, []byte("{}")) {
		return fmt.Errorf("failed to parse DataReading.Data for gatherer %q: empty data", o.DataGatherer)
	}
	// Define a list of decoding attempts with prioritized types. The first
	// type that strictly decodes (no unknown fields) wins, so the order
	// here matters for payloads that could match more than one shape.
	dataTypes := []struct {
		target any
		assign func(any)
	}{
		{&OIDCDiscoveryData{}, func(v any) { o.Data = v.(*OIDCDiscoveryData) }},
		{&DiscoveryData{}, func(v any) { o.Data = v.(*DiscoveryData) }},
		{&DynamicData{}, func(v any) { o.Data = v.(*DynamicData) }},
	}
	// Attempt to decode the Data field into each type
	for _, dataType := range dataTypes {
		if err := jsonUnmarshalStrict(tmp.Data, dataType.target); err == nil {
			dataType.assign(dataType.target)
			return nil
		}
	}
	// Return an error if no type matches
	return fmt.Errorf("failed to parse DataReading.Data for gatherer %q: unknown type", o.DataGatherer)
}

// jsonUnmarshalStrict unmarshals JSON data into the provided interface,
// disallowing unknown fields to ensure strict adherence to the expected structure.
func jsonUnmarshalStrict(data []byte, v any) error { decoder := json.NewDecoder(bytes.NewReader(data)) decoder.DisallowUnknownFields() return decoder.Decode(v) } // GatheredResource wraps the raw k8s resource that is sent to the jetstack secure backend type GatheredResource struct { // Resource is a reference to a k8s object that was found by the informer // should be of type unstructured.Unstructured, raw Object Resource any DeletedAt Time } func (v GatheredResource) MarshalJSON() ([]byte, error) { dateString := "" if !v.DeletedAt.IsZero() { dateString = v.DeletedAt.Format(TimeFormat) } data := struct { Resource any `json:"resource"` DeletedAt string `json:"deleted_at,omitempty"` }{ Resource: v.Resource, DeletedAt: dateString, } return json.Marshal(data) } func (v *GatheredResource) UnmarshalJSON(data []byte) error { var tmpResource struct { Resource *unstructured.Unstructured `json:"resource"` DeletedAt Time `json:"deleted_at"` } d := json.NewDecoder(bytes.NewReader(data)) d.DisallowUnknownFields() if err := d.Decode(&tmpResource); err != nil { return err } v.Resource = tmpResource.Resource v.DeletedAt = tmpResource.DeletedAt return nil } // DynamicData is the DataReading.Data returned by the k8sdynamic.DataGathererDynamic // gatherer type DynamicData struct { // Items is a list of GatheredResource Items []*GatheredResource `json:"items"` } // DiscoveryData is the DataReading.Data returned by the k8sdiscovery.DataGathererDiscovery // gatherer type DiscoveryData struct { // ClusterID is the unique ID of the Kubernetes cluster which this snapshot was taken from. // This is sourced from the kube-system namespace UID, // which is assumed to be stable for the lifetime of the cluster. 
// - https://github.com/kubernetes/kubernetes/issues/77487#issuecomment-489786023 ClusterID string `json:"cluster_id"` // ServerVersion is the version information of the k8s apiserver // See https://godoc.org/k8s.io/apimachinery/pkg/version#Info ServerVersion *version.Info `json:"server_version"` } // OIDCDiscoveryData is the DataReading.Data returned by the oidc.OIDCDiscovery // gatherer type OIDCDiscoveryData struct { // OIDCConfig contains OIDC configuration data from the API server's // `/.well-known/openid-configuration` endpoint OIDCConfig map[string]any `json:"openid_configuration,omitempty"` // OIDCConfigError contains any error encountered while fetching the OIDC configuration OIDCConfigError string `json:"openid_configuration_error,omitempty"` // JWKS contains JWKS data from the API server's `/openid/v1/jwks` endpoint JWKS map[string]any `json:"jwks,omitempty"` // JWKSError contains any error encountered while fetching the JWKS JWKSError string `json:"jwks_error,omitempty"` } ================================================ FILE: api/datareading_test.go ================================================ package api import ( "encoding/json" "testing" "time" "github.com/stretchr/testify/assert" ) func TestJSONGatheredResourceDropsEmptyTime(t *testing.T) { var resource GatheredResource bytes, err := json.Marshal(resource) if err != nil { t.Fatalf("failed to marshal %s", err) } expected := `{"resource":null}` if string(bytes) != expected { t.Fatalf("unexpected json \ngot %s\nwant %s", string(bytes), expected) } } func TestJSONGatheredResourceSetsTimeWhenPresent(t *testing.T) { var resource GatheredResource resource.DeletedAt = Time{time.Date(2021, 3, 29, 0, 0, 0, 0, time.UTC)} bytes, err := json.Marshal(resource) if err != nil { t.Fatalf("failed to marshal %s", err) } expected := `{"resource":null,"deleted_at":"2021-03-29T00:00:00Z"}` if string(bytes) != expected { t.Fatalf("unexpected json \ngot %s\nwant %s", string(bytes), expected) } } // 
TestDataReading_UnmarshalJSON tests the UnmarshalJSON method of DataReading // with various scenarios including valid and invalid JSON inputs. func TestDataReading_UnmarshalJSON(t *testing.T) { tests := []struct { name string input string wantDataType any expectError string }{ { name: "DiscoveryData type", input: `{ "cluster_id": "61b2db64-fd70-49a6-a257-08397b9b4bae", "data-gatherer": "discovery", "timestamp": "2024-06-01T12:00:00Z", "data": { "cluster_id": "60868ebf-6e47-4184-9bc0-20bb6824e210", "server_version": { "major": "1", "minor": "20", "gitVersion": "v1.20.0" } }, "schema_version": "v1" }`, wantDataType: &DiscoveryData{}, }, { name: "DynamicData type", input: `{ "cluster_id": "69050b54-c61a-4384-95c3-35f890377a67", "data-gatherer": "dynamic", "timestamp": "2024-06-01T12:00:00Z", "data": {"items": []}, "schema_version": "v1" }`, wantDataType: &DynamicData{}, }, { name: "OIDCDiscoveryData type", input: `{ "cluster_id": "11111111-2222-3333-4444-555555555555", "data-gatherer": "oidc", "timestamp": "2024-06-01T12:00:00Z", "data": { "openid_configuration": {"issuer": "https://example.com"}, "jwks": {"keys": []} }, "schema_version": "v1" }`, wantDataType: &OIDCDiscoveryData{}, }, { name: "Invalid JSON", input: `not a json`, expectError: "failed to parse DataReading: invalid character 'o' in literal null (expecting 'u')", }, { name: "Missing data field", input: `{ "cluster_id": "cc5a0429-8dc4-42c8-8e3a-eece9bca15c3", "data-gatherer": "missing-data-field", "timestamp": "2024-06-01T12:00:00Z", "schema_version": "v1" }`, expectError: `failed to parse DataReading.Data for gatherer "missing-data-field": empty data`, }, { name: "Mismatched data type", input: `{ "cluster_id": "c272b13e-b19e-4782-833f-d55a305f3c9e", "data-gatherer": "unknown-data-type", "timestamp": "2024-06-01T12:00:00Z", "data": "this should be an object", "schema_version": "v1" }`, expectError: `failed to parse DataReading.Data for gatherer "unknown-data-type": unknown type`, }, { name: "Empty data 
field", input: `{ "cluster_id": "07909675-113f-4b59-ba5e-529571a191e6", "data-gatherer": "empty-data", "timestamp": "2024-06-01T12:00:00Z", "data": {}, "schema_version": "v1" }`, expectError: `failed to parse DataReading.Data for gatherer "empty-data": empty data`, }, { name: "Additional field", input: `{ "cluster_id": "11df7332-4b32-4f5a-903b-0cbbef381850", "data-gatherer": "additional-field", "timestamp": "2024-06-01T12:00:00Z", "data": { "cluster_id": "60868ebf-6e47-4184-9bc0-20bb6824e210" }, "extra_field": "should cause error", "schema_version": "v1" }`, expectError: `failed to parse DataReading: json: unknown field "extra_field"`, }, { name: "Additional data field", input: `{ "cluster_id": "ca44c338-987e-4d57-8320-63f538db4292", "data-gatherer": "additional-data-field", "timestamp": "2024-06-01T12:00:00Z", "data": { "cluster_id": "60868ebf-6e47-4184-9bc0-20bb6824e210", "server_version": { "major": "1", "minor": "20", "gitVersion": "v1.20.0" }, "extra_field": "should cause error" }, "schema_version": "v1" }`, expectError: `failed to parse DataReading.Data for gatherer "additional-data-field": unknown type`, }, { name: "Empty JSON object", input: `{}`, expectError: `failed to parse DataReading.Data for gatherer "": empty data`, }, { name: "Null data field", input: `{ "cluster_id": "36281cb3-7f3a-4efa-9879-7c988a9715b0", "data-gatherer": "null-data", "timestamp": "2024-06-01T12:00:00Z", "data": null, "schema_version": "v1" }`, expectError: `failed to parse DataReading.Data for gatherer "null-data": empty data`, }, { name: "Empty string data field", input: `{ "cluster_id": "7b7aa8ee-58ac-4818-9b29-c0a76296ea1d", "data-gatherer": "empty-string-data", "timestamp": "2024-06-01T12:00:00Z", "data": "", "schema_version": "v1" }`, expectError: `failed to parse DataReading.Data for gatherer "empty-string-data": unknown type`, }, { name: "Array instead of object in data field", input: `{ "cluster_id": "94d7757f-d084-4ccb-963b-f60fece0df2d", "data-gatherer": "array-data", 
"timestamp": "2024-06-01T12:00:00Z", "data": [], "schema_version": "v1" }`, expectError: `failed to parse DataReading.Data for gatherer "array-data": unknown type`, }, { name: "Incorrect timestamp format", input: `{ "cluster_id": "d58f298d-b8c1-4d99-aa85-c27d9aec6f97", "data-gatherer": "bad-timestamp", "timestamp": "not-a-timestamp", "data": { "items": [] }, "schema_version": "v1" }`, expectError: `failed to parse DataReading: parsing time "not-a-timestamp" as "2006-01-02T15:04:05Z07:00": cannot parse "not-a-timestamp" as "2006"`, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var dr DataReading err := dr.UnmarshalJSON([]byte(tt.input)) if tt.expectError != "" { assert.EqualError(t, err, tt.expectError) return } assert.NoError(t, err) assert.IsType(t, tt.wantDataType, dr.Data) }) } } ================================================ FILE: cmd/agent.go ================================================ package cmd import ( "fmt" "os" "github.com/spf13/cobra" "github.com/jetstack/preflight/pkg/agent" "github.com/jetstack/preflight/pkg/permissions" ) var agentCmd = &cobra.Command{ Use: "agent", Short: "start the preflight agent", Long: `The agent will periodically gather data for the configured data gatherers and send it to a remote backend for evaluation`, RunE: agent.Run, } var agentInfoCmd = &cobra.Command{ Use: "info", Short: "print several internal parameters of the agent", Long: `Print several internal parameters of the agent, as the built-in OAuth2 client ID.`, Run: func(cmd *cobra.Command, args []string) { printVersion(true) fmt.Println() printOAuth2Config() }, } var agentRBACCmd = &cobra.Command{ Use: "rbac", Short: "print the agent's minimal RBAC manifest", Long: `Print RBAC string by reading GVRs`, RunE: func(cmd *cobra.Command, args []string) error { b, err := os.ReadFile(agent.Flags.ConfigFilePath) if err != nil { return fmt.Errorf("Failed to read config file: %s", err) } cfg, err := agent.ParseConfig(b) if err != nil { return 
fmt.Errorf("Failed to parse config file: %s", err) } err = agent.ValidateDataGatherers(cfg.DataGatherers) if err != nil { return fmt.Errorf("Failed to validate data gatherers: %s", err) } out := permissions.GenerateFullManifest(cfg.DataGatherers) fmt.Print(out) return nil }, } func init() { rootCmd.AddCommand(agentCmd) agentCmd.AddCommand(agentInfoCmd) agentCmd.AddCommand(agentRBACCmd) agent.InitAgentCmdFlags(agentCmd, &agent.Flags) } ================================================ FILE: cmd/agent_test.go ================================================ package cmd import ( "bytes" "context" "fmt" "os" "os/exec" "path/filepath" "strings" "testing" "time" "github.com/stretchr/testify/require" arktesting "github.com/jetstack/preflight/internal/cyberark/testing" ) // TestOutputModes tests the different output modes of the agent command. // It does this by running the agent command in a subprocess with the // appropriate flags and configuration files. // It assumes that the test is being run from the "cmd" directory and that // the repository root is the parent directory of the current working directory. func TestOutputModes(t *testing.T) { repoRoot := findRepoRoot(t) t.Run("localfile", func(t *testing.T) { runSubprocess(t, repoRoot, []string{ "--agent-config-file", filepath.Join(repoRoot, "examples/localfile/config.yaml"), "--input-path", filepath.Join(repoRoot, "examples/localfile/input.json"), "--output-path", "/dev/null", }) }) t.Run("machinehub", func(t *testing.T) { if strings.ToLower(os.Getenv("ARK_LIVE_TEST")) != "true" { t.Skip("set ARK_LIVE_TEST=true to run this test against the live service") return } arktesting.SkipIfNoEnv(t) t.Log("This test runs against a live service and has been known to flake. 
If you see timeout issues it's possible that the test is flaking and it could be unrelated to your changes.") runSubprocess(t, repoRoot, []string{ "--agent-config-file", filepath.Join(repoRoot, "examples/machinehub/config.yaml"), "--input-path", filepath.Join(repoRoot, "examples/machinehub/input.json"), "--machine-hub", }) }) } // findRepoRoot returns the absolute path to the repository root. // It assumes that the test is being run from the "cmd" directory. func findRepoRoot(t *testing.T) string { cwd, err := os.Getwd() require.NoError(t, err) repoRoot, err := filepath.Abs(filepath.Join(cwd, "..")) require.NoError(t, err) return repoRoot } // runSubprocess runs the current test in a subprocess with the given args. // It sets the GO_CHILD environment variable to indicate to the subprocess // that it should run the main function instead of the test function. // It captures and logs the stdout and stderr of the subprocess. // It fails the test if the subprocess exits with a non-zero status. // It uses a timeout to avoid hanging indefinitely. func runSubprocess(t *testing.T, repoRoot string, args []string) { if _, found := os.LookupEnv("GO_CHILD"); found { os.Args = append([]string{ "preflight", "agent", "--log-level", "6", "--one-shot", }, args...) 
Execute() return } t.Log("Running child process", os.Args[0], "-test.run=^"+t.Name()+"$") ctx, cancel := context.WithTimeout(t.Context(), time.Second*10) defer cancel() cmd := exec.CommandContext(ctx, os.Args[0], "-test.run=^"+t.Name()+"$") var stdout, stderr bytes.Buffer cmd.Stdout = &stdout cmd.Stderr = &stderr cmd.Env = append(os.Environ(), "GO_CHILD=true") err := cmd.Run() t.Logf("STDOUT\n%s\n", stdout.String()) t.Logf("STDERR\n%s\n", stderr.String()) require.NoError(t, err, fmt.Sprintf("Error: %v\nSTDERR: %s", err, stderr.String())) } ================================================ FILE: cmd/ark/main.go ================================================ package main import "github.com/jetstack/preflight/cmd" func main() { cmd.Execute() } ================================================ FILE: cmd/echo.go ================================================ package cmd import ( "github.com/spf13/cobra" "github.com/jetstack/preflight/pkg/echo" ) var echoCmd = &cobra.Command{ Use: "echo", Short: "starts an echo server to test the agent", Long: `The agent sends data to a server. 
This echo server can be used to act as the server part and echo the data received by the agent.`, RunE: echo.Echo, } func init() { rootCmd.AddCommand(echoCmd) echoCmd.PersistentFlags().StringVarP( &echo.EchoListen, "listen", "l", ":8080", "Address where to listen.", ) echoCmd.PersistentFlags().BoolVarP( &echo.Compact, "compact", "", false, "Prints compact output.", ) } ================================================ FILE: cmd/helpers.go ================================================ package cmd import ( "fmt" "runtime" "github.com/jetstack/preflight/pkg/client" "github.com/jetstack/preflight/pkg/version" ) func printVersion(verbose bool) { fmt.Println("Preflight version: ", version.PreflightVersion, runtime.GOOS+"/"+runtime.GOARCH) if verbose { fmt.Println(" Commit: ", version.Commit) fmt.Println(" Built: ", version.BuildDate) fmt.Println(" Go: ", runtime.Version()) } } func printOAuth2Config() { fmt.Println("OAuth2: ") fmt.Println(" ClientID: ", client.ClientID) fmt.Println(" AuthServerDomain: ", client.AuthServerDomain) } ================================================ FILE: cmd/root.go ================================================ package cmd import ( "context" "fmt" "os" "strings" "github.com/spf13/cobra" "github.com/spf13/pflag" "k8s.io/klog/v2" "github.com/jetstack/preflight/pkg/logs" ) // rootCmd represents the base command when called without any subcommands var rootCmd = &cobra.Command{ Use: "preflight", Short: "Kubernetes cluster configuration checker 🚀", Long: `Preflight is a tool to automatically perform Kubernetes cluster configuration checks using Open Policy Agent (OPA). Preflight checks are bundled into Packages`, PersistentPreRunE: func(cmd *cobra.Command, args []string) error { return logs.Initialize() }, // SilenceErrors and SilenceUsage prevents this command or any sub-command // from printing arbitrary text to stderr. // Why? 
To ensure that each line of output can be parsed as a single message // for consumption by logging agents such as fluentd. // Usage information is still available on stdout with the `-h` and `--help` // flags. SilenceErrors: true, SilenceUsage: true, } func init() { for _, command := range rootCmd.Commands() { setFlagsFromEnv("PREFLIGHT_", command.PersistentFlags()) } } // Execute adds all child commands to the root command and sets flags appropriately. // This is called by main.main(). It only needs to happen once to the rootCmd. // If the root command or sub-command returns an error, the error message will // be logged and the process will exit with status 1. func Execute() { logs.AddFlags(rootCmd.PersistentFlags()) ctx := klog.NewContext(context.Background(), klog.Background()) var exitCode int if err := rootCmd.ExecuteContext(ctx); err != nil { exitCode = 1 klog.ErrorS(err, "Exiting due to error", "exit-code", exitCode) } klog.FlushAndExit(klog.ExitFlushTimeout, exitCode) } func setFlagsFromEnv(prefix string, fs *pflag.FlagSet) { set := map[string]bool{} fs.Visit(func(f *pflag.Flag) { set[f.Name] = true }) fs.VisitAll(func(f *pflag.Flag) { // ignore flags set from the commandline if set[f.Name] { return } // remove trailing _ to reduce common errors with the prefix, i.e. people setting it to MY_PROG_ cleanPrefix := strings.TrimSuffix(prefix, "_") name := fmt.Sprintf("%s_%s", cleanPrefix, strings.ReplaceAll(strings.ToUpper(f.Name), "-", "_")) if e, ok := os.LookupEnv(name); ok { _ = f.Value.Set(e) } }) } ================================================ FILE: cmd/version.go ================================================ package cmd import ( "github.com/spf13/cobra" ) var verbose bool var versionCmd = &cobra.Command{ Use: "version", Short: "Display the version", Long: `Display preflight version. 
`, Run: func(cmd *cobra.Command, args []string) { printVersion(verbose) }, } func init() { rootCmd.AddCommand(versionCmd) versionCmd.PersistentFlags().BoolVar( &verbose, "verbose", false, "If enabled, displays the additional information about this build.", ) } ================================================ FILE: deploy/charts/disco-agent/.helmignore ================================================ # Patterns to ignore when building packages. # This supports shell glob matching, relative path matching, and # negation (prefixed with !). Only one pattern per line. .DS_Store # Common VCS dirs .git/ .gitignore .bzr/ .bzrignore .hg/ .hgignore .svn/ # Common backup files *.swp *.bak *.tmp *.orig *~ # Various IDEs .project .idea/ *.tmproj .vscode/ ================================================ FILE: deploy/charts/disco-agent/Chart.yaml ================================================ apiVersion: v2 name: disco-agent description: |- The disco-agent connects your Kubernetes or Openshift cluster to CyberArk Discovery and Context. maintainers: - name: CyberArk email: support@cyberark.com url: https://cyberark.com sources: - https://github.com/jetstack/jetstack-secure # These versions are meant to be overridden by `make helm-chart`. No `v` prefix # for the `version` because Helm doesn't support auto-determining the latest # version for OCI Helm charts that use a `v` prefix. version: 0.0.0 appVersion: "v0.0.0" ================================================ FILE: deploy/charts/disco-agent/README.md ================================================ # disco-agent The Cyberark Discovery and Context Agent connects your Kubernetes or OpenShift cluster to the Discovery and Context service of the CyberArk Identity Security Platform. 
## Quick Start ### Create a Namespace Create a namespace for the agent: ```sh export NAMESPACE=cyberark kubectl create ns "$NAMESPACE" || true ``` ### Add credentials to a Secret You will require tenant details and credentials for the CyberArk Identity Security Platform. Put them in the following environment variables: ```sh export ARK_SUBDOMAIN= # your CyberArk tenant subdomain e.g. tlskp-test export ARK_USERNAME= # your CyberArk username export ARK_SECRET= # your CyberArk password # OPTIONAL: the URL for the CyberArk Discovery API if not using the production environment export ARK_DISCOVERY_API=https://platform-discovery.integration-cyberark.cloud/ ``` Create a Secret containing the tenant details and credentials: ```sh kubectl create secret generic agent-credentials \ --namespace "$NAMESPACE" \ --from-literal=ARK_USERNAME=$ARK_USERNAME \ --from-literal=ARK_SECRET=$ARK_SECRET \ --from-literal=ARK_SUBDOMAIN=$ARK_SUBDOMAIN \ --from-literal=ARK_DISCOVERY_API=$ARK_DISCOVERY_API ``` Alternatively, use the following Secret as a template: ```yaml # agent-credentials.yaml apiVersion: v1 kind: Secret metadata: name: agent-credentials namespace: cyberark type: Opaque stringData: ARK_SUBDOMAIN: $ARK_SUBDOMAIN # your CyberArk tenant subdomain e.g. 
tlskp-test ARK_SECRET: $ARK_SECRET # your CyberArk password ARK_USERNAME: $ARK_USERNAME # your CyberArk username # OPTIONAL: the URL for the CyberArk Discovery API if not using the production environment # ARK_DISCOVERY_API: https://platform-discovery.integration-cyberark.cloud/ ``` ### Deploy the agent Deploy the agent: ```sh helm upgrade agent "oci://${OCI_BASE}/charts/disco-agent" \ --install \ --create-namespace \ --namespace "$NAMESPACE" \ --set fullnameOverride=disco-agent ``` ### Troubleshooting Check the Pod and its events: ```sh kubectl describe -n cyberark pods -l app.kubernetes.io/name=disco-agent ``` Check the logs: ```sh kubectl logs deployments/disco-agent --namespace "${NAMESPACE}" --follow ``` ## Values #### **replicaCount** ~ `number` > Default value: > ```yaml > 1 > ``` This will set the replicaset count more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/ #### **acceptTerms** ~ `bool` > Default value: > ```yaml > false > ``` Must be set to indicate that you have read and accepted the CyberArk Terms of Service. If false, the helm chart will fail to install and will print a message with instructions on how to accept the TOS. #### **imageRegistry** ~ `string` > Default value: > ```yaml > quay.io > ``` The container registry used for disco-agent images by default. This can include path prefixes (e.g. "artifactory.example.com/docker"). #### **imageNamespace** ~ `string` > Default value: > ```yaml > jetstack > ``` The repository namespace used for disco-agent images by default. Examples: - jetstack - custom-namespace #### **image.registry** ~ `string` Deprecated: per-component registry prefix. If set, this value is *prepended* to the image repository that the chart would otherwise render. This applies both when `image.repository` is set and when the repository is computed from `imageRegistry` + `imageNamespace` + `image.name`. 
This can produce "double registry" style references such as `legacy.example.io/quay.io/jetstack/...`. Prefer using the global `imageRegistry`/`imageNamespace` values. #### **image.repository** ~ `string` > Default value: > ```yaml > "" > ``` Full repository override (takes precedence over `imageRegistry`, `imageNamespace`, and `image.name`). Example: quay.io/jetstack/disco-agent #### **image.name** ~ `string` > Default value: > ```yaml > disco-agent > ``` The image name for the Discovery Agent. This is used (together with `imageRegistry` and `imageNamespace`) to construct the full image reference. #### **image.pullPolicy** ~ `string` > Default value: > ```yaml > IfNotPresent > ``` This sets the pull policy for images. #### **image.tag** ~ `string` > Default value: > ```yaml > "" > ``` Override the image tag to deploy by setting this variable. If no value is set, the chart's appVersion is used. #### **image.digest** ~ `string` > Default value: > ```yaml > "" > ``` Override the image digest to deploy by setting this variable. If set together with `image.tag`, the rendered image will include both tag and digest. #### **imagePullSecrets** ~ `array` > Default value: > ```yaml > [] > ``` This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ #### **nameOverride** ~ `string` > Default value: > ```yaml > "" > ``` This is to override the chart name. #### **fullnameOverride** ~ `string` > Default value: > ```yaml > "" > ``` #### **serviceAccount.create** ~ `bool` > Default value: > ```yaml > true > ``` Specifies whether a service account should be created #### **serviceAccount.automount** ~ `bool` > Default value: > ```yaml > true > ``` Automatically mount a ServiceAccount's API credentials? 
#### **serviceAccount.annotations** ~ `object` > Default value: > ```yaml > {} > ``` Annotations to add to the service account #### **serviceAccount.name** ~ `string` > Default value: > ```yaml > "" > ``` The name of the service account to use. If not set and create is true, a name is generated using the fullname template #### **podAnnotations** ~ `object` > Default value: > ```yaml > {} > ``` This is for setting Kubernetes Annotations to a Pod. For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ #### **podLabels** ~ `object` > Default value: > ```yaml > {} > ``` This is for setting Kubernetes Labels to a Pod. For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ #### **podSecurityContext** ~ `object` > Default value: > ```yaml > {} > ``` #### **securityContext** ~ `object` > Default value: > ```yaml > allowPrivilegeEscalation: false > capabilities: > drop: > - ALL > readOnlyRootFilesystem: true > runAsNonRoot: true > seccompProfile: > type: RuntimeDefault > ``` Add Container-specific SecurityContext settings to the container. Takes precedence over `podSecurityContext` when set. See https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container #### **resources** ~ `object` > Default value: > ```yaml > {} > ``` #### **volumes** ~ `array` > Default value: > ```yaml > [] > ``` Additional volumes on the output Deployment definition. #### **volumeMounts** ~ `array` > Default value: > ```yaml > [] > ``` Additional volumeMounts on the output Deployment definition. #### **nodeSelector** ~ `object` > Default value: > ```yaml > {} > ``` #### **tolerations** ~ `array` > Default value: > ```yaml > [] > ``` #### **affinity** ~ `object` > Default value: > ```yaml > {} > ``` #### **http_proxy** ~ `string` Configures the HTTP_PROXY environment variable where an HTTP proxy is required. 
#### **https_proxy** ~ `string` Configures the HTTPS_PROXY environment variable where an HTTP proxy is required. #### **no_proxy** ~ `string` Configures the NO_PROXY environment variable where an HTTP proxy is required, but certain domains should be excluded. #### **podDisruptionBudget** ~ `object` > Default value: > ```yaml > enabled: false > ``` Configure a PodDisruptionBudget for the agent's Deployment. If running with multiple replicas, consider setting podDisruptionBudget.enabled to true. #### **config.period** ~ `string` > Default value: > ```yaml > 12h0m0s > ``` Push data every 12 hours unless changed. #### **config.excludeAnnotationKeysRegex** ~ `array` > Default value: > ```yaml > [] > ``` You can configure the agent to exclude some annotations or labels from being pushed. All Kubernetes objects are affected. The objects are still pushed, but the specified annotations and labels are removed before being pushed. Dots are the only characters that need to be escaped in the regex. Use either double quotes with escaped single quotes or unquoted strings for the regex to avoid YAML parsing issues with `\.`. Example: excludeAnnotationKeysRegex: ['^kapp\.k14s\.io/original.*'] #### **config.excludeLabelKeysRegex** ~ `array` > Default value: > ```yaml > [] > ``` #### **config.clusterName** ~ `string` > Default value: > ```yaml > "" > ``` A human readable name for the cluster where the agent is deployed (optional). This cluster name will be associated with the data that the agent uploads to the Discovery and Context service. If empty (the default), the service account name will be used instead. #### **config.clusterDescription** ~ `string` > Default value: > ```yaml > "" > ``` A short description of the cluster where the agent is deployed (optional). This description will be associated with the data that the agent uploads to the Discovery and Context service. 
The description may include contact information such as the email address of the cluster administrator, so that any problems and risks identified by the Discovery and Context service can be communicated to the people responsible for the affected secrets. #### **config.sendSecretValues** ~ `bool` > Default value: > ```yaml > true > ``` Enable sending of Secret values to CyberArk in addition to metadata. Metadata is always sent, but the actual values of Secrets are not sent by default. When enabled, Secret data is encrypted using envelope encryption using a key managed by CyberArk, fetched from the Discovery and Context service. #### **authentication.secretName** ~ `string` > Default value: > ```yaml > agent-credentials > ``` #### **extraArgs** ~ `array` > Default value: > ```yaml > [] > ``` ```yaml extraArgs: - --logging-format=json - --log-level=6 # To enable HTTP request logging ``` #### **pprof.enabled** ~ `bool` > Default value: > ```yaml > false > ``` Enable profiling with the pprof endpoint #### **metrics.enabled** ~ `bool` > Default value: > ```yaml > true > ``` Enable the metrics server. If false, the metrics server will be disabled and the other metrics fields below will be ignored. #### **metrics.podmonitor.enabled** ~ `bool` > Default value: > ```yaml > false > ``` Create a PodMonitor to add the metrics to Prometheus, if you are using Prometheus Operator. See https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor #### **metrics.podmonitor.namespace** ~ `string` The namespace that the pod monitor should live in. Defaults to the disco-agent namespace. #### **metrics.podmonitor.prometheusInstance** ~ `string` > Default value: > ```yaml > default > ``` Specifies the `prometheus` label on the created PodMonitor. This is used when different Prometheus instances have label selectors matching different PodMonitors. #### **metrics.podmonitor.interval** ~ `string` > Default value: > ```yaml > 60s > ``` The interval to scrape metrics. 
#### **metrics.podmonitor.scrapeTimeout** ~ `string` > Default value: > ```yaml > 30s > ``` The timeout before a metrics scrape fails. #### **metrics.podmonitor.labels** ~ `object` > Default value: > ```yaml > {} > ``` Additional labels to add to the PodMonitor. #### **metrics.podmonitor.annotations** ~ `object` > Default value: > ```yaml > {} > ``` Additional annotations to add to the PodMonitor. #### **metrics.podmonitor.honorLabels** ~ `bool` > Default value: > ```yaml > false > ``` Keep labels from scraped data, overriding server-side labels. #### **metrics.podmonitor.endpointAdditionalProperties** ~ `object` > Default value: > ```yaml > {} > ``` EndpointAdditionalProperties allows setting additional properties on the endpoint such as relabelings, metricRelabelings etc. For example: ```yaml endpointAdditionalProperties: relabelings: - action: replace sourceLabels: - __meta_kubernetes_pod_node_name targetLabel: instance ``` ================================================ FILE: deploy/charts/disco-agent/templates/NOTES.txt ================================================ CHART NAME: {{ .Chart.Name }} CHART VERSION: {{ .Chart.Version }} APP VERSION: {{ .Chart.AppVersion }} - Check the application is running: > kubectl get pods -n {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} - Check the application logs for successful connection to the platform: > kubectl logs -n {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} {{ if .Values.config.sendSecretValues }} NB: sendSecretValues is set to "true". Encrypted secret data will be sent to the CyberArk Discovery and Context service {{ end }} ================================================ FILE: deploy/charts/disco-agent/templates/_helpers.tpl ================================================ {{/* Expand the name of the chart. 
*/}} {{- define "disco-agent.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} {{- end }} {{/* Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If release name contains chart name it will be used as a full name. */}} {{- define "disco-agent.fullname" -}} {{- if .Values.fullnameOverride }} {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} {{- else }} {{- $name := default .Chart.Name .Values.nameOverride }} {{- if contains $name .Release.Name }} {{- .Release.Name | trunc 63 | trimSuffix "-" }} {{- else }} {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} {{- end }} {{- end }} {{- end }} {{/* Create chart name and version as used by the chart label. */}} {{- define "disco-agent.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} {{- end }} {{/* Common labels */}} {{- define "disco-agent.labels" -}} helm.sh/chart: {{ include "disco-agent.chart" . }} {{ include "disco-agent.selectorLabels" . }} {{- if .Chart.AppVersion }} app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- end }} app.kubernetes.io/managed-by: {{ .Release.Service }} {{- end }} {{/* Selector labels */}} {{- define "disco-agent.selectorLabels" -}} app.kubernetes.io/name: {{ include "disco-agent.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} {{/* Create the name of the service account to use */}} {{- define "disco-agent.serviceAccountName" -}} {{- if .Values.serviceAccount.create }} {{- default (include "disco-agent.fullname" .) .Values.serviceAccount.name }} {{- else }} {{- default "default" .Values.serviceAccount.name }} {{- end }} {{- end }} {{/* Util function for generating an image reference based on the provided options. 
This function is derived from similar functions used in the cert-manager GitHub organization */}} {{- define "disco-agent.image" -}} {{- /* Calling convention: - (tuple ) We intentionally pass imageRegistry/imageNamespace as explicit arguments rather than reading from `.Values` inside this helper, because `helm-tool lint` does not reliably track `.Values.*` usage through tuple/variable indirection. */ -}} {{- if ne (len .) 4 -}} {{- fail (printf "ERROR: template \"disco-agent.image\" expects (tuple ), got %d arguments" (len .)) -}} {{- end -}} {{- $image := index . 0 -}} {{- $imageRegistry := index . 1 | default "" -}} {{- $imageNamespace := index . 2 | default "" -}} {{- $defaultReference := index . 3 -}} {{- $repository := "" -}} {{- if $image.repository -}} {{- $repository = $image.repository -}} {{- /* Backwards compatibility: if image.registry is set, additionally prefix the repository with this registry. */ -}} {{- if $image.registry -}} {{- $repository = printf "%s/%s" $image.registry $repository -}} {{- end -}} {{- else -}} {{- $name := required "ERROR: image.name must be set when image.repository is empty" $image.name -}} {{- $repository = $name -}} {{- if $imageNamespace -}} {{- $repository = printf "%s/%s" $imageNamespace $repository -}} {{- end -}} {{- if $imageRegistry -}} {{- $repository = printf "%s/%s" $imageRegistry $repository -}} {{- end -}} {{- /* Backwards compatibility: if image.registry is set, additionally prefix the repository with this registry. 
*/ -}} {{- if $image.registry -}} {{- $repository = printf "%s/%s" $image.registry $repository -}} {{- end -}} {{- end -}} {{- $repository -}} {{- if and $image.tag $image.digest -}} {{- printf ":%s@%s" $image.tag $image.digest -}} {{- else if $image.tag -}} {{- printf ":%s" $image.tag -}} {{- else if $image.digest -}} {{- printf "@%s" $image.digest -}} {{- else -}} {{- printf "%s" $defaultReference -}} {{- end -}} {{- end }} ================================================ FILE: deploy/charts/disco-agent/templates/configmap.yaml ================================================ apiVersion: v1 kind: ConfigMap metadata: name: {{ include "disco-agent.fullname" . }}-config namespace: {{ .Release.Namespace }} labels: {{- include "disco-agent.labels" . | nindent 4 }} data: config.yaml: |- cluster_name: {{ .Values.config.clusterName | quote }} cluster_description: {{ .Values.config.clusterDescription | quote }} period: {{ .Values.config.period | quote }} {{- with .Values.config.excludeAnnotationKeysRegex }} exclude-annotation-keys-regex: {{- . | toYaml | nindent 6 }} {{- end }} {{- with .Values.config.excludeLabelKeysRegex }} exclude-label-keys-regex: {{- . 
| toYaml | nindent 6 }} {{- end }} data-gatherers: - kind: oidc name: ark/oidc - kind: k8s-discovery name: ark/discovery - kind: k8s-dynamic name: ark/secrets config: resource-type: version: v1 resource: secrets field-selectors: - type!=kubernetes.io/dockercfg - type!=kubernetes.io/dockerconfigjson - type!=bootstrap.kubernetes.io/token - type!=helm.sh/release.v1 - kind: k8s-dynamic name: ark/serviceaccounts config: resource-type: resource: serviceaccounts version: v1 - kind: k8s-dynamic name: ark/roles config: resource-type: version: v1 group: rbac.authorization.k8s.io resource: roles - kind: k8s-dynamic name: ark/clusterroles config: resource-type: version: v1 group: rbac.authorization.k8s.io resource: clusterroles - kind: k8s-dynamic name: ark/rolebindings config: resource-type: version: v1 group: rbac.authorization.k8s.io resource: rolebindings - kind: k8s-dynamic name: ark/clusterrolebindings config: resource-type: version: v1 group: rbac.authorization.k8s.io resource: clusterrolebindings - kind: k8s-dynamic name: ark/jobs config: resource-type: version: v1 group: batch resource: jobs - kind: k8s-dynamic name: ark/cronjobs config: resource-type: version: v1 group: batch resource: cronjobs - kind: k8s-dynamic name: ark/deployments config: resource-type: version: v1 group: apps resource: deployments - kind: k8s-dynamic name: ark/statefulsets config: resource-type: version: v1 group: apps resource: statefulsets - kind: k8s-dynamic name: ark/daemonsets config: resource-type: version: v1 group: apps resource: daemonsets - kind: k8s-dynamic name: ark/pods config: resource-type: version: v1 resource: pods - kind: k8s-dynamic name: ark/configmaps config: resource-type: resource: configmaps version: v1 label-selectors: - conjur.org/name=conjur-connect-configmap - kind: k8s-dynamic name: ark/esoexternalsecrets config: resource-type: group: external-secrets.io version: v1 resource: externalsecrets - kind: k8s-dynamic name: ark/esosecretstores config: resource-type: group: 
external-secrets.io version: v1 resource: secretstores - kind: k8s-dynamic name: ark/esoclusterexternalsecrets config: resource-type: group: external-secrets.io version: v1 resource: clusterexternalsecrets - kind: k8s-dynamic name: ark/esoclustersecretstores config: resource-type: group: external-secrets.io version: v1 resource: clustersecretstores ================================================ FILE: deploy/charts/disco-agent/templates/deployment.yaml ================================================ {{- if not .Values.acceptTerms }} {{- fail "\n\n=================================================================\n Terms & Conditions Notice\n=================================================================\n\nBefore installing this application, you must review and accept\nthe terms and conditions available at:\nhttps://www.cyberark.com/contract-terms/\n\nTo proceed with installation, you must indicate acceptance by\nsetting:\n\n - In your values file: acceptTerms: true\n or\n - Via the Helm flag: --set acceptTerms=true\n\nBy continuing with the next command, you confirm that you have\nreviewed and accepted these terms and conditions.\n\n=================================================================\n" }} {{- end }} apiVersion: apps/v1 kind: Deployment metadata: name: {{ include "disco-agent.fullname" . }} labels: {{- include "disco-agent.labels" . | nindent 4 }} spec: replicas: {{ .Values.replicaCount }} selector: matchLabels: {{- include "disco-agent.selectorLabels" . | nindent 6 }} template: metadata: {{- with .Values.podAnnotations }} annotations: {{- toYaml . | nindent 8 }} {{- end }} labels: {{- include "disco-agent.labels" . | nindent 8 }} {{- with .Values.podLabels }} {{- toYaml . | nindent 8 }} {{- end }} spec: {{- with .Values.imagePullSecrets }} imagePullSecrets: {{- toYaml . | nindent 8 }} {{- end }} serviceAccountName: {{ include "disco-agent.serviceAccountName" . }} {{- with .Values.podSecurityContext }} securityContext: {{- toYaml . 
| nindent 8 }} {{- end }} containers: - name: agent {{- with .Values.securityContext }} securityContext: {{- toYaml . | nindent 12 }} {{- end }} image: "{{ template "disco-agent.image" (tuple .Values.image .Values.imageRegistry .Values.imageNamespace (printf ":%s" .Chart.AppVersion)) }}" imagePullPolicy: {{ .Values.image.pullPolicy }} env: - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: POD_UID valueFrom: fieldRef: fieldPath: metadata.uid - name: POD_NODE valueFrom: fieldRef: fieldPath: spec.nodeName - name: ARK_USERNAME valueFrom: secretKeyRef: name: {{ .Values.authentication.secretName }} key: ARK_USERNAME - name: ARK_SECRET valueFrom: secretKeyRef: name: {{ .Values.authentication.secretName }} key: ARK_SECRET - name: ARK_SUBDOMAIN valueFrom: secretKeyRef: name: {{ .Values.authentication.secretName }} key: ARK_SUBDOMAIN - name: ARK_DISCOVERY_API valueFrom: secretKeyRef: name: {{ .Values.authentication.secretName }} key: ARK_DISCOVERY_API optional: true - name: ARK_SEND_SECRET_VALUES value: {{ .Values.config.sendSecretValues | default "false" | quote }} {{- with .Values.http_proxy }} - name: HTTP_PROXY value: {{ . }} {{- end }} {{- with .Values.https_proxy }} - name: HTTPS_PROXY value: {{ . }} {{- end }} {{- with .Values.no_proxy }} - name: NO_PROXY value: {{ . }} {{- end }} args: - "agent" - "-c" - "/etc/disco-agent/config.yaml" - --machine-hub - --logging-format=json {{- if .Values.metrics.enabled }} - --enable-metrics {{- end }} {{- if .Values.pprof.enabled }} - --enable-pprof {{- end }} {{- range .Values.extraArgs }} - {{ . | quote }} {{- end }} {{- with .Values.resources }} resources: {{- toYaml . | nindent 12 }} {{- end }} volumeMounts: - name: config mountPath: "/etc/disco-agent" readOnly: true {{- with .Values.volumeMounts }} {{- toYaml . 
| nindent 12 }} {{- end }} ports: - name: agent-api containerPort: 8081 volumes: - name: config configMap: name: {{ include "disco-agent.fullname" . }}-config optional: false {{- with .Values.volumes }} {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.affinity }} affinity: {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.tolerations }} tolerations: {{- toYaml . | nindent 8 }} {{- end }} ================================================ FILE: deploy/charts/disco-agent/templates/poddisruptionbudget.yaml ================================================ {{- if .Values.podDisruptionBudget.enabled }} apiVersion: policy/v1 kind: PodDisruptionBudget metadata: name: {{ include "disco-agent.fullname" . }} namespace: {{ .Release.Namespace }} labels: {{- include "disco-agent.labels" . | nindent 4 }} spec: selector: matchLabels: {{- include "disco-agent.selectorLabels" . | nindent 6 }} {{- if not (or (hasKey .Values.podDisruptionBudget "minAvailable") (hasKey .Values.podDisruptionBudget "maxUnavailable")) }} minAvailable: 1 # Default value because minAvailable and maxUnavailable are not set {{- end }} {{- if hasKey .Values.podDisruptionBudget "minAvailable" }} minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} {{- end }} {{- if hasKey .Values.podDisruptionBudget "maxUnavailable" }} maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} {{- end }} {{- end }} ================================================ FILE: deploy/charts/disco-agent/templates/podmonitor.yaml ================================================ {{- if and .Values.metrics.enabled .Values.metrics.podmonitor.enabled }} apiVersion: monitoring.coreos.com/v1 kind: PodMonitor metadata: name: {{ include "disco-agent.fullname" . 
}} {{- if .Values.metrics.podmonitor.namespace }} namespace: {{ .Values.metrics.podmonitor.namespace }} {{- else }} namespace: {{ .Release.Namespace | quote }} {{- end }} labels: {{- include "disco-agent.labels" . | nindent 4 }} prometheus: {{ .Values.metrics.podmonitor.prometheusInstance }} {{- with .Values.metrics.podmonitor.labels }} {{- toYaml . | nindent 4 }} {{- end }} {{- with .Values.metrics.podmonitor.annotations }} annotations: {{- toYaml . | nindent 4 }} {{- end }} spec: jobLabel: {{ include "disco-agent.fullname" . }} selector: matchLabels: {{- include "disco-agent.selectorLabels" . | nindent 6 }} {{- if .Values.metrics.podmonitor.namespace }} namespaceSelector: matchNames: - {{ .Release.Namespace | quote }} {{- end }} podMetricsEndpoints: - port: agent-api path: /metrics interval: {{ .Values.metrics.podmonitor.interval }} scrapeTimeout: {{ .Values.metrics.podmonitor.scrapeTimeout }} honorLabels: {{ .Values.metrics.podmonitor.honorLabels }} {{- with .Values.metrics.podmonitor.endpointAdditionalProperties }} {{- toYaml . | nindent 4 }} {{- end }} {{- end }} ================================================ FILE: deploy/charts/disco-agent/templates/rbac.yaml ================================================ --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: {{ include "disco-agent.fullname" . }}-event-emitted labels: {{- include "disco-agent.labels" . | nindent 4 }} rules: - apiGroups: [""] resources: ["events"] verbs: ["create"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: {{ include "disco-agent.fullname" . }}-event-emitted labels: {{- include "disco-agent.labels" . | nindent 4 }} roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: {{ include "disco-agent.fullname" . }}-event-emitted subjects: - kind: ServiceAccount name: {{ include "disco-agent.serviceAccountName" . 
}} namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ include "disco-agent.fullname" . }}-cluster-viewer labels: {{- include "disco-agent.labels" . | nindent 4 }} roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: view subjects: - kind: ServiceAccount name: {{ include "disco-agent.serviceAccountName" . }} namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "disco-agent.fullname" . }}-secret-reader labels: {{- include "disco-agent.labels" . | nindent 4 }} rules: - apiGroups: [""] resources: ["secrets"] verbs: ["get", "list", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ include "disco-agent.fullname" . }}-secret-reader labels: {{- include "disco-agent.labels" . | nindent 4 }} roleRef: kind: ClusterRole name: {{ include "disco-agent.fullname" . }}-secret-reader apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: {{ include "disco-agent.serviceAccountName" . }} namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "disco-agent.fullname" . }}-rbac-reader labels: {{- include "disco-agent.labels" . | nindent 4 }} rules: - apiGroups: ["rbac.authorization.k8s.io"] resources: - roles - clusterroles - rolebindings - clusterrolebindings verbs: ["get", "list", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ include "disco-agent.fullname" . }}-rbac-reader labels: {{- include "disco-agent.labels" . | nindent 4 }} roleRef: kind: ClusterRole name: {{ include "disco-agent.fullname" . }}-rbac-reader apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: {{ include "disco-agent.serviceAccountName" . 
}} namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ include "disco-agent.fullname" . }}-oidc-discovery labels: {{- include "disco-agent.labels" . | nindent 4 }} roleRef: kind: ClusterRole name: system:service-account-issuer-discovery apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: {{ include "disco-agent.serviceAccountName" . }} namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "disco-agent.fullname" . }}-eso-reader labels: {{- include "disco-agent.labels" . | nindent 4 }} rules: - apiGroups: ["external-secrets.io"] resources: - externalsecrets - clusterexternalsecrets - secretstores - clustersecretstores verbs: ["get", "list", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ include "disco-agent.fullname" . }}-eso-reader labels: {{- include "disco-agent.labels" . | nindent 4 }} roleRef: kind: ClusterRole name: {{ include "disco-agent.fullname" . }}-eso-reader apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: {{ include "disco-agent.serviceAccountName" . }} namespace: {{ .Release.Namespace }} ================================================ FILE: deploy/charts/disco-agent/templates/serviceaccount.yaml ================================================ {{- if .Values.serviceAccount.create -}} apiVersion: v1 kind: ServiceAccount metadata: name: {{ include "disco-agent.serviceAccountName" . }} labels: {{- include "disco-agent.labels" . | nindent 4 }} {{- with .Values.serviceAccount.annotations }} annotations: {{- toYaml . 
| nindent 4 }} {{- end }} automountServiceAccountToken: {{ .Values.serviceAccount.automount }} {{- end }} ================================================ FILE: deploy/charts/disco-agent/tests/README.md ================================================ # `helm unittest` We use `helm unittest` to test the YAML output coming out of the Helm chart. In order to update the snapshots, run the following command: ```bash make test-helm-snapshot ``` ================================================ FILE: deploy/charts/disco-agent/tests/__snapshot__/configmap_test.yaml.snap ================================================ custom-cluster-description: 1: | apiVersion: v1 data: config.yaml: |- cluster_name: "" cluster_description: "A cloud hosted Kubernetes cluster hosting production workloads.\n\nteam: team-1\nemail: team-1@example.com\npurpose: Production workloads\n" period: "12h0m0s" data-gatherers: - kind: oidc name: ark/oidc - kind: k8s-discovery name: ark/discovery - kind: k8s-dynamic name: ark/secrets config: resource-type: version: v1 resource: secrets field-selectors: - type!=kubernetes.io/dockercfg - type!=kubernetes.io/dockerconfigjson - type!=bootstrap.kubernetes.io/token - type!=helm.sh/release.v1 - kind: k8s-dynamic name: ark/serviceaccounts config: resource-type: resource: serviceaccounts version: v1 - kind: k8s-dynamic name: ark/roles config: resource-type: version: v1 group: rbac.authorization.k8s.io resource: roles - kind: k8s-dynamic name: ark/clusterroles config: resource-type: version: v1 group: rbac.authorization.k8s.io resource: clusterroles - kind: k8s-dynamic name: ark/rolebindings config: resource-type: version: v1 group: rbac.authorization.k8s.io resource: rolebindings - kind: k8s-dynamic name: ark/clusterrolebindings config: resource-type: version: v1 group: rbac.authorization.k8s.io resource: clusterrolebindings - kind: k8s-dynamic name: ark/jobs config: resource-type: version: v1 group: batch resource: jobs - kind: k8s-dynamic name: ark/cronjobs 
config: resource-type: version: v1 group: batch resource: cronjobs - kind: k8s-dynamic name: ark/deployments config: resource-type: version: v1 group: apps resource: deployments - kind: k8s-dynamic name: ark/statefulsets config: resource-type: version: v1 group: apps resource: statefulsets - kind: k8s-dynamic name: ark/daemonsets config: resource-type: version: v1 group: apps resource: daemonsets - kind: k8s-dynamic name: ark/pods config: resource-type: version: v1 resource: pods - kind: k8s-dynamic name: ark/configmaps config: resource-type: resource: configmaps version: v1 label-selectors: - conjur.org/name=conjur-connect-configmap - kind: k8s-dynamic name: ark/esoexternalsecrets config: resource-type: group: external-secrets.io version: v1 resource: externalsecrets - kind: k8s-dynamic name: ark/esosecretstores config: resource-type: group: external-secrets.io version: v1 resource: secretstores - kind: k8s-dynamic name: ark/esoclusterexternalsecrets config: resource-type: group: external-secrets.io version: v1 resource: clusterexternalsecrets - kind: k8s-dynamic name: ark/esoclustersecretstores config: resource-type: group: external-secrets.io version: v1 resource: clustersecretstores kind: ConfigMap metadata: labels: app.kubernetes.io/instance: test app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: disco-agent app.kubernetes.io/version: v0.0.0 helm.sh/chart: disco-agent-0.0.0 name: test-disco-agent-config namespace: test-ns custom-cluster-name: 1: | apiVersion: v1 data: config.yaml: |- cluster_name: "cluster-1 region-1 cloud-1 " cluster_description: "" period: "12h0m0s" data-gatherers: - kind: oidc name: ark/oidc - kind: k8s-discovery name: ark/discovery - kind: k8s-dynamic name: ark/secrets config: resource-type: version: v1 resource: secrets field-selectors: - type!=kubernetes.io/dockercfg - type!=kubernetes.io/dockerconfigjson - type!=bootstrap.kubernetes.io/token - type!=helm.sh/release.v1 - kind: k8s-dynamic name: ark/serviceaccounts config: 
resource-type: resource: serviceaccounts version: v1 - kind: k8s-dynamic name: ark/roles config: resource-type: version: v1 group: rbac.authorization.k8s.io resource: roles - kind: k8s-dynamic name: ark/clusterroles config: resource-type: version: v1 group: rbac.authorization.k8s.io resource: clusterroles - kind: k8s-dynamic name: ark/rolebindings config: resource-type: version: v1 group: rbac.authorization.k8s.io resource: rolebindings - kind: k8s-dynamic name: ark/clusterrolebindings config: resource-type: version: v1 group: rbac.authorization.k8s.io resource: clusterrolebindings - kind: k8s-dynamic name: ark/jobs config: resource-type: version: v1 group: batch resource: jobs - kind: k8s-dynamic name: ark/cronjobs config: resource-type: version: v1 group: batch resource: cronjobs - kind: k8s-dynamic name: ark/deployments config: resource-type: version: v1 group: apps resource: deployments - kind: k8s-dynamic name: ark/statefulsets config: resource-type: version: v1 group: apps resource: statefulsets - kind: k8s-dynamic name: ark/daemonsets config: resource-type: version: v1 group: apps resource: daemonsets - kind: k8s-dynamic name: ark/pods config: resource-type: version: v1 resource: pods - kind: k8s-dynamic name: ark/configmaps config: resource-type: resource: configmaps version: v1 label-selectors: - conjur.org/name=conjur-connect-configmap - kind: k8s-dynamic name: ark/esoexternalsecrets config: resource-type: group: external-secrets.io version: v1 resource: externalsecrets - kind: k8s-dynamic name: ark/esosecretstores config: resource-type: group: external-secrets.io version: v1 resource: secretstores - kind: k8s-dynamic name: ark/esoclusterexternalsecrets config: resource-type: group: external-secrets.io version: v1 resource: clusterexternalsecrets - kind: k8s-dynamic name: ark/esoclustersecretstores config: resource-type: group: external-secrets.io version: v1 resource: clustersecretstores kind: ConfigMap metadata: labels: app.kubernetes.io/instance: test 
app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: disco-agent app.kubernetes.io/version: v0.0.0 helm.sh/chart: disco-agent-0.0.0 name: test-disco-agent-config namespace: test-ns custom-period: 1: | apiVersion: v1 data: config.yaml: |- cluster_name: "" cluster_description: "" period: "1m" data-gatherers: - kind: oidc name: ark/oidc - kind: k8s-discovery name: ark/discovery - kind: k8s-dynamic name: ark/secrets config: resource-type: version: v1 resource: secrets field-selectors: - type!=kubernetes.io/dockercfg - type!=kubernetes.io/dockerconfigjson - type!=bootstrap.kubernetes.io/token - type!=helm.sh/release.v1 - kind: k8s-dynamic name: ark/serviceaccounts config: resource-type: resource: serviceaccounts version: v1 - kind: k8s-dynamic name: ark/roles config: resource-type: version: v1 group: rbac.authorization.k8s.io resource: roles - kind: k8s-dynamic name: ark/clusterroles config: resource-type: version: v1 group: rbac.authorization.k8s.io resource: clusterroles - kind: k8s-dynamic name: ark/rolebindings config: resource-type: version: v1 group: rbac.authorization.k8s.io resource: rolebindings - kind: k8s-dynamic name: ark/clusterrolebindings config: resource-type: version: v1 group: rbac.authorization.k8s.io resource: clusterrolebindings - kind: k8s-dynamic name: ark/jobs config: resource-type: version: v1 group: batch resource: jobs - kind: k8s-dynamic name: ark/cronjobs config: resource-type: version: v1 group: batch resource: cronjobs - kind: k8s-dynamic name: ark/deployments config: resource-type: version: v1 group: apps resource: deployments - kind: k8s-dynamic name: ark/statefulsets config: resource-type: version: v1 group: apps resource: statefulsets - kind: k8s-dynamic name: ark/daemonsets config: resource-type: version: v1 group: apps resource: daemonsets - kind: k8s-dynamic name: ark/pods config: resource-type: version: v1 resource: pods - kind: k8s-dynamic name: ark/configmaps config: resource-type: resource: configmaps version: v1 
label-selectors: - conjur.org/name=conjur-connect-configmap - kind: k8s-dynamic name: ark/esoexternalsecrets config: resource-type: group: external-secrets.io version: v1 resource: externalsecrets - kind: k8s-dynamic name: ark/esosecretstores config: resource-type: group: external-secrets.io version: v1 resource: secretstores - kind: k8s-dynamic name: ark/esoclusterexternalsecrets config: resource-type: group: external-secrets.io version: v1 resource: clusterexternalsecrets - kind: k8s-dynamic name: ark/esoclustersecretstores config: resource-type: group: external-secrets.io version: v1 resource: clustersecretstores kind: ConfigMap metadata: labels: app.kubernetes.io/instance: test app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: disco-agent app.kubernetes.io/version: v0.0.0 helm.sh/chart: disco-agent-0.0.0 name: test-disco-agent-config namespace: test-ns defaults: 1: | apiVersion: v1 data: config.yaml: |- cluster_name: "" cluster_description: "" period: "12h0m0s" data-gatherers: - kind: oidc name: ark/oidc - kind: k8s-discovery name: ark/discovery - kind: k8s-dynamic name: ark/secrets config: resource-type: version: v1 resource: secrets field-selectors: - type!=kubernetes.io/dockercfg - type!=kubernetes.io/dockerconfigjson - type!=bootstrap.kubernetes.io/token - type!=helm.sh/release.v1 - kind: k8s-dynamic name: ark/serviceaccounts config: resource-type: resource: serviceaccounts version: v1 - kind: k8s-dynamic name: ark/roles config: resource-type: version: v1 group: rbac.authorization.k8s.io resource: roles - kind: k8s-dynamic name: ark/clusterroles config: resource-type: version: v1 group: rbac.authorization.k8s.io resource: clusterroles - kind: k8s-dynamic name: ark/rolebindings config: resource-type: version: v1 group: rbac.authorization.k8s.io resource: rolebindings - kind: k8s-dynamic name: ark/clusterrolebindings config: resource-type: version: v1 group: rbac.authorization.k8s.io resource: clusterrolebindings - kind: k8s-dynamic name: ark/jobs 
config: resource-type: version: v1 group: batch resource: jobs - kind: k8s-dynamic name: ark/cronjobs config: resource-type: version: v1 group: batch resource: cronjobs - kind: k8s-dynamic name: ark/deployments config: resource-type: version: v1 group: apps resource: deployments - kind: k8s-dynamic name: ark/statefulsets config: resource-type: version: v1 group: apps resource: statefulsets - kind: k8s-dynamic name: ark/daemonsets config: resource-type: version: v1 group: apps resource: daemonsets - kind: k8s-dynamic name: ark/pods config: resource-type: version: v1 resource: pods - kind: k8s-dynamic name: ark/configmaps config: resource-type: resource: configmaps version: v1 label-selectors: - conjur.org/name=conjur-connect-configmap - kind: k8s-dynamic name: ark/esoexternalsecrets config: resource-type: group: external-secrets.io version: v1 resource: externalsecrets - kind: k8s-dynamic name: ark/esosecretstores config: resource-type: group: external-secrets.io version: v1 resource: secretstores - kind: k8s-dynamic name: ark/esoclusterexternalsecrets config: resource-type: group: external-secrets.io version: v1 resource: clusterexternalsecrets - kind: k8s-dynamic name: ark/esoclustersecretstores config: resource-type: group: external-secrets.io version: v1 resource: clustersecretstores kind: ConfigMap metadata: labels: app.kubernetes.io/instance: test app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: disco-agent app.kubernetes.io/version: v0.0.0 helm.sh/chart: disco-agent-0.0.0 name: test-disco-agent-config namespace: test-ns ================================================ FILE: deploy/charts/disco-agent/tests/configmap_test.yaml ================================================ suite: test the contents of the config.yaml templates: - configmap.yaml release: name: test namespace: test-ns tests: - it: defaults asserts: - matchSnapshot: {} - it: custom-period set: config.period: 1m asserts: - matchSnapshot: {} - it: custom-cluster-name set: 
config.clusterName: "cluster-1 region-1 cloud-1 " asserts: - matchSnapshot: {} - it: custom-cluster-description set: config.clusterDescription: | A cloud hosted Kubernetes cluster hosting production workloads. team: team-1 email: team-1@example.com purpose: Production workloads asserts: - matchSnapshot: {} ================================================ FILE: deploy/charts/disco-agent/values.linter.exceptions ================================================ ================================================ FILE: deploy/charts/disco-agent/values.schema.json ================================================ { "$defs": { "helm-values": { "additionalProperties": false, "properties": { "acceptTerms": { "$ref": "#/$defs/helm-values.acceptTerms" }, "affinity": { "$ref": "#/$defs/helm-values.affinity" }, "authentication": { "$ref": "#/$defs/helm-values.authentication" }, "config": { "$ref": "#/$defs/helm-values.config" }, "extraArgs": { "$ref": "#/$defs/helm-values.extraArgs" }, "fullnameOverride": { "$ref": "#/$defs/helm-values.fullnameOverride" }, "global": { "$ref": "#/$defs/helm-values.global" }, "http_proxy": { "$ref": "#/$defs/helm-values.http_proxy" }, "https_proxy": { "$ref": "#/$defs/helm-values.https_proxy" }, "image": { "$ref": "#/$defs/helm-values.image" }, "imageNamespace": { "$ref": "#/$defs/helm-values.imageNamespace" }, "imagePullSecrets": { "$ref": "#/$defs/helm-values.imagePullSecrets" }, "imageRegistry": { "$ref": "#/$defs/helm-values.imageRegistry" }, "metrics": { "$ref": "#/$defs/helm-values.metrics" }, "nameOverride": { "$ref": "#/$defs/helm-values.nameOverride" }, "no_proxy": { "$ref": "#/$defs/helm-values.no_proxy" }, "nodeSelector": { "$ref": "#/$defs/helm-values.nodeSelector" }, "podAnnotations": { "$ref": "#/$defs/helm-values.podAnnotations" }, "podDisruptionBudget": { "$ref": "#/$defs/helm-values.podDisruptionBudget" }, "podLabels": { "$ref": "#/$defs/helm-values.podLabels" }, "podSecurityContext": { "$ref": 
"#/$defs/helm-values.podSecurityContext" }, "pprof": { "$ref": "#/$defs/helm-values.pprof" }, "replicaCount": { "$ref": "#/$defs/helm-values.replicaCount" }, "resources": { "$ref": "#/$defs/helm-values.resources" }, "securityContext": { "$ref": "#/$defs/helm-values.securityContext" }, "serviceAccount": { "$ref": "#/$defs/helm-values.serviceAccount" }, "tolerations": { "$ref": "#/$defs/helm-values.tolerations" }, "volumeMounts": { "$ref": "#/$defs/helm-values.volumeMounts" }, "volumes": { "$ref": "#/$defs/helm-values.volumes" } }, "type": "object" }, "helm-values.acceptTerms": { "default": false, "description": "Must be set to indicate that you have read and accepted the CyberArk Terms of Service. If false, the helm chart will fail to install and will print a message with instructions on how to accept the TOS.", "type": "boolean" }, "helm-values.affinity": { "default": {}, "type": "object" }, "helm-values.authentication": { "additionalProperties": false, "properties": { "secretName": { "$ref": "#/$defs/helm-values.authentication.secretName" } }, "type": "object" }, "helm-values.authentication.secretName": { "default": "agent-credentials", "type": "string" }, "helm-values.config": { "additionalProperties": false, "properties": { "clusterDescription": { "$ref": "#/$defs/helm-values.config.clusterDescription" }, "clusterName": { "$ref": "#/$defs/helm-values.config.clusterName" }, "excludeAnnotationKeysRegex": { "$ref": "#/$defs/helm-values.config.excludeAnnotationKeysRegex" }, "excludeLabelKeysRegex": { "$ref": "#/$defs/helm-values.config.excludeLabelKeysRegex" }, "period": { "$ref": "#/$defs/helm-values.config.period" }, "sendSecretValues": { "$ref": "#/$defs/helm-values.config.sendSecretValues" } }, "type": "object" }, "helm-values.config.clusterDescription": { "default": "", "description": "A short description of the cluster where the agent is deployed (optional).\n\nThis description will be associated with the data that the agent uploads to the Discovery and 
Context service. The description may include contact information such as the email address of the cluster administrator, so that any problems and risks identified by the Discovery and Context service can be communicated to the people responsible for the affected secrets.", "type": "string" }, "helm-values.config.clusterName": { "default": "", "description": "A human readable name for the cluster where the agent is deployed (optional).\n\nThis cluster name will be associated with the data that the agent uploads to the Discovery and Context service. If empty (the default), the service account name will be used instead.", "type": "string" }, "helm-values.config.excludeAnnotationKeysRegex": { "default": [], "description": "You can configure the agent to exclude some annotations or labels from being pushed . All Kubernetes objects are affected. The objects are still pushed, but the specified annotations and labels are removed before being pushed.\n\nDots is the only character that needs to be escaped in the regex. Use either double quotes with escaped single quotes or unquoted strings for the regex to avoid YAML parsing issues with `\\.`.\n\nExample: excludeAnnotationKeysRegex: ['^kapp\\.k14s\\.io/original.*']", "items": {}, "type": "array" }, "helm-values.config.excludeLabelKeysRegex": { "default": [], "items": {}, "type": "array" }, "helm-values.config.period": { "default": "12h0m0s", "description": "Push data every 12 hours unless changed.", "type": "string" }, "helm-values.config.sendSecretValues": { "default": true, "description": "Enable sending of Secret values to CyberArk in addition to metadata. Metadata is always sent, but the actual values of Secrets are not sent by default. 
When enabled, Secret data is encrypted using envelope encryption using a key managed by CyberArk, fetched from the Discovery and Context service.", "type": "boolean" }, "helm-values.extraArgs": { "default": [], "description": "extraArgs:\n- --logging-format=json\n- --log-level=6 # To enable HTTP request logging", "items": {}, "type": "array" }, "helm-values.fullnameOverride": { "default": "", "type": "string" }, "helm-values.global": { "description": "Global values shared across all (sub)charts" }, "helm-values.http_proxy": { "description": "Configures the HTTP_PROXY environment variable where a HTTP proxy is required.", "type": "string" }, "helm-values.https_proxy": { "description": "Configures the HTTPS_PROXY environment variable where a HTTP proxy is required.", "type": "string" }, "helm-values.image": { "additionalProperties": false, "properties": { "digest": { "$ref": "#/$defs/helm-values.image.digest" }, "name": { "$ref": "#/$defs/helm-values.image.name" }, "pullPolicy": { "$ref": "#/$defs/helm-values.image.pullPolicy" }, "registry": { "$ref": "#/$defs/helm-values.image.registry" }, "repository": { "$ref": "#/$defs/helm-values.image.repository" }, "tag": { "$ref": "#/$defs/helm-values.image.tag" } }, "type": "object" }, "helm-values.image.digest": { "default": "", "description": "Override the image digest to deploy by setting this variable. 
If set together with `image.tag`, the rendered image will include both tag and digest.", "type": "string" }, "helm-values.image.name": { "default": "disco-agent", "description": "The image name for the Discovery Agent.\nThis is used (together with `imageRegistry` and `imageNamespace`) to construct the full image reference.", "type": "string" }, "helm-values.image.pullPolicy": { "default": "IfNotPresent", "description": "This sets the pull policy for images.", "type": "string" }, "helm-values.image.registry": { "description": "Deprecated: per-component registry prefix.\n\nIf set, this value is *prepended* to the image repository that the chart would otherwise render. This applies both when `image.repository` is set and when the repository is computed from\n`imageRegistry` + `imageNamespace` + `image.name`.\n\nThis can produce \"double registry\" style references such as\n`legacy.example.io/quay.io/jetstack/...`. Prefer using the global\n`imageRegistry`/`imageNamespace` values.", "type": "string" }, "helm-values.image.repository": { "default": "", "description": "Full repository override (takes precedence over `imageRegistry`, `imageNamespace`, and `image.name`).\nExample: quay.io/jetstack/disco-agent", "type": "string" }, "helm-values.image.tag": { "default": "", "description": "Override the image tag to deploy by setting this variable. 
If no value is set, the chart's appVersion is used.", "type": "string" }, "helm-values.imageNamespace": { "default": "jetstack", "description": "The repository namespace used for disco-agent images by default.\nExamples:\n- jetstack\n- custom-namespace", "type": "string" }, "helm-values.imagePullSecrets": { "default": [], "description": "This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/", "items": {}, "type": "array" }, "helm-values.imageRegistry": { "default": "quay.io", "description": "The container registry used for disco-agent images by default. This can include path prefixes (e.g. \"artifactory.example.com/docker\").", "type": "string" }, "helm-values.metrics": { "additionalProperties": false, "properties": { "enabled": { "$ref": "#/$defs/helm-values.metrics.enabled" }, "podmonitor": { "$ref": "#/$defs/helm-values.metrics.podmonitor" } }, "type": "object" }, "helm-values.metrics.enabled": { "default": true, "description": "Enable the metrics server.\nIf false, the metrics server will be disabled and the other metrics fields below will be ignored.", "type": "boolean" }, "helm-values.metrics.podmonitor": { "additionalProperties": false, "properties": { "annotations": { "$ref": "#/$defs/helm-values.metrics.podmonitor.annotations" }, "enabled": { "$ref": "#/$defs/helm-values.metrics.podmonitor.enabled" }, "endpointAdditionalProperties": { "$ref": "#/$defs/helm-values.metrics.podmonitor.endpointAdditionalProperties" }, "honorLabels": { "$ref": "#/$defs/helm-values.metrics.podmonitor.honorLabels" }, "interval": { "$ref": "#/$defs/helm-values.metrics.podmonitor.interval" }, "labels": { "$ref": "#/$defs/helm-values.metrics.podmonitor.labels" }, "namespace": { "$ref": "#/$defs/helm-values.metrics.podmonitor.namespace" }, "prometheusInstance": { "$ref": "#/$defs/helm-values.metrics.podmonitor.prometheusInstance" }, 
"scrapeTimeout": { "$ref": "#/$defs/helm-values.metrics.podmonitor.scrapeTimeout" } }, "type": "object" }, "helm-values.metrics.podmonitor.annotations": { "default": {}, "description": "Additional annotations to add to the PodMonitor.", "type": "object" }, "helm-values.metrics.podmonitor.enabled": { "default": false, "description": "Create a PodMonitor to add the metrics to Prometheus, if you are using Prometheus Operator. See https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor", "type": "boolean" }, "helm-values.metrics.podmonitor.endpointAdditionalProperties": { "default": {}, "description": "EndpointAdditionalProperties allows setting additional properties on the endpoint such as relabelings, metricRelabelings etc.\n\nFor example:\nendpointAdditionalProperties:\n relabelings:\n - action: replace\n sourceLabels:\n - __meta_kubernetes_pod_node_name\n targetLabel: instance", "type": "object" }, "helm-values.metrics.podmonitor.honorLabels": { "default": false, "description": "Keep labels from scraped data, overriding server-side labels.", "type": "boolean" }, "helm-values.metrics.podmonitor.interval": { "default": "60s", "description": "The interval to scrape metrics.", "type": "string" }, "helm-values.metrics.podmonitor.labels": { "default": {}, "description": "Additional labels to add to the PodMonitor.", "type": "object" }, "helm-values.metrics.podmonitor.namespace": { "description": "The namespace that the pod monitor should live in.\nDefaults to the disco-agent namespace.", "type": "string" }, "helm-values.metrics.podmonitor.prometheusInstance": { "default": "default", "description": "Specifies the `prometheus` label on the created PodMonitor. 
This is used when different Prometheus instances have label selectors matching different PodMonitors.", "type": "string" }, "helm-values.metrics.podmonitor.scrapeTimeout": { "default": "30s", "description": "The timeout before a metrics scrape fails.", "type": "string" }, "helm-values.nameOverride": { "default": "", "description": "This is to override the chart name.", "type": "string" }, "helm-values.no_proxy": { "description": "Configures the NO_PROXY environment variable where a HTTP proxy is required, but certain domains should be excluded.", "type": "string" }, "helm-values.nodeSelector": { "default": {}, "type": "object" }, "helm-values.podAnnotations": { "default": {}, "description": "This is for setting Kubernetes Annotations to a Pod. For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/", "type": "object" }, "helm-values.podDisruptionBudget": { "default": { "enabled": false }, "description": "Configure a PodDisruptionBudget for the agent's Deployment. 
If running with multiple replicas, consider setting podDisruptionBudget.enabled to true.", "type": "object" }, "helm-values.podLabels": { "default": {}, "description": "This is for setting Kubernetes Labels to a Pod.\nFor more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", "type": "object" }, "helm-values.podSecurityContext": { "default": {}, "type": "object" }, "helm-values.pprof": { "additionalProperties": false, "properties": { "enabled": { "$ref": "#/$defs/helm-values.pprof.enabled" } }, "type": "object" }, "helm-values.pprof.enabled": { "default": false, "description": "Enable profiling with the pprof endpoint", "type": "boolean" }, "helm-values.replicaCount": { "default": 1, "description": "This will set the replicaset count more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/", "type": "number" }, "helm-values.resources": { "default": {}, "type": "object" }, "helm-values.securityContext": { "default": { "allowPrivilegeEscalation": false, "capabilities": { "drop": [ "ALL" ] }, "readOnlyRootFilesystem": true, "runAsNonRoot": true, "seccompProfile": { "type": "RuntimeDefault" } }, "description": "Add Container specific SecurityContext settings to the container. Takes precedence over `podSecurityContext` when set. 
See https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container", "type": "object" }, "helm-values.serviceAccount": { "additionalProperties": false, "properties": { "annotations": { "$ref": "#/$defs/helm-values.serviceAccount.annotations" }, "automount": { "$ref": "#/$defs/helm-values.serviceAccount.automount" }, "create": { "$ref": "#/$defs/helm-values.serviceAccount.create" }, "name": { "$ref": "#/$defs/helm-values.serviceAccount.name" } }, "type": "object" }, "helm-values.serviceAccount.annotations": { "default": {}, "description": "Annotations to add to the service account", "type": "object" }, "helm-values.serviceAccount.automount": { "default": true, "description": "Automatically mount a ServiceAccount's API credentials?", "type": "boolean" }, "helm-values.serviceAccount.create": { "default": true, "description": "Specifies whether a service account should be created", "type": "boolean" }, "helm-values.serviceAccount.name": { "default": "", "description": "The name of the service account to use.\nIf not set and create is true, a name is generated using the fullname template", "type": "string" }, "helm-values.tolerations": { "default": [], "items": {}, "type": "array" }, "helm-values.volumeMounts": { "default": [], "description": "Additional volumeMounts on the output Deployment definition.", "items": {}, "type": "array" }, "helm-values.volumes": { "default": [], "description": "Additional volumes on the output Deployment definition.", "items": {}, "type": "array" } }, "$ref": "#/$defs/helm-values", "$schema": "http://json-schema.org/draft-07/schema#" } ================================================ FILE: deploy/charts/disco-agent/values.yaml ================================================ # Default values for disco-agent. # This is a YAML-formatted file. # Declare variables to be passed into your templates. 
# This will set the replicaset count more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/ replicaCount: 1 # Must be set to indicate that you have read and accepted the CyberArk Terms of Service. If false, the helm chart will fail to install and will print a message with instructions on how to accept the TOS. acceptTerms: false # The container registry used for disco-agent images by default. # This can include path prefixes (e.g. "artifactory.example.com/docker"). # +docs:property imageRegistry: "quay.io" # The repository namespace used for disco-agent images by default. # Examples: # - jetstack # - custom-namespace # +docs:property imageNamespace: "jetstack" # This sets the container image more information can be found here: https://kubernetes.io/docs/concepts/containers/images/ image: # Deprecated: per-component registry prefix. # # If set, this value is *prepended* to the image repository that the chart would otherwise render. # This applies both when `image.repository` is set and when the repository is computed from # `imageRegistry` + `imageNamespace` + `image.name`. # # This can produce "double registry" style references such as # `legacy.example.io/quay.io/jetstack/...`. Prefer using the global # `imageRegistry`/`imageNamespace` values. # +docs:property # registry: quay.io # Full repository override (takes precedence over `imageRegistry`, `imageNamespace`, # and `image.name`). # Example: quay.io/jetstack/disco-agent # +docs:property repository: "" # The image name for the Discovery Agent. # This is used (together with `imageRegistry` and `imageNamespace`) to construct the full # image reference. # +docs:property name: disco-agent # This sets the pull policy for images. pullPolicy: IfNotPresent # Override the image tag to deploy by setting this variable. # If no value is set, the chart's appVersion is used. tag: "" # Override the image digest to deploy by setting this variable. 
# If set together with `image.tag`, the rendered image will include both tag and digest. digest: "" # This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ imagePullSecrets: [] # This is to override the chart name. nameOverride: "" fullnameOverride: "" # This section builds out the service account more information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/ serviceAccount: # Specifies whether a service account should be created create: true # Automatically mount a ServiceAccount's API credentials? automount: true # Annotations to add to the service account annotations: {} # The name of the service account to use. # If not set and create is true, a name is generated using the fullname template name: "" # This is for setting Kubernetes Annotations to a Pod. # For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ podAnnotations: {} # This is for setting Kubernetes Labels to a Pod. # For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ podLabels: {} podSecurityContext: {} # fsGroup: 2000 # Add Container specific SecurityContext settings to the container. Takes # precedence over `podSecurityContext` when set. See # https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container # +docs:property securityContext: capabilities: drop: - ALL readOnlyRootFilesystem: true runAsNonRoot: true allowPrivilegeEscalation: false seccompProfile: { type: RuntimeDefault } resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. 
If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi # Additional volumes on the output Deployment definition. volumes: [] # - name: foo # secret: # secretName: mysecret # optional: false # Additional volumeMounts on the output Deployment definition. volumeMounts: [] # - name: foo # mountPath: "/etc/foo" # readOnly: true nodeSelector: {} tolerations: [] affinity: {} # Configures the HTTP_PROXY environment variable where a HTTP proxy is required. # +docs:property # http_proxy: "http://proxy:8080" # Configures the HTTPS_PROXY environment variable where a HTTP proxy is required. # +docs:property # https_proxy: "https://proxy:8080" # Configures the NO_PROXY environment variable where a HTTP proxy is required, # but certain domains should be excluded. # +docs:property # no_proxy: 127.0.0.1,localhost # Configure a PodDisruptionBudget for the agent's Deployment. If running with multiple # replicas, consider setting podDisruptionBudget.enabled to true. # +docs:property podDisruptionBudget: # Enable or disable the PodDisruptionBudget resource, which helps prevent downtime # during voluntary disruptions such as during a Node upgrade. enabled: false # Configure the minimum available pods for disruptions. Can either be set to # an integer (e.g. 1) or a percentage value (e.g. 25%). # Cannot be used if `maxUnavailable` is set. # +docs:property # minAvailable: 1 # Configure the maximum unavailable pods for disruptions. Can either be set to # an integer (e.g. 1) or a percentage value (e.g. 25%). # Cannot be used if `minAvailable` is set. # +docs:property # maxUnavailable: 1 # Configuration for the agent config: # Push data every 12 hours unless changed. period: "12h0m0s" # You can configure the agent to exclude some annotations or # labels from being pushed. All Kubernetes objects # are affected. 
The objects are still pushed, but the specified annotations # and labels are removed before being pushed. # # The dot is the only character that needs to be escaped in the regex. Use either # double quotes with escaped single quotes or unquoted strings for the regex # to avoid YAML parsing issues with `\.`. # # Example: excludeAnnotationKeysRegex: ['^kapp\.k14s\.io/original.*'] excludeAnnotationKeysRegex: [] excludeLabelKeysRegex: [] # A human readable name for the cluster where the agent is deployed (optional). # # This cluster name will be associated with the data that the agent uploads to # the Discovery and Context service. If empty (the default), the service # account name will be used instead. clusterName: "" # A short description of the cluster where the agent is deployed (optional). # # This description will be associated with the data that the agent uploads to # the Discovery and Context service. The description may include contact # information such as the email address of the cluster administrator, so that # any problems and risks identified by the Discovery and Context service can # be communicated to the people responsible for the affected secrets. clusterDescription: "" # Enable sending of Secret values to CyberArk in addition to metadata. # Metadata is always sent, but the actual values of Secrets are not sent by default. # When enabled, Secret data is encrypted using envelope encryption with # a key managed by CyberArk, fetched from the Discovery and Context service. sendSecretValues: true authentication: secretName: agent-credentials # extraArgs: # - --logging-format=json # - --log-level=6 # To enable HTTP request logging extraArgs: [] pprof: # Enable profiling with the pprof endpoint enabled: false metrics: # Enable the metrics server. # If false, the metrics server will be disabled and the other metrics fields below will be ignored. 
enabled: true podmonitor: # Create a PodMonitor to add the metrics to Prometheus, if you are using Prometheus Operator. # See https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor enabled: false # The namespace that the pod monitor should live in. # Defaults to the disco-agent namespace. # +docs:property # namespace: cyberark # Specifies the `prometheus` label on the created PodMonitor. # This is used when different Prometheus instances have label selectors # matching different PodMonitors. prometheusInstance: default # The interval to scrape metrics. interval: 60s # The timeout before a metrics scrape fails. scrapeTimeout: 30s # Additional labels to add to the PodMonitor. labels: {} # Additional annotations to add to the PodMonitor. annotations: {} # Keep labels from scraped data, overriding server-side labels. honorLabels: false # EndpointAdditionalProperties allows setting additional properties on the endpoint such as relabelings, metricRelabelings etc. # # For example: # endpointAdditionalProperties: # relabelings: # - action: replace # sourceLabels: # - __meta_kubernetes_pod_node_name # targetLabel: instance endpointAdditionalProperties: {} ================================================ FILE: deploy/charts/discovery-agent/.helmignore ================================================ # Patterns to ignore when building packages. # This supports shell glob matching, relative path matching, and # negation (prefixed with !). Only one pattern per line. .DS_Store # Common VCS dirs .git/ .gitignore .bzr/ .bzrignore .hg/ .hgignore .svn/ # Common backup files *.swp *.bak *.tmp *.orig *~ # Various IDEs .project .idea/ *.tmproj .vscode/ ================================================ FILE: deploy/charts/discovery-agent/Chart.yaml ================================================ apiVersion: v2 name: discovery-agent description: |- The discovery-agent connects your Kubernetes or Openshift cluster to NGTS for discovery and monitoring. 
maintainers: - name: Palo Alto Networks url: https://www.paloaltonetworks.com sources: - https://github.com/jetstack/jetstack-secure # These versions are meant to be overridden by `make helm-chart`. No `v` prefix # for the `version` because Helm doesn't support auto-determining the latest # version for OCI Helm charts that use a `v` prefix. version: 0.0.0 appVersion: "v0.0.0" ================================================ FILE: deploy/charts/discovery-agent/README.md ================================================ # discovery-agent The Discovery Agent connects your Kubernetes or OpenShift cluster to Palo Alto NGTS. ## Values #### **config.tsgID** ~ `string` > Default value: > ```yaml > "" > ``` Required: The TSG (Tenant Service Group) ID to use when connecting to SCM. NB: TSG IDs are numeric, but must be treated as strings to avoid issues with YAML data types. With the Helm CLI use `--set-string`; with YAML always pass TSG IDs in double quotes. #### **config.clusterName** ~ `string` > Default value: > ```yaml > "" > ``` Required: A human readable name for the cluster into which the agent is being deployed. This cluster name will be associated with the data that the agent uploads to the backend. #### **config.clusterDescription** ~ `string` > Default value: > ```yaml > "" > ``` A short description of the cluster where the agent is deployed (optional). This description will be associated with the data that the agent uploads to the backend. #### **config.claimableCerts** ~ `bool` > Default value: > ```yaml > false > ``` Whether discovered certs can be claimed by other tenants (optional). true = certs are left unassigned, available for any tenant to claim. false (default) = certs are owned by this cluster's tenant. 
#### **config.period** ~ `string` > Default value: > ```yaml > 0h1m0s > ``` How often to push data to the remote server #### **config.excludeAnnotationKeysRegex** ~ `array` > Default value: > ```yaml > [] > ``` You can configure the agent to exclude some annotations or labels from being pushed. All Kubernetes objects are affected. The objects are still pushed, but the specified annotations and labels are removed before being pushed. Dots is the only character that needs to be escaped in the regex. Use either double quotes with escaped single quotes or unquoted strings for the regex to avoid YAML parsing issues with `\.`. Example: excludeAnnotationKeysRegex: ['^kapp\.k14s\.io/original.*'] #### **config.excludeLabelKeysRegex** ~ `array` > Default value: > ```yaml > [] > ``` #### **config.clientID** ~ `string` > Default value: > ```yaml > "" > ``` Deprecated: Client ID for the configured service account. The client ID should be provided in the "clientID" field of the authentication secret (see config.secretName). This field is provided for compatibility for users migrating from the "venafi-kubernetes-agent" chart. #### **config.secretName** ~ `string` > Default value: > ```yaml > discovery-agent-credentials > ``` The name of the Secret containing the NGTS built-in service account credentials. The Secret must contain the following key: - privatekey.pem: PEM-encoded private key for the service account The Secret should also contain the following key: - clientID: Service account client ID (config.clientID must be set if not present) #### **replicaCount** ~ `number` > Default value: > ```yaml > 1 > ``` This will set the replicaset count more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/ #### **imageRegistry** ~ `string` > Default value: > ```yaml > quay.io > ``` The container registry used for discovery-agent images by default. This can include path prefixes (e.g. "artifactory.example.com/docker"). 
#### **imageNamespace** ~ `string` > Default value: > ```yaml > jetstack > ``` The repository namespace used for discovery-agent images by default. Examples: - jetstack - custom-namespace #### **image.repository** ~ `string` > Default value: > ```yaml > "" > ``` Full repository override (takes precedence over `imageRegistry`, `imageNamespace`, and `image.name`). Example: quay.io/jetstack/discovery-agent #### **image.name** ~ `string` > Default value: > ```yaml > discovery-agent > ``` The image name for the Discovery Agent. This is used (together with `imageRegistry` and `imageNamespace`) to construct the full image reference. #### **image.pullPolicy** ~ `string` > Default value: > ```yaml > IfNotPresent > ``` This sets the pull policy for images. #### **image.tag** ~ `string` > Default value: > ```yaml > "" > ``` Override the image tag to deploy by setting this variable. If no value is set, the chart's appVersion is used. #### **image.digest** ~ `string` > Default value: > ```yaml > "" > ``` Override the image digest to deploy by setting this variable. If set together with `image.tag`, the rendered image will include both tag and digest. #### **imagePullSecrets** ~ `array` > Default value: > ```yaml > [] > ``` This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ #### **nameOverride** ~ `string` > Default value: > ```yaml > "" > ``` This is to override the chart name. #### **fullnameOverride** ~ `string` > Default value: > ```yaml > "" > ``` #### **serviceAccount.create** ~ `bool` > Default value: > ```yaml > true > ``` Specifies whether a service account should be created #### **serviceAccount.automount** ~ `bool` > Default value: > ```yaml > true > ``` Automatically mount a ServiceAccount's API credentials? 
#### **serviceAccount.annotations** ~ `object` > Default value: > ```yaml > {} > ``` Annotations to add to the service account #### **serviceAccount.name** ~ `string` > Default value: > ```yaml > "" > ``` The name of the service account to use. If not set and create is true, a name is generated using the fullname template #### **podAnnotations** ~ `object` > Default value: > ```yaml > {} > ``` This is for setting Kubernetes Annotations to a Pod. For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ #### **podLabels** ~ `object` > Default value: > ```yaml > {} > ``` This is for setting Kubernetes Labels to a Pod. For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ #### **podSecurityContext** ~ `object` > Default value: > ```yaml > {} > ``` #### **securityContext** ~ `object` > Default value: > ```yaml > allowPrivilegeEscalation: false > capabilities: > drop: > - ALL > readOnlyRootFilesystem: true > runAsNonRoot: true > seccompProfile: > type: RuntimeDefault > ``` Add Container specific SecurityContext settings to the container. Takes precedence over `podSecurityContext` when set. See https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container #### **resources** ~ `object` > Default value: > ```yaml > {} > ``` #### **volumes** ~ `array` > Default value: > ```yaml > [] > ``` Additional volumes on the output Deployment definition. #### **volumeMounts** ~ `array` > Default value: > ```yaml > [] > ``` Additional volumeMounts on the output Deployment definition. #### **nodeSelector** ~ `object` > Default value: > ```yaml > {} > ``` #### **tolerations** ~ `array` > Default value: > ```yaml > [] > ``` #### **affinity** ~ `object` > Default value: > ```yaml > {} > ``` #### **http_proxy** ~ `string` Configures the HTTP_PROXY environment variable where a HTTP proxy is required. 
#### **https_proxy** ~ `string` Configures the HTTPS_PROXY environment variable where a HTTP proxy is required. #### **no_proxy** ~ `string` Configures the NO_PROXY environment variable where a HTTP proxy is required, but certain domains should be excluded. #### **podDisruptionBudget** ~ `object` > Default value: > ```yaml > enabled: false > ``` Configure a PodDisruptionBudget for the agent's Deployment. If running with multiple replicas, consider setting podDisruptionBudget.enabled to true. #### **extraArgs** ~ `array` > Default value: > ```yaml > [] > ``` ```yaml extraArgs: - --logging-format=json - --log-level=6 # To enable HTTP request logging ``` #### **pprof.enabled** ~ `bool` > Default value: > ```yaml > false > ``` Enable profiling with the pprof endpoint #### **metrics.enabled** ~ `bool` > Default value: > ```yaml > true > ``` Enable the metrics server. If false, the metrics server will be disabled and the other metrics fields below will be ignored. #### **metrics.podmonitor.enabled** ~ `bool` > Default value: > ```yaml > false > ``` Create a PodMonitor to add the metrics to Prometheus, if you are using Prometheus Operator. See https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor #### **metrics.podmonitor.namespace** ~ `string` The namespace that the pod monitor should live in. Defaults to the discovery-agent namespace. #### **metrics.podmonitor.prometheusInstance** ~ `string` > Default value: > ```yaml > default > ``` Specifies the `prometheus` label on the created PodMonitor. This is used when different Prometheus instances have label selectors matching different PodMonitors. #### **metrics.podmonitor.interval** ~ `string` > Default value: > ```yaml > 60s > ``` The interval to scrape metrics. #### **metrics.podmonitor.scrapeTimeout** ~ `string` > Default value: > ```yaml > 30s > ``` The timeout before a metrics scrape fails. 
#### **metrics.podmonitor.labels** ~ `object` > Default value: > ```yaml > {} > ``` Additional labels to add to the PodMonitor. #### **metrics.podmonitor.annotations** ~ `object` > Default value: > ```yaml > {} > ``` Additional annotations to add to the PodMonitor. #### **metrics.podmonitor.honorLabels** ~ `bool` > Default value: > ```yaml > false > ``` Keep labels from scraped data, overriding server-side labels. #### **metrics.podmonitor.endpointAdditionalProperties** ~ `object` > Default value: > ```yaml > {} > ``` EndpointAdditionalProperties allows setting additional properties on the endpoint such as relabelings, metricRelabelings etc. For example: ```yaml endpointAdditionalProperties: relabelings: - action: replace sourceLabels: - __meta_kubernetes_pod_node_name targetLabel: instance ``` ================================================ FILE: deploy/charts/discovery-agent/templates/NOTES.txt ================================================ CHART NAME: {{ .Chart.Name }} CHART VERSION: {{ .Chart.Version }} APP VERSION: {{ .Chart.AppVersion }} - Check the application is running: > kubectl get pods -n {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} - Check the application logs for successful connection to NGTS: > kubectl logs -n {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} ================================================ FILE: deploy/charts/discovery-agent/templates/_helpers.tpl ================================================ {{/* Expand the name of the chart. */}} {{- define "discovery-agent.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} {{- end }} {{/* Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If release name contains chart name it will be used as a full name. 
*/}} {{- define "discovery-agent.fullname" -}} {{- if .Values.fullnameOverride }} {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} {{- else }} {{- $name := default .Chart.Name .Values.nameOverride }} {{- if contains $name .Release.Name }} {{- .Release.Name | trunc 63 | trimSuffix "-" }} {{- else }} {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} {{- end }} {{- end }} {{- end }} {{/* Create chart name and version as used by the chart label. */}} {{- define "discovery-agent.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} {{- end }} {{/* Common labels */}} {{- define "discovery-agent.labels" -}} helm.sh/chart: {{ include "discovery-agent.chart" . }} {{ include "discovery-agent.selectorLabels" . }} {{- if .Chart.AppVersion }} app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- end }} app.kubernetes.io/managed-by: {{ .Release.Service }} {{- end }} {{/* Selector labels */}} {{- define "discovery-agent.selectorLabels" -}} app.kubernetes.io/name: {{ include "discovery-agent.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} {{/* Create the name of the service account to use */}} {{- define "discovery-agent.serviceAccountName" -}} {{- if .Values.serviceAccount.create }} {{- default (include "discovery-agent.fullname" .) .Values.serviceAccount.name }} {{- else }} {{- default "default" .Values.serviceAccount.name }} {{- end }} {{- end }} {{/* Util function for generating an image reference based on the provided options. This function is derived from similar functions used in the cert-manager GitHub organization */}} {{- define "discovery-agent.image" -}} {{- /* Calling convention: - (tuple ) We intentionally pass imageRegistry/imageNamespace as explicit arguments rather than reading from `.Values` inside this helper, because `helm-tool lint` does not reliably track `.Values.*` usage through tuple/variable indirection. */ -}} {{- if ne (len .) 
4 -}} {{- fail (printf "ERROR: template \"discovery-agent.image\" expects (tuple ), got %d arguments" (len .)) -}} {{- end -}} {{- $image := index . 0 -}} {{- $imageRegistry := index . 1 | default "" -}} {{- $imageNamespace := index . 2 | default "" -}} {{- $defaultReference := index . 3 -}} {{- $repository := "" -}} {{- if $image.repository -}} {{- $repository = $image.repository -}} {{- else -}} {{- $name := required "ERROR: image.name must be set when image.repository is empty" $image.name -}} {{- $repository = $name -}} {{- if $imageNamespace -}} {{- $repository = printf "%s/%s" $imageNamespace $repository -}} {{- end -}} {{- if $imageRegistry -}} {{- $repository = printf "%s/%s" $imageRegistry $repository -}} {{- end -}} {{- end -}} {{- $repository -}} {{- if and $image.tag $image.digest -}} {{- printf ":%s@%s" $image.tag $image.digest -}} {{- else if $image.tag -}} {{- printf ":%s" $image.tag -}} {{- else if $image.digest -}} {{- printf "@%s" $image.digest -}} {{- else -}} {{- printf "%s" $defaultReference -}} {{- end -}} {{- end }} ================================================ FILE: deploy/charts/discovery-agent/templates/configmap.yaml ================================================ apiVersion: v1 kind: ConfigMap metadata: name: {{ include "discovery-agent.fullname" . }}-config namespace: {{ .Release.Namespace }} labels: {{- include "discovery-agent.labels" . | nindent 4 }} data: config.yaml: |- cluster_name: {{ required "config.clusterName is required" .Values.config.clusterName | quote }} cluster_description: {{ .Values.config.clusterDescription | quote }} {{- if .Values.config.claimableCerts }} claimable_certs: true {{- end }} period: {{ .Values.config.period | quote }} {{- with .Values.config.excludeAnnotationKeysRegex }} exclude-annotation-keys-regex: {{- . | toYaml | nindent 6 }} {{- end }} {{- with .Values.config.excludeLabelKeysRegex }} exclude-label-keys-regex: {{- . 
| toYaml | nindent 6 }} {{- end }} data-gatherers: - kind: k8s-discovery name: k8s/discovery - kind: k8s-dynamic name: k8s/secrets config: resource-type: version: v1 resource: secrets field-selectors: - type!=kubernetes.io/dockercfg - type!=kubernetes.io/dockerconfigjson - type!=bootstrap.kubernetes.io/token - type!=helm.sh/release.v1 - kind: k8s-dynamic name: k8s/jobs config: resource-type: version: v1 group: batch resource: jobs - kind: k8s-dynamic name: k8s/cronjobs config: resource-type: version: v1 group: batch resource: cronjobs - kind: k8s-dynamic name: k8s/deployments config: resource-type: version: v1 group: apps resource: deployments - kind: k8s-dynamic name: k8s/statefulsets config: resource-type: version: v1 group: apps resource: statefulsets - kind: k8s-dynamic name: k8s/daemonsets config: resource-type: version: v1 group: apps resource: daemonsets - kind: k8s-dynamic name: k8s/pods config: resource-type: version: v1 resource: pods ================================================ FILE: deploy/charts/discovery-agent/templates/deployment.yaml ================================================ apiVersion: apps/v1 kind: Deployment metadata: name: {{ include "discovery-agent.fullname" . }} labels: {{- include "discovery-agent.labels" . | nindent 4 }} spec: replicas: {{ .Values.replicaCount }} selector: matchLabels: {{- include "discovery-agent.selectorLabels" . | nindent 6 }} template: metadata: {{- with .Values.podAnnotations }} annotations: {{- toYaml . | nindent 8 }} {{- end }} labels: {{- include "discovery-agent.labels" . | nindent 8 }} {{- with .Values.podLabels }} {{- toYaml . | nindent 8 }} {{- end }} spec: {{- with .Values.imagePullSecrets }} imagePullSecrets: {{- toYaml . | nindent 8 }} {{- end }} serviceAccountName: {{ include "discovery-agent.serviceAccountName" . }} {{- with .Values.podSecurityContext }} securityContext: {{- toYaml . 
| nindent 8 }} {{- end }} containers: - name: agent {{- with .Values.securityContext }} securityContext: {{- toYaml . | nindent 12 }} {{- end }} image: "{{ template "discovery-agent.image" (tuple .Values.image .Values.imageRegistry .Values.imageNamespace (printf ":%s" .Chart.AppVersion)) }}" imagePullPolicy: {{ .Values.image.pullPolicy }} env: - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: POD_UID valueFrom: fieldRef: fieldPath: metadata.uid - name: POD_NODE valueFrom: fieldRef: fieldPath: spec.nodeName {{- with .Values.http_proxy }} - name: HTTP_PROXY value: {{ . }} {{- end }} {{- with .Values.https_proxy }} - name: HTTPS_PROXY value: {{ . }} {{- end }} {{- with .Values.no_proxy }} - name: NO_PROXY value: {{ . }} {{- end }} args: - "agent" - "-c" - "/etc/discovery-agent/config.yaml" - --ngts - --tsg-id - {{ required "config.tsgID is required" .Values.config.tsgID | toString | quote }} {{- with .Values.config.serverURL }} - --ngts-server-url - {{ . | quote }} {{- end }} {{- if or .Values.config.clientID .Values.config.clientId }} - --client-id - {{ .Values.config.clientID | default .Values.config.clientId }} {{- end }} - --private-key-path - /etc/discovery-agent/credentials/privatekey.pem - --logging-format=json {{- if .Values.metrics.enabled }} - --enable-metrics {{- end }} {{- if .Values.pprof.enabled }} - --enable-pprof {{- end }} {{- range .Values.extraArgs }} - {{ . | quote }} {{- end }} {{- with .Values.resources }} resources: {{- toYaml . | nindent 12 }} {{- end }} volumeMounts: - name: config mountPath: "/etc/discovery-agent" readOnly: true - name: credentials mountPath: "/etc/discovery-agent/credentials" readOnly: true {{- with .Values.volumeMounts }} {{- toYaml . | nindent 12 }} {{- end }} ports: - name: agent-api containerPort: 8081 volumes: - name: config configMap: name: {{ include "discovery-agent.fullname" . 
}}-config optional: false - name: credentials secret: secretName: {{ .Values.config.secretName }} optional: false {{- with .Values.volumes }} {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.affinity }} affinity: {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.tolerations }} tolerations: {{- toYaml . | nindent 8 }} {{- end }} ================================================ FILE: deploy/charts/discovery-agent/templates/poddisruptionbudget.yaml ================================================ {{- if .Values.podDisruptionBudget.enabled }} apiVersion: policy/v1 kind: PodDisruptionBudget metadata: name: {{ include "discovery-agent.fullname" . }} namespace: {{ .Release.Namespace }} labels: {{- include "discovery-agent.labels" . | nindent 4 }} spec: selector: matchLabels: {{- include "discovery-agent.selectorLabels" . | nindent 6 }} {{- if not (or (hasKey .Values.podDisruptionBudget "minAvailable") (hasKey .Values.podDisruptionBudget "maxUnavailable")) }} minAvailable: 1 # Default value because minAvailable and maxUnavailable are not set {{- end }} {{- if hasKey .Values.podDisruptionBudget "minAvailable" }} minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} {{- end }} {{- if hasKey .Values.podDisruptionBudget "maxUnavailable" }} maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} {{- end }} {{- end }} ================================================ FILE: deploy/charts/discovery-agent/templates/podmonitor.yaml ================================================ {{- if and .Values.metrics.enabled .Values.metrics.podmonitor.enabled }} apiVersion: monitoring.coreos.com/v1 kind: PodMonitor metadata: name: {{ include "discovery-agent.fullname" . 
}} {{- if .Values.metrics.podmonitor.namespace }} namespace: {{ .Values.metrics.podmonitor.namespace }} {{- else }} namespace: {{ .Release.Namespace | quote }} {{- end }} labels: {{- include "discovery-agent.labels" . | nindent 4 }} prometheus: {{ .Values.metrics.podmonitor.prometheusInstance }} {{- with .Values.metrics.podmonitor.labels }} {{- toYaml . | nindent 4 }} {{- end }} {{- with .Values.metrics.podmonitor.annotations }} annotations: {{- toYaml . | nindent 4 }} {{- end }} spec: jobLabel: {{ include "discovery-agent.fullname" . }} selector: matchLabels: {{- include "discovery-agent.selectorLabels" . | nindent 6 }} {{- if .Values.metrics.podmonitor.namespace }} namespaceSelector: matchNames: - {{ .Release.Namespace | quote }} {{- end }} podMetricsEndpoints: - port: agent-api path: /metrics interval: {{ .Values.metrics.podmonitor.interval }} scrapeTimeout: {{ .Values.metrics.podmonitor.scrapeTimeout }} honorLabels: {{ .Values.metrics.podmonitor.honorLabels }} {{- with .Values.metrics.podmonitor.endpointAdditionalProperties }} {{- toYaml . | nindent 4 }} {{- end }} {{- end }} ================================================ FILE: deploy/charts/discovery-agent/templates/rbac.yaml ================================================ --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: {{ include "discovery-agent.fullname" . }}-event-emitted labels: {{- include "discovery-agent.labels" . | nindent 4 }} rules: - apiGroups: [""] resources: ["events"] verbs: ["create"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: {{ include "discovery-agent.fullname" . }}-event-emitted labels: {{- include "discovery-agent.labels" . | nindent 4 }} roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: {{ include "discovery-agent.fullname" . }}-event-emitted subjects: - kind: ServiceAccount name: {{ include "discovery-agent.serviceAccountName" . 
}} namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ include "discovery-agent.fullname" . }}-cluster-viewer labels: {{- include "discovery-agent.labels" . | nindent 4 }} roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: view subjects: - kind: ServiceAccount name: {{ include "discovery-agent.serviceAccountName" . }} namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "discovery-agent.fullname" . }}-secret-reader labels: {{- include "discovery-agent.labels" . | nindent 4 }} rules: - apiGroups: [""] resources: ["secrets"] verbs: ["get", "list", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ include "discovery-agent.fullname" . }}-secret-reader labels: {{- include "discovery-agent.labels" . | nindent 4 }} roleRef: kind: ClusterRole name: {{ include "discovery-agent.fullname" . }}-secret-reader apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: {{ include "discovery-agent.serviceAccountName" . }} namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "discovery-agent.fullname" . }}-rbac-reader labels: {{- include "discovery-agent.labels" . | nindent 4 }} rules: - apiGroups: ["rbac.authorization.k8s.io"] resources: - roles - clusterroles - rolebindings - clusterrolebindings verbs: ["get", "list", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ include "discovery-agent.fullname" . }}-rbac-reader labels: {{- include "discovery-agent.labels" . | nindent 4 }} roleRef: kind: ClusterRole name: {{ include "discovery-agent.fullname" . }}-rbac-reader apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: {{ include "discovery-agent.serviceAccountName" . 
}} namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ include "discovery-agent.fullname" . }}-oidc-discovery labels: {{- include "discovery-agent.labels" . | nindent 4 }} roleRef: kind: ClusterRole name: system:service-account-issuer-discovery apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: {{ include "discovery-agent.serviceAccountName" . }} namespace: {{ .Release.Namespace }} ================================================ FILE: deploy/charts/discovery-agent/templates/serviceaccount.yaml ================================================ {{- if .Values.serviceAccount.create -}} apiVersion: v1 kind: ServiceAccount metadata: name: {{ include "discovery-agent.serviceAccountName" . }} labels: {{- include "discovery-agent.labels" . | nindent 4 }} {{- with .Values.serviceAccount.annotations }} annotations: {{- toYaml . | nindent 4 }} {{- end }} automountServiceAccountToken: {{ .Values.serviceAccount.automount }} {{- end }} ================================================ FILE: deploy/charts/discovery-agent/tests/configmap_test.yaml ================================================ suite: test configmap templates: - configmap.yaml tests: # Test basic ConfigMap rendering - it: should create ConfigMap with required values set: config.clusterName: my-test-cluster config.tsgID: "123456" asserts: - isKind: of: ConfigMap - equal: path: metadata.name value: RELEASE-NAME-discovery-agent-config - matchRegex: path: data["config.yaml"] pattern: 'cluster_name: "my-test-cluster"' # Test cluster description - it: should include cluster description when set set: config.clusterName: test-cluster config.tsgID: "123456" config.clusterDescription: "This is a test cluster" asserts: - matchRegex: path: data["config.yaml"] pattern: 'cluster_description: "This is a test cluster"' # Test period configuration - it: should set custom period set: config.clusterName: test-cluster config.tsgID: 
"123456" config.period: "0h5m0s" asserts: - matchRegex: path: data["config.yaml"] pattern: 'period: "0h5m0s"' # Test exclude annotation keys regex - it: should include excludeAnnotationKeysRegex when set set: config.clusterName: test-cluster config.tsgID: "123456" config.excludeAnnotationKeysRegex: - "^kapp\\.k14s\\.io/original.*" - "^kubectl\\.kubernetes\\.io/.*" asserts: - matchRegex: path: data["config.yaml"] pattern: 'exclude-annotation-keys-regex:' - matchRegex: path: data["config.yaml"] pattern: '\^kapp\\\.k14s\\\.io/original\.\*' # Test exclude label keys regex - it: should include excludeLabelKeysRegex when set set: config.clusterName: test-cluster config.tsgID: "123456" config.excludeLabelKeysRegex: - "^helm\\.sh/.*" asserts: - matchRegex: path: data["config.yaml"] pattern: 'exclude-label-keys-regex:' - matchRegex: path: data["config.yaml"] pattern: '\^helm\\\.sh/\.\*' # Test data-gatherers are present - it: should include all data-gatherers set: config.clusterName: test-cluster config.tsgID: "123456" asserts: - matchRegex: path: data["config.yaml"] pattern: 'kind: k8s-discovery' - matchRegex: path: data["config.yaml"] pattern: 'name: k8s/secrets' - matchRegex: path: data["config.yaml"] pattern: 'name: k8s/jobs' - matchRegex: path: data["config.yaml"] pattern: 'name: k8s/deployments' ================================================ FILE: deploy/charts/discovery-agent/tests/deployment_test.yaml ================================================ suite: test deployment templates: - deployment.yaml tests: # Test that tsgID is rendered correctly as a string - it: tsgID is rendered as string in deployment args set: config.clusterName: test-cluster config.tsgID: "987654321" template: deployment.yaml asserts: - isKind: of: Deployment - contains: path: spec.template.spec.containers[0].args content: --tsg-id - contains: path: spec.template.spec.containers[0].args content: "987654321" # Test that tsgID preserves leading zeros (only possible with string type) # NB: TSG 
IDs are defined to start with "1", but this test is defence in depth - it: tsgID preserves leading zeros when provided as string set: config.clusterName: test-cluster config.tsgID: "0001234" template: deployment.yaml asserts: - isKind: of: Deployment - contains: path: spec.template.spec.containers[0].args content: --tsg-id - contains: path: spec.template.spec.containers[0].args content: "0001234" # Test basic deployment rendering with all required values - it: deployment templates correctly with required values set: config.clusterName: my-test-cluster config.tsgID: "123456" template: deployment.yaml asserts: - isKind: of: Deployment - matchRegex: path: metadata.name pattern: ^.*-discovery-agent$ # Test replica count - it: should set replica count correctly set: config.clusterName: test-cluster config.tsgID: "123456" replicaCount: 3 asserts: - equal: path: spec.replicas value: 3 # Test security contexts - it: should apply pod security context set: config.clusterName: test-cluster config.tsgID: "123456" podSecurityContext: fsGroup: 2000 asserts: - equal: path: spec.template.spec.securityContext.fsGroup value: 2000 - it: should apply container security context defaults set: config.clusterName: test-cluster config.tsgID: "123456" asserts: - equal: path: spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem value: true - equal: path: spec.template.spec.containers[0].securityContext.runAsNonRoot value: true - equal: path: spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation value: false # Test resources - it: should set resources when specified set: config.clusterName: test-cluster config.tsgID: "123456" resources: limits: cpu: 200m memory: 256Mi requests: cpu: 100m memory: 128Mi asserts: - equal: path: spec.template.spec.containers[0].resources.limits.cpu value: 200m - equal: path: spec.template.spec.containers[0].resources.requests.memory value: 128Mi # Test environment variables - it: should set HTTP_PROXY environment variable 
set: config.clusterName: test-cluster config.tsgID: "123456" http_proxy: "http://proxy:8080" asserts: - contains: path: spec.template.spec.containers[0].env content: name: HTTP_PROXY value: "http://proxy:8080" - it: should set HTTPS_PROXY environment variable set: config.clusterName: test-cluster config.tsgID: "123456" https_proxy: "https://proxy:8443" asserts: - contains: path: spec.template.spec.containers[0].env content: name: HTTPS_PROXY value: "https://proxy:8443" - it: should set NO_PROXY environment variable set: config.clusterName: test-cluster config.tsgID: "123456" no_proxy: "127.0.0.1,localhost" asserts: - contains: path: spec.template.spec.containers[0].env content: name: NO_PROXY value: "127.0.0.1,localhost" # Test command line arguments - it: should include metrics flag when enabled set: config.clusterName: test-cluster config.tsgID: "123456" metrics.enabled: true asserts: - contains: path: spec.template.spec.containers[0].args content: --enable-metrics - it: should include pprof flag when enabled set: config.clusterName: test-cluster config.tsgID: "123456" pprof.enabled: true asserts: - contains: path: spec.template.spec.containers[0].args content: --enable-pprof - it: should include custom server URL when set set: config.clusterName: test-cluster config.tsgID: "123456" config.serverURL: "https://custom.example.com" asserts: - contains: path: spec.template.spec.containers[0].args content: --ngts-server-url - contains: path: spec.template.spec.containers[0].args content: "https://custom.example.com" - it: should include client ID when set set: config.clusterName: test-cluster config.tsgID: "123456" config.clientID: "test-client-id" asserts: - contains: path: spec.template.spec.containers[0].args content: --client-id - contains: path: spec.template.spec.containers[0].args content: test-client-id - it: should include client ID when clientId is set (lowercase d) set: config.clusterName: test-cluster config.tsgID: "123456" config.clientId: 
"test-client-id-lowercase" asserts: - contains: path: spec.template.spec.containers[0].args content: --client-id - contains: path: spec.template.spec.containers[0].args content: test-client-id-lowercase - it: should prefer clientID over clientId when both are set set: config.clusterName: test-cluster config.tsgID: "123456" config.clientID: "uppercase-takes-precedence" config.clientId: "lowercase-ignored" asserts: - contains: path: spec.template.spec.containers[0].args content: --client-id - contains: path: spec.template.spec.containers[0].args content: uppercase-takes-precedence - it: should include extra args set: config.clusterName: test-cluster config.tsgID: "123456" extraArgs: - --log-level=6 - --custom-flag=value asserts: - contains: path: spec.template.spec.containers[0].args content: "--log-level=6" - contains: path: spec.template.spec.containers[0].args content: "--custom-flag=value" # Test volumes and volume mounts - it: should mount config and credentials volumes set: config.clusterName: test-cluster config.tsgID: "123456" asserts: - contains: path: spec.template.spec.containers[0].volumeMounts content: name: config mountPath: "/etc/discovery-agent" readOnly: true - contains: path: spec.template.spec.containers[0].volumeMounts content: name: credentials mountPath: "/etc/discovery-agent/credentials" readOnly: true - contains: path: spec.template.spec.volumes content: name: config configMap: name: RELEASE-NAME-discovery-agent-config optional: false - it: should use custom secret name set: config.clusterName: test-cluster config.tsgID: "123456" config.secretName: custom-secret asserts: - contains: path: spec.template.spec.volumes content: name: credentials secret: secretName: custom-secret optional: false # Test pod annotations and labels - it: should apply pod annotations set: config.clusterName: test-cluster config.tsgID: "123456" podAnnotations: annotation-key: annotation-value asserts: - equal: path: spec.template.metadata.annotations.annotation-key 
value: annotation-value - it: should apply pod labels set: config.clusterName: test-cluster config.tsgID: "123456" podLabels: custom-label: label-value asserts: - equal: path: spec.template.metadata.labels.custom-label value: label-value # Test node selector, tolerations, and affinity - it: should apply node selector set: config.clusterName: test-cluster config.tsgID: "123456" nodeSelector: disktype: ssd asserts: - equal: path: spec.template.spec.nodeSelector.disktype value: ssd - it: should apply tolerations set: config.clusterName: test-cluster config.tsgID: "123456" tolerations: - key: "key1" operator: "Equal" value: "value1" effect: "NoSchedule" asserts: - contains: path: spec.template.spec.tolerations content: key: "key1" operator: "Equal" value: "value1" effect: "NoSchedule" - it: should apply affinity set: config.clusterName: test-cluster config.tsgID: "123456" affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - key: kubernetes.io/hostname operator: In values: - node1 asserts: - isNotEmpty: path: spec.template.spec.affinity.nodeAffinity # Test image pull secrets - it: should apply image pull secrets set: config.clusterName: test-cluster config.tsgID: "123456" imagePullSecrets: - name: my-secret asserts: - contains: path: spec.template.spec.imagePullSecrets content: name: my-secret ================================================ FILE: deploy/charts/discovery-agent/tests/poddisruptionbudget_test.yaml ================================================ suite: test poddisruptionbudget templates: - poddisruptionbudget.yaml tests: # Test PodDisruptionBudget is not created by default - it: should not create PodDisruptionBudget when disabled set: config.clusterName: test-cluster config.tsgID: "123456" podDisruptionBudget.enabled: false asserts: - hasDocuments: count: 0 # Test PodDisruptionBudget is created when enabled - it: should create PodDisruptionBudget when enabled set: config.clusterName: test-cluster 
config.tsgID: "123456" podDisruptionBudget.enabled: true asserts: - isKind: of: PodDisruptionBudget - equal: path: metadata.name value: RELEASE-NAME-discovery-agent # Test default minAvailable when neither minAvailable nor maxUnavailable is set - it: should set default minAvailable when no disruption values are set set: config.clusterName: test-cluster config.tsgID: "123456" podDisruptionBudget.enabled: true asserts: - equal: path: spec.minAvailable value: 1 - isNull: path: spec.maxUnavailable # Test custom minAvailable - it: should set custom minAvailable set: config.clusterName: test-cluster config.tsgID: "123456" podDisruptionBudget.enabled: true podDisruptionBudget.minAvailable: 2 asserts: - equal: path: spec.minAvailable value: 2 - isNull: path: spec.maxUnavailable # Test minAvailable as percentage - it: should set minAvailable as percentage set: config.clusterName: test-cluster config.tsgID: "123456" podDisruptionBudget.enabled: true podDisruptionBudget.minAvailable: "50%" asserts: - equal: path: spec.minAvailable value: "50%" # Test custom maxUnavailable - it: should set custom maxUnavailable set: config.clusterName: test-cluster config.tsgID: "123456" podDisruptionBudget.enabled: true podDisruptionBudget.maxUnavailable: 1 asserts: - equal: path: spec.maxUnavailable value: 1 - isNull: path: spec.minAvailable # Test maxUnavailable as percentage - it: should set maxUnavailable as percentage set: config.clusterName: test-cluster config.tsgID: "123456" podDisruptionBudget.enabled: true podDisruptionBudget.maxUnavailable: "25%" asserts: - equal: path: spec.maxUnavailable value: "25%" # Test selector labels - it: should use correct selector labels set: config.clusterName: test-cluster config.tsgID: "123456" podDisruptionBudget.enabled: true asserts: - isNotEmpty: path: spec.selector.matchLabels ================================================ FILE: deploy/charts/discovery-agent/tests/podmonitor_test.yaml ================================================ suite: test 
podmonitor templates: - podmonitor.yaml tests: # Test PodMonitor is not created by default - it: should not create PodMonitor when metrics.podmonitor.enabled is false set: config.clusterName: test-cluster config.tsgID: "123456" metrics.enabled: true metrics.podmonitor.enabled: false asserts: - hasDocuments: count: 0 # Test PodMonitor is not created when metrics are disabled - it: should not create PodMonitor when metrics.enabled is false set: config.clusterName: test-cluster config.tsgID: "123456" metrics.enabled: false metrics.podmonitor.enabled: true asserts: - hasDocuments: count: 0 # Test PodMonitor is created when both metrics and podmonitor are enabled - it: should create PodMonitor when both metrics and podmonitor are enabled set: config.clusterName: test-cluster config.tsgID: "123456" metrics.enabled: true metrics.podmonitor.enabled: true asserts: - isKind: of: PodMonitor - equal: path: metadata.name value: RELEASE-NAME-discovery-agent # Test PodMonitor namespace defaults to Release namespace - it: should use Release namespace by default set: config.clusterName: test-cluster config.tsgID: "123456" metrics.enabled: true metrics.podmonitor.enabled: true release: namespace: my-namespace asserts: - equal: path: metadata.namespace value: my-namespace # Test custom PodMonitor namespace - it: should use custom namespace when specified set: config.clusterName: test-cluster config.tsgID: "123456" metrics.enabled: true metrics.podmonitor.enabled: true metrics.podmonitor.namespace: monitoring release: namespace: default asserts: - equal: path: metadata.namespace value: monitoring - contains: path: spec.namespaceSelector.matchNames content: default # Test prometheus instance label - it: should set prometheus instance label set: config.clusterName: test-cluster config.tsgID: "123456" metrics.enabled: true metrics.podmonitor.enabled: true metrics.podmonitor.prometheusInstance: custom-prometheus asserts: - equal: path: metadata.labels.prometheus value: custom-prometheus # 
Test custom labels - it: should apply custom labels set: config.clusterName: test-cluster config.tsgID: "123456" metrics.enabled: true metrics.podmonitor.enabled: true metrics.podmonitor.labels: custom-label: custom-value another-label: another-value asserts: - equal: path: metadata.labels.custom-label value: custom-value - equal: path: metadata.labels.another-label value: another-value # Test custom annotations - it: should apply custom annotations set: config.clusterName: test-cluster config.tsgID: "123456" metrics.enabled: true metrics.podmonitor.enabled: true metrics.podmonitor.annotations: custom-annotation: custom-value asserts: - equal: path: metadata.annotations.custom-annotation value: custom-value # Test scrape configuration - it: should configure scrape interval and timeout set: config.clusterName: test-cluster config.tsgID: "123456" metrics.enabled: true metrics.podmonitor.enabled: true metrics.podmonitor.interval: 30s metrics.podmonitor.scrapeTimeout: 15s asserts: - equal: path: spec.podMetricsEndpoints[0].interval value: 30s - equal: path: spec.podMetricsEndpoints[0].scrapeTimeout value: 15s # Test honorLabels setting - it: should set honorLabels correctly set: config.clusterName: test-cluster config.tsgID: "123456" metrics.enabled: true metrics.podmonitor.enabled: true metrics.podmonitor.honorLabels: true asserts: - equal: path: spec.podMetricsEndpoints[0].honorLabels value: true # Test metrics endpoint configuration - it: should configure metrics endpoint correctly set: config.clusterName: test-cluster config.tsgID: "123456" metrics.enabled: true metrics.podmonitor.enabled: true asserts: - equal: path: spec.podMetricsEndpoints[0].port value: agent-api - equal: path: spec.podMetricsEndpoints[0].path value: /metrics ================================================ FILE: deploy/charts/discovery-agent/tests/rbac_test.yaml ================================================ suite: test rbac templates: - rbac.yaml tests: # Test that all RBAC resources are 
created - it: should create all RBAC resources set: config.clusterName: test-cluster config.tsgID: "123456" asserts: - hasDocuments: count: 8 # Test Role for event emission - it: should create Role for event emission set: config.clusterName: test-cluster config.tsgID: "123456" documentIndex: 0 asserts: - isKind: of: Role - equal: path: metadata.name value: RELEASE-NAME-discovery-agent-event-emitted - contains: path: rules content: apiGroups: [""] resources: ["events"] verbs: ["create"] # Test RoleBinding for event emission - it: should create RoleBinding for event emission set: config.clusterName: test-cluster config.tsgID: "123456" documentIndex: 1 asserts: - isKind: of: RoleBinding - equal: path: metadata.name value: RELEASE-NAME-discovery-agent-event-emitted - equal: path: roleRef.kind value: Role - equal: path: roleRef.name value: RELEASE-NAME-discovery-agent-event-emitted - contains: path: subjects content: kind: ServiceAccount name: RELEASE-NAME-discovery-agent namespace: NAMESPACE # Test ClusterRoleBinding for cluster viewer - it: should create ClusterRoleBinding for cluster viewer set: config.clusterName: test-cluster config.tsgID: "123456" documentIndex: 2 asserts: - isKind: of: ClusterRoleBinding - equal: path: metadata.name value: RELEASE-NAME-discovery-agent-cluster-viewer - equal: path: roleRef.kind value: ClusterRole - equal: path: roleRef.name value: view - contains: path: subjects content: kind: ServiceAccount name: RELEASE-NAME-discovery-agent namespace: NAMESPACE # Test ClusterRole for secret reader - it: should create ClusterRole for secret reader set: config.clusterName: test-cluster config.tsgID: "123456" documentIndex: 3 asserts: - isKind: of: ClusterRole - equal: path: metadata.name value: RELEASE-NAME-discovery-agent-secret-reader - contains: path: rules content: apiGroups: [""] resources: ["secrets"] verbs: ["get", "list", "watch"] # Test ClusterRoleBinding for secret reader - it: should create ClusterRoleBinding for secret reader set: 
config.clusterName: test-cluster config.tsgID: "123456" documentIndex: 4 asserts: - isKind: of: ClusterRoleBinding - equal: path: metadata.name value: RELEASE-NAME-discovery-agent-secret-reader - equal: path: roleRef.kind value: ClusterRole - equal: path: roleRef.name value: RELEASE-NAME-discovery-agent-secret-reader # Test ClusterRole for RBAC reader - it: should create ClusterRole for RBAC reader set: config.clusterName: test-cluster config.tsgID: "123456" documentIndex: 5 asserts: - isKind: of: ClusterRole - equal: path: metadata.name value: RELEASE-NAME-discovery-agent-rbac-reader - contains: path: rules[0].resources content: roles - contains: path: rules[0].resources content: clusterroles - contains: path: rules[0].resources content: rolebindings - contains: path: rules[0].resources content: clusterrolebindings # Test ClusterRoleBinding for RBAC reader - it: should create ClusterRoleBinding for RBAC reader set: config.clusterName: test-cluster config.tsgID: "123456" documentIndex: 6 asserts: - isKind: of: ClusterRoleBinding - equal: path: metadata.name value: RELEASE-NAME-discovery-agent-rbac-reader - equal: path: roleRef.kind value: ClusterRole - equal: path: roleRef.name value: RELEASE-NAME-discovery-agent-rbac-reader # Test ClusterRoleBinding for OIDC discovery - it: should create ClusterRoleBinding for OIDC discovery set: config.clusterName: test-cluster config.tsgID: "123456" documentIndex: 7 asserts: - isKind: of: ClusterRoleBinding - equal: path: metadata.name value: RELEASE-NAME-discovery-agent-oidc-discovery - equal: path: roleRef.kind value: ClusterRole - equal: path: roleRef.name value: system:service-account-issuer-discovery ================================================ FILE: deploy/charts/discovery-agent/tests/serviceaccount_test.yaml ================================================ suite: test serviceaccount templates: - serviceaccount.yaml tests: # Test ServiceAccount is created by default - it: should create ServiceAccount when 
serviceAccount.create is true set: config.clusterName: test-cluster config.tsgID: "123456" serviceAccount.create: true asserts: - isKind: of: ServiceAccount - equal: path: metadata.name value: RELEASE-NAME-discovery-agent # Test ServiceAccount is not created when disabled - it: should not create ServiceAccount when serviceAccount.create is false set: config.clusterName: test-cluster config.tsgID: "123456" serviceAccount.create: false asserts: - hasDocuments: count: 0 # Test custom ServiceAccount name - it: should use custom name when serviceAccount.name is set set: config.clusterName: test-cluster config.tsgID: "123456" serviceAccount.create: true serviceAccount.name: custom-sa-name asserts: - equal: path: metadata.name value: custom-sa-name # Test automountServiceAccountToken setting - it: should set automountServiceAccountToken correctly set: config.clusterName: test-cluster config.tsgID: "123456" serviceAccount.create: true serviceAccount.automount: false asserts: - equal: path: automountServiceAccountToken value: false - it: should enable automountServiceAccountToken by default set: config.clusterName: test-cluster config.tsgID: "123456" serviceAccount.create: true asserts: - equal: path: automountServiceAccountToken value: true # Test ServiceAccount annotations - it: should apply annotations to ServiceAccount set: config.clusterName: test-cluster config.tsgID: "123456" serviceAccount.create: true serviceAccount.annotations: eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/my-role custom-annotation: custom-value asserts: - equal: path: metadata.annotations["eks.amazonaws.com/role-arn"] value: arn:aws:iam::123456789012:role/my-role - equal: path: metadata.annotations.custom-annotation value: custom-value ================================================ FILE: deploy/charts/discovery-agent/values.linter.exceptions ================================================ ================================================ FILE: 
deploy/charts/discovery-agent/values.schema.json ================================================ { "$defs": { "helm-values": { "additionalProperties": false, "properties": { "affinity": { "$ref": "#/$defs/helm-values.affinity" }, "config": { "$ref": "#/$defs/helm-values.config" }, "extraArgs": { "$ref": "#/$defs/helm-values.extraArgs" }, "fullnameOverride": { "$ref": "#/$defs/helm-values.fullnameOverride" }, "global": { "$ref": "#/$defs/helm-values.global" }, "http_proxy": { "$ref": "#/$defs/helm-values.http_proxy" }, "https_proxy": { "$ref": "#/$defs/helm-values.https_proxy" }, "image": { "$ref": "#/$defs/helm-values.image" }, "imageNamespace": { "$ref": "#/$defs/helm-values.imageNamespace" }, "imagePullSecrets": { "$ref": "#/$defs/helm-values.imagePullSecrets" }, "imageRegistry": { "$ref": "#/$defs/helm-values.imageRegistry" }, "metrics": { "$ref": "#/$defs/helm-values.metrics" }, "nameOverride": { "$ref": "#/$defs/helm-values.nameOverride" }, "no_proxy": { "$ref": "#/$defs/helm-values.no_proxy" }, "nodeSelector": { "$ref": "#/$defs/helm-values.nodeSelector" }, "podAnnotations": { "$ref": "#/$defs/helm-values.podAnnotations" }, "podDisruptionBudget": { "$ref": "#/$defs/helm-values.podDisruptionBudget" }, "podLabels": { "$ref": "#/$defs/helm-values.podLabels" }, "podSecurityContext": { "$ref": "#/$defs/helm-values.podSecurityContext" }, "pprof": { "$ref": "#/$defs/helm-values.pprof" }, "replicaCount": { "$ref": "#/$defs/helm-values.replicaCount" }, "resources": { "$ref": "#/$defs/helm-values.resources" }, "securityContext": { "$ref": "#/$defs/helm-values.securityContext" }, "serviceAccount": { "$ref": "#/$defs/helm-values.serviceAccount" }, "tolerations": { "$ref": "#/$defs/helm-values.tolerations" }, "volumeMounts": { "$ref": "#/$defs/helm-values.volumeMounts" }, "volumes": { "$ref": "#/$defs/helm-values.volumes" } }, "type": "object" }, "helm-values.affinity": { "default": {}, "type": "object" }, "helm-values.config": { "additionalProperties": false, 
"properties": { "claimableCerts": { "$ref": "#/$defs/helm-values.config.claimableCerts" }, "clientID": { "$ref": "#/$defs/helm-values.config.clientID" }, "clientId": { "$ref": "#/$defs/helm-values.config.clientId" }, "clusterDescription": { "$ref": "#/$defs/helm-values.config.clusterDescription" }, "clusterName": { "$ref": "#/$defs/helm-values.config.clusterName" }, "excludeAnnotationKeysRegex": { "$ref": "#/$defs/helm-values.config.excludeAnnotationKeysRegex" }, "excludeLabelKeysRegex": { "$ref": "#/$defs/helm-values.config.excludeLabelKeysRegex" }, "period": { "$ref": "#/$defs/helm-values.config.period" }, "secretName": { "$ref": "#/$defs/helm-values.config.secretName" }, "serverURL": { "$ref": "#/$defs/helm-values.config.serverURL" }, "tsgID": { "$ref": "#/$defs/helm-values.config.tsgID" } }, "type": "object" }, "helm-values.config.claimableCerts": { "default": false, "description": "Whether discovered certs can be claimed by other tenants (optional). true = certs are left unassigned, available for any tenant to claim. false (default) = certs are owned by this cluster's tenant.", "type": "boolean" }, "helm-values.config.clientID": { "default": "", "description": "Deprecated: Client ID for the configured service account. The client ID should be provided in the \"clientID\" field of the authentication secret (see config.secretName). This field is provided for compatibility for users migrating from the \"venafi-kubernetes-agent\" chart.", "type": "string" }, "helm-values.config.clientId": { "default": "", "description": "Deprecated: Client ID for the configured service account (alternative to clientID). The client ID should be provided in the \"clientID\" field of the authentication secret (see config.secretName). This field is provided for compatibility for users migrating from the \"venafi-kubernetes-agent\" chart. 
If both clientID and clientId are set, clientID takes precedence.", "type": "string" }, "helm-values.config.clusterDescription": { "default": "", "description": "A short description of the cluster where the agent is deployed (optional).\n\nThis description will be associated with the data that the agent uploads to the backend.", "type": "string" }, "helm-values.config.clusterName": { "default": "", "description": "Required: A human readable name for the cluster into which the agent is being deployed.\n\nThis cluster name will be associated with the data that the agent uploads to the backend.", "type": "string" }, "helm-values.config.excludeAnnotationKeysRegex": { "default": [], "description": "You can configure the agent to exclude some annotations or labels from being pushed. All Kubernetes objects are affected. The objects are still pushed, but the specified annotations and labels are removed before being pushed.\n\nThe dot is the only character that needs to be escaped in the regex. Use either double quotes with escaped single quotes or unquoted strings for the regex to avoid YAML parsing issues with `\\.`.\n\nExample: excludeAnnotationKeysRegex: ['^kapp\\.k14s\\.io/original.*']", "items": {}, "type": "array" }, "helm-values.config.excludeLabelKeysRegex": { "default": [], "items": {}, "type": "array" }, "helm-values.config.period": { "default": "0h1m0s", "description": "How often to push data to the remote server", "type": "string" }, "helm-values.config.secretName": { "default": "discovery-agent-credentials", "description": "The name of the Secret containing the NGTS built-in service account credentials.\nThe Secret must contain the following key:\n- privatekey.pem: PEM-encoded private key for the service account\nThe Secret should also contain the following key:\n- clientID: Service account client ID (config.clientID must be set if not present)", "type": "string" }, "helm-values.config.serverURL": { "default": "", "description": "Explicit SCM server URL 
(optional).\nIf not set, a production SCM server URL will be created based on the TSG ID. This value is intended for development purposes only and should not be set in production.", "type": "string" }, "helm-values.config.tsgID": { "default": "", "description": "Required: The TSG (Tenant Service Group) ID to use when connecting to SCM. NB: TSG IDs are numeric, but must be treated as strings to avoid issues with YAML data types. With the Helm CLI use `--set-string`; with YAML always pass TSG IDs in double quotes.", "type": "string" }, "helm-values.extraArgs": { "default": [], "description": "extraArgs:\n- --logging-format=json\n- --log-level=6 # To enable HTTP request logging", "items": {}, "type": "array" }, "helm-values.fullnameOverride": { "default": "", "type": "string" }, "helm-values.global": { "description": "Global values shared across all (sub)charts" }, "helm-values.http_proxy": { "description": "Configures the HTTP_PROXY environment variable where a HTTP proxy is required.", "type": "string" }, "helm-values.https_proxy": { "description": "Configures the HTTPS_PROXY environment variable where a HTTP proxy is required.", "type": "string" }, "helm-values.image": { "additionalProperties": false, "properties": { "digest": { "$ref": "#/$defs/helm-values.image.digest" }, "name": { "$ref": "#/$defs/helm-values.image.name" }, "pullPolicy": { "$ref": "#/$defs/helm-values.image.pullPolicy" }, "repository": { "$ref": "#/$defs/helm-values.image.repository" }, "tag": { "$ref": "#/$defs/helm-values.image.tag" } }, "type": "object" }, "helm-values.image.digest": { "default": "", "description": "Override the image digest to deploy by setting this variable. 
If set together with `image.tag`, the rendered image will include both tag and digest.", "type": "string" }, "helm-values.image.name": { "default": "discovery-agent", "description": "The image name for the Discovery Agent.\nThis is used (together with `imageRegistry` and `imageNamespace`) to construct the full image reference.", "type": "string" }, "helm-values.image.pullPolicy": { "default": "IfNotPresent", "description": "This sets the pull policy for images.", "type": "string" }, "helm-values.image.repository": { "default": "", "description": "Full repository override (takes precedence over `imageRegistry`, `imageNamespace`, and `image.name`).\nExample: quay.io/jetstack/discovery-agent", "type": "string" }, "helm-values.image.tag": { "default": "", "description": "Override the image tag to deploy by setting this variable. If no value is set, the chart's appVersion is used.", "type": "string" }, "helm-values.imageNamespace": { "default": "jetstack", "description": "The repository namespace used for discovery-agent images by default.\nExamples:\n- jetstack\n- custom-namespace", "type": "string" }, "helm-values.imagePullSecrets": { "default": [], "description": "This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/", "items": {}, "type": "array" }, "helm-values.imageRegistry": { "default": "quay.io", "description": "The container registry used for discovery-agent images by default. This can include path prefixes (e.g. 
\"artifactory.example.com/docker\").", "type": "string" }, "helm-values.metrics": { "additionalProperties": false, "properties": { "enabled": { "$ref": "#/$defs/helm-values.metrics.enabled" }, "podmonitor": { "$ref": "#/$defs/helm-values.metrics.podmonitor" } }, "type": "object" }, "helm-values.metrics.enabled": { "default": true, "description": "Enable the metrics server.\nIf false, the metrics server will be disabled and the other metrics fields below will be ignored.", "type": "boolean" }, "helm-values.metrics.podmonitor": { "additionalProperties": false, "properties": { "annotations": { "$ref": "#/$defs/helm-values.metrics.podmonitor.annotations" }, "enabled": { "$ref": "#/$defs/helm-values.metrics.podmonitor.enabled" }, "endpointAdditionalProperties": { "$ref": "#/$defs/helm-values.metrics.podmonitor.endpointAdditionalProperties" }, "honorLabels": { "$ref": "#/$defs/helm-values.metrics.podmonitor.honorLabels" }, "interval": { "$ref": "#/$defs/helm-values.metrics.podmonitor.interval" }, "labels": { "$ref": "#/$defs/helm-values.metrics.podmonitor.labels" }, "namespace": { "$ref": "#/$defs/helm-values.metrics.podmonitor.namespace" }, "prometheusInstance": { "$ref": "#/$defs/helm-values.metrics.podmonitor.prometheusInstance" }, "scrapeTimeout": { "$ref": "#/$defs/helm-values.metrics.podmonitor.scrapeTimeout" } }, "type": "object" }, "helm-values.metrics.podmonitor.annotations": { "default": {}, "description": "Additional annotations to add to the PodMonitor.", "type": "object" }, "helm-values.metrics.podmonitor.enabled": { "default": false, "description": "Create a PodMonitor to add the metrics to Prometheus, if you are using Prometheus Operator. 
See https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor", "type": "boolean" }, "helm-values.metrics.podmonitor.endpointAdditionalProperties": { "default": {}, "description": "EndpointAdditionalProperties allows setting additional properties on the endpoint such as relabelings, metricRelabelings etc.\n\nFor example:\nendpointAdditionalProperties:\n relabelings:\n - action: replace\n sourceLabels:\n - __meta_kubernetes_pod_node_name\n targetLabel: instance", "type": "object" }, "helm-values.metrics.podmonitor.honorLabels": { "default": false, "description": "Keep labels from scraped data, overriding server-side labels.", "type": "boolean" }, "helm-values.metrics.podmonitor.interval": { "default": "60s", "description": "The interval to scrape metrics.", "type": "string" }, "helm-values.metrics.podmonitor.labels": { "default": {}, "description": "Additional labels to add to the PodMonitor.", "type": "object" }, "helm-values.metrics.podmonitor.namespace": { "description": "The namespace that the pod monitor should live in.\nDefaults to the discovery-agent namespace.", "type": "string" }, "helm-values.metrics.podmonitor.prometheusInstance": { "default": "default", "description": "Specifies the `prometheus` label on the created PodMonitor. 
This is used when different Prometheus instances have label selectors matching different PodMonitors.", "type": "string" }, "helm-values.metrics.podmonitor.scrapeTimeout": { "default": "30s", "description": "The timeout before a metrics scrape fails.", "type": "string" }, "helm-values.nameOverride": { "default": "", "description": "This is to override the chart name.", "type": "string" }, "helm-values.no_proxy": { "description": "Configures the NO_PROXY environment variable where a HTTP proxy is required, but certain domains should be excluded.", "type": "string" }, "helm-values.nodeSelector": { "default": {}, "type": "object" }, "helm-values.podAnnotations": { "default": {}, "description": "This is for setting Kubernetes Annotations to a Pod. For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/", "type": "object" }, "helm-values.podDisruptionBudget": { "default": { "enabled": false }, "description": "Configure a PodDisruptionBudget for the agent's Deployment. 
If running with multiple replicas, consider setting podDisruptionBudget.enabled to true.", "type": "object" }, "helm-values.podLabels": { "default": {}, "description": "This is for setting Kubernetes Labels to a Pod.\nFor more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", "type": "object" }, "helm-values.podSecurityContext": { "default": {}, "type": "object" }, "helm-values.pprof": { "additionalProperties": false, "properties": { "enabled": { "$ref": "#/$defs/helm-values.pprof.enabled" } }, "type": "object" }, "helm-values.pprof.enabled": { "default": false, "description": "Enable profiling with the pprof endpoint", "type": "boolean" }, "helm-values.replicaCount": { "default": 1, "description": "This will set the replicaset count more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/", "type": "number" }, "helm-values.resources": { "default": {}, "type": "object" }, "helm-values.securityContext": { "default": { "allowPrivilegeEscalation": false, "capabilities": { "drop": [ "ALL" ] }, "readOnlyRootFilesystem": true, "runAsNonRoot": true, "seccompProfile": { "type": "RuntimeDefault" } }, "description": "Add Container specific SecurityContext settings to the container. Takes precedence over `podSecurityContext` when set. 
See https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container", "type": "object" }, "helm-values.serviceAccount": { "additionalProperties": false, "properties": { "annotations": { "$ref": "#/$defs/helm-values.serviceAccount.annotations" }, "automount": { "$ref": "#/$defs/helm-values.serviceAccount.automount" }, "create": { "$ref": "#/$defs/helm-values.serviceAccount.create" }, "name": { "$ref": "#/$defs/helm-values.serviceAccount.name" } }, "type": "object" }, "helm-values.serviceAccount.annotations": { "default": {}, "description": "Annotations to add to the service account", "type": "object" }, "helm-values.serviceAccount.automount": { "default": true, "description": "Automatically mount a ServiceAccount's API credentials?", "type": "boolean" }, "helm-values.serviceAccount.create": { "default": true, "description": "Specifies whether a service account should be created", "type": "boolean" }, "helm-values.serviceAccount.name": { "default": "", "description": "The name of the service account to use.\nIf not set and create is true, a name is generated using the fullname template", "type": "string" }, "helm-values.tolerations": { "default": [], "items": {}, "type": "array" }, "helm-values.volumeMounts": { "default": [], "description": "Additional volumeMounts on the output Deployment definition.", "items": {}, "type": "array" }, "helm-values.volumes": { "default": [], "description": "Additional volumes on the output Deployment definition.", "items": {}, "type": "array" } }, "$ref": "#/$defs/helm-values", "$schema": "http://json-schema.org/draft-07/schema#" } ================================================ FILE: deploy/charts/discovery-agent/values.yaml ================================================ # Configuration for the Discovery Agent config: # Required: The TSG (Tenant Service Group) ID to use when connecting to SCM. # NB: TSG IDs are numeric, but must be treated as strings to avoid issues with YAML data types. 
# With the Helm CLI use `--set-string`; with YAML always pass TSG IDs in double quotes. # +docs:property # +docs:type=string tsgID: "" # Required: A human readable name for the cluster into which the agent is being deployed. # # This cluster name will be associated with the data that the agent uploads to the backend. # +docs:property clusterName: "" # A short description of the cluster where the agent is deployed (optional). # # This description will be associated with the data that the agent uploads to the backend. # +docs:property clusterDescription: "" # Whether discovered certs can be claimed by other tenants (optional). # true = certs are left unassigned, available for any tenant to claim. # false (default) = certs are owned by this cluster's tenant. claimableCerts: false # How often to push data to the remote server # +docs:property period: "0h1m0s" # You can configure the agent to exclude some annotations or # labels from being pushed. All Kubernetes objects # are affected. The objects are still pushed, but the specified annotations # and labels are removed before being pushed. # # The dot is the only character that needs to be escaped in the regex. Use either # double quotes with escaped single quotes or unquoted strings for the regex # to avoid YAML parsing issues with `\.`. # # Example: excludeAnnotationKeysRegex: ['^kapp\.k14s\.io/original.*'] excludeAnnotationKeysRegex: [] excludeLabelKeysRegex: [] # Deprecated: Client ID for the configured service account. # The client ID should be provided in the "clientID" field of the authentication secret (see config.secretName). # This field is provided for compatibility for users migrating from the "venafi-kubernetes-agent" chart. # +docs:property clientID: "" # Deprecated: Client ID for the configured service account (alternative to clientID). # The client ID should be provided in the "clientID" field of the authentication secret (see config.secretName). 
# This field is provided for compatibility for users migrating from the "venafi-kubernetes-agent" chart. # If both clientID and clientId are set, clientID takes precedence. # +docs:hidden clientId: "" # The name of the Secret containing the NGTS built-in service account credentials. # The Secret must contain the following key: # - privatekey.pem: PEM-encoded private key for the service account # The Secret should also contain the following key: # - clientID: Service account client ID (config.clientID must be set if not present) # +docs:property secretName: discovery-agent-credentials # Explicit SCM server URL (optional). # If not set, a production SCM server URL will be created based on the TSG ID. # This value is intended for development purposes only and should not be set in production. # +docs:hidden serverURL: "" # This will set the replicaset count more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/ replicaCount: 1 # The container registry used for discovery-agent images by default. # This can include path prefixes (e.g. "artifactory.example.com/docker"). # +docs:property imageRegistry: "quay.io" # The repository namespace used for discovery-agent images by default. # Examples: # - jetstack # - custom-namespace # +docs:property imageNamespace: "jetstack" # This sets the container image more information can be found here: https://kubernetes.io/docs/concepts/containers/images/ image: # Full repository override (takes precedence over `imageRegistry`, `imageNamespace`, # and `image.name`). # Example: quay.io/jetstack/discovery-agent # +docs:property repository: "" # The image name for the Discovery Agent. # This is used (together with `imageRegistry` and `imageNamespace`) to construct the full # image reference. # +docs:property name: discovery-agent # This sets the pull policy for images. pullPolicy: IfNotPresent # Override the image tag to deploy by setting this variable. 
# If no value is set, the chart's appVersion is used. tag: "" # Override the image digest to deploy by setting this variable. # If set together with `image.tag`, the rendered image will include both tag and digest. digest: "" # This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ imagePullSecrets: [] # This is to override the chart name. nameOverride: "" fullnameOverride: "" # This section builds out the service account more information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/ serviceAccount: # Specifies whether a service account should be created create: true # Automatically mount a ServiceAccount's API credentials? automount: true # Annotations to add to the service account annotations: {} # The name of the service account to use. # If not set and create is true, a name is generated using the fullname template name: "" # This is for setting Kubernetes Annotations to a Pod. # For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ podAnnotations: {} # This is for setting Kubernetes Labels to a Pod. # For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ podLabels: {} podSecurityContext: {} # fsGroup: 2000 # Add Container specific SecurityContext settings to the container. Takes # precedence over `podSecurityContext` when set. See # https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container # +docs:property securityContext: capabilities: drop: - ALL readOnlyRootFilesystem: true runAsNonRoot: true allowPrivilegeEscalation: false seccompProfile: { type: RuntimeDefault } resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. 
This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi # Additional volumes on the output Deployment definition. volumes: [] # - name: foo # secret: # secretName: mysecret # optional: false # Additional volumeMounts on the output Deployment definition. volumeMounts: [] # - name: foo # mountPath: "/etc/foo" # readOnly: true nodeSelector: {} tolerations: [] affinity: {} # Configures the HTTP_PROXY environment variable where a HTTP proxy is required. # +docs:property # http_proxy: "http://proxy:8080" # Configures the HTTPS_PROXY environment variable where a HTTP proxy is required. # +docs:property # https_proxy: "https://proxy:8080" # Configures the NO_PROXY environment variable where a HTTP proxy is required, # but certain domains should be excluded. # +docs:property # no_proxy: 127.0.0.1,localhost # Configure a PodDisruptionBudget for the agent's Deployment. If running with multiple # replicas, consider setting podDisruptionBudget.enabled to true. # +docs:property podDisruptionBudget: # Enable or disable the PodDisruptionBudget resource, which helps prevent downtime # during voluntary disruptions such as during a Node upgrade. enabled: false # Configure the minimum available pods for disruptions. Can either be set to # an integer (e.g. 1) or a percentage value (e.g. 25%). # Cannot be used if `maxUnavailable` is set. # +docs:property # minAvailable: 1 # Configure the maximum unavailable pods for disruptions. Can either be set to # an integer (e.g. 1) or a percentage value (e.g. 25%). # Cannot be used if `minAvailable` is set. 
# +docs:property # maxUnavailable: 1 # extraArgs: # - --logging-format=json # - --log-level=6 # To enable HTTP request logging extraArgs: [] pprof: # Enable profiling with the pprof endpoint enabled: false metrics: # Enable the metrics server. # If false, the metrics server will be disabled and the other metrics fields below will be ignored. enabled: true podmonitor: # Create a PodMonitor to add the metrics to Prometheus, if you are using Prometheus Operator. # See https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor enabled: false # The namespace that the pod monitor should live in. # Defaults to the discovery-agent namespace. # +docs:property # namespace: ngts # Specifies the `prometheus` label on the created PodMonitor. # This is used when different Prometheus instances have label selectors # matching different PodMonitors. prometheusInstance: default # The interval to scrape metrics. interval: 60s # The timeout before a metrics scrape fails. scrapeTimeout: 30s # Additional labels to add to the PodMonitor. labels: {} # Additional annotations to add to the PodMonitor. annotations: {} # Keep labels from scraped data, overriding server-side labels. honorLabels: false # EndpointAdditionalProperties allows setting additional properties on the endpoint such as relabelings, metricRelabelings etc. # # For example: # endpointAdditionalProperties: # relabelings: # - action: replace # sourceLabels: # - __meta_kubernetes_pod_node_name # targetLabel: instance endpointAdditionalProperties: {} ================================================ FILE: deploy/charts/venafi-kubernetes-agent/.helmignore ================================================ # Patterns to ignore when building packages. # This supports shell glob matching, relative path matching, and # negation (prefixed with !). Only one pattern per line. 
.DS_Store # Common VCS dirs .git/ .gitignore .bzr/ .bzrignore .hg/ .hgignore .svn/ # Common backup files *.swp *.bak *.tmp *.orig *~ # Various IDEs .project .idea/ *.tmproj .vscode/ ================================================ FILE: deploy/charts/venafi-kubernetes-agent/Chart.yaml ================================================ apiVersion: v2 name: venafi-kubernetes-agent type: application description: |- The Discovery Agent connects your Kubernetes or OpenShift cluster to the CyberArk Certificate Manager. maintainers: - name: CyberArk email: mis.support@cyberark.com url: https://www.cyberark.com sources: - https://github.com/jetstack/jetstack-secure # These versions are meant to be overridden by `make helm-chart`. No `v` prefix # for the `version` because Helm doesn't support auto-determining the latest # version for OCI Helm charts that use a `v` prefix. version: 0.0.0 appVersion: "v0.0.0" ================================================ FILE: deploy/charts/venafi-kubernetes-agent/README.md ================================================ # venafi-kubernetes-agent The Discovery Agent connects your Kubernetes or OpenShift cluster to the CyberArk Certificate Manager (formerly Venafi Control Plane). You will require a CyberArk Certificate Manager account to connect your cluster. If you do not have one, you can sign up for a free trial now at: - https://www.cyberark.com/try-buy/certificate-manager-saas-trial/ > 📖 Read the [Discovery Agent documentation](https://docs.cyberark.com/mis-saas/vaas/k8s-components/c-tlspk-agent-overview/), > to learn how to install and configure this Helm chart. ## Values #### **metrics.enabled** ~ `bool` > Default value: > ```yaml > true > ``` Enable the metrics server. If false, the metrics server will be disabled and the other metrics fields below will be ignored. 
#### **metrics.podmonitor.enabled** ~ `bool` > Default value: > ```yaml > false > ``` Create a PodMonitor to add the metrics to Prometheus, if you are using Prometheus Operator. See https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor #### **metrics.podmonitor.namespace** ~ `string` The namespace that the pod monitor should live in. Defaults to the venafi-kubernetes-agent namespace. #### **metrics.podmonitor.prometheusInstance** ~ `string` > Default value: > ```yaml > default > ``` Specifies the `prometheus` label on the created PodMonitor. This is used when different Prometheus instances have label selectors matching different PodMonitors. #### **metrics.podmonitor.interval** ~ `string` > Default value: > ```yaml > 60s > ``` The interval to scrape metrics. #### **metrics.podmonitor.scrapeTimeout** ~ `string` > Default value: > ```yaml > 30s > ``` The timeout before a metrics scrape fails. #### **metrics.podmonitor.labels** ~ `object` > Default value: > ```yaml > {} > ``` Additional labels to add to the PodMonitor. #### **metrics.podmonitor.annotations** ~ `object` > Default value: > ```yaml > {} > ``` Additional annotations to add to the PodMonitor. #### **metrics.podmonitor.honorLabels** ~ `bool` > Default value: > ```yaml > false > ``` Keep labels from scraped data, overriding server-side labels. #### **metrics.podmonitor.endpointAdditionalProperties** ~ `object` > Default value: > ```yaml > {} > ``` EndpointAdditionalProperties allows setting additional properties on the endpoint such as relabelings, metricRelabelings etc. 
For example: ```yaml endpointAdditionalProperties: relabelings: - action: replace sourceLabels: - __meta_kubernetes_pod_node_name targetLabel: instance ``` #### **replicaCount** ~ `number` > Default value: > ```yaml > 1 > ``` default replicas, do not scale up #### **imageRegistry** ~ `string` > Default value: > ```yaml > registry.venafi.cloud > ``` The container registry used for venafi-kubernetes-agent images by default. This can include path prefixes (e.g. "artifactory.example.com/docker"). #### **imageNamespace** ~ `string` > Default value: > ```yaml > venafi-agent > ``` The repository namespace used for venafi-kubernetes-agent images by default. Examples: - venafi-agent - custom-namespace #### **image.registry** ~ `string` Deprecated: per-component registry prefix. If set, this value is *prepended* to the image repository that the chart would otherwise render. This applies both when `image.repository` is set and when the repository is computed from `imageRegistry` + `imageNamespace` + `image.name`. This can produce "double registry" style references such as `legacy.example.io/registry.venafi.cloud/venafi-agent/...`. Prefer using the global `imageRegistry`/`imageNamespace` values. #### **image.repository** ~ `string` > Default value: > ```yaml > "" > ``` Full repository override (takes precedence over `imageRegistry`, `imageNamespace`, and `image.name`). Example: registry.venafi.cloud/venafi-agent/venafi-agent #### **image.name** ~ `string` > Default value: > ```yaml > venafi-agent > ``` The image name for the Discovery Agent. This is used (together with `imageRegistry` and `imageNamespace`) to construct the full image reference. #### **image.pullPolicy** ~ `string` > Default value: > ```yaml > IfNotPresent > ``` Kubernetes imagePullPolicy on Deployment. #### **image.tag** ~ `string` > Default value: > ```yaml > "" > ``` Override the image tag to deploy by setting this variable. If no value is set, the chart's appVersion is used. 
#### **image.digest** ~ `string` > Default value: > ```yaml > "" > ``` Override the image digest to deploy by setting this variable. If set together with `image.tag`, the rendered image will include both tag and digest. #### **imagePullSecrets** ~ `array` > Default value: > ```yaml > [] > ``` Specify image pull credentials if using a private registry. Example: - name: my-pull-secret #### **nameOverride** ~ `string` > Default value: > ```yaml > "" > ``` Helm default setting to override release name, usually leave blank. #### **fullnameOverride** ~ `string` > Default value: > ```yaml > "" > ``` Helm default setting, use this to shorten the full install name. #### **serviceAccount.create** ~ `bool` > Default value: > ```yaml > true > ``` Specifies whether a service account should be created. #### **serviceAccount.annotations** ~ `object` > Default value: > ```yaml > {} > ``` Annotations YAML to add to the service account. #### **serviceAccount.name** ~ `string` > Default value: > ```yaml > "" > ``` The name of the service account to use. If blank and `serviceAccount.create` is true, a name is generated using the fullname template of the release. #### **podAnnotations** ~ `object` > Default value: > ```yaml > {} > ``` Additional YAML annotations to add to the pod. #### **podSecurityContext** ~ `object` > Default value: > ```yaml > {} > ``` Optional Pod (all containers) `SecurityContext` options, see https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod. Example: podSecurityContext ```yaml runAsUser: 1000 runAsGroup: 3000 fsGroup: 2000 ``` #### **http_proxy** ~ `string` Configures the HTTP_PROXY environment variable where a HTTP proxy is required. #### **https_proxy** ~ `string` Configures the HTTPS_PROXY environment variable where a HTTP proxy is required. #### **no_proxy** ~ `string` Configures the NO_PROXY environment variable where a HTTP proxy is required, but certain domains should be excluded. 
#### **securityContext** ~ `object` > Default value: > ```yaml > allowPrivilegeEscalation: false > capabilities: > drop: > - ALL > readOnlyRootFilesystem: true > runAsNonRoot: true > seccompProfile: > type: RuntimeDefault > ``` Add Container specific SecurityContext settings to the container. Takes precedence over `podSecurityContext` when set. See https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container #### **resources** ~ `object` > Default value: > ```yaml > limits: > memory: 500Mi > requests: > cpu: 200m > memory: 200Mi > ``` Set resource requests and limits for the pod. Read [Venafi Kubernetes components deployment best practices](https://docs.cyberark.com/mis-saas/vaas/k8s-components/c-k8s-components-best-practice/#scaling) to learn how to choose suitable CPU and memory resource requests and limits. #### **nodeSelector** ~ `object` > Default value: > ```yaml > {} > ``` Embed YAML for nodeSelector settings, see https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/ #### **tolerations** ~ `array` > Default value: > ```yaml > [] > ``` Embed YAML for toleration settings, see https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ #### **affinity** ~ `object` > Default value: > ```yaml > {} > ``` Embed YAML for Node affinity settings, see https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/. #### **command** ~ `array` > Default value: > ```yaml > [] > ``` Specify the command to run overriding default binary. #### **extraArgs** ~ `array` > Default value: > ```yaml > [] > ``` Specify additional arguments to pass to the agent binary. For example, to enable JSON logging use `--logging-format`, or to increase the logging verbosity use `--log-level`. The log levels are: 0=Info, 1=Debug, 2=Trace. Use 6-9 for increasingly verbose HTTP request logging. The default log level is 0. 
Example: ```yaml extraArgs: - --logging-format=json - --log-level=6 # To enable HTTP request logging ``` #### **volumes** ~ `array` > Default value: > ```yaml > [] > ``` Additional volumes to add to the Discovery Agent container. This is useful for mounting a custom CA bundle. For example: ```yaml volumes: - name: cabundle configMap: name: cabundle optional: false defaultMode: 0644 ``` In order to create the ConfigMap, you can use the following command: kubectl create configmap cabundle \ --from-file=cabundle=./your/custom/ca/bundle.pem #### **volumeMounts** ~ `array` > Default value: > ```yaml > [] > ``` Additional volume mounts to add to the Discovery Agent container. This is useful for mounting a custom CA bundle. Any PEM certificate mounted under /etc/ssl/certs will be loaded by the Discovery Agent. For example: ```yaml volumeMounts: - name: cabundle mountPath: /etc/ssl/certs/cabundle subPath: cabundle readOnly: true ``` #### **authentication.secretName** ~ `string` > Default value: > ```yaml > agent-credentials > ``` Name of the secret containing the private key #### **authentication.secretKey** ~ `string` > Default value: > ```yaml > privatekey.pem > ``` Key name in the referenced secret ### Venafi Connection Configure VenafiConnection authentication #### **authentication.venafiConnection.enabled** ~ `bool` > Default value: > ```yaml > false > ``` When set to true, the Discovery Agent will authenticate to CyberArk Certificate Manager using the configuration in a VenafiConnection resource. Use `venafiConnection.enabled=true` for [secretless authentication](https://docs.cyberark.com/mis-saas/vaas/k8s-components/t-install-tlspk-agent/). When set to true, the `authentication.secret` values will be ignored and the Secret with `authentication.secretName` will _not_ be mounted into the Discovery Agent Pod. 
#### **authentication.venafiConnection.name** ~ `string` > Default value: > ```yaml > venafi-components > ``` The name of a VenafiConnection resource which contains the configuration for authenticating to Venafi. #### **authentication.venafiConnection.namespace** ~ `string` > Default value: > ```yaml > venafi > ``` The namespace of a VenafiConnection resource which contains the configuration for authenticating to Venafi. #### **config.server** ~ `string` > Default value: > ```yaml > https://api.venafi.cloud/ > ``` API URL of the CyberArk Certificate Manager API. For EU tenants, set this value to https://api.venafi.eu/. If you are using the VenafiConnection authentication method, you must set the API URL using the field `spec.vcp.url` on the VenafiConnection resource instead. #### **config.clientId** ~ `string` > Default value: > ```yaml > "" > ``` The client-id to be used for authenticating with the Venafi Control Plane. Only useful when using a Key Pair Service Account in the Venafi Control Plane. You can obtain the client ID by creating a Key Pair Service Account in the CyberArk Certificate Manager. #### **config.period** ~ `string` > Default value: > ```yaml > 0h1m0s > ``` Send data back to the platform every minute unless changed. #### **config.clusterName** ~ `string` > Default value: > ```yaml > "" > ``` Name for the cluster resource if it needs to be created in Venafi Control Plane. #### **config.clusterDescription** ~ `string` > Default value: > ```yaml > "" > ``` Description for the cluster resource if it needs to be created in Venafi Control Plane. 
#### **config.ignoredSecretTypes[0]** ~ `string` > Default value: > ```yaml > kubernetes.io/service-account-token > ``` #### **config.ignoredSecretTypes[1]** ~ `string` > Default value: > ```yaml > kubernetes.io/dockercfg > ``` #### **config.ignoredSecretTypes[2]** ~ `string` > Default value: > ```yaml > kubernetes.io/dockerconfigjson > ``` #### **config.ignoredSecretTypes[3]** ~ `string` > Default value: > ```yaml > kubernetes.io/basic-auth > ``` #### **config.ignoredSecretTypes[4]** ~ `string` > Default value: > ```yaml > kubernetes.io/ssh-auth > ``` #### **config.ignoredSecretTypes[5]** ~ `string` > Default value: > ```yaml > bootstrap.kubernetes.io/token > ``` #### **config.ignoredSecretTypes[6]** ~ `string` > Default value: > ```yaml > helm.sh/release.v1 > ``` #### **config.excludeAnnotationKeysRegex** ~ `array` > Default value: > ```yaml > [] > ``` You can configure Discovery Agent to exclude some annotations or labels from being pushed to the CyberArk Certificate Manager. All Kubernetes objects are affected. The objects are still pushed, but the specified annotations and labels are removed before being sent to the CyberArk Certificate Manager. Dots is the only character that needs to be escaped in the regex. Use either double quotes with escaped single quotes or unquoted strings for the regex to avoid YAML parsing issues with `\.`. Example: excludeAnnotationKeysRegex: ['^kapp\.k14s\.io/original.*'] #### **config.excludeLabelKeysRegex** ~ `array` > Default value: > ```yaml > [] > ``` #### **config.configmap.name** ~ `unknown` > Default value: > ```yaml > null > ``` #### **config.configmap.key** ~ `unknown` > Default value: > ```yaml > null > ``` #### **podDisruptionBudget.enabled** ~ `bool` > Default value: > ```yaml > false > ``` Enable or disable the PodDisruptionBudget resource, which helps prevent downtime during voluntary disruptions such as during a Node upgrade. 
#### **podDisruptionBudget.minAvailable** ~ `number` Configure the minimum available pods for disruptions. Can either be set to an integer (e.g. 1) or a percentage value (e.g. 25%). Cannot be used if `maxUnavailable` is set. #### **podDisruptionBudget.maxUnavailable** ~ `number` Configure the maximum unavailable pods for disruptions. Can either be set to an integer (e.g. 1) or a percentage value (e.g. 25%). Cannot be used if `minAvailable` is set. ### CRDs The CRDs installed by this chart are annotated with "helm.sh/resource-policy: keep", this prevents them from being accidentally removed by Helm when this chart is deleted. After deleting the installed chart, the user still has to manually remove the remaining CRDs. #### **crds.forceRemoveValidationAnnotations** ~ `bool` > Default value: > ```yaml > false > ``` The 'x-kubernetes-validations' annotation is not supported in Kubernetes 1.22 and below. This annotation is used by CEL, which is a feature introduced in Kubernetes 1.25 that improves how validation is performed. This option allows you to force the 'x-kubernetes-validations' annotation to be excluded, even on Kubernetes 1.25+ clusters. #### **crds.keep** ~ `bool` > Default value: > ```yaml > false > ``` This option makes it so that the "helm.sh/resource-policy": keep annotation is added to the CRD. This will prevent Helm from uninstalling the CRD when the Helm release is uninstalled. #### **crds.venafiConnection.include** ~ `bool` > Default value: > ```yaml > false > ``` When set to false, the rendered output does not contain the VenafiConnection CRDs and RBAC. This is useful for when the Venafi Connection resources are already installed separately. 
================================================ FILE: deploy/charts/venafi-kubernetes-agent/crd_bases/crd.footer.yaml ================================================ {{ end }} {{ end }} ================================================ FILE: deploy/charts/venafi-kubernetes-agent/crd_bases/crd.header-without-validations.yaml ================================================ {{/* DO NOT EDIT. Use 'make generate-crds-venconn' to regenerate. */}} {{- if .Values.crds.venafiConnection.include }} {{- if (or (semverCompare "<1.25" .Capabilities.KubeVersion.GitVersion) .Values.crds.forceRemoveValidationAnnotations) }} apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: "venaficonnections.jetstack.io" {{- if .Values.crds.keep }} annotations: # This annotation prevents the CRD from being pruned by Helm when this chart # is deleted. helm.sh/resource-policy: keep {{- end }} labels: {{- include "venafi-connection.labels" . | nindent 4 }} ================================================ FILE: deploy/charts/venafi-kubernetes-agent/crd_bases/crd.header.yaml ================================================ {{/* DO NOT EDIT. Use 'make generate-crds-venconn' to regenerate. */}} {{- if .Values.crds.venafiConnection.include }} {{- if not (or (semverCompare "<1.25" .Capabilities.KubeVersion.GitVersion) .Values.crds.forceRemoveValidationAnnotations) }} apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: "venaficonnections.jetstack.io" {{- if .Values.crds.keep }} annotations: # This annotation prevents the CRD from being pruned by Helm when this chart # is deleted. helm.sh/resource-policy: keep {{- end }} labels: {{- include "venafi-connection.labels" . | nindent 4 }} ================================================ FILE: deploy/charts/venafi-kubernetes-agent/crd_bases/jetstack.io_venaficonnections.yaml ================================================ # DO NOT EDIT: Use 'make generate-crds-venconn' to regenerate. 
--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.19.0 name: venaficonnections.jetstack.io spec: group: jetstack.io names: kind: VenafiConnection listKind: VenafiConnectionList plural: venaficonnections shortNames: - vc singular: venaficonnection scope: Namespaced versions: - name: v1alpha1 schema: openAPIV3Schema: description: VenafiConnection is the Schema for the VenafiConnection API properties: apiVersion: description: |- APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: description: |- Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: properties: allowReferencesFrom: description: |- A namespace selector that specifies what namespaces this VenafiConnection is allowed to be used from. If not set/ null, the VenafiConnection can only be used within its namespace. An empty selector ({}) matches all namespaces. If set to a non-empty selector, the VenafiConnection can only be used from namespaces that match the selector. This possibly excludes the namespace the VenafiConnection is in. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: description: |- A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. properties: key: description: key is the label key that the selector applies to. 
type: string operator: description: |- operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: description: |- values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array x-kubernetes-list-type: atomic required: - key - operator type: object type: array x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string description: |- matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic firefly: properties: accessToken: description: |- The list of steps to retrieve the Access Token that will be used to connect to Firefly. items: properties: hashicorpVaultLDAP: description: |- HashicorpVaultLDAP is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: ldapPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/ldap/static-cred/:role_name or /v1/ldap/creds/:role_name type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - ldapPath type: object hashicorpVaultOAuth: description: |- HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource step to provide an OAuth token, which this step uses to authenticate to Vault. The output of this step is a Vault token. This step allows you to use the step `HashicorpVaultSecret` afterwards. 
properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with HashiCorp Vault. The only supported value is "OIDC". enum: - OIDC type: string authPath: description: |- The login URL used for obtaining the Vault token. Example: /v1/auth/oidc/login type: string clientId: description: 'Deprecated: This field does nothing and will be removed in the future.' type: string role: description: |- The role defined in Vault that we want to use when authenticating to Vault. type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - authInputType - authPath - role type: object hashicorpVaultSecret: description: |- HashicorpVaultSecret is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: fields: description: |- The fields are Vault keys pointing to the secrets passed to the next SecretSource step. Example 1 (TPP, username and password): imagining that you have stored the username and password for TPP under the keys "username" and "password", you will want to set this field to `["username", "password"]`. The username is expected to be given first, the password second. items: type: string type: array secretPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/secret/data/application-team-a/tpp-username-password type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - fields - secretPath type: object secret: description: |- Secret is a SecretSource step meant to be the first step. It retrieves secret values from a Kubernetes Secret, and passes them to the next step. properties: fields: description: |- The names of the fields we want to extract from the Kubernetes secret. These fields are passed to the next step in the chain. 
items: type: string type: array name: description: The name of the Kubernetes secret. type: string required: - fields - name type: object serviceAccountToken: description: |- ServiceAccountToken is a SecretSource step meant to be the first step. It uses the Kubernetes TokenRequest API to retrieve a token for a given service account, and passes it to the next step. properties: audiences: description: |- Audiences are the intendend audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences. items: type: string type: array expirationSeconds: description: |- ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response. format: int64 type: integer name: description: The name of the Kubernetes service account. type: string required: - audiences - name type: object tppOAuth: description: |- TPPOAuth is a SecretSource step that authenticates to a TPP server. This step is meant to be the last step and requires a prior step that depends on the `authInputType`. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with TPP. The supported values are "UsernamePassword" and "JWT". enum: - UsernamePassword - JWT type: string clientId: description: ClientID is the clientId used to authenticate with TPP. type: string url: description: |- The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs https://tpp.example.com and https://tpp.example.com/vedsdk are equivalent. The ending `/vedsdk` is optional and is stripped out by our client. 
If not set, defaults to the URL defined at the top-level of the TPP configuration. type: string required: - authInputType type: object vcpOAuth: description: |- VCPOAuth is a SecretSource step that authenticates to the Certificate Manager, SaaS. This step is meant to be the last step and requires a prior step that outputs a JWT token. properties: tenantID: description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS. type: string type: object type: object x-kubernetes-validations: - message: must have exactly one field set rule: '((has(self.secret) ? 1 : 0) + (has(self.serviceAccountToken) ? 1 : 0) + (has(self.hashicorpVaultOAuth) ? 1 : 0) + (has(self.hashicorpVaultSecret) ? 1 : 0) + (has(self.hashicorpVaultLDAP) ? 1 : 0) + (has(self.tppOAuth) ? 1 : 0) + (has(self.vcpOAuth) ? 1 : 0)) == 1' maxItems: 50 type: array x-kubernetes-list-type: atomic url: description: The URL to connect to the Workload Identity Manager instance. type: string required: - url type: object tpp: properties: accessToken: description: The list of steps to retrieve a TPP access token. items: properties: hashicorpVaultLDAP: description: |- HashicorpVaultLDAP is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: ldapPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/ldap/static-cred/:role_name or /v1/ldap/creds/:role_name type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - ldapPath type: object hashicorpVaultOAuth: description: |- HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource step to provide an OAuth token, which this step uses to authenticate to Vault. The output of this step is a Vault token. This step allows you to use the step `HashicorpVaultSecret` afterwards. 
properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with HashiCorp Vault. The only supported value is "OIDC". enum: - OIDC type: string authPath: description: |- The login URL used for obtaining the Vault token. Example: /v1/auth/oidc/login type: string clientId: description: 'Deprecated: This field does nothing and will be removed in the future.' type: string role: description: |- The role defined in Vault that we want to use when authenticating to Vault. type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - authInputType - authPath - role type: object hashicorpVaultSecret: description: |- HashicorpVaultSecret is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: fields: description: |- The fields are Vault keys pointing to the secrets passed to the next SecretSource step. Example 1 (TPP, username and password): imagining that you have stored the username and password for TPP under the keys "username" and "password", you will want to set this field to `["username", "password"]`. The username is expected to be given first, the password second. items: type: string type: array secretPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/secret/data/application-team-a/tpp-username-password type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - fields - secretPath type: object secret: description: |- Secret is a SecretSource step meant to be the first step. It retrieves secret values from a Kubernetes Secret, and passes them to the next step. properties: fields: description: |- The names of the fields we want to extract from the Kubernetes secret. These fields are passed to the next step in the chain. 
items: type: string type: array name: description: The name of the Kubernetes secret. type: string required: - fields - name type: object serviceAccountToken: description: |- ServiceAccountToken is a SecretSource step meant to be the first step. It uses the Kubernetes TokenRequest API to retrieve a token for a given service account, and passes it to the next step. properties: audiences: description: |- Audiences are the intendend audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences. items: type: string type: array expirationSeconds: description: |- ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response. format: int64 type: integer name: description: The name of the Kubernetes service account. type: string required: - audiences - name type: object tppOAuth: description: |- TPPOAuth is a SecretSource step that authenticates to a TPP server. This step is meant to be the last step and requires a prior step that depends on the `authInputType`. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with TPP. The supported values are "UsernamePassword" and "JWT". enum: - UsernamePassword - JWT type: string clientId: description: ClientID is the clientId used to authenticate with TPP. type: string url: description: |- The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs https://tpp.example.com and https://tpp.example.com/vedsdk are equivalent. The ending `/vedsdk` is optional and is stripped out by our client. 
If not set, defaults to the URL defined at the top-level of the TPP configuration. type: string required: - authInputType type: object vcpOAuth: description: |- VCPOAuth is a SecretSource step that authenticates to the Certificate Manager, SaaS. This step is meant to be the last step and requires a prior step that outputs a JWT token. properties: tenantID: description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS. type: string type: object type: object x-kubernetes-validations: - message: must have exactly one field set rule: '((has(self.secret) ? 1 : 0) + (has(self.serviceAccountToken) ? 1 : 0) + (has(self.hashicorpVaultOAuth) ? 1 : 0) + (has(self.hashicorpVaultSecret) ? 1 : 0) + (has(self.hashicorpVaultLDAP) ? 1 : 0) + (has(self.tppOAuth) ? 1 : 0) + (has(self.vcpOAuth) ? 1 : 0)) == 1' maxItems: 50 type: array x-kubernetes-list-type: atomic url: description: |- The URL to connect to the Certificate Manager Self-Hosted instance. The two URLs https://tpp.example.com and https://tpp.example.com/vedsdk are equivalent. The ending `/vedsdk` is optional and is stripped out by venafi-connection-lib. type: string required: - url type: object vaas: description: 'Deprecated: The ''vaas'' field is deprecated use the field called ''vcp'' instead.' properties: accessToken: description: |- The list of steps to retrieve the Access Token that will be used to connect to Certificate Manager, SaaS. items: properties: hashicorpVaultLDAP: description: |- HashicorpVaultLDAP is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: ldapPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/ldap/static-cred/:role_name or /v1/ldap/creds/:role_name type: string url: description: The URL to connect to your HashiCorp Vault instance. 
type: string required: - ldapPath type: object hashicorpVaultOAuth: description: |- HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource step to provide an OAuth token, which this step uses to authenticate to Vault. The output of this step is a Vault token. This step allows you to use the step `HashicorpVaultSecret` afterwards. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with HashiCorp Vault. The only supported value is "OIDC". enum: - OIDC type: string authPath: description: |- The login URL used for obtaining the Vault token. Example: /v1/auth/oidc/login type: string clientId: description: 'Deprecated: This field does nothing and will be removed in the future.' type: string role: description: |- The role defined in Vault that we want to use when authenticating to Vault. type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - authInputType - authPath - role type: object hashicorpVaultSecret: description: |- HashicorpVaultSecret is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: fields: description: |- The fields are Vault keys pointing to the secrets passed to the next SecretSource step. Example 1 (TPP, username and password): imagining that you have stored the username and password for TPP under the keys "username" and "password", you will want to set this field to `["username", "password"]`. The username is expected to be given first, the password second. items: type: string type: array secretPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/secret/data/application-team-a/tpp-username-password type: string url: description: The URL to connect to your HashiCorp Vault instance. 
type: string required: - fields - secretPath type: object secret: description: |- Secret is a SecretSource step meant to be the first step. It retrieves secret values from a Kubernetes Secret, and passes them to the next step. properties: fields: description: |- The names of the fields we want to extract from the Kubernetes secret. These fields are passed to the next step in the chain. items: type: string type: array name: description: The name of the Kubernetes secret. type: string required: - fields - name type: object serviceAccountToken: description: |- ServiceAccountToken is a SecretSource step meant to be the first step. It uses the Kubernetes TokenRequest API to retrieve a token for a given service account, and passes it to the next step. properties: audiences: description: |- Audiences are the intendend audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences. items: type: string type: array expirationSeconds: description: |- ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response. format: int64 type: integer name: description: The name of the Kubernetes service account. type: string required: - audiences - name type: object tppOAuth: description: |- TPPOAuth is a SecretSource step that authenticates to a TPP server. This step is meant to be the last step and requires a prior step that depends on the `authInputType`. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with TPP. The supported values are "UsernamePassword" and "JWT". 
enum: - UsernamePassword - JWT type: string clientId: description: ClientID is the clientId used to authenticate with TPP. type: string url: description: |- The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs https://tpp.example.com and https://tpp.example.com/vedsdk are equivalent. The ending `/vedsdk` is optional and is stripped out by our client. If not set, defaults to the URL defined at the top-level of the TPP configuration. type: string required: - authInputType type: object vcpOAuth: description: |- VCPOAuth is a SecretSource step that authenticates to the Certificate Manager, SaaS. This step is meant to be the last step and requires a prior step that outputs a JWT token. properties: tenantID: description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS. type: string type: object type: object x-kubernetes-validations: - message: must have exactly one field set rule: '((has(self.secret) ? 1 : 0) + (has(self.serviceAccountToken) ? 1 : 0) + (has(self.hashicorpVaultOAuth) ? 1 : 0) + (has(self.hashicorpVaultSecret) ? 1 : 0) + (has(self.hashicorpVaultLDAP) ? 1 : 0) + (has(self.tppOAuth) ? 1 : 0) + (has(self.vcpOAuth) ? 1 : 0)) == 1' maxItems: 50 type: array x-kubernetes-list-type: atomic apiKey: description: |- The list of steps to retrieve the API key that will be used to connect to Certificate Manager, SaaS. items: properties: hashicorpVaultLDAP: description: |- HashicorpVaultLDAP is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: ldapPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/ldap/static-cred/:role_name or /v1/ldap/creds/:role_name type: string url: description: The URL to connect to your HashiCorp Vault instance. 
type: string required: - ldapPath type: object hashicorpVaultOAuth: description: |- HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource step to provide an OAuth token, which this step uses to authenticate to Vault. The output of this step is a Vault token. This step allows you to use the step `HashicorpVaultSecret` afterwards. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with HashiCorp Vault. The only supported value is "OIDC". enum: - OIDC type: string authPath: description: |- The login URL used for obtaining the Vault token. Example: /v1/auth/oidc/login type: string clientId: description: 'Deprecated: This field does nothing and will be removed in the future.' type: string role: description: |- The role defined in Vault that we want to use when authenticating to Vault. type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - authInputType - authPath - role type: object hashicorpVaultSecret: description: |- HashicorpVaultSecret is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: fields: description: |- The fields are Vault keys pointing to the secrets passed to the next SecretSource step. Example 1 (TPP, username and password): imagining that you have stored the username and password for TPP under the keys "username" and "password", you will want to set this field to `["username", "password"]`. The username is expected to be given first, the password second. items: type: string type: array secretPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/secret/data/application-team-a/tpp-username-password type: string url: description: The URL to connect to your HashiCorp Vault instance. 
type: string required: - fields - secretPath type: object secret: description: |- Secret is a SecretSource step meant to be the first step. It retrieves secret values from a Kubernetes Secret, and passes them to the next step. properties: fields: description: |- The names of the fields we want to extract from the Kubernetes secret. These fields are passed to the next step in the chain. items: type: string type: array name: description: The name of the Kubernetes secret. type: string required: - fields - name type: object serviceAccountToken: description: |- ServiceAccountToken is a SecretSource step meant to be the first step. It uses the Kubernetes TokenRequest API to retrieve a token for a given service account, and passes it to the next step. properties: audiences: description: |- Audiences are the intendend audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences. items: type: string type: array expirationSeconds: description: |- ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response. format: int64 type: integer name: description: The name of the Kubernetes service account. type: string required: - audiences - name type: object tppOAuth: description: |- TPPOAuth is a SecretSource step that authenticates to a TPP server. This step is meant to be the last step and requires a prior step that depends on the `authInputType`. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with TPP. The supported values are "UsernamePassword" and "JWT". 
enum: - UsernamePassword - JWT type: string clientId: description: ClientID is the clientId used to authenticate with TPP. type: string url: description: |- The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs https://tpp.example.com and https://tpp.example.com/vedsdk are equivalent. The ending `/vedsdk` is optional and is stripped out by our client. If not set, defaults to the URL defined at the top-level of the TPP configuration. type: string required: - authInputType type: object vcpOAuth: description: |- VCPOAuth is a SecretSource step that authenticates to the Certificate Manager, SaaS. This step is meant to be the last step and requires a prior step that outputs a JWT token. properties: tenantID: description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS. type: string type: object type: object x-kubernetes-validations: - message: must have exactly one field set rule: '((has(self.secret) ? 1 : 0) + (has(self.serviceAccountToken) ? 1 : 0) + (has(self.hashicorpVaultOAuth) ? 1 : 0) + (has(self.hashicorpVaultSecret) ? 1 : 0) + (has(self.hashicorpVaultLDAP) ? 1 : 0) + (has(self.tppOAuth) ? 1 : 0) + (has(self.vcpOAuth) ? 1 : 0)) == 1' maxItems: 50 type: array x-kubernetes-list-type: atomic url: description: |- The URL to connect to the Certificate Manager, SaaS instance. If not set, the default value https://api.venafi.cloud is used. type: string type: object x-kubernetes-validations: - message: 'must have exactly ONE of the following fields set: apiKey or accessToken' rule: '(has(self.apiKey) ? 1 : 0) + (has(self.accessToken) ? 1 : 0) == 1' vcp: properties: accessToken: description: |- The list of steps to retrieve the Access Token that will be used to connect to Certificate Manager, SaaS. items: properties: hashicorpVaultLDAP: description: |- HashicorpVaultLDAP is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. 
It then fetches the requested secrets from Vault for use in the next step. properties: ldapPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/ldap/static-cred/:role_name or /v1/ldap/creds/:role_name type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - ldapPath type: object hashicorpVaultOAuth: description: |- HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource step to provide an OAuth token, which this step uses to authenticate to Vault. The output of this step is a Vault token. This step allows you to use the step `HashicorpVaultSecret` afterwards. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with HashiCorp Vault. The only supported value is "OIDC". enum: - OIDC type: string authPath: description: |- The login URL used for obtaining the Vault token. Example: /v1/auth/oidc/login type: string clientId: description: 'Deprecated: This field does nothing and will be removed in the future.' type: string role: description: |- The role defined in Vault that we want to use when authenticating to Vault. type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - authInputType - authPath - role type: object hashicorpVaultSecret: description: |- HashicorpVaultSecret is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: fields: description: |- The fields are Vault keys pointing to the secrets passed to the next SecretSource step. Example 1 (TPP, username and password): imagining that you have stored the username and password for TPP under the keys "username" and "password", you will want to set this field to `["username", "password"]`. The username is expected to be given first, the password second. 
items: type: string type: array secretPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/secret/data/application-team-a/tpp-username-password type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - fields - secretPath type: object secret: description: |- Secret is a SecretSource step meant to be the first step. It retrieves secret values from a Kubernetes Secret, and passes them to the next step. properties: fields: description: |- The names of the fields we want to extract from the Kubernetes secret. These fields are passed to the next step in the chain. items: type: string type: array name: description: The name of the Kubernetes secret. type: string required: - fields - name type: object serviceAccountToken: description: |- ServiceAccountToken is a SecretSource step meant to be the first step. It uses the Kubernetes TokenRequest API to retrieve a token for a given service account, and passes it to the next step. properties: audiences: description: |- Audiences are the intendend audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences. items: type: string type: array expirationSeconds: description: |- ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response. format: int64 type: integer name: description: The name of the Kubernetes service account. type: string required: - audiences - name type: object tppOAuth: description: |- TPPOAuth is a SecretSource step that authenticates to a TPP server. 
This step is meant to be the last step and requires a prior step that depends on the `authInputType`. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with TPP. The supported values are "UsernamePassword" and "JWT". enum: - UsernamePassword - JWT type: string clientId: description: ClientID is the clientId used to authenticate with TPP. type: string url: description: |- The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs https://tpp.example.com and https://tpp.example.com/vedsdk are equivalent. The ending `/vedsdk` is optional and is stripped out by our client. If not set, defaults to the URL defined at the top-level of the TPP configuration. type: string required: - authInputType type: object vcpOAuth: description: |- VCPOAuth is a SecretSource step that authenticates to the Certificate Manager, SaaS. This step is meant to be the last step and requires a prior step that outputs a JWT token. properties: tenantID: description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS. type: string type: object type: object x-kubernetes-validations: - message: must have exactly one field set rule: '((has(self.secret) ? 1 : 0) + (has(self.serviceAccountToken) ? 1 : 0) + (has(self.hashicorpVaultOAuth) ? 1 : 0) + (has(self.hashicorpVaultSecret) ? 1 : 0) + (has(self.hashicorpVaultLDAP) ? 1 : 0) + (has(self.tppOAuth) ? 1 : 0) + (has(self.vcpOAuth) ? 1 : 0)) == 1' maxItems: 50 type: array x-kubernetes-list-type: atomic apiKey: description: |- The list of steps to retrieve the API key that will be used to connect to Certificate Manager, SaaS. items: properties: hashicorpVaultLDAP: description: |- HashicorpVaultLDAP is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. 
properties: ldapPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/ldap/static-cred/:role_name or /v1/ldap/creds/:role_name type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - ldapPath type: object hashicorpVaultOAuth: description: |- HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource step to provide an OAuth token, which this step uses to authenticate to Vault. The output of this step is a Vault token. This step allows you to use the step `HashicorpVaultSecret` afterwards. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with HashiCorp Vault. The only supported value is "OIDC". enum: - OIDC type: string authPath: description: |- The login URL used for obtaining the Vault token. Example: /v1/auth/oidc/login type: string clientId: description: 'Deprecated: This field does nothing and will be removed in the future.' type: string role: description: |- The role defined in Vault that we want to use when authenticating to Vault. type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - authInputType - authPath - role type: object hashicorpVaultSecret: description: |- HashicorpVaultSecret is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: fields: description: |- The fields are Vault keys pointing to the secrets passed to the next SecretSource step. Example 1 (TPP, username and password): imagining that you have stored the username and password for TPP under the keys "username" and "password", you will want to set this field to `["username", "password"]`. The username is expected to be given first, the password second. 
items: type: string type: array secretPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/secret/data/application-team-a/tpp-username-password type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - fields - secretPath type: object secret: description: |- Secret is a SecretSource step meant to be the first step. It retrieves secret values from a Kubernetes Secret, and passes them to the next step. properties: fields: description: |- The names of the fields we want to extract from the Kubernetes secret. These fields are passed to the next step in the chain. items: type: string type: array name: description: The name of the Kubernetes secret. type: string required: - fields - name type: object serviceAccountToken: description: |- ServiceAccountToken is a SecretSource step meant to be the first step. It uses the Kubernetes TokenRequest API to retrieve a token for a given service account, and passes it to the next step. properties: audiences: description: |- Audiences are the intendend audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences. items: type: string type: array expirationSeconds: description: |- ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response. format: int64 type: integer name: description: The name of the Kubernetes service account. type: string required: - audiences - name type: object tppOAuth: description: |- TPPOAuth is a SecretSource step that authenticates to a TPP server. 
This step is meant to be the last step and requires a prior step that depends on the `authInputType`. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with TPP. The supported values are "UsernamePassword" and "JWT". enum: - UsernamePassword - JWT type: string clientId: description: ClientID is the clientId used to authenticate with TPP. type: string url: description: |- The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs https://tpp.example.com and https://tpp.example.com/vedsdk are equivalent. The ending `/vedsdk` is optional and is stripped out by our client. If not set, defaults to the URL defined at the top-level of the TPP configuration. type: string required: - authInputType type: object vcpOAuth: description: |- VCPOAuth is a SecretSource step that authenticates to the Certificate Manager, SaaS. This step is meant to be the last step and requires a prior step that outputs a JWT token. properties: tenantID: description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS. type: string type: object type: object x-kubernetes-validations: - message: must have exactly one field set rule: '((has(self.secret) ? 1 : 0) + (has(self.serviceAccountToken) ? 1 : 0) + (has(self.hashicorpVaultOAuth) ? 1 : 0) + (has(self.hashicorpVaultSecret) ? 1 : 0) + (has(self.hashicorpVaultLDAP) ? 1 : 0) + (has(self.tppOAuth) ? 1 : 0) + (has(self.vcpOAuth) ? 1 : 0)) == 1' maxItems: 50 type: array x-kubernetes-list-type: atomic url: description: |- The URL to connect to the Certificate Manager, SaaS instance. If not set, the default value https://api.venafi.cloud is used. type: string type: object x-kubernetes-validations: - message: 'must have exactly ONE of the following fields set: apiKey or accessToken' rule: '(has(self.apiKey) ? 1 : 0) + (has(self.accessToken) ? 
1 : 0) == 1' type: object x-kubernetes-validations: - message: 'must have exactly ONE of the following fields set: tpp or vcp' rule: '(has(self.tpp) ? 1 : 0) + (has(self.vaas) ? 1 : 0) + (has(self.vcp) ? 1 : 0) + (has(self.firefly) ? 1 : 0) == 1' status: properties: conditions: description: List of status conditions to indicate the status of a VenafiConnection. items: description: ConnectionCondition contains condition information for a VenafiConnection. properties: lastTransitionTime: description: |- LastTransitionTime is the timestamp corresponding to the last status change of this condition. format: date-time type: string lastUpdateTime: description: lastUpdateTime is the time of the last update to this condition format: date-time type: string message: description: |- Message is a human readable description of the details of the last transition, complementing reason. type: string observedGeneration: description: |- If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Issuer. format: int64 type: integer reason: description: |- Reason is a brief machine readable explanation for the condition's last transition. type: string status: description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string tokenValidUntil: description: |- The ValidUntil time of the token used to authenticate with the Certificate Manager, SaaS. format: date-time type: string type: description: |- Type of the condition, should be a combination of the unique name of the operator and the type of condition. eg. 
`VenafiEnhancedIssuerReady` type: string required: - status - type type: object type: array x-kubernetes-list-map-keys: - type x-kubernetes-list-type: map type: object required: - metadata - spec type: object served: true storage: true subresources: status: {} ================================================ FILE: deploy/charts/venafi-kubernetes-agent/templates/NOTES.txt ================================================ {{- if .Values.config.configmap.name }} You are using a custom configuration in the following ConfigMap: {{ .Values.config.configmap.name | quote }}. DEPRECATION: The `cluster_id` configuration field is deprecated. If your configuration contains `cluster_id`, it will continue to work as a fallback, but please migrate to `cluster_name` to avoid ambiguity. {{- end }} {{- if .Values.authentication.venafiConnection.enabled }} - Check the VenafiConnection exists: "{{ .Values.authentication.venafiConnection.namespace }}/{{ .Values.authentication.venafiConnection.name }}" > kubectl get VenafiConnection -n {{ .Values.authentication.venafiConnection.namespace }} {{ .Values.authentication.venafiConnection.name }} {{- else }} - Check the credentials Secret exists: "{{ .Values.authentication.secretName }}" > kubectl get secret -n {{ .Release.Namespace }} {{ .Values.authentication.secretName }} {{- end }} - Check the application is running: > kubectl get pods -n {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} - Check the application logs for successful connection to the platform: > kubectl logs -n {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} ================================================ FILE: deploy/charts/venafi-kubernetes-agent/templates/_helpers.tpl ================================================ {{/* Expand the name of the chart. 
*/}} {{- define "venafi-kubernetes-agent.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} {{- end }} {{/* Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If release name contains chart name it will be used as a full name. */}} {{- define "venafi-kubernetes-agent.fullname" -}} {{- if .Values.fullnameOverride }} {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} {{- else }} {{- $name := default .Chart.Name .Values.nameOverride }} {{- if contains $name .Release.Name }} {{- .Release.Name | trunc 63 | trimSuffix "-" }} {{- else }} {{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" }} {{- end }} {{- end }} {{- end }} {{/* Create chart name and version as used by the chart label. */}} {{- define "venafi-kubernetes-agent.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} {{- end }} {{/* Common labels */}} {{- define "venafi-kubernetes-agent.labels" -}} helm.sh/chart: {{ include "venafi-kubernetes-agent.chart" . }} {{ include "venafi-kubernetes-agent.selectorLabels" . }} {{- if .Chart.AppVersion }} app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- end }} app.kubernetes.io/managed-by: {{ .Release.Service }} {{- end }} {{/* Selector labels */}} {{- define "venafi-kubernetes-agent.selectorLabels" -}} app.kubernetes.io/name: {{ include "venafi-kubernetes-agent.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} {{/* Create the name of the service account to use */}} {{- define "venafi-kubernetes-agent.serviceAccountName" -}} {{- if .Values.serviceAccount.create }} {{- default (include "venafi-kubernetes-agent.fullname" .) .Values.serviceAccount.name }} {{- else }} {{- default "default" .Values.serviceAccount.name }} {{- end }} {{- end }} {{/* Util function for generating an image reference based on the provided options. 
This function is derviced from similar functions used in the cert-manager GitHub organization */}} {{- define "venafi-kubernetes-agent.image" -}} {{- /* Calling convention: - (tuple ) We intentionally pass imageRegistry/imageNamespace as explicit arguments rather than reading from `.Values` inside this helper, because `helm-tool lint` does not reliably track `.Values.*` usage through tuple/variable indirection. */ -}} {{- if ne (len .) 4 -}} {{- fail (printf "ERROR: template \"venafi-kubernetes-agent.image\" expects (tuple ), got %d arguments" (len .)) -}} {{- end -}} {{- $image := index . 0 -}} {{- $imageRegistry := index . 1 | default "" -}} {{- $imageNamespace := index . 2 | default "" -}} {{- $defaultReference := index . 3 -}} {{- $repository := "" -}} {{- if $image.repository -}} {{- $repository = $image.repository -}} {{- /* Backwards compatibility: if image.registry is set, additionally prefix the repository with this registry. */ -}} {{- if $image.registry -}} {{- $repository = printf "%s/%s" $image.registry $repository -}} {{- end -}} {{- else -}} {{- $name := required "ERROR: image.name must be set when image.repository is empty" $image.name -}} {{- $repository = $name -}} {{- if $imageNamespace -}} {{- $repository = printf "%s/%s" $imageNamespace $repository -}} {{- end -}} {{- if $imageRegistry -}} {{- $repository = printf "%s/%s" $imageRegistry $repository -}} {{- end -}} {{- /* Backwards compatibility: if image.registry is set, additionally prefix the repository with this registry. 
*/ -}} {{- if $image.registry -}} {{- $repository = printf "%s/%s" $image.registry $repository -}} {{- end -}} {{- end -}} {{- $repository -}} {{- if and $image.tag $image.digest -}} {{- printf ":%s@%s" $image.tag $image.digest -}} {{- else if $image.tag -}} {{- printf ":%s" $image.tag -}} {{- else if $image.digest -}} {{- printf "@%s" $image.digest -}} {{- else -}} {{- printf "%s" $defaultReference -}} {{- end -}} {{- end }} ================================================ FILE: deploy/charts/venafi-kubernetes-agent/templates/_venafi-connection.tpl ================================================ {{/* Create chart name and version as used by the chart label. */}} {{- define "venafi-connection.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} {{- end }} {{/* Common labels */}} {{- define "venafi-connection.labels" -}} helm.sh/chart: {{ include "venafi-connection.chart" . }} {{ include "venafi-connection.selectorLabels" . }} {{- if .Chart.AppVersion }} app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- end }} app.kubernetes.io/managed-by: {{ .Release.Service }} {{- end }} {{/* Selector labels */}} {{- define "venafi-connection.selectorLabels" -}} app.kubernetes.io/name: "venafi-connection" app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} ================================================ FILE: deploy/charts/venafi-kubernetes-agent/templates/configmap.yaml ================================================ {{ if not .Values.config.configmap.name }} --- apiVersion: v1 kind: ConfigMap metadata: name: agent-config namespace: {{ .Release.Namespace }} labels: {{- include "venafi-kubernetes-agent.labels" . 
| nindent 4 }} data: config.yaml: |- cluster_name: {{ .Values.config.clusterName | quote }} cluster_description: {{ .Values.config.clusterDescription | quote }} server: {{ .Values.config.server | quote }} period: {{ .Values.config.period | quote }} {{- with .Values.config.excludeAnnotationKeysRegex }} exclude-annotation-keys-regex: {{- . | toYaml | nindent 6 }} {{- end }} {{- with .Values.config.excludeLabelKeysRegex }} exclude-label-keys-regex: {{- . | toYaml | nindent 6 }} {{- end }} venafi-cloud: uploader_id: "no" upload_path: "/v1/tlspk/upload/clusterdata" data-gatherers: # gather k8s apiserver version information - kind: "k8s-discovery" name: "k8s-discovery" # pods data is used in the pods and application_versions packages - kind: "k8s-dynamic" name: "k8s/pods" config: resource-type: resource: pods version: v1 - kind: "k8s-dynamic" name: "k8s/namespaces" config: resource-type: resource: namespaces version: v1 # gather services for pod readiness probe rules - kind: "k8s-dynamic" name: "k8s/services" config: resource-type: resource: services version: v1 # gather higher level resources to ensure data to determine ownership is present - kind: "k8s-dynamic" name: "k8s/deployments" config: resource-type: version: v1 resource: deployments group: apps - kind: "k8s-dynamic" name: "k8s/statefulsets" config: resource-type: version: v1 resource: statefulsets group: apps - kind: "k8s-dynamic" name: "k8s/daemonsets" config: resource-type: version: v1 resource: daemonsets group: apps - kind: "k8s-dynamic" name: "k8s/jobs" config: resource-type: version: v1 resource: jobs group: batch - kind: "k8s-dynamic" name: "k8s/cronjobs" config: resource-type: version: v1 resource: cronjobs group: batch - kind: "k8s-dynamic" name: "k8s/ingresses" config: resource-type: group: networking.k8s.io version: v1 resource: ingresses - kind: "k8s-dynamic" name: "k8s/secrets" config: resource-type: version: v1 resource: secrets {{- with .Values.config.ignoredSecretTypes }} field-selectors: {{- 
range . }} - type!={{ . }} {{- end }} {{- end }} - kind: "k8s-dynamic" name: "k8s/certificates" config: resource-type: group: cert-manager.io version: v1 resource: certificates - kind: "k8s-dynamic" name: "k8s/certificaterequests" config: resource-type: group: cert-manager.io version: v1 resource: certificaterequests - kind: "k8s-dynamic" name: "k8s/issuers" config: resource-type: group: cert-manager.io version: v1 resource: issuers - kind: "k8s-dynamic" name: "k8s/clusterissuers" config: resource-type: group: cert-manager.io version: v1 resource: clusterissuers - kind: "k8s-dynamic" name: "k8s/googlecasissuers" config: resource-type: group: cas-issuer.jetstack.io version: v1beta1 resource: googlecasissuers - kind: "k8s-dynamic" name: "k8s/googlecasclusterissuers" config: resource-type: group: cas-issuer.jetstack.io version: v1beta1 resource: googlecasclusterissuers - kind: "k8s-dynamic" name: "k8s/awspcaissuer" config: resource-type: group: awspca.cert-manager.io version: v1beta1 resource: awspcaissuers - kind: "k8s-dynamic" name: "k8s/awspcaclusterissuers" config: resource-type: group: awspca.cert-manager.io version: v1beta1 resource: awspcaclusterissuers - kind: "k8s-dynamic" name: "k8s/mutatingwebhookconfigurations" config: resource-type: group: admissionregistration.k8s.io version: v1 resource: mutatingwebhookconfigurations - kind: "k8s-dynamic" name: "k8s/validatingwebhookconfigurations" config: resource-type: group: admissionregistration.k8s.io version: v1 resource: validatingwebhookconfigurations - kind: "k8s-dynamic" name: "k8s/gateways" config: resource-type: group: networking.istio.io version: v1alpha3 resource: gateways - kind: "k8s-dynamic" name: "k8s/virtualservices" config: resource-type: group: networking.istio.io version: v1alpha3 resource: virtualservices - kind: "k8s-dynamic" name: "k8s/routes" config: resource-type: version: v1 group: route.openshift.io resource: routes - kind: "k8s-dynamic" name: "k8s/venaficonnections" config: resource-type: 
group: jetstack.io version: v1alpha1 resource: venaficonnections - kind: "k8s-dynamic" name: "k8s/venaficlusterissuers" config: resource-type: group: jetstack.io version: v1alpha1 resource: venaficlusterissuers - kind: "k8s-dynamic" name: "k8s/venafiissuers" config: resource-type: group: jetstack.io version: v1alpha1 resource: venafiissuers - kind: "k8s-dynamic" name: "k8s/fireflyissuers" config: resource-type: group: firefly.venafi.com version: v1 resource: issuers - kind: "k8s-dynamic" name: "k8s/stepissuers" config: resource-type: group: certmanager.step.sm version: v1beta1 resource: stepissuers - kind: "k8s-dynamic" name: "k8s/stepclusterissuers" config: resource-type: group: certmanager.step.sm version: v1beta1 resource: stepclusterissuers - kind: "k8s-dynamic" name: "k8s/originissuers" config: resource-type: group: cert-manager.k8s.cloudflare.com version: v1 resource: originissuers - kind: "k8s-dynamic" name: "k8s/clusteroriginissuers" config: resource-type: group: cert-manager.k8s.cloudflare.com version: v1 resource: clusteroriginissuers - kind: "k8s-dynamic" name: "k8s/freeipaissuers" config: resource-type: group: certmanager.freeipa.org version: v1beta1 resource: issuers - kind: "k8s-dynamic" name: "k8s/freeipaclusterissuers" config: resource-type: group: certmanager.freeipa.org version: v1beta1 resource: clusterissuers - kind: "k8s-dynamic" name: "k8s/ejbcaissuers" config: resource-type: group: ejbca-issuer.keyfactor.com version: v1alpha1 resource: issuers - kind: "k8s-dynamic" name: "k8s/ejbcaclusterissuers" config: resource-type: group: ejbca-issuer.keyfactor.com version: v1alpha1 resource: clusterissuers {{- end }} ================================================ FILE: deploy/charts/venafi-kubernetes-agent/templates/deployment.yaml ================================================ apiVersion: apps/v1 kind: Deployment metadata: name: {{ include "venafi-kubernetes-agent.fullname" . 
}} namespace: {{ .Release.Namespace }} labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} spec: replicas: {{ .Values.replicaCount }} selector: matchLabels: {{- include "venafi-kubernetes-agent.selectorLabels" . | nindent 6 }} template: metadata: {{- with .Values.podAnnotations }} annotations: {{- toYaml . | nindent 8 }} {{- end }} labels: {{- include "venafi-kubernetes-agent.selectorLabels" . | nindent 8 }} spec: {{- with .Values.imagePullSecrets }} imagePullSecrets: {{- toYaml . | nindent 8 }} {{- end }} serviceAccountName: {{ include "venafi-kubernetes-agent.serviceAccountName" . }} securityContext: {{- toYaml .Values.podSecurityContext | nindent 8 }} containers: - name: {{ .Chart.Name }} securityContext: {{- toYaml .Values.securityContext | nindent 12 }} image: "{{ template "venafi-kubernetes-agent.image" (tuple .Values.image .Values.imageRegistry .Values.imageNamespace (printf ":%s" .Chart.AppVersion)) }}" imagePullPolicy: {{ .Values.image.pullPolicy }} env: - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: POD_UID valueFrom: fieldRef: fieldPath: metadata.uid - name: POD_NODE valueFrom: fieldRef: fieldPath: spec.nodeName {{- with .Values.http_proxy }} - name: HTTP_PROXY value: {{ . }} {{- end }} {{- with .Values.https_proxy }} - name: HTTPS_PROXY value: {{ . }} {{- end }} {{- with .Values.no_proxy }} - name: NO_PROXY value: {{ . }} {{- end }} {{- if not (empty .Values.command) }} command: {{- range .Values.command }} - {{ . 
| quote }} {{- end }} {{- end }} args: - "agent" - "-c" - "/etc/venafi/agent/config/{{ default "config.yaml" .Values.config.configmap.key }}" {{- if .Values.authentication.venafiConnection.enabled }} - --venafi-connection - {{ .Values.authentication.venafiConnection.name | quote }} - --venafi-connection-namespace - {{ .Values.authentication.venafiConnection.namespace | quote }} {{- else }} - "--client-id" - {{ .Values.config.clientId | quote }} - "--private-key-path" - "/etc/venafi/agent/key/{{ .Values.authentication.secretKey }}" {{- end }} - --venafi-cloud {{- if .Values.metrics.enabled }} - --enable-metrics {{- end }} {{- range .Values.extraArgs }} - {{ . | quote }} {{- end }} resources: {{- toYaml .Values.resources | nindent 12 }} volumeMounts: - name: config mountPath: "/etc/venafi/agent/config" readOnly: true {{- if not .Values.authentication.venafiConnection.enabled }} - name: credentials mountPath: "/etc/venafi/agent/key" readOnly: true {{- end }} {{- with .Values.volumeMounts }} {{- toYaml . | nindent 12 }} {{- end }} {{- if .Values.metrics.enabled }} ports: - containerPort: 8081 name: http-metrics {{- end }} livenessProbe: httpGet: path: /healthz port: 8081 initialDelaySeconds: 15 periodSeconds: 20 readinessProbe: httpGet: path: /readyz port: 8081 initialDelaySeconds: 5 periodSeconds: 10 {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.affinity }} affinity: {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.tolerations }} tolerations: {{- toYaml . | nindent 8 }} {{- end }} volumes: - name: config configMap: name: {{ default "agent-config" .Values.config.configmap.name }} optional: false {{- if not .Values.authentication.venafiConnection.enabled }} - name: credentials secret: secretName: {{ .Values.authentication.secretName }} optional: false {{- end }} {{- with .Values.volumes }} {{- toYaml . 
| nindent 8 }} {{- end }} ================================================ FILE: deploy/charts/venafi-kubernetes-agent/templates/poddisruptionbudget.yaml ================================================ {{- if .Values.podDisruptionBudget.enabled }} apiVersion: policy/v1 kind: PodDisruptionBudget metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }} namespace: {{ .Release.Namespace }} labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} spec: selector: matchLabels: {{- include "venafi-kubernetes-agent.selectorLabels" . | nindent 6 }} {{- if not (or (hasKey .Values.podDisruptionBudget "minAvailable") (hasKey .Values.podDisruptionBudget "maxUnavailable")) }} minAvailable: 1 # Default value because minAvailable and maxUnavailable are not set {{- end }} {{- if hasKey .Values.podDisruptionBudget "minAvailable" }} minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} {{- end }} {{- if hasKey .Values.podDisruptionBudget "maxUnavailable" }} maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} {{- end }} {{- end }} ================================================ FILE: deploy/charts/venafi-kubernetes-agent/templates/podmonitor.yaml ================================================ {{- if and .Values.metrics.enabled .Values.metrics.podmonitor.enabled }} apiVersion: monitoring.coreos.com/v1 kind: PodMonitor metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }} {{- if .Values.metrics.podmonitor.namespace }} namespace: {{ .Values.metrics.podmonitor.namespace }} {{- else }} namespace: {{ .Release.Namespace | quote }} {{- end }} labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} prometheus: {{ .Values.metrics.podmonitor.prometheusInstance }} {{- with .Values.metrics.podmonitor.labels }} {{- toYaml . | nindent 4 }} {{- end }} {{- with .Values.metrics.podmonitor.annotations }} annotations: {{- toYaml . | nindent 4 }} {{- end }} spec: jobLabel: {{ include "venafi-kubernetes-agent.fullname" . 
}} selector: matchLabels: {{- include "venafi-kubernetes-agent.selectorLabels" . | nindent 6 }} {{- if .Values.metrics.podmonitor.namespace }} namespaceSelector: matchNames: - {{ .Release.Namespace | quote }} {{- end }} podMetricsEndpoints: - port: http-metrics path: /metrics interval: {{ .Values.metrics.podmonitor.interval }} scrapeTimeout: {{ .Values.metrics.podmonitor.scrapeTimeout }} honorLabels: {{ .Values.metrics.podmonitor.honorLabels }} {{- with .Values.metrics.podmonitor.endpointAdditionalProperties }} {{- toYaml . | nindent 4 }} {{- end }} {{- end }} ================================================ FILE: deploy/charts/venafi-kubernetes-agent/templates/rbac.yaml ================================================ --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-event-emitted labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} rules: - apiGroups: [""] resources: ["events"] verbs: ["create"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-event-emitted labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: {{ include "venafi-kubernetes-agent.fullname" . }}-event-emitted subjects: - kind: ServiceAccount name: {{ include "venafi-kubernetes-agent.serviceAccountName" . }} namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-cluster-viewer labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: view subjects: - kind: ServiceAccount name: {{ include "venafi-kubernetes-agent.serviceAccountName" . 
}} namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-node-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} rules: - apiGroups: [""] resources: ["nodes"] verbs: ["get", "list", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-node-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} roleRef: kind: ClusterRole name: {{ include "venafi-kubernetes-agent.fullname" . }}-node-reader apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: {{ include "venafi-kubernetes-agent.serviceAccountName" . }} namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-secret-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} rules: - apiGroups: [""] resources: ["secrets"] verbs: ["get", "list", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-secret-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} roleRef: kind: ClusterRole name: {{ include "venafi-kubernetes-agent.fullname" . }}-secret-reader apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: {{ include "venafi-kubernetes-agent.serviceAccountName" . }} namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-cert-manager-reader labels: {{- include "venafi-kubernetes-agent.labels" . 
| nindent 4 }} rules: - apiGroups: ["cert-manager.io"] resources: - certificates - certificaterequests - issuers - clusterissuers verbs: ["get", "list", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-cert-manager-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} roleRef: kind: ClusterRole name: {{ include "venafi-kubernetes-agent.fullname" . }}-cert-manager-reader apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: {{ include "venafi-kubernetes-agent.serviceAccountName" . }} namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-googlecas-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} rules: - apiGroups: ["cas-issuer.jetstack.io"] resources: - googlecasissuers - googlecasclusterissuers verbs: ["get", "list", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-googlecas-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} roleRef: kind: ClusterRole name: {{ include "venafi-kubernetes-agent.fullname" . }}-googlecas-reader apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: {{ include "venafi-kubernetes-agent.serviceAccountName" . }} namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-awspca-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} rules: - apiGroups: ["awspca.cert-manager.io"] resources: - awspcaissuers - awspcaclusterissuers verbs: ["get", "list", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ include "venafi-kubernetes-agent.fullname" . 
}}-awspca-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} roleRef: kind: ClusterRole name: {{ include "venafi-kubernetes-agent.fullname" . }}-awspca-reader apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: {{ include "venafi-kubernetes-agent.serviceAccountName" . }} namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-webhook-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} rules: - apiGroups: ["admissionregistration.k8s.io"] resources: - validatingwebhookconfigurations - mutatingwebhookconfigurations verbs: ["get", "list", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-webhook-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} roleRef: kind: ClusterRole name: {{ include "venafi-kubernetes-agent.fullname" . }}-webhook-reader apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: {{ include "venafi-kubernetes-agent.serviceAccountName" . }} namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-openshift-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} rules: - apiGroups: ["route.openshift.io"] resources: - routes verbs: ["get", "list", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-openshift-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} roleRef: kind: ClusterRole name: {{ include "venafi-kubernetes-agent.fullname" . }}-openshift-reader apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: {{ include "venafi-kubernetes-agent.serviceAccountName" . 
}} namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-istio-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} rules: - apiGroups: ["networking.istio.io"] resources: - virtualservices - gateways verbs: ["get", "list", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-istio-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} roleRef: kind: ClusterRole name: {{ include "venafi-kubernetes-agent.fullname" . }}-istio-reader apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: {{ include "venafi-kubernetes-agent.serviceAccountName" . }} namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-venafi-connection-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} rules: - apiGroups: ["jetstack.io"] resources: - venaficonnections verbs: ["get", "list", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-venafi-connection-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} roleRef: kind: ClusterRole name: {{ include "venafi-kubernetes-agent.fullname" . }}-venafi-connection-reader apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: {{ include "venafi-kubernetes-agent.serviceAccountName" . }} namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-venafi-enhanced-reader labels: {{- include "venafi-kubernetes-agent.labels" . 
| nindent 4 }} rules: - apiGroups: ["jetstack.io"] resources: - venafiissuers - venaficlusterissuers verbs: ["get", "list", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-venafi-enhanced-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} roleRef: kind: ClusterRole name: {{ include "venafi-kubernetes-agent.fullname" . }}-venafi-enhanced-reader apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: {{ include "venafi-kubernetes-agent.serviceAccountName" . }} namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-firefly-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} rules: - apiGroups: ["firefly.venafi.com"] resources: - issuers verbs: ["get", "list", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-firefly-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} roleRef: kind: ClusterRole name: {{ include "venafi-kubernetes-agent.fullname" . }}-firefly-reader apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: {{ include "venafi-kubernetes-agent.serviceAccountName" . }} namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-step-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} rules: - apiGroups: ["certmanager.step.sm"] resources: - stepissuers - stepclusterissuers verbs: ["get", "list", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-step-reader labels: {{- include "venafi-kubernetes-agent.labels" . 
| nindent 4 }} roleRef: kind: ClusterRole name: {{ include "venafi-kubernetes-agent.fullname" . }}-step-reader apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: {{ include "venafi-kubernetes-agent.serviceAccountName" . }} namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-cloudflare-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} rules: - apiGroups: ["cert-manager.k8s.cloudflare.com"] resources: - originissuers - clusteroriginissuers verbs: ["get", "list", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-cloudflare-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} roleRef: kind: ClusterRole name: {{ include "venafi-kubernetes-agent.fullname" . }}-cloudflare-reader apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: {{ include "venafi-kubernetes-agent.serviceAccountName" . }} namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-freeipa-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} rules: - apiGroups: ["certmanager.freeipa.org"] resources: - issuers - clusterissuers verbs: ["get", "list", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-freeipa-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} roleRef: kind: ClusterRole name: {{ include "venafi-kubernetes-agent.fullname" . }}-freeipa-reader apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: {{ include "venafi-kubernetes-agent.serviceAccountName" . 
}} namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-keyfactor-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} rules: - apiGroups: ["ejbca-issuer.keyfactor.com"] resources: - issuers - clusterissuers verbs: ["get", "list", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ include "venafi-kubernetes-agent.fullname" . }}-keyfactor-reader labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} roleRef: kind: ClusterRole name: {{ include "venafi-kubernetes-agent.fullname" . }}-keyfactor-reader apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: {{ include "venafi-kubernetes-agent.serviceAccountName" . }} namespace: {{ .Release.Namespace }} ================================================ FILE: deploy/charts/venafi-kubernetes-agent/templates/serviceaccount.yaml ================================================ {{- if .Values.serviceAccount.create -}} apiVersion: v1 kind: ServiceAccount metadata: name: {{ include "venafi-kubernetes-agent.serviceAccountName" . }} namespace: {{ .Release.Namespace }} labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} {{- with .Values.serviceAccount.annotations }} annotations: {{- toYaml . | nindent 4 }} {{- end }} {{- end }} ================================================ FILE: deploy/charts/venafi-kubernetes-agent/templates/venafi-connection-crd.without-validations.yaml ================================================ {{/* DO NOT EDIT. Use 'make generate-crds-venconn' to regenerate. 
*/}} {{- if .Values.crds.venafiConnection.include }} {{- if (or (semverCompare "<1.25" .Capabilities.KubeVersion.GitVersion) .Values.crds.forceRemoveValidationAnnotations) }} apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: "venaficonnections.jetstack.io" {{- if .Values.crds.keep }} annotations: # This annotation prevents the CRD from being pruned by Helm when this chart # is deleted. helm.sh/resource-policy: keep {{- end }} labels: {{- include "venafi-connection.labels" . | nindent 4 }} spec: group: jetstack.io names: kind: VenafiConnection listKind: VenafiConnectionList plural: venaficonnections shortNames: - vc singular: venaficonnection scope: Namespaced versions: - name: v1alpha1 schema: openAPIV3Schema: description: VenafiConnection is the Schema for the VenafiConnection API properties: apiVersion: description: |- APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: description: |- Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: properties: allowReferencesFrom: description: |- A namespace selector that specifies what namespaces this VenafiConnection is allowed to be used from. If not set/ null, the VenafiConnection can only be used within its namespace. An empty selector ({}) matches all namespaces. If set to a non-empty selector, the VenafiConnection can only be used from namespaces that match the selector. This possibly excludes the namespace the VenafiConnection is in. 
properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: description: |- A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: description: |- operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: description: |- values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array x-kubernetes-list-type: atomic required: - key - operator type: object type: array x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string description: |- matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic firefly: properties: accessToken: description: |- The list of steps to retrieve the Access Token that will be used to connect to Firefly. items: properties: hashicorpVaultLDAP: description: |- HashicorpVaultLDAP is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: ldapPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/ldap/static-cred/:role_name or /v1/ldap/creds/:role_name type: string url: description: The URL to connect to your HashiCorp Vault instance. 
type: string required: - ldapPath type: object hashicorpVaultOAuth: description: |- HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource step to provide an OAuth token, which this step uses to authenticate to Vault. The output of this step is a Vault token. This step allows you to use the step `HashicorpVaultSecret` afterwards. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with HashiCorp Vault. The only supported value is "OIDC". enum: - OIDC type: string authPath: description: |- The login URL used for obtaining the Vault token. Example: /v1/auth/oidc/login type: string clientId: description: 'Deprecated: This field does nothing and will be removed in the future.' type: string role: description: |- The role defined in Vault that we want to use when authenticating to Vault. type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - authInputType - authPath - role type: object hashicorpVaultSecret: description: |- HashicorpVaultSecret is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: fields: description: |- The fields are Vault keys pointing to the secrets passed to the next SecretSource step. Example 1 (TPP, username and password): imagining that you have stored the username and password for TPP under the keys "username" and "password", you will want to set this field to `["username", "password"]`. The username is expected to be given first, the password second. items: type: string type: array secretPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/secret/data/application-team-a/tpp-username-password type: string url: description: The URL to connect to your HashiCorp Vault instance. 
type: string required: - fields - secretPath type: object secret: description: |- Secret is a SecretSource step meant to be the first step. It retrieves secret values from a Kubernetes Secret, and passes them to the next step. properties: fields: description: |- The names of the fields we want to extract from the Kubernetes secret. These fields are passed to the next step in the chain. items: type: string type: array name: description: The name of the Kubernetes secret. type: string required: - fields - name type: object serviceAccountToken: description: |- ServiceAccountToken is a SecretSource step meant to be the first step. It uses the Kubernetes TokenRequest API to retrieve a token for a given service account, and passes it to the next step. properties: audiences: description: |- Audiences are the intendend audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences. items: type: string type: array expirationSeconds: description: |- ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response. format: int64 type: integer name: description: The name of the Kubernetes service account. type: string required: - audiences - name type: object tppOAuth: description: |- TPPOAuth is a SecretSource step that authenticates to a TPP server. This step is meant to be the last step and requires a prior step that depends on the `authInputType`. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with TPP. The supported values are "UsernamePassword" and "JWT". 
enum: - UsernamePassword - JWT type: string clientId: description: ClientID is the clientId used to authenticate with TPP. type: string url: description: |- The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs https://tpp.example.com and https://tpp.example.com/vedsdk are equivalent. The ending `/vedsdk` is optional and is stripped out by our client. If not set, defaults to the URL defined at the top-level of the TPP configuration. type: string required: - authInputType type: object vcpOAuth: description: |- VCPOAuth is a SecretSource step that authenticates to the Certificate Manager, SaaS. This step is meant to be the last step and requires a prior step that outputs a JWT token. properties: tenantID: description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS. type: string type: object type: object maxItems: 50 type: array x-kubernetes-list-type: atomic url: description: The URL to connect to the Workload Identity Manager instance. type: string required: - url type: object tpp: properties: accessToken: description: The list of steps to retrieve a TPP access token. items: properties: hashicorpVaultLDAP: description: |- HashicorpVaultLDAP is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: ldapPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/ldap/static-cred/:role_name or /v1/ldap/creds/:role_name type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - ldapPath type: object hashicorpVaultOAuth: description: |- HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource step to provide an OAuth token, which this step uses to authenticate to Vault. The output of this step is a Vault token. 
This step allows you to use the step `HashicorpVaultSecret` afterwards. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with HashiCorp Vault. The only supported value is "OIDC". enum: - OIDC type: string authPath: description: |- The login URL used for obtaining the Vault token. Example: /v1/auth/oidc/login type: string clientId: description: 'Deprecated: This field does nothing and will be removed in the future.' type: string role: description: |- The role defined in Vault that we want to use when authenticating to Vault. type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - authInputType - authPath - role type: object hashicorpVaultSecret: description: |- HashicorpVaultSecret is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: fields: description: |- The fields are Vault keys pointing to the secrets passed to the next SecretSource step. Example 1 (TPP, username and password): imagining that you have stored the username and password for TPP under the keys "username" and "password", you will want to set this field to `["username", "password"]`. The username is expected to be given first, the password second. items: type: string type: array secretPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/secret/data/application-team-a/tpp-username-password type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - fields - secretPath type: object secret: description: |- Secret is a SecretSource step meant to be the first step. It retrieves secret values from a Kubernetes Secret, and passes them to the next step. properties: fields: description: |- The names of the fields we want to extract from the Kubernetes secret. 
These fields are passed to the next step in the chain. items: type: string type: array name: description: The name of the Kubernetes secret. type: string required: - fields - name type: object serviceAccountToken: description: |- ServiceAccountToken is a SecretSource step meant to be the first step. It uses the Kubernetes TokenRequest API to retrieve a token for a given service account, and passes it to the next step. properties: audiences: description: |- Audiences are the intendend audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences. items: type: string type: array expirationSeconds: description: |- ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response. format: int64 type: integer name: description: The name of the Kubernetes service account. type: string required: - audiences - name type: object tppOAuth: description: |- TPPOAuth is a SecretSource step that authenticates to a TPP server. This step is meant to be the last step and requires a prior step that depends on the `authInputType`. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with TPP. The supported values are "UsernamePassword" and "JWT". enum: - UsernamePassword - JWT type: string clientId: description: ClientID is the clientId used to authenticate with TPP. type: string url: description: |- The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs https://tpp.example.com and https://tpp.example.com/vedsdk are equivalent. The ending `/vedsdk` is optional and is stripped out by our client. 
If not set, defaults to the URL defined at the top-level of the TPP configuration. type: string required: - authInputType type: object vcpOAuth: description: |- VCPOAuth is a SecretSource step that authenticates to the Certificate Manager, SaaS. This step is meant to be the last step and requires a prior step that outputs a JWT token. properties: tenantID: description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS. type: string type: object type: object maxItems: 50 type: array x-kubernetes-list-type: atomic url: description: |- The URL to connect to the Certificate Manager Self-Hosted instance. The two URLs https://tpp.example.com and https://tpp.example.com/vedsdk are equivalent. The ending `/vedsdk` is optional and is stripped out by venafi-connection-lib. type: string required: - url type: object vaas: description: 'Deprecated: The ''vaas'' field is deprecated use the field called ''vcp'' instead.' properties: accessToken: description: |- The list of steps to retrieve the Access Token that will be used to connect to Certificate Manager, SaaS. items: properties: hashicorpVaultLDAP: description: |- HashicorpVaultLDAP is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: ldapPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/ldap/static-cred/:role_name or /v1/ldap/creds/:role_name type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - ldapPath type: object hashicorpVaultOAuth: description: |- HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource step to provide an OAuth token, which this step uses to authenticate to Vault. The output of this step is a Vault token. This step allows you to use the step `HashicorpVaultSecret` afterwards. 
properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with HashiCorp Vault. The only supported value is "OIDC". enum: - OIDC type: string authPath: description: |- The login URL used for obtaining the Vault token. Example: /v1/auth/oidc/login type: string clientId: description: 'Deprecated: This field does nothing and will be removed in the future.' type: string role: description: |- The role defined in Vault that we want to use when authenticating to Vault. type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - authInputType - authPath - role type: object hashicorpVaultSecret: description: |- HashicorpVaultSecret is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: fields: description: |- The fields are Vault keys pointing to the secrets passed to the next SecretSource step. Example 1 (TPP, username and password): imagining that you have stored the username and password for TPP under the keys "username" and "password", you will want to set this field to `["username", "password"]`. The username is expected to be given first, the password second. items: type: string type: array secretPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/secret/data/application-team-a/tpp-username-password type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - fields - secretPath type: object secret: description: |- Secret is a SecretSource step meant to be the first step. It retrieves secret values from a Kubernetes Secret, and passes them to the next step. properties: fields: description: |- The names of the fields we want to extract from the Kubernetes secret. These fields are passed to the next step in the chain. 
items: type: string type: array name: description: The name of the Kubernetes secret. type: string required: - fields - name type: object serviceAccountToken: description: |- ServiceAccountToken is a SecretSource step meant to be the first step. It uses the Kubernetes TokenRequest API to retrieve a token for a given service account, and passes it to the next step. properties: audiences: description: |- Audiences are the intendend audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences. items: type: string type: array expirationSeconds: description: |- ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response. format: int64 type: integer name: description: The name of the Kubernetes service account. type: string required: - audiences - name type: object tppOAuth: description: |- TPPOAuth is a SecretSource step that authenticates to a TPP server. This step is meant to be the last step and requires a prior step that depends on the `authInputType`. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with TPP. The supported values are "UsernamePassword" and "JWT". enum: - UsernamePassword - JWT type: string clientId: description: ClientID is the clientId used to authenticate with TPP. type: string url: description: |- The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs https://tpp.example.com and https://tpp.example.com/vedsdk are equivalent. The ending `/vedsdk` is optional and is stripped out by our client. 
If not set, defaults to the URL defined at the top-level of the TPP configuration. type: string required: - authInputType type: object vcpOAuth: description: |- VCPOAuth is a SecretSource step that authenticates to the Certificate Manager, SaaS. This step is meant to be the last step and requires a prior step that outputs a JWT token. properties: tenantID: description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS. type: string type: object type: object maxItems: 50 type: array x-kubernetes-list-type: atomic apiKey: description: |- The list of steps to retrieve the API key that will be used to connect to Certificate Manager, SaaS. items: properties: hashicorpVaultLDAP: description: |- HashicorpVaultLDAP is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: ldapPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/ldap/static-cred/:role_name or /v1/ldap/creds/:role_name type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - ldapPath type: object hashicorpVaultOAuth: description: |- HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource step to provide an OAuth token, which this step uses to authenticate to Vault. The output of this step is a Vault token. This step allows you to use the step `HashicorpVaultSecret` afterwards. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with HashiCorp Vault. The only supported value is "OIDC". enum: - OIDC type: string authPath: description: |- The login URL used for obtaining the Vault token. Example: /v1/auth/oidc/login type: string clientId: description: 'Deprecated: This field does nothing and will be removed in the future.' 
type: string role: description: |- The role defined in Vault that we want to use when authenticating to Vault. type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - authInputType - authPath - role type: object hashicorpVaultSecret: description: |- HashicorpVaultSecret is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: fields: description: |- The fields are Vault keys pointing to the secrets passed to the next SecretSource step. Example 1 (TPP, username and password): imagining that you have stored the username and password for TPP under the keys "username" and "password", you will want to set this field to `["username", "password"]`. The username is expected to be given first, the password second. items: type: string type: array secretPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/secret/data/application-team-a/tpp-username-password type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - fields - secretPath type: object secret: description: |- Secret is a SecretSource step meant to be the first step. It retrieves secret values from a Kubernetes Secret, and passes them to the next step. properties: fields: description: |- The names of the fields we want to extract from the Kubernetes secret. These fields are passed to the next step in the chain. items: type: string type: array name: description: The name of the Kubernetes secret. type: string required: - fields - name type: object serviceAccountToken: description: |- ServiceAccountToken is a SecretSource step meant to be the first step. It uses the Kubernetes TokenRequest API to retrieve a token for a given service account, and passes it to the next step. 
properties: audiences: description: |- Audiences are the intendend audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences. items: type: string type: array expirationSeconds: description: |- ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response. format: int64 type: integer name: description: The name of the Kubernetes service account. type: string required: - audiences - name type: object tppOAuth: description: |- TPPOAuth is a SecretSource step that authenticates to a TPP server. This step is meant to be the last step and requires a prior step that depends on the `authInputType`. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with TPP. The supported values are "UsernamePassword" and "JWT". enum: - UsernamePassword - JWT type: string clientId: description: ClientID is the clientId used to authenticate with TPP. type: string url: description: |- The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs https://tpp.example.com and https://tpp.example.com/vedsdk are equivalent. The ending `/vedsdk` is optional and is stripped out by our client. If not set, defaults to the URL defined at the top-level of the TPP configuration. type: string required: - authInputType type: object vcpOAuth: description: |- VCPOAuth is a SecretSource step that authenticates to the Certificate Manager, SaaS. This step is meant to be the last step and requires a prior step that outputs a JWT token. 
properties: tenantID: description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS. type: string type: object type: object maxItems: 50 type: array x-kubernetes-list-type: atomic url: description: |- The URL to connect to the Certificate Manager, SaaS instance. If not set, the default value https://api.venafi.cloud is used. type: string type: object vcp: properties: accessToken: description: |- The list of steps to retrieve the Access Token that will be used to connect to Certificate Manager, SaaS. items: properties: hashicorpVaultLDAP: description: |- HashicorpVaultLDAP is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: ldapPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/ldap/static-cred/:role_name or /v1/ldap/creds/:role_name type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - ldapPath type: object hashicorpVaultOAuth: description: |- HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource step to provide an OAuth token, which this step uses to authenticate to Vault. The output of this step is a Vault token. This step allows you to use the step `HashicorpVaultSecret` afterwards. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with HashiCorp Vault. The only supported value is "OIDC". enum: - OIDC type: string authPath: description: |- The login URL used for obtaining the Vault token. Example: /v1/auth/oidc/login type: string clientId: description: 'Deprecated: This field does nothing and will be removed in the future.' type: string role: description: |- The role defined in Vault that we want to use when authenticating to Vault. 
type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - authInputType - authPath - role type: object hashicorpVaultSecret: description: |- HashicorpVaultSecret is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: fields: description: |- The fields are Vault keys pointing to the secrets passed to the next SecretSource step. Example 1 (TPP, username and password): imagining that you have stored the username and password for TPP under the keys "username" and "password", you will want to set this field to `["username", "password"]`. The username is expected to be given first, the password second. items: type: string type: array secretPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/secret/data/application-team-a/tpp-username-password type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - fields - secretPath type: object secret: description: |- Secret is a SecretSource step meant to be the first step. It retrieves secret values from a Kubernetes Secret, and passes them to the next step. properties: fields: description: |- The names of the fields we want to extract from the Kubernetes secret. These fields are passed to the next step in the chain. items: type: string type: array name: description: The name of the Kubernetes secret. type: string required: - fields - name type: object serviceAccountToken: description: |- ServiceAccountToken is a SecretSource step meant to be the first step. It uses the Kubernetes TokenRequest API to retrieve a token for a given service account, and passes it to the next step. properties: audiences: description: |- Audiences are the intendend audiences of the token. 
A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences. items: type: string type: array expirationSeconds: description: |- ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response. format: int64 type: integer name: description: The name of the Kubernetes service account. type: string required: - audiences - name type: object tppOAuth: description: |- TPPOAuth is a SecretSource step that authenticates to a TPP server. This step is meant to be the last step and requires a prior step that depends on the `authInputType`. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with TPP. The supported values are "UsernamePassword" and "JWT". enum: - UsernamePassword - JWT type: string clientId: description: ClientID is the clientId used to authenticate with TPP. type: string url: description: |- The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs https://tpp.example.com and https://tpp.example.com/vedsdk are equivalent. The ending `/vedsdk` is optional and is stripped out by our client. If not set, defaults to the URL defined at the top-level of the TPP configuration. type: string required: - authInputType type: object vcpOAuth: description: |- VCPOAuth is a SecretSource step that authenticates to the Certificate Manager, SaaS. This step is meant to be the last step and requires a prior step that outputs a JWT token. properties: tenantID: description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS. 
type: string type: object type: object maxItems: 50 type: array x-kubernetes-list-type: atomic apiKey: description: |- The list of steps to retrieve the API key that will be used to connect to Certificate Manager, SaaS. items: properties: hashicorpVaultLDAP: description: |- HashicorpVaultLDAP is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: ldapPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/ldap/static-cred/:role_name or /v1/ldap/creds/:role_name type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - ldapPath type: object hashicorpVaultOAuth: description: |- HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource step to provide an OAuth token, which this step uses to authenticate to Vault. The output of this step is a Vault token. This step allows you to use the step `HashicorpVaultSecret` afterwards. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with HashiCorp Vault. The only supported value is "OIDC". enum: - OIDC type: string authPath: description: |- The login URL used for obtaining the Vault token. Example: /v1/auth/oidc/login type: string clientId: description: 'Deprecated: This field does nothing and will be removed in the future.' type: string role: description: |- The role defined in Vault that we want to use when authenticating to Vault. type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - authInputType - authPath - role type: object hashicorpVaultSecret: description: |- HashicorpVaultSecret is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. 
It then fetches the requested secrets from Vault for use in the next step. properties: fields: description: |- The fields are Vault keys pointing to the secrets passed to the next SecretSource step. Example 1 (TPP, username and password): imagining that you have stored the username and password for TPP under the keys "username" and "password", you will want to set this field to `["username", "password"]`. The username is expected to be given first, the password second. items: type: string type: array secretPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/secret/data/application-team-a/tpp-username-password type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - fields - secretPath type: object secret: description: |- Secret is a SecretSource step meant to be the first step. It retrieves secret values from a Kubernetes Secret, and passes them to the next step. properties: fields: description: |- The names of the fields we want to extract from the Kubernetes secret. These fields are passed to the next step in the chain. items: type: string type: array name: description: The name of the Kubernetes secret. type: string required: - fields - name type: object serviceAccountToken: description: |- ServiceAccountToken is a SecretSource step meant to be the first step. It uses the Kubernetes TokenRequest API to retrieve a token for a given service account, and passes it to the next step. properties: audiences: description: |- Audiences are the intendend audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences. 
items: type: string type: array expirationSeconds: description: |- ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response. format: int64 type: integer name: description: The name of the Kubernetes service account. type: string required: - audiences - name type: object tppOAuth: description: |- TPPOAuth is a SecretSource step that authenticates to a TPP server. This step is meant to be the last step and requires a prior step that depends on the `authInputType`. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with TPP. The supported values are "UsernamePassword" and "JWT". enum: - UsernamePassword - JWT type: string clientId: description: ClientID is the clientId used to authenticate with TPP. type: string url: description: |- The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs https://tpp.example.com and https://tpp.example.com/vedsdk are equivalent. The ending `/vedsdk` is optional and is stripped out by our client. If not set, defaults to the URL defined at the top-level of the TPP configuration. type: string required: - authInputType type: object vcpOAuth: description: |- VCPOAuth is a SecretSource step that authenticates to the Certificate Manager, SaaS. This step is meant to be the last step and requires a prior step that outputs a JWT token. properties: tenantID: description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS. type: string type: object type: object maxItems: 50 type: array x-kubernetes-list-type: atomic url: description: |- The URL to connect to the Certificate Manager, SaaS instance. If not set, the default value https://api.venafi.cloud is used. 
type: string type: object type: object status: properties: conditions: description: List of status conditions to indicate the status of a VenafiConnection. items: description: ConnectionCondition contains condition information for a VenafiConnection. properties: lastTransitionTime: description: |- LastTransitionTime is the timestamp corresponding to the last status change of this condition. format: date-time type: string lastUpdateTime: description: lastUpdateTime is the time of the last update to this condition format: date-time type: string message: description: |- Message is a human readable description of the details of the last transition, complementing reason. type: string observedGeneration: description: |- If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Issuer. format: int64 type: integer reason: description: |- Reason is a brief machine readable explanation for the condition's last transition. type: string status: description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string tokenValidUntil: description: |- The ValidUntil time of the token used to authenticate with the Certificate Manager, SaaS. format: date-time type: string type: description: |- Type of the condition, should be a combination of the unique name of the operator and the type of condition. eg. 
`VenafiEnhancedIssuerReady` type: string required: - status - type type: object type: array x-kubernetes-list-map-keys: - type x-kubernetes-list-type: map type: object required: - metadata - spec type: object served: true storage: true subresources: status: {} {{ end }} {{ end }} ================================================ FILE: deploy/charts/venafi-kubernetes-agent/templates/venafi-connection-crd.yaml ================================================ {{/* DO NOT EDIT. Use 'make generate-crds-venconn' to regenerate. */}} {{- if .Values.crds.venafiConnection.include }} {{- if not (or (semverCompare "<1.25" .Capabilities.KubeVersion.GitVersion) .Values.crds.forceRemoveValidationAnnotations) }} apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: "venaficonnections.jetstack.io" {{- if .Values.crds.keep }} annotations: # This annotation prevents the CRD from being pruned by Helm when this chart # is deleted. helm.sh/resource-policy: keep {{- end }} labels: {{- include "venafi-connection.labels" . | nindent 4 }} spec: group: jetstack.io names: kind: VenafiConnection listKind: VenafiConnectionList plural: venaficonnections shortNames: - vc singular: venaficonnection scope: Namespaced versions: - name: v1alpha1 schema: openAPIV3Schema: description: VenafiConnection is the Schema for the VenafiConnection API properties: apiVersion: description: |- APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: description: |- Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: properties: allowReferencesFrom: description: |- A namespace selector that specifies what namespaces this VenafiConnection is allowed to be used from. If not set/ null, the VenafiConnection can only be used within its namespace. An empty selector ({}) matches all namespaces. If set to a non-empty selector, the VenafiConnection can only be used from namespaces that match the selector. This possibly excludes the namespace the VenafiConnection is in. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: description: |- A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: description: |- operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: description: |- values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array x-kubernetes-list-type: atomic required: - key - operator type: object type: array x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string description: |- matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
type: object type: object x-kubernetes-map-type: atomic firefly: properties: accessToken: description: |- The list of steps to retrieve the Access Token that will be used to connect to Firefly. items: properties: hashicorpVaultLDAP: description: |- HashicorpVaultLDAP is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: ldapPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/ldap/static-cred/:role_name or /v1/ldap/creds/:role_name type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - ldapPath type: object hashicorpVaultOAuth: description: |- HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource step to provide an OAuth token, which this step uses to authenticate to Vault. The output of this step is a Vault token. This step allows you to use the step `HashicorpVaultSecret` afterwards. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with HashiCorp Vault. The only supported value is "OIDC". enum: - OIDC type: string authPath: description: |- The login URL used for obtaining the Vault token. Example: /v1/auth/oidc/login type: string clientId: description: 'Deprecated: This field does nothing and will be removed in the future.' type: string role: description: |- The role defined in Vault that we want to use when authenticating to Vault. type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - authInputType - authPath - role type: object hashicorpVaultSecret: description: |- HashicorpVaultSecret is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. 
properties: fields: description: |- The fields are Vault keys pointing to the secrets passed to the next SecretSource step. Example 1 (TPP, username and password): imagining that you have stored the username and password for TPP under the keys "username" and "password", you will want to set this field to `["username", "password"]`. The username is expected to be given first, the password second. items: type: string type: array secretPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/secret/data/application-team-a/tpp-username-password type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - fields - secretPath type: object secret: description: |- Secret is a SecretSource step meant to be the first step. It retrieves secret values from a Kubernetes Secret, and passes them to the next step. properties: fields: description: |- The names of the fields we want to extract from the Kubernetes secret. These fields are passed to the next step in the chain. items: type: string type: array name: description: The name of the Kubernetes secret. type: string required: - fields - name type: object serviceAccountToken: description: |- ServiceAccountToken is a SecretSource step meant to be the first step. It uses the Kubernetes TokenRequest API to retrieve a token for a given service account, and passes it to the next step. properties: audiences: description: |- Audiences are the intendend audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences. items: type: string type: array expirationSeconds: description: |- ExpirationSeconds is the requested duration of validity of the request. 
The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response. format: int64 type: integer name: description: The name of the Kubernetes service account. type: string required: - audiences - name type: object tppOAuth: description: |- TPPOAuth is a SecretSource step that authenticates to a TPP server. This step is meant to be the last step and requires a prior step that depends on the `authInputType`. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with TPP. The supported values are "UsernamePassword" and "JWT". enum: - UsernamePassword - JWT type: string clientId: description: ClientID is the clientId used to authenticate with TPP. type: string url: description: |- The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs https://tpp.example.com and https://tpp.example.com/vedsdk are equivalent. The ending `/vedsdk` is optional and is stripped out by our client. If not set, defaults to the URL defined at the top-level of the TPP configuration. type: string required: - authInputType type: object vcpOAuth: description: |- VCPOAuth is a SecretSource step that authenticates to the Certificate Manager, SaaS. This step is meant to be the last step and requires a prior step that outputs a JWT token. properties: tenantID: description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS. type: string type: object type: object x-kubernetes-validations: - message: must have exactly one field set rule: '((has(self.secret) ? 1 : 0) + (has(self.serviceAccountToken) ? 1 : 0) + (has(self.hashicorpVaultOAuth) ? 1 : 0) + (has(self.hashicorpVaultSecret) ? 1 : 0) + (has(self.hashicorpVaultLDAP) ? 1 : 0) + (has(self.tppOAuth) ? 1 : 0) + (has(self.vcpOAuth) ? 
1 : 0)) == 1' maxItems: 50 type: array x-kubernetes-list-type: atomic url: description: The URL to connect to the Workload Identity Manager instance. type: string required: - url type: object tpp: properties: accessToken: description: The list of steps to retrieve a TPP access token. items: properties: hashicorpVaultLDAP: description: |- HashicorpVaultLDAP is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: ldapPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/ldap/static-cred/:role_name or /v1/ldap/creds/:role_name type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - ldapPath type: object hashicorpVaultOAuth: description: |- HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource step to provide an OAuth token, which this step uses to authenticate to Vault. The output of this step is a Vault token. This step allows you to use the step `HashicorpVaultSecret` afterwards. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with HashiCorp Vault. The only supported value is "OIDC". enum: - OIDC type: string authPath: description: |- The login URL used for obtaining the Vault token. Example: /v1/auth/oidc/login type: string clientId: description: 'Deprecated: This field does nothing and will be removed in the future.' type: string role: description: |- The role defined in Vault that we want to use when authenticating to Vault. type: string url: description: The URL to connect to your HashiCorp Vault instance. 
type: string required: - authInputType - authPath - role type: object hashicorpVaultSecret: description: |- HashicorpVaultSecret is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: fields: description: |- The fields are Vault keys pointing to the secrets passed to the next SecretSource step. Example 1 (TPP, username and password): imagining that you have stored the username and password for TPP under the keys "username" and "password", you will want to set this field to `["username", "password"]`. The username is expected to be given first, the password second. items: type: string type: array secretPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/secret/data/application-team-a/tpp-username-password type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - fields - secretPath type: object secret: description: |- Secret is a SecretSource step meant to be the first step. It retrieves secret values from a Kubernetes Secret, and passes them to the next step. properties: fields: description: |- The names of the fields we want to extract from the Kubernetes secret. These fields are passed to the next step in the chain. items: type: string type: array name: description: The name of the Kubernetes secret. type: string required: - fields - name type: object serviceAccountToken: description: |- ServiceAccountToken is a SecretSource step meant to be the first step. It uses the Kubernetes TokenRequest API to retrieve a token for a given service account, and passes it to the next step. properties: audiences: description: |- Audiences are the intendend audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. 
A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences. items: type: string type: array expirationSeconds: description: |- ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response. format: int64 type: integer name: description: The name of the Kubernetes service account. type: string required: - audiences - name type: object tppOAuth: description: |- TPPOAuth is a SecretSource step that authenticates to a TPP server. This step is meant to be the last step and requires a prior step that depends on the `authInputType`. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with TPP. The supported values are "UsernamePassword" and "JWT". enum: - UsernamePassword - JWT type: string clientId: description: ClientID is the clientId used to authenticate with TPP. type: string url: description: |- The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs https://tpp.example.com and https://tpp.example.com/vedsdk are equivalent. The ending `/vedsdk` is optional and is stripped out by our client. If not set, defaults to the URL defined at the top-level of the TPP configuration. type: string required: - authInputType type: object vcpOAuth: description: |- VCPOAuth is a SecretSource step that authenticates to the Certificate Manager, SaaS. This step is meant to be the last step and requires a prior step that outputs a JWT token. properties: tenantID: description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS. type: string type: object type: object x-kubernetes-validations: - message: must have exactly one field set rule: '((has(self.secret) ? 1 : 0) + (has(self.serviceAccountToken) ? 
1 : 0) + (has(self.hashicorpVaultOAuth) ? 1 : 0) + (has(self.hashicorpVaultSecret) ? 1 : 0) + (has(self.hashicorpVaultLDAP) ? 1 : 0) + (has(self.tppOAuth) ? 1 : 0) + (has(self.vcpOAuth) ? 1 : 0)) == 1' maxItems: 50 type: array x-kubernetes-list-type: atomic url: description: |- The URL to connect to the Certificate Manager Self-Hosted instance. The two URLs https://tpp.example.com and https://tpp.example.com/vedsdk are equivalent. The ending `/vedsdk` is optional and is stripped out by venafi-connection-lib. type: string required: - url type: object vaas: description: 'Deprecated: The ''vaas'' field is deprecated use the field called ''vcp'' instead.' properties: accessToken: description: |- The list of steps to retrieve the Access Token that will be used to connect to Certificate Manager, SaaS. items: properties: hashicorpVaultLDAP: description: |- HashicorpVaultLDAP is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: ldapPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/ldap/static-cred/:role_name or /v1/ldap/creds/:role_name type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - ldapPath type: object hashicorpVaultOAuth: description: |- HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource step to provide an OAuth token, which this step uses to authenticate to Vault. The output of this step is a Vault token. This step allows you to use the step `HashicorpVaultSecret` afterwards. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with HashiCorp Vault. The only supported value is "OIDC". enum: - OIDC type: string authPath: description: |- The login URL used for obtaining the Vault token. 
Example: /v1/auth/oidc/login type: string clientId: description: 'Deprecated: This field does nothing and will be removed in the future.' type: string role: description: |- The role defined in Vault that we want to use when authenticating to Vault. type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - authInputType - authPath - role type: object hashicorpVaultSecret: description: |- HashicorpVaultSecret is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: fields: description: |- The fields are Vault keys pointing to the secrets passed to the next SecretSource step. Example 1 (TPP, username and password): imagining that you have stored the username and password for TPP under the keys "username" and "password", you will want to set this field to `["username", "password"]`. The username is expected to be given first, the password second. items: type: string type: array secretPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/secret/data/application-team-a/tpp-username-password type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - fields - secretPath type: object secret: description: |- Secret is a SecretSource step meant to be the first step. It retrieves secret values from a Kubernetes Secret, and passes them to the next step. properties: fields: description: |- The names of the fields we want to extract from the Kubernetes secret. These fields are passed to the next step in the chain. items: type: string type: array name: description: The name of the Kubernetes secret. type: string required: - fields - name type: object serviceAccountToken: description: |- ServiceAccountToken is a SecretSource step meant to be the first step. 
It uses the Kubernetes TokenRequest API to retrieve a token for a given service account, and passes it to the next step. properties: audiences: description: |- Audiences are the intendend audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences. items: type: string type: array expirationSeconds: description: |- ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response. format: int64 type: integer name: description: The name of the Kubernetes service account. type: string required: - audiences - name type: object tppOAuth: description: |- TPPOAuth is a SecretSource step that authenticates to a TPP server. This step is meant to be the last step and requires a prior step that depends on the `authInputType`. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with TPP. The supported values are "UsernamePassword" and "JWT". enum: - UsernamePassword - JWT type: string clientId: description: ClientID is the clientId used to authenticate with TPP. type: string url: description: |- The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs https://tpp.example.com and https://tpp.example.com/vedsdk are equivalent. The ending `/vedsdk` is optional and is stripped out by our client. If not set, defaults to the URL defined at the top-level of the TPP configuration. type: string required: - authInputType type: object vcpOAuth: description: |- VCPOAuth is a SecretSource step that authenticates to the Certificate Manager, SaaS. 
This step is meant to be the last step and requires a prior step that outputs a JWT token. properties: tenantID: description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS. type: string type: object type: object x-kubernetes-validations: - message: must have exactly one field set rule: '((has(self.secret) ? 1 : 0) + (has(self.serviceAccountToken) ? 1 : 0) + (has(self.hashicorpVaultOAuth) ? 1 : 0) + (has(self.hashicorpVaultSecret) ? 1 : 0) + (has(self.hashicorpVaultLDAP) ? 1 : 0) + (has(self.tppOAuth) ? 1 : 0) + (has(self.vcpOAuth) ? 1 : 0)) == 1' maxItems: 50 type: array x-kubernetes-list-type: atomic apiKey: description: |- The list of steps to retrieve the API key that will be used to connect to Certificate Manager, SaaS. items: properties: hashicorpVaultLDAP: description: |- HashicorpVaultLDAP is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: ldapPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/ldap/static-cred/:role_name or /v1/ldap/creds/:role_name type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - ldapPath type: object hashicorpVaultOAuth: description: |- HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource step to provide an OAuth token, which this step uses to authenticate to Vault. The output of this step is a Vault token. This step allows you to use the step `HashicorpVaultSecret` afterwards. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with HashiCorp Vault. The only supported value is "OIDC". enum: - OIDC type: string authPath: description: |- The login URL used for obtaining the Vault token. 
Example: /v1/auth/oidc/login type: string clientId: description: 'Deprecated: This field does nothing and will be removed in the future.' type: string role: description: |- The role defined in Vault that we want to use when authenticating to Vault. type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - authInputType - authPath - role type: object hashicorpVaultSecret: description: |- HashicorpVaultSecret is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: fields: description: |- The fields are Vault keys pointing to the secrets passed to the next SecretSource step. Example 1 (TPP, username and password): imagining that you have stored the username and password for TPP under the keys "username" and "password", you will want to set this field to `["username", "password"]`. The username is expected to be given first, the password second. items: type: string type: array secretPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/secret/data/application-team-a/tpp-username-password type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - fields - secretPath type: object secret: description: |- Secret is a SecretSource step meant to be the first step. It retrieves secret values from a Kubernetes Secret, and passes them to the next step. properties: fields: description: |- The names of the fields we want to extract from the Kubernetes secret. These fields are passed to the next step in the chain. items: type: string type: array name: description: The name of the Kubernetes secret. type: string required: - fields - name type: object serviceAccountToken: description: |- ServiceAccountToken is a SecretSource step meant to be the first step. 
It uses the Kubernetes TokenRequest API to retrieve a token for a given service account, and passes it to the next step. properties: audiences: description: |- Audiences are the intendend audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences. items: type: string type: array expirationSeconds: description: |- ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response. format: int64 type: integer name: description: The name of the Kubernetes service account. type: string required: - audiences - name type: object tppOAuth: description: |- TPPOAuth is a SecretSource step that authenticates to a TPP server. This step is meant to be the last step and requires a prior step that depends on the `authInputType`. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with TPP. The supported values are "UsernamePassword" and "JWT". enum: - UsernamePassword - JWT type: string clientId: description: ClientID is the clientId used to authenticate with TPP. type: string url: description: |- The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs https://tpp.example.com and https://tpp.example.com/vedsdk are equivalent. The ending `/vedsdk` is optional and is stripped out by our client. If not set, defaults to the URL defined at the top-level of the TPP configuration. type: string required: - authInputType type: object vcpOAuth: description: |- VCPOAuth is a SecretSource step that authenticates to the Certificate Manager, SaaS. 
This step is meant to be the last step and requires a prior step that outputs a JWT token. properties: tenantID: description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS. type: string type: object type: object x-kubernetes-validations: - message: must have exactly one field set rule: '((has(self.secret) ? 1 : 0) + (has(self.serviceAccountToken) ? 1 : 0) + (has(self.hashicorpVaultOAuth) ? 1 : 0) + (has(self.hashicorpVaultSecret) ? 1 : 0) + (has(self.hashicorpVaultLDAP) ? 1 : 0) + (has(self.tppOAuth) ? 1 : 0) + (has(self.vcpOAuth) ? 1 : 0)) == 1' maxItems: 50 type: array x-kubernetes-list-type: atomic url: description: |- The URL to connect to the Certificate Manager, SaaS instance. If not set, the default value https://api.venafi.cloud is used. type: string type: object x-kubernetes-validations: - message: 'must have exactly ONE of the following fields set: apiKey or accessToken' rule: '(has(self.apiKey) ? 1 : 0) + (has(self.accessToken) ? 1 : 0) == 1' vcp: properties: accessToken: description: |- The list of steps to retrieve the Access Token that will be used to connect to Certificate Manager, SaaS. items: properties: hashicorpVaultLDAP: description: |- HashicorpVaultLDAP is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: ldapPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/ldap/static-cred/:role_name or /v1/ldap/creds/:role_name type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - ldapPath type: object hashicorpVaultOAuth: description: |- HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource step to provide an OAuth token, which this step uses to authenticate to Vault. The output of this step is a Vault token. 
This step allows you to use the step `HashicorpVaultSecret` afterwards. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with HashiCorp Vault. The only supported value is "OIDC". enum: - OIDC type: string authPath: description: |- The login URL used for obtaining the Vault token. Example: /v1/auth/oidc/login type: string clientId: description: 'Deprecated: This field does nothing and will be removed in the future.' type: string role: description: |- The role defined in Vault that we want to use when authenticating to Vault. type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - authInputType - authPath - role type: object hashicorpVaultSecret: description: |- HashicorpVaultSecret is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: fields: description: |- The fields are Vault keys pointing to the secrets passed to the next SecretSource step. Example 1 (TPP, username and password): imagining that you have stored the username and password for TPP under the keys "username" and "password", you will want to set this field to `["username", "password"]`. The username is expected to be given first, the password second. items: type: string type: array secretPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/secret/data/application-team-a/tpp-username-password type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - fields - secretPath type: object secret: description: |- Secret is a SecretSource step meant to be the first step. It retrieves secret values from a Kubernetes Secret, and passes them to the next step. properties: fields: description: |- The names of the fields we want to extract from the Kubernetes secret. 
These fields are passed to the next step in the chain. items: type: string type: array name: description: The name of the Kubernetes secret. type: string required: - fields - name type: object serviceAccountToken: description: |- ServiceAccountToken is a SecretSource step meant to be the first step. It uses the Kubernetes TokenRequest API to retrieve a token for a given service account, and passes it to the next step. properties: audiences: description: |- Audiences are the intended audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences. items: type: string type: array expirationSeconds: description: |- ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response. format: int64 type: integer name: description: The name of the Kubernetes service account. type: string required: - audiences - name type: object tppOAuth: description: |- TPPOAuth is a SecretSource step that authenticates to a TPP server. This step is meant to be the last step and requires a prior step that depends on the `authInputType`. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with TPP. The supported values are "UsernamePassword" and "JWT". enum: - UsernamePassword - JWT type: string clientId: description: ClientID is the clientId used to authenticate with TPP. type: string url: description: |- The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs https://tpp.example.com and https://tpp.example.com/vedsdk are equivalent. The ending `/vedsdk` is optional and is stripped out by our client. 
If not set, defaults to the URL defined at the top-level of the TPP configuration. type: string required: - authInputType type: object vcpOAuth: description: |- VCPOAuth is a SecretSource step that authenticates to the Certificate Manager, SaaS. This step is meant to be the last step and requires a prior step that outputs a JWT token. properties: tenantID: description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS. type: string type: object type: object x-kubernetes-validations: - message: must have exactly one field set rule: '((has(self.secret) ? 1 : 0) + (has(self.serviceAccountToken) ? 1 : 0) + (has(self.hashicorpVaultOAuth) ? 1 : 0) + (has(self.hashicorpVaultSecret) ? 1 : 0) + (has(self.hashicorpVaultLDAP) ? 1 : 0) + (has(self.tppOAuth) ? 1 : 0) + (has(self.vcpOAuth) ? 1 : 0)) == 1' maxItems: 50 type: array x-kubernetes-list-type: atomic apiKey: description: |- The list of steps to retrieve the API key that will be used to connect to Certificate Manager, SaaS. items: properties: hashicorpVaultLDAP: description: |- HashicorpVaultLDAP is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: ldapPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/ldap/static-cred/:role_name or /v1/ldap/creds/:role_name type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - ldapPath type: object hashicorpVaultOAuth: description: |- HashicorpVaultOAuth is a SecretSource that relies on a prior SecretSource step to provide an OAuth token, which this step uses to authenticate to Vault. The output of this step is a Vault token. This step allows you to use the step `HashicorpVaultSecret` afterwards. 
properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with HashiCorp Vault. The only supported value is "OIDC". enum: - OIDC type: string authPath: description: |- The login URL used for obtaining the Vault token. Example: /v1/auth/oidc/login type: string clientId: description: 'Deprecated: This field does nothing and will be removed in the future.' type: string role: description: |- The role defined in Vault that we want to use when authenticating to Vault. type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - authInputType - authPath - role type: object hashicorpVaultSecret: description: |- HashicorpVaultSecret is a SecretSource step that requires a Vault token in the previous step, either using a step `HashicorpVaultOAuth` or `Secret`. It then fetches the requested secrets from Vault for use in the next step. properties: fields: description: |- The fields are Vault keys pointing to the secrets passed to the next SecretSource step. Example 1 (TPP, username and password): imagining that you have stored the username and password for TPP under the keys "username" and "password", you will want to set this field to `["username", "password"]`. The username is expected to be given first, the password second. items: type: string type: array secretPath: description: |- The full HTTP path to the secret in Vault. Example: /v1/secret/data/application-team-a/tpp-username-password type: string url: description: The URL to connect to your HashiCorp Vault instance. type: string required: - fields - secretPath type: object secret: description: |- Secret is a SecretSource step meant to be the first step. It retrieves secret values from a Kubernetes Secret, and passes them to the next step. properties: fields: description: |- The names of the fields we want to extract from the Kubernetes secret. These fields are passed to the next step in the chain. 
items: type: string type: array name: description: The name of the Kubernetes secret. type: string required: - fields - name type: object serviceAccountToken: description: |- ServiceAccountToken is a SecretSource step meant to be the first step. It uses the Kubernetes TokenRequest API to retrieve a token for a given service account, and passes it to the next step. properties: audiences: description: |- Audiences are the intended audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences. items: type: string type: array expirationSeconds: description: |- ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response. format: int64 type: integer name: description: The name of the Kubernetes service account. type: string required: - audiences - name type: object tppOAuth: description: |- TPPOAuth is a SecretSource step that authenticates to a TPP server. This step is meant to be the last step and requires a prior step that depends on the `authInputType`. properties: authInputType: description: |- AuthInputType is the authentication method to be used to authenticate with TPP. The supported values are "UsernamePassword" and "JWT". enum: - UsernamePassword - JWT type: string clientId: description: ClientID is the clientId used to authenticate with TPP. type: string url: description: |- The URL to connect to the Certificate Manager, Self-Hosted instance. The two URLs https://tpp.example.com and https://tpp.example.com/vedsdk are equivalent. The ending `/vedsdk` is optional and is stripped out by our client. 
If not set, defaults to the URL defined at the top-level of the TPP configuration. type: string required: - authInputType type: object vcpOAuth: description: |- VCPOAuth is a SecretSource step that authenticates to the Certificate Manager, SaaS. This step is meant to be the last step and requires a prior step that outputs a JWT token. properties: tenantID: description: TenantID is the tenant ID used to authenticate with Certificate Manager, SaaS. type: string type: object type: object x-kubernetes-validations: - message: must have exactly one field set rule: '((has(self.secret) ? 1 : 0) + (has(self.serviceAccountToken) ? 1 : 0) + (has(self.hashicorpVaultOAuth) ? 1 : 0) + (has(self.hashicorpVaultSecret) ? 1 : 0) + (has(self.hashicorpVaultLDAP) ? 1 : 0) + (has(self.tppOAuth) ? 1 : 0) + (has(self.vcpOAuth) ? 1 : 0)) == 1' maxItems: 50 type: array x-kubernetes-list-type: atomic url: description: |- The URL to connect to the Certificate Manager, SaaS instance. If not set, the default value https://api.venafi.cloud is used. type: string type: object x-kubernetes-validations: - message: 'must have exactly ONE of the following fields set: apiKey or accessToken' rule: '(has(self.apiKey) ? 1 : 0) + (has(self.accessToken) ? 1 : 0) == 1' type: object x-kubernetes-validations: - message: 'must have exactly ONE of the following fields set: tpp or vcp' rule: '(has(self.tpp) ? 1 : 0) + (has(self.vaas) ? 1 : 0) + (has(self.vcp) ? 1 : 0) + (has(self.firefly) ? 1 : 0) == 1' status: properties: conditions: description: List of status conditions to indicate the status of a VenafiConnection. items: description: ConnectionCondition contains condition information for a VenafiConnection. properties: lastTransitionTime: description: |- LastTransitionTime is the timestamp corresponding to the last status change of this condition. 
format: date-time type: string lastUpdateTime: description: lastUpdateTime is the time of the last update to this condition format: date-time type: string message: description: |- Message is a human readable description of the details of the last transition, complementing reason. type: string observedGeneration: description: |- If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Issuer. format: int64 type: integer reason: description: |- Reason is a brief machine readable explanation for the condition's last transition. type: string status: description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string tokenValidUntil: description: |- The ValidUntil time of the token used to authenticate with the Certificate Manager, SaaS. format: date-time type: string type: description: |- Type of the condition, should be a combination of the unique name of the operator and the type of condition. eg. `VenafiEnhancedIssuerReady` type: string required: - status - type type: object type: array x-kubernetes-list-map-keys: - type x-kubernetes-list-type: map type: object required: - metadata - spec type: object served: true storage: true subresources: status: {} {{ end }} {{ end }} ================================================ FILE: deploy/charts/venafi-kubernetes-agent/templates/venafi-connection-rbac.yaml ================================================ {{- if .Values.crds.venafiConnection.include }} # The 'venafi-connection' service account is used by multiple # controllers. When configuring which resources a VenafiConnection # can access, the RBAC rules you create manually must point to this SA. 
apiVersion: v1 kind: ServiceAccount metadata: name: venafi-connection namespace: {{ $.Release.Namespace | quote }} labels: {{- include "venafi-connection.labels" $ | nindent 4 }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: venafi-connection-role labels: {{- include "venafi-connection.labels" $ | nindent 4 }} rules: - apiGroups: [ "" ] resources: [ "namespaces" ] verbs: [ "get", "list", "watch" ] - apiGroups: [ "jetstack.io" ] resources: [ "venaficonnections" ] verbs: [ "get", "list", "watch" ] - apiGroups: [ "jetstack.io" ] resources: [ "venaficonnections/status" ] verbs: [ "get", "patch" ] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: venafi-connection-rolebinding labels: {{- include "venafi-connection.labels" $ | nindent 4 }} roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: venafi-connection-role subjects: - kind: ServiceAccount name: venafi-connection namespace: {{ $.Release.Namespace | quote }} {{- end }} ================================================ FILE: deploy/charts/venafi-kubernetes-agent/templates/venafi-rbac.yaml ================================================ {{- if .Values.authentication.venafiConnection.enabled }} apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: venafi-kubernetes-agent-impersonate-role namespace: {{ $.Release.Namespace | quote }} labels: {{- include "venafi-kubernetes-agent.labels" . | nindent 4 }} rules: - apiGroups: [ "" ] resources: [ "serviceaccounts" ] verbs: [ "impersonate" ] resourceNames: [ "venafi-connection" ] --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: venafi-kubernetes-agent-impersonate-rolebinding namespace: {{ $.Release.Namespace | quote }} labels: {{- include "venafi-kubernetes-agent.labels" . 
| nindent 4 }} roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: venafi-kubernetes-agent-impersonate-role subjects: - kind: ServiceAccount name: {{ include "venafi-kubernetes-agent.serviceAccountName" . }} namespace: {{ $.Release.Namespace | quote }} {{- end }} ================================================ FILE: deploy/charts/venafi-kubernetes-agent/tests/__snapshot__/configmap_test.yaml.snap ================================================ custom-cluster-description: 1: | raw: | - Check the credentials Secret exists: "agent-credentials" > kubectl get secret -n test-ns agent-credentials - Check the application is running: > kubectl get pods -n test-ns -l app.kubernetes.io/instance=test - Check the application logs for successful connection to the platform: > kubectl logs -n test-ns -l app.kubernetes.io/instance=test 2: | apiVersion: v1 data: config.yaml: |- cluster_name: "" cluster_description: "A cloud hosted Kubernetes cluster hosting production workloads.\n\nteam: team-1\nemail: team-1@example.com\npurpose: Production workloads\n" server: "https://api.venafi.cloud/" period: "0h1m0s" venafi-cloud: uploader_id: "no" upload_path: "/v1/tlspk/upload/clusterdata" data-gatherers: # gather k8s apiserver version information - kind: "k8s-discovery" name: "k8s-discovery" # pods data is used in the pods and application_versions packages - kind: "k8s-dynamic" name: "k8s/pods" config: resource-type: resource: pods version: v1 - kind: "k8s-dynamic" name: "k8s/namespaces" config: resource-type: resource: namespaces version: v1 # gather services for pod readiness probe rules - kind: "k8s-dynamic" name: "k8s/services" config: resource-type: resource: services version: v1 # gather higher level resources to ensure data to determine ownership is present - kind: "k8s-dynamic" name: "k8s/deployments" config: resource-type: version: v1 resource: deployments group: apps - kind: "k8s-dynamic" name: "k8s/statefulsets" config: resource-type: version: v1 resource: 
statefulsets group: apps - kind: "k8s-dynamic" name: "k8s/daemonsets" config: resource-type: version: v1 resource: daemonsets group: apps - kind: "k8s-dynamic" name: "k8s/jobs" config: resource-type: version: v1 resource: jobs group: batch - kind: "k8s-dynamic" name: "k8s/cronjobs" config: resource-type: version: v1 resource: cronjobs group: batch - kind: "k8s-dynamic" name: "k8s/ingresses" config: resource-type: group: networking.k8s.io version: v1 resource: ingresses - kind: "k8s-dynamic" name: "k8s/secrets" config: resource-type: version: v1 resource: secrets field-selectors: - type!=kubernetes.io/service-account-token - type!=kubernetes.io/dockercfg - type!=kubernetes.io/dockerconfigjson - type!=kubernetes.io/basic-auth - type!=kubernetes.io/ssh-auth - type!=bootstrap.kubernetes.io/token - type!=helm.sh/release.v1 - kind: "k8s-dynamic" name: "k8s/certificates" config: resource-type: group: cert-manager.io version: v1 resource: certificates - kind: "k8s-dynamic" name: "k8s/certificaterequests" config: resource-type: group: cert-manager.io version: v1 resource: certificaterequests - kind: "k8s-dynamic" name: "k8s/issuers" config: resource-type: group: cert-manager.io version: v1 resource: issuers - kind: "k8s-dynamic" name: "k8s/clusterissuers" config: resource-type: group: cert-manager.io version: v1 resource: clusterissuers - kind: "k8s-dynamic" name: "k8s/googlecasissuers" config: resource-type: group: cas-issuer.jetstack.io version: v1beta1 resource: googlecasissuers - kind: "k8s-dynamic" name: "k8s/googlecasclusterissuers" config: resource-type: group: cas-issuer.jetstack.io version: v1beta1 resource: googlecasclusterissuers - kind: "k8s-dynamic" name: "k8s/awspcaissuer" config: resource-type: group: awspca.cert-manager.io version: v1beta1 resource: awspcaissuers - kind: "k8s-dynamic" name: "k8s/awspcaclusterissuers" config: resource-type: group: awspca.cert-manager.io version: v1beta1 resource: awspcaclusterissuers - kind: "k8s-dynamic" name: 
"k8s/mutatingwebhookconfigurations" config: resource-type: group: admissionregistration.k8s.io version: v1 resource: mutatingwebhookconfigurations - kind: "k8s-dynamic" name: "k8s/validatingwebhookconfigurations" config: resource-type: group: admissionregistration.k8s.io version: v1 resource: validatingwebhookconfigurations - kind: "k8s-dynamic" name: "k8s/gateways" config: resource-type: group: networking.istio.io version: v1alpha3 resource: gateways - kind: "k8s-dynamic" name: "k8s/virtualservices" config: resource-type: group: networking.istio.io version: v1alpha3 resource: virtualservices - kind: "k8s-dynamic" name: "k8s/routes" config: resource-type: version: v1 group: route.openshift.io resource: routes - kind: "k8s-dynamic" name: "k8s/venaficonnections" config: resource-type: group: jetstack.io version: v1alpha1 resource: venaficonnections - kind: "k8s-dynamic" name: "k8s/venaficlusterissuers" config: resource-type: group: jetstack.io version: v1alpha1 resource: venaficlusterissuers - kind: "k8s-dynamic" name: "k8s/venafiissuers" config: resource-type: group: jetstack.io version: v1alpha1 resource: venafiissuers - kind: "k8s-dynamic" name: "k8s/fireflyissuers" config: resource-type: group: firefly.venafi.com version: v1 resource: issuers - kind: "k8s-dynamic" name: "k8s/stepissuers" config: resource-type: group: certmanager.step.sm version: v1beta1 resource: stepissuers - kind: "k8s-dynamic" name: "k8s/stepclusterissuers" config: resource-type: group: certmanager.step.sm version: v1beta1 resource: stepclusterissuers - kind: "k8s-dynamic" name: "k8s/originissuers" config: resource-type: group: cert-manager.k8s.cloudflare.com version: v1 resource: originissuers - kind: "k8s-dynamic" name: "k8s/clusteroriginissuers" config: resource-type: group: cert-manager.k8s.cloudflare.com version: v1 resource: clusteroriginissuers - kind: "k8s-dynamic" name: "k8s/freeipaissuers" config: resource-type: group: certmanager.freeipa.org version: v1beta1 resource: issuers - 
kind: "k8s-dynamic" name: "k8s/freeipaclusterissuers" config: resource-type: group: certmanager.freeipa.org version: v1beta1 resource: clusterissuers - kind: "k8s-dynamic" name: "k8s/ejbcaissuers" config: resource-type: group: ejbca-issuer.keyfactor.com version: v1alpha1 resource: issuers - kind: "k8s-dynamic" name: "k8s/ejbcaclusterissuers" config: resource-type: group: ejbca-issuer.keyfactor.com version: v1alpha1 resource: clusterissuers kind: ConfigMap metadata: labels: app.kubernetes.io/instance: test app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: venafi-kubernetes-agent app.kubernetes.io/version: v0.0.0 helm.sh/chart: venafi-kubernetes-agent-0.0.0 name: agent-config namespace: test-ns custom-cluster-name: 1: | raw: | - Check the credentials Secret exists: "agent-credentials" > kubectl get secret -n test-ns agent-credentials - Check the application is running: > kubectl get pods -n test-ns -l app.kubernetes.io/instance=test - Check the application logs for successful connection to the platform: > kubectl logs -n test-ns -l app.kubernetes.io/instance=test 2: | apiVersion: v1 data: config.yaml: |- cluster_name: "cluster-1 region-1 cloud-1 " cluster_description: "" server: "https://api.venafi.cloud/" period: "0h1m0s" venafi-cloud: uploader_id: "no" upload_path: "/v1/tlspk/upload/clusterdata" data-gatherers: # gather k8s apiserver version information - kind: "k8s-discovery" name: "k8s-discovery" # pods data is used in the pods and application_versions packages - kind: "k8s-dynamic" name: "k8s/pods" config: resource-type: resource: pods version: v1 - kind: "k8s-dynamic" name: "k8s/namespaces" config: resource-type: resource: namespaces version: v1 # gather services for pod readiness probe rules - kind: "k8s-dynamic" name: "k8s/services" config: resource-type: resource: services version: v1 # gather higher level resources to ensure data to determine ownership is present - kind: "k8s-dynamic" name: "k8s/deployments" config: resource-type: version: v1 
resource: deployments group: apps - kind: "k8s-dynamic" name: "k8s/statefulsets" config: resource-type: version: v1 resource: statefulsets group: apps - kind: "k8s-dynamic" name: "k8s/daemonsets" config: resource-type: version: v1 resource: daemonsets group: apps - kind: "k8s-dynamic" name: "k8s/jobs" config: resource-type: version: v1 resource: jobs group: batch - kind: "k8s-dynamic" name: "k8s/cronjobs" config: resource-type: version: v1 resource: cronjobs group: batch - kind: "k8s-dynamic" name: "k8s/ingresses" config: resource-type: group: networking.k8s.io version: v1 resource: ingresses - kind: "k8s-dynamic" name: "k8s/secrets" config: resource-type: version: v1 resource: secrets field-selectors: - type!=kubernetes.io/service-account-token - type!=kubernetes.io/dockercfg - type!=kubernetes.io/dockerconfigjson - type!=kubernetes.io/basic-auth - type!=kubernetes.io/ssh-auth - type!=bootstrap.kubernetes.io/token - type!=helm.sh/release.v1 - kind: "k8s-dynamic" name: "k8s/certificates" config: resource-type: group: cert-manager.io version: v1 resource: certificates - kind: "k8s-dynamic" name: "k8s/certificaterequests" config: resource-type: group: cert-manager.io version: v1 resource: certificaterequests - kind: "k8s-dynamic" name: "k8s/issuers" config: resource-type: group: cert-manager.io version: v1 resource: issuers - kind: "k8s-dynamic" name: "k8s/clusterissuers" config: resource-type: group: cert-manager.io version: v1 resource: clusterissuers - kind: "k8s-dynamic" name: "k8s/googlecasissuers" config: resource-type: group: cas-issuer.jetstack.io version: v1beta1 resource: googlecasissuers - kind: "k8s-dynamic" name: "k8s/googlecasclusterissuers" config: resource-type: group: cas-issuer.jetstack.io version: v1beta1 resource: googlecasclusterissuers - kind: "k8s-dynamic" name: "k8s/awspcaissuer" config: resource-type: group: awspca.cert-manager.io version: v1beta1 resource: awspcaissuers - kind: "k8s-dynamic" name: "k8s/awspcaclusterissuers" config: 
resource-type: group: awspca.cert-manager.io version: v1beta1 resource: awspcaclusterissuers - kind: "k8s-dynamic" name: "k8s/mutatingwebhookconfigurations" config: resource-type: group: admissionregistration.k8s.io version: v1 resource: mutatingwebhookconfigurations - kind: "k8s-dynamic" name: "k8s/validatingwebhookconfigurations" config: resource-type: group: admissionregistration.k8s.io version: v1 resource: validatingwebhookconfigurations - kind: "k8s-dynamic" name: "k8s/gateways" config: resource-type: group: networking.istio.io version: v1alpha3 resource: gateways - kind: "k8s-dynamic" name: "k8s/virtualservices" config: resource-type: group: networking.istio.io version: v1alpha3 resource: virtualservices - kind: "k8s-dynamic" name: "k8s/routes" config: resource-type: version: v1 group: route.openshift.io resource: routes - kind: "k8s-dynamic" name: "k8s/venaficonnections" config: resource-type: group: jetstack.io version: v1alpha1 resource: venaficonnections - kind: "k8s-dynamic" name: "k8s/venaficlusterissuers" config: resource-type: group: jetstack.io version: v1alpha1 resource: venaficlusterissuers - kind: "k8s-dynamic" name: "k8s/venafiissuers" config: resource-type: group: jetstack.io version: v1alpha1 resource: venafiissuers - kind: "k8s-dynamic" name: "k8s/fireflyissuers" config: resource-type: group: firefly.venafi.com version: v1 resource: issuers - kind: "k8s-dynamic" name: "k8s/stepissuers" config: resource-type: group: certmanager.step.sm version: v1beta1 resource: stepissuers - kind: "k8s-dynamic" name: "k8s/stepclusterissuers" config: resource-type: group: certmanager.step.sm version: v1beta1 resource: stepclusterissuers - kind: "k8s-dynamic" name: "k8s/originissuers" config: resource-type: group: cert-manager.k8s.cloudflare.com version: v1 resource: originissuers - kind: "k8s-dynamic" name: "k8s/clusteroriginissuers" config: resource-type: group: cert-manager.k8s.cloudflare.com version: v1 resource: clusteroriginissuers - kind: "k8s-dynamic" 
name: "k8s/freeipaissuers" config: resource-type: group: certmanager.freeipa.org version: v1beta1 resource: issuers - kind: "k8s-dynamic" name: "k8s/freeipaclusterissuers" config: resource-type: group: certmanager.freeipa.org version: v1beta1 resource: clusterissuers - kind: "k8s-dynamic" name: "k8s/ejbcaissuers" config: resource-type: group: ejbca-issuer.keyfactor.com version: v1alpha1 resource: issuers - kind: "k8s-dynamic" name: "k8s/ejbcaclusterissuers" config: resource-type: group: ejbca-issuer.keyfactor.com version: v1alpha1 resource: clusterissuers kind: ConfigMap metadata: labels: app.kubernetes.io/instance: test app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: venafi-kubernetes-agent app.kubernetes.io/version: v0.0.0 helm.sh/chart: venafi-kubernetes-agent-0.0.0 name: agent-config namespace: test-ns custom-configmap: 1: | | You are using a custom configuration in the following ConfigMap: "agent-custom-config". DEPRECATION: The `cluster_id` configuration field is deprecated. If your configuration contains `cluster_id`, it will continue to work as a fallback, but please migrate to `cluster_name` to avoid ambiguity. 
- Check the credentials Secret exists: "agent-credentials" > kubectl get secret -n test-ns agent-credentials - Check the application is running: > kubectl get pods -n test-ns -l app.kubernetes.io/instance=test - Check the application logs for successful connection to the platform: > kubectl logs -n test-ns -l app.kubernetes.io/instance=test custom-period: 1: | raw: | - Check the credentials Secret exists: "agent-credentials" > kubectl get secret -n test-ns agent-credentials - Check the application is running: > kubectl get pods -n test-ns -l app.kubernetes.io/instance=test - Check the application logs for successful connection to the platform: > kubectl logs -n test-ns -l app.kubernetes.io/instance=test 2: | apiVersion: v1 data: config.yaml: |- cluster_name: "" cluster_description: "" server: "https://api.venafi.cloud/" period: "1m" venafi-cloud: uploader_id: "no" upload_path: "/v1/tlspk/upload/clusterdata" data-gatherers: # gather k8s apiserver version information - kind: "k8s-discovery" name: "k8s-discovery" # pods data is used in the pods and application_versions packages - kind: "k8s-dynamic" name: "k8s/pods" config: resource-type: resource: pods version: v1 - kind: "k8s-dynamic" name: "k8s/namespaces" config: resource-type: resource: namespaces version: v1 # gather services for pod readiness probe rules - kind: "k8s-dynamic" name: "k8s/services" config: resource-type: resource: services version: v1 # gather higher level resources to ensure data to determine ownership is present - kind: "k8s-dynamic" name: "k8s/deployments" config: resource-type: version: v1 resource: deployments group: apps - kind: "k8s-dynamic" name: "k8s/statefulsets" config: resource-type: version: v1 resource: statefulsets group: apps - kind: "k8s-dynamic" name: "k8s/daemonsets" config: resource-type: version: v1 resource: daemonsets group: apps - kind: "k8s-dynamic" name: "k8s/jobs" config: resource-type: version: v1 resource: jobs group: batch - kind: "k8s-dynamic" name: "k8s/cronjobs" 
config: resource-type: version: v1 resource: cronjobs group: batch - kind: "k8s-dynamic" name: "k8s/ingresses" config: resource-type: group: networking.k8s.io version: v1 resource: ingresses - kind: "k8s-dynamic" name: "k8s/secrets" config: resource-type: version: v1 resource: secrets field-selectors: - type!=kubernetes.io/service-account-token - type!=kubernetes.io/dockercfg - type!=kubernetes.io/dockerconfigjson - type!=kubernetes.io/basic-auth - type!=kubernetes.io/ssh-auth - type!=bootstrap.kubernetes.io/token - type!=helm.sh/release.v1 - kind: "k8s-dynamic" name: "k8s/certificates" config: resource-type: group: cert-manager.io version: v1 resource: certificates - kind: "k8s-dynamic" name: "k8s/certificaterequests" config: resource-type: group: cert-manager.io version: v1 resource: certificaterequests - kind: "k8s-dynamic" name: "k8s/issuers" config: resource-type: group: cert-manager.io version: v1 resource: issuers - kind: "k8s-dynamic" name: "k8s/clusterissuers" config: resource-type: group: cert-manager.io version: v1 resource: clusterissuers - kind: "k8s-dynamic" name: "k8s/googlecasissuers" config: resource-type: group: cas-issuer.jetstack.io version: v1beta1 resource: googlecasissuers - kind: "k8s-dynamic" name: "k8s/googlecasclusterissuers" config: resource-type: group: cas-issuer.jetstack.io version: v1beta1 resource: googlecasclusterissuers - kind: "k8s-dynamic" name: "k8s/awspcaissuer" config: resource-type: group: awspca.cert-manager.io version: v1beta1 resource: awspcaissuers - kind: "k8s-dynamic" name: "k8s/awspcaclusterissuers" config: resource-type: group: awspca.cert-manager.io version: v1beta1 resource: awspcaclusterissuers - kind: "k8s-dynamic" name: "k8s/mutatingwebhookconfigurations" config: resource-type: group: admissionregistration.k8s.io version: v1 resource: mutatingwebhookconfigurations - kind: "k8s-dynamic" name: "k8s/validatingwebhookconfigurations" config: resource-type: group: admissionregistration.k8s.io version: v1 resource: 
validatingwebhookconfigurations - kind: "k8s-dynamic" name: "k8s/gateways" config: resource-type: group: networking.istio.io version: v1alpha3 resource: gateways - kind: "k8s-dynamic" name: "k8s/virtualservices" config: resource-type: group: networking.istio.io version: v1alpha3 resource: virtualservices - kind: "k8s-dynamic" name: "k8s/routes" config: resource-type: version: v1 group: route.openshift.io resource: routes - kind: "k8s-dynamic" name: "k8s/venaficonnections" config: resource-type: group: jetstack.io version: v1alpha1 resource: venaficonnections - kind: "k8s-dynamic" name: "k8s/venaficlusterissuers" config: resource-type: group: jetstack.io version: v1alpha1 resource: venaficlusterissuers - kind: "k8s-dynamic" name: "k8s/venafiissuers" config: resource-type: group: jetstack.io version: v1alpha1 resource: venafiissuers - kind: "k8s-dynamic" name: "k8s/fireflyissuers" config: resource-type: group: firefly.venafi.com version: v1 resource: issuers - kind: "k8s-dynamic" name: "k8s/stepissuers" config: resource-type: group: certmanager.step.sm version: v1beta1 resource: stepissuers - kind: "k8s-dynamic" name: "k8s/stepclusterissuers" config: resource-type: group: certmanager.step.sm version: v1beta1 resource: stepclusterissuers - kind: "k8s-dynamic" name: "k8s/originissuers" config: resource-type: group: cert-manager.k8s.cloudflare.com version: v1 resource: originissuers - kind: "k8s-dynamic" name: "k8s/clusteroriginissuers" config: resource-type: group: cert-manager.k8s.cloudflare.com version: v1 resource: clusteroriginissuers - kind: "k8s-dynamic" name: "k8s/freeipaissuers" config: resource-type: group: certmanager.freeipa.org version: v1beta1 resource: issuers - kind: "k8s-dynamic" name: "k8s/freeipaclusterissuers" config: resource-type: group: certmanager.freeipa.org version: v1beta1 resource: clusterissuers - kind: "k8s-dynamic" name: "k8s/ejbcaissuers" config: resource-type: group: ejbca-issuer.keyfactor.com version: v1alpha1 resource: issuers - kind: 
"k8s-dynamic" name: "k8s/ejbcaclusterissuers" config: resource-type: group: ejbca-issuer.keyfactor.com version: v1alpha1 resource: clusterissuers kind: ConfigMap metadata: labels: app.kubernetes.io/instance: test app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: venafi-kubernetes-agent app.kubernetes.io/version: v0.0.0 helm.sh/chart: venafi-kubernetes-agent-0.0.0 name: agent-config namespace: test-ns defaults: 1: | raw: | - Check the credentials Secret exists: "agent-credentials" > kubectl get secret -n test-ns agent-credentials - Check the application is running: > kubectl get pods -n test-ns -l app.kubernetes.io/instance=test - Check the application logs for successful connection to the platform: > kubectl logs -n test-ns -l app.kubernetes.io/instance=test 2: | apiVersion: v1 data: config.yaml: |- cluster_name: "" cluster_description: "" server: "https://api.venafi.cloud/" period: "0h1m0s" venafi-cloud: uploader_id: "no" upload_path: "/v1/tlspk/upload/clusterdata" data-gatherers: # gather k8s apiserver version information - kind: "k8s-discovery" name: "k8s-discovery" # pods data is used in the pods and application_versions packages - kind: "k8s-dynamic" name: "k8s/pods" config: resource-type: resource: pods version: v1 - kind: "k8s-dynamic" name: "k8s/namespaces" config: resource-type: resource: namespaces version: v1 # gather services for pod readiness probe rules - kind: "k8s-dynamic" name: "k8s/services" config: resource-type: resource: services version: v1 # gather higher level resources to ensure data to determine ownership is present - kind: "k8s-dynamic" name: "k8s/deployments" config: resource-type: version: v1 resource: deployments group: apps - kind: "k8s-dynamic" name: "k8s/statefulsets" config: resource-type: version: v1 resource: statefulsets group: apps - kind: "k8s-dynamic" name: "k8s/daemonsets" config: resource-type: version: v1 resource: daemonsets group: apps - kind: "k8s-dynamic" name: "k8s/jobs" config: resource-type: version: v1 
resource: jobs group: batch - kind: "k8s-dynamic" name: "k8s/cronjobs" config: resource-type: version: v1 resource: cronjobs group: batch - kind: "k8s-dynamic" name: "k8s/ingresses" config: resource-type: group: networking.k8s.io version: v1 resource: ingresses - kind: "k8s-dynamic" name: "k8s/secrets" config: resource-type: version: v1 resource: secrets field-selectors: - type!=kubernetes.io/service-account-token - type!=kubernetes.io/dockercfg - type!=kubernetes.io/dockerconfigjson - type!=kubernetes.io/basic-auth - type!=kubernetes.io/ssh-auth - type!=bootstrap.kubernetes.io/token - type!=helm.sh/release.v1 - kind: "k8s-dynamic" name: "k8s/certificates" config: resource-type: group: cert-manager.io version: v1 resource: certificates - kind: "k8s-dynamic" name: "k8s/certificaterequests" config: resource-type: group: cert-manager.io version: v1 resource: certificaterequests - kind: "k8s-dynamic" name: "k8s/issuers" config: resource-type: group: cert-manager.io version: v1 resource: issuers - kind: "k8s-dynamic" name: "k8s/clusterissuers" config: resource-type: group: cert-manager.io version: v1 resource: clusterissuers - kind: "k8s-dynamic" name: "k8s/googlecasissuers" config: resource-type: group: cas-issuer.jetstack.io version: v1beta1 resource: googlecasissuers - kind: "k8s-dynamic" name: "k8s/googlecasclusterissuers" config: resource-type: group: cas-issuer.jetstack.io version: v1beta1 resource: googlecasclusterissuers - kind: "k8s-dynamic" name: "k8s/awspcaissuer" config: resource-type: group: awspca.cert-manager.io version: v1beta1 resource: awspcaissuers - kind: "k8s-dynamic" name: "k8s/awspcaclusterissuers" config: resource-type: group: awspca.cert-manager.io version: v1beta1 resource: awspcaclusterissuers - kind: "k8s-dynamic" name: "k8s/mutatingwebhookconfigurations" config: resource-type: group: admissionregistration.k8s.io version: v1 resource: mutatingwebhookconfigurations - kind: "k8s-dynamic" name: "k8s/validatingwebhookconfigurations" config: 
resource-type: group: admissionregistration.k8s.io version: v1 resource: validatingwebhookconfigurations - kind: "k8s-dynamic" name: "k8s/gateways" config: resource-type: group: networking.istio.io version: v1alpha3 resource: gateways - kind: "k8s-dynamic" name: "k8s/virtualservices" config: resource-type: group: networking.istio.io version: v1alpha3 resource: virtualservices - kind: "k8s-dynamic" name: "k8s/routes" config: resource-type: version: v1 group: route.openshift.io resource: routes - kind: "k8s-dynamic" name: "k8s/venaficonnections" config: resource-type: group: jetstack.io version: v1alpha1 resource: venaficonnections - kind: "k8s-dynamic" name: "k8s/venaficlusterissuers" config: resource-type: group: jetstack.io version: v1alpha1 resource: venaficlusterissuers - kind: "k8s-dynamic" name: "k8s/venafiissuers" config: resource-type: group: jetstack.io version: v1alpha1 resource: venafiissuers - kind: "k8s-dynamic" name: "k8s/fireflyissuers" config: resource-type: group: firefly.venafi.com version: v1 resource: issuers - kind: "k8s-dynamic" name: "k8s/stepissuers" config: resource-type: group: certmanager.step.sm version: v1beta1 resource: stepissuers - kind: "k8s-dynamic" name: "k8s/stepclusterissuers" config: resource-type: group: certmanager.step.sm version: v1beta1 resource: stepclusterissuers - kind: "k8s-dynamic" name: "k8s/originissuers" config: resource-type: group: cert-manager.k8s.cloudflare.com version: v1 resource: originissuers - kind: "k8s-dynamic" name: "k8s/clusteroriginissuers" config: resource-type: group: cert-manager.k8s.cloudflare.com version: v1 resource: clusteroriginissuers - kind: "k8s-dynamic" name: "k8s/freeipaissuers" config: resource-type: group: certmanager.freeipa.org version: v1beta1 resource: issuers - kind: "k8s-dynamic" name: "k8s/freeipaclusterissuers" config: resource-type: group: certmanager.freeipa.org version: v1beta1 resource: clusterissuers - kind: "k8s-dynamic" name: "k8s/ejbcaissuers" config: resource-type: 
group: ejbca-issuer.keyfactor.com version: v1alpha1 resource: issuers - kind: "k8s-dynamic" name: "k8s/ejbcaclusterissuers" config: resource-type: group: ejbca-issuer.keyfactor.com version: v1alpha1 resource: clusterissuers kind: ConfigMap metadata: labels: app.kubernetes.io/instance: test app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: venafi-kubernetes-agent app.kubernetes.io/version: v0.0.0 helm.sh/chart: venafi-kubernetes-agent-0.0.0 name: agent-config namespace: test-ns ================================================ FILE: deploy/charts/venafi-kubernetes-agent/tests/configmap_test.yaml ================================================ suite: test the contents of the config.yaml templates: - configmap.yaml - NOTES.txt release: name: test namespace: test-ns tests: - it: defaults asserts: - matchSnapshot: {} - it: custom-period set: config.period: 1m asserts: - matchSnapshot: {} - it: custom-cluster-name set: config.clusterName: "cluster-1 region-1 cloud-1 " asserts: - matchSnapshot: {} - it: custom-cluster-description set: config.clusterDescription: | A cloud hosted Kubernetes cluster hosting production workloads. 
team: team-1 email: team-1@example.com purpose: Production workloads asserts: - matchSnapshot: {} - it: custom-configmap set: config: configmap: name: agent-custom-config asserts: - matchSnapshotRaw: {} ================================================ FILE: deploy/charts/venafi-kubernetes-agent/tests/deployment_test.yaml ================================================ suite: test deployment templates: - deployment.yaml tests: # Basic checks on deployment - it: templates as expected set: image.tag: latest config.clientId: "00000000-0000-0000-0000-000000000000" template: deployment.yaml asserts: - isKind: of: Deployment # Validate name matches - matchRegex: path: metadata.name pattern: ^venafi-kubernetes-agent-* # Check is latest is set as tag that it uses that tag - equal: path: spec.template.spec.containers[0].image value: registry.venafi.cloud/venafi-agent/venafi-agent:latest # Check naming works with nameOverride - it: Deployment name is set when nameOverride is used set: nameOverride: example template: deployment.yaml asserts: - isKind: of: Deployment - matchRegex: path: metadata.name pattern: ^example-RELEASE-NAME$ # Check similar with fullnameOverride - it: Deployment name is set when fullnameOverride is used set: config.clientId: "00000000-0000-0000-0000-000000000000" fullnameOverride: example template: deployment.yaml asserts: - isKind: of: Deployment - equal: path: metadata.name value: example # Checking extraArgs are passed - it: Extra Args passed in a valid format when supplied set: config.clientId: "00000000-0000-0000-0000-000000000000" extraArgs: ["--strict", "--one-shot"] template: deployment.yaml asserts: - isKind: of: Deployment - contains: path: spec.template.spec.containers[0].args content: --strict - contains: path: spec.template.spec.containers[0].args content: --one-shot # Check command is present when configured - it: Command passes to deployment manifest set: config.clientId: "00000000-0000-0000-0000-000000000000" command: ["notpreflight"] 
template: deployment.yaml asserts: - isKind: of: Deployment - contains: path: spec.template.spec.containers[0].command content: notpreflight # Check the volumes and volumeMounts works correctly - it: Volumes and VolumeMounts added correctly values: - ./values/custom-volumes.yaml asserts: - isKind: of: Deployment - equal: # In template this comes after credentials and agent config volumeMounts path: spec.template.spec.containers[0].volumeMounts[?(@.name == "cabundle")] value: name: cabundle mountPath: /etc/ssl/certs/ca-certificates.crt subPath: ca-certificates.crt readOnly: true - equal: path: spec.template.spec.volumes[?(@.name == "cabundle")].configMap value: name: cabundle optional: false defaultMode: 0644 items: - key: cabundle path: ca-certificates.crt # Check proxy settings are additive not overriding and set to correct values. # Values from our documentation: https://docs.cyberark.com/mis-saas/vaas/k8s-components/c-vcp-network-requirements/#modifying-network-settings-for-kubernetes - it: All environment variables present when all proxy settings are supplied set: http_proxy: "http://:" https_proxy: "https://:" no_proxy: "127.0.0.1,localhost,kubernetes.default.svc,kubernetes.default.svc.cluster.local" template: deployment.yaml asserts: - isKind: of: Deployment - lengthEqual : path: spec.template.spec.containers[0].env count: 7 - equal: path: spec.template.spec.containers[0].env[?(@.name == "NO_PROXY")].value value: "127.0.0.1,localhost,kubernetes.default.svc,kubernetes.default.svc.cluster.local" - equal: path: spec.template.spec.containers[0].env[?(@.name == "HTTPS_PROXY")].value value: "https://:" - equal: path: spec.template.spec.containers[0].env[?(@.name == "HTTP_PROXY")].value value: "http://:" # Check no proxy settings are set when no proxy settings are provided - it: Only default environment variables are set when no proxy settings are provided template: deployment.yaml asserts: - isKind: of: Deployment - lengthEqual : path: 
spec.template.spec.containers[0].env count: 4 ================================================ FILE: deploy/charts/venafi-kubernetes-agent/tests/values/custom-volumes.yaml ================================================ volumes: - name: cabundle configMap: name: cabundle optional: false defaultMode: 0644 items: - key: cabundle path: ca-certificates.crt volumeMounts: - name: cabundle mountPath: /etc/ssl/certs/ca-certificates.crt subPath: ca-certificates.crt readOnly: true ================================================ FILE: deploy/charts/venafi-kubernetes-agent/values.linter.exceptions ================================================ ================================================ FILE: deploy/charts/venafi-kubernetes-agent/values.schema.json ================================================ { "$defs": { "helm-values": { "additionalProperties": false, "properties": { "affinity": { "$ref": "#/$defs/helm-values.affinity" }, "authentication": { "$ref": "#/$defs/helm-values.authentication" }, "command": { "$ref": "#/$defs/helm-values.command" }, "config": { "$ref": "#/$defs/helm-values.config" }, "crds": { "$ref": "#/$defs/helm-values.crds" }, "extraArgs": { "$ref": "#/$defs/helm-values.extraArgs" }, "fullnameOverride": { "$ref": "#/$defs/helm-values.fullnameOverride" }, "global": { "$ref": "#/$defs/helm-values.global" }, "http_proxy": { "$ref": "#/$defs/helm-values.http_proxy" }, "https_proxy": { "$ref": "#/$defs/helm-values.https_proxy" }, "image": { "$ref": "#/$defs/helm-values.image" }, "imageNamespace": { "$ref": "#/$defs/helm-values.imageNamespace" }, "imagePullSecrets": { "$ref": "#/$defs/helm-values.imagePullSecrets" }, "imageRegistry": { "$ref": "#/$defs/helm-values.imageRegistry" }, "metrics": { "$ref": "#/$defs/helm-values.metrics" }, "nameOverride": { "$ref": "#/$defs/helm-values.nameOverride" }, "no_proxy": { "$ref": "#/$defs/helm-values.no_proxy" }, "nodeSelector": { "$ref": "#/$defs/helm-values.nodeSelector" }, "podAnnotations": { "$ref": 
"#/$defs/helm-values.podAnnotations" }, "podDisruptionBudget": { "$ref": "#/$defs/helm-values.podDisruptionBudget" }, "podSecurityContext": { "$ref": "#/$defs/helm-values.podSecurityContext" }, "replicaCount": { "$ref": "#/$defs/helm-values.replicaCount" }, "resources": { "$ref": "#/$defs/helm-values.resources" }, "securityContext": { "$ref": "#/$defs/helm-values.securityContext" }, "serviceAccount": { "$ref": "#/$defs/helm-values.serviceAccount" }, "tolerations": { "$ref": "#/$defs/helm-values.tolerations" }, "volumeMounts": { "$ref": "#/$defs/helm-values.volumeMounts" }, "volumes": { "$ref": "#/$defs/helm-values.volumes" } }, "type": "object" }, "helm-values.affinity": { "default": {}, "description": "Embed YAML for Node affinity settings, see\nhttps://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/.", "type": "object" }, "helm-values.authentication": { "additionalProperties": false, "properties": { "secretKey": { "$ref": "#/$defs/helm-values.authentication.secretKey" }, "secretName": { "$ref": "#/$defs/helm-values.authentication.secretName" }, "venafiConnection": { "$ref": "#/$defs/helm-values.authentication.venafiConnection" } }, "type": "object" }, "helm-values.authentication.secretKey": { "default": "privatekey.pem", "description": "Key name in the referenced secret", "type": "string" }, "helm-values.authentication.secretName": { "default": "agent-credentials", "description": "Name of the secret containing the private key", "type": "string" }, "helm-values.authentication.venafiConnection": { "additionalProperties": false, "properties": { "enabled": { "$ref": "#/$defs/helm-values.authentication.venafiConnection.enabled" }, "name": { "$ref": "#/$defs/helm-values.authentication.venafiConnection.name" }, "namespace": { "$ref": "#/$defs/helm-values.authentication.venafiConnection.namespace" } }, "type": "object" }, "helm-values.authentication.venafiConnection.enabled": { "default": false, "description": "When set to true, 
the Discovery Agent will authenticate to CyberArk Certificate Manager using the configuration in a VenafiConnection resource. Use `venafiConnection.enabled=true` for [secretless authentication](https://docs.cyberark.com/mis-saas/vaas/k8s-components/t-install-tlspk-agent/). When set to true, the `authentication.secret` values will be ignored and the. Secret with `authentication.secretName` will _not_ be mounted into the\nDiscovery Agent Pod.", "type": "boolean" }, "helm-values.authentication.venafiConnection.name": { "default": "venafi-components", "description": "The name of a VenafiConnection resource which contains the configuration for authenticating to Venafi.", "type": "string" }, "helm-values.authentication.venafiConnection.namespace": { "default": "venafi", "description": "The namespace of a VenafiConnection resource which contains the configuration for authenticating to Venafi.", "type": "string" }, "helm-values.command": { "default": [], "description": "Specify the command to run overriding default binary.", "items": {}, "type": "array" }, "helm-values.config": { "additionalProperties": false, "properties": { "clientId": { "$ref": "#/$defs/helm-values.config.clientId" }, "clusterDescription": { "$ref": "#/$defs/helm-values.config.clusterDescription" }, "clusterName": { "$ref": "#/$defs/helm-values.config.clusterName" }, "configmap": { "$ref": "#/$defs/helm-values.config.configmap" }, "excludeAnnotationKeysRegex": { "$ref": "#/$defs/helm-values.config.excludeAnnotationKeysRegex" }, "excludeLabelKeysRegex": { "$ref": "#/$defs/helm-values.config.excludeLabelKeysRegex" }, "ignoredSecretTypes": { "$ref": "#/$defs/helm-values.config.ignoredSecretTypes" }, "period": { "$ref": "#/$defs/helm-values.config.period" }, "server": { "$ref": "#/$defs/helm-values.config.server" } }, "type": "object" }, "helm-values.config.clientId": { "default": "", "description": "The client-id to be used for authenticating with the Venafi Control. Plane. 
Only useful when using a Key Pair Service Account in the Venafi Control Plane. You can obtain the client ID by creating a Key Pair Service\nAccount in the CyberArk Certificate Manager.", "type": "string" }, "helm-values.config.clusterDescription": { "default": "", "description": "Description for the cluster resource if it needs to be created in Venafi\nControl Plane.", "type": "string" }, "helm-values.config.clusterName": { "default": "", "description": "Name for the cluster resource if it needs to be created in Venafi Control\nPlane.", "type": "string" }, "helm-values.config.configmap": { "additionalProperties": false, "properties": { "key": { "$ref": "#/$defs/helm-values.config.configmap.key" }, "name": { "$ref": "#/$defs/helm-values.config.configmap.name" } }, "type": "object" }, "helm-values.config.configmap.key": {}, "helm-values.config.configmap.name": {}, "helm-values.config.excludeAnnotationKeysRegex": { "default": [], "description": "You can configure Discovery Agent to exclude some annotations or labels from being pushed to the CyberArk Certificate Manager. All Kubernetes objects are affected. The objects are still pushed, but the specified annotations and labels are removed before being sent to the CyberArk Certificate Manager.\n\nDots are the only characters that need to be escaped in the regex. 
Use either double quotes with escaped single quotes or unquoted strings for the regex to avoid YAML parsing issues with `\\.`.\n\nExample: excludeAnnotationKeysRegex: ['^kapp\\.k14s\\.io/original.*']", "items": {}, "type": "array" }, "helm-values.config.excludeLabelKeysRegex": { "default": [], "items": {}, "type": "array" }, "helm-values.config.ignoredSecretTypes": { "items": { "$ref": "#/$defs/helm-values.config.ignoredSecretTypes[0]" }, "type": "array" }, "helm-values.config.ignoredSecretTypes[0]": { "default": "kubernetes.io/service-account-token", "type": "string" }, "helm-values.config.ignoredSecretTypes[1]": { "default": "kubernetes.io/dockercfg", "type": "string" }, "helm-values.config.ignoredSecretTypes[2]": { "default": "kubernetes.io/dockerconfigjson", "type": "string" }, "helm-values.config.ignoredSecretTypes[3]": { "default": "kubernetes.io/basic-auth", "type": "string" }, "helm-values.config.ignoredSecretTypes[4]": { "default": "kubernetes.io/ssh-auth", "type": "string" }, "helm-values.config.ignoredSecretTypes[5]": { "default": "bootstrap.kubernetes.io/token", "type": "string" }, "helm-values.config.ignoredSecretTypes[6]": { "default": "helm.sh/release.v1", "type": "string" }, "helm-values.config.period": { "default": "0h1m0s", "description": "Send data back to the platform every minute unless changed.", "type": "string" }, "helm-values.config.server": { "default": "https://api.venafi.cloud/", "description": "API URL of the CyberArk Certificate Manager API. For EU tenants, set this value to https://api.venafi.eu/. 
If you are using the VenafiConnection authentication method, you must set the API URL using the field `spec.vcp.url` on the\nVenafiConnection resource instead.", "type": "string" }, "helm-values.crds": { "additionalProperties": false, "properties": { "forceRemoveValidationAnnotations": { "$ref": "#/$defs/helm-values.crds.forceRemoveValidationAnnotations" }, "keep": { "$ref": "#/$defs/helm-values.crds.keep" }, "venafiConnection": { "$ref": "#/$defs/helm-values.crds.venafiConnection" } }, "type": "object" }, "helm-values.crds.forceRemoveValidationAnnotations": { "default": false, "description": "The 'x-kubernetes-validations' annotation is not supported in Kubernetes 1.22 and below. This annotation is used by CEL, which is a feature introduced in Kubernetes 1.25 that improves how validation is performed. This option allows forcing the 'x-kubernetes-validations' annotation to be excluded, even on Kubernetes 1.25+ clusters.", "type": "boolean" }, "helm-values.crds.keep": { "default": false, "description": "This option makes it so that the \"helm.sh/resource-policy\": keep annotation is added to the CRD. This will prevent Helm from uninstalling the CRD when the Helm release is uninstalled.", "type": "boolean" }, "helm-values.crds.venafiConnection": { "additionalProperties": false, "properties": { "include": { "$ref": "#/$defs/helm-values.crds.venafiConnection.include" } }, "type": "object" }, "helm-values.crds.venafiConnection.include": { "default": false, "description": "When set to false, the rendered output does not contain the VenafiConnection CRDs and RBAC. This is useful for when the Venafi Connection resources are already installed separately.", "type": "boolean" }, "helm-values.extraArgs": { "default": [], "description": "Specify additional arguments to pass to the agent binary. 
For example, to enable JSON logging use `--logging-format`, or to increase the logging verbosity use `--log-level`.\nThe log levels are: 0=Info, 1=Debug, 2=Trace.\nUse 6-9 for increasingly verbose HTTP request logging.\nThe default log level is 0.\n\nExample:\nextraArgs:\n- --logging-format=json\n- --log-level=6 # To enable HTTP request logging", "items": {}, "type": "array" }, "helm-values.fullnameOverride": { "default": "", "description": "Helm default setting, use this to shorten the full install name.", "type": "string" }, "helm-values.global": { "description": "Global values shared across all (sub)charts" }, "helm-values.http_proxy": { "description": "Configures the HTTP_PROXY environment variable where a HTTP proxy is required.", "type": "string" }, "helm-values.https_proxy": { "description": "Configures the HTTPS_PROXY environment variable where a HTTP proxy is required.", "type": "string" }, "helm-values.image": { "additionalProperties": false, "properties": { "digest": { "$ref": "#/$defs/helm-values.image.digest" }, "name": { "$ref": "#/$defs/helm-values.image.name" }, "pullPolicy": { "$ref": "#/$defs/helm-values.image.pullPolicy" }, "registry": { "$ref": "#/$defs/helm-values.image.registry" }, "repository": { "$ref": "#/$defs/helm-values.image.repository" }, "tag": { "$ref": "#/$defs/helm-values.image.tag" } }, "type": "object" }, "helm-values.image.digest": { "default": "", "description": "Override the image digest to deploy by setting this variable. 
If set together with `image.tag`, the rendered image will include both tag and digest.", "type": "string" }, "helm-values.image.name": { "default": "venafi-agent", "description": "The image name for the Discovery Agent.\nThis is used (together with `imageRegistry` and `imageNamespace`) to construct the full image reference.", "type": "string" }, "helm-values.image.pullPolicy": { "default": "IfNotPresent", "description": "Kubernetes imagePullPolicy on Deployment.", "type": "string" }, "helm-values.image.registry": { "description": "Deprecated: per-component registry prefix.\n\nIf set, this value is *prepended* to the image repository that the chart would otherwise render. This applies both when `image.repository` is set and when the repository is computed from\n`imageRegistry` + `imageNamespace` + `image.name`.\n\nThis can produce \"double registry\" style references such as\n`legacy.example.io/registry.venafi.cloud/venafi-agent/...`. Prefer using the global\n`imageRegistry`/`imageNamespace` values.", "type": "string" }, "helm-values.image.repository": { "default": "", "description": "Full repository override (takes precedence over `imageRegistry`, `imageNamespace`, and `image.name`). Example: registry.venafi.cloud/venafi-agent/venafi-agent", "type": "string" }, "helm-values.image.tag": { "default": "", "description": "Override the image tag to deploy by setting this variable. If no value is set, the chart's appVersion is used.", "type": "string" }, "helm-values.imageNamespace": { "default": "venafi-agent", "description": "The repository namespace used for venafi-kubernetes-agent images by default.\nExamples:\n- venafi-agent\n- custom-namespace", "type": "string" }, "helm-values.imagePullSecrets": { "default": [], "description": "Specify image pull credentials if using a private registry. 
Example:\n - name: my-pull-secret", "items": {}, "type": "array" }, "helm-values.imageRegistry": { "default": "registry.venafi.cloud", "description": "The container registry used for venafi-kubernetes-agent images by default. This can include path prefixes (e.g. \"artifactory.example.com/docker\").", "type": "string" }, "helm-values.metrics": { "additionalProperties": false, "properties": { "enabled": { "$ref": "#/$defs/helm-values.metrics.enabled" }, "podmonitor": { "$ref": "#/$defs/helm-values.metrics.podmonitor" } }, "type": "object" }, "helm-values.metrics.enabled": { "default": true, "description": "Enable the metrics server.\nIf false, the metrics server will be disabled and the other metrics fields below will be ignored.", "type": "boolean" }, "helm-values.metrics.podmonitor": { "additionalProperties": false, "properties": { "annotations": { "$ref": "#/$defs/helm-values.metrics.podmonitor.annotations" }, "enabled": { "$ref": "#/$defs/helm-values.metrics.podmonitor.enabled" }, "endpointAdditionalProperties": { "$ref": "#/$defs/helm-values.metrics.podmonitor.endpointAdditionalProperties" }, "honorLabels": { "$ref": "#/$defs/helm-values.metrics.podmonitor.honorLabels" }, "interval": { "$ref": "#/$defs/helm-values.metrics.podmonitor.interval" }, "labels": { "$ref": "#/$defs/helm-values.metrics.podmonitor.labels" }, "namespace": { "$ref": "#/$defs/helm-values.metrics.podmonitor.namespace" }, "prometheusInstance": { "$ref": "#/$defs/helm-values.metrics.podmonitor.prometheusInstance" }, "scrapeTimeout": { "$ref": "#/$defs/helm-values.metrics.podmonitor.scrapeTimeout" } }, "type": "object" }, "helm-values.metrics.podmonitor.annotations": { "default": {}, "description": "Additional annotations to add to the PodMonitor.", "type": "object" }, "helm-values.metrics.podmonitor.enabled": { "default": false, "description": "Create a PodMonitor to add the metrics to Prometheus, if you are using Prometheus Operator. 
See https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor", "type": "boolean" }, "helm-values.metrics.podmonitor.endpointAdditionalProperties": { "default": {}, "description": "EndpointAdditionalProperties allows setting additional properties on the endpoint such as relabelings, metricRelabelings etc.\n\nFor example:\nendpointAdditionalProperties:\n relabelings:\n - action: replace\n sourceLabels:\n - __meta_kubernetes_pod_node_name\n targetLabel: instance", "type": "object" }, "helm-values.metrics.podmonitor.honorLabels": { "default": false, "description": "Keep labels from scraped data, overriding server-side labels.", "type": "boolean" }, "helm-values.metrics.podmonitor.interval": { "default": "60s", "description": "The interval to scrape metrics.", "type": "string" }, "helm-values.metrics.podmonitor.labels": { "default": {}, "description": "Additional labels to add to the PodMonitor.", "type": "object" }, "helm-values.metrics.podmonitor.namespace": { "description": "The namespace that the pod monitor should live in. Defaults to the venafi-kubernetes-agent namespace.", "type": "string" }, "helm-values.metrics.podmonitor.prometheusInstance": { "default": "default", "description": "Specifies the `prometheus` label on the created PodMonitor. 
This is used when different Prometheus instances have label selectors matching different PodMonitors.", "type": "string" }, "helm-values.metrics.podmonitor.scrapeTimeout": { "default": "30s", "description": "The timeout before a metrics scrape fails.", "type": "string" }, "helm-values.nameOverride": { "default": "", "description": "Helm default setting to override release name, usually leave blank.", "type": "string" }, "helm-values.no_proxy": { "description": "Configures the NO_PROXY environment variable where a HTTP proxy is required, but certain domains should be excluded.", "type": "string" }, "helm-values.nodeSelector": { "default": {}, "description": "Embed YAML for nodeSelector settings, see\nhttps://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/", "type": "object" }, "helm-values.podAnnotations": { "default": {}, "description": "Additional YAML annotations to add to the pod.", "type": "object" }, "helm-values.podDisruptionBudget": { "additionalProperties": false, "properties": { "enabled": { "$ref": "#/$defs/helm-values.podDisruptionBudget.enabled" }, "maxUnavailable": { "$ref": "#/$defs/helm-values.podDisruptionBudget.maxUnavailable" }, "minAvailable": { "$ref": "#/$defs/helm-values.podDisruptionBudget.minAvailable" } }, "type": "object" }, "helm-values.podDisruptionBudget.enabled": { "default": false, "description": "Enable or disable the PodDisruptionBudget resource, which helps prevent downtime during voluntary disruptions such as during a Node upgrade.", "type": "boolean" }, "helm-values.podDisruptionBudget.maxUnavailable": { "description": "Configure the maximum unavailable pods for disruptions. Can either be set to an integer (e.g. 1) or a percentage value (e.g. 25%).\nCannot be used if `minAvailable` is set.", "type": "number" }, "helm-values.podDisruptionBudget.minAvailable": { "description": "Configure the minimum available pods for disruptions. Can either be set to an integer (e.g. 1) or a percentage value (e.g. 
25%).\nCannot be used if `maxUnavailable` is set.", "type": "number" }, "helm-values.podSecurityContext": { "default": {}, "description": "Optional Pod (all containers) `SecurityContext` options, see https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod.\n\nExample:\n\n podSecurityContext\nrunAsUser: 1000\nrunAsGroup: 3000\nfsGroup: 2000", "type": "object" }, "helm-values.replicaCount": { "default": 1, "description": "default replicas, do not scale up", "type": "number" }, "helm-values.resources": { "default": { "limits": { "memory": "500Mi" }, "requests": { "cpu": "200m", "memory": "200Mi" } }, "description": "Set resource requests and limits for the pod.\n\nRead [Venafi Kubernetes components deployment best practices](https://docs.cyberark.com/mis-saas/vaas/k8s-components/c-k8s-components-best-practice/#scaling) to learn how to choose suitable CPU and memory resource requests and limits.", "type": "object" }, "helm-values.securityContext": { "default": { "allowPrivilegeEscalation": false, "capabilities": { "drop": [ "ALL" ] }, "readOnlyRootFilesystem": true, "runAsNonRoot": true, "seccompProfile": { "type": "RuntimeDefault" } }, "description": "Add Container specific SecurityContext settings to the container. Takes precedence over `podSecurityContext` when set. 
See https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container", "type": "object" }, "helm-values.serviceAccount": { "additionalProperties": false, "properties": { "annotations": { "$ref": "#/$defs/helm-values.serviceAccount.annotations" }, "create": { "$ref": "#/$defs/helm-values.serviceAccount.create" }, "name": { "$ref": "#/$defs/helm-values.serviceAccount.name" } }, "type": "object" }, "helm-values.serviceAccount.annotations": { "default": {}, "description": "Annotations YAML to add to the service account.", "type": "object" }, "helm-values.serviceAccount.create": { "default": true, "description": "Specifies whether a service account should be created.", "type": "boolean" }, "helm-values.serviceAccount.name": { "default": "", "description": "The name of the service account to use. If blank and `serviceAccount.create` is true, a name is generated using the fullname template of the release.", "type": "string" }, "helm-values.tolerations": { "default": [], "description": "Embed YAML for toleration settings, see\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/", "items": {}, "type": "array" }, "helm-values.volumeMounts": { "default": [], "description": "Additional volume mounts to add to the Discovery Agent container. This is useful for mounting a custom CA bundle. Any PEM certificate mounted under /etc/ssl/certs will be loaded by the Discovery Agent. For\nexample:\n\nvolumeMounts:\n - name: cabundle\n mountPath: /etc/ssl/certs/cabundle\n subPath: cabundle\n readOnly: true", "items": {}, "type": "array" }, "helm-values.volumes": { "default": [], "description": "Additional volumes to add to the Discovery Agent container. This is useful for mounting a custom CA bundle. 
For example:\nvolumes:\n - name: cabundle\n configMap:\n name: cabundle\n optional: false\n defaultMode: 0644\nIn order to create the ConfigMap, you can use the following command:\n\n kubectl create configmap cabundle \\\n --from-file=cabundle=./your/custom/ca/bundle.pem", "items": {}, "type": "array" } }, "$ref": "#/$defs/helm-values", "$schema": "http://json-schema.org/draft-07/schema#" } ================================================ FILE: deploy/charts/venafi-kubernetes-agent/values.yaml ================================================ # Default values for jetstack-agent. # This is a YAML-formatted file. # Declare variables to be passed into your templates. metrics: # Enable the metrics server. # If false, the metrics server will be disabled and the other metrics fields below will be ignored. enabled: true podmonitor: # Create a PodMonitor to add the metrics to Prometheus, if you are using Prometheus Operator. # See https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor enabled: false # The namespace that the pod monitor should live in. # Defaults to the venafi-kubernetes-agent namespace. # +docs:property # namespace: venafi # Specifies the `prometheus` label on the created PodMonitor. # This is used when different Prometheus instances have label selectors # matching different PodMonitors. prometheusInstance: default # The interval to scrape metrics. interval: 60s # The timeout before a metrics scrape fails. scrapeTimeout: 30s # Additional labels to add to the PodMonitor. labels: {} # Additional annotations to add to the PodMonitor. annotations: {} # Keep labels from scraped data, overriding server-side labels. honorLabels: false # EndpointAdditionalProperties allows setting additional properties on the endpoint such as relabelings, metricRelabelings etc. 
# # For example: # endpointAdditionalProperties: # relabelings: # - action: replace # sourceLabels: # - __meta_kubernetes_pod_node_name # targetLabel: instance endpointAdditionalProperties: {} # default replicas, do not scale up replicaCount: 1 # The container registry used for venafi-kubernetes-agent images by default. # This can include path prefixes (e.g. "artifactory.example.com/docker"). # +docs:property imageRegistry: registry.venafi.cloud # The repository namespace used for venafi-kubernetes-agent images by default. # Examples: # - venafi-agent # - custom-namespace # +docs:property imageNamespace: venafi-agent image: # Deprecated: per-component registry prefix. # # If set, this value is *prepended* to the image repository that the chart would otherwise render. # This applies both when `image.repository` is set and when the repository is computed from # `imageRegistry` + `imageNamespace` + `image.name`. # # This can produce "double registry" style references such as # `legacy.example.io/registry.venafi.cloud/venafi-agent/...`. Prefer using the global # `imageRegistry`/`imageNamespace` values. # +docs:property # registry: registry.venafi.cloud # Full repository override (takes precedence over `imageRegistry`, `imageNamespace`, # and `image.name`). # Example: registry.venafi.cloud/venafi-agent/venafi-agent # +docs:property repository: "" # The image name for the Discovery Agent. # This is used (together with `imageRegistry` and `imageNamespace`) to construct the full # image reference. # +docs:property name: venafi-agent # Kubernetes imagePullPolicy on Deployment. pullPolicy: IfNotPresent # Override the image tag to deploy by setting this variable. # If no value is set, the chart's appVersion is used. tag: "" # Override the image digest to deploy by setting this variable. # If set together with `image.tag`, the rendered image will include both tag and digest. digest: "" # Specify image pull credentials if using a private registry. 
Example: # - name: my-pull-secret imagePullSecrets: [] # Helm default setting to override release name, usually leave blank. nameOverride: "" # Helm default setting, use this to shorten the full install name. fullnameOverride: "" serviceAccount: # Specifies whether a service account should be created. create: true # Annotations YAML to add to the service account. annotations: {} # The name of the service account to use. If blank and `serviceAccount.create` # is true, a name is generated using the fullname template of the release. name: "" # Additional YAML annotations to add to the pod. podAnnotations: {} # Optional Pod (all containers) `SecurityContext` options, see # https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod. # # Example: # # podSecurityContext: # runAsUser: 1000 # runAsGroup: 3000 # fsGroup: 2000 podSecurityContext: {} # Use these variables to configure the HTTP_PROXY environment variables. # Configures the HTTP_PROXY environment variable where a HTTP proxy is required. # +docs:property # http_proxy: "http://proxy:8080" # Configures the HTTPS_PROXY environment variable where a HTTP proxy is required. # +docs:property # https_proxy: "https://proxy:8080" # Configures the NO_PROXY environment variable where a HTTP proxy is required, # but certain domains should be excluded. # +docs:property # no_proxy: 127.0.0.1,localhost # Add Container specific SecurityContext settings to the container. Takes # precedence over `podSecurityContext` when set. See # https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container # +docs:property securityContext: capabilities: drop: - ALL readOnlyRootFilesystem: true runAsNonRoot: true allowPrivilegeEscalation: false seccompProfile: { type: RuntimeDefault } # Set resource requests and limits for the pod.
# # Read [Venafi Kubernetes components deployment best # practices](https://docs.cyberark.com/mis-saas/vaas/k8s-components/c-k8s-components-best-practice/#scaling) # to learn how to choose suitable CPU and memory resource requests and limits. # +docs:property resources: requests: memory: 200Mi cpu: 200m limits: memory: 500Mi # Embed YAML for nodeSelector settings, see # https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/ nodeSelector: {} # Embed YAML for toleration settings, see # https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ tolerations: [] # Embed YAML for Node affinity settings, see # https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/. affinity: {} # Specify the command to run overriding default binary. command: [] # Specify additional arguments to pass to the agent binary. # For example, to enable JSON logging use `--logging-format`, or # to increase the logging verbosity use `--log-level`. # The log levels are: 0=Info, 1=Debug, 2=Trace. # Use 6-9 for increasingly verbose HTTP request logging. # The default log level is 0. # # Example: # extraArgs: # - --logging-format=json # - --log-level=6 # To enable HTTP request logging extraArgs: [] # Additional volumes to add to the Discovery Agent container. This is # useful for mounting a custom CA bundle. For example: # # volumes: # - name: cabundle # configMap: # name: cabundle # optional: false # defaultMode: 0644 # # In order to create the ConfigMap, you can use the following command: # # kubectl create configmap cabundle \ # --from-file=cabundle=./your/custom/ca/bundle.pem volumes: [] # Additional volume mounts to add to the Discovery Agent container. # This is useful for mounting a custom CA bundle. Any PEM certificate mounted # under /etc/ssl/certs will be loaded by the Discovery Agent. 
For # example: # # volumeMounts: # - name: cabundle # mountPath: /etc/ssl/certs/cabundle # subPath: cabundle # readOnly: true volumeMounts: [] # Authentication details for the Discovery Agent authentication: # Name of the secret containing the private key secretName: agent-credentials # Key name in the referenced secret secretKey: "privatekey.pem" # +docs:section=Venafi Connection # Configure VenafiConnection authentication venafiConnection: # When set to true, the Discovery Agent will authenticate to CyberArk Certificate Manager # using the configuration in a VenafiConnection resource. # Use `venafiConnection.enabled=true` for [secretless authentication](https://docs.cyberark.com/mis-saas/vaas/k8s-components/t-install-tlspk-agent/). # When set to true, the `authentication.secret` values will be ignored and the # Secret with `authentication.secretName` will _not_ be mounted into the # Discovery Agent Pod. enabled: false # The name of a VenafiConnection resource which contains the configuration # for authenticating to Venafi. name: venafi-components # The namespace of a VenafiConnection resource which contains the # configuration for authenticating to Venafi. namespace: venafi # Configuration section for the Discovery Agent itself config: # API URL of the CyberArk Certificate Manager API. For EU tenants, set this value to # https://api.venafi.eu/. If you are using the VenafiConnection authentication # method, you must set the API URL using the field `spec.vcp.url` on the # VenafiConnection resource instead. server: "https://api.venafi.cloud/" # The client-id to be used for authenticating with the Venafi Control # Plane. Only useful when using a Key Pair Service Account in the Venafi # Control Plane. You can obtain the client ID by creating a Key Pair Service # Account in the CyberArk Certificate Manager. clientId: "" # Send data back to the platform every minute unless changed.
period: "0h1m0s" # Name for the cluster resource if it needs to be created in Venafi Control # Plane. clusterName: "" # Description for the cluster resource if it needs to be created in Venafi # Control Plane. clusterDescription: "" # Reduce the memory usage of the agent and reduce the load on the Kubernetes # API server by omitting various common Secret types when listing Secrets. # These Secret types will be added to a "type!=" field selector in the # agent config. # * https://docs.cyberark.com/mis-saas/vaas/k8s-components/t-cfg-tlspk-agent/#configuration # * https://kubernetes.io/docs/concepts/configuration/secret/#secret-types # * https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/#list-of-supported-fields ignoredSecretTypes: - kubernetes.io/service-account-token - kubernetes.io/dockercfg - kubernetes.io/dockerconfigjson - kubernetes.io/basic-auth - kubernetes.io/ssh-auth - bootstrap.kubernetes.io/token - helm.sh/release.v1 # You can configure Discovery Agent to exclude some annotations or # labels from being pushed to the CyberArk Certificate Manager. All Kubernetes objects # are affected. The objects are still pushed, but the specified annotations # and labels are removed before being sent to the CyberArk Certificate Manager. # # The dot is the only character that needs to be escaped in the regex. Use either # double quotes with escaped single quotes or unquoted strings for the regex # to avoid YAML parsing issues with `\.`. # # Example: excludeAnnotationKeysRegex: ['^kapp\.k14s\.io/original.*'] excludeAnnotationKeysRegex: [] excludeLabelKeysRegex: [] # Specify ConfigMap details to load config from an existing resource. # This should be blank by default unless you have your own config. configmap: name: key: # Configure a PodDisruptionBudget for the agent's Deployment. If running with multiple # replicas, consider setting podDisruptionBudget.enabled to true.
podDisruptionBudget: # Enable or disable the PodDisruptionBudget resource, which helps prevent downtime # during voluntary disruptions such as during a Node upgrade. enabled: false # Configure the minimum available pods for disruptions. Can either be set to # an integer (e.g. 1) or a percentage value (e.g. 25%). # Cannot be used if `maxUnavailable` is set. # +docs:property # minAvailable: 1 # Configure the maximum unavailable pods for disruptions. Can either be set to # an integer (e.g. 1) or a percentage value (e.g. 25%). # Cannot be used if `minAvailable` is set. # +docs:property # maxUnavailable: 1 # +docs:section=CRDs # The CRDs installed by this chart are annotated with "helm.sh/resource-policy: keep", this # prevents them from being accidentally removed by Helm when this chart is deleted. After # deleting the installed chart, the user still has to manually remove the remaining CRDs. crds: # The 'x-kubernetes-validations' annotation is not supported in Kubernetes 1.22 and below. # This annotation is used by CEL, which is a feature introduced in Kubernetes 1.25 that # improves how validation is performed. # This option allows to force the 'x-kubernetes-validations' annotation to be excluded, # even on Kubernetes 1.25+ clusters. forceRemoveValidationAnnotations: false # This option makes it so that the "helm.sh/resource-policy": keep # annotation is added to the CRD. This will prevent Helm from uninstalling # the CRD when the Helm release is uninstalled. keep: false # Optionally include the VenafiConnection CRDs venafiConnection: # When set to false, the rendered output does not contain the # VenafiConnection CRDs and RBAC. This is useful for when the # Venafi Connection resources are already installed separately. 
include: false ================================================ FILE: docs/datagatherers/k8s-discovery.md ================================================ # k8s-discovery This datagatherer uses the [DiscoveryClient](https://godoc.org/k8s.io/client-go/discovery#DiscoveryClient) to get API server version information. Include the following in your agent config: ``` data-gatherers: - kind: "k8s-discovery" name: "k8s-discovery" ``` or specify a kubeconfig file: ``` data-gatherers: - kind: "k8s-discovery" name: "k8s-discovery" config: kubeconfig: other_kube_config_path ``` ================================================ FILE: docs/datagatherers/k8s-dynamic.md ================================================ # Kubernetes Data Gatherer The Kubernetes dynamic data gatherer collects information about resources stored in the Kubernetes API. ## Data The data gathered depends on your configuration. Resources are selected based on their Group-Version-Kind identifiers, e.g.: * Core resources such as `Service`, use: `k8s/services.v1` * `Ingress`, use: `k8s/ingresses.v1beta1.networking.k8s.io` * Custom resources such as `Certificates`, use: `k8s/certificates.v1alpha2.cert-manager.io` To see an example of the data being gathered, using `k8s/services.v1` is comparable to the output from: ```bash kubectl get services --all-namespaces -o json ``` ## Configuration You can collect different resources using different Group-Version-Kind as below: ```yaml data-gatherers: # basic usage - kind: "k8s-dynamic" name: "k8s/pods" config: resource-type: resource: pods version: v1 # CRD usage - kind: "k8s-dynamic" name: "k8s/certificates.v1alpha2.cert-manager.io" config: resource-type: group: cert-manager.io version: v1alpha2 resource: certificates # you might even want to gather resources from another cluster - kind: "k8s-dynamic" name: "k8s/pods" config: kubeconfig: other_kube_config_path ``` The `kubeconfig` field should point to your Kubernetes config file - this is typically found at
`~/.kube/config`. Preflight will use the context that is active in that config file. ## Permissions The user or service account used by the Kubernetes config to authenticate with the Kubernetes API must have permission to perform `list` and `get` on the resource referenced in the `kind` for that datagatherer. There is an example `ClusterRole` and `ClusterRoleBinding` which can be found in [`./deployment/kubernetes/base/00-rbac.yaml`](./deployment/kubernetes/base/00-rbac.yaml). ## Secrets Secrets can be gathered using the following config: ```yaml - kind: "k8s-dynamic" name: "k8s/secrets" config: resource-type: version: v1 resource: secrets ``` Before Secrets are sent to the Preflight backend, they are redacted so no secret data is transmitted. See [`fieldfilter.go`](./../../pkg/datagatherer/k8s/fieldfilter.go) to see the details of which fields are filtered and which ones are redacted. > **All resources other than Kubernetes Secrets are sent in full, so make sure that you don't store secret information on arbitrary resources.** ## Field Selectors You can use [field selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/#list-of-supported-fields) to include or exclude certain resources. For example, you can reduce the memory usage of the agent and reduce the load on the Kubernetes API server by omitting various common [Secret types](https://kubernetes.io/docs/concepts/configuration/secret/#secret-types) when listing Secrets.
```yaml - kind: "k8s-dynamic" name: "k8s/secrets" config: resource-type: version: v1 resource: secrets field-selectors: - type!=kubernetes.io/service-account-token - type!=kubernetes.io/dockercfg - type!=kubernetes.io/dockerconfigjson - type!=kubernetes.io/basic-auth - type!=kubernetes.io/ssh-auth - type!=bootstrap.kubernetes.io/token - type!=helm.sh/release.v1 ``` ================================================ FILE: docs/datagatherers/local.md ================================================ # Local Data Gatherer The Local data gatherer is intended to be used for reading data for evaluation from the local file system. It can also be used for 'stubbing' remote data sources when testing other data gatherers. ## Configuration Stubbing another datagatherer for testing: ```yaml data-gatherers: - kind: "gke" name: "gke" config: # fetch from local path instead of GKE data-path: ./examples/data/example.json ``` Loading other data as 'local': ```yaml data-gatherers: - kind: "local" name: "local" config: data-path: ./examples/data/example.json ``` ## Data Data is gathered from the local file system - whatever is read from the file is used. ## Permissions Permissions to read the local path.
================================================ FILE: examples/cert-manager-agent.yaml ================================================ organization_id: "my-organization" cluster_id: "my_cluster" schedule: "* * * *" token: xxxx endpoint: protocol: https host: "preflight.jetstack.io" path: "/api/v1/datareadings" data-gatherers: - kind: "k8s-dynamic" name: "k8s/secrets.v1" config: resource-type: version: v1 resource: secrets - kind: "k8s-dynamic" name: "k8s/certificates.v1.cert-manager.io" config: resource-type: group: cert-manager.io version: v1 resource: certificates - kind: "k8s-dynamic" name: "k8s/ingresses.v1.networking.k8s.io" config: resource-type: group: networking.k8s.io version: v1 resource: ingresses - kind: "k8s-dynamic" name: "k8s/certificaterequests.v1.cert-manager.io" config: resource-type: group: cert-manager.io version: v1 resource: certificaterequests ================================================ FILE: examples/echo/example.json ================================================ { "sampledata": 1 } ================================================ FILE: examples/echo/example2.json ================================================ { "sampledata": 1 } ================================================ FILE: examples/localfile/config.yaml ================================================ # No config is required to run the agent with an input file and an output file. 
================================================ FILE: examples/localfile/input.json ================================================ [] ================================================ FILE: examples/machinehub/config.yaml ================================================ # Not used ================================================ FILE: examples/machinehub/input.json ================================================ [ { "data-gatherer": "ark/oidc", "data": { "openid_configuration": { "id_token_signing_alg_values_supported": [ "RS256" ], "issuer": "https://kubernetes.default.svc.cluster.local", "jwks_uri": "https://10.10.1.2:6443/openid/v1/jwks", "response_types_supported": [ "id_token" ], "subject_types_supported": [ "public" ] }, "jwks": { "keys": [ { "alg": "RS256", "e": "AQAB", "kid": "C-2916LkMJqepqULK2nqhq6uzVB6So_yyGnqyuor71Q", "kty": "RSA", "n": "sYh6rDpl5DyzBk8qlnYXo6Sf9WbplnXJv3tPxWTvhCFsVu9G5oWjknkafVDq5UOJrlybJJNjBmUyiEi1wbdnuhceJS7rZ3sRnNp3aNoS0omCR6iHJCOuoboSlcaPuRmYw4oWXlVUXlKyw8PYPVbNCcTLuq9nqf8y33mIqe7XJsf5-Z5P05WbK9Rzj-SJvlZLQ4dSFtIiwqLkm_2fpRLj0d8Af1F6vuztnhhUE2_PDsfIWdl_kJKkrK3B5x7k5tgTyFrNQPzlRBgK9jmK0HskwAFIDaLKb7FUWuUiQjn94rjKCED4iy201YPAoZBKIHFDlFVkQ_S3quwPcRyOS18r7w", "use": "sig" } ] } } }, { "data-gatherer": "ark/discovery", "data": { "cluster_id": "0e069229-d83b-4075-a4c8-95838ff5c437", "server_version": { "gitVersion": "v1.27.6" } } }, { "data-gatherer": "ark/secrets", "data": { "items": [ { "resource": { "kind": "Secret", "apiVersion": "v1", "metadata": { "name": "app-1-secret-1", "namespace": "team-1" } } }, { "deleted_at": "2024-06-10T12:00:00Z", "resource": { "kind": "Secret", "apiVersion": "v1", "metadata": { "name": "deleted-secret-1", "namespace": "team-2" } } } ] } }, { "data-gatherer": "ark/pods", "data": { "items": [ { "resource": { "kind": "Pod", "apiVersion": "v1", "metadata": { "name": "app-1-pod-1", "namespace": "team-1" } } }, { "deleted_at": "2024-06-10T12:00:00Z", "resource": { "kind": "Pod", "apiVersion": "v1", 
"metadata": { "name": "deleted-pod-1", "namespace": "team-2" } } } ] } }, { "data-gatherer": "ark/statefulsets", "data": { "items": [] } }, { "data-gatherer": "ark/deployments", "data": { "items": [] } }, { "data-gatherer": "ark/clusterroles", "data": { "items": [] } }, { "data-gatherer": "ark/roles", "data": { "items": [] } }, { "data-gatherer": "ark/clusterrolebindings", "data": { "items": [] } }, { "data-gatherer": "ark/rolebindings", "data": { "items": [] } }, { "data-gatherer": "ark/cronjobs", "data": { "items": [] } }, { "data-gatherer": "ark/jobs", "data": { "items": [] } }, { "data-gatherer": "ark/daemonsets", "data": { "items": [] } }, { "data-gatherer": "ark/serviceaccounts", "data": { "items": [] } }, { "data-gatherer": "ark/configmaps", "data": { "items": [] } }, { "data-gatherer": "ark/esoexternalsecrets", "data": { "items": [] } }, { "data-gatherer": "ark/esosecretstores", "data": { "items": [] } }, { "data-gatherer": "ark/esoclusterexternalsecrets", "data": { "items": [] } }, { "data-gatherer": "ark/esoclustersecretstores", "data": { "items": [] } } ] ================================================ FILE: examples/machinehub.yaml ================================================ # An example agent config for MachineHub output mode. # # For example: # # export ARK_SUBDOMAIN= # your CyberArk tenant subdomain # export ARK_USERNAME= # your CyberArk username # export ARK_SECRET= # your CyberArk password # # OPTIONAL: the URL for the CyberArk Discovery API if not using the production environment # # export ARK_DISCOVERY_API=https://platform-discovery.integration-cyberark.cloud/api/v2 # # go run . 
agent --one-shot --machine-hub -v 6 --agent-config-file ./examples/machinehub.yaml data-gatherers: # Gather Kubernetes OIDC information - name: ark/oidc kind: oidc # Gather Kubernetes API server version information - name: ark/discovery kind: k8s-discovery # Gather Kubernetes secrets, excluding specific types - name: ark/secrets kind: k8s-dynamic config: resource-type: version: v1 resource: secrets field-selectors: - type!=kubernetes.io/dockercfg - type!=kubernetes.io/dockerconfigjson - type!=bootstrap.kubernetes.io/token - type!=helm.sh/release.v1 # Gather Kubernetes service accounts - name: ark/serviceaccounts kind: k8s-dynamic config: resource-type: resource: serviceaccounts version: v1 # Gather Kubernetes roles - name: ark/roles kind: k8s-dynamic config: resource-type: version: v1 group: rbac.authorization.k8s.io resource: roles # Gather Kubernetes cluster roles - name: ark/clusterroles kind: k8s-dynamic config: resource-type: version: v1 group: rbac.authorization.k8s.io resource: clusterroles # Gather Kubernetes role bindings - name: ark/rolebindings kind: k8s-dynamic config: resource-type: version: v1 group: rbac.authorization.k8s.io resource: rolebindings # Gather Kubernetes cluster role bindings - name: ark/clusterrolebindings kind: k8s-dynamic config: resource-type: version: v1 group: rbac.authorization.k8s.io resource: clusterrolebindings # Gather Kubernetes jobs - name: ark/jobs kind: k8s-dynamic config: resource-type: version: v1 group: batch resource: jobs # Gather Kubernetes cron jobs - name: ark/cronjobs kind: k8s-dynamic config: resource-type: version: v1 group: batch resource: cronjobs # Gather Kubernetes deployments - name: ark/deployments kind: k8s-dynamic config: resource-type: version: v1 group: apps resource: deployments # Gather Kubernetes stateful sets - name: ark/statefulsets kind: k8s-dynamic config: resource-type: version: v1 group: apps resource: statefulsets # Gather Kubernetes daemon sets - name: ark/daemonsets kind: k8s-dynamic 
config: resource-type: version: v1 group: apps resource: daemonsets # Gather Kubernetes pods - name: ark/pods kind: k8s-dynamic config: resource-type: version: v1 resource: pods # Gather Kubernetes config maps with specific conjur.org label - name: ark/configmaps kind: k8s-dynamic config: resource-type: resource: configmaps version: v1 label-selectors: - conjur.org/name=conjur-connect-configmap # Gather External Secrets Operator ExternalSecret resources - name: ark/esoexternalsecrets kind: k8s-dynamic config: resource-type: group: external-secrets.io version: v1 resource: externalsecrets # Gather External Secrets Operator SecretStore resources - name: ark/esosecretstores kind: k8s-dynamic config: resource-type: group: external-secrets.io version: v1 resource: secretstores # Gather External Secrets Operator ClusterExternalSecret resources - name: ark/esoclusterexternalsecrets kind: k8s-dynamic config: resource-type: group: external-secrets.io version: v1 resource: clusterexternalsecrets # Gather External Secrets Operator ClusterSecretStore resources - name: ark/esoclustersecretstores kind: k8s-dynamic config: resource-type: group: external-secrets.io version: v1 resource: clustersecretstores ================================================ FILE: examples/one-shot-oidc.yaml ================================================ # one-shot-oidc.yaml # # An example configuration file which can be used for local testing. # For example: # # go run . agent \ # --agent-config-file examples/one-shot-oidc.yaml \ # --one-shot \ # --output-path output.json # organization_id: "my-organization" cluster_id: "my_cluster" period: 1m data-gatherers: - kind: "oidc" name: "ark/oidc" ================================================ FILE: examples/one-shot-secret.yaml ================================================ # one-shot-secret.yaml # # An example configuration file which can be used for local testing. # It gathers only secrets and it does not attempt to upload to Venafi. 
# For example: # # go run . agent \ # --agent-config-file examples/one-shot-secret.yaml \ # --one-shot \ # --output-path output.json # organization_id: "my-organization" cluster_id: "my_cluster" period: 1m data-gatherers: - kind: "k8s-dynamic" name: "k8s/secrets" config: resource-type: version: v1 resource: secrets field-selectors: - type!=kubernetes.io/service-account-token - type!=kubernetes.io/dockercfg - type!=kubernetes.io/dockerconfigjson - type!=kubernetes.io/basic-auth - type!=kubernetes.io/ssh-auth, - type!=bootstrap.kubernetes.io/token - type!=helm.sh/release.v1 ================================================ FILE: go.mod ================================================ // TODO(wallrj): Rename the Go module to match the repository name module github.com/jetstack/preflight go 1.24.4 require ( github.com/Venafi/vcert/v5 v5.12.2 github.com/cenkalti/backoff/v5 v5.0.3 github.com/fatih/color v1.18.0 github.com/google/uuid v1.6.0 github.com/hashicorp/go-multierror v1.1.1 github.com/jetstack/venafi-connection-lib v0.5.2 github.com/lestrrat-go/jwx/v3 v3.0.13 github.com/microcosm-cc/bluemonday v1.0.27 github.com/pmylund/go-cache v2.1.0+incompatible github.com/prometheus/client_golang v1.23.2 github.com/spf13/cobra v1.10.2 github.com/spf13/pflag v1.0.10 github.com/stretchr/testify v1.11.1 golang.org/x/sync v0.19.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.34.3 k8s.io/apimachinery v0.34.3 k8s.io/client-go v0.34.3 k8s.io/component-base v0.34.3 sigs.k8s.io/controller-runtime v0.22.4 sigs.k8s.io/yaml v1.6.0 ) require ( cel.dev/expr v0.24.0 // indirect github.com/Khan/genqlient v0.8.1 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/aymerick/douceur v0.2.0 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/fxamacker/cbor/v2 v2.9.0 // indirect 
github.com/go-http-utils/headers v0.0.0-20181008091004-fed159eddc2a // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go418/concurrentcache v0.6.0 // indirect github.com/go418/concurrentcache/logger v0.0.0-20250207095056-c0b7f8cc8bc2 // indirect github.com/goccy/go-json v0.10.3 // indirect github.com/golang-jwt/jwt/v5 v5.3.0 // indirect github.com/google/btree v1.1.3 // indirect github.com/google/cel-go v0.26.0 // indirect github.com/google/gnostic-models v0.7.0 // indirect github.com/gorilla/css v1.0.1 // indirect github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/lestrrat-go/blackmagic v1.0.4 // indirect github.com/lestrrat-go/httpcc v1.0.1 // indirect github.com/lestrrat-go/httprc/v3 v3.0.2 // indirect github.com/lestrrat-go/option/v2 v2.0.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/segmentio/asm v1.2.1 // indirect github.com/sosodev/duration v1.3.1 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/vektah/gqlparser/v2 v2.5.30 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect go.opentelemetry.io/otel v1.35.0 // indirect go.opentelemetry.io/otel/trace v1.35.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/crypto v0.46.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect golang.org/x/net v0.47.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect 
k8s.io/apiextensions-apiserver v0.34.3 // indirect k8s.io/apiserver v0.34.3 // indirect sigs.k8s.io/randfill v1.0.0 // indirect sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect ) require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.12.2 // indirect github.com/go-logr/logr v1.4.3 github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.2 github.com/google/go-cmp v0.7.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.66.1 // indirect github.com/prometheus/procfs v0.16.1 // indirect golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/sys v0.39.0 // indirect golang.org/x/term v0.38.0 // indirect golang.org/x/text v0.32.0 // indirect golang.org/x/time v0.9.0 // indirect google.golang.org/protobuf v1.36.8 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 k8s.io/klog/v2 v2.130.1 k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect ) ================================================ FILE: go.sum 
================================================ cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= github.com/Khan/genqlient v0.8.1 h1:wtOCc8N9rNynRLXN3k3CnfzheCUNKBcvXmVv5zt6WCs= github.com/Khan/genqlient v0.8.1/go.mod h1:R2G6DzjBvCbhjsEajfRjbWdVglSH/73kSivC9TLWVjU= github.com/Venafi/vcert/v5 v5.12.2 h1:Ee3/A9fZRiisuwuz22/Nqgl19H0ztQjWv35AC63qPcA= github.com/Venafi/vcert/v5 v5.12.2/go.mod h1:x3l0pB0q0E6wuhPe7nzfkUEwwraK7amnBWQ4LtT1bbw= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= 
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/go-http-utils/headers v0.0.0-20181008091004-fed159eddc2a h1:v6zMvHuY9yue4+QkG/HQ/W67wvtQmWJ4SDo9aK/GIno= github.com/go-http-utils/headers v0.0.0-20181008091004-fed159eddc2a/go.mod h1:I79BieaU4fxrw4LMXby6q5OS9XnoR9UIKLOzDFjUmuw= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go418/concurrentcache v0.6.0 h1:36A7j+c0dChEAMotq+lBQwQPyI4CMCy5HgMCcw8sY1g= github.com/go418/concurrentcache v0.6.0/go.mod 
h1:F498AylMP488QhU9KSE8VoN3u2FhGt7hXOgJ2CdvysM= github.com/go418/concurrentcache/logger v0.0.0-20250207095056-c0b7f8cc8bc2 h1:wVvBhfD+7srZ470Z06t5rp93faukGddvUJR4+owL0Kw= github.com/go418/concurrentcache/logger v0.0.0-20250207095056-c0b7f8cc8bc2/go.mod h1:DpmmUFByr4p8fGMbp2gsGJhqgcP1SXjyVZDiW0f8aSY= github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/goccy/go-yaml v1.19.0 h1:EmkZ9RIsX+Uq4DYFowegAuJo8+xdX3T/2dwNPXbxEYE= github.com/goccy/go-yaml v1.19.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI= github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM= github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod 
h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8= github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/inconshreveable/mousetrap v1.1.0 
h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jetstack/venafi-connection-lib v0.5.2 h1:Mzn8PANYQc5mBPHOhgkTW0VsvnKJsQmO+WcAjDwoR8E= github.com/jetstack/venafi-connection-lib v0.5.2/go.mod h1:0seQ/uP6MpB3KVMxf56jUzs/HBVpmRQLKU3Juak9p3Q= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lestrrat-go/blackmagic v1.0.4 h1:IwQibdnf8l2KoO+qC3uT4OaTWsW7tuRQXy9TRN9QanA= github.com/lestrrat-go/blackmagic v1.0.4/go.mod h1:6AWFyKNNj0zEXQYfTMPfZrAXUWUfTIZ5ECEUEJaijtw= github.com/lestrrat-go/dsig v1.0.0 h1:OE09s2r9Z81kxzJYRn07TFM9XA4akrUdoMwr0L8xj38= github.com/lestrrat-go/dsig v1.0.0/go.mod h1:dEgoOYYEJvW6XGbLasr8TFcAxoWrKlbQvmJgCR0qkDo= github.com/lestrrat-go/dsig-secp256k1 v1.0.0 h1:JpDe4Aybfl0soBvoVwjqDbp+9S1Y2OM7gcrVVMFPOzY= 
github.com/lestrrat-go/dsig-secp256k1 v1.0.0/go.mod h1:CxUgAhssb8FToqbL8NjSPoGQlnO4w3LG1P0qPWQm/NU= github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE= github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= github.com/lestrrat-go/httprc/v3 v3.0.2 h1:7u4HUaD0NQbf2/n5+fyp+T10hNCsAnwKfqn4A4Baif0= github.com/lestrrat-go/httprc/v3 v3.0.2/go.mod h1:mSMtkZW92Z98M5YoNNztbRGxbXHql7tSitCvaxvo9l0= github.com/lestrrat-go/jwx/v3 v3.0.13 h1:AdHKiPIYeCSnOJtvdpipPg/0SuFh9rdkN+HF3O0VdSk= github.com/lestrrat-go/jwx/v3 v3.0.13/go.mod h1:2m0PV1A9tM4b/jVLMx8rh6rBl7F6WGb3EG2hufN9OQU= github.com/lestrrat-go/option/v2 v2.0.0 h1:XxrcaJESE1fokHy3FpaQ/cXW8ZsIdWcdFzzLOcID3Ss= github.com/lestrrat-go/option/v2 v2.0.0/go.mod h1:oSySsmzMoR0iRzCDCaUfsCzxQHUEuhOViQObyy7S6Vg= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk= github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmylund/go-cache v2.1.0+incompatible h1:n+7K51jLz6a3sCvff3BppuCAkixuDHuJ/C57Vw/XjTE= github.com/pmylund/go-cache v2.1.0+incompatible/go.mod h1:hmz95dGvINpbRZGsqPcd7B5xXY5+EKb5PpGhQY3NTHk= github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.66.1 
h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/segmentio/asm v1.2.1 h1:DTNbBqs57ioxAD4PrArqftgypG4/qNpXoJx8TVXxPR0= github.com/segmentio/asm v1.2.1/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= github.com/sosodev/duration v1.3.1 h1:qtHBDMQ6lvMQsL15g4aopM4HEfOaYuhWBw3NPTtlqq4= github.com/sosodev/duration v1.3.1/go.mod h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERAikUR6SDg= github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 
h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/valyala/fastjson v1.6.7 h1:ZE4tRy0CIkh+qDc5McjatheGX2czdn8slQjomexVpBM= github.com/valyala/fastjson v1.6.7/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= github.com/vektah/gqlparser/v2 v2.5.30 h1:EqLwGAFLIzt1wpx1IPpY67DwUujF1OfzgEyDsLrN6kE= github.com/vektah/gqlparser/v2 v2.5.30/go.mod h1:D1/VCZtV3LPnQrcPBeR/q5jkSQIPti0uYCP/RI0gIeo= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM= github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/etcd/api/v3 v3.6.4 h1:7F6N7toCKcV72QmoUKa23yYLiiljMrT4xCeBL9BmXdo= go.etcd.io/etcd/api/v3 v3.6.4/go.mod h1:eFhhvfR8Px1P6SEuLT600v+vrhdDTdcfMzmnxVXXSbk= go.etcd.io/etcd/client/pkg/v3 v3.6.4 h1:9HBYrjppeOfFjBjaMTRxT3R7xT0GLK8EJMVC4xg6ok0= go.etcd.io/etcd/client/pkg/v3 v3.6.4/go.mod h1:sbdzr2cl3HzVmxNw//PH7aLGVtY4QySjQFuaCgcRFAI= go.etcd.io/etcd/client/v3 v3.6.4 
h1:YOMrCfMhRzY8NgtzUsHl8hC2EBSnuqbR3dh84Uryl7A= go.etcd.io/etcd/client/v3 v3.6.4/go.mod h1:jaNNHCyg2FdALyKWnd7hxZXZxZANb0+KGY+YQaEMISo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE= go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= go.opentelemetry.io/otel/trace v1.35.0/go.mod 
h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.38.0 
h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= google.golang.org/genproto/googleapis/api 
v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4= google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA= google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/api v0.34.3 h1:D12sTP257/jSH2vHV2EDYrb16bS7ULlHpdNdNhEw2S4= k8s.io/api v0.34.3/go.mod 
h1:PyVQBF886Q5RSQZOim7DybQjAbVs8g7gwJNhGtY5MBk= k8s.io/apiextensions-apiserver v0.34.3 h1:p10fGlkDY09eWKOTeUSioxwLukJnm+KuDZdrW71y40g= k8s.io/apiextensions-apiserver v0.34.3/go.mod h1:aujxvqGFRdb/cmXYfcRTeppN7S2XV/t7WMEc64zB5A0= k8s.io/apimachinery v0.34.3 h1:/TB+SFEiQvN9HPldtlWOTp0hWbJ+fjU+wkxysf/aQnE= k8s.io/apimachinery v0.34.3/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= k8s.io/apiserver v0.34.3 h1:uGH1qpDvSiYG4HVFqc6A3L4CKiX+aBWDrrsxHYK0Bdo= k8s.io/apiserver v0.34.3/go.mod h1:QPnnahMO5C2m3lm6fPW3+JmyQbvHZQ8uudAu/493P2w= k8s.io/client-go v0.34.3 h1:wtYtpzy/OPNYf7WyNBTj3iUA0XaBHVqhv4Iv3tbrF5A= k8s.io/client-go v0.34.3/go.mod h1:OxxeYagaP9Kdf78UrKLa3YZixMCfP6bgPwPwNBQBzpM= k8s.io/component-base v0.34.3 h1:zsEgw6ELqK0XncCQomgO9DpUIzlrYuZYA0Cgo+JWpVk= k8s.io/component-base v0.34.3/go.mod h1:5iIlD8wPfWE/xSHTRfbjuvUul2WZbI2nOUK65XL0E/c= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A= sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod 
h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= ================================================ FILE: hack/ark/cluster-external-secret.yaml ================================================ # Sample ClusterExternalSecret for e2e testing # This is a minimal ClusterExternalSecret CR that will be discovered by the agent. # This is a cluster-scoped resource that can create ExternalSecrets in multiple namespaces. apiVersion: external-secrets.io/v1 kind: ClusterExternalSecret metadata: name: e2e-test-cluster-external-secret labels: app.kubernetes.io/name: e2e-test app.kubernetes.io/component: cluster-external-secret spec: externalSecretSpec: refreshInterval: 1h secretStoreRef: name: e2e-test-cluster-secret-store kind: ClusterSecretStore target: name: e2e-test-synced-secret creationPolicy: Owner data: - secretKey: example-key remoteRef: key: dummy/path/to/secret property: password namespaceSelector: matchLabels: environment: test ================================================ FILE: hack/ark/cluster-secret-store.yaml ================================================ # Sample ClusterSecretStore for e2e testing # This is a minimal ClusterSecretStore CR that will be discovered by the agent. # This is a cluster-scoped resource that can be referenced by ExternalSecrets in any namespace. 
apiVersion: external-secrets.io/v1 kind: ClusterSecretStore metadata: name: e2e-test-cluster-secret-store labels: app.kubernetes.io/name: e2e-test app.kubernetes.io/component: cluster-secret-store spec: provider: # Fake provider configuration - this won't actually work but allows the CR to be created fake: data: - key: dummy/path/to/secret value: dummy-value version: "1" ================================================ FILE: hack/ark/conjur-connect-configmap.yaml ================================================ apiVersion: v1 kind: ConfigMap metadata: name: conjur-connect-configmap namespace: default labels: conjur.org/name: conjur-connect-configmap app.kubernetes.io/name: authn-k8s app.kubernetes.io/component: conjur-conn-configmap app.kubernetes.io/instance: pet-store-authn-k8s app.kubernetes.io/part-of: app-namespace-config app.kubernetes.io/managed-by: helm helm.sh/chart: authn-k8s-namespace-prep-1.0.0 data: CONJUR_ACCOUNT: myConjurAccount CONJUR_APPLIANCE_URL: https://conjur.conjur-ns.svc.cluster.local CONJUR_AUTHN_URL: https://conjur.conjur-ns.svc.cluster.local/authn-k8s/my-authenticator-id CONJUR_AUTHENTICATOR_ID: my-authenticator-id CONJUR_SSL_CERTIFICATE: | -----BEGIN CERTIFICATE----- MIIDYTCCAkmgAwIBAgIUTXBJk7Fm+M9kVD5x66jPiwU2JfcwDQYJKoZIhvcNAQEL BQAwQDErMCkGA1UEAwwiY29uanVyLmNvbmp1ci1ucy5zdmMuY2x1c3Rlci5sb2Nh bDERMA8GA1UECgwIRTJFIFRlc3QwHhcNMjYwMTI4MTMwNzA5WhcNMzYwMTI2MTMw NzA5WjBAMSswKQYDVQQDDCJjb25qdXIuY29uanVyLW5zLnN2Yy5jbHVzdGVyLmxv Y2FsMREwDwYDVQQKDAhFMkUgVGVzdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC AQoCggEBALdJ9InvV4oOy5LzP/JfZ7iAuM7RIQzeD1fDjm1EEfQcLqSgobH2yZtA YETlj/c2bfJ8Cc2dTJMoTefwofwjA6iR43SBf0e78raKsGSmR3ors9BqaulvgII5 Tk3y5jdZxty7UNIGOJP9QoJ4kPQHu37HhSfaA517yQJNCOa4NSLkpHWK155o6Cvf k03M6Szzs5uL7GTK/8IJnl0WSXJezC7lQ8Q+0VVCR6Cq4CzAKm2ZoVCPGkYDZb+Y 2i0aGe8ideO0JgTOsHzXiv5x1DzaEdX0+DhV+aQKbRJYENa2w5LCG0b1Z6Hpyvm6 uT0LobEgNLxJ8fOxa3LEq2IryzHFZjUCAwEAAaNTMFEwHQYDVR0OBBYEFHuXVFoC IaF7T3Iic7fKxyKwVhpkMB8GA1UdIwQYMBaAFHuXVFoCIaF7T3Iic7fKxyKwVhpk 
MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAF/7DwNERFTpucWi roDVME2SH1kTKiemcKzguoeOkDBZd70GbLejy64gWF9nIbcQ9WYxRIuqSI2h0j8d ED9SGQ66nic3uw16GN5IJk21ucFwAJstgQG3kvWPBbSrxMO9TB0pounRozZ5DkZe ZI+vZ4BNOZDT9TAE08xXLrzVhzVDM8DGAydzXUlvscfhYpTe77Cm7yMxmItO7QTA xTrBaamgxM1XYbx+DiS8nTm1U2G3UVACCv9zH6MXDe2DDREBuX1U3skqqbJlsypf 68ckx8fzdxIU5OLx0LZ4QZOR66cHyambDtngoD3iKqDcR1L8EdXajq+IaPRZfcD6 VLEtA4Y= -----END CERTIFICATE----- ================================================ FILE: hack/ark/external-secret.yaml ================================================ # Sample ExternalSecret for e2e testing # This is a minimal ExternalSecret CR that will be discovered by the agent. # Note: This requires the External Secrets Operator CRDs to be installed, # but does not require a working secrets backend. apiVersion: external-secrets.io/v1 kind: ExternalSecret metadata: name: e2e-test-external-secret namespace: default labels: app.kubernetes.io/name: e2e-test app.kubernetes.io/component: external-secret spec: refreshInterval: 1h secretStoreRef: name: e2e-test-secret-store kind: SecretStore target: name: e2e-test-synced-secret creationPolicy: Owner data: - secretKey: example-key remoteRef: key: dummy/path/to/secret property: password ================================================ FILE: hack/ark/secret-store.yaml ================================================ # Sample SecretStore for e2e testing # This is a minimal SecretStore CR that will be discovered by the agent. # Note: This requires the External Secrets Operator CRDs to be installed, # but does not require a working secrets backend. 
apiVersion: external-secrets.io/v1 kind: SecretStore metadata: name: e2e-test-secret-store namespace: default labels: app.kubernetes.io/name: e2e-test app.kubernetes.io/component: secret-store spec: provider: # Fake provider configuration - this won't actually work but allows the CR to be created fake: data: - key: dummy/path/to/secret value: dummy-value version: "1" ================================================ FILE: hack/ark/test-e2e.sh ================================================ #!/usr/bin/env bash # # Build and deploy the disco-agent Helm chart. # Wait for the agent to log a message indicating successful data upload. # # Prerequisites: # * kubectl: https://kubernetes.io/docs/tasks/tools/#kubectl # * kind: https://kind.sigs.k8s.io/docs/user/quick-start/ # * helm: https://helm.sh/docs/intro/install/ # * jq: https://jqlang.github.io/jq/download/ # * make: https://www.gnu.org/software/make/ # # You can run `make ark-test-e2e` which will automatically download all # prerequisites and then run this script. set -o nounset set -o errexit set -o pipefail # CyberArk API configuration : ${ARK_USERNAME?} : ${ARK_SECRET?} : ${ARK_SUBDOMAIN?} : ${ARK_DISCOVERY_API?} # The base URL of the OCI registry used for Docker images and Helm charts # E.g. ttl.sh/7e6ca67c-96dc-4dea-9437-80b0f3a69fb1 : ${OCI_BASE?} # The Kubernetes namespace to install into : ${NAMESPACE:=cyberark} # Set to true to use an existing cluster, otherwise a new kind cluster will be created. # Note: the cluster will not be deleted after the test completes. : ${USE_EXISTING_CLUSTER:=false} script_dir=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd) root_dir=$(cd "${script_dir}/../.." 
&& pwd) export TERM=dumb tmp_dir="$(mktemp -d /tmp/jetstack-secure.XXXXX)" trap 'rm -rf "${tmp_dir}"' EXIT pushd "${tmp_dir}" > release.env make -C "$root_dir" ark-release \ GITHUB_OUTPUT="${tmp_dir}/release.env" \ OCI_SIGN_ON_PUSH=false \ oci_platforms="" \ ARK_OCI_BASE="${OCI_BASE}" cat release.env source release.env if [[ "$USE_EXISTING_CLUSTER" != true ]]; then kind create cluster || true fi kubectl create ns "$NAMESPACE" || true kubectl delete secret agent-credentials --namespace "$NAMESPACE" --ignore-not-found kubectl create secret generic agent-credentials \ --namespace "$NAMESPACE" \ --from-literal=ARK_USERNAME=$ARK_USERNAME \ --from-literal=ARK_SECRET=$ARK_SECRET \ --from-literal=ARK_SUBDOMAIN=$ARK_SUBDOMAIN \ --from-literal=ARK_DISCOVERY_API=$ARK_DISCOVERY_API # Create a sample secret in the cluster # # TODO(wallrj): See if there's an API for checking that this secret has been # imported by the backend. For now we have to log into the Disco web UI and # search for this secret. kubectl create secret generic e2e-sample-secret-$(date '+%s') \ --namespace default \ --from-literal=username=${RANDOM} # Create a sample ConfigMap in the cluster that will be discovered by the agent # # This ConfigMap has the label that matches the default label-selector configured # in the ark/configmaps data gatherer (conjur.org/name=conjur-connect-configmap). kubectl apply -f "${root_dir}/hack/ark/conjur-connect-configmap.yaml" # Install External Secrets Operator CRDs and controller # # This is required for the agent to discover ExternalSecret and SecretStore resources. echo "Installing External Secrets Operator..." 
helm repo add external-secrets https://charts.external-secrets.io helm repo update helm upgrade --install external-secrets \ external-secrets/external-secrets \ --namespace external-secrets-system \ --create-namespace \ --wait \ --set installCRDs=true # Create sample External Secrets Operator resources that will be discovered by the agent kubectl apply -f "${root_dir}/hack/ark/secret-store.yaml" kubectl apply -f "${root_dir}/hack/ark/external-secret.yaml" kubectl apply -f "${root_dir}/hack/ark/cluster-secret-store.yaml" kubectl apply -f "${root_dir}/hack/ark/cluster-external-secret.yaml" # We use a non-existent tag and omit the `--version` flag, to work around a Helm # v4 bug. See: https://github.com/helm/helm/issues/31600 helm upgrade agent "oci://${ARK_CHART}:NON_EXISTENT_TAG@${ARK_CHART_DIGEST}" \ --install \ --wait \ --create-namespace \ --namespace "$NAMESPACE" \ --set-json extraArgs='["--log-level=6"]' \ --set pprof.enabled=true \ --set fullnameOverride=disco-agent \ --set "imageRegistry=${OCI_BASE}" \ --set "imageNamespace=" \ --set "image.digest=${ARK_IMAGE_DIGEST}" \ --set config.clusterName="e2e-test-cluster" \ --set config.clusterDescription="A temporary cluster for E2E testing. Contact @wallrj-cyberark." \ --set config.period=60s \ --set acceptTerms=true \ --set-json "podLabels={\"disco-agent.cyberark.cloud/test-id\": \"${RANDOM}\"}" kubectl rollout status deployments/disco-agent --namespace "${NAMESPACE}" # Wait 60s for log message indicating success. # Parse logs as JSON using jq to ensure logs are all JSON formatted. timeout 60 jq -n \ 'inputs | if .msg | test("Data sent successfully") then . | halt_error(0) else . end' \ <(kubectl logs deployments/disco-agent --namespace "${NAMESPACE}" --follow) # Query the Prometheus metrics endpoint to ensure it's working. 
kubectl get pod \ --namespace $NAMESPACE \ --selector app.kubernetes.io/name=disco-agent \ --output jsonpath={.items[*].metadata.name} \ | xargs -I{} kubectl get --raw /api/v1/namespaces/$NAMESPACE/pods/{}:8081/proxy/metrics \ | grep '^process_' # Query the pprof endpoint to ensure it's working. kubectl get pod \ --namespace $NAMESPACE \ --selector app.kubernetes.io/name=disco-agent \ --output jsonpath={.items[*].metadata.name} \ | xargs -I{} kubectl get --raw /api/v1/namespaces/$NAMESPACE/pods/{}:8081/proxy/debug/pprof/cmdline \ | xargs -0 ================================================ FILE: hack/e2e/application-team-1.yaml ================================================ apiVersion: v1 kind: Namespace metadata: name: team-1 --- apiVersion: policy.cert-manager.io/v1alpha1 kind: CertificateRequestPolicy metadata: name: team-1 spec: allowed: commonName: value: '*' dnsNames: values: - '*' subject: countries: values: - '*' localities: values: - '*' organizationalUnits: values: - '*' organizations: values: - '*' postalCodes: values: - '*' provinces: values: - '*' serialNumber: value: '*' streetAddresses: values: - '*' usages: - digital signature - key encipherment - server auth - client auth plugins: venafi: values: venafiConnectionName: venafi-components zone: ${VEN_ZONE} selector: issuerRef: group: jetstack.io kind: VenafiIssuer name: venafi-cloud namespace: matchNames: - team-1 --- apiVersion: jetstack.io/v1alpha1 kind: VenafiIssuer metadata: name: venafi-cloud namespace: team-1 spec: certificateNameExpression: request.namespace + "_" + request.name venafiConnectionName: venafi-components venafiConnectionNamespace: venafi zone: ${VEN_ZONE} --- apiVersion: cert-manager.io/v1 kind: Certificate metadata: name: app-0 namespace: team-1 spec: commonName: app-0.team-1 duration: 720h0m0s renewBefore: 719h0m0s issuerRef: group: jetstack.io kind: VenafiIssuer name: venafi-cloud privateKey: algorithm: RSA rotationPolicy: Always size: 2048 revisionHistoryLimit: 1 secretName: 
app-0 --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: cert-manager-policy:allow namespace: team-1 rules: - apiGroups: ["policy.cert-manager.io"] resources: ["certificaterequestpolicies"] verbs: ["use"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: cert-manager-policy:allow namespace: team-1 roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: cert-manager-policy:allow subjects: - kind: Group name: system:authenticated apiGroup: rbac.authorization.k8s.io ================================================ FILE: hack/e2e/test.sh ================================================ #!/usr/bin/env bash # # Build and install venafi-kubernetes-agent for VenafiConnection based authentication. # Wait for it to log a message indicating successful data upload. # # A VenafiConnection resource is created which directly loads a bearer token # from a Kubernetes Secret. # This is the simplest way of testing the VenafiConnection integration, # but it does not fully test "secretless" (workload identity federation) authentication. 
# # Prerequisites: # * kubectl: https://kubernetes.io/docs/tasks/tools/#kubectl # * venctl: https://docs.cyberark.com/mis-saas/vaas/venctl/t-venctl-install/ # * jq: https://jqlang.github.io/jq/download/ # * step: https://smallstep.com/docs/step-cli/installation/ # * curl: https://www.man7.org/linux/man-pages/man1/curl.1.html # * envsubst: https://www.man7.org/linux/man-pages/man1/envsubst.1.html # * gcloud: https://cloud.google.com/sdk/docs/install # * gke-gcloud-auth-plugin: https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl # > :warning: If you installed gcloud using snap, you have to install the kubectl plugin using apt: # > https://github.com/actions/runner-images/issues/6778#issuecomment-1360360603 # # In case metrics and logs are missing from your cluster, see: # * https://cloud.google.com/kubernetes-engine/docs/troubleshooting/dashboards#write_permissions set -o nounset set -o errexit set -o pipefail # Commenting out for CI, uncomment for local debugging #set -o xtrace script_dir=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd) root_dir=$(cd "${script_dir}/../.." && pwd) export TERM=dumb # Your Venafi Cloud API key. : ${VEN_API_KEY?} # Separate API Key for getting a pull secret, if your main venafi cloud tenant # doesn't allow you to create registry service accounts. : ${VEN_API_KEY_PULL?} # The Venafi Cloud zone (application/issuing_template) which will be used by the # issuer an policy. : ${VEN_ZONE?} # The hostname of the Venafi API server. # US: api.venafi.cloud # EU: api.venafi.eu : ${VEN_API_HOST?} # The base URL of the OCI registry used for Docker images and Helm charts # E.g. 
ttl.sh/63773370-0bcf-4ac0-bd42-5515616089ff : ${OCI_BASE?} # Required gcloud environment variables # https://cloud.google.com/sdk/docs/configurations#setting_configuration_properties : ${CLOUDSDK_CORE_PROJECT?} : ${CLOUDSDK_COMPUTE_ZONE?} # The name of the cluster to create : ${CLUSTER_NAME?} cd "${script_dir}" pushd "${root_dir}" > release.env make release \ OCI_SIGN_ON_PUSH=false \ oci_platforms=linux/amd64 \ oci_preflight_image_name=$OCI_BASE/images/venafi-agent \ helm_chart_image_name=$OCI_BASE/charts/venafi-kubernetes-agent \ GITHUB_OUTPUT=release.env source release.env popd export USE_GKE_GCLOUD_AUTH_PLUGIN=True if ! gcloud container clusters get-credentials "${CLUSTER_NAME}"; then gcloud container clusters create "${CLUSTER_NAME}" \ --preemptible \ --machine-type e2-small \ --num-nodes 3 fi kubectl create ns venafi || true # Pull secret for Venafi OCI registry # IMPORTANT: we pick the first team as the owning team for the registry and # workload identity service account as it doesn't matter. if ! 
kubectl get secret venafi-image-pull-secret -n venafi; then venctl iam service-accounts registry create \ --api-key $VEN_API_KEY_PULL \ --no-prompts \ --owning-team "$(curl --fail-with-body -sS "https://${VEN_API_HOST}/v1/teams" -H "tppl-api-key: ${VEN_API_KEY_PULL}" | jq '.teams[0].id' -r)" \ --name "venafi-kubernetes-agent-e2e-registry-${RANDOM}" \ --scopes enterprise-cert-manager,enterprise-venafi-issuer,enterprise-approver-policy \ | jq '{ "apiVersion": "v1", "kind": "Secret", "metadata": { "name": "venafi-image-pull-secret" }, "type": "kubernetes.io/dockerconfigjson", "stringData": { ".dockerconfigjson": { "auths": { "\(.oci_registry)": { "username": .username, "password": .password } } } | tostring } }' \ | kubectl create -n venafi -f - fi export VENAFI_KUBERNETES_AGENT_CLIENT_ID="not-used-but-required-by-venctl" venctl components kubernetes apply \ --region $VEN_VCP_REGION \ --cert-manager \ --venafi-enhanced-issuer \ --approver-policy-enterprise \ --venafi-kubernetes-agent \ --venafi-kubernetes-agent-version "${RELEASE_HELM_CHART_VERSION}" \ --venafi-kubernetes-agent-values-files "${script_dir}/values.venafi-kubernetes-agent.yaml" \ --venafi-kubernetes-agent-custom-image-registry "${OCI_BASE}/images" \ --venafi-kubernetes-agent-custom-chart-repository "oci://${OCI_BASE}/charts" kubectl apply -n venafi -f venafi-components.yaml subject="system:serviceaccount:venafi:venafi-components" audience="https://${VEN_API_HOST}" issuerURL="$(kubectl create token -n venafi venafi-components | step crypto jwt inspect --insecure | jq -r '.payload.iss')" openidDiscoveryURL="${issuerURL}/.well-known/openid-configuration" jwksURI=$(curl --fail-with-body -sSL ${openidDiscoveryURL} | jq -r '.jwks_uri') # Create the Venafi agent service account if one does not already exist # IMPORTANT: we pick the first team as the owning team for the registry and # workload identity service account as it doesn't matter. 
while true; do tenantID=$(curl --fail-with-body -sSL -H "tppl-api-key: $VEN_API_KEY" https://${VEN_API_HOST}/v1/serviceaccounts \ | jq -r '.[] | select(.issuerURL==$issuerURL and .subject == $subject) | .companyId' \ --arg issuerURL "${issuerURL}" \ --arg subject "${subject}") if [[ "${tenantID}" != "" ]]; then break fi jq -n '{ "name": "venafi-kubernetes-agent-e2e-agent-\($random)", "authenticationType": "rsaKeyFederated", "scopes": ["kubernetes-discovery-federated", "certificate-issuance"], "subject": $subject, "audience": $audience, "issuerURL": $issuerURL, "jwksURI": $jwksURI, "applications": [$applications.applications[].id], "owner": $owningTeamID }' \ --arg random "${RANDOM}" \ --arg subject "${subject}" \ --arg audience "${audience}" \ --arg issuerURL "${issuerURL}" \ --arg jwksURI "${jwksURI}" \ --arg owningTeamID "$(curl --fail-with-body -sS "https://${VEN_API_HOST}/v1/teams" -H "tppl-api-key: $VEN_API_KEY" | jq '.teams[0].id' -r)" \ --argjson applications "$(curl https://${VEN_API_HOST}/outagedetection/v1/applications --fail-with-body -sSL -H tppl-api-key:\ ${VEN_API_KEY})" \ | curl https://${VEN_API_HOST}/v1/serviceaccounts \ -H "tppl-api-key: $VEN_API_KEY" \ --fail-with-body \ -sSL --json @- done kubectl apply -n venafi -f - </dev/null && pwd) root_dir=$(cd "${script_dir}/../.." 
&& pwd) export TERM=dumb tmp_dir="$(mktemp -d /tmp/jetstack-secure.XXXXX)" trap 'rm -rf "${tmp_dir}"' EXIT pushd "${tmp_dir}" > release.env make -C "$root_dir" ngts-release \ GITHUB_OUTPUT="${tmp_dir}/release.env" \ OCI_SIGN_ON_PUSH=false \ oci_platforms="" \ NGTS_OCI_BASE="${OCI_BASE}" cat release.env source release.env if [[ "$USE_EXISTING_CLUSTER" != true ]]; then kind create cluster || true fi kubectl create ns "$NAMESPACE" || true kubectl delete secret discovery-agent-credentials --namespace "$NAMESPACE" --ignore-not-found kubectl create secret generic discovery-agent-credentials \ --namespace "$NAMESPACE" \ --from-literal=clientID=$NGTS_CLIENT_ID \ --from-literal=privatekey.pem="$NGTS_PRIVATE_KEY" # Create a sample secret in the cluster kubectl create secret generic e2e-sample-secret-$(date '+%s') \ --namespace default \ --from-literal=username=${RANDOM} # Create values.yaml file for the helm chart cat > "${tmp_dir}/values.yaml" < $CA_BUNDLE_FILE kubectl create configmap custom-ca --namespace="$NAMESPACE" --from-file=ca_certs.crt="$CA_BUNDLE_FILE" # Need to update values.yaml to add the custom CA bundle custom_ca_yaml="${script_dir}/custom_ca.yaml" yq eval-all '. as $item ireduce ({}; . * $item)' "${tmp_dir}/values.yaml" "$custom_ca_yaml" > "${tmp_dir}/values.merged.yaml" mv "${tmp_dir}/values.merged.yaml" "${tmp_dir}/values.yaml" fi # We use a non-existent tag and omit the `--version` flag, to work around a Helm # v4 bug. See: https://github.com/helm/helm/issues/31600 helm upgrade agent "oci://${NGTS_CHART}:NON_EXISTENT_TAG@${NGTS_CHART_DIGEST}" \ --install \ --wait \ --create-namespace \ --namespace "$NAMESPACE" \ --values "${tmp_dir}/values.yaml" kubectl rollout status deployments/discovery-agent --namespace "${NAMESPACE}" # Wait for log message indicating success. # Parse logs as JSON using jq to ensure logs are all JSON formatted. timeout 120 jq -n \ 'inputs | if .msg | test("Data sent successfully") then . | halt_error(0) else . 
end' \ <(kubectl logs deployments/discovery-agent --namespace "${NAMESPACE}" --follow) # Query the Prometheus metrics endpoint to ensure it's working. kubectl get pod \ --namespace ${NAMESPACE} \ --selector app.kubernetes.io/name=discovery-agent \ --output jsonpath={.items[*].metadata.name} \ | xargs -I{} kubectl get --raw /api/v1/namespaces/$NAMESPACE/pods/{}:8081/proxy/metrics \ | grep '^process_' # Query the pprof endpoint to ensure it's working. kubectl get pod \ --namespace ${NAMESPACE} \ --selector app.kubernetes.io/name=discovery-agent \ --output jsonpath={.items[*].metadata.name} \ | xargs -I{} kubectl get --raw /api/v1/namespaces/$NAMESPACE/pods/{}:8081/proxy/debug/pprof/cmdline \ | xargs -0 # TODO: should call to SCM and verify that certs are actually uploaded ================================================ FILE: internal/cyberark/api/telemetry.go ================================================ package api import ( "encoding/base64" "net/http" "net/url" "github.com/jetstack/preflight/pkg/version" ) // Integrations working with the Identity Security Platform, should add metadata // in their API calls, to provide insights into how customers utilize each API. 
// // - IntegrationName (in): The vendor integration name (required) // - IntegrationType (it): Integration Type (required) // - IntegrationVersion (iv): The plugin version being used (required) // - VendorName (vn): Vendor name (required) // - VendorVersion (vv): Version of the vendor product in which the plugin is used (if applicable) const ( // TelemetryHeaderKey is the name of the HTTP header to use for telemetry TelemetryHeaderKey = "X-Cybr-Telemetry" ) var ( telemetryValues url.Values telemetryValueEncoded string ) func init() { telemetryValues = url.Values{} telemetryValues.Set("in", "disco-agent") telemetryValues.Set("vn", "CyberArk") telemetryValues.Set("it", "KubernetesAgent") telemetryValues.Set("iv", version.PreflightVersion) telemetryValueEncoded = base64.URLEncoding.EncodeToString([]byte(telemetryValues.Encode())) } // SetTelemetryRequestHeader adds the x-cybr-telemetry header to the given HTTP // request, with information about this integration. func SetTelemetryRequestHeader(req *http.Request) { req.Header.Set(TelemetryHeaderKey, telemetryValueEncoded) } ================================================ FILE: internal/cyberark/api/telemetry_test.go ================================================ package api import ( "encoding/base64" "net/http" "net/url" "testing" "github.com/stretchr/testify/require" ) // Test the SetTelemetryRequestHeader function func TestSetTelemetryRequestHeader(t *testing.T) { // Create a new HTTP request req, err := http.NewRequestWithContext(t.Context(), http.MethodGet, "http://example.com", nil) require.NoError(t, err, "failed to create HTTP request") // Call the function to set the telemetry header SetTelemetryRequestHeader(req) base64Value := req.Header.Get(TelemetryHeaderKey) // Check that the header is set require.NotEmpty(t, base64Value, "telemetry header should be set") queryString, err := base64.URLEncoding.DecodeString(base64Value) require.NoError(t, err, "failed to decode telemetry header value") values, err := 
url.ParseQuery(string(queryString)) require.NoError(t, err, "failed to parse telemetry header value") require.Equal(t, telemetryValues, values, "telemetry header value should match expected values") } ================================================ FILE: internal/cyberark/client.go ================================================ package cyberark import ( "context" "errors" "net/http" "os" "github.com/jetstack/preflight/internal/cyberark/dataupload" "github.com/jetstack/preflight/internal/cyberark/identity" "github.com/jetstack/preflight/internal/cyberark/servicediscovery" ) // ClientConfig holds the configuration needed to initialize a CyberArk client. type ClientConfig struct { Subdomain string Username string Secret string } // ClientConfigLoader is a function type that loads and returns a ClientConfig. type ClientConfigLoader func() (ClientConfig, error) // ErrMissingEnvironmentVariables is returned when required environment variables are not set. var ErrMissingEnvironmentVariables = errors.New("missing environment variables: ARK_SUBDOMAIN, ARK_USERNAME, ARK_SECRET") // LoadClientConfigFromEnvironment loads the CyberArk client configuration from environment variables. // It expects the following environment variables to be set: // - ARK_SUBDOMAIN: The CyberArk subdomain to use. // - ARK_USERNAME: The username for authentication. // - ARK_SECRET: The secret for authentication. func LoadClientConfigFromEnvironment() (ClientConfig, error) { subdomain := os.Getenv("ARK_SUBDOMAIN") username := os.Getenv("ARK_USERNAME") secret := os.Getenv("ARK_SECRET") if subdomain == "" || username == "" || secret == "" { return ClientConfig{}, ErrMissingEnvironmentVariables } return ClientConfig{ Subdomain: subdomain, Username: username, Secret: secret, }, nil } // NewDatauploadClient initializes and returns a new CyberArk Data Upload client. // It performs service discovery to find the necessary API endpoints and authenticates // using the provided client configuration. 
func NewDatauploadClient(ctx context.Context, httpClient *http.Client, serviceMap *servicediscovery.Services, tenantUUID string, cfg ClientConfig) (*dataupload.CyberArkClient, error) { identityAPI := serviceMap.Identity.API if identityAPI == "" { return nil, errors.New("service discovery returned an empty identity API") } discoveryAPI := serviceMap.DiscoveryContext.API if discoveryAPI == "" { return nil, errors.New("service discovery returned an empty discovery API") } identityClient := identity.New(httpClient, identityAPI, cfg.Subdomain) err := identityClient.LoginUsernamePassword(ctx, cfg.Username, []byte(cfg.Secret)) if err != nil { return nil, err } return dataupload.New(httpClient, discoveryAPI, tenantUUID, identityClient.AuthenticateRequest), nil } ================================================ FILE: internal/cyberark/client_test.go ================================================ package cyberark_test import ( "crypto/x509" "os" "strings" "testing" "github.com/jetstack/venafi-connection-lib/http_client" "github.com/stretchr/testify/require" "k8s.io/klog/v2" "k8s.io/klog/v2/ktesting" "github.com/jetstack/preflight/internal/cyberark" "github.com/jetstack/preflight/internal/cyberark/dataupload" "github.com/jetstack/preflight/internal/cyberark/servicediscovery" arktesting "github.com/jetstack/preflight/internal/cyberark/testing" "github.com/jetstack/preflight/pkg/testutil" "github.com/jetstack/preflight/pkg/version" _ "k8s.io/klog/v2/ktesting/init" ) // TestCyberArkClient_PutSnapshot_MockAPI demonstrates that NewDatauploadClient works with the mock API. 
func TestCyberArkClient_PutSnapshot_MockAPI(t *testing.T) { logger := ktesting.NewLogger(t, ktesting.DefaultConfig) ctx := klog.NewContext(t.Context(), logger) httpClient := testutil.FakeCyberArk(t) cfg := cyberark.ClientConfig{ Subdomain: servicediscovery.MockDiscoverySubdomain, Username: "test@example.com", Secret: "somepassword", } discoveryClient := servicediscovery.New(httpClient, cfg.Subdomain) serviceMap, tenantUUID, err := discoveryClient.DiscoverServices(t.Context()) if err != nil { t.Fatalf("failed to discover mock services: %v", err) } cl, err := cyberark.NewDatauploadClient(ctx, httpClient, serviceMap, tenantUUID, cfg) require.NoError(t, err) err = cl.PutSnapshot(ctx, dataupload.Snapshot{ ClusterID: "ffffffff-ffff-ffff-ffff-ffffffffffff", AgentVersion: version.PreflightVersion, }) require.NoError(t, err) } // TestCyberArkClient_PutSnapshot_RealAPI demonstrates that NewDatauploadClient works with the real inventory API. // // An API token is obtained by authenticating with the ARK_USERNAME and ARK_SECRET from the environment. // ARK_SUBDOMAIN should be your tenant subdomain. // // To test against a tenant on the integration platform, also set: // ARK_DISCOVERY_API=https://platform-discovery.integration-cyberark.cloud/ // // To enable verbose request logging: // // go test ./internal/cyberark \ // -v -count 1 -run TestCyberArkClient_PutSnapshot_RealAPI -args -testing.v 6 func TestCyberArkClient_PutSnapshot_RealAPI(t *testing.T) { if strings.ToLower(os.Getenv("ARK_LIVE_TEST")) != "true" { t.Skip("set ARK_LIVE_TEST=true to run this test against the live service") return } arktesting.SkipIfNoEnv(t) t.Log("This test runs against a live service and has been known to flake. 
If you see timeout issues it's possible that the test is flaking and it could be unrelated to your changes.") logger := ktesting.NewLogger(t, ktesting.DefaultConfig) ctx := klog.NewContext(t.Context(), logger) var rootCAs *x509.CertPool httpClient := http_client.NewDefaultClient(version.UserAgent(), rootCAs) cfg, err := cyberark.LoadClientConfigFromEnvironment() require.NoError(t, err) discoveryClient := servicediscovery.New(httpClient, cfg.Subdomain) serviceMap, tenantUUID, err := discoveryClient.DiscoverServices(t.Context()) if err != nil { t.Fatalf("failed to discover services: %v", err) } cl, err := cyberark.NewDatauploadClient(ctx, httpClient, serviceMap, tenantUUID, cfg) require.NoError(t, err) err = cl.PutSnapshot(ctx, dataupload.Snapshot{ ClusterID: "ffffffff-ffff-ffff-ffff-ffffffffffff", AgentVersion: version.PreflightVersion, }) require.NoError(t, err) } ================================================ FILE: internal/cyberark/dataupload/dataupload.go ================================================ package dataupload import ( "bytes" "context" "crypto/sha256" "encoding/base64" "encoding/hex" "encoding/json" "fmt" "io" "net/http" "net/url" "k8s.io/apimachinery/pkg/runtime" arkapi "github.com/jetstack/preflight/internal/cyberark/api" "github.com/jetstack/preflight/internal/cyberark/identity" "github.com/jetstack/preflight/pkg/version" ) const ( // maxRetrievePresignedUploadURLBodySize is the maximum allowed size for a response body from the // Retrieve Presigned Upload URL service. maxRetrievePresignedUploadURLBodySize = 10 * 1024 // apiPathSnapshotLinks is the URL path of the snapshot-links endpoint of the inventory API. // This endpoint returns an AWS presigned URL. // TODO(wallrj): Link to CyberArk API documentation when it is published. 
	apiPathSnapshotLinks = "/ingestions/kubernetes/snapshot-links"
)

// CyberArkClient uploads Kubernetes cluster snapshots to the CyberArk
// Discovery and Context inventory API.
type CyberArkClient struct {
	// baseURL is the base URL of the inventory API, sourced from service discovery.
	baseURL    string
	httpClient *http.Client
	// tenantUUID identifies the tenant; it is attached to uploads as a tag.
	tenantUUID string
	// authenticateRequest sets the Authorization header on outgoing requests
	// and returns the username associated with the credentials used.
	authenticateRequest identity.RequestAuthenticator
}

// New creates a new CyberArkClient. The tenant UUID is best sourced from service discovery along with the base URL.
func New(httpClient *http.Client, baseURL string, tenantUUID string, authenticateRequest identity.RequestAuthenticator) *CyberArkClient {
	return &CyberArkClient{
		baseURL:             baseURL,
		httpClient:          httpClient,
		tenantUUID:          tenantUUID,
		authenticateRequest: authenticateRequest,
	}
}

// Snapshot is the JSON that the CyberArk Discovery and Context API expects to
// be uploaded to the AWS presigned URL.
type Snapshot struct {
	// AgentVersion is the version of the Venafi Kubernetes Agent which is uploading this snapshot.
	AgentVersion string `json:"agent_version"`
	// ClusterID is the unique ID of the Kubernetes cluster which this snapshot was taken from.
	ClusterID string `json:"cluster_id"`
	// ClusterName is the name of the Kubernetes cluster which this snapshot was taken from.
	ClusterName string `json:"cluster_name"`
	// ClusterDescription is an optional description of the Kubernetes cluster which this snapshot was taken from.
	ClusterDescription string `json:"cluster_description,omitempty"`
	// K8SVersion is the version of Kubernetes which the cluster is running.
	K8SVersion string `json:"k8s_version"`
	// OIDCConfig contains OIDC configuration data from the API server's
	// `/.well-known/openid-configuration` endpoint
	OIDCConfig map[string]any `json:"openid_configuration,omitempty"`
	// OIDCConfigError contains any error encountered while fetching the OIDC configuration
	OIDCConfigError string `json:"openid_configuration_error,omitempty"`
	// JWKS contains JWKS data from the API server's `/openid/v1/jwks` endpoint
	JWKS map[string]any `json:"jwks,omitempty"`
	// JWKSError contains any error encountered while fetching the JWKS
	JWKSError string `json:"jwks_error,omitempty"`
	// Secrets is a list of Secret resources in the cluster. Not all Secret
	// types are included and only a subset of the Secret data is included.
	Secrets []runtime.Object `json:"secrets"`
	// ServiceAccounts is a list of ServiceAccount resources in the cluster.
	ServiceAccounts []runtime.Object `json:"serviceaccounts"`
	// ConfigMaps is a list of ConfigMap resources in the cluster.
	ConfigMaps []runtime.Object `json:"configmaps"`
	// ExternalSecrets is a list of ExternalSecret resources in the cluster.
	ExternalSecrets []runtime.Object `json:"externalsecrets"`
	// SecretStores is a list of SecretStore resources in the cluster.
	SecretStores []runtime.Object `json:"secretstores"`
	// ClusterExternalSecrets is a list of ClusterExternalSecret resources in the cluster.
	ClusterExternalSecrets []runtime.Object `json:"clusterexternalsecrets"`
	// ClusterSecretStores is a list of ClusterSecretStore resources in the cluster.
	ClusterSecretStores []runtime.Object `json:"clustersecretstores"`
	// Roles is a list of Role resources in the cluster.
	Roles []runtime.Object `json:"roles"`
	// ClusterRoles is a list of ClusterRole resources in the cluster.
	ClusterRoles []runtime.Object `json:"clusterroles"`
	// RoleBindings is a list of RoleBinding resources in the cluster.
	RoleBindings []runtime.Object `json:"rolebindings"`
	// ClusterRoleBindings is a list of ClusterRoleBinding resources in the cluster.
	ClusterRoleBindings []runtime.Object `json:"clusterrolebindings"`
	// Jobs is a list of Job resources in the cluster.
	Jobs []runtime.Object `json:"jobs"`
	// CronJobs is a list of CronJob resources in the cluster.
	CronJobs []runtime.Object `json:"cronjobs"`
	// Deployments is a list of Deployment resources in the cluster.
	Deployments []runtime.Object `json:"deployments"`
	// Statefulsets is a list of StatefulSet resources in the cluster.
	Statefulsets []runtime.Object `json:"statefulsets"`
	// Daemonsets is a list of DaemonSet resources in the cluster.
	Daemonsets []runtime.Object `json:"daemonsets"`
	// Pods is a list of Pod resources in the cluster.
	Pods []runtime.Object `json:"pods"`
}

// PutSnapshot PUTs the supplied snapshot to an [AWS presigned URL] which it obtains via the CyberArk inventory API.
// [AWS presigned URL]: https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
//
// A SHA256 checksum header is included in the request, to verify that the payload
// has been received intact.
// Read [Checking object integrity for data uploads in Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity-upload.html),
// to learn more.
func (c *CyberArkClient) PutSnapshot(ctx context.Context, snapshot Snapshot) error { if snapshot.ClusterID == "" { return fmt.Errorf("programmer mistake: the snapshot cluster ID cannot be left empty") } encodedBody := &bytes.Buffer{} hash := sha256.New() if err := json.NewEncoder(io.MultiWriter(encodedBody, hash)).Encode(snapshot); err != nil { return err } checksum := hash.Sum(nil) checksumHex := hex.EncodeToString(checksum) checksumBase64 := base64.StdEncoding.EncodeToString(checksum) presignedUploadURL, username, err := c.retrievePresignedUploadURL(ctx, checksumHex, snapshot.ClusterID, int64(encodedBody.Len())) if err != nil { return fmt.Errorf("while retrieving snapshot upload URL: %s", err) } // The snapshot-links endpoint returns an AWS presigned URL which only supports the PUT verb. req, err := http.NewRequestWithContext(ctx, http.MethodPut, presignedUploadURL, encodedBody) if err != nil { return err } req.Header.Set("X-Amz-Checksum-Sha256", checksumBase64) req.Header.Set("X-Amz-Server-Side-Encryption", "AES256") q := url.Values{} q.Add("agent_version", snapshot.AgentVersion) q.Add("tenant_id", c.tenantUUID) q.Add("upload_type", "k8s_snapshot") q.Add("uploader_id", snapshot.ClusterID) q.Add("username", username) q.Add("vendor", "k8s") req.Header.Set("X-Amz-Tagging", q.Encode()) version.SetUserAgent(req) res, err := c.httpClient.Do(req) if err != nil { return err } defer res.Body.Close() if code := res.StatusCode; code < 200 || code >= 300 { body, _ := io.ReadAll(io.LimitReader(res.Body, 500)) if len(body) == 0 { body = []byte(``) } return fmt.Errorf("received response with status code %d: %s", code, bytes.TrimSpace(body)) } return nil } const SigV4Support = "sigv4" // RetrievePresignedUploadURLRequest is the JSON body sent to the inventory API to request a presigned upload URL. 
type RetrievePresignedUploadURLRequest struct {
	ClusterID string `json:"cluster_id"`
	Checksum  string `json:"checksum_sha256"`
	// AgentVersion is the v-prefixed version of the agent uploading the snapshot.
	// Note that some versions of the backend rely on this version being v-prefixed semver,
	// but that requirement was dropped in favour of the SigV4Support field below.
	AgentVersion string `json:"agent_version"`
	// FileSize is the size of the data we'll upload in bytes
	FileSize int64 `json:"file_size"`
	// SignatureVersion allows the agent to specify which version of AWS's signature scheme it expects for the presigned URL.
	// Older versions of the agent will not send this. All versions which support this field will unconditionally set it to the
	// value of SigV4Support, so the backend can rely on this field being set.
	SignatureVersion string `json:"signature_version"`
}

// retrievePresignedUploadURL POSTs to the snapshot-links endpoint and returns
// the presigned upload URL together with the username of the authenticated caller.
func (c *CyberArkClient) retrievePresignedUploadURL(ctx context.Context, checksum string, clusterID string, fileSize int64) (string, string, error) {
	uploadURL, err := url.JoinPath(c.baseURL, apiPathSnapshotLinks)
	if err != nil {
		return "", "", err
	}

	request := RetrievePresignedUploadURLRequest{
		ClusterID:        clusterID,
		Checksum:         checksum,
		AgentVersion:     version.PreflightVersion,
		FileSize:         fileSize,
		SignatureVersion: SigV4Support,
	}
	encodedBody := &bytes.Buffer{}
	if err := json.NewEncoder(encodedBody).Encode(request); err != nil {
		return "", "", err
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadURL, encodedBody)
	if err != nil {
		return "", "", err
	}
	req.Header.Set("Content-Type", "application/json")

	// Authentication also yields the username, which the caller attaches to the
	// upload as a tag.
	username, err := c.authenticateRequest(req)
	if err != nil {
		return "", "", fmt.Errorf("failed to authenticate request: %s", err)
	}

	version.SetUserAgent(req)

	// Add telemetry headers
	arkapi.SetTelemetryRequestHeader(req)

	res, err := c.httpClient.Do(req)
	if err != nil {
		return "", "", err
	}
	defer res.Body.Close()

	if code := res.StatusCode; code < 200 || code >= 300 {
		// Include a bounded amount of the response body in the error for debugging.
		body, _ := io.ReadAll(io.LimitReader(res.Body, 500))
		if len(body) == 0 {
			body = []byte(``)
		}
		return "", "", fmt.Errorf("received response with status code %d: %s", code, bytes.TrimSpace(body))
	}

	response := struct {
		URL string `json:"url"`
	}{}
	// Bound the response size; io.ErrUnexpectedEOF indicates it was truncated by the limit.
	if err := json.NewDecoder(io.LimitReader(res.Body, maxRetrievePresignedUploadURLBodySize)).Decode(&response); err != nil {
		if err == io.ErrUnexpectedEOF {
			return "", "", fmt.Errorf("rejecting JSON response from server as it was too large or was truncated")
		}
		return "", "", fmt.Errorf("failed to parse JSON from otherwise successful request to start data upload: %s", err)
	}

	return response.URL, username, nil
}

================================================
FILE: internal/cyberark/dataupload/dataupload_test.go
================================================
package dataupload_test

import (
	"fmt"
	"net/http"
	"testing"

	"github.com/stretchr/testify/require"
	"k8s.io/klog/v2"
	"k8s.io/klog/v2/ktesting"

	"github.com/jetstack/preflight/internal/cyberark/dataupload"
	"github.com/jetstack/preflight/internal/cyberark/identity"
	"github.com/jetstack/preflight/pkg/version"

	_ "k8s.io/klog/v2/ktesting/init"
)

// TestCyberArkClient_PutSnapshot_MockAPI tests the dataupload code against a
// mock API server. The mock server is configured to return different responses
// based on the cluster ID and bearer token used in the request.
func TestCyberArkClient_PutSnapshot_MockAPI(t *testing.T) {
	// setToken returns an authenticator which sets a fixed bearer token and
	// reports a fixed username.
	setToken := func(token string) identity.RequestAuthenticator {
		return func(req *http.Request) (string, error) {
			req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
			return "foo@example.com", nil // set a dummy username for testing purposes; the actual value is not important for these tests
		}
	}

	tests := []struct {
		name         string
		snapshot     dataupload.Snapshot
		authenticate identity.RequestAuthenticator
		requireFn    func(t *testing.T, err error)
	}{
		{
			name: "successful upload",
			snapshot: dataupload.Snapshot{
				ClusterID:    "ffffffff-ffff-ffff-ffff-ffffffffffff",
				AgentVersion: version.PreflightVersion,
			},
			authenticate: setToken("success-token"),
			requireFn: func(t *testing.T, err error) {
				require.NoError(t, err)
			},
		},
		{
			name: "error when cluster ID is empty",
			snapshot: dataupload.Snapshot{
				ClusterID:    "",
				AgentVersion: "test-version",
			},
			authenticate: setToken("success-token"),
			requireFn: func(t *testing.T, err error) {
				require.ErrorContains(t, err, "programmer mistake: the snapshot cluster ID cannot be left empty")
			},
		},
		{
			name: "error when bearer token is incorrect",
			snapshot: dataupload.Snapshot{
				ClusterID:    "test",
				AgentVersion: "test-version",
			},
			authenticate: setToken("fail-token"),
			requireFn: func(t *testing.T, err error) {
				require.ErrorContains(t, err, "while retrieving snapshot upload URL: received response with status code 500: should authenticate using the correct bearer token")
			},
		},
		{
			name: "invalid JSON from server (RetrievePresignedUploadURL step)",
			snapshot: dataupload.Snapshot{
				ClusterID:    "invalid-json-retrieve-presigned",
				AgentVersion: "test-version",
			},
			authenticate: setToken("success-token"),
			requireFn: func(t *testing.T, err error) {
				require.ErrorContains(t, err, "while retrieving snapshot upload URL: rejecting JSON response from server as it was too large or was truncated")
			},
		},
		{
			name: "500 from server (RetrievePresignedUploadURL step)",
			snapshot: dataupload.Snapshot{
				ClusterID:    "invalid-response-post-data",
				AgentVersion: "test-version",
			},
			authenticate: setToken("success-token"),
			requireFn: func(t *testing.T, err error) {
				require.ErrorContains(t, err, "while retrieving snapshot upload URL: received response with status code 500: mock error")
			},
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			logger := ktesting.NewLogger(t, ktesting.DefaultConfig)
			ctx := klog.NewContext(t.Context(), logger)

			datauploadAPIBaseURL, httpClient := dataupload.MockDataUploadServer(t)

			cyberArkClient := dataupload.New(httpClient, datauploadAPIBaseURL, "test-tenant-uuid", tc.authenticate)

			err := cyberArkClient.PutSnapshot(ctx, tc.snapshot)
			tc.requireFn(t, err)
		})
	}
}

================================================
FILE: internal/cyberark/dataupload/mock.go
================================================
package dataupload

import (
	"bytes"
	"crypto/rand"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"net/url"
	"sync"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"k8s.io/client-go/transport"

	arkapi "github.com/jetstack/preflight/internal/cyberark/api"
	"github.com/jetstack/preflight/pkg/version"
)

const (
	// successBearerToken is the only bearer token the mock server accepts.
	successBearerToken = "success-token"
	// successClusterID is the only cluster ID for which the mock server returns success.
	successClusterID = "ffffffff-ffff-ffff-ffff-ffffffffffff"
)

// uploadValues records what a caller claimed when requesting a presigned URL,
// so the subsequent upload can be validated against it.
type uploadValues struct {
	ClusterID string
	FileSize  int64
}

type mockDataUploadServer struct {
	t         testing.TB
	serverURL string
	mux       *http.ServeMux

	// expectedUploadValues maps the random path segment of each issued
	// presigned URL to the values sent when that URL was requested.
	expectedUploadValues      map[string]uploadValues
	expectedUploadValuesMutex sync.Mutex
}

// MockDataUploadServer starts a server which mocks the CyberArk
// Discovery and Context API, and an HTTP client with the CA certs needed to
// connect to it.
//
// The returned URL can be supplied to the `dataupload.New` function as the base
// URL for the discoverycontext API.
//
// The returned HTTP client has a transport which logs requests and responses
// depending on log level of the logger supplied in the context.
// // The mock server will return a successful response when the cluster ID matches // successClusterID. Other cluster IDs can be used to trigger various failure // responses. func MockDataUploadServer(t testing.TB) (string, *http.Client) { mux := http.NewServeMux() mds := &mockDataUploadServer{ t: t, expectedUploadValues: make(map[string]uploadValues), } mux.HandleFunc("POST "+apiPathSnapshotLinks, mds.handleSnapshotLinks) // The path includes random data to ensure that each request is treated separately by the mock server, allowing us to track data across calls. // It also ensures that the client isn't using some pre-saved path and is actually using the presigned URL returned by the mock server in the previous step, which is important for test validity. mux.HandleFunc("PUT /presigned-upload/{randData}", mds.handlePresignedUpload) server := httptest.NewTLSServer(mds) t.Cleanup(server.Close) mds.mux = mux mds.serverURL = server.URL httpClient := server.Client() httpClient.Transport = transport.NewDebuggingRoundTripper(httpClient.Transport, transport.DebugByContext) return server.URL, httpClient } func (mds *mockDataUploadServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { mds.t.Log(r.Method, r.RequestURI) mds.mux.ServeHTTP(w, r) } // randHex reads 8 random bytes and returns them as a hex string. It is used to generate // unique paths per-request to ensure that file size is tracked across calls. 
func randHex() string { b := make([]byte, 8) _, err := rand.Read(b) if err != nil { panic("failed to read random bytes: " + err.Error()) } return hex.EncodeToString(b) } func (mds *mockDataUploadServer) handleSnapshotLinks(w http.ResponseWriter, r *http.Request) { if r.Header.Get("User-Agent") != version.UserAgent() { http.Error(w, "should set user agent on all requests", http.StatusInternalServerError) return } if r.Header.Get(arkapi.TelemetryHeaderKey) == "" { w.WriteHeader(http.StatusInternalServerError) _, _ = w.Write([]byte("should set telemetry header on all requests")) return } if r.Header.Get("Content-Type") != "application/json" { http.Error(w, "should send JSON on all requests", http.StatusInternalServerError) return } if r.Header.Get("Authorization") != "Bearer "+successBearerToken { http.Error(w, "should authenticate using the correct bearer token", http.StatusInternalServerError) return } var req RetrievePresignedUploadURLRequest decoder := json.NewDecoder(r.Body) decoder.DisallowUnknownFields() if err := decoder.Decode(&req); err != nil { http.Error(w, `{"error": "Invalid request format"}`, http.StatusBadRequest) return } if req.SignatureVersion != SigV4Support { http.Error(w, fmt.Sprintf("post body does not set signature_version=%s", SigV4Support), http.StatusInternalServerError) return } if req.AgentVersion != version.PreflightVersion { http.Error(w, fmt.Sprintf("post body contains unexpected agent version: %s", req.AgentVersion), http.StatusInternalServerError) return } // Simulate invalid JSON response for RetrievePresignedUploadURL step if req.ClusterID == "invalid-json-retrieve-presigned" { w.WriteHeader(http.StatusOK) w.Header().Set("Content-Type", "application/json") _, _ = w.Write([]byte(`{"url":`)) // invalid JSON return } // Simulate invalid JSON response for RetrievePresignedUploadURL step if req.ClusterID == "invalid-response-post-data" { http.Error(w, "mock error", http.StatusInternalServerError) return } if req.ClusterID != 
successClusterID { http.Error(w, "post body contains cluster ID", http.StatusInternalServerError) return } if req.FileSize <= 0 { http.Error(w, "file size must be greater than 0", http.StatusInternalServerError) return } randomData := randHex() mds.expectedUploadValuesMutex.Lock() defer mds.expectedUploadValuesMutex.Unlock() uploadValues := uploadValues{ ClusterID: req.ClusterID, FileSize: req.FileSize, } mds.expectedUploadValues[randomData] = uploadValues presignedURL, err := url.JoinPath(mds.serverURL, "presigned-upload", randomData) if err != nil { http.Error(w, "failed to generate presigned URL", http.StatusInternalServerError) mds.t.Logf("failed to generate presigned URL: %v", err) return } // Write response body w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) _ = json.NewEncoder(w).Encode(struct { URL string `json:"url"` }{presignedURL}) } // An example of a real checksum mismatch error from the AWS API when the // request body does not match the checksum in the request header. const amzExampleChecksumError = ` BadDigest The SHA256 you specified did not match the calculated checksum. 
THR2V1RX700Z8SC7
F0xSC0H93Xs0BlCx6RjasZgrtjNkNB7lF4+yz1AiPQHswpdEoqj3iTgEN8SUWgV2Qm/laPobVIMz9SYTNHqdoA==
`

// handlePresignedUpload mocks the AWS S3 presigned upload URL: it validates
// the headers and object tags set by the client, then checks the uploaded body
// against the values supplied in the earlier RetrievePresignedUploadURL call.
func (mds *mockDataUploadServer) handlePresignedUpload(w http.ResponseWriter, r *http.Request) {
	randData := r.PathValue("randData")
	if randData == "" {
		http.Error(w, "missing randData in path; should match that returned in presigned url", http.StatusInternalServerError)
		return
	}

	mds.expectedUploadValuesMutex.Lock()
	uploadValues, ok := mds.expectedUploadValues[randData]
	mds.expectedUploadValuesMutex.Unlock()
	if !ok {
		http.Error(w, "didn't find a prior call to generate presigned URL", http.StatusInternalServerError)
		return
	}

	if r.Header.Get("User-Agent") != version.UserAgent() {
		http.Error(w, "should set user agent on all requests", http.StatusInternalServerError)
		return
	}
	if r.Header.Get(arkapi.TelemetryHeaderKey) != "" {
		w.WriteHeader(http.StatusInternalServerError)
		_, _ = w.Write([]byte("should NOT set telemetry header on requests to presigned URL"))
		return
	}
	amzChecksum := r.Header.Get("X-Amz-Checksum-Sha256")
	if amzChecksum == "" {
		http.Error(w, "should set x-amz-checksum-sha256 header on all requests", http.StatusInternalServerError)
		return
	}
	sseHeader := r.Header.Get("X-Amz-Server-Side-Encryption")
	if sseHeader != "AES256" {
		http.Error(w, "should set x-amz-server-side-encryption header to AES256 on all requests", http.StatusInternalServerError)
		return
	}
	taggingHeader := r.Header.Get("X-Amz-Tagging")
	if taggingHeader == "" {
		http.Error(w, "should set x-amz-tagging header on all requests", http.StatusInternalServerError)
		return
	}
	tags, err := url.ParseQuery(taggingHeader)
	if err != nil {
		http.Error(w, "x-amz-tagging header should be encoded as a valid query string", http.StatusInternalServerError)
		return
	}
	if tags.Get("agent_version") != version.PreflightVersion {
		http.Error(w, fmt.Sprintf("x-amz-tagging should contain an agent_version tag with value %s", version.PreflightVersion), http.StatusInternalServerError)
		return
	}
	if tags.Get("tenant_id") == "" {
		// TODO: if we change setup a bit, we can check the tenant_id matches the expected tenant_id from the test config, but for now, just check it's set
		http.Error(w, "x-amz-tagging should contain a tenant_id tag", http.StatusInternalServerError)
		return
	}
	if tags.Get("upload_type") != "k8s_snapshot" {
		http.Error(w, "x-amz-tagging should contain an upload_type tag with value k8s_snapshot", http.StatusInternalServerError)
		return
	}
	if tags.Get("uploader_id") != uploadValues.ClusterID {
		http.Error(w, "x-amz-tagging should contain an uploader_id tag which matches the cluster ID sent in the RetrievePresignedUploadURL request", http.StatusInternalServerError)
		return
	}
	if tags.Get("username") == "" {
		// TODO: if we change setup a bit, we can check the username matches the expected username from the test config
		// but for now, just check it's set
		http.Error(w, "x-amz-tagging should contain a username tag", http.StatusInternalServerError)
		return
	}
	if tags.Get("vendor") != "k8s" {
		http.Error(w, "x-amz-tagging should contain a vendor tag with value k8s", http.StatusInternalServerError)
		return
	}

	body, err := io.ReadAll(r.Body)
	require.NoError(mds.t, err)

	if uploadValues.FileSize != int64(len(body)) {
		http.Error(w, fmt.Sprintf("file size in request body should match that sent in RetrievePresignedUploadURL request; expected %d, got %d", uploadValues.FileSize, len(body)), http.StatusInternalServerError)
		return
	}

	hash := sha256.New()
	_, err = hash.Write(body)
	require.NoError(mds.t, err)

	// AWS S3 responds with a BadDigest error if the request body has a
	// different checksum than the checksum supplied in the request header.
	if amzChecksum != base64.StdEncoding.EncodeToString(hash.Sum(nil)) {
		// Write the XML error manually: http.Error would overwrite the
		// Content-Type header just set with text/plain.
		w.Header().Set("Content-Type", "application/xml")
		w.WriteHeader(http.StatusBadRequest)
		_, _ = io.WriteString(w, amzExampleChecksumError)
		// Previously this branch fell through and continued handling the
		// request (decoding the body and writing a second status); stop here.
		return
	}

	// Verifies that the new Snapshot format is used in the request body.
var snapshot Snapshot d := json.NewDecoder(bytes.NewBuffer(body)) d.DisallowUnknownFields() err = d.Decode(&snapshot) require.NoError(mds.t, err) assert.Equal(mds.t, successClusterID, snapshot.ClusterID) assert.Equal(mds.t, version.PreflightVersion, snapshot.AgentVersion) // AWS S3 responds with an empty body if the PUT succeeds w.WriteHeader(http.StatusOK) } ================================================ FILE: internal/cyberark/identity/advance_authentication_test.go ================================================ package identity import ( "fmt" "testing" "k8s.io/klog/v2" "k8s.io/klog/v2/ktesting" "github.com/jetstack/preflight/internal/cyberark/servicediscovery" _ "k8s.io/klog/v2/ktesting/init" ) func Test_IdentityAdvanceAuthentication(t *testing.T) { tests := map[string]struct { username string password []byte advanceBody advanceAuthenticationRequestBody expectedError error }{ "success": { username: successUser, password: []byte(successPassword), advanceBody: advanceAuthenticationRequestBody{ Action: ActionAnswer, MechanismID: successMechanismID, SessionID: successSessionID, TenantID: "foo", PersistentLogin: true, }, expectedError: nil, }, "incorrect password": { username: successUser, password: []byte("foo"), advanceBody: advanceAuthenticationRequestBody{ Action: ActionAnswer, MechanismID: successMechanismID, SessionID: successSessionID, TenantID: "foo", PersistentLogin: true, }, expectedError: fmt.Errorf(`got a failure response from request to advance authentication: message="Authentication (login or challenge) has failed. 
Please try again or contact your system administrator.", error="aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee:55555555555555555555555555555555"`), }, "bad action": { username: successUser, password: []byte(successPassword), advanceBody: advanceAuthenticationRequestBody{ Action: "foo", MechanismID: successMechanismID, SessionID: successSessionID, TenantID: "foo", PersistentLogin: true, }, expectedError: fmt.Errorf(`got a failure response from request to advance authentication: message="Authentication (login or challenge) has failed. Please try again or contact your system administrator.", error="aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee:55555555555555555555555555555555"`), }, "bad mechanism id": { username: successUser, password: []byte(successPassword), advanceBody: advanceAuthenticationRequestBody{ Action: ActionAnswer, MechanismID: "foo", SessionID: successSessionID, TenantID: "foo", PersistentLogin: true, }, expectedError: fmt.Errorf(`got a failure response from request to advance authentication: message="Authentication (login or challenge) has failed. Please try again or contact your system administrator.", error="aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee:55555555555555555555555555555555"`), }, "bad session id": { username: successUser, password: []byte(successPassword), advanceBody: advanceAuthenticationRequestBody{ Action: ActionAnswer, MechanismID: successMechanismID, SessionID: "foo", TenantID: "foo", PersistentLogin: true, }, expectedError: fmt.Errorf(`got a failure response from request to advance authentication: message="Authentication (login or challenge) has failed. 
Please try again or contact your system administrator.", error="aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee:55555555555555555555555555555555"`), }, "persistent login not set": { username: successUser, password: []byte(successPassword), advanceBody: advanceAuthenticationRequestBody{ Action: ActionAnswer, MechanismID: successMechanismID, SessionID: successSessionID, TenantID: "foo", PersistentLogin: false, }, expectedError: fmt.Errorf("got unexpected status code 403 Forbidden from request to advance authentication in CyberArk Identity API"), }, } for name, testSpec := range tests { t.Run(name, func(t *testing.T) { logger := ktesting.NewLogger(t, ktesting.DefaultConfig) ctx := klog.NewContext(t.Context(), logger) identityAPI, httpClient := MockIdentityServer(t) client := New(httpClient, identityAPI, servicediscovery.MockDiscoverySubdomain) err := client.doAdvanceAuthentication(ctx, testSpec.username, &testSpec.password, testSpec.advanceBody) if testSpec.expectedError != err { if testSpec.expectedError == nil { t.Errorf("didn't expect an error but got %v", err) return } if err == nil { t.Errorf("expected no error but got err=%v", testSpec.expectedError) return } if err.Error() != testSpec.expectedError.Error() { t.Errorf("expected err=%v\nbut got err=%v", testSpec.expectedError, err) return } } if testSpec.expectedError != nil { return } if client.tokenCached.Username != testSpec.username { t.Errorf("expected username %s to be set on cached token after authentication but got %q", testSpec.username, client.tokenCached.Username) return } if len(client.tokenCached.Token) == 0 { t.Errorf("expected token for %s to be set to %q but wasn't found", testSpec.username, mockSuccessfulStartAuthenticationToken) return } if client.tokenCached.Token != mockSuccessfulStartAuthenticationToken { t.Errorf("expected token for %s to be set to %q but was set to %q", testSpec.username, mockSuccessfulStartAuthenticationToken, client.tokenCached.Token) } }) } } 
================================================ FILE: internal/cyberark/identity/authenticated_http_client.go ================================================ package identity import ( "fmt" "net/http" ) type RequestAuthenticator func(req *http.Request) (string, error) // AuthenticateRequest is a helper function that adds the Authorization header to an HTTP request using a cached token. // It sets the Header directly, and if successful returns the username corresponding to the token. func (c *Client) AuthenticateRequest(req *http.Request) (string, error) { c.tokenCachedMutex.Lock() defer c.tokenCachedMutex.Unlock() if len(c.tokenCached.Token) == 0 { return "", fmt.Errorf("no token cached") } req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.tokenCached.Token)) return c.tokenCached.Username, nil } ================================================ FILE: internal/cyberark/identity/cmd/testidentity/main.go ================================================ package main import ( "context" "crypto/x509" "flag" "fmt" "os" "os/signal" "github.com/jetstack/venafi-connection-lib/http_client" "k8s.io/klog/v2" "github.com/jetstack/preflight/internal/cyberark/identity" "github.com/jetstack/preflight/internal/cyberark/servicediscovery" "github.com/jetstack/preflight/pkg/version" ) // This is a trivial CLI application for testing our identity client end-to-end. // It's not intended for distribution; it simply allows us to run our client and check // the login is successful. 
//
// To test against a tenant on the integration platform, set:
// ARK_DISCOVERY_API=https://platform-discovery.integration-cyberark.cloud/

const (
	subdomainFlag = "subdomain"
	usernameFlag  = "username"
	passwordEnv   = "ARK_SECRET"
)

var (
	subdomain string
	username  string
)

// run performs service discovery for the given subdomain and attempts a
// username/password login against the discovered Identity API.
func run(ctx context.Context) error {
	if subdomain == "" {
		return fmt.Errorf("no %s flag provided", subdomainFlag)
	}
	if username == "" {
		return fmt.Errorf("no %s flag provided", usernameFlag)
	}

	// The password is taken from the environment rather than a flag so it
	// doesn't appear in the process arguments.
	password := os.Getenv(passwordEnv)
	if password == "" {
		return fmt.Errorf("no password provided in %s", passwordEnv)
	}

	var rootCAs *x509.CertPool
	httpClient := http_client.NewDefaultClient(version.UserAgent(), rootCAs)

	sdClient := servicediscovery.New(httpClient, subdomain)
	services, _, err := sdClient.DiscoverServices(ctx)
	if err != nil {
		return fmt.Errorf("while performing service discovery: %s", err)
	}

	client := identity.New(httpClient, services.Identity.API, subdomain)
	err = client.LoginUsernamePassword(ctx, username, []byte(password))
	if err != nil {
		return fmt.Errorf("while performing login with username and password: %s", err)
	}
	return nil
}

func main() {
	defer klog.Flush()

	// klog flags are parsed on a separate flag set with verbosity pinned to 6
	// so the client's debug logging is always enabled in this test tool.
	flagSet := flag.NewFlagSet("test", flag.ExitOnError)
	klog.InitFlags(flagSet)
	_ = flagSet.Parse([]string{"--v", "6"})

	logger := klog.Background()
	ctx := klog.NewContext(context.Background(), logger)
	ctx, cancel := signal.NotifyContext(ctx, os.Interrupt)
	defer cancel()

	flag.StringVar(&subdomain, subdomainFlag, "cert-manager", "The subdomain to use for service discovery")
	flag.StringVar(&username, usernameFlag, "",
		fmt.Sprintf("Username to log in with. Password should be provided via %s envvar", passwordEnv),
	)
	flag.Parse()

	errCode := 0
	err := run(ctx)
	if err != nil {
		logger.Error(err, "execution failed")
		errCode = 1
	}
	klog.FlushAndExit(klog.ExitFlushTimeout, errCode)
}

================================================
FILE: internal/cyberark/identity/identity.go
================================================
package identity

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"sync"
	"time"

	"k8s.io/klog/v2"

	arkapi "github.com/jetstack/preflight/internal/cyberark/api"
	"github.com/jetstack/preflight/pkg/logs"
	"github.com/jetstack/preflight/pkg/version"
)

const (
	// MechanismUsernamePassword is the string which identifies the username/password mechanism for completing
	// a login attempt
	MechanismUsernamePassword = "UP"

	// ActionAnswer is the string which is sent to an AdvanceAuthentication request to indicate we're providing
	// the credentials in band in text format (i.e., we're sending a password)
	ActionAnswer = "Answer"

	// SummaryLoginSuccess is returned by a StartAuthentication to indicate that login does not need
	// to proceed to the AdvanceAuthentication step.
	// We don't handle this because we don't expect it to happen.
	SummaryLoginSuccess = "LoginSuccess"

	// SummaryNewPackage is returned by a StartAuthentication call when the user must complete a challenge
	// to complete the log in. This is expected on a first login.
	SummaryNewPackage = "NewPackage"

	// maxStartAuthenticationBodySize is the maximum allowed size for a response body from the CyberArk Identity
	// StartAuthentication endpoint.
	// As of 2025-04-30, a response from the integration environment is ~1kB
	maxStartAuthenticationBodySize = 10 * 1024

	// maxAdvanceAuthenticationBodySize is the maximum allowed size for a response body from the CyberArk Identity
	// AdvanceAuthentication endpoint.
// As of 2025-04-30, a response from the integration environment is ~3kB maxAdvanceAuthenticationBodySize = 30 * 1024 ) var ( errNoUPMechanism = fmt.Errorf("found no authentication mechanism with the username + password type (%s); unable to complete login using this identity", MechanismUsernamePassword) ) // startAuthenticationRequestBody is the body sent to the StartAuthentication endpoint in CyberArk Identity; // see https://api-docs.cyberark.com/identity-docs-api/docs/security-api#/Login/start-authentication type startAuthenticationRequestBody struct { // TenantID is the internal ID of the tenant containing the user attempting to log in. In testing, // it seems that the subdomain works in this field. TenantID string `json:"TenantId"` // Version is set to 1.0 Version string `json:"Version"` // User is the username of the user trying to log in. For a human, this is likely to be an email address. User string `json:"User"` } // identityResponseBody generically wraps a response from the Identity server; the Result will differ for // responses from different endpoint, but the other fields are similar. // Not all fields in the JSON returned from the server are replicated here, since we only need a subset. type identityResponseBody[T any] struct { // Success is a simple boolean indicator from the server of success. // NB: The JSON key is lowercase, in contrast to other JSON keys in the response. Success bool `json:"success"` // Result holds the information we need to parse from successful responses Result T `json:"Result"` // Message holds an information message such as an error message. Experimentally it seems to be null // for successful attempts. Message string `json:"Message"` // ErrorID holds an error ID when something goes wrong with the call. // Not to be confused with ErrorCode; for failure messages, we see ErrorID set and ErrorCode null. 
ErrorID string `json:"ErrorID"` // NB: Other fields omitted since we don't need them } // startAuthenticationResponseBody is the response returned by the server from a request to StartAuthentication. type startAuthenticationResponseBody identityResponseBody[startAuthenticationResponseResult] // advanceAuthenticationResponseBody is the response from the AdvanceAuthentication endpoint. type advanceAuthenticationResponseBody identityResponseBody[advanceAuthenticationResponseResult] // startAuthenticationResponseResult holds the important data we need to pass to AdvanceAuthentication type startAuthenticationResponseResult struct { // SessionID identifies this login attempt, and must be passed with the // follow-up AdvanceAuthentication request. SessionID string `json:"SessionId"` // Challenges provides a list of methods for logging in. We need to look // for the correct login method we want to use, and then find the MechanismId // for that login method to pass to the AdvanceAuthentication request. Challenges []startAuthenticationChallenge `json:"Challenges"` // Summary indicates whether a StartAuthentication calls needs to be followed up with an AdvanceAuthentication // call. From the docs: // > If the user exists, the response contains a Summary of either LoginSuccess or NewPackage. // > You receive LoginSuccess when the request includes an .ASPXAUTH cookie from prior successful authentication. Summary string `json:"Summary"` } // startAuthenticationChallenge is an entry in the array of MFA mechanisms; // at least one MFA mechanism should be satisfied by the user. type startAuthenticationChallenge struct { Mechanisms []startAuthenticationMechanism `json:"Mechanisms"` } // startAuthenticationMechanism holds details of a given mechanism for authenticating. // This corresponds to "how" the user authenticates, e.g. via password or email, etc type startAuthenticationMechanism struct { // Name represents the name of the challenge mechanism. 
This is usually an upper-case // string, such as "UP" for "username / password" Name string `json:"Name"` // Enrolled is true if the given mechanism is available for the user attempting // to authenticate. Enrolled bool `json:"Enrolled"` // MechanismID uniquely identifies a particular mechanism, and must be passed // to the AdvanceAuthentication request when authenticating. MechanismID string `json:"MechanismId"` } // advanceAuthenticationRequestBody is a request body for the AdvanceAuthentication call to CyberArk Identity, // which should usually be obtained by making requests to StartAuthentication first. // WARNING: This struct can hold secret data (a user's password) // See: https://api-docs.cyberark.com/identity-docs-api/docs/security-api#/Login/advance-authentication type advanceAuthenticationRequestBody struct { // Action is a string identifying how we're intending to log in; for username/password, this is // set to "Answer" to indicate that the password is held in the Answer field Action string `json:"Action"` // Answer holds the user's password to send to the server // WARNING: THIS IS SECRET DATA. Answer string `json:"Answer"` // MechanismID identifies the login mechanism and must be retrieved from a call to StartAuthentication MechanismID string `json:"MechanismId"` // SessionID identifies the login session and must be retrieved from a call to StartAuthentication SessionID string `json:"SessionId"` // TenantID identifies the tenant; this can be inferred from the URL if we used service discovery to // get the Identity API URL, but we set it anyway to be explicit. TenantID string `json:"TenantId"` // PersistentLogin is documented to "[indicate] whether the session should persist after the user // closes the browser"; for service-to-service auth which we're trying to do, we set this to true. 
PersistentLogin bool `json:"PersistentLogin"` } // advanceAuthenticationResponseResult is the specific information returned for a successful AdvanceAuthentication call type advanceAuthenticationResponseResult struct { // Summary holds a "brief summary of the authentication outcome" Summary string `json:"Summary"` // Token is the auth token we need to save; this is the result of the login // process which can be sent as a bearer token to other services. Token string `json:"Token"` // Other fields omitted as they're not needed } // Client is an client for interacting with the CyberArk Identity API and performing a login using a username and password. // For context on the behaviour of this client, see the Python SDK: https://github.com/cyberark/ark-sdk-python/blob/3be12c3f2d3a2d0407025028943e584b6edc5996/ark_sdk_python/auth/identity/ark_identity.py type Client struct { httpClient *http.Client baseURL string subdomain string tokenCached token tokenCachedMutex sync.Mutex tokenCachedTime time.Time } // token is a wrapper type for holding auth tokens we want to cache. type token struct { Username string Token string } // New returns an initialized CyberArk Identity client using a default service discovery client. func New(httpClient *http.Client, baseURL string, subdomain string) *Client { return &Client{ httpClient: httpClient, baseURL: baseURL, subdomain: subdomain, tokenCached: token{}, tokenCachedMutex: sync.Mutex{}, } } // LoginUsernamePassword performs a blocking call to fetch an auth token from CyberArk Identity using the given username and password. // The password is zeroed after use. // Tokens are cached internally and are not directly accessible to code; use Client.AuthenticateRequest to add credentials // to an *http.Request. 
func (c *Client) LoginUsernamePassword(ctx context.Context, username string, password []byte) error { // note: we hold the mutex for the whole login attempt to ensure that only one login attempt can be in flight at once, // and to ensure that the token cache is correctly updated c.tokenCachedMutex.Lock() defer c.tokenCachedMutex.Unlock() defer func() { for i := range password { password[i] = 0x00 } }() if time.Since(c.tokenCachedTime) < 15*time.Minute && c.tokenCached.Username == username { // If the cached token is recent and for the same username, we can reuse it. klog.FromContext(ctx).V(2).Info("reusing cached token for user", "username", username) return nil } advanceRequestBody, err := c.doStartAuthentication(ctx, username) if err != nil { return err } // NB: We explicitly pass advanceRequestBody by value here so that when we add the password // in doAdvanceAuthentication we don't create a copy of the password slice elsewhere. err = c.doAdvanceAuthentication(ctx, username, &password, advanceRequestBody) if err != nil { return err } return err } // doStartAuthentication performs the initial request to start the login process using a username and password. // It returns a partially initialized advanceAuthenticationRequestBody ready to send to the server to complete // the login. As this function doesn't have access to the password, it must be added to the returned request body // by the caller before being used as a request to AdvanceAuthentication. 
// See https://api-docs.cyberark.com/identity-docs-api/docs/security-api#/Login/start-authentication func (c *Client) doStartAuthentication(ctx context.Context, username string) (advanceAuthenticationRequestBody, error) { response := advanceAuthenticationRequestBody{} logger := klog.FromContext(ctx).WithValues("source", "Identity.doStartAuthentication") body := startAuthenticationRequestBody{ Version: "1.0", // this is the only value in the docs TenantID: c.subdomain, User: username, } bodyJSON, err := json.Marshal(body) if err != nil { return response, fmt.Errorf("failed to marshal JSON for request to StartAuthentication endpoint: %s", err) } endpoint, err := url.JoinPath(c.baseURL, "Security", "StartAuthentication") if err != nil { return response, fmt.Errorf("failed to create URL for request to CyberArk Identity StartAuthentication: %s", err) } request, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(bodyJSON)) if err != nil { return response, fmt.Errorf("failed to initialise request to Identity endpoint %s: %s", endpoint, err) } setIdentityHeaders(request) httpResponse, err := c.httpClient.Do(request) if err != nil { return response, fmt.Errorf("failed to perform HTTP request to start authentication: %s", err) } defer httpResponse.Body.Close() if httpResponse.StatusCode != http.StatusOK { err := fmt.Errorf("got unexpected status code %s from request to start authentication in CyberArk Identity API", httpResponse.Status) if httpResponse.StatusCode >= 500 || httpResponse.StatusCode < 400 { return response, err } // If we got a 4xx error, we shouldn't retry return response, err } startAuthResponse := startAuthenticationResponseBody{} err = json.NewDecoder(io.LimitReader(httpResponse.Body, maxStartAuthenticationBodySize)).Decode(&startAuthResponse) if err != nil { if err == io.ErrUnexpectedEOF { return response, fmt.Errorf("rejecting JSON response from server as it was too large or was truncated") } return response, 
fmt.Errorf("failed to parse JSON from otherwise successful request to start authentication: %s", err) } if !startAuthResponse.Success { return response, fmt.Errorf("got a failure response from request to start authentication: message=%q, error=%q", startAuthResponse.Message, startAuthResponse.ErrorID) } logger.V(logs.Debug).Info("made successful request to StartAuthentication", "summary", startAuthResponse.Result.Summary) if startAuthResponse.Result.Summary != SummaryNewPackage { // This means we can't respond to whatever summary the server sent. // The best thing to do is try and find a challenge we can solve anyway. klog.FromContext(ctx).Info("got an unexpected Summary from StartAuthentication response; will attempt to complete a login challenge anyway", "summary", startAuthResponse.Result.Summary) } // We can only handle a UP type challenge, and if there are any other challenges, we'll have to fail because we can't handle them. // https://github.com/cyberark/ark-sdk-python/blob/3be12c3f2d3a2d0407025028943e584b6edc5996/ark_sdk_python/auth/identity/ark_identity.py#L405 switch len(startAuthResponse.Result.Challenges) { case 0: return response, fmt.Errorf("got no valid challenges in response to start authentication; unable to log in") case 1: // do nothing, this is ideal default: return response, fmt.Errorf("got %d challenges in response to start authentication, which means MFA may be enabled; unable to log in", len(startAuthResponse.Result.Challenges)) } challenge := startAuthResponse.Result.Challenges[0] switch len(challenge.Mechanisms) { case 0: // presumably this shouldn't happen, but handle the case anyway return response, fmt.Errorf("got no mechanisms for challenge from Identity server") case 1: // do nothing, this is ideal default: return response, fmt.Errorf("got %d mechanisms in response to start authentication, which means MFA may be enabled; unable to log in", len(challenge.Mechanisms)) } mechanism := challenge.Mechanisms[0] if !mechanism.Enrolled || 
mechanism.Name != MechanismUsernamePassword { return response, errNoUPMechanism } response.Action = ActionAnswer response.MechanismID = mechanism.MechanismID response.SessionID = startAuthResponse.Result.SessionID response.TenantID = c.subdomain response.PersistentLogin = true return response, nil } // doAdvanceAuthentication performs the second step of the login process, sending the password to the server // and receiving a token in response. // See: https://api-docs.cyberark.com/identity-docs-api/docs/security-api#/Login/advance-authentication func (c *Client) doAdvanceAuthentication(ctx context.Context, username string, password *[]byte, requestBody advanceAuthenticationRequestBody) error { if password == nil { return fmt.Errorf("password must not be nil; this is a programming error") } requestBody.Answer = string(*password) bodyJSON, err := json.Marshal(requestBody) if err != nil { return fmt.Errorf("failed to marshal JSON for request to AdvanceAuthentication endpoint: %s", err) } endpoint, err := url.JoinPath(c.baseURL, "Security", "AdvanceAuthentication") if err != nil { return fmt.Errorf("failed to create URL for request to CyberArk Identity AdvanceAuthentication: %s", err) } request, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(bodyJSON)) if err != nil { return fmt.Errorf("failed to initialise request to Identity endpoint %s: %s", endpoint, err) } setIdentityHeaders(request) httpResponse, err := c.httpClient.Do(request) if err != nil { return fmt.Errorf("failed to perform HTTP request to advance authentication: %s", err) } defer httpResponse.Body.Close() // Important: Even login failures can produce a 200 status code, so this // check won't catch all failures if httpResponse.StatusCode != http.StatusOK { return fmt.Errorf("got unexpected status code %s from request to advance authentication in CyberArk Identity API", httpResponse.Status) } advanceAuthResponse := advanceAuthenticationResponseBody{} err = 
json.NewDecoder(io.LimitReader(httpResponse.Body, maxAdvanceAuthenticationBodySize)).Decode(&advanceAuthResponse) if err != nil { if err == io.ErrUnexpectedEOF { return fmt.Errorf("rejecting JSON response from server as it was too large or was truncated") } return fmt.Errorf("failed to parse JSON from otherwise successful request to advance authentication: %s", err) } if !advanceAuthResponse.Success { return fmt.Errorf("got a failure response from request to advance authentication: message=%q, error=%q", advanceAuthResponse.Message, advanceAuthResponse.ErrorID) } if advanceAuthResponse.Result.Summary != SummaryLoginSuccess { // IF MFA was enabled and we got here, there's probably nothing to be gained from a retry // and the best thing to do is fail now so the user can fix MFA settings. return fmt.Errorf("got a %s response from AdvanceAuthentication; this implies that the user account %s requires MFA, which is not supported. Try unlocking MFA for this user", advanceAuthResponse.Result.Summary, username) } klog.FromContext(ctx).Info("successfully completed AdvanceAuthentication request to CyberArk Identity; login complete", "username", username) // NB: This assumes we already hold the token cache mutex, which we do in LoginUsernamePassword, so this is safe. c.tokenCachedTime = time.Now() c.tokenCached = token{ Username: username, Token: advanceAuthResponse.Result.Token, } return nil } // setIdentityHeaders sets the headers required for requests to the CyberArk Identity API. // From the docs: // Your request header must contain X-IDAP-NATIVE-CLIENT:true to indicate that an application is invoking // the CyberArk Identity endpoint, and // Content-Type: application/json to indicate that the body is in JSON format. // Experimentally, it seems the X-IDAP-NATIVE-CLIENT is not required but we'll follow the docs. 
func setIdentityHeaders(r *http.Request) { // The "canonicalheader" linter warns us that the IDAP-NATIVE-CLIENT header isn't canonical, but we silence it here // since we want to exactly match the docs. r.Header.Set("Content-Type", "application/json") r.Header.Set("X-IDAP-NATIVE-CLIENT", "true") //nolint: canonicalheader version.SetUserAgent(r) // Add telemetry headers arkapi.SetTelemetryRequestHeader(r) } ================================================ FILE: internal/cyberark/identity/identity_test.go ================================================ package identity // This file contains tests for the LoginUsernamePassword function in the // identity package. The tests cover both a mock API server and the real API, // depending on the environment variables set. The tests are intended to // demonstrate that the mock API behaves the same as the real API import ( "net/http" "os" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/klog/v2" "k8s.io/klog/v2/ktesting" "github.com/jetstack/preflight/internal/cyberark/servicediscovery" arktesting "github.com/jetstack/preflight/internal/cyberark/testing" _ "k8s.io/klog/v2/ktesting/init" ) // inputs holds the various input values for the tests. type inputs struct { httpClient *http.Client baseURL string subdomain string username string password string } // TestLoginUsernamePassword_MockAPI tests the LoginUsernamePassword function // against a mock API server. The mock server is configured to return different // responses based on the username and password used in the request. func TestLoginUsernamePassword_MockAPI(t *testing.T) { loginUsernamePasswordTests(t, func(t testing.TB) inputs { baseURL, httpClient := MockIdentityServer(t) return inputs{ httpClient: httpClient, baseURL: baseURL, subdomain: "subdomain-ignored-by-mock", username: successUser, password: successPassword, } }) } // TestLoginUsernamePassword_RealAPI tests the LoginUsernamePassword function // against the real API. 
The environment variables are used to configure the // client. func TestLoginUsernamePassword_RealAPI(t *testing.T) { arktesting.SkipIfNoEnv(t) subdomain := os.Getenv("ARK_SUBDOMAIN") httpClient := http.DefaultClient services, _, err := servicediscovery.New(httpClient, subdomain).DiscoverServices(t.Context()) require.NoError(t, err) loginUsernamePasswordTests(t, func(t testing.TB) inputs { return inputs{ httpClient: httpClient, baseURL: services.Identity.API, subdomain: subdomain, username: os.Getenv("ARK_USERNAME"), password: os.Getenv("ARK_SECRET"), } }) } // loginUsernamePasswordTests runs tests which are expected to pass regardless of // whether the mock or real API is used. func loginUsernamePasswordTests(t *testing.T, inputsGenerator func(t testing.TB) inputs) { type testCase struct { name string modifier func(in *inputs) expectedError string } tests := []testCase{ { name: "success", }, { name: "bad-username", modifier: func(in *inputs) { in.username = failureUser }, expectedError: `^got a failure response from request to advance authentication: ` + `message="Authentication \(login or challenge\) has failed\. ` + `Please try again or contact your system administrator\."`, }, { name: "empty-username", modifier: func(in *inputs) { in.username = "" }, expectedError: `^got a failure response from request to start authentication: ` + `message="Authentication \(login or challenge\) has failed\. ` + `Please try again or contact your system administrator\."`, }, { name: "bad-password", modifier: func(in *inputs) { in.password = "bad-password" }, expectedError: `^got a failure response from request to advance authentication: ` + `message="Authentication \(login or challenge\) has failed\. 
` + `Please try again or contact your system administrator\."`, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { logger := ktesting.NewLogger(t, ktesting.DefaultConfig) ctx := klog.NewContext(t.Context(), logger) in := inputsGenerator(t) if test.modifier != nil { test.modifier(&in) } cl := New(in.httpClient, in.baseURL, in.subdomain) err := cl.LoginUsernamePassword(ctx, in.username, []byte(in.password)) if test.expectedError != "" { if assert.Error(t, err) { assert.Regexp(t, test.expectedError, err.Error()) } return } require.NoError(t, err) }) } } ================================================ FILE: internal/cyberark/identity/mock.go ================================================ package identity import ( "encoding/json" "errors" "fmt" "net/http" "net/http/httptest" "testing" "github.com/stretchr/testify/assert" "k8s.io/client-go/transport" arkapi "github.com/jetstack/preflight/internal/cyberark/api" "github.com/jetstack/preflight/pkg/version" _ "embed" ) const ( successUser = "test@example.com" failureUser = "test-fail@example.com" successUserMultipleChallenges = "test-multiple-challenges@example.com" successUserMultipleMechanisms = "test-multiple-mechanisms@example.com" noUPMechanism = "noup@example.com" successMechanismID = "aaaaaaa_AAAAAAAAAAAAAAAAAAAAAAAAAAAA-1111111" successSessionID = "mysessionid101" successPassword = "somepassword" // mockSuccessfulStartAuthenticationToken is the token returned by the // mock server in response to a successful AdvanceAuthentication request // Must match what's in testdata/advance_authentication_success.json mockSuccessfulStartAuthenticationToken = "success-token" ) var ( //go:embed testdata/start_authentication_success.json startAuthenticationSuccessResponse string //go:embed testdata/start_authentication_bad_user_session_id.json startAuthenticationBadUserResponse string //go:embed testdata/start_authentication_success_multiple_challenges.json startAuthenticationSuccessMultipleChallengesResponse 
string //go:embed testdata/start_authentication_success_multiple_mechanisms.json startAuthenticationSuccessMultipleMechanismsResponse string //go:embed testdata/start_authentication_success_no_up_mechanism.json startAuthenticationNoUPMechanismResponse string //go:embed testdata/start_authentication_failure.json startAuthenticationFailureResponse string //go:embed testdata/advance_authentication_success.json advanceAuthenticationSuccessResponse string //go:embed testdata/advance_authentication_failure.json advanceAuthenticationFailureResponse string ) type mockIdentityServer struct { t testing.TB } // MockIdentityServer returns a URL of a mocked CyberArk identity server and an // HTTP client with the CA certs needed to connect to it.. func MockIdentityServer(t testing.TB) (string, *http.Client) { mis := &mockIdentityServer{ t: t, } server := httptest.NewTLSServer(mis) t.Cleanup(server.Close) httpClient := server.Client() httpClient.Transport = transport.NewDebuggingRoundTripper(httpClient.Transport, transport.DebugByContext) return server.URL, httpClient } func (mis *mockIdentityServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { mis.t.Log(r.Method, r.RequestURI) switch r.URL.String() { case "/Security/StartAuthentication": mis.handleStartAuthentication(w, r) return case "/Security/AdvanceAuthentication": mis.handleAdvanceAuthentication(w, r) return default: // The server returns an HTML page for this case, but that doesn't seem important for us to replicate w.WriteHeader(http.StatusNotFound) _, _ = w.Write([]byte("not found")) } } func checkRequestHeaders(r *http.Request) error { var errs []error if r.Header.Get("User-Agent") != version.UserAgent() { errs = append(errs, fmt.Errorf("should set user agent on all requests")) } if r.Header.Get("Content-Type") != "application/json" { errs = append(errs, fmt.Errorf("should request JSON on all requests")) } if r.Header.Get("X-IDAP-NATIVE-CLIENT") != "true" { //nolint: canonicalheader errs = append(errs, 
fmt.Errorf("should set X-IDAP-NATIVE-CLIENT header to true on all requests")) } if r.Header.Get(arkapi.TelemetryHeaderKey) == "" { errs = append(errs, fmt.Errorf("should set telemetry header on all requests")) } return errors.Join(errs...) } func (mis *mockIdentityServer) handleStartAuthentication(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { // Empirically we saw that a PUT and a DELETE request to this endpoint was actually successful, // but the endpoint is documented to use POST so we'll ensure that only that method is used. w.WriteHeader(http.StatusForbidden) _, _ = w.Write([]byte(`{"message":"endpoint is documented to only accept POST"}`)) return } if err := checkRequestHeaders(r); !assert.NoError(mis.t, err, "request headers are not correct") { w.WriteHeader(http.StatusForbidden) return } reqBody := startAuthenticationRequestBody{} decoder := json.NewDecoder(r.Body) decoder.DisallowUnknownFields() if err := decoder.Decode(&reqBody); err != nil { w.WriteHeader(http.StatusInternalServerError) fmt.Fprintf(w, `{"message":"failed to unmarshal request body: %s"}`, err) return } switch reqBody.User { case successUser: w.WriteHeader(http.StatusOK) _, _ = w.Write([]byte(startAuthenticationSuccessResponse)) case successUserMultipleChallenges: w.WriteHeader(http.StatusOK) _, _ = w.Write([]byte(startAuthenticationSuccessMultipleChallengesResponse)) case successUserMultipleMechanisms: w.WriteHeader(http.StatusOK) _, _ = w.Write([]byte(startAuthenticationSuccessMultipleMechanismsResponse)) case noUPMechanism: w.WriteHeader(http.StatusOK) _, _ = w.Write([]byte(startAuthenticationNoUPMechanismResponse)) case "": // experimentally, this case produces a 200 response but a "failed" body w.WriteHeader(http.StatusOK) _, _ = w.Write([]byte(startAuthenticationFailureResponse)) case failureUser: // Experimentally, the real API produces a 200 response and what looks // like a success response body. 
but the login is rejected later by the // AdvanceAuthentication stage, perhaps by virtue of the sessionID which // is returned here and supplied to AdvanceAuthentication. w.WriteHeader(http.StatusOK) _, _ = w.Write([]byte(startAuthenticationBadUserResponse)) default: panic("programmer error: should not be reached") } } func (mis *mockIdentityServer) handleAdvanceAuthentication(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { w.WriteHeader(http.StatusForbidden) _, _ = w.Write([]byte(`{"message":"endpoint is documented to only accept POST"}`)) return } if err := checkRequestHeaders(r); err != nil { w.WriteHeader(http.StatusForbidden) fmt.Fprintf(w, `{"message":"issues with headers sent to mock server: %s"}`, err.Error()) } decoder := json.NewDecoder(r.Body) decoder.DisallowUnknownFields() advanceBody := &advanceAuthenticationRequestBody{} if err := decoder.Decode(&advanceBody); err != nil { w.WriteHeader(http.StatusInternalServerError) fmt.Fprintf(w, `{"message":"failed to unmarshal request body: %s"}`, err) return } // Important: The actual server will return 200 OK even if the login fails. // Most failure responses should copy that. 
if !advanceBody.PersistentLogin { // this is something we enforce but wouldn't actually be an error from // a real server, so we return a different error here w.WriteHeader(http.StatusForbidden) _, _ = w.Write([]byte(`expected PersistentLogin to be true`)) return } if advanceBody.SessionID != successSessionID || advanceBody.MechanismID != successMechanismID || advanceBody.Action != ActionAnswer || advanceBody.Answer != successPassword { w.WriteHeader(http.StatusOK) _, _ = w.Write([]byte(advanceAuthenticationFailureResponse)) return } w.WriteHeader(http.StatusOK) _, _ = w.Write([]byte(advanceAuthenticationSuccessResponse)) } ================================================ FILE: internal/cyberark/identity/start_authentication_test.go ================================================ package identity import ( "fmt" "testing" "github.com/jetstack/preflight/internal/cyberark/servicediscovery" ) func Test_IdentityStartAuthentication(t *testing.T) { tests := map[string]struct { username string expectedError error }{ "successful request": { username: successUser, expectedError: nil, }, "successful request, multiple challenges": { username: successUserMultipleChallenges, expectedError: fmt.Errorf("got 2 challenges in response to start authentication, which means MFA may be enabled; unable to log in"), }, "successful request, multiple mechanisms": { username: successUserMultipleMechanisms, expectedError: fmt.Errorf("got 2 mechanisms in response to start authentication, which means MFA may be enabled; unable to log in"), }, "successful request, no username / password (UP) mechanism available": { username: noUPMechanism, expectedError: errNoUPMechanism, }, "failed request": { // experimentally we've seen the failure response when passing an empty username username: "", expectedError: fmt.Errorf(`got a failure response from request to start authentication: message="Authentication (login or challenge) has failed. 
Please try again or contact your system administrator.", error="00000000-0400-4000-1111-222222222222:01234567890abcdef"`), }, } for name, testSpec := range tests { t.Run(name, func(t *testing.T) { ctx := t.Context() identityServer, httpClient := MockIdentityServer(t) client := New(httpClient, identityServer, servicediscovery.MockDiscoverySubdomain) advanceBody, err := client.doStartAuthentication(ctx, testSpec.username) if err != nil { if testSpec.expectedError == nil { t.Errorf("didn't expect an error but got %v", err) return } if err.Error() != testSpec.expectedError.Error() { t.Errorf("expected err=%v\nbut got err=%v", testSpec.expectedError, err) return } } if testSpec.expectedError != nil { return } if advanceBody.TenantID != client.subdomain { t.Errorf("expected advanceAuthenticationRequestBody.TenantID to be %s but got %s", client.subdomain, advanceBody.TenantID) } if advanceBody.SessionID != successSessionID { t.Errorf("expected advanceAuthenticationRequestBody.SessionID to be %s but got %s", successSessionID, advanceBody.SessionID) } if advanceBody.MechanismID != successMechanismID { t.Errorf("expected advanceAuthenticationRequestBody.MechanismID to be %s but got %s", successMechanismID, advanceBody.MechanismID) } if advanceBody.Action != ActionAnswer { t.Errorf("expected advanceAuthenticationRequestBody.Action to be %s but got %s", ActionAnswer, advanceBody.Action) } if !advanceBody.PersistentLogin { t.Error("expected advanceAuthenticationRequestBody.PersistentLogin to be true but it wasn't") } }) } } ================================================ FILE: internal/cyberark/identity/testdata/advance_authentication_failure.json ================================================ { "success": false, "Result": { "Summary": "Failure" }, "Message": "Authentication (login or challenge) has failed. 
Please try again or contact your system administrator.", "MessageID": null, "Exception": null, "ErrorID": "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee:55555555555555555555555555555555", "ErrorCode": null, "IsSoftError": false, "InnerExceptions": null } ================================================ FILE: internal/cyberark/identity/testdata/advance_authentication_success.json ================================================ { "success": true, "Result": { "AuthLevel": "Normal", "DisplayName": "Namey McNamerson", "Token": "success-token", "Auth": "auth-auth", "UserId": "11111111-2222-3333-4444-555555555555", "EmailAddress": "name@example.com", "UserDirectory": "CDS", "PodFqdn": "xxx0000.id.integration-cyberark.cloud", "User": "name@example.org.111111", "CustomerID": "XXX0000", "SystemID": "XXX0000", "SourceDsType": "CDS", "Summary": "LoginSuccess" }, "Message": null, "MessageID": null, "Exception": null, "ErrorID": null, "ErrorCode": null, "IsSoftError": false, "InnerExceptions": null } ================================================ FILE: internal/cyberark/identity/testdata/start_authentication_bad_user_session_id.json ================================================ { "success": true, "Result": { "ClientHints": { "PersistDefault": false, "AllowPersist": true, "AllowForgotPassword": true, "EndpointAuthenticationEnabled": false }, "Version": "1.0", "SessionId": "bad-user-session-id", "EventDescription": null, "RetryWaitingTime": 0, "SecurityImageName": null, "AllowLoginMfaCache": false, "Challenges": [ { "Mechanisms": [ { "AnswerType": "Text", "Name": "UP", "PromptMechChosen": "Enter Password", "PromptSelectMech": "Password", "MechanismId": "aaaaaaa_AAAAAAAAAAAAAAAAAAAAAAAAAAAA-1111111", "Enrolled": true } ] } ], "Summary": "NewPackage", "TenantId": "TENANTID" }, "Message": null, "MessageID": null, "Exception": null, "ErrorID": null, "ErrorCode": null, "IsSoftError": false, "InnerExceptions": null } ================================================ FILE: 
internal/cyberark/identity/testdata/start_authentication_failure.json ================================================ { "success": false, "Result": { "Summary": "Undefined" }, "Message": "Authentication (login or challenge) has failed. Please try again or contact your system administrator.", "MessageID": null, "Exception": null, "ErrorID": "00000000-0400-4000-1111-222222222222:01234567890abcdef", "ErrorCode": null, "IsSoftError": false, "InnerExceptions": null } ================================================ FILE: internal/cyberark/identity/testdata/start_authentication_success.json ================================================ { "success": true, "Result": { "ClientHints": { "PersistDefault": false, "AllowPersist": true, "AllowForgotPassword": true, "EndpointAuthenticationEnabled": false }, "Version": "1.0", "SessionId": "mysessionid101", "EventDescription": null, "RetryWaitingTime": 0, "SecurityImageName": null, "AllowLoginMfaCache": false, "Challenges": [ { "Mechanisms": [ { "AnswerType": "Text", "Name": "UP", "PromptMechChosen": "Enter Password", "PromptSelectMech": "Password", "MechanismId": "aaaaaaa_AAAAAAAAAAAAAAAAAAAAAAAAAAAA-1111111", "Enrolled": true } ] } ], "Summary": "NewPackage", "TenantId": "TENANTID" }, "Message": null, "MessageID": null, "Exception": null, "ErrorID": null, "ErrorCode": null, "IsSoftError": false, "InnerExceptions": null } ================================================ FILE: internal/cyberark/identity/testdata/start_authentication_success_multiple_challenges.json ================================================ { "success": true, "Result": { "ClientHints": { "PersistDefault": false, "AllowPersist": true, "AllowForgotPassword": true, "EndpointAuthenticationEnabled": false }, "Version": "1.0", "SessionId": "mysessionid101", "EventDescription": null, "RetryWaitingTime": 0, "SecurityImageName": null, "AllowLoginMfaCache": false, "Challenges": [ { "Mechanisms": [ { "AnswerType": "StartOob", "Name": "PF", "PartialPhoneNumber": 
"0775", "PromptMechChosen": "We will now attempt to call your phone (0000). Please follow the instructions to proceed with authentication.", "PromptSelectMech": "Phone Call... XXX-0000", "MechanismId": "bbbbbbb_BBBBBBBBBBBBBBBBBBBBBBBBBBBB-2222222", "Enrolled": true } ] }, { "Mechanisms": [ { "AnswerType": "Text", "Name": "UP", "PromptMechChosen": "Enter Password", "PromptSelectMech": "Password", "MechanismId": "aaaaaaa_AAAAAAAAAAAAAAAAAAAAAAAAAAAA-1111111", "Enrolled": true } ] } ], "Summary": "NewPackage", "TenantId": "TENANTID" }, "Message": null, "MessageID": null, "Exception": null, "ErrorID": null, "ErrorCode": null, "IsSoftError": false, "InnerExceptions": null } ================================================ FILE: internal/cyberark/identity/testdata/start_authentication_success_multiple_mechanisms.json ================================================ { "success": true, "Result": { "ClientHints": { "PersistDefault": false, "AllowPersist": true, "AllowForgotPassword": true, "EndpointAuthenticationEnabled": false }, "Version": "1.0", "SessionId": "mysessionid101", "EventDescription": null, "RetryWaitingTime": 0, "SecurityImageName": null, "AllowLoginMfaCache": false, "Challenges": [ { "Mechanisms": [ { "AnswerType": "Text", "Name": "UP", "PromptMechChosen": "Enter Password", "PromptSelectMech": "Password", "MechanismId": "aaaaaaa_AAAAAAAAAAAAAAAAAAAAAAAAAAAA-1111111", "Enrolled": true }, { "AnswerType": "StartOob", "Name": "PF", "PartialPhoneNumber": "0775", "PromptMechChosen": "We will now attempt to call your phone (0000). Please follow the instructions to proceed with authentication.", "PromptSelectMech": "Phone Call... 
XXX-0000", "MechanismId": "bbbbbbb_BBBBBBBBBBBBBBBBBBBBBBBBBBBB-2222222", "Enrolled": true } ] } ], "Summary": "NewPackage", "TenantId": "TENANTID" }, "Message": null, "MessageID": null, "Exception": null, "ErrorID": null, "ErrorCode": null, "IsSoftError": false, "InnerExceptions": null } ================================================ FILE: internal/cyberark/identity/testdata/start_authentication_success_no_up_mechanism.json ================================================ { "success": true, "Result": { "ClientHints": { "PersistDefault": false, "AllowPersist": true, "AllowForgotPassword": true, "EndpointAuthenticationEnabled": false }, "Version": "1.0", "SessionId": "mysessionid101", "EventDescription": null, "RetryWaitingTime": 0, "SecurityImageName": null, "AllowLoginMfaCache": false, "Challenges": [ { "Mechanisms": [ { "AnswerType": "StartOob", "Name": "PF", "PartialPhoneNumber": "0775", "PromptMechChosen": "We will now attempt to call your phone (0000). Please follow the instructions to proceed with authentication.", "PromptSelectMech": "Phone Call... 
XXX-0000", "MechanismId": "bbbbbbb_BBBBBBBBBBBBBBBBBBBBBBBBBBBB-2222222", "Enrolled": true } ] } ], "Summary": "NewPackage", "TenantId": "TENANTID" }, "Message": null, "MessageID": null, "Exception": null, "ErrorID": null, "ErrorCode": null, "IsSoftError": false, "InnerExceptions": null } ================================================ FILE: internal/cyberark/servicediscovery/discovery.go ================================================ package servicediscovery import ( "context" "encoding/json" "fmt" "io" "net/http" "net/url" "os" "path" "sync" "time" arkapi "github.com/jetstack/preflight/internal/cyberark/api" "github.com/jetstack/preflight/pkg/version" ) const ( // ProdDiscoveryAPIBaseURL is the base URL for the production CyberArk Service Discovery API ProdDiscoveryAPIBaseURL = "https://platform-discovery.cyberark.cloud/" // IdentityServiceName is the name of the identity service we're looking for in responses from the Service Discovery API // We were told to use the identity_administration field, not the identity_user_portal field. IdentityServiceName = "identity_administration" // DiscoveryContextServiceName is the name of the discovery and context API // in responses from the Service Discovery API. DiscoveryContextServiceName = "discoverycontext" // maxDiscoverBodySize is the maximum allowed size for a response body from the CyberArk Service Discovery subdomain endpoint // As of 2025-04-16, a response from the integration environment is ~4kB maxDiscoverBodySize = 2 * 1024 * 1024 ) // Client is a Golang client for interacting with the CyberArk Discovery Service. It allows // users to fetch URLs for various APIs available in CyberArk. This client is specialised to // fetch only API endpoints, since only API endpoints are required by the Venafi Kubernetes Agent currently. 
type Client struct { client *http.Client baseURL string subdomain string cachedResponse *Services cachedTenantID string cachedResponseTime time.Time cachedResponseMutex sync.Mutex } // New creates a new CyberArk Service Discovery client. If the ARK_DISCOVERY_API // environment variable is set, it is used as the base URL for the service // discovery API. Otherwise, the production URL is used. func New(httpClient *http.Client, subdomain string) *Client { baseURL := os.Getenv("ARK_DISCOVERY_API") if baseURL == "" { baseURL = ProdDiscoveryAPIBaseURL } client := &Client{ client: httpClient, baseURL: baseURL, subdomain: subdomain, cachedResponse: nil, cachedTenantID: "", cachedResponseTime: time.Time{}, cachedResponseMutex: sync.Mutex{}, } return client } // DiscoveryResponse represents the full JSON response returned by the CyberArk api/tenant-discovery/public API // The API is documented here https://ca-il-confluence.il.cyber-ark.com/spaces/EV/pages/575618345/Updated+PD+APIs+doc type DiscoveryResponse struct { Region string `json:"region"` DRRegion string `json:"dr_region"` Subdomain string `json:"subdomain"` TenantID string `json:"tenant_id"` PlatformID string `json:"platform_id"` IdentityID string `json:"identity_id"` DefaultURL string `json:"default_url"` TenantFlags map[string]any `json:"tenant_flags"` Services []Service `json:"services"` } type Service struct { ServiceName string `json:"service_name"` ServiceSubdomains []string `json:"service_subdomains"` Region string `json:"region"` Endpoints []ServiceEndpoint `json:"endpoints"` } // ServiceEndpoint represents a single service endpoint returned by the CyberArk // Service Discovery API. The JSON field names here must match the field names // returned by the Service Discovery API. type ServiceEndpoint struct { IsActive bool `json:"is_active"` Type string `json:"type"` UI string `json:"ui"` API string `json:"api"` } // This is a convenience struct to hold the two ServiceEndpoints we care about. 
// Currently, we only care about the Identity API and the Discovery Context API. type Services struct { Identity ServiceEndpoint DiscoveryContext ServiceEndpoint } // DiscoverServices fetches from the service discovery service for the configured subdomain // and parses the CyberArk Identity API URL and Inventory API URL. // It also returns the Tenant ID UUID corresponding to the subdomain. func (c *Client) DiscoverServices(ctx context.Context) (*Services, string, error) { c.cachedResponseMutex.Lock() defer c.cachedResponseMutex.Unlock() if c.cachedResponse != nil && time.Since(c.cachedResponseTime) < 1*time.Hour { return c.cachedResponse, c.cachedTenantID, nil } u, err := url.Parse(c.baseURL) if err != nil { return nil, "", fmt.Errorf("invalid base URL for service discovery: %w", err) } u.Path = path.Join(u.Path, "api/public/tenant-discovery") u.RawQuery = url.Values{"bySubdomain": []string{c.subdomain}}.Encode() endpoint := u.String() request, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil) if err != nil { return nil, "", fmt.Errorf("failed to initialise request to %s: %s", endpoint, err) } request.Header.Set("Accept", "application/json") version.SetUserAgent(request) // Add telemetry headers arkapi.SetTelemetryRequestHeader(request) resp, err := c.client.Do(request) if err != nil { return nil, "", fmt.Errorf("failed to perform HTTP request: %s", err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { // a 404 error is returned with an empty JSON body "{}" if the subdomain is unknown; at the time of writing, we haven't observed // any other errors and so we can't special case them if resp.StatusCode == http.StatusNotFound { return nil, "", fmt.Errorf("got an HTTP 404 response from service discovery; maybe the subdomain %q is incorrect or does not exist?", c.subdomain) } return nil, "", fmt.Errorf("got unexpected status code %s from request to service discovery API", resp.Status) } var discoveryResp DiscoveryResponse err = 
json.NewDecoder(io.LimitReader(resp.Body, maxDiscoverBodySize)).Decode(&discoveryResp) if err != nil { if err == io.ErrUnexpectedEOF { return nil, "", fmt.Errorf("rejecting JSON response from server as it was too large or was truncated") } return nil, "", fmt.Errorf("failed to parse JSON from otherwise successful request to service discovery endpoint: %s", err) } var identityAPI, discoveryContextAPI string for _, svc := range discoveryResp.Services { switch svc.ServiceName { case IdentityServiceName: for _, ep := range svc.Endpoints { if ep.Type == "main" && ep.IsActive && ep.API != "" { identityAPI = ep.API break } } case DiscoveryContextServiceName: for _, ep := range svc.Endpoints { if ep.Type == "main" && ep.IsActive && ep.API != "" { discoveryContextAPI = ep.API break } } } } if identityAPI == "" { return nil, "", fmt.Errorf("didn't find %s in service discovery response, "+ "which may indicate a suspended tenant; unable to detect CyberArk Identity API URL", IdentityServiceName) } //TODO: Should add a check for discoveryContextAPI too? 
services := &Services{ Identity: ServiceEndpoint{API: identityAPI}, DiscoveryContext: ServiceEndpoint{API: discoveryContextAPI}, } c.cachedResponse = services c.cachedTenantID = discoveryResp.TenantID c.cachedResponseTime = time.Now() return services, discoveryResp.TenantID, nil } ================================================ FILE: internal/cyberark/servicediscovery/discovery_test.go ================================================ package servicediscovery import ( "fmt" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/klog/v2" "k8s.io/klog/v2/ktesting" _ "k8s.io/klog/v2/ktesting/init" ) func Test_DiscoverIdentityAPIURL(t *testing.T) { tests := map[string]struct { subdomain string expectedURL string expectedError error }{ "successful request": { subdomain: MockDiscoverySubdomain, expectedURL: "https://ajp5871.id.integration-cyberark.cloud", expectedError: nil, }, "subdomain not found": { subdomain: "something-random", expectedURL: "", expectedError: fmt.Errorf("got an HTTP 404 response from service discovery; maybe the subdomain %q is incorrect or does not exist?", "something-random"), }, "no identity service in response": { subdomain: "no-identity", expectedURL: "", expectedError: fmt.Errorf("didn't find %s in service discovery response, which may indicate a suspended tenant; unable to detect CyberArk Identity API URL", IdentityServiceName), }, "unexpected HTTP response": { subdomain: "bad-request", expectedURL: "", expectedError: fmt.Errorf("got unexpected status code 400 Bad Request from request to service discovery API"), }, "response JSON too long": { subdomain: "json-too-long", expectedURL: "", expectedError: fmt.Errorf("rejecting JSON response from server as it was too large or was truncated"), }, "response JSON invalid": { subdomain: "json-invalid", expectedURL: "", expectedError: fmt.Errorf("failed to parse JSON from otherwise successful request to service discovery endpoint: invalid character 'a' looking for 
beginning of value"), }, } for name, testSpec := range tests { t.Run(name, func(t *testing.T) { logger := ktesting.NewLogger(t, ktesting.DefaultConfig) ctx := klog.NewContext(t.Context(), logger) httpClient := MockDiscoveryServer(t, Services{ Identity: ServiceEndpoint{ API: mockIdentityAPIURL, }, DiscoveryContext: ServiceEndpoint{ API: mockDiscoveryContextAPIURL, }, }) client := New(httpClient, testSpec.subdomain) services, _, err := client.DiscoverServices(ctx) if testSpec.expectedError != nil { assert.EqualError(t, err, testSpec.expectedError.Error()) assert.Nil(t, services) return } require.NoError(t, err) if services.Identity.API != testSpec.expectedURL { t.Errorf("expected API URL=%s\nobserved API URL=%s", testSpec.expectedURL, services.Identity.API) } }) } } ================================================ FILE: internal/cyberark/servicediscovery/mock.go ================================================ package servicediscovery import ( "bytes" "crypto/rand" "encoding/hex" "encoding/json" "net/http" "net/http/httptest" "strings" "testing" "text/template" "k8s.io/client-go/transport" arkapi "github.com/jetstack/preflight/internal/cyberark/api" "github.com/jetstack/preflight/pkg/version" _ "embed" ) const ( // MockDiscoverySubdomain is the subdomain for which the MockDiscoveryServer will return a success response MockDiscoverySubdomain = "tlskp-test" mockIdentityAPIURL = "https://ajp5871.id.integration-cyberark.cloud" mockDiscoveryContextAPIURL = "https://venafi-test.inventory.integration-cyberark.cloud/" prefix = "/api/public/tenant-discovery?bySubdomain=" ) //go:embed testdata/discovery_success.json.template var discoverySuccessTemplate string type mockDiscoveryServer struct { t testing.TB successResponse string } // MockDiscoveryServer starts a mocked CyberArk service discovery server and // returns an HTTP client with the CA certs needed to connect to it. 
// // The URL of the mock server is set in the `ARK_DISCOVERY_API` environment // variable, so any code using the `servicediscovery.Client` will use this mock // server. // // The mock server will return a successful response when the subdomain is // `MockDiscoverySubdomain`, and the API URLs in the response will match those // supplied in `services`. // Other subdomains, can be used to trigger various failure responses. // // The returned HTTP client has a transport which logs requests and responses // depending on log level of the logger supplied in the context. func MockDiscoveryServer(t testing.TB, services Services) *http.Client { tmpl := template.Must(template.New("mockDiscoverySuccess").Parse(discoverySuccessTemplate)) buf := &bytes.Buffer{} err := tmpl.Execute(buf, services) if err != nil { panic(err) } mds := &mockDiscoveryServer{ t: t, successResponse: buf.String(), } server := httptest.NewTLSServer(mds) t.Cleanup(server.Close) t.Setenv("ARK_DISCOVERY_API", server.URL) httpClient := server.Client() httpClient.Transport = transport.NewDebuggingRoundTripper(httpClient.Transport, transport.DebugByContext) return httpClient } func (mds *mockDiscoveryServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { mds.t.Log(r.Method, r.RequestURI) if r.Method != http.MethodGet { // This was observed by making a POST request to the integration environment // Normally, we'd expect 405 Method Not Allowed but we match the observed response here w.WriteHeader(http.StatusForbidden) _, _ = w.Write([]byte(`{"message":"Missing Authentication Token"}`)) return } if !strings.HasPrefix(r.URL.String(), prefix) { // This was observed by making a request to /api/v2/services/asd // Normally, we'd expect 404 Not Found but we match the observed response here w.WriteHeader(http.StatusForbidden) _, _ = w.Write([]byte(`{"message":"Missing Authentication Token"}`)) return } if r.Header.Get("User-Agent") != version.UserAgent() { w.WriteHeader(http.StatusInternalServerError) _, _ = 
w.Write([]byte("should set user agent on all requests")) return } if r.Header.Get(arkapi.TelemetryHeaderKey) == "" { w.WriteHeader(http.StatusInternalServerError) _, _ = w.Write([]byte("should set telemetry header on all requests")) return } if r.Header.Get("Accept") != "application/json" { w.WriteHeader(http.StatusInternalServerError) _, _ = w.Write([]byte("should request JSON on all requests")) return } subdomain := strings.TrimPrefix(r.URL.String(), prefix) switch subdomain { case MockDiscoverySubdomain: _, _ = w.Write([]byte(mds.successResponse)) case "no-identity": // return a snippet of valid service discovery JSON, but don't include the identity service _, _ = w.Write([]byte(`{ "services": [ { "service_name": "data_privacy", "region": "us-east-1", "endpoints": [ { "is_active": true, "type": "main", "ui": "https://ui.dataprivacy.integration-cyberark.cloud/", "api": "https://us-east-1.dataprivacy.integration-cyberark.cloud/api" } ] } ] }`)) case "bad-request": // test how the client handles a random unexpected response w.WriteHeader(http.StatusBadRequest) _, _ = w.Write([]byte("{}")) case "json-invalid": // test that the client correctly rejects handles invalid JSON w.WriteHeader(http.StatusOK) _, _ = w.Write([]byte(`{"a": a}`)) case "json-too-long": // test that the client correctly rejects JSON which is too long w.WriteHeader(http.StatusOK) // we'll hex encode the random bytes (doubling the size) longData := make([]byte, 1+maxDiscoverBodySize/2) _, _ = rand.Read(longData) longJSON, err := json.Marshal(map[string]string{"key": hex.EncodeToString(longData)}) if err != nil { panic(err) } _, _ = w.Write(longJSON) default: w.WriteHeader(http.StatusNotFound) _, _ = w.Write([]byte("{}")) } } ================================================ FILE: internal/cyberark/servicediscovery/testdata/README.md ================================================ # Test data for CyberArk Discovery All data in this folder is derived from an unauthenticated endpoint accessible from 
the public Internet. To get the original data: NOTE: This API is not implemented yet as of 02.09.2025 but is expected to be finalised by end of PI3 2025. ```bash curl -fsSL "${ARK_DISCOVERY_API}?bySubdomain=${ARK_SUBDOMAIN}" | jq ``` Then replace `identity_administration.api` with `{{ .Identity.API }}` and `discoverycontext.api` with `{{ .DiscoveryContext.API }}`. Those Go template fields will be substituted in the tests. ================================================ FILE: internal/cyberark/servicediscovery/testdata/discovery_success.json.template ================================================ { "region": "us-east-1", "dr_region": "us-east-2", "subdomain": "venafi-test", "platform_id": "platform-123", "tenant_id": "tenant-123", "identity_id": "identity-456", "default_url": "https://venafi-test.integration-cyberark.cloud", "tenant_flags": { "is_crdr_supported": "true", "is_crdr_active": "true" }, "services": [ { "service_name": "data_privacy", "region": "us-east-1", "endpoints": [ { "is_active": true, "type": "main", "ui": "https://ui.dataprivacy.integration-cyberark.cloud/", "api": "https://us-east-1.dataprivacy.integration-cyberark.cloud/api" } ] }, { "service_name": "secrets_manager", "region": "us-east-2", "endpoints": [ { "is_active": true, "type": "main", "ui": "https://ui.test-conjur.cloud", "api": "https://venafi-test.secretsmgr.integration-cyberark.cloud/api" } ] }, { "service_name": "idaptive_risk_analytics", "region": "US-East-Pod", "endpoints": [ { "is_active": true, "type": "main", "ui": "https://ajp5871-my.analytics.idaptive.qa", "api": "https://ajp5871-my.analytics.idaptive.qa" } ] }, { "service_name": "component_manager", "region": "us-east-1", "endpoints": [ { "is_active": true, "type": "main", "ui": "https://ui-connectormanagement.connectormanagement.integration-cyberark.cloud", "api": "https://venafi-test.connectormanagement.integration-cyberark.cloud/api" } ] }, { "service_name": "recording", "region": "us-east-1", "endpoints": [ { 
"is_active": true, "type": "main", "ui": "https://us-east-1.rec-ui.recording.integration-cyberark.cloud", "api": "https://venafi-test.recording.integration-cyberark.cloud/api" } ] }, { "service_name": "identity_user_portal", "region": "US-East-Pod", "endpoints": [ { "is_active": true, "type": "main", "ui": "https://ajp5871.id.integration-cyberark.cloud", "api": "https://ajp5871.id.integration-cyberark.cloud" } ] }, { "service_name": "userportal", "region": "us-east-1", "endpoints": [ { "is_active": true, "type": "main", "ui": "https://us-east-1.ui.userportal.integration-cyberark.cloud/", "api": "https://venafi-test.api.userportal.integration-cyberark.cloud/api" } ] }, { "service_name": "cloud_onboarding", "region": "us-east-1", "endpoints": [ { "is_active": true, "type": "main", "ui": "https://ui-cloudonboarding.cloudonboarding.integration-cyberark.cloud/", "api": "https://venafi-test.cloudonboarding.integration-cyberark.cloud/api" } ] }, { "service_name": "identity_administration", "region": "US-East-Pod", "endpoints": [ { "is_active": true, "type": "main", "ui": "https://ajp5871.id.integration-cyberark.cloud", "api": "{{ .Identity.API }}" } ] }, { "service_name": "adminportal", "region": "us-east-1", "endpoints": [ { "is_active": true, "type": "main", "ui": "https://ui-adminportal.adminportal.integration-cyberark.cloud", "api": "https://venafi-test.adminportal.integration-cyberark.cloud/api" } ] }, { "service_name": "analytics", "region": "us-east-1", "endpoints": [ { "is_active": true, "type": "main", "ui": "https://venafi-test.analytics.integration-cyberark.cloud/", "api": "https://venafi-test.analytics.integration-cyberark.cloud/api" } ] }, { "service_name": "session_monitoring", "region": "us-east-1", "endpoints": [ { "is_active": true, "type": "main", "ui": "https://us-east-1.sm-ui.sessionmonitoring.integration-cyberark.cloud", "api": "https://venafi-test.sessionmonitoring.integration-cyberark.cloud/api" } ] }, { "service_name": "audit", "region": 
"us-east-1", "endpoints": [ { "is_active": true, "type": "main", "ui": "https://ui.audit-ui.integration-cyberark.cloud", "api": "https://venafi-test.audit.integration-cyberark.cloud/api" } ] }, { "service_name": "fmcdp", "region": "us-east-1", "endpoints": [ { "is_active": true, "type": "main", "ui": "https://tagtig.io/", "api": "https://tagtig.io/api" } ] }, { "service_name": "featureadopt", "region": "us-east-1", "endpoints": [ { "is_active": true, "type": "main", "ui": "https://ui-featureadopt.featureadopt.integration-cyberark.cloud/", "api": "https://us-east-1-featureadopt.featureadopt.integration-cyberark.cloud/api" } ] }, { "service_name": "discoverycontext", "region": "us-east-1", "endpoints": [ { "is_active": true, "type": "main", "ui": "https://ui-inventory.inventory.integration-cyberark.cloud/", "api": "{{ .DiscoveryContext.API }}" } ] } ] } ================================================ FILE: internal/cyberark/testing/testing.go ================================================ package testing import ( "os" "testing" ) // SkipIfNoEnv skips the test if the required CyberArk environment variables are not set. func SkipIfNoEnv(t testing.TB) { t.Helper() if os.Getenv("ARK_SUBDOMAIN") == "" || os.Getenv("ARK_USERNAME") == "" || os.Getenv("ARK_SECRET") == "" { t.Skip("Skipping test because one of ARK_SUBDOMAIN, ARK_USERNAME or ARK_SECRET isn't set") } } ================================================ FILE: internal/envelope/doc.go ================================================ // Package envelope provides types and interfaces for envelope encryption. // // Envelope encryption combines asymmetric and symmetric cryptography to // efficiently encrypt data. The Encryptor interface defines the encryption // operation, returning data in JWE (JSON Web Encryption) format as defined // in RFC 7516. 
// // Implementations are available in subpackages: // // - internal/envelope/rsa: RSA-OAEP-256 + AES-256-GCM using JWE // // See subpackage documentation for usage examples. package envelope ================================================ FILE: internal/envelope/keyfetch/client.go ================================================ package keyfetch import ( "context" "crypto/rsa" "crypto/x509" "fmt" "io" "net/http" "net/url" "sync" "time" "github.com/jetstack/venafi-connection-lib/http_client" "github.com/lestrrat-go/jwx/v3/jwk" "k8s.io/klog/v2" "github.com/jetstack/preflight/internal/cyberark" "github.com/jetstack/preflight/internal/cyberark/identity" "github.com/jetstack/preflight/internal/cyberark/servicediscovery" "github.com/jetstack/preflight/pkg/version" ) const ( // minRSAKeySize is the minimum RSA key size in bits; we'd expect that keys will be larger but 2048 is a sane floor // to enforce to ensure that a weak key can't accidentally be used minRSAKeySize = 2048 ) // KeyFetcher is an interface for fetching public keys. type KeyFetcher interface { // FetchKey retrieves a public key from the key source. FetchKey(ctx context.Context) (PublicKey, error) } // Compile-time check that Client implements KeyFetcher var _ KeyFetcher = (*Client)(nil) // PublicKey represents an RSA public key retrieved from the key server. type PublicKey struct { // KeyID is the unique identifier for this key KeyID string // Key is the actual RSA public key Key *rsa.PublicKey } // Client fetches public keys from a CyberArk HTTP endpoint that provides keys in JWKS format. // It can be expanded in future to support other key types and formats, but for now it only supports RSA keys // and ignored other types. 
type Client struct { discoveryClient *servicediscovery.Client identityClient *identity.Client cfg cyberark.ClientConfig // httpClient is the HTTP client used for requests httpClient *http.Client cachedKey PublicKey cachedKeyMutex sync.Mutex cachedKeyTime time.Time } // NewClient creates a new key fetching client. // Uses CyberArk service discovery to derive the JWKS endpoint and CyberArk identity client for authentication. // Constructing the client involves a service discovery call to initialise the identity client, // so this may return an error if the discovery client is not able to connect to the service discovery endpoint. // If httpClient is nil, a default HTTP client will be created. func NewClient(ctx context.Context, discoveryClient *servicediscovery.Client, cfg cyberark.ClientConfig, httpClient *http.Client) (*Client, error) { if httpClient == nil { var rootCAs *x509.CertPool httpClient = http_client.NewDefaultClient(version.UserAgent(), rootCAs) } services, _, err := discoveryClient.DiscoverServices(ctx) if err != nil { return nil, fmt.Errorf("failed to get services from discovery client for initialising identity client: %w", err) } return &Client{ discoveryClient: discoveryClient, identityClient: identity.New(httpClient, services.Identity.API, cfg.Subdomain), cfg: cfg, httpClient: httpClient, }, nil } // FetchKey retrieves the public keys from the configured endpoint. // It returns a slice of PublicKey structs containing the key material and metadata. 
func (c *Client) FetchKey(ctx context.Context) (PublicKey, error) { logger := klog.FromContext(ctx).WithName("keyfetch") c.cachedKeyMutex.Lock() defer c.cachedKeyMutex.Unlock() if time.Since(c.cachedKeyTime) < 15*time.Minute { klog.FromContext(ctx).WithName("keyfetch").V(2).Info("using cached key", "fetchedAt", c.cachedKeyTime.Format(time.RFC3339Nano), "kid", c.cachedKey.KeyID) return c.cachedKey, nil } services, _, err := c.discoveryClient.DiscoverServices(ctx) if err != nil { return PublicKey{}, fmt.Errorf("failed to get services from discovery client: %w", err) } err = c.identityClient.LoginUsernamePassword(ctx, c.cfg.Username, []byte(c.cfg.Secret)) if err != nil { return PublicKey{}, fmt.Errorf("failed to authenticate for fetching JWKs: %w", err) } endpoint, err := url.JoinPath(services.DiscoveryContext.API, "discovery-context/jwks") if err != nil { return PublicKey{}, fmt.Errorf("failed to construct endpoint URL: %w", err) } req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil) if err != nil { return PublicKey{}, fmt.Errorf("failed to create request: %w", err) } _, err = c.identityClient.AuthenticateRequest(req) if err != nil { return PublicKey{}, fmt.Errorf("failed to authenticate request: %s", err) } req.Header.Set("Accept", "application/json") version.SetUserAgent(req) resp, err := c.httpClient.Do(req) if err != nil { return PublicKey{}, fmt.Errorf("failed to fetch keys from %s: %w", endpoint, err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { body, _ := io.ReadAll(resp.Body) return PublicKey{}, fmt.Errorf("unexpected status code %d from %s: %s", resp.StatusCode, endpoint, string(body)) } body, err := io.ReadAll(resp.Body) if err != nil { return PublicKey{}, fmt.Errorf("failed to read response body: %w", err) } keySet, err := jwk.Parse(body) if err != nil { return PublicKey{}, fmt.Errorf("failed to parse JWKs response: %w", err) } for i := range keySet.Len() { key, ok := keySet.Key(i) if !ok { continue } // Only 
process RSA keys if key.KeyType().String() != "RSA" { continue } var rawKey any if err := jwk.Export(key, &rawKey); err != nil { // skip unparseable keys continue } rsaKey, ok := rawKey.(*rsa.PublicKey) if !ok { // only process RSA keys (for now) continue } if rsaKey.N.BitLen() < minRSAKeySize { // skip keys that are too small to be secure continue } kid, ok := key.KeyID() if !ok { // skip any keys which don't have an ID continue } alg, ok := key.Algorithm() if !ok { // skip any keys which don't have an algorithm specified continue } if alg.String() != "RSA-OAEP-256" { // we only use RSA keys for RSA-OAEP-256 continue } // return the first valid key we find logger.Info("fetched valid RSA key", "kid", kid) c.cachedKey = PublicKey{ KeyID: kid, Key: rsaKey, } c.cachedKeyTime = time.Now() return c.cachedKey, nil } return PublicKey{}, fmt.Errorf("no valid RSA keys found at %s", endpoint) } ================================================ FILE: internal/envelope/keyfetch/client_test.go ================================================ package keyfetch import ( "context" "net/http" "net/http/httptest" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/jetstack/preflight/internal/cyberark" "github.com/jetstack/preflight/internal/cyberark/identity" "github.com/jetstack/preflight/internal/cyberark/servicediscovery" ) // testClientSetup sets up a complete test environment with mock identity and discovery servers // and returns a configured client along with the test ClientConfig func testClientSetup(t *testing.T, jwksServerURL string) (*Client, cyberark.ClientConfig) { t.Helper() // Create mock identity server identityURL, httpClient := identity.MockIdentityServer(t) // Set up services for mock discovery server services := servicediscovery.Services{ Identity: servicediscovery.ServiceEndpoint{ IsActive: true, Type: "main", API: identityURL, }, DiscoveryContext: servicediscovery.ServiceEndpoint{ IsActive: true, Type: "main", API: 
jwksServerURL, }, } // Create mock discovery server _ = servicediscovery.MockDiscoveryServer(t, services) // Create discovery client discoveryClient := servicediscovery.New(httpClient, servicediscovery.MockDiscoverySubdomain) // Create test config with credentials that match the mock identity server cfg := cyberark.ClientConfig{ Subdomain: servicediscovery.MockDiscoverySubdomain, Username: "test@example.com", // matches successUser in mock identity server Secret: "somepassword", // matches successPassword in mock identity server } // Create the keyfetch client with the properly configured httpClient client, err := NewClient(t.Context(), discoveryClient, cfg, httpClient) require.NoError(t, err) return client, cfg } func mockJWKSServer(t *testing.T, statusCode int, jwksResponse string) *httptest.Server { t.Helper() server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // Check if this is the JWKS endpoint if r.URL.Path == "/discovery-context/jwks" { assert.Equal(t, http.MethodGet, r.Method) assert.Equal(t, "application/json", r.Header.Get("Accept")) w.Header().Set("Content-Type", "application/json") w.WriteHeader(statusCode) _, err := w.Write([]byte(jwksResponse)) require.NoError(t, err) } })) t.Cleanup(server.Close) return server } func TestClient_FetchKey(t *testing.T) { // Sample JWKs response with a valid RSA key // This is a minimal example with the required fields, used in multiple tests jwksResponse := `{ "keys": [ { "kty": "RSA", "use": "enc", "kid": "test-key-1", "alg": "RSA-OAEP-256", "n": "vDdioGpDuAEQDd4WRXyWa4sZ5EeS9OPsRrU_jU3PbZdDcANxfh_WSeSvSBKGfGXGC3fIzu0Ernk9VjXcs3LeFdRq2N4nNRZvCzsd_MjBtn7CWgjM_Sk9DXEGn3cHHilcJUJQ4i2YgX9bHu0odNgE6cSVIUEMIC2EGuGk_I7lwroinAAwXpNLLQkV_25kv_QQof2i5f7AocY6QTd0SAo8ZUqFBzanupkeFpl3-Bsz6_zdt_N0x9k5XHQn42Q2oTupTwvXFbE1x8XtCpiaP3_fsQ9dN7t4z6HtwlNUJB2tFfF6PgdKZ9LuJpYjFPYzJQ6Rv28fuc8YHcF7Jittjyzmew", "e": "AQAB" } ] }` t.Run("successful fetch", func(t *testing.T) { server := 
mockJWKSServer(t, http.StatusOK, jwksResponse) client, _ := testClientSetup(t, server.URL) key, err := client.FetchKey(t.Context()) require.NoError(t, err) assert.Equal(t, "test-key-1", key.KeyID) assert.NotNil(t, key.Key) assert.NotNil(t, key.Key.N) assert.Greater(t, key.Key.E, 0) }) t.Run("multiple keys", func(t *testing.T) { // want to check that FetchKey returns the first valid RSA key, even if there are multiple keys in the response multiKeyResponse := `{ "keys": [ { "kty": "RSA", "kid": "key-1", "alg": "RSA-OAEP-256", "n": "vDdioGpDuAEQDd4WRXyWa4sZ5EeS9OPsRrU_jU3PbZdDcANxfh_WSeSvSBKGfGXGC3fIzu0Ernk9VjXcs3LeFdRq2N4nNRZvCzsd_MjBtn7CWgjM_Sk9DXEGn3cHHilcJUJQ4i2YgX9bHu0odNgE6cSVIUEMIC2EGuGk_I7lwroinAAwXpNLLQkV_25kv_QQof2i5f7AocY6QTd0SAo8ZUqFBzanupkeFpl3-Bsz6_zdt_N0x9k5XHQn42Q2oTupTwvXFbE1x8XtCpiaP3_fsQ9dN7t4z6HtwlNUJB2tFfF6PgdKZ9LuJpYjFPYzJQ6Rv28fuc8YHcF7Jittjyzmew", "e": "AQAB" }, { "kty": "RSA", "kid": "key-2", "alg": "RSA-OAEP-256", "n": "4J0VE8FK1rSQUBGiLpk4MkPyFApCyCugOfkuH0hiHclxZay96JgyZylH97eqs-ZmWXtv42ynYctIj2ZleaoqVDfMOqZ1GsbccyNAYReDtUYgeUtJEajpfUo1vitoh6OEB6nB0Hau07ELLqcUoxH_zkH5Kwoi_BgxByJDQ1HOut6nyEPTXLTMrAYK_pqL_kzsU0OtrCgSBh6j-11ToqUfxsLupbadRC0t5zrq4-3mZKqxBUz4XB2g3b9d2lH7mOTl5J_E8jcD4tK9DePzjdbkRWonBEJetWl9f2mh_VD1sxJbie1kzM5cdQylXzV_AvhSr58w00qy6XR_QXI10UU16Q", "e": "AQAB" } ] }` server := mockJWKSServer(t, http.StatusOK, multiKeyResponse) client, _ := testClientSetup(t, server.URL) key, err := client.FetchKey(t.Context()) require.NoError(t, err) assert.Equal(t, "key-1", key.KeyID) }) t.Run("filters non-RSA keys", func(t *testing.T) { // check that the client correctly filters out non-RSA keys and returns the first valid RSA key mixedKeyResponse := `{ "keys": [ { "kty": "EC", "kid": "ec-key-1", "alg": "ES256", "crv": "P-256", "x": "WKn-ZIGevcwGIyyrzFoZNBdaq9_TsqzGl96oc0CWuis", "y": "y77t-RvAHRKTsSGdIYUfweuOvwrvDD-Q3Hv5J0fSKbE" }, { "kty": "RSA", "kid": "rsa-key-1", "alg": "RSA-OAEP-256", "n": 
"vDdioGpDuAEQDd4WRXyWa4sZ5EeS9OPsRrU_jU3PbZdDcANxfh_WSeSvSBKGfGXGC3fIzu0Ernk9VjXcs3LeFdRq2N4nNRZvCzsd_MjBtn7CWgjM_Sk9DXEGn3cHHilcJUJQ4i2YgX9bHu0odNgE6cSVIUEMIC2EGuGk_I7lwroinAAwXpNLLQkV_25kv_QQof2i5f7AocY6QTd0SAo8ZUqFBzanupkeFpl3-Bsz6_zdt_N0x9k5XHQn42Q2oTupTwvXFbE1x8XtCpiaP3_fsQ9dN7t4z6HtwlNUJB2tFfF6PgdKZ9LuJpYjFPYzJQ6Rv28fuc8YHcF7Jittjyzmew", "e": "AQAB" } ] }` server := mockJWKSServer(t, http.StatusOK, mixedKeyResponse) client, _ := testClientSetup(t, server.URL) key, err := client.FetchKey(t.Context()) require.NoError(t, err) assert.Equal(t, "rsa-key-1", key.KeyID) }) t.Run("error on non-200 status", func(t *testing.T) { server := mockJWKSServer(t, http.StatusInternalServerError, "") // Response body won't be used since we return 500 client, _ := testClientSetup(t, server.URL) _, err := client.FetchKey(t.Context()) require.Error(t, err) assert.Contains(t, err.Error(), "unexpected status code 500") }) t.Run("error on invalid JSON", func(t *testing.T) { server := mockJWKSServer(t, http.StatusOK, "invalid json") client, _ := testClientSetup(t, server.URL) _, err := client.FetchKey(t.Context()) require.Error(t, err) assert.Contains(t, err.Error(), "failed to parse JWKs response") }) t.Run("error on no RSA keys", func(t *testing.T) { ecOnlyResponse := `{ "keys": [ { "kty": "EC", "kid": "ec-key-1", "alg": "ES256", "crv": "P-256", "x": "WKn-ZIGevcwGIyyrzFoZNBdaq9_TsqzGl96oc0CWuis", "y": "y77t-RvAHRKTsSGdIYUfweuOvwrvDD-Q3Hv5J0fSKbE" } ] }` server := mockJWKSServer(t, http.StatusOK, ecOnlyResponse) client, _ := testClientSetup(t, server.URL) _, err := client.FetchKey(t.Context()) require.Error(t, err) assert.Contains(t, err.Error(), "no valid RSA keys found") }) t.Run("context cancellation", func(t *testing.T) { server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // This handler will never respond <-r.Context().Done() })) defer server.Close() client, _ := testClientSetup(t, server.URL) ctx, cancel := 
context.WithCancel(context.Background()) cancel() // Cancel immediately _, err := client.FetchKey(ctx) require.Error(t, err) assert.Contains(t, err.Error(), "context canceled") }) t.Run("authentication failure", func(t *testing.T) { server := mockJWKSServer(t, http.StatusOK, jwksResponse) // Create mock identity server identityURL, httpClient := identity.MockIdentityServer(t) // Set up services for mock discovery server services := servicediscovery.Services{ Identity: servicediscovery.ServiceEndpoint{ IsActive: true, Type: "main", API: identityURL, }, DiscoveryContext: servicediscovery.ServiceEndpoint{ IsActive: true, Type: "main", API: server.URL, }, } // Create mock discovery server _ = servicediscovery.MockDiscoveryServer(t, services) // Create discovery client discoveryClient := servicediscovery.New(httpClient, servicediscovery.MockDiscoverySubdomain) // Create test config with WRONG credentials // Use the failureUser from the mock identity server cfg := cyberark.ClientConfig{ Subdomain: servicediscovery.MockDiscoverySubdomain, Username: "test-fail@example.com", // This user is configured to fail in the mock server // TODO: export these constants from the identity package to avoid hardcoding them here Secret: "somepassword", } // Create the keyfetch client client, err := NewClient(t.Context(), discoveryClient, cfg, httpClient) require.NoError(t, err) _, err = client.FetchKey(t.Context()) require.Error(t, err) assert.Contains(t, err.Error(), "failed to authenticate") }) t.Run("service discovery fails", func(t *testing.T) { // Create mock identity server (won't be used but needed for setup) identityURL, httpClient := identity.MockIdentityServer(t) // Set up services for mock discovery server services := servicediscovery.Services{ Identity: servicediscovery.ServiceEndpoint{ IsActive: true, Type: "main", API: identityURL, }, } // Create mock discovery server _ = servicediscovery.MockDiscoveryServer(t, services) // Create discovery client with a subdomain that 
triggers failure discoveryClient := servicediscovery.New(httpClient, "bad-request") cfg := cyberark.ClientConfig{ Subdomain: "bad-request", Username: "test@example.com", Secret: "somepassword", } _, err := NewClient(t.Context(), discoveryClient, cfg, httpClient) require.Error(t, err) assert.Contains(t, err.Error(), "failed to get services from discovery client") }) t.Run("ignores small RSA keys", func(t *testing.T) { // This is a 1024-bit RSA key (half the minimum size) // Generated with: openssl genrsa 1024 | openssl rsa -pubin -outform der | base64url smallKeyResponse := `{ "keys": [ { "kty": "RSA", "kid": "small-key-1", "alg": "RSA-OAEP-256", "n": "wKhJSKlx9aO_TmT4qAqN5EZ8FeXCXmh5F_hGHWL6c4lKvdKc_jBq1YI0H8pCIWZ6WhPKmBZ8JQ4Q2q0TjvdKLYQ8jqzMZxz4J_z4ySbN7yBn7N7xKqL5JN7KqVr7N8KQ", "e": "AQAB" }, { "kty": "RSA", "kid": "valid-key", "alg": "RSA-OAEP-256", "n": "vDdioGpDuAEQDd4WRXyWa4sZ5EeS9OPsRrU_jU3PbZdDcANxfh_WSeSvSBKGfGXGC3fIzu0Ernk9VjXcs3LeFdRq2N4nNRZvCzsd_MjBtn7CWgjM_Sk9DXEGn3cHHilcJUJQ4i2YgX9bHu0odNgE6cSVIUEMIC2EGuGk_I7lwroinAAwXpNLLQkV_25kv_QQof2i5f7AocY6QTd0SAo8ZUqFBzanupkeFpl3-Bsz6_zdt_N0x9k5XHQn42Q2oTupTwvXFbE1x8XtCpiaP3_fsQ9dN7t4z6HtwlNUJB2tFfF6PgdKZ9LuJpYjFPYzJQ6Rv28fuc8YHcF7Jittjyzmew", "e": "AQAB" } ] }` server := mockJWKSServer(t, http.StatusOK, smallKeyResponse) client, _ := testClientSetup(t, server.URL) key, err := client.FetchKey(t.Context()) require.NoError(t, err) // Should skip the small key and return the valid one assert.Equal(t, "valid-key", key.KeyID) }) t.Run("skips keys without kid", func(t *testing.T) { noKidResponse := `{ "keys": [ { "kty": "RSA", "alg": "RSA-OAEP-256", "n": "vDdioGpDuAEQDd4WRXyWa4sZ5EeS9OPsRrU_jU3PbZdDcANxfh_WSeSvSBKGfGXGC3fIzu0Ernk9VjXcs3LeFdRq2N4nNRZvCzsd_MjBtn7CWgjM_Sk9DXEGn3cHHilcJUJQ4i2YgX9bHu0odNgE6cSVIUEMIC2EGuGk_I7lwroinAAwXpNLLQkV_25kv_QQof2i5f7AocY6QTd0SAo8ZUqFBzanupkeFpl3-Bsz6_zdt_N0x9k5XHQn42Q2oTupTwvXFbE1x8XtCpiaP3_fsQ9dN7t4z6HtwlNUJB2tFfF6PgdKZ9LuJpYjFPYzJQ6Rv28fuc8YHcF7Jittjyzmew", "e": "AQAB" } ] }` 
server := mockJWKSServer(t, http.StatusOK, noKidResponse) client, _ := testClientSetup(t, server.URL) _, err := client.FetchKey(t.Context()) require.Error(t, err) assert.Contains(t, err.Error(), "no valid RSA keys found") }) t.Run("filters keys with wrong algorithm", func(t *testing.T) { wrongAlgResponse := `{ "keys": [ { "kty": "RSA", "kid": "wrong-alg-key", "alg": "RS256", "n": "vDdioGpDuAEQDd4WRXyWa4sZ5EeS9OPsRrU_jU3PbZdDcANxfh_WSeSvSBKGfGXGC3fIzu0Ernk9VjXcs3LeFdRq2N4nNRZvCzsd_MjBtn7CWgjM_Sk9DXEGn3cHHilcJUJQ4i2YgX9bHu0odNgE6cSVIUEMIC2EGuGk_I7lwroinAAwXpNLLQkV_25kv_QQof2i5f7AocY6QTd0SAo8ZUqFBzanupkeFpl3-Bsz6_zdt_N0x9k5XHQn42Q2oTupTwvXFbE1x8XtCpiaP3_fsQ9dN7t4z6HtwlNUJB2tFfF6PgdKZ9LuJpYjFPYzJQ6Rv28fuc8YHcF7Jittjyzmew", "e": "AQAB" }, { "kty": "RSA", "kid": "correct-alg-key", "alg": "RSA-OAEP-256", "n": "4J0VE8FK1rSQUBGiLpk4MkPyFApCyCugOfkuH0hiHclxZay96JgyZylH97eqs-ZmWXtv42ynYctIj2ZleaoqVDfMOqZ1GsbccyNAYReDtUYgeUtJEajpfUo1vitoh6OEB6nB0Hau07ELLqcUoxH_zkH5Kwoi_BgxByJDQ1HOut6nyEPTXLTMrAYK_pqL_kzsU0OtrCgSBh6j-11ToqUfxsLupbadRC0t5zrq4-3mZKqxBUz4XB2g3b9d2lH7mOTl5J_E8jcD4tK9DePzjdbkRWonBEJetWl9f2mh_VD1sxJbie1kzM5cdQylXzV_AvhSr58w00qy6XR_QXI10UU16Q", "e": "AQAB" } ] }` server := mockJWKSServer(t, http.StatusOK, wrongAlgResponse) client, _ := testClientSetup(t, server.URL) key, err := client.FetchKey(t.Context()) require.NoError(t, err) // Should skip the RS256 key and return the RSA-OAEP-256 key assert.Equal(t, "correct-alg-key", key.KeyID) }) t.Run("skips keys without algorithm", func(t *testing.T) { noAlgResponse := `{ "keys": [ { "kty": "RSA", "kid": "no-alg-key", "n": "vDdioGpDuAEQDd4WRXyWa4sZ5EeS9OPsRrU_jU3PbZdDcANxfh_WSeSvSBKGfGXGC3fIzu0Ernk9VjXcs3LeFdRq2N4nNRZvCzsd_MjBtn7CWgjM_Sk9DXEGn3cHHilcJUJQ4i2YgX9bHu0odNgE6cSVIUEMIC2EGuGk_I7lwroinAAwXpNLLQkV_25kv_QQof2i5f7AocY6QTd0SAo8ZUqFBzanupkeFpl3-Bsz6_zdt_N0x9k5XHQn42Q2oTupTwvXFbE1x8XtCpiaP3_fsQ9dN7t4z6HtwlNUJB2tFfF6PgdKZ9LuJpYjFPYzJQ6Rv28fuc8YHcF7Jittjyzmew", "e": "AQAB" } ] }` server := mockJWKSServer(t, http.StatusOK, 
noAlgResponse) client, _ := testClientSetup(t, server.URL) _, err := client.FetchKey(t.Context()) require.Error(t, err) assert.Contains(t, err.Error(), "no valid RSA keys found") }) t.Run("handles empty key set", func(t *testing.T) { emptyKeysResponse := `{ "keys": [] }` server := mockJWKSServer(t, http.StatusOK, emptyKeysResponse) client, _ := testClientSetup(t, server.URL) _, err := client.FetchKey(t.Context()) require.Error(t, err) assert.Contains(t, err.Error(), "no valid RSA keys found") }) } ================================================ FILE: internal/envelope/keyfetch/doc.go ================================================ // Package keyfetch provides a client for fetching encryption keys from an HTTP endpoint. // // The client retrieves public keys in JSON Web Key Set (JWKs) format from a remote // server and converts them into usable cryptographic keys for envelope encryption. // // This package uses github.com/lestrrat-go/jwx/v3/jwk for JWK parsing and handling. // // Currently, keyfetch only supports RSA keys for envelope encryption. package keyfetch ================================================ FILE: internal/envelope/keyfetch/fake.go ================================================ package keyfetch import ( "context" "crypto/rand" "crypto/rsa" "fmt" ) // Compile-time check that FakeClient implements KeyFetcher var _ KeyFetcher = (*FakeClient)(nil) // FakeClient is a fake implementation of the key fetcher for testing. // It can be configured to return specific keys or errors for testing different scenarios. type FakeClient struct { // Key is the public key that will be returned by FetchKey. // If nil, a random key will be generated on the first call. Key *PublicKey // Err is the error that will be returned by FetchKey. // If both Key and Err are set, Err takes precedence. Err error // FetchKeyCalls tracks how many times FetchKey was called FetchKeyCalls int } // NewFakeClient creates a new fake client for testing. 
func NewFakeClient() *FakeClient { return &FakeClient{} }

// NewFakeClientWithKey creates a new fake client that returns the specified key.
func NewFakeClientWithKey(keyID string, key *rsa.PublicKey) *FakeClient {
	return &FakeClient{
		Key: &PublicKey{
			KeyID: keyID,
			Key:   key,
		},
	}
}

// NewFakeClientWithError creates a new fake client that returns the specified error.
func NewFakeClientWithError(err error) *FakeClient {
	return &FakeClient{
		Err: err,
	}
}

// FetchKey implements the key fetching interface for testing.
// It returns the configured key or error, or generates a random key if none is configured.
// Precedence: context cancellation, then Err, then Key, then a freshly generated key.
// Every call increments FetchKeyCalls, including calls that return an error.
func (f *FakeClient) FetchKey(ctx context.Context) (PublicKey, error) {
	f.FetchKeyCalls++

	// Check if context is canceled
	if ctx.Err() != nil {
		return PublicKey{}, ctx.Err()
	}

	// If an error is configured, return it
	if f.Err != nil {
		return PublicKey{}, f.Err
	}

	// If a key is configured, return it
	if f.Key != nil {
		return *f.Key, nil
	}

	// Generate a random key for testing
	privateKey, err := rsa.GenerateKey(rand.Reader, minRSAKeySize)
	if err != nil {
		return PublicKey{}, fmt.Errorf("failed to generate test key: %w", err)
	}

	generatedKey := PublicKey{
		KeyID: "test-key",
		Key:   &privateKey.PublicKey,
	}

	// Cache the generated key for subsequent calls
	f.Key = &generatedKey

	return generatedKey, nil
}

================================================
FILE: internal/envelope/keyfetch/fake_test.go
================================================
package keyfetch

import (
	"context"
	"crypto/rand"
	"crypto/rsa"
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestFakeClient(t *testing.T) {
	t.Run("returns generated key by default", func(t *testing.T) {
		fake := NewFakeClient()

		key, err := fake.FetchKey(t.Context())
		require.NoError(t, err)
		assert.Equal(t, "test-key", key.KeyID)
		assert.NotNil(t, key.Key)
		assert.Equal(t, 1, fake.FetchKeyCalls)

		// Subsequent calls return the same key
		key2, err := fake.FetchKey(t.Context())
require.NoError(t, err) assert.Equal(t, key.KeyID, key2.KeyID) assert.Equal(t, key.Key, key2.Key) assert.Equal(t, 2, fake.FetchKeyCalls) }) t.Run("returns configured key", func(t *testing.T) { privateKey, err := rsa.GenerateKey(rand.Reader, minRSAKeySize) require.NoError(t, err) fake := NewFakeClientWithKey("custom-key", &privateKey.PublicKey) key, err := fake.FetchKey(t.Context()) require.NoError(t, err) assert.Equal(t, "custom-key", key.KeyID) assert.Equal(t, &privateKey.PublicKey, key.Key) assert.Equal(t, 1, fake.FetchKeyCalls) }) t.Run("returns configured error", func(t *testing.T) { expectedErr := errors.New("test error") fake := NewFakeClientWithError(expectedErr) _, err := fake.FetchKey(t.Context()) require.Error(t, err) assert.Equal(t, expectedErr, err) assert.Equal(t, 1, fake.FetchKeyCalls) }) t.Run("respects context cancellation", func(t *testing.T) { fake := NewFakeClient() ctx, cancel := context.WithCancel(t.Context()) cancel() _, err := fake.FetchKey(ctx) require.Error(t, err) assert.Equal(t, context.Canceled, err) assert.Equal(t, 1, fake.FetchKeyCalls) }) t.Run("error takes precedence over key", func(t *testing.T) { privateKey, err := rsa.GenerateKey(rand.Reader, minRSAKeySize) require.NoError(t, err) expectedErr := errors.New("test error") fake := &FakeClient{ Key: &PublicKey{ KeyID: "custom-key", Key: &privateKey.PublicKey, }, Err: expectedErr, } _, err = fake.FetchKey(t.Context()) require.Error(t, err) assert.Equal(t, expectedErr, err) }) } ================================================ FILE: internal/envelope/rsa/doc.go ================================================ // Package rsa implements RSA envelope encryption using JWE (JSON Web Encryption) format. // It conforms to the interface in the envelope package. 
//
// The implementation uses:
// - RSA-OAEP-256 (RSA-OAEP with SHA-256) for key encryption
// - AES-256-GCM (A256GCM) for content encryption
// - JWE Compact Serialization format as defined in RFC 7516
//
// The output is a JWE string with 5 base64url-encoded parts separated by dots:
// header.encryptedKey.iv.ciphertext.tag
package rsa

================================================
FILE: internal/envelope/rsa/encryptor.go
================================================
package rsa

import (
	"context"
	"fmt"

	"github.com/lestrrat-go/jwx/v3/jwa"
	"github.com/lestrrat-go/jwx/v3/jwe"

	"github.com/jetstack/preflight/internal/envelope"
	"github.com/jetstack/preflight/internal/envelope/keyfetch"
)

const (
	// EncryptionType is the type identifier for RSA JWE encryption
	EncryptionType = "JWE-RSA"
)

// Compile-time check that Encryptor implements envelope.Encryptor
var _ envelope.Encryptor = (*Encryptor)(nil)

// Encryptor provides envelope encryption using RSA-OAEP-256 for key wrapping
// and AES-256-GCM for data encryption, outputting JWE Compact Serialization format.
type Encryptor struct {
	// fetcher supplies the RSA public key used to wrap the content-encryption key.
	fetcher keyfetch.KeyFetcher
}

// NewEncryptor creates a new Encryptor with the provided key fetcher.
// The encryptor will use RSA-OAEP-256 for key encryption and A256GCM for content encryption.
// As written it never returns a non-nil error; the error return is part of the
// constructor signature.
func NewEncryptor(fetcher keyfetch.KeyFetcher) (*Encryptor, error) {
	return &Encryptor{
		fetcher: fetcher,
	}, nil
}

// Encrypt performs envelope encryption on the provided data.
// It returns an EncryptedData struct containing JWE Compact Serialization format and type metadata.
// The JWE uses RSA-OAEP-256 for key encryption and A256GCM for content encryption.
func (e *Encryptor) Encrypt(ctx context.Context, data []byte) (*envelope.EncryptedData, error) {
	// Reject empty payloads up front rather than emitting a JWE of nothing.
	if len(data) == 0 {
		return nil, fmt.Errorf("data to encrypt cannot be empty")
	}

	// The fetcher may hit the network (or a cache) to obtain the wrapping key.
	key, err := e.fetcher.FetchKey(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch encryption key: %w", err)
	}

	// Create headers with the key ID
	headers := jwe.NewHeaders()
	if err := headers.Set("kid", key.KeyID); err != nil {
		return nil, fmt.Errorf("failed to set key ID header: %w", err)
	}

	// Encrypt using RSA-OAEP-256 for key algorithm and A256GCM for content encryption
	// TODO: When standardised, consider using secret.Do to wrap this call, since it will generate an AES key
	// (see https://pkg.go.dev/runtime/secret)
	encrypted, err := jwe.Encrypt(
		data,
		jwe.WithKey(jwa.RSA_OAEP_256(), key.Key, jwe.WithPerRecipientHeaders(headers)),
		jwe.WithContentEncryption(jwa.A256GCM()),
		jwe.WithCompact(),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt data: %w", err)
	}

	return &envelope.EncryptedData{
		Data: encrypted,
		Type: EncryptionType,
	}, nil
}

================================================
FILE: internal/envelope/rsa/encryptor_test.go
================================================
package rsa

import (
	"crypto/rand"
	"crypto/rsa"
	"encoding/base64"
	"strings"
	"sync"
	"testing"

	"github.com/lestrrat-go/jwx/v3/jwa"
	"github.com/lestrrat-go/jwx/v3/jwe"
	"github.com/stretchr/testify/require"

	"github.com/jetstack/preflight/internal/envelope/keyfetch"
)

const (
	testKeyID = "test-key-id"

	// minRSAKeySize is the minimum RSA key size used for test key generation
	minRSAKeySize = 2048
)

var (
	testKeyOnce     sync.Once
	internalTestKey *rsa.PrivateKey
)

// testKey generates and returns a singleton RSA private key for testing purposes,
// to avoid needing to generate a new key for each test.
func testKey() *rsa.PrivateKey { testKeyOnce.Do(func() { key, err := rsa.GenerateKey(rand.Reader, minRSAKeySize) if err != nil { panic("failed to generate test RSA key: " + err.Error()) } internalTestKey = key }) return internalTestKey } func TestEncrypt_VariousDataSizes(t *testing.T) { fetcher := keyfetch.NewFakeClient() enc, err := NewEncryptor(fetcher) require.NoError(t, err) tests := []struct { name string dataSize int }{ {"small (10 bytes)", 10}, {"medium (1 KB)", 1024}, {"large (1 MB)", 1024 * 1024}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { data := make([]byte, tt.dataSize) _, err := rand.Read(data) require.NoError(t, err) result, err := enc.Encrypt(t.Context(), data) require.NoError(t, err) require.NotNil(t, result) require.Equal(t, EncryptionType, result.Type, "Type should be JWE-RSA") // Verify JWE Compact Serialization format (5 base64url parts separated by dots) jweString := string(result.Data) parts := strings.Split(jweString, ".") require.Len(t, parts, 5, "JWE Compact Serialization should have 5 parts") // Verify each part is non-empty for i, part := range parts { require.NotEmpty(t, part, "JWE part %d should not be empty", i) _, err = base64.RawURLEncoding.DecodeString(part) require.NoError(t, err, "JWE part %d should be valid base64url: %s", i, part) } // Verify the result differs from input require.NotEqual(t, data, result.Data) }) } } func TestEncrypt_EmptyData(t *testing.T) { fetcher := keyfetch.NewFakeClient() enc, err := NewEncryptor(fetcher) require.NoError(t, err) result, err := enc.Encrypt(t.Context(), []byte{}) require.Error(t, err) require.Nil(t, result) require.Contains(t, err.Error(), "cannot be empty") } func TestEncrypt_NonDeterministic(t *testing.T) { fetcher := keyfetch.NewFakeClient() enc, err := NewEncryptor(fetcher) require.NoError(t, err) data := []byte("test data for encryption") // Encrypt the same data twice result1, err := enc.Encrypt(t.Context(), data) require.NoError(t, err) require.Equal(t, 
EncryptionType, result1.Type, "Type should be JWE-RSA") result2, err := enc.Encrypt(t.Context(), data) require.NoError(t, err) require.Equal(t, EncryptionType, result2.Type, "Type should be JWE-RSA") // Results should be different due to random nonces and RSA-OAEP randomness require.NotEqual(t, result1.Data, result2.Data, "Encrypting the same data twice should produce different JWE outputs") } func TestEncrypt_JWEFormat(t *testing.T) { key := testKey() fetcher := keyfetch.NewFakeClientWithKey(testKeyID, &key.PublicKey) enc, err := NewEncryptor(fetcher) require.NoError(t, err) data := []byte("test data") result, err := enc.Encrypt(t.Context(), data) require.NoError(t, err) require.Equal(t, EncryptionType, result.Type, "Type should be JWE-RSA") // Parse and decrypt the JWE to verify format and algorithms decrypted, err := jwe.Decrypt(result.Data, jwe.WithKey(jwa.RSA_OAEP_256(), key), jwe.WithContext(t.Context())) require.NoError(t, err, "Result should be valid JWE with RSA-OAEP-256 and A256GCM, and should decrypt successfully") require.Equal(t, data, decrypted, "Decrypted data should match original") } func TestEncrypt_DecryptRoundtrip(t *testing.T) { key := testKey() fetcher := keyfetch.NewFakeClientWithKey(testKeyID, &key.PublicKey) enc, err := NewEncryptor(fetcher) require.NoError(t, err) originalData := []byte("test data for roundtrip encryption and decryption") // Encrypt the data encrypted, err := enc.Encrypt(t.Context(), originalData) require.NoError(t, err) require.Equal(t, EncryptionType, encrypted.Type, "Type should be JWE-RSA") msg, err := jwe.Parse(encrypted.Data) require.NoError(t, err) headers := msg.ProtectedHeaders() kidHeader, ok := headers.KeyID() require.True(t, ok, "JWE should contain 'kid' header") require.Equal(t, testKeyID, kidHeader, "JWE 'kid' header should match the encryptor's key ID") // Decrypt using the private key decrypted, err := jwe.Decrypt(encrypted.Data, jwe.WithKey(jwa.RSA_OAEP_256(), key), jwe.WithContext(t.Context())) 
require.NoError(t, err, "Decryption should succeed with the correct private key") // Verify the decrypted data matches the original require.Equal(t, originalData, decrypted, "Decrypted data should match original data") } ================================================ FILE: internal/envelope/rsa/keys.go ================================================ package rsa import ( "crypto/rsa" "crypto/x509" "encoding/pem" "fmt" "os" ) // This file contains helpers for loading keys. In practice we'll retrieve keys in some format from a DisCo endpoint const ( // HardcodedPublicKeyPEM contains a temporary hardcoded RSA public key (2048-bit) for envelope encryption. // This is a TEMPORARY solution for initial development and testing. // TODO: Replace with dynamic key fetching from CyberArk Discovery & Context API. HardcodedPublicKeyPEM = `-----BEGIN PUBLIC KEY----- MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoeq+dk4aoGdV9xjrnGJt VbUh5jvkQgynkP+9Ph2NVeoasXWqYOmOVeKOI7Yr58W/L8Mro6C22iSEJrPFgPF6 t+RJsLAsAY6w1Pocq16COeelAWtxhHQGXt77WQKk0kmwhOJZ4VSeiQC4hWLUnq4N Ft7lwLw/50opTXLuSErrwec/bEV7G/Xp11BMsHGEL7dzpwWAfIrbCEomyWrO/L6p O3SAgYMdfup5ddnszeCU2FbFQziOkuMLOyir91XXk8wgdSy4IGAEGpwNx88i8fuj Qafze2aGWUtpWlOEQPP8lH2cj2TGUgLxGITbczJRcwuGIoJBOzAmPDWi/bapj4b6 zQIDAQAB -----END PUBLIC KEY-----` // hardcodedUID is a temporary hardcoded UID associated with the hardcoded public key // It was randomly generated with the macOS "uuidgen" command hardcodedUID = "A39798E6-8CE7-4E6E-9CF6-24A3C923B3A7" ) // LoadPublicKeyFromPEM parses an RSA public key from PEM-encoded bytes. // The PEM block should be of type "PUBLIC KEY" or "RSA PUBLIC KEY". 
func LoadPublicKeyFromPEM(pemBytes []byte) (*rsa.PublicKey, error) {
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		return nil, fmt.Errorf("failed to decode PEM block")
	}

	// Dispatch on the PEM block type: PKIX ("PUBLIC KEY") is the common modern
	// encoding; PKCS#1 ("RSA PUBLIC KEY") is the legacy RSA-specific one.
	switch block.Type {
	case "PUBLIC KEY":
		parsed, err := x509.ParsePKIXPublicKey(block.Bytes)
		if err != nil {
			return nil, fmt.Errorf("failed to parse PKIX public key: %w", err)
		}
		publicKey, ok := parsed.(*rsa.PublicKey)
		if !ok {
			// PKIX can wrap non-RSA keys (e.g. ECDSA); reject those.
			return nil, fmt.Errorf("key is not an RSA public key, got %T", parsed)
		}
		return publicKey, nil

	case "RSA PUBLIC KEY":
		publicKey, err := x509.ParsePKCS1PublicKey(block.Bytes)
		if err != nil {
			return nil, fmt.Errorf("failed to parse PKCS1 RSA public key: %w", err)
		}
		return publicKey, nil

	default:
		return nil, fmt.Errorf("unsupported PEM block type: %s (expected PUBLIC KEY or RSA PUBLIC KEY)", block.Type)
	}
}

// LoadPublicKeyFromPEMFile reads and parses an RSA public key from a PEM file.
func LoadPublicKeyFromPEMFile(path string) (*rsa.PublicKey, error) {
	contents, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("failed to read PEM file: %w", err)
	}
	return LoadPublicKeyFromPEM(contents)
}

// LoadHardcodedPublicKey loads and parses the hardcoded RSA public key.
// Returns a hardcoded UID associated with the key.
// This is a temporary solution for initial development and testing.
// Returns an error if the hardcoded key is invalid or cannot be parsed.
func LoadHardcodedPublicKey() (*rsa.PublicKey, string, error) {
	key, err := LoadPublicKeyFromPEM([]byte(HardcodedPublicKeyPEM))
	if err != nil {
		// Should only happen if the hardcoded PEM constant is corrupted.
		return nil, "", err
	}
	return key, hardcodedUID, nil
}



================================================
FILE: internal/envelope/rsa/keys_test.go
================================================
package rsa_test

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"os"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/jetstack/preflight/internal/envelope/keyfetch"
	internalrsa "github.com/jetstack/preflight/internal/envelope/rsa"
)

// generateTestKeyPEM generates a fresh RSA key pair of the given size and
// returns the public key PEM-encoded in the requested format: PKIX when
// pemType is "PUBLIC KEY", PKCS#1 ("RSA PUBLIC KEY") otherwise.
func generateTestKeyPEM(t *testing.T, keySize int, pemType string) []byte {
	t.Helper()

	privateKey, err := rsa.GenerateKey(rand.Reader, keySize)
	require.NoError(t, err)

	var pemBytes []byte
	if pemType == "PUBLIC KEY" {
		// PKIX format
		publicKeyBytes, err := x509.MarshalPKIXPublicKey(&privateKey.PublicKey)
		require.NoError(t, err)
		pemBytes = pem.EncodeToMemory(&pem.Block{
			Type:  "PUBLIC KEY",
			Bytes: publicKeyBytes,
		})
	} else {
		// PKCS1 format
		publicKeyBytes := x509.MarshalPKCS1PublicKey(&privateKey.PublicKey)
		pemBytes = pem.EncodeToMemory(&pem.Block{
			Type:  "RSA PUBLIC KEY",
			Bytes: publicKeyBytes,
		})
	}
	require.NotNil(t, pemBytes)
	return pemBytes
}

// A PKIX-encoded ("PUBLIC KEY") RSA key must parse successfully.
func TestLoadPublicKeyFromPEM_PKIX(t *testing.T) {
	pemBytes := generateTestKeyPEM(t, 2048, "PUBLIC KEY")

	key, err := internalrsa.LoadPublicKeyFromPEM(pemBytes)
	require.NoError(t, err)
	require.NotNil(t, key)
	require.Equal(t, 2048, key.N.BitLen())
}

// A PKCS#1-encoded ("RSA PUBLIC KEY") key must parse successfully.
func TestLoadPublicKeyFromPEM_PKCS1(t *testing.T) {
	pemBytes := generateTestKeyPEM(t, 2048, "RSA PUBLIC KEY")

	key, err := internalrsa.LoadPublicKeyFromPEM(pemBytes)
	require.NoError(t, err)
	require.NotNil(t, key)
	require.Equal(t, 2048, key.N.BitLen())
}

// Input that is not PEM at all must be rejected with a decode error.
func TestLoadPublicKeyFromPEM_InvalidPEM(t *testing.T) {
	invalidPEM := []byte("this is not a valid PEM")

	key, err := internalrsa.LoadPublicKeyFromPEM(invalidPEM)
	require.Error(t, err)
	require.Nil(t, key)
	require.Contains(t, err.Error(), "failed to decode PEM block")
}

// A PEM block of an unrecognised type (here a private key) must be rejected.
func TestLoadPublicKeyFromPEM_WrongPEMType(t *testing.T) {
	// Create a PEM block with wrong type
	privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
	require.NoError(t, err)

	privateKeyBytes := x509.MarshalPKCS1PrivateKey(privateKey)
	pemBytes := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: privateKeyBytes,
	})

	key, err := internalrsa.LoadPublicKeyFromPEM(pemBytes)
	require.Error(t, err)
	require.Nil(t, key)
	require.Contains(t, err.Error(), "unsupported PEM block type")
}

// A valid PKIX block containing a non-RSA (ECDSA) key must be rejected.
func TestLoadPublicKeyFromPEM_NonRSAKey(t *testing.T) {
	// Generate a real ECDSA key and try to load it as RSA
	ecdsaKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	require.NoError(t, err)

	// Marshal as PKIX public key
	publicKeyBytes, err := x509.MarshalPKIXPublicKey(&ecdsaKey.PublicKey)
	require.NoError(t, err)

	pemBytes := pem.EncodeToMemory(&pem.Block{
		Type:  "PUBLIC KEY",
		Bytes: publicKeyBytes,
	})

	key, err := internalrsa.LoadPublicKeyFromPEM(pemBytes)
	require.Error(t, err)
	require.Nil(t, key)
	require.Contains(t, err.Error(), "not an RSA public key")
}

// Loading a valid key from a file on disk must succeed.
func TestLoadPublicKeyFromPEMFile_ValidFile(t *testing.T) {
	tmpDir := t.TempDir()
	keyPath := filepath.Join(tmpDir, "test_key.pem")

	pemBytes := generateTestKeyPEM(t, 2048, "PUBLIC KEY")
	err := os.WriteFile(keyPath, pemBytes, 0600)
	require.NoError(t, err)

	key, err := internalrsa.LoadPublicKeyFromPEMFile(keyPath)
	require.NoError(t, err)
	require.NotNil(t, key)
	require.Equal(t, 2048, key.N.BitLen())
}

// A missing file must surface a read error.
func TestLoadPublicKeyFromPEMFile_MissingFile(t *testing.T) {
	key, err := internalrsa.LoadPublicKeyFromPEMFile("/nonexistent/path/key.pem")
	require.Error(t, err)
	require.Nil(t, key)
	require.Contains(t, err.Error(), "failed to read PEM file")
}

// A file that exists but does not contain PEM must surface a parse error.
func TestLoadPublicKeyFromPEMFile_InvalidContent(t *testing.T) {
	tmpDir := t.TempDir()
	keyPath := filepath.Join(tmpDir, "invalid_key.pem")

	err := os.WriteFile(keyPath, []byte("not a valid PEM"), 0600)
	require.NoError(t, err)

	key, err := internalrsa.LoadPublicKeyFromPEMFile(keyPath)
	require.Error(t, err)
	require.Nil(t, key)
}

func TestLoadHardcodedPublicKey_CanBeUsedWithEncryptor(t *testing.T) {
	// Test that the hardcoded key can be used to create an encryptor
	// First, test that the key can be loaded successfully
	key, uid, err := internalrsa.LoadHardcodedPublicKey()
	require.NoError(t, err)
	require.NotNil(t, key)
	require.NotEmpty(t, uid)

	fetcher := keyfetch.NewFakeClientWithKey(uid, key)
	encryptor, err := internalrsa.NewEncryptor(fetcher)
	require.NoError(t, err)
	require.NotNil(t, encryptor)

	// Test that the encryptor can encrypt data
	testData := []byte("test data for encryption")
	encryptedData, err := encryptor.Encrypt(t.Context(), testData)
	require.NoError(t, err)
	require.NotNil(t, encryptedData)
	require.NotEmpty(t, encryptedData.Data)
	require.Equal(t, "JWE-RSA", encryptedData.Type)
}



================================================
FILE: internal/envelope/types.go
================================================
package envelope

import (
	"context"
	"encoding/json"
)

// EncryptedData represents encrypted data along with metadata about the encryption type.
type EncryptedData struct {
	// Data contains the encrypted payload
	Data []byte `json:"data"`
	// Type indicates the encryption format (e.g., "JWE-RSA")
	Type string `json:"type"`
}

// ToMap converts the EncryptedData struct to a map representation. Since we store data as an "_encryptedData" field in
// a Kubernetes unstructured object, passing a raw struct would cause a panic due to the behaviour of
// https://pkg.go.dev/k8s.io/apimachinery/pkg/runtime#DeepCopyJSONValue
// Passing a map to unstructured.SetNestedField avoids this issue.
//
// NOTE(review): marshal/unmarshal failures are swallowed and reported as a
// nil map; callers cannot distinguish an error from empty data — confirm
// this is intentional.
func (ed *EncryptedData) ToMap() map[string]any {
	// Round-trip through JSON so the result contains only plain
	// map[string]any / []any / primitive values.
	marshalled, err := json.Marshal(ed)
	if err != nil {
		return nil
	}
	var out map[string]any
	err = json.Unmarshal(marshalled, &out)
	if err != nil {
		return nil
	}
	return out
}

// Encryptor performs envelope encryption on arbitrary data.
type Encryptor interface {
	// Encrypt encrypts data using envelope encryption, returning an EncryptedData struct
	// containing the encrypted payload and encryption type metadata.
	Encrypt(ctx context.Context, data []byte) (*EncryptedData, error)
}



================================================
FILE: klone.yaml
================================================
# This klone.yaml file describes the Makefile modules and versions that are
# cloned into the "make/_shared" folder. These modules are dynamically imported
# by the root Makefile. The "make upgrade-klone" target can be used to pull
# the latest version from the upstream repositories (using the repo_ref value).
#
# More info can be found here: https://github.com/cert-manager/makefile-modules
targets:
  make/_shared:
    - folder_name: generate-verify
      repo_url: https://github.com/cert-manager/makefile-modules.git
      repo_ref: main
      repo_hash: 962eeffd065691abd2644eb514a7ec4cc47808fb
      repo_path: modules/generate-verify
    - folder_name: go
      repo_url: https://github.com/cert-manager/makefile-modules.git
      repo_ref: main
      repo_hash: 962eeffd065691abd2644eb514a7ec4cc47808fb
      repo_path: modules/go
    - folder_name: helm
      repo_url: https://github.com/cert-manager/makefile-modules.git
      repo_ref: main
      repo_hash: 962eeffd065691abd2644eb514a7ec4cc47808fb
      repo_path: modules/helm
    - folder_name: help
      repo_url: https://github.com/cert-manager/makefile-modules.git
      repo_ref: main
      repo_hash: 962eeffd065691abd2644eb514a7ec4cc47808fb
      repo_path: modules/help
    - folder_name: kind
      repo_url: https://github.com/cert-manager/makefile-modules.git
      repo_ref: main
      repo_hash: 962eeffd065691abd2644eb514a7ec4cc47808fb
      repo_path: modules/kind
    - folder_name: klone
      repo_url: https://github.com/cert-manager/makefile-modules.git
      repo_ref: main
      repo_hash: 962eeffd065691abd2644eb514a7ec4cc47808fb
      repo_path: modules/klone
    - folder_name: licenses
      repo_url: https://github.com/cert-manager/makefile-modules.git
      repo_ref: main
      repo_hash: 962eeffd065691abd2644eb514a7ec4cc47808fb
      repo_path: modules/licenses
    - folder_name: oci-build
      repo_url: https://github.com/cert-manager/makefile-modules.git
      repo_ref: main
      repo_hash: 962eeffd065691abd2644eb514a7ec4cc47808fb
      repo_path: modules/oci-build
    - folder_name: oci-publish
      repo_url: https://github.com/cert-manager/makefile-modules.git
      repo_ref: main
      repo_hash: 962eeffd065691abd2644eb514a7ec4cc47808fb
      repo_path: modules/oci-publish
    - folder_name: repository-base
      repo_url: https://github.com/cert-manager/makefile-modules.git
      repo_ref: main
      repo_hash: 962eeffd065691abd2644eb514a7ec4cc47808fb
      repo_path: modules/repository-base
    - folder_name: tools
      repo_url: https://github.com/cert-manager/makefile-modules.git
      repo_ref: main
      repo_hash: 962eeffd065691abd2644eb514a7ec4cc47808fb
      repo_path: modules/tools



================================================
FILE: main.go
================================================
package main

import "github.com/jetstack/preflight/cmd"

// main delegates all behaviour to the command package's root command.
func main() {
	cmd.Execute()
}



================================================
FILE: make/00_mod.mk
================================================
repo_name := github.com/jetstack/jetstack-secure

# This is a workaround for the mismatch between the repo name and the go module
# name. It allows golangci-lint to group the github.com/jetstack/preflight
# imports correctly. And it allows the version information to be injected into
# the version package via Go ldflags.
#
# TODO(wallrj): Rename the Go module to match the repository name.
gomodule_name := github.com/jetstack/preflight
generate-golangci-lint-config: repo_name := $(gomodule_name)

license_ignore := gitlab.com/venafi,github.com/jetstack

kind_cluster_name := preflight
kind_cluster_config := $(bin_dir)/scratch/kind_cluster.yaml

build_names := preflight

go_preflight_main_dir := .
go_preflight_mod_dir := .
go_preflight_ldflags := \ -X $(gomodule_name)/pkg/version.PreflightVersion=$(VERSION) \ -X $(gomodule_name)/pkg/version.Commit=$(GITCOMMIT) \ -X $(gomodule_name)/pkg/version.BuildDate=$(shell date "+%F-%T-%Z") \ -X $(gomodule_name)/pkg/client.ClientID=k3TrDbfLhCgnpAbOiiT2kIE1AbovKzjo \ -X $(gomodule_name)/pkg/client.ClientSecret=f39w_3KT9Vp0VhzcPzvh-uVbudzqCFmHER3Huj0dvHgJwVrjxsoOQPIw_1SDiCfa \ -X $(gomodule_name)/pkg/client.AuthServerDomain=auth.jetstack.io oci_preflight_base_image_flavor := static oci_preflight_image_name := quay.io/jetstack/venafi-agent oci_preflight_image_tag := $(VERSION) oci_preflight_image_name_development := jetstack.local/venafi-agent # Annotations are the standardised set of annotations we set on every component we publish oci_preflight_build_args := \ --image-annotation="org.opencontainers.image.vendor"="CyberArk Software Ltd." \ --image-annotation="org.opencontainers.image.licenses"="EULA - https://www.cyberark.com/contract-terms/" \ --image-annotation="org.opencontainers.image.authors"="support@cyberark.com" \ --image-annotation="org.opencontainers.image.title"="Discovery Agent for CyberArk Certificate Manager in Kubernetes and OpenShift Environments" \ --image-annotation="org.opencontainers.image.description"="Gathers machine identity data from Kubernetes clusters." 
\ --image-annotation="org.opencontainers.image.url"="https://www.cyberark.com/products/certificate-manager-for-kubernetes/" \ --image-annotation="org.opencontainers.image.documentation"="https://docs.cyberark.com/mis-saas/vaas/k8s-components/c-tlspk-agent-overview/" \ --image-annotation="org.opencontainers.image.version"="$(VERSION)" \ --image-annotation="org.opencontainers.image.revision"="$(GITCOMMIT)" deploy_name := venafi-kubernetes-agent deploy_namespace := venafi helm_chart_source_dir := deploy/charts/venafi-kubernetes-agent helm_chart_image_name := quay.io/jetstack/charts/venafi-kubernetes-agent helm_chart_version := $(VERSION) helm_labels_template_name := preflight.labels # We skip using the upstream govulncheck generate target because we need to customise the workflow YAML # locally. We provide the targets in this repo instead, and manually maintain the workflow. dont_generate_govulncheck := true helm_image_name ?= $(oci_preflight_image_name) helm_image_tag ?= $(oci_preflight_image_tag) # Allows us to replace the Helm values.yaml's image.repository and image.tag # with the right values. 
define helm_values_mutation_function echo "no mutations defined for this chart" endef golangci_lint_config := .golangci.yaml go_header_file := /dev/null include make/extra_tools.mk include make/ark/00_mod.mk include make/ngts/00_mod.mk ================================================ FILE: make/02_mod.mk ================================================ include make/test-unit.mk include make/ark/02_mod.mk include make/ngts/02_mod.mk GITHUB_OUTPUT ?= /dev/stderr .PHONY: release ## Publish all release artifacts (image + helm chart) ## @category [shared] Release release: $(MAKE) oci-push-preflight $(MAKE) helm-chart-oci-push @echo "RELEASE_OCI_PREFLIGHT_IMAGE=$(oci_preflight_image_name)" >> "$(GITHUB_OUTPUT)" @echo "RELEASE_OCI_PREFLIGHT_TAG=$(oci_preflight_image_tag)" >> "$(GITHUB_OUTPUT)" @echo "RELEASE_HELM_CHART_IMAGE=$(helm_chart_image_name)" >> "$(GITHUB_OUTPUT)" @echo "RELEASE_HELM_CHART_VERSION=$(helm_chart_version)" >> "$(GITHUB_OUTPUT)" @echo "Release complete!" .PHONY: generate-crds-venconn ## Pulls the VenafiConnection CRD from the venafi-connection-lib Go module. ## @category [shared] Generate/ Verify # # We aren't using "generate-crds" because "generate-crds" only work for projects # from which controller-gen can be used to generate the plain CRDs (plain CRDs = # the non-templated CRDs). In this project, we generate the plain CRDs using `go # run ./make/connection_crd` instead. generate-crds-venconn: $(addprefix $(helm_chart_source_dir)/templates/,venafi-connection-crd.yaml venafi-connection-crd.without-validations.yaml) $(helm_chart_source_dir)/crd_bases/jetstack.io_venaficonnections.yaml: go.mod | $(NEEDS_GO) echo "# DO NOT EDIT: Use 'make generate-crds-venconn' to regenerate." 
>$@ $(GO) run ./make/connection_crd >>$@ $(helm_chart_source_dir)/templates/venafi-connection-crd.without-validations.yaml: $(helm_chart_source_dir)/crd_bases/jetstack.io_venaficonnections.yaml $(helm_chart_source_dir)/crd_bases/crd.header.yaml $(helm_chart_source_dir)/crd_bases/crd.footer.yaml | $(NEEDS_YQ) cat $(helm_chart_source_dir)/crd_bases/crd.header-without-validations.yaml >$@ $(YQ) -I2 '{"spec": .spec}' $< | $(YQ) 'del(.. | ."x-kubernetes-validations"?) | del(.metadata.creationTimestamp)' | grep -v "DO NOT EDIT" >>$@ cat $(helm_chart_source_dir)/crd_bases/crd.footer.yaml >>$@ $(helm_chart_source_dir)/templates/venafi-connection-crd.yaml: $(helm_chart_source_dir)/crd_bases/jetstack.io_venaficonnections.yaml $(helm_chart_source_dir)/crd_bases/crd.header.yaml $(helm_chart_source_dir)/crd_bases/crd.footer.yaml | $(NEEDS_YQ) cat $(helm_chart_source_dir)/crd_bases/crd.header.yaml >$@ $(YQ) -I2 '{"spec": .spec}' $< | $(YQ) 'del(.metadata.creationTimestamp)' | grep -v "DO NOT EDIT" >>$@ cat $(helm_chart_source_dir)/crd_bases/crd.footer.yaml >>$@ # The generate-crds target doesn't need to be run anymore when running # "generate". Let's replace it with "generate-crds-venconn". shared_generate_targets := $(filter-out generate-crds,$(shared_generate_targets)) shared_generate_targets += generate-crds-venconn .PHONY: test-e2e-gke ## Run a basic E2E test on a GKE cluster ## Build and install venafi-kubernetes-agent for VenafiConnection based authentication. ## Wait for it to log a message indicating successful data upload. ## See `hack/e2e/test.sh` for the full test script. ## @category Testing test-e2e-gke: | $(NEEDS_HELM) $(NEEDS_STEP) $(NEEDS_VENCTL) ./hack/e2e/test.sh .PHONY: test-helm-snapshot ## Update the `helm unittest` snapshots. 
## Note that running helm unit tests is done through "make verify" using the Helm makefile-module ## @category Testing test-helm-snapshot: | $(NEEDS_HELM-UNITTEST) $(HELM-UNITTEST) ./deploy/charts/{venafi-kubernetes-agent,disco-agent,discovery-agent} -u .PHONY: helm-plugins ## Install required helm plugins helm-plugins: $(NEEDS_HELM) @if ! $(HELM) plugin list | grep -q diff; then \ echo ">>> Installing helm-diff plugin"; \ $(HELM) plugin install https://github.com/databus23/helm-diff --verify=false; \ else \ echo "helm-diff plugin already installed"; \ fi # https://docs.cyberark.com/mis-saas/vaas/venctl/c-venctl-releases/ venctl_linux_amd64_SHA256SUM=f1027056ec243c7ea9183fe410d5daf99cd4fa18cff9149d64749a106832595a venctl_darwin_amd64_SHA256SUM=4f75900c7b3256cc786004bd5d6193f95f505521e761a9917b3c3d243440f77e venctl_darwin_arm64_SHA256SUM=1648b17020291f90b8c1195be8b963d96f7be31a6e43ba944dd104729f16d1c5 .PRECIOUS: $(DOWNLOAD_DIR)/tools/venctl@$(VENCTL_VERSION)_$(HOST_OS)_$(HOST_ARCH) $(DOWNLOAD_DIR)/tools/venctl@$(VENCTL_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools @source $(lock_script) $@; \ $(CURL) https://dl.venafi.cloud/venctl/$(VENCTL_VERSION)/venctl-$(HOST_OS)-$(HOST_ARCH).zip -o $(outfile).zip; \ $(checkhash_script) $(outfile).zip $(venctl_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \ unzip -p $(outfile).zip venctl > $(outfile); \ chmod +x $(outfile); \ rm -f $(outfile).zip # https://github.com/smallstep/cli/releases/ step_linux_amd64_SHA256SUM=2908f3c7d90181eec430070b231da5c0861e37537bf8e2388d031d3bd6c7b8c6 step_linux_arm64_SHA256SUM=96636a6cc980d53a98c72aa3b99e04f0b874a733d9ddf43fc6b0f1725f425c37 step_darwin_amd64_SHA256SUM=f6e9a9078cfc5f559c8213e023df6e8ebf8d9d36ffbd82749a41ee1c40a23623 step_darwin_arm64_SHA256SUM=b856702ee138a9badbe983e88758c0330907ea4f97e429000334ba038597db5b .PRECIOUS: $(DOWNLOAD_DIR)/tools/step@$(STEP_VERSION)_$(HOST_OS)_$(HOST_ARCH) $(DOWNLOAD_DIR)/tools/step@$(STEP_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools @source 
$(lock_script) $@; \ $(CURL) https://dl.smallstep.com/gh-release/cli/gh-release-header/v$(STEP_VERSION)/step_$(HOST_OS)_$(STEP_VERSION)_$(HOST_ARCH).tar.gz -o $(outfile).tar.gz; \ $(checkhash_script) $(outfile).tar.gz $(step_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \ tar xfO $(outfile).tar.gz step_$(STEP_VERSION)/bin/step > $(outfile); \ chmod +x $(outfile); \ rm -f $(outfile).tar.gz ================================================ FILE: make/_shared/generate-verify/00_mod.mk ================================================ # Copyright 2023 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. shared_generate_targets ?= shared_generate_targets_dirty ?= shared_verify_targets ?= shared_verify_targets_dirty ?= ================================================ FILE: make/_shared/generate-verify/02_mod.mk ================================================ # Copyright 2023 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
.PHONY: generate ## Generate all generate targets. ## @category [shared] Generate/ Verify generate: $$(shared_generate_targets) @echo "The following targets cannot be run simultaneously with each other or other generate scripts:" $(foreach TARGET,$(shared_generate_targets_dirty), $(MAKE) $(TARGET)) verify_script := $(dir $(lastword $(MAKEFILE_LIST)))/util/verify.sh # Run the supplied make target argument in a temporary workspace and diff the results. verify-%: FORCE +$(verify_script) $(MAKE) $* verify_generated_targets = $(shared_generate_targets:%=verify-%) verify_generated_targets_dirty = $(shared_generate_targets_dirty:%=verify-%) verify_targets = $(sort $(verify_generated_targets) $(shared_verify_targets)) verify_targets_dirty = $(sort $(verify_generated_targets_dirty) $(shared_verify_targets_dirty)) .PHONY: verify ## Verify code and generate targets. ## @category [shared] Generate/ Verify verify: $$(verify_targets) @echo "The following targets create temporary files in the current directory, that is why they have to be run last:" $(foreach TARGET,$(verify_targets_dirty), $(MAKE) $(TARGET)) ================================================ FILE: make/_shared/generate-verify/util/verify.sh ================================================ #!/usr/bin/env bash # Copyright 2023 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Verify that the supplied command does not make any changes to the repository. 
# # This is called from the Makefile to verify that all code generation scripts # have been run and that their changes have been committed to the repository. # # Runs any of the scripts or Make targets in this repository, after making a # copy of the repository, then reports any changes to the files in the copy. # For example: # # make verify-helm-chart-update || \ # make helm-chart-update # set -o errexit set -o nounset set -o pipefail projectdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../../../.." && pwd )" cd "${projectdir}" # Use short form arguments here to support BSD/macOS. `-d` instructs # it to make a directory, `-t` provides a prefix to use for the directory name. tmp="$(mktemp -d /tmp/verify.sh.XXXXXXXX)" cleanup() { rm -rf "${tmp}" } trap "cleanup" EXIT SIGINT # Why not just "cp" to the tmp dir? # A dumb "cp" will fail sometimes since _bin can get changed while it's being copied if targets are run in parallel, # and cp doesn't have some universal "exclude" option to ignore "_bin" # # We previously used "rsync" here, but: # 1. That's another tool we need to depend on # 2. rsync on macOS 15.4 and newer is actually openrsync, which has different permissions and throws errors when copying git objects # # So, we use find to list all files except _bin, and then copy each in turn find . -maxdepth 1 -not \( -path "./_bin" \) -not \( -path "." \) | xargs -I% cp -af "${projectdir}/%" "${tmp}/" pushd "${tmp}" >/dev/null "$@" popd >/dev/null if ! diff \ --exclude=".git" \ --exclude="_bin" \ --new-file --unified --show-c-function --recursive "${projectdir}" "${tmp}" then echo echo "Project '${projectdir}' is out of date." 
echo "Please run '${*}' or apply the above diffs" exit 1 fi ================================================ FILE: make/_shared/go/.golangci.override.yaml ================================================ version: "2" linters: default: none exclusions: generated: lax presets: [ comments, common-false-positives, legacy, std-error-handling ] paths: [ third_party, builtin$, examples$ ] warn-unused: true settings: staticcheck: checks: [ "all", "-ST1000", "-ST1001", "-ST1003", "-ST1005", "-ST1012", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-QF1001", "-QF1003", "-QF1008" ] enable: - asasalint - asciicheck - bidichk - bodyclose - canonicalheader - contextcheck - copyloopvar - decorder - dogsled - dupword - durationcheck - errcheck - errchkjson - errname - exhaustive - exptostd - forbidigo - ginkgolinter - gocheckcompilerdirectives - gochecksumtype - gocritic - goheader - goprintffuncname - gosec - gosmopolitan - govet - grouper - importas - ineffassign - interfacebloat - intrange - loggercheck - makezero - mirror - misspell - modernize - musttag - nakedret - nilerr - nilnil - noctx - nosprintfhostport - predeclared - promlinter - protogetter - reassign - sloglint - staticcheck - tagalign - testableexamples - unconvert - unparam - unused - usestdlibvars - usetesting - wastedassign formatters: enable: [ gci, gofmt ] settings: gci: custom-order: true sections: - standard # Standard section: captures all standard packages. - default # Default section: contains all imports that could not be matched to another section type. - localmodule # Local module section: contains all local packages. This section is not present unless explicitly enabled. - blank # Blank section: contains all blank imports. This section is not present unless explicitly enabled. - dot # Dot section: contains all dot imports. This section is not present unless explicitly enabled. 
exclusions: generated: lax paths: [ third_party, builtin$, examples$ ] ================================================ FILE: make/_shared/go/01_mod.mk ================================================ # Copyright 2023 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ifndef bin_dir $(error bin_dir is not set) endif ifndef repo_name $(error repo_name is not set) endif ifndef golangci_lint_config $(error golangci_lint_config is not set) endif golangci_lint_override := $(dir $(lastword $(MAKEFILE_LIST)))/.golangci.override.yaml .PHONY: go-workspace go-workspace: export GOWORK?=$(abspath go.work) ## Create a go.work file in the repository root (or GOWORK) ## ## @category Development go-workspace: | $(NEEDS_GO) @rm -f $(GOWORK) $(GO) work init @find . -name go.mod -not \( -path "./$(bin_dir)/*" -or -path "./make/_shared/*" \) \ | while read d; do \ target=$$(dirname $${d}); \ $(GO) work use "$${target}"; \ done .PHONY: go-tidy ## Alias for `make generate-go-mod-tidy` ## @category [shared] Generate/ Verify go-tidy: generate-go-mod-tidy .PHONY: generate-go-mod-tidy ## Run `go mod tidy` on all Go modules ## @category [shared] Generate/ Verify generate-go-mod-tidy: | $(NEEDS_GO) @find . 
-name go.mod -not \( -path "./$(bin_dir)/*" -or -path "./make/_shared/*" \) \ | while read d; do \ target=$$(dirname $${d}); \ echo "Running 'go mod tidy' in directory '$${target}'"; \ pushd "$${target}" >/dev/null; \ $(GO) mod tidy || exit; \ $(GO) get toolchain@none || exit; \ popd >/dev/null; \ echo ""; \ done shared_generate_targets := generate-go-mod-tidy $(shared_generate_targets) ifndef dont_generate_govulncheck govulncheck_base_dir := $(dir $(lastword $(MAKEFILE_LIST)))/base/ .PHONY: generate-govulncheck ## Generate base files in the repository ## @category [shared] Generate/ Verify generate-govulncheck: cp -r $(govulncheck_base_dir)/. ./ cd $(govulncheck_base_dir) && \ find . -type f | while read file; do \ sed "s|{{REPLACE:GH-REPOSITORY}}|$(repo_name:github.com/%=%)|g" "$$file" > "$(CURDIR)/$$file"; \ done shared_generate_targets += generate-govulncheck endif # dont_generate_govulncheck .PHONY: verify-govulncheck ## Verify all Go modules for vulnerabilities using govulncheck ## @category [shared] Generate/ Verify # # Runs `govulncheck` on all Go modules related to the project. # Ignores Go modules among the temporary build artifacts in _bin, to avoid # scanning the code of the vendored Go, after running make vendor-go. # Ignores Go modules in make/_shared, because those will be checked in centrally # in the makefile_modules repository. # # `verify-govulncheck` not added to the `shared_verify_targets` variable and is # not run by `make verify`, because `make verify` is run for each PR, and we do # not want new vulnerabilities in existing code to block the merging of PRs. # Instead `make verify-govulncheck` is intended to be run periodically by a CI job. verify-govulncheck: | $(NEEDS_GOVULNCHECK) @find . -name go.mod -not \( -path "./$(bin_dir)/*" -or -path "./make/_shared/*" \) \ | while read d; do \ target=$$(dirname $${d}); \ echo "Running 'GOTOOLCHAIN=go$(VENDORED_GO_VERSION) $(bin_dir)/tools/govulncheck ./...' 
in directory '$${target}'"; \ pushd "$${target}" >/dev/null; \ GOTOOLCHAIN=go$(VENDORED_GO_VERSION) $(GOVULNCHECK) ./... || exit; \ popd >/dev/null; \ echo ""; \ done .PHONY: generate-golangci-lint-config ## Generate a golangci-lint configuration file ## @category [shared] Generate/ Verify generate-golangci-lint-config: | $(NEEDS_GOLANGCI-LINT) $(NEEDS_YQ) $(bin_dir)/scratch if [ "$$($(YQ) eval 'has("version") | not' $(golangci_lint_config))" == "true" ]; then \ $(GOLANGCI-LINT) migrate -c $(golangci_lint_config); \ rm $(basename $(golangci_lint_config)).bck$(suffix $(golangci_lint_config)); \ fi cp $(golangci_lint_config) $(bin_dir)/scratch/golangci-lint.yaml.tmp $(YQ) -i 'del(.linters.enable)' $(bin_dir)/scratch/golangci-lint.yaml.tmp $(YQ) eval-all -i '. as $$item ireduce ({}; . * $$item)' $(bin_dir)/scratch/golangci-lint.yaml.tmp $(golangci_lint_override) mv $(bin_dir)/scratch/golangci-lint.yaml.tmp $(golangci_lint_config) shared_generate_targets += generate-golangci-lint-config golangci_lint_timeout ?= 10m .PHONY: verify-golangci-lint ## Verify all Go modules using golangci-lint ## @category [shared] Generate/ Verify verify-golangci-lint: | $(NEEDS_GO) $(NEEDS_GOLANGCI-LINT) $(NEEDS_YQ) $(bin_dir)/scratch @find . 
-name go.mod -not \( -path "./$(bin_dir)/*" -or -path "./make/_shared/*" \) \ | while read d; do \ target=$$(dirname $${d}); \ echo "Running 'GOVERSION=$(VENDORED_GO_VERSION) $(bin_dir)/tools/golangci-lint run -c $(CURDIR)/$(golangci_lint_config) --timeout $(golangci_lint_timeout)' in directory '$${target}'"; \ pushd "$${target}" >/dev/null; \ GOVERSION=$(VENDORED_GO_VERSION) $(GOLANGCI-LINT) run -c $(CURDIR)/$(golangci_lint_config) --timeout $(golangci_lint_timeout) || exit; \ popd >/dev/null; \ echo ""; \ done shared_verify_targets_dirty += verify-golangci-lint .PHONY: fix-golangci-lint ## Fix all Go modules using golangci-lint ## @category [shared] Generate/ Verify fix-golangci-lint: | $(NEEDS_GOLANGCI-LINT) $(NEEDS_YQ) $(NEEDS_GCI) $(bin_dir)/scratch @find . -name go.mod -not \( -path "./$(bin_dir)/*" -or -path "./make/_shared/*" \) \ | while read d; do \ target=$$(dirname $${d}); \ echo "Running 'GOVERSION=$(VENDORED_GO_VERSION) $(bin_dir)/tools/golangci-lint run --fix -c $(CURDIR)/$(golangci_lint_config) --timeout $(golangci_lint_timeout)' in directory '$${target}'"; \ pushd "$${target}" >/dev/null; \ GOVERSION=$(VENDORED_GO_VERSION) $(GOLANGCI-LINT) run --fix -c $(CURDIR)/$(golangci_lint_config) --timeout $(golangci_lint_timeout) || exit; \ popd >/dev/null; \ echo ""; \ done ================================================ FILE: make/_shared/go/README.md ================================================ # README A module for various Go static checks. ================================================ FILE: make/_shared/go/base/.github/workflows/govulncheck.yaml ================================================ # THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. # Edit https://github.com/cert-manager/makefile-modules/blob/main/modules/go/base/.github/workflows/govulncheck.yaml instead. # Run govulncheck at midnight every night on the main branch, # to alert us to recent vulnerabilities which affect the Go code in this # project. 
name: govulncheck on: workflow_dispatch: {} schedule: - cron: '0 0 * * *' permissions: contents: read jobs: govulncheck: runs-on: ubuntu-latest if: github.repository == '{{REPLACE:GH-REPOSITORY}}' steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 # Adding `fetch-depth: 0` makes sure tags are also fetched. We need # the tags so `git describe` returns a valid version. # see https://github.com/actions/checkout/issues/701 for extra info about this option with: { fetch-depth: 0 } - id: go-version run: | make print-go-version >> "$GITHUB_OUTPUT" - uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0 with: go-version: ${{ steps.go-version.outputs.result }} - run: make verify-govulncheck ================================================ FILE: make/_shared/helm/01_mod.mk ================================================ # Copyright 2023 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
ifndef helm_dont_include_crds include $(dir $(lastword $(MAKEFILE_LIST)))/crds.mk endif include $(dir $(lastword $(MAKEFILE_LIST)))/helm.mk include $(dir $(lastword $(MAKEFILE_LIST)))/deploy.mk ================================================ FILE: make/_shared/helm/crd.template.footer.yaml ================================================ {{- end }} ================================================ FILE: make/_shared/helm/crd.template.header.yaml ================================================ {{- if REPLACE_CRD_EXPRESSION }} apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: "REPLACE_CRD_NAME" {{- if .Values.crds.keep }} annotations: helm.sh/resource-policy: keep {{- end }} labels: {{- include "REPLACE_LABELS_TEMPLATE" . | nindent 4 }} ================================================ FILE: make/_shared/helm/crds.mk ================================================ # Copyright 2023 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
################ # Check Inputs # ################ ifndef helm_chart_source_dir $(error helm_chart_source_dir is not set) endif ifndef helm_labels_template_name $(error helm_labels_template_name is not set) endif ################ # Add targets # ################ crd_template_header := $(dir $(lastword $(MAKEFILE_LIST)))/crd.template.header.yaml crd_template_footer := $(dir $(lastword $(MAKEFILE_LIST)))/crd.template.footer.yaml # see https://stackoverflow.com/a/53408233 sed_inplace := sed -i'' ifeq ($(HOST_OS),darwin) sed_inplace := sed -i '' endif crds_dir ?= deploy/crds crds_dir_readme := $(dir $(lastword $(MAKEFILE_LIST)))/crds_dir.README.md crds_expression ?= .Values.crds.enabled crds_template_include_pattern := *.yaml # Space-separated list of basenames to exclude (e.g. foo.yaml *_test.yaml) crds_template_exclude_pattern ?= define filter-out-basenames $(if $(strip $(2)), \ $(foreach f,$(1),$(if $(filter $(2),$(notdir $(f))),,$(f))), \ $(1)) endef .PHONY: generate-crds ## Generate CRD manifests. ## @category [shared] Generate/ Verify generate-crds: | $(NEEDS_CONTROLLER-GEN) $(NEEDS_YQ) $(eval crds_gen_temp := $(bin_dir)/scratch/crds) $(eval directories := $(shell ls -d */ | grep -v -e 'make' $(shell git check-ignore -- * | sed 's/^/-e /'))) rm -rf $(crds_gen_temp) mkdir -p $(crds_gen_temp) $(CONTROLLER-GEN) crd \ $(directories:%=paths=./%...) 
\ output:crd:artifacts:config=$(crds_gen_temp) @echo "Updating CRDs with helm templating, writing to $(helm_chart_source_dir)/templates" $(eval crds_gen_temp_all_files := $(wildcard $(crds_gen_temp)/$(crds_template_include_pattern))) $(eval crds_gen_temp_files := $(if $(crds_template_exclude_pattern), \ $(call filter-out-basenames,$(crds_gen_temp_all_files),$(crds_template_exclude_pattern)), \ $(crds_gen_temp_all_files))) @for f in $(crds_gen_temp_files); do \ crd_name=$$($(YQ) eval '.metadata.name' $$f); \ crd_template_file="$(helm_chart_source_dir)/templates/crd-$$(basename $$f)"; \ cat $(crd_template_header) > $$crd_template_file; \ $(sed_inplace) "s/REPLACE_CRD_EXPRESSION/$(crds_expression)/g" $$crd_template_file; \ $(sed_inplace) "s/REPLACE_CRD_NAME/$$crd_name/g" $$crd_template_file; \ $(sed_inplace) "s/REPLACE_LABELS_TEMPLATE/$(helm_labels_template_name)/g" $$crd_template_file; \ $(YQ) -I2 '{"spec": .spec}' $$f >> $$crd_template_file; \ cat $(crd_template_footer) >> $$crd_template_file; \ done @if [ -n "$$(ls $(crds_gen_temp) 2>/dev/null)" ]; then \ cp $(crds_gen_temp)/* $(crds_dir)/ ; \ cp $(crds_dir_readme) $(crds_dir)/README.md ; \ fi shared_generate_targets += generate-crds ================================================ FILE: make/_shared/helm/crds_dir.README.md ================================================ # CRDs source directory > **WARNING**: if you are an end-user, you probably should NOT need to use the > files in this directory. These files are for **reference, development and testing purposes only**. This directory contains 'source code' used to build our CustomResourceDefinition resources consumed by our officially supported deployment methods (e.g. the Helm chart). The CRDs in this directory might be incomplete, and should **NOT** be used to provision the operator. ================================================ FILE: make/_shared/helm/deploy.mk ================================================ # Copyright 2023 The cert-manager Authors. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ifndef deploy_name $(error deploy_name is not set) endif ifndef deploy_namespace $(error deploy_namespace is not set) endif # Install options allows the user configuration of extra flags INSTALL_OPTIONS ?= ########################################## .PHONY: install ## Install controller helm chart on the current active K8S cluster. ## @category [shared] Deployment install: $(helm_chart_archive) | $(NEEDS_HELM) $(HELM) upgrade $(deploy_name) $(helm_chart_archive) \ --wait \ --install \ --create-namespace \ $(INSTALL_OPTIONS) \ --namespace $(deploy_namespace) .PHONY: uninstall ## Uninstall controller helm chart from the current active K8S cluster. ## @category [shared] Deployment uninstall: | $(NEEDS_HELM) $(HELM) uninstall $(deploy_name) \ --wait \ --namespace $(deploy_namespace) .PHONY: template ## Template the helm chart. ## @category [shared] Deployment template: $(helm_chart_archive) | $(NEEDS_HELM) @$(HELM) template $(deploy_name) $(helm_chart_archive) \ --create-namespace \ $(INSTALL_OPTIONS) \ --namespace $(deploy_namespace) ================================================ FILE: make/_shared/helm/helm.mk ================================================ # Copyright 2023 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ifndef bin_dir $(error bin_dir is not set) endif ifndef helm_chart_source_dir $(error helm_chart_source_dir is not set) endif ifndef helm_chart_image_name $(error helm_chart_image_name is not set) endif ifndef helm_chart_version $(error helm_chart_version is not set) endif ifneq ($(helm_chart_version:v%=v),v) $(error helm_chart_version "$(helm_chart_version)" should start with a "v" - did you forget to pull tags from the remote repository?) endif ifndef helm_values_mutation_function $(error helm_values_mutation_function is not set) endif ########################################## helm_chart_name := $(notdir $(helm_chart_image_name)) helm_chart_image_registry := $(dir $(helm_chart_image_name)) helm_chart_image_tag := $(helm_chart_version) helm_chart_sources := $(shell find $(helm_chart_source_dir) -maxdepth 1 -type f) $(shell find $(helm_chart_source_dir)/templates -type f) helm_chart_archive := $(bin_dir)/scratch/helm/$(helm_chart_name)-$(helm_chart_version).tgz helm_digest_path := $(bin_dir)/scratch/helm/$(helm_chart_name)-$(helm_chart_version).digests helm_digest = $(shell head -1 $(helm_digest_path) 2> /dev/null) $(bin_dir)/scratch/helm: @mkdir -p $@ $(helm_chart_archive): $(helm_chart_sources) | $(NEEDS_HELM) $(NEEDS_YQ) $(bin_dir)/scratch/helm $(eval helm_chart_source_dir_versioned := $@.tmp) rm -rf $(helm_chart_source_dir_versioned) mkdir -p $(dir $(helm_chart_source_dir_versioned)) cp -a $(helm_chart_source_dir) $(helm_chart_source_dir_versioned) $(call helm_values_mutation_function,$(helm_chart_source_dir_versioned)/values.yaml) @if ! 
$(YQ) -oy '.name' $(helm_chart_source_dir_versioned)/Chart.yaml | grep -q '^$(helm_chart_name)$$'; then \ echo "Chart name does not match the name in the helm_chart_name variable"; \ exit 1; \ fi $(YQ) '.annotations."artifacthub.io/prerelease" = "$(IS_PRERELEASE)"' \ --inplace $(helm_chart_source_dir_versioned)/Chart.yaml mkdir -p $(dir $@) $(HELM) package $(helm_chart_source_dir_versioned) \ --app-version $(helm_chart_version) \ --version $(helm_chart_version) \ --destination $(dir $@) .PHONY: helm-chart-oci-push ## Create and push Helm chart to OCI registry. ## Will also create a non-v-prefixed tag for the OCI image. ## @category [shared] Publish helm-chart-oci-push: $(helm_chart_archive) | $(NEEDS_HELM) $(NEEDS_CRANE) $(HELM) push "$(helm_chart_archive)" "oci://$(helm_chart_image_registry)" 2>&1 \ | tee >(grep -o "sha256:.\+" | tee $(helm_digest_path)) @# $(helm_chart_image_tag:v%=%) removes the v prefix from the value stored in helm_chart_image_tag. @# See https://www.gnu.org/software/make/manual/html_node/Substitution-Refs.html for the manual on the syntax. helm_digest=$$(cat $(helm_digest_path)) && \ $(CRANE) copy "$(helm_chart_image_name)@$$helm_digest" "$(helm_chart_image_name):$(helm_chart_image_tag:v%=%)" .PHONY: helm-chart ## Create a helm chart ## @category [shared] Helm Chart helm-chart: $(helm_chart_archive) helm_tool_header_search ?= ^ helm_tool_footer_search ?= ^ .PHONY: generate-helm-docs ## Generate Helm chart documentation. ## @category [shared] Generate/ Verify generate-helm-docs: | $(NEEDS_HELM-TOOL) $(HELM-TOOL) inject -i $(helm_chart_source_dir)/values.yaml -o $(helm_chart_source_dir)/README.md --header-search "$(helm_tool_header_search)" --footer-search "$(helm_tool_footer_search)" shared_generate_targets += generate-helm-docs .PHONY: generate-helm-schema ## Generate Helm chart schema. 
## @category [shared] Generate/ Verify generate-helm-schema: | $(NEEDS_HELM-TOOL) $(NEEDS_GOJQ) $(HELM-TOOL) schema -i $(helm_chart_source_dir)/values.yaml | $(GOJQ) > $(helm_chart_source_dir)/values.schema.json shared_generate_targets += generate-helm-schema .PHONY: verify-helm-values ## Verify Helm chart values using helm-tool. ## @category [shared] Generate/ Verify verify-helm-values: | $(NEEDS_HELM-TOOL) $(NEEDS_GOJQ) $(HELM-TOOL) lint -i $(helm_chart_source_dir)/values.yaml -d $(helm_chart_source_dir)/templates -e $(helm_chart_source_dir)/values.linter.exceptions shared_verify_targets += verify-helm-values .PHONY: verify-helm-unittest ## Run Helm chart unit tests using helm-unittest. ## @category [shared] Generate/ Verify verify-helm-unittest: | $(NEEDS_HELM-UNITTEST) $(HELM-UNITTEST) $(helm_chart_source_dir) shared_verify_targets += verify-helm-unittest $(bin_dir)/scratch/kyverno: @mkdir -p $@ $(bin_dir)/scratch/kyverno/pod-security-policy.yaml: | $(NEEDS_KUSTOMIZE) $(bin_dir)/scratch/kyverno @$(KUSTOMIZE) build https://github.com/kyverno/policies/pod-security/enforce > $@ # Extra arguments for kyverno apply. kyverno_apply_extra_args := # Allows known policy violations to be skipped by supplying Kyverno policy # exceptions as a Kyverno YAML resource, e.g.: # apiVersion: kyverno.io/v2 # kind: PolicyException # metadata: # name: pod-security-exceptions # spec: # exceptions: # - policyName: disallow-privilege-escalation # ruleNames: # - autogen-privilege-escalation # - policyName: restrict-seccomp-strict # ruleNames: # - autogen-check-seccomp-strict # match: # any: # - resources: # kinds: # - Deployment # namespaces: # - mynamespace # names: # - my-deployment ifneq ("$(wildcard make/verify-pod-security-standards-exceptions.yaml)","") kyverno_apply_extra_args += --exceptions make/verify-pod-security-standards-exceptions.yaml endif .PHONY: verify-pod-security-standards ## Verify that the Helm chart complies with the pod security standards. 
## ## You can add Kyverno policy exceptions to ## `make/verify-pod-security-standards-exceptions.yaml`, to skip some of the pod ## security policy rules. ## ## @category [shared] Generate/ Verify verify-pod-security-standards: $(helm_chart_archive) $(bin_dir)/scratch/kyverno/pod-security-policy.yaml | $(NEEDS_KYVERNO) $(NEEDS_HELM) @$(HELM) template $(helm_chart_archive) $(INSTALL_OPTIONS) \ | $(KYVERNO) apply $(bin_dir)/scratch/kyverno/pod-security-policy.yaml \ $(kyverno_apply_extra_args) \ --resource - \ --table shared_verify_targets_dirty += verify-pod-security-standards .PHONY: verify-helm-lint ## Verify that the Helm chart is linted. ## @category [shared] Generate/ Verify verify-helm-lint: $(helm_chart_archive) | $(NEEDS_HELM) $(HELM) lint $(helm_chart_archive) shared_verify_targets_dirty += verify-helm-lint .PHONY: verify-helm-kubeconform ## Verify that the Helm chart passes a strict check using kubeconform ## @category [shared] Generate/ Verify verify-helm-kubeconform: $(helm_chart_archive) | $(NEEDS_KUBECONFORM) @$(HELM) template $(helm_chart_archive) $(INSTALL_OPTIONS) \ | $(KUBECONFORM) \ -schema-location default \ -schema-location "https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/{{.NormalizedKubernetesVersion}}/{{.ResourceKind}}.json" \ -schema-location "https://raw.githubusercontent.com/datreeio/CRDs-catalog/main/{{.Group}}/{{.ResourceKind}}_{{.ResourceAPIVersion}}.json" \ -strict shared_verify_targets_dirty += verify-helm-kubeconform ================================================ FILE: make/_shared/help/01_mod.mk ================================================ # Copyright 2023 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. help_sh := $(dir $(lastword $(MAKEFILE_LIST)))/help.sh .PHONY: help help: @MAKEFILE_LIST="$(MAKEFILE_LIST)" \ MAKE="$(MAKE)" \ $(help_sh) ================================================ FILE: make/_shared/help/help.sh ================================================ #!/usr/bin/env bash # Copyright 2023 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -o errexit set -o nounset set -o pipefail ## 1. Build set of extracted line items EMPTYLINE_REGEX="^[[:space:]]*$" DOCBLOCK_REGEX="^##[[:space:]]*(.*)$" CATEGORY_REGEX="^##[[:space:]]*@category[[:space:]]*(.*)$" TARGET_REGEX="^(([a-zA-Z0-9\_\/\%\$\(\)]|-)+):.*$" EMPTY_ITEM="" # shellcheck disable=SC2086 raw_lines=$(cat ${MAKEFILE_LIST} | tr '\t' ' ' | grep -E "($TARGET_REGEX|$DOCBLOCK_REGEX|$EMPTYLINE_REGEX)") extracted_lines="" extracted_current="$EMPTY_ITEM" max_target_length=0 ## Extract all the commented targets from the Makefile while read -r line; do if [[ $line =~ $EMPTYLINE_REGEX ]]; then # Reset current item. 
extracted_current="$EMPTY_ITEM" elif [[ $line =~ $CATEGORY_REGEX ]]; then extracted_current=${extracted_current///${BASH_REMATCH[1]}} elif [[ $line =~ $TARGET_REGEX ]]; then # only keep the target if there is a comment if [[ $extracted_current != *""* ]]; then max_target_length=$(( ${#BASH_REMATCH[1]} > max_target_length ? ${#BASH_REMATCH[1]} : max_target_length )) extracted_current=${extracted_current///${BASH_REMATCH[1]}} extracted_lines="$extracted_lines\n$extracted_current" fi extracted_current="$EMPTY_ITEM" elif [[ $line =~ $DOCBLOCK_REGEX ]]; then extracted_current=${extracted_current///${BASH_REMATCH[1]}} fi done <<< "$raw_lines" ## 2. Build mapping for expanding targets ASSIGNMENT_REGEX="^(([a-zA-Z0-9\_\/\%\$\(\)]|-)+)[[:space:]]*:=[[:space:]]*(.*)$" raw_expansions=$(${MAKE} --dry-run --print-data-base noop | tr '\t' ' ' | grep -E "$ASSIGNMENT_REGEX") extracted_expansions="" while read -r line; do if [[ $line =~ $ASSIGNMENT_REGEX ]]; then target=${BASH_REMATCH[1]} expansion=${BASH_REMATCH[3]// /, } extracted_expansions="$extracted_expansions\n$target$expansion" fi done <<< "$raw_expansions" ## 3. Sort and print the extracted line items RULE_COLOR="$(TERM=xterm tput setaf 6)" CATEGORY_COLOR="$(TERM=xterm tput setaf 3)" CLEAR_STYLE="$(TERM=xterm tput sgr0)" PURPLE=$(TERM=xterm tput setaf 125) extracted_lines=$(echo -e "$extracted_lines" | LC_ALL=C sort -r) current_category="" ## Print the help echo "Usage: make [target1] [target2] ..." IFS=$'\n'; for line in $extracted_lines; do category=$([[ $line =~ \(.*)\ ]] && echo "${BASH_REMATCH[1]}") target=$([[ $line =~ \(.*)\ ]] && echo "${BASH_REMATCH[1]}") comment=$([[ $line =~ \(.*)\ ]] && echo -e "${BASH_REMATCH[1]///\\n}") # Print the category header if it's changed if [[ "$current_category" != "$category" ]]; then current_category=$category echo -e "\n${CATEGORY_COLOR}${current_category}${CLEAR_STYLE}" fi # replace any $(...) 
with the actual value if [[ $target =~ \$\((.*)\) ]]; then new_target=$(echo -e "$extracted_expansions" | grep "${BASH_REMATCH[1]}" || true) if [[ -n "$new_target" ]]; then target=$([[ $new_target =~ \(.*)\ ]] && echo -e "${BASH_REMATCH[1]}") fi fi # Print the target and its multiline comment is_first_line=true while read -r comment_line; do if [[ "$is_first_line" == true ]]; then is_first_line=false padding=$(( max_target_length - ${#target} )) printf " %s%${padding}s ${PURPLE}>${CLEAR_STYLE} %s\n" "${RULE_COLOR}${target}${CLEAR_STYLE}" "" "${comment_line}" else printf " %${max_target_length}s %s\n" "" "${comment_line}" fi done <<< "$comment" done ================================================ FILE: make/_shared/kind/00_kind_image_versions.mk ================================================ # Copyright 2024 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This file is auto-generated by the learn_kind_images.sh script in the makefile-modules repo. # Do not edit manually. 
kind_image_kindversion := v0.31.0 kind_image_kube_1.31_amd64 := docker.io/kindest/node:v1.31.14@sha256:e360318c07a2bb22ced43884c6884208a82d3da24828c9f1329222dd517adc06 kind_image_kube_1.31_arm64 := docker.io/kindest/node:v1.31.14@sha256:cb9072fa3db2b4aaa4fa146193064cd1ddd3fe00666c12c5189e80d3735027b5 kind_image_kube_1.32_amd64 := docker.io/kindest/node:v1.32.11@sha256:831a3aa45e399a20b3aef41d6d8572cc6ff07b1f76cac1242ce26be0ccf86402 kind_image_kube_1.32_arm64 := docker.io/kindest/node:v1.32.11@sha256:6c3e552f3046d9e4b3602f642a54797ebe8bfcd18f3720cac129ae90bf802365 kind_image_kube_1.33_amd64 := docker.io/kindest/node:v1.33.7@sha256:eb929cd8aca88dd03836180c65f3892ba8ccc79d80de1cc6666bcb9a35c1334e kind_image_kube_1.33_arm64 := docker.io/kindest/node:v1.33.7@sha256:09d327961491ceb25a987350e34c5335246f1e28aa48189d815f1905dea66079 kind_image_kube_1.34_amd64 := docker.io/kindest/node:v1.34.3@sha256:babda82416d417f720a4d6dbd35deec5263af2a6c164c81c08cde0044c2b9f78 kind_image_kube_1.34_arm64 := docker.io/kindest/node:v1.34.3@sha256:55cc745d5da0ef8c7a24a9f25f2df7cc6af0fadf85cf24bd639d2c2f02bacfab kind_image_kube_1.35_amd64 := docker.io/kindest/node:v1.35.0@sha256:b7f5e1f621afb1156eb0f27f26c804e5265c07d8e9c55516d25d66400043629b kind_image_kube_1.35_arm64 := docker.io/kindest/node:v1.35.0@sha256:0aa5e1a411b2c3197184286d7699424a123cd4d18c04c24317173dc5256c6110 kind_image_latest_amd64 := $(kind_image_kube_1.35_amd64) kind_image_latest_arm64 := $(kind_image_kube_1.35_arm64) ================================================ FILE: make/_shared/kind/00_mod.mk ================================================ # Copyright 2023 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. include $(dir $(lastword $(MAKEFILE_LIST)))/00_kind_image_versions.mk images_amd64 ?= images_arm64 ?= # K8S_VERSION can be used to specify a specific # kubernetes version to use with Kind. K8S_VERSION ?= ifeq ($(K8S_VERSION),) images_amd64 += $(kind_image_latest_amd64) images_arm64 += $(kind_image_latest_arm64) else fatal_if_undefined = $(if $(findstring undefined,$(origin $1)),$(error $1 is not set)) $(call fatal_if_undefined,kind_image_kube_$(K8S_VERSION)_amd64) $(call fatal_if_undefined,kind_image_kube_$(K8S_VERSION)_arm64) images_amd64 += $(kind_image_kube_$(K8S_VERSION)_amd64) images_arm64 += $(kind_image_kube_$(K8S_VERSION)_arm64) endif ================================================ FILE: make/_shared/kind/01_mod.mk ================================================ # Copyright 2023 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
include $(dir $(lastword $(MAKEFILE_LIST)))/kind.mk include $(dir $(lastword $(MAKEFILE_LIST)))/kind-image-preload.mk ================================================ FILE: make/_shared/kind/kind-image-preload.mk ================================================ # Copyright 2023 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ifndef bin_dir $(error bin_dir is not set) endif ifndef images_amd64 $(error images_amd64 is not set) endif ifndef images_arm64 $(error images_arm64 is not set) endif ########################################## images := $(images_$(HOST_ARCH)) images_tar_dir := $(bin_dir)/downloaded/containers/$(HOST_ARCH) images_tars := $(foreach image,$(images),$(images_tar_dir)/$(subst :,+,$(image)).tar) # Download the images as tarballs. After downloading the image using # its digest, we use image-tool to modify the .[0].RepoTags[0] value in # the manifest.json file to have the correct tag (instead of "i-was-a-digest" # which is set when the image is pulled using its digest). This tag is used # to reference the image after it has been imported using docker or kind. Otherwise, # the image would be imported with the tag "i-was-a-digest" which is not very useful. # We would have to use digests to reference the image everywhere which might # not always be possible and does not match the default behavior of eg. our helm charts. # NOTE: the tag is fully determined based on the input, we fully allow the remote # tag to point to a different digest. 
This prevents CI from breaking due to upstream # changes. However, it also means that we can incorrectly combine digests with tags, # hence caution is advised. $(images_tars): $(images_tar_dir)/%.tar: | $(NEEDS_IMAGE-TOOL) $(NEEDS_CRANE) $(NEEDS_GOJQ) @$(eval full_image=$(subst +,:,$*)) @$(eval bare_image=$(word 1,$(subst :, ,$(full_image)))) @$(eval digest=$(word 2,$(subst @, ,$(full_image)))) @$(eval tag=$(word 2,$(subst :, ,$(word 1,$(subst @, ,$(full_image)))))) @mkdir -p $(dir $@) $(CRANE) pull "$(bare_image)@$(digest)" $@ --platform=linux/$(HOST_ARCH) $(IMAGE-TOOL) tag-docker-tar $@ "$(bare_image):$(tag)" # $1 = image # $2 = image:tag@sha256:digest define image_variables $1.TAR := $(images_tar_dir)/$(subst :,+,$2).tar $1.REPO := $1 $1.TAG := $(word 2,$(subst :, ,$(word 1,$(subst @, ,$2)))) $1.FULL := $(word 1,$(subst @, ,$2)) endef $(foreach image,$(images),$(eval $(call image_variables,$(word 1,$(subst :, ,$(image))),$(image)))) .PHONY: images-preload ## Preload images. ## @category [shared] Kind cluster images-preload: | $(images_tars) ================================================ FILE: make/_shared/kind/kind.mk ================================================ # Copyright 2023 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
ifndef bin_dir $(error bin_dir is not set) endif ifndef kind_cluster_name $(error kind_cluster_name is not set) endif ifndef kind_cluster_config $(error kind_cluster_config is not set) endif ########################################## kind_kubeconfig := $(bin_dir)/scratch/kube.config absolute_kubeconfig := $(CURDIR)/$(kind_kubeconfig) $(bin_dir)/scratch/cluster-check: FORCE | $(NEEDS_KIND) $(bin_dir)/scratch @if ! $(KIND) get clusters -q | grep -q "^$(kind_cluster_name)\$$"; then \ echo "❌ cluster $(kind_cluster_name) not found. Starting ..."; \ echo "trigger" > $@; \ else \ echo "✅ existing cluster $(kind_cluster_name) found"; \ fi $(eval export KUBECONFIG=$(absolute_kubeconfig)) kind_post_create_hook ?= $(kind_kubeconfig): $(kind_cluster_config) $(bin_dir)/scratch/cluster-check | images-preload $(bin_dir)/scratch $(NEEDS_KIND) $(NEEDS_KUBECTL) $(NEEDS_CTR) @[ -f "$(bin_dir)/scratch/cluster-check" ] && ( \ $(KIND) delete cluster --name $(kind_cluster_name); \ $(CTR) load -i $(docker.io/kindest/node.TAR); \ $(KIND) create cluster \ --image $(docker.io/kindest/node.FULL) \ --name $(kind_cluster_name) \ --config "$<"; \ $(CTR) exec $(kind_cluster_name)-control-plane find /mounted_images/ -name "*.tar" -exec echo {} \; -exec ctr --namespace=k8s.io images import --all-platforms --no-unpack --digests {} \; ; \ $(MAKE) --no-print-directory noop $(kind_post_create_hook); \ $(KUBECTL) config use-context kind-$(kind_cluster_name); \ ) || true $(KIND) get kubeconfig --name $(kind_cluster_name) > $@ .PHONY: kind-cluster kind-cluster: $(kind_kubeconfig) .PHONY: kind-cluster-load ## Create Kind cluster and wait for nodes to be ready ## Load the kubeconfig into the default location so that ## it can be easily queried by kubectl. This target is ## meant to be used directly, NOT as a dependency. ## Use `kind-cluster` as a dependency instead. 
## @category [shared] Kind cluster kind-cluster-load: kind-cluster | $(NEEDS_KUBECTL) mkdir -p ~/.kube KUBECONFIG=~/.kube/config:$(kind_kubeconfig) $(KUBECTL) config view --flatten > ~/.kube/config $(KUBECTL) config use-context kind-$(kind_cluster_name) .PHONY: kind-cluster-clean ## Delete the Kind cluster ## @category [shared] Kind cluster kind-cluster-clean: $(NEEDS_KIND) $(KIND) delete cluster --name $(kind_cluster_name) rm -rf $(kind_kubeconfig) $(MAKE) --no-print-directory noop $(kind_post_create_hook) .PHONY: kind-logs ## Get the Kind cluster ## @category [shared] Kind cluster kind-logs: | kind-cluster $(NEEDS_KIND) $(ARTIFACTS) rm -rf $(ARTIFACTS)/e2e-logs mkdir -p $(ARTIFACTS)/e2e-logs $(KIND) export logs $(ARTIFACTS)/e2e-logs --name=$(kind_cluster_name) ================================================ FILE: make/_shared/klone/01_mod.mk ================================================ # Copyright 2023 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
.PHONY: generate-klone ## Generate klone shared Makefiles ## @category [shared] Generate/ Verify generate-klone: | $(NEEDS_KLONE) $(KLONE) sync shared_generate_targets += generate-klone .PHONY: upgrade-klone ## Upgrade klone Makefile modules to latest version ## @category [shared] Self-upgrade upgrade-klone: | $(NEEDS_KLONE) $(KLONE) upgrade ================================================ FILE: make/_shared/licenses/00_mod.mk ================================================ # Copyright 2024 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Define default config for generating licenses license_ignore ?= ================================================ FILE: make/_shared/licenses/01_mod.mk ================================================ # Copyright 2024 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
###################### Generate LICENSES files ###################### # _module_dir is the directory containing this Makefile, used to retrieve the path of the licenses.tmpl file _module_dir := $(abspath $(dir $(lastword $(MAKEFILE_LIST)))) # Create a go.work file so that go-licenses can discover the LICENSE file of the # other modules in the repo. # # Without this, go-licenses *guesses* the wrong LICENSE for local dependencies and # links to the wrong versions of LICENSES for transitive dependencies. licenses_go_work := $(bin_dir)/scratch/LICENSES.go.work $(licenses_go_work): $(bin_dir)/scratch GOWORK=$(abspath $@) \ $(MAKE) go-workspace ## Generate licenses for the golang dependencies ## @category [shared] Generate/ Verify generate-go-licenses: # shared_generate_targets += generate-go-licenses define licenses_target $1/LICENSES: $1/go.mod $(licenses_go_work) $(_module_dir)/licenses.tmpl | $(NEEDS_GO-LICENSES) cd $$(dir $$@) && \ GOWORK=$(abspath $(licenses_go_work)) \ GOOS=linux GOARCH=amd64 \ $(GO-LICENSES) report --ignore "$$(license_ignore)" --template $(_module_dir)/licenses.tmpl ./... > LICENSES generate-go-licenses: $1/LICENSES # The /LICENSE targets make sure these files exist. # Otherwise, make will error. generate-go-licenses: $1/LICENSE endef # Calculate all the go.mod directories, build targets may share go.mod dirs so # we use $(sort) to de-duplicate. go_mod_dirs := $(foreach build_name,$(build_names),$(go_$(build_name)_mod_dir)) ifneq ("$(wildcard go.mod)","") go_mod_dirs += . 
endif go_mod_dirs := $(sort $(go_mod_dirs)) $(foreach go_mod_dir,$(go_mod_dirs),$(eval $(call licenses_target,$(go_mod_dir)))) ###################### Include LICENSES in OCI image ###################### define license_layer license_layer_path_$1 := $$(abspath $(bin_dir)/scratch/licenses-$1) # Target to generate image layer containing license information .PHONY: oci-license-layer-$1 oci-license-layer-$1: | $(bin_dir)/scratch $(NEEDS_GO-LICENSES) rm -rf $$(license_layer_path_$1) mkdir -p $$(license_layer_path_$1)/licenses cp $$(go_$1_mod_dir)/LICENSE $$(license_layer_path_$1)/licenses/LICENSE cp $$(go_$1_mod_dir)/LICENSES $$(license_layer_path_$1)/licenses/LICENSES oci-build-$1: oci-license-layer-$1 oci-build-$1__local: oci-license-layer-$1 oci_$1_additional_layers += $$(license_layer_path_$1) endef $(foreach build_name,$(build_names),$(eval $(call license_layer,$(build_name)))) ================================================ FILE: make/_shared/licenses/licenses.tmpl ================================================ This LICENSES file is generated by the `licenses` module in makefile-modules[0]. The licenses below the "---" are determined by the go-licenses tool[1]. The aim of this file is to collect the licenses of all dependencies, and provide a single source of truth for licenses used by this project. ## For Developers If CI reports that this file is out of date, you should be careful to check that the new licenses are acceptable for this project before running `make generate-go-licenses` to update this file. Acceptable licenses are those allowlisted by the CNCF[2]. You MUST NOT add any new dependencies whose licenses are not allowlisted by the CNCF, or which do not have an explicit license exception[3]. ## For Users If this file was included in a release artifact, it is a snapshot of the licenses of all dependencies at the time of the release. You can retrieve the actual license text by following these steps: 1. Find the dependency name in this file 2. 
Go to the source code repository of this project, and go to the tag corresponding to this release. 3. Find the exact version of the dependency in the `go.mod` file 4. Search for the dependency at the correct version in the [Go package index](https://pkg.go.dev/). ## Links [0]: https://github.com/cert-manager/makefile-modules/ [1]: https://github.com/google/go-licenses [2]: https://github.com/cncf/foundation/blob/db4179134ebe7fa00b140a050c19147db808b6fa/policies-guidance/allowed-third-party-license-policy.md#cncf-allowlist-license-policy [3]: https://github.com/cncf/foundation/blob/db4179134ebe7fa00b140a050c19147db808b6fa/license-exceptions/README.md --- {{ range . -}} {{ .Name }},{{ .LicenseName }} {{ end -}} ================================================ FILE: make/_shared/oci-build/00_mod.mk ================================================ # Copyright 2023 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# Use distroless as minimal base image to package the manager binary # To get latest SHA run "crane digest quay.io/jetstack/base-static:latest" base_image_static := quay.io/jetstack/base-static@sha256:bcdce6869d855fb0b8808ebfc5315360e3413b9975776b5c9e8899744b1ee8a9 # Use custom apko-built image as minimal base image to package the manager binary # To get latest SHA run "crane digest quay.io/jetstack/base-static-csi:latest" base_image_csi-static := quay.io/jetstack/base-static-csi@sha256:e8c56285c3bd5bb98f8c0b3d30c5b28d81c087e333b6f9e3296c2eb51faca47e # Utility functions fatal_if_undefined = $(if $(findstring undefined,$(origin $1)),$(error $1 is not set)) fatal_if_deprecated_defined = $(if $(findstring undefined,$(origin $1)),,$(error $1 is deprecated, use $2 instead)) # Validate globals that are required $(call fatal_if_undefined,build_names) # Set default config values CGO_ENABLED ?= 0 GOEXPERIMENT ?= # empty by default oci_platforms ?= linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le # Default variables per build_names entry # # $1 - build_name define default_per_build_variables go_$1_cgo_enabled ?= $(CGO_ENABLED) go_$1_goexperiment ?= $(GOEXPERIMENT) go_$1_flags ?= -tags= oci_$1_platforms ?= $(oci_platforms) oci_$1_additional_layers ?= oci_$1_linux_capabilities ?= oci_$1_build_args ?= endef $(foreach build_name,$(build_names),$(eval $(call default_per_build_variables,$(build_name)))) # Validate variables per build_names entry # # $1 - build_name define check_per_build_variables # Validate deprecated variables $(call fatal_if_deprecated_defined,cgo_enabled_$1,go_$1_cgo_enabled) $(call fatal_if_deprecated_defined,goexperiment_$1,go_$1_goexperiment) $(call fatal_if_deprecated_defined,oci_additional_layers_$1,oci_$1_additional_layers) # Validate required config exists $(call fatal_if_undefined,go_$1_ldflags) $(call fatal_if_undefined,go_$1_main_dir) $(call fatal_if_undefined,go_$1_mod_dir) $(call fatal_if_undefined,oci_$1_base_image_flavor) $(call 
fatal_if_undefined,oci_$1_image_name_development) # Validate we have valid base image config ifeq ($(oci_$1_base_image_flavor),static) oci_$1_base_image := $(base_image_static) else ifeq ($(oci_$1_base_image_flavor),csi-static) oci_$1_base_image := $(base_image_csi-static) else ifeq ($(oci_$1_base_image_flavor),custom) $$(call fatal_if_undefined,oci_$1_base_image) else $$(error oci_$1_base_image_flavor has unknown value "$(oci_$1_base_image_flavor)") endif # Validate the config required to build the golang based images ifneq ($(go_$1_main_dir:.%=.),.) $$(error go_$1_main_dir "$(go_$1_main_dir)" should be a directory path that DOES start with ".") endif ifeq ($(go_$1_main_dir:%/=/),/) $$(error go_$1_main_dir "$(go_$1_main_dir)" should be a directory path that DOES NOT end with "/") endif ifeq ($(go_$1_main_dir:%.go=.go),.go) $$(error go_$1_main_dir "$(go_$1_main_dir)" should be a directory path that DOES NOT end with ".go") endif ifneq ($(go_$1_mod_dir:.%=.),.) $$(error go_$1_mod_dir "$(go_$1_mod_dir)" should be a directory path that DOES start with ".") endif ifeq ($(go_$1_mod_dir:%/=/),/) $$(error go_$1_mod_dir "$(go_$1_mod_dir)" should be a directory path that DOES NOT end with "/") endif ifeq ($(go_$1_mod_dir:%.go=.go),.go) $$(error go_$1_mod_dir "$(go_$1_mod_dir)" should be a directory path that DOES NOT end with ".go") endif ifeq ($(wildcard $(go_$1_mod_dir)/go.mod),) $$(error go_$1_mod_dir "$(go_$1_mod_dir)" does not contain a go.mod file) endif ifeq ($(wildcard $(go_$1_mod_dir)/$(go_$1_main_dir)/main.go),) $$(error go_$1_main_dir "$(go_$1_mod_dir)/$(go_$1_main_dir)" does not contain a main.go file) endif # Validate the config required to build OCI images ifneq ($(words $(oci_$1_image_name_development)),1) $$(error oci_$1_image_name_development "$(oci_$1_image_name_development)" should be a single image name) endif # Validate that the build name does not end in __local ifeq ($(1:%__local=__local),__local) $$(error build_name "$1" SHOULD NOT end in __local) 
endif endef $(foreach build_name,$(build_names),$(eval $(call check_per_build_variables,$(build_name)))) # Create variables holding targets # # We create the following targets for each $(build_names) # - oci-build-$(build_name) = build the oci directory (multi-arch) # - oci-build-$(build_name)__local = build the oci directory (local arch: linux/$(HOST_ARCH)) # - oci-load-$(build_name) = load the image into docker using the oci_$(build_name)_image_name_development variable # - docker-tarball-$(build_name) = build a "docker load" compatible tarball of the image oci_build_targets := $(build_names:%=oci-build-%) oci_build_targets += $(build_names:%=oci-build-%__local) oci_load_targets := $(build_names:%=oci-load-%) docker_tarball_targets := $(build_names:%=docker-tarball-%) # Derive config based on user config # # - oci_layout_path_$(build_name) = path that the OCI image will be saved in OCI layout directory format # - oci_digest_path_$(build_name) = path to the file that will contain the digests # - docker_tarball_path_$(build_name) = path that the docker tarball that the docker-tarball-$(build_name) will produce $(foreach build_name,$(build_names),$(eval oci_layout_path_$(build_name) := $(bin_dir)/scratch/image/oci-layout-$(build_name))) $(foreach build_name,$(build_names),$(eval oci_digest_path_$(build_name) := $(CURDIR)/$(oci_layout_path_$(build_name)).digests)) $(foreach build_name,$(build_names),$(eval docker_tarball_path_$(build_name) := $(CURDIR)/$(oci_layout_path_$(build_name)).docker.tar)) ================================================ FILE: make/_shared/oci-build/01_mod.mk ================================================ # Copyright 2023 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. $(bin_dir)/scratch/image: @mkdir -p $@ .PHONY: $(oci_build_targets) ## Build the OCI image. ## - oci-build-$(build_name) = build the oci directory (multi-arch) ## - oci-build-$(build_name)__local = build the oci directory (local arch: linux/$(HOST_ARCH)) ## @category [shared] Build $(oci_build_targets): oci-build-%: | $(NEEDS_KO) $(NEEDS_GO) $(NEEDS_YQ) $(NEEDS_IMAGE-TOOL) $(bin_dir)/scratch/image $(eval a := $(patsubst %__local,%,$*)) $(eval is_local := $(if $(findstring $a__local,$*),true)) $(eval layout_path := $(if $(is_local),$(oci_layout_path_$a).local,$(oci_layout_path_$a))) $(eval digest_path := $(if $(is_local),$(oci_digest_path_$a).local,$(oci_digest_path_$a))) rm -rf $(CURDIR)/$(layout_path) echo '{}' | \ $(YQ) '.defaultBaseImage = "$(oci_$a_base_image)"' | \ $(YQ) '.builds[0].id = "$a"' | \ $(YQ) '.builds[0].dir = "$(go_$a_mod_dir)"' | \ $(YQ) '.builds[0].main = "$(go_$a_main_dir)"' | \ $(YQ) '.builds[0].env[0] = "CGO_ENABLED=$(go_$a_cgo_enabled)"' | \ $(YQ) '.builds[0].env[1] = "GOEXPERIMENT=$(go_$a_goexperiment)"' | \ $(YQ) '.builds[0].ldflags[0] = "-s"' | \ $(YQ) '.builds[0].ldflags[1] = "-w"' | \ $(YQ) '.builds[0].ldflags[2] = "{{.Env.LDFLAGS}}"' | \ $(YQ) '.builds[0].flags[0] = "$(go_$a_flags)"' | \ $(YQ) '.builds[0].linux_capabilities = "$(oci_$a_linux_capabilities)"' \ > $(CURDIR)/$(layout_path).ko_config.yaml GOWORK=off \ KO_DOCKER_REPO=$(oci_$a_image_name_development) \ KOCACHE=$(CURDIR)/$(bin_dir)/scratch/image/ko_cache \ KO_CONFIG_PATH=$(CURDIR)/$(layout_path).ko_config.yaml \ SOURCE_DATE_EPOCH=$(GITEPOCH) \ KO_GO_PATH=$(GO) \ 
LDFLAGS="$(go_$a_ldflags)" \ $(KO) build $(go_$a_mod_dir)/$(go_$a_main_dir) \ --platform=$(if $(is_local),linux/$(HOST_ARCH),$(oci_$a_platforms)) \ $(oci_$a_build_args) \ --oci-layout-path=$(layout_path) \ --sbom-dir=$(CURDIR)/$(layout_path).sbom \ --sbom=spdx \ --push=false \ --bare $(IMAGE-TOOL) append-layers \ $(CURDIR)/$(layout_path) \ $(oci_$a_additional_layers) $(IMAGE-TOOL) list-digests \ $(CURDIR)/$(layout_path) \ > $(digest_path) # Only include the oci-load target if kind is provided by the kind makefile-module ifdef kind_cluster_name .PHONY: $(oci_load_targets) ## Build OCI image for the local architecture and load ## it into the $(kind_cluster_name) kind cluster. ## @category [shared] Build $(oci_load_targets): oci-load-%: docker-tarball-% | kind-cluster $(NEEDS_KIND) $(KIND) load image-archive --name $(kind_cluster_name) $(docker_tarball_path_$*) endif ## Build Docker tarball image for the local architecture ## @category [shared] Build .PHONY: $(docker_tarball_targets) $(docker_tarball_targets): docker-tarball-%: oci-build-%__local | $(NEEDS_GO) $(NEEDS_IMAGE-TOOL) $(IMAGE-TOOL) convert-to-docker-tar $(CURDIR)/$(oci_layout_path_$*).local $(docker_tarball_path_$*) $(oci_$*_image_name_development):$(oci_$*_image_tag) ================================================ FILE: make/_shared/oci-publish/00_mod.mk ================================================ # Copyright 2023 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# Push names is equivalent to build_names, additional names can be added for # pushing images that are not build with the oci-build module push_names ?= push_names += $(build_names) # Sometimes we need to push to one registry, but pull from another. This allows # that. # # The lines should be in the format a=b # # The value on the left is the domain you include in your oci__image_name # variable, the one on the right is the domain that is actually pushed to. # # For example, if we set up a vanity domain for the current quay: # # oci_controller_image_name = registry.cert-manager.io/cert-manager-controller` # image_registry_rewrite += registry.cert-manager.io=quay.io/jetstack # # This would push to quay.io/jetstack/cert-manager-controller. # # The general idea is oci__image_name contains the final image name, after replication, after vanity domains etc. image_registry_rewrite ?= # Utilities for extracting the key and value from a foo=bar style line kv_key = $(word 1,$(subst =, ,$1)) kv_value = $(word 2,$(subst =, ,$1)) # Apply the image_registry_rewrite rules, if no rules match an image then the # image name is not changed. Any rules that match will be applied. # # For example, if there was a rule vanity-domain.com=real-registry.com/foo # then any references to vanity-domain.com/image would be rewritten to # real-registry.com/foo/image image_registry_rewrite_rules_for_image = $(strip $(sort $(foreach rule,$(image_registry_rewrite),$(if $(findstring $(call kv_key,$(rule)),$1),$(rule))))) apply_image_registry_rewrite_rules_to_image = $(if $(call image_registry_rewrite_rules_for_image,$1),\ $(foreach rule,$(call image_registry_rewrite_rules_for_image,$1),$(subst $(call kv_key,$(rule)),$(call kv_value,$(rule)),$1)),\ $1) apply_image_registry_rewrite_rules = $(foreach image_name,$1,$(call apply_image_registry_rewrite_rules_to_image,$(image_name))) # This is a helper function to return the image names for a given build_name. 
# It will apply all rewrite rules to the image names oci_image_names_for = $(call apply_image_registry_rewrite_rules,$(oci_$1_image_name)) oci_image_tag_for = $(oci_$1_image_tag) ================================================ FILE: make/_shared/oci-publish/01_mod.mk ================================================ # Copyright 2023 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Utility functions fatal_if_undefined = $(if $(findstring undefined,$(origin $1)),$(error $1 is not set)) oci_digest = $(shell head -1 $(oci_digest_path_$1) 2> /dev/null) sanitize_target = $(subst :,-,$1) registry_for = $(firstword $(subst /, ,$1)) # Utility variables current_makefile_directory := $(dir $(lastword $(MAKEFILE_LIST))) image_exists_script := $(current_makefile_directory)/image-exists.sh # Validate globals that are required $(call fatal_if_undefined,bin_dir) $(call fatal_if_undefined,push_names) # Set default config values RELEASE_DRYRUN ?= false CRANE_FLAGS ?= # empty by default COSIGN_FLAGS ?= # empty by default OCI_SIGN_ON_PUSH ?= true # Default variables per push_names entry # # $1 - build_name define default_per_build_variables release_dryrun_$1 ?= $(RELEASE_DRYRUN) crane_flags_$1 ?= $(CRANE_FLAGS) cosign_flags_$1 ?= $(COSIGN_FLAGS) oci_sign_on_push_$1 ?= $(OCI_SIGN_ON_PUSH) endef $(foreach build_name,$(push_names),$(eval $(call default_per_build_variables,$(build_name)))) # Validate variables per push_names entry # # $1 - build_name define 
check_per_build_variables $(call fatal_if_undefined,oci_digest_path_$1) $(call fatal_if_undefined,oci_layout_path_$1) $(call fatal_if_undefined,oci_$1_image_name) $(call fatal_if_undefined,oci_$1_image_tag) endef $(foreach build_name,$(push_names),$(eval $(call check_per_build_variables,$(build_name)))) # Create variables holding targets # # We create the following targets for each $(push_names) # - oci-build-$(build_name) = build the oci directory # - oci-load-$(build_name) = load the image into docker using the oci_$(build_name)_image_name_development variable # - docker-tarball-$(build_name) = build a "docker load" compatible tarball of the image # - ko-config-$(build_name) = generate "ko" config for a given build oci_push_targets := $(push_names:%=oci-push-%) oci_sign_targets := $(push_names:%=oci-sign-%) oci_maybe_push_targets := $(push_names:%=oci-maybe-push-%) # Define push target # $1 - build_name # $2 - image_name define oci_push_target .PHONY: $(call sanitize_target,oci-push-$2) $(call sanitize_target,oci-push-$2): oci-build-$1 | $(NEEDS_CRANE) $$(CRANE) $(crane_flags_$1) push "$(oci_layout_path_$1)" "$2:$(call oci_image_tag_for,$1)" $(if $(filter true,$(oci_sign_on_push_$1)),$(MAKE) $(call sanitize_target,oci-sign-$2)) .PHONY: $(call sanitize_target,oci-maybe-push-$2) $(call sanitize_target,oci-maybe-push-$2): oci-build-$1 | $(NEEDS_CRANE) CRANE="$$(CRANE) $(crane_flags_$1)" \ source $(image_exists_script) $2:$(call oci_image_tag_for,$1); \ $$(CRANE) $(crane_flags_$1) push "$(oci_layout_path_$1)" "$2:$(call oci_image_tag_for,$1)"; \ $(if $(filter true,$(oci_sign_on_push_$1)),$(MAKE) $(call sanitize_target,oci-sign-$2)) oci-push-$1: $(call sanitize_target,oci-push-$2) oci-maybe-push-$1: $(call sanitize_target,oci-maybe-push-$2) endef oci_push_target_per_image = $(foreach image_name,$2,$(eval $(call oci_push_target,$1,$(image_name)))) $(foreach build_name,$(push_names),$(eval $(call oci_push_target_per_image,$(build_name),$(call 
oci_image_names_for,$(build_name))))) .PHONY: $(oci_push_targets) ## Build and push OCI image. ## If the tag already exists, this target will overwrite it. ## If an identical image was already built before, we will add a new tag to it, but we will not sign it again. ## Expected pushed images: ## - :v1.2.3, @sha256:0000001 ## - :v1.2.3.sig, :sha256-0000001.sig ## @category [shared] Publish $(oci_push_targets): .PHONY: $(oci_maybe_push_targets) ## Push image if tag does not already exist in registry. ## @category [shared] Publish $(oci_maybe_push_targets): # Define sign target # $1 - build_name # $2 - image_name define oci_sign_target .PHONY: $(call sanitize_target,oci-sign-$2) $(call sanitize_target,oci-sign-$2): $(oci_digest_path_$1) | $(NEEDS_CRANE) $(NEEDS_COSIGN) $$(CRANE) $(crane_flags_$1) manifest $2:$$(subst :,-,$$(call oci_digest,$1)).sig > /dev/null 2>&1 || \ $$(COSIGN) sign --yes=true $(cosign_flags_$1) "$2@$$(call oci_digest,$1)" oci-sign-$1: $(call sanitize_target,oci-sign-$2) endef oci_sign_target_per_image = $(foreach image_name,$2,$(eval $(call oci_sign_target,$1,$(image_name)))) $(foreach build_name,$(push_names),$(eval $(call oci_sign_target_per_image,$(build_name),$(call oci_image_names_for,$(build_name))))) .PHONY: $(oci_sign_targets) ## Sign an OCI image. ## If a signature already exists, this will not overwrite it. ## @category [shared] Publish $(oci_sign_targets): ================================================ FILE: make/_shared/oci-publish/image-exists.sh ================================================ #!/usr/bin/env bash # Copyright 2022 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

# This script checks if a given image exists in the upstream registry, and if it
# does, whether it contains all the expected architectures.

crane=${CRANE:-}
FULL_IMAGE=${1:-}

function print_usage() {
  echo "usage: $0 [commands...]"
}

if [[ -z $FULL_IMAGE ]]; then
  print_usage
  echo "Missing full-image"
  exit 1
fi

if [[ -z $crane ]]; then
  echo "CRANE environment variable must be set to the path of the crane binary"
  exit 1
fi

shift 1

manifest=$(mktemp)
manifest_error=$(mktemp)
# FIX: bash keeps a single trap per signal, so registering a second
# `trap ... EXIT SIGINT` for $manifest_error used to REPLACE the trap for
# $manifest, leaking that temp file. One combined cleanup handler removes both.
trap 'rm -f "$manifest" "$manifest_error"' EXIT SIGINT

echo "+++ searching for $FULL_IMAGE in upstream registry"

# `crane manifest` exits non-zero when the tag is missing; suspend errexit so
# we can inspect the exit code and stderr ourselves instead of aborting.
set +o errexit
$crane manifest "$FULL_IMAGE" > "$manifest" 2> "$manifest_error"
exit_code=$?
set -o errexit

manifest_error_data=$(cat "$manifest_error")

if [[ $exit_code -eq 0 ]]; then
  echo "+++ upstream registry appears to contain $FULL_IMAGE, exiting"
  exit 0
elif [[ "$manifest_error_data" == *"MANIFEST_UNKNOWN"* ]]; then
  echo "+++ upstream registry does not contain $FULL_IMAGE, will build and push"
  # fall through to run the commands passed to this script
else
  echo "FATAL: upstream registry returned an unexpected error: $manifest_error_data, exiting"
  exit 1
fi

================================================
FILE: make/_shared/repository-base/01_mod.mk
================================================
# Copyright 2023 The cert-manager Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ifndef repo_name $(error repo_name is not set) endif _repository_base_module_dir := $(dir $(lastword $(MAKEFILE_LIST))) repository_base_dir := $(_repository_base_module_dir)base/ .PHONY: generate-base ## Generate base files in the repository ## @category [shared] Generate/ Verify generate-base: cp -r $(repository_base_dir)/. ./ cd $(repository_base_dir) && \ find . -type f | while read file; do \ sed "s|{{REPLACE:GH-REPOSITORY}}|$(repo_name:github.com/%=%)|g" "$$file" > "$(CURDIR)/$$file"; \ done if [ ! -e ./.github/renovate.json5 ]; then \ mkdir -p ./.github; \ cp $(_repository_base_module_dir)/renovate-bootstrap-config.json5 ./.github/renovate.json5; \ fi shared_generate_targets += generate-base ================================================ FILE: make/_shared/repository-base/base/.github/chainguard/make-self-upgrade.sts.yaml ================================================ # THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. # Edit https://github.com/cert-manager/makefile-modules/blob/main/modules/repository-base/base/.github/chainguard/make-self-upgrade.sts.yaml instead. issuer: https://token.actions.githubusercontent.com subject_pattern: ^repo:{{REPLACE:GH-REPOSITORY}}:ref:refs/heads/(main|master)$ permissions: contents: write pull_requests: write workflows: write ================================================ FILE: make/_shared/repository-base/base/.github/workflows/make-self-upgrade.yaml ================================================ # THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
# Edit https://github.com/cert-manager/makefile-modules/blob/main/modules/repository-base/base/.github/workflows/make-self-upgrade.yaml instead. name: make-self-upgrade concurrency: make-self-upgrade on: workflow_dispatch: {} schedule: - cron: '0 0 * * *' permissions: contents: read jobs: self_upgrade: runs-on: ubuntu-latest if: github.repository == '{{REPLACE:GH-REPOSITORY}}' permissions: id-token: write env: SOURCE_BRANCH: "${{ github.ref_name }}" SELF_UPGRADE_BRANCH: "self-upgrade-${{ github.ref_name }}" steps: - name: Fail if branch is not head of branch. if: ${{ !startsWith(github.ref, 'refs/heads/') && env.SOURCE_BRANCH != '' && env.SELF_UPGRADE_BRANCH != '' }} run: | echo "This workflow should not be run on a non-branch-head." exit 1 - name: Octo STS Token Exchange uses: octo-sts/action@f603d3be9d8dd9871a265776e625a27b00effe05 # v1.1.1 id: octo-sts with: scope: '{{REPLACE:GH-REPOSITORY}}' identity: make-self-upgrade - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 # Adding `fetch-depth: 0` makes sure tags are also fetched. We need # the tags so `git describe` returns a valid version. 
# see https://github.com/actions/checkout/issues/701 for extra info about this option with: fetch-depth: 0 token: ${{ steps.octo-sts.outputs.token }} - id: go-version run: | make print-go-version >> "$GITHUB_OUTPUT" - uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0 with: go-version: ${{ steps.go-version.outputs.result }} - run: | git checkout -B "$SELF_UPGRADE_BRANCH" - run: | make -j upgrade-klone make -j generate - id: is-up-to-date shell: bash run: | git_status=$(git status -s) is_up_to_date="true" if [ -n "$git_status" ]; then is_up_to_date="false" echo "The following changes will be committed:" echo "$git_status" fi echo "result=$is_up_to_date" >> "$GITHUB_OUTPUT" - if: ${{ steps.is-up-to-date.outputs.result != 'true' }} run: | git config --global user.name "cert-manager-bot" git config --global user.email "cert-manager-bot@users.noreply.github.com" git add -A && git commit -m "BOT: run 'make upgrade-klone' and 'make generate'" --signoff git push -f origin "$SELF_UPGRADE_BRANCH" - if: ${{ steps.is-up-to-date.outputs.result != 'true' }} uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0 with: github-token: ${{ steps.octo-sts.outputs.token }} script: | const { repo, owner } = context.repo; const pulls = await github.rest.pulls.list({ owner: owner, repo: repo, head: owner + ':' + process.env.SELF_UPGRADE_BRANCH, base: process.env.SOURCE_BRANCH, state: 'open', }); if (pulls.data.length < 1) { const result = await github.rest.pulls.create({ title: '[CI] Merge ' + process.env.SELF_UPGRADE_BRANCH + ' into ' + process.env.SOURCE_BRANCH, owner: owner, repo: repo, head: process.env.SELF_UPGRADE_BRANCH, base: process.env.SOURCE_BRANCH, body: [ 'This PR is auto-generated to bump the Makefile modules.', ].join('\n'), }); await github.rest.issues.addLabels({ owner, repo, issue_number: result.data.number, labels: ['ok-to-test', 'skip-review', 'release-note-none', 'kind/cleanup'] }); } 
================================================ FILE: make/_shared/repository-base/base/Makefile ================================================
# Copyright 2023 The cert-manager Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
# Edit https://github.com/cert-manager/makefile-modules/blob/main/modules/repository-base/base/Makefile instead.

# NOTE FOR DEVELOPERS: "How do the Makefiles work and how can I extend them?"
#
# Shared Makefile logic lives in the make/_shared/ directory. The source of truth for these files
# lies outside of this repository, eg. in the cert-manager/makefile-modules repository.
#
# Logic specific to this repository must be defined in the make/00_mod.mk and make/02_mod.mk files:
# - The make/00_mod.mk file is included first and contains variable definitions needed by
# the shared Makefile logic.
# - The make/02_mod.mk file is included later, it can make use of most of the shared targets
# defined in the make/_shared/ directory (all targets defined in 00_mod.mk and 01_mod.mk).
# This file should be used to define targets specific to this repository.

##################################

# Some modules build their dependencies from variables, we want these to be
# evaluated at the last possible moment. For this we use second expansion to
# re-evaluate the generate and verify targets a second time.
#
# See https://www.gnu.org/software/make/manual/html_node/Secondary-Expansion.html
.SECONDEXPANSION:

# For details on some of these "prelude" settings, see:
# https://clarkgrubb.com/makefile-style-guide
MAKEFLAGS += --warn-undefined-variables --no-builtin-rules
SHELL := /usr/bin/env bash
# The `--norc` option prevents "PS1: unbound" errors.
# If Bash thinks it is being run with its standard input connected to a network
# connection (such as via SSH or via Docker), it reads and executes commands
# from ~/.bashrc, regardless of whether it thinks it is in interactive mode.
# Bash does not set PS1 in non-interactive environments. But on Ubuntu 24.04 the
# default /etc/bash.bashrc file assumes that PS1 is set.
#
# See https://www.gnu.org/software/bash/manual/bash.html#Invoked-by-remote-shell-daemon
.SHELLFLAGS := --norc -uo pipefail -c
.DEFAULT_GOAL := help
.DELETE_ON_ERROR:
.SUFFIXES:
# FORCE is an always-out-of-date prerequisite: anything depending on it is re-run
# on every invocation (used by the *_VERSION stamp rules in the tools module).
FORCE:

noop: # do nothing

# Set empty value for MAKECMDGOALS to prevent the "warning: undefined variable 'MAKECMDGOALS'"
# warning from happening when running make without arguments
MAKECMDGOALS ?=

##################################
# Host OS and architecture setup #
##################################

# The reason we don't use "go env GOOS" or "go env GOARCH" is that the "go"
# binary may not be available in the PATH yet when the Makefiles are
# evaluated. HOST_OS and HOST_ARCH only support Linux, *BSD and macOS (M1
# and Intel).
host_os := $(shell uname -s | tr A-Z a-z)
host_arch := $(shell uname -m)
HOST_OS ?= $(host_os)
HOST_ARCH ?= $(host_arch)

# Normalize "uname -m" output to Go-style architecture names.
ifeq (x86_64, $(HOST_ARCH))
HOST_ARCH = amd64
else ifeq (aarch64, $(HOST_ARCH))
# linux reports the arm64 arch as aarch64
HOST_ARCH = arm64
endif

##################################
# Git and versioning information #
##################################

git_version := $(shell git describe --tags --always --match='v*' --abbrev=14 --dirty)
VERSION ?= $(git_version)
# True when the most recent v* tag contains a "-" (pre-release semver suffix).
IS_PRERELEASE := $(shell git describe --tags --always --match='v*' --abbrev=0 | grep -q '-' && echo true || echo false)
GITCOMMIT := $(shell git rev-parse HEAD)
GITEPOCH := $(shell git show -s --format=%ct HEAD)

##################################
# Global variables and dirs #
##################################

bin_dir := _bin

# The ARTIFACTS environment variable is set by the CI system to a directory
# where artifacts should be placed. These artifacts are then uploaded to a
# storage bucket by the CI system (https://docs.prow.k8s.io/docs/components/pod-utilities/).
# An example of such an artifact is a jUnit XML file containing test results.
# If the ARTIFACTS environment variable is not set, we default to a local
# directory in the _bin directory.
ARTIFACTS ?= $(bin_dir)/artifacts

# Create output directories on demand.
$(bin_dir) $(ARTIFACTS) $(bin_dir)/scratch:
	mkdir -p $@

.PHONY: clean
## Clean all temporary files
## @category [shared] Tools
clean:
	rm -rf $(bin_dir)

##################################
# Include all the Makefiles #
##################################

# "-include" (rather than "include") silently skips files that don't exist,
# so each repository can opt in to only the modules it needs.
-include make/00_mod.mk
-include make/_shared/*/00_mod.mk
-include make/_shared/*/01_mod.mk
-include make/02_mod.mk
-include make/_shared/*/02_mod.mk

================================================ FILE: make/_shared/repository-base/base/OWNERS_ALIASES ================================================
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
# Edit https://github.com/cert-manager/makefile-modules/blob/main/modules/repository-base/base/OWNERS_ALIASES instead.

aliases:
  cm-maintainers:
  - munnerz
  - joshvanl
  - wallrj
  - jakexks
  - maelvls
  - sgtcodfish
  - inteon
  - thatsmrtalbot
  - erikgb
  - hjoshi123

================================================ FILE: make/_shared/repository-base/renovate-bootstrap-config.json5 ================================================
{
  $schema: 'https://docs.renovatebot.com/renovate-schema.json',
  extends: [
    'github>cert-manager/makefile-modules:renovate-config.json5',
  ],
}

================================================ FILE: make/_shared/tools/00_mod.mk ================================================
# Copyright 2023 The cert-manager Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This module must be included after a 00_mod.mk that defines bin_dir.
ifndef bin_dir
$(error bin_dir is not set)
endif

##########################################

default_shared_dir := $(CURDIR)/$(bin_dir)
# If $(HOME) is set and $(CI) is not, use the $(HOME)/.cache
# folder to store downloaded binaries.
ifneq ($(shell printenv HOME),)
ifeq ($(shell printenv CI),)
default_shared_dir := $(HOME)/.cache/makefile-modules
endif
endif

export DOWNLOAD_DIR ?= $(default_shared_dir)/downloaded
export GOVENDOR_DIR ?= $(default_shared_dir)/go_vendor

# https://go.dev/dl/
# renovate: datasource=golang-version packageName=go
VENDORED_GO_VERSION := 1.26.2

$(bin_dir)/tools $(DOWNLOAD_DIR)/tools:
	@mkdir -p $@

# Helper scripts shipped alongside this module (resolved relative to this makefile).
checkhash_script := $(dir $(lastword $(MAKEFILE_LIST)))/util/checkhash.sh
lock_script := $(dir $(lastword $(MAKEFILE_LIST)))/util/lock.sh

# $outfile is a variable in the lock script
# Escape the dollar sign so it's passed literally to the shell script, not expanded by make
outfile := $$outfile

# Helper function to iterate over key=value pairs and call a function for each pair
# Usage: $(call for_each_kv,function_name,list_of_key=value_pairs)
# For each item, splits on "=" and calls function_name with key as $1 and value as $2
for_each_kv = $(foreach item,$2,$(eval $(call $1,$(word 1,$(subst =, ,$(item))),$(word 2,$(subst =, ,$(item))))))

# To make sure we use the right version of each tool, we put symlink in
# $(bin_dir)/tools, and the actual binaries are in $(bin_dir)/downloaded. When bumping
# the version of the tools, this symlink gets updated.

# Let's have $(bin_dir)/tools in front of the PATH so that we don't inadvertently
# pick up the wrong binary somewhere. Watch out, $(shell echo $$PATH) will
# still print the original PATH, since GNU make does not honor exported
# variables: https://stackoverflow.com/questions/54726457
export PATH := $(CURDIR)/$(bin_dir)/tools:$(PATH)

CTR ?= docker
.PHONY: __require-ctr
# Only define an (erroring) rule for __require-ctr when $(CTR) is NOT on PATH;
# targets that need a container runtime depend on $(NEEDS_CTR).
ifneq ($(shell command -v $(CTR) >/dev/null || echo notfound),)
__require-ctr:
	@:$(error "$(CTR) (or set CTR to a docker-compatible tool)")
endif
NEEDS_CTR = __require-ctr

# Registry of tool=version pairs; each entry is expanded into variables and
# rules by the tool_defs template below. The "# renovate:" marker comments are
# machine-read by Renovate to propose version bumps — keep their exact format.
tools :=
# https://github.com/helm/helm/releases
# renovate: datasource=github-releases packageName=helm/helm
tools += helm=v4.1.4
# https://github.com/helm-unittest/helm-unittest/releases
# renovate: datasource=github-releases packageName=helm-unittest/helm-unittest
tools += helm-unittest=v1.0.3
# https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl
# renovate: datasource=github-releases packageName=kubernetes/kubernetes
tools += kubectl=v1.35.4
# https://github.com/kubernetes-sigs/kind/releases
# renovate: datasource=github-releases packageName=kubernetes-sigs/kind
tools += kind=v0.31.0
# https://www.vaultproject.io/downloads
# renovate: datasource=github-releases packageName=hashicorp/vault
tools += vault=v1.21.4
# https://github.com/Azure/azure-workload-identity/releases
# renovate: datasource=github-releases packageName=Azure/azure-workload-identity
tools += azwi=v1.5.1
# https://github.com/kyverno/kyverno/releases
# renovate: datasource=github-releases packageName=kyverno/kyverno
tools += kyverno=v1.17.1
# https://github.com/mikefarah/yq/releases
# renovate: datasource=github-releases packageName=mikefarah/yq
tools += yq=v4.53.2
# https://github.com/ko-build/ko/releases
# renovate: datasource=github-releases packageName=ko-build/ko
tools += ko=0.18.1
# https://github.com/protocolbuffers/protobuf/releases
# renovate: datasource=github-releases packageName=protocolbuffers/protobuf
tools += protoc=v34.1
# https://github.com/aquasecurity/trivy/releases
# renovate: datasource=github-releases packageName=aquasecurity/trivy
tools += trivy=v0.70.0
# https://github.com/vmware-tanzu/carvel-ytt/releases
# renovate: datasource=github-releases packageName=vmware-tanzu/carvel-ytt
tools += ytt=v0.53.2
# https://github.com/rclone/rclone/releases
# renovate: datasource=github-releases packageName=rclone/rclone
tools += rclone=v1.73.4
# https://github.com/istio/istio/releases
# renovate: datasource=github-releases packageName=istio/istio
tools += istioctl=1.29.2

### go packages
# https://pkg.go.dev/sigs.k8s.io/controller-tools/cmd/controller-gen?tab=versions
# renovate: datasource=go packageName=sigs.k8s.io/controller-tools
tools += controller-gen=v0.20.1
# https://pkg.go.dev/golang.org/x/tools/cmd/goimports?tab=versions
# renovate: datasource=go packageName=golang.org/x/tools
tools += goimports=v0.44.0
# https://pkg.go.dev/github.com/google/go-licenses/v2?tab=versions
# renovate: datasource=go packageName=github.com/inteon/go-licenses/v2
tools += go-licenses=v2.0.0-20250821024731-e4be79958780
# https://pkg.go.dev/gotest.tools/gotestsum?tab=versions
# renovate: datasource=github-releases packageName=gotestyourself/gotestsum
tools += gotestsum=v1.13.0
# https://pkg.go.dev/sigs.k8s.io/kustomize/kustomize/v5?tab=versions
# renovate: datasource=go packageName=sigs.k8s.io/kustomize/kustomize/v5
tools += kustomize=v5.8.1
# https://pkg.go.dev/github.com/itchyny/gojq?tab=versions
# renovate: datasource=go packageName=github.com/itchyny/gojq
tools += gojq=v0.12.19
# https://pkg.go.dev/github.com/google/go-containerregistry/pkg/crane?tab=versions
# renovate: datasource=go packageName=github.com/google/go-containerregistry
tools += crane=v0.21.5
# https://pkg.go.dev/google.golang.org/protobuf/cmd/protoc-gen-go?tab=versions
# renovate: datasource=go packageName=google.golang.org/protobuf
tools += protoc-gen-go=v1.36.11
# https://pkg.go.dev/github.com/sigstore/cosign/v2/cmd/cosign?tab=versions
# renovate: datasource=go packageName=github.com/sigstore/cosign/v2
tools += cosign=v2.6.3
# https://pkg.go.dev/github.com/cert-manager/boilersuite?tab=versions
# renovate: datasource=go packageName=github.com/cert-manager/boilersuite
tools += boilersuite=v0.2.0
# https://pkg.go.dev/github.com/princjef/gomarkdoc/cmd/gomarkdoc?tab=versions
# renovate: datasource=go packageName=github.com/princjef/gomarkdoc
tools += gomarkdoc=v1.1.0
# https://pkg.go.dev/oras.land/oras/cmd/oras?tab=versions
# renovate: datasource=go packageName=oras.land/oras
tools += oras=v1.3.1
# https://pkg.go.dev/github.com/onsi/ginkgo/v2/ginkgo?tab=versions
# The ginkgo version should be kept in sync with the version used in code.
# If there is no go.mod file (which is only the case for the makefile-modules
# repo), then we default to a version that we know exists. We have to do this
# because otherwise the awk failure renders the whole makefile unusable.
detected_ginkgo_version := $(shell [[ -f go.mod ]] && awk '/ginkgo\/v2/ {print $$2}' go.mod || echo "v2.23.4")
tools += ginkgo=$(detected_ginkgo_version)
# https://pkg.go.dev/github.com/cert-manager/klone?tab=versions
# renovate: datasource=go packageName=github.com/cert-manager/klone
tools += klone=v0.2.0
# https://pkg.go.dev/github.com/goreleaser/goreleaser/v2?tab=versions
# renovate: datasource=go packageName=github.com/goreleaser/goreleaser/v2
tools += goreleaser=v2.15.3
# https://pkg.go.dev/github.com/anchore/syft/cmd/syft?tab=versions
# renovate: datasource=go packageName=github.com/anchore/syft
tools += syft=v1.42.4
# https://github.com/cert-manager/helm-tool/releases
# renovate: datasource=github-releases packageName=cert-manager/helm-tool
tools += helm-tool=v0.5.3
# https://github.com/cert-manager/image-tool/releases
# renovate: datasource=github-releases packageName=cert-manager/image-tool
tools += image-tool=v0.1.0
# https://github.com/cert-manager/cmctl/releases
# renovate: datasource=github-releases packageName=cert-manager/cmctl
tools += cmctl=v2.4.1
# https://pkg.go.dev/github.com/cert-manager/release/cmd/cmrel?tab=versions
# renovate: datasource=go packageName=github.com/cert-manager/release
tools += cmrel=v1.12.15-0.20241121151736-e3cbe5171488
# https://pkg.go.dev/github.com/golangci/golangci-lint/v2/cmd/golangci-lint?tab=versions
# renovate: datasource=go packageName=github.com/golangci/golangci-lint/v2
tools += golangci-lint=v2.11.4
# https://pkg.go.dev/golang.org/x/vuln?tab=versions
# renovate: datasource=go packageName=golang.org/x/vuln
tools += govulncheck=v1.2.0
# https://github.com/operator-framework/operator-sdk/releases
# renovate: datasource=github-releases packageName=operator-framework/operator-sdk
tools += operator-sdk=v1.42.2
# https://pkg.go.dev/github.com/cli/cli/v2?tab=versions
# renovate: datasource=go packageName=github.com/cli/cli/v2
tools += gh=v2.90.0
# https://github.com/redhat-openshift-ecosystem/openshift-preflight/releases
# renovate: datasource=github-releases packageName=redhat-openshift-ecosystem/openshift-preflight
tools += preflight=1.17.1
# https://github.com/daixiang0/gci/releases
# renovate: datasource=github-releases packageName=daixiang0/gci
tools += gci=v0.14.0
# https://github.com/google/yamlfmt/releases
# renovate: datasource=github-releases packageName=google/yamlfmt
tools += yamlfmt=v0.21.0
# https://github.com/yannh/kubeconform/releases
# renovate: datasource=github-releases packageName=yannh/kubeconform
tools += kubeconform=v0.7.0

# FIXME(erikgb): cert-manager needs the ability to override the version set here
# https://pkg.go.dev/k8s.io/code-generator/cmd?tab=versions
# renovate: datasource=go packageName=k8s.io/code-generator
K8S_CODEGEN_VERSION ?= v0.35.4
tools += client-gen=$(K8S_CODEGEN_VERSION)
tools += deepcopy-gen=$(K8S_CODEGEN_VERSION)
tools += informer-gen=$(K8S_CODEGEN_VERSION)
tools += lister-gen=$(K8S_CODEGEN_VERSION)
tools += applyconfiguration-gen=$(K8S_CODEGEN_VERSION)
tools += defaulter-gen=$(K8S_CODEGEN_VERSION)
tools += conversion-gen=$(K8S_CODEGEN_VERSION)
# https://github.com/kubernetes/kube-openapi
# renovate: datasource=go packageName=k8s.io/kube-openapi
tools += openapi-gen=v0.0.0-20260414162039-ec9c827d403f

# https://raw.githubusercontent.com/kubernetes-sigs/controller-tools/master/envtest-releases.yaml
# FIXME: Find a way to configure Renovate to suggest upgrades
KUBEBUILDER_ASSETS_VERSION := v1.35.0
tools += etcd=$(KUBEBUILDER_ASSETS_VERSION)
tools += kube-apiserver=$(KUBEBUILDER_ASSETS_VERSION)

# Additional tools can be defined to reuse the tooling in this file
ADDITIONAL_TOOLS ?=
tools += $(ADDITIONAL_TOOLS)

# Print the go version which can be used in GH actions
.PHONY: print-go-version
print-go-version:
	@echo result=$(VENDORED_GO_VERSION)

# When switching branches which use different versions of the tools, we
# need a way to re-trigger the symlinking from $(bin_dir)/downloaded to $(bin_dir)/tools.
# This pattern rule creates a version stamp file that tracks the tool version.
# If the version changes (or file doesn't exist), update the stamp file to trigger rebuild.
$(bin_dir)/scratch/%_VERSION: FORCE | $(bin_dir)/scratch
	@test "$($*_VERSION)" == "$(shell cat $@ 2>/dev/null)" || echo $($*_VERSION) > $@

# --silent = don't print output like progress meters
# --show-error = but do print errors when they happen
# --fail = exit with a nonzero error code without the response from the server when there's an HTTP error
# --location = follow redirects from the server
# --retry = the number of times to retry a failed attempt to connect
# --retry-connrefused = retry even if the initial connection was refused
CURL := curl --silent --show-error --fail --location --retry 10 --retry-connrefused

# LN is expected to be an atomic action, meaning that two Make processes
# can run the "link $(DOWNLOAD_DIR)/tools/xxx@$(XXX_VERSION)_$(HOST_OS)_$(HOST_ARCH)
# to $(bin_dir)/tools/xxx" operation simultaneously without issues (both
# will perform the action and the second time the link will be overwritten).
#
# -s = Create a symbolic link
# -f = Force the creation of the link (replace existing links)
# -n = If destination already exists, replace it, don't use it as a directory to create a new link inside
LN := ln -fsn

# Mapping of lowercase to uppercase letters for the uc (uppercase) function
upper_map := a:A b:B c:C d:D e:E f:F g:G h:H i:I j:J k:K l:L m:M n:N o:O p:P q:Q r:R s:S t:T u:U v:V w:W x:X y:Y z:Z
# Function to convert a string to uppercase (e.g., "helm" -> "HELM")
# Works by iterating through upper_map and substituting each lowercase letter with uppercase
# Used to create variable names like HELM_VERSION from tool names like "helm"
# NOTE: non-letter characters pass through unchanged, so "helm-unittest"
# becomes "HELM-UNITTEST" (hence variables like HELM-UNITTEST_VERSION below).
uc = $(strip \
		$(eval __upper := $1) \
		$(foreach p,$(upper_map), \
			$(eval __upper := $(subst $(word 1,$(subst :, ,$p)),$(word 2,$(subst :, ,$p)),$(__upper))) \
		) \
	)$(__upper)

tool_names :=

# for each item `xxx` in the tools variable:
# - a $(XXX_VERSION) variable is generated
#     -> this variable contains the version of the tool
# - a $(NEEDS_XXX) variable is generated
#     -> this variable contains the target name for the tool,
#        which is the relative path of the binary, this target
#        should be used when adding the tool as a dependency to
#        your target, you can't use $(XXX) as a dependency because
#        make does not support an absolute path as a dependency
# - a $(XXX) variable is generated
#     -> this variable contains the absolute path of the binary,
#        the absolute path should be used when executing the binary
#        in targets or in scripts, because it is agnostic to the
#        working directory
# - an unversioned target $(bin_dir)/tools/xxx is generated that
#   creates a link to the corresponding versioned target:
#   $(DOWNLOAD_DIR)/tools/xxx@$(XXX_VERSION)_$(HOST_OS)_$(HOST_ARCH)
define tool_defs
tool_names += $1

$(call uc,$1)_VERSION ?= $2
NEEDS_$(call uc,$1) := $$(bin_dir)/tools/$1
$(call uc,$1) := $$(CURDIR)/$$(bin_dir)/tools/$1

# Create symlink from $(bin_dir)/tools/$1 to the versioned binary in $(DOWNLOAD_DIR)
$$(bin_dir)/tools/$1: $$(bin_dir)/scratch/$(call uc,$1)_VERSION | $$(DOWNLOAD_DIR)/tools/$1@$$($(call uc,$1)_VERSION)_$$(HOST_OS)_$$(HOST_ARCH) $$(bin_dir)/tools
	@# cd into tools dir and create relative symlink (e.g., ../downloaded/tools/helm@v4.0.1_darwin_arm64)
	@# patsubst converts absolute path to relative by replacing $(bin_dir) with ..
	@cd $$(dir $$@) && $$(LN) $$(patsubst $$(bin_dir)/%,../%,$$(word 1,$$|)) $$(notdir $$@)
	@touch $$@ # making sure the target of the symlink is newer than *_VERSION
endef

# For each tool in the tools list (e.g., "helm=v4.0.1"), split on "=" and call tool_defs
# with the tool name as first arg and version as second arg
$(foreach tool,$(tools),$(eval $(call tool_defs,$(word 1,$(subst =, ,$(tool))),$(word 2,$(subst =, ,$(tool))))))

######
# Go #
######

# $(NEEDS_GO) is a target that is set as an order-only prerequisite in
# any target that calls $(GO), e.g.:
#
#     $(bin_dir)/tools/crane: $(NEEDS_GO)
#             $(GO) build -o $(bin_dir)/tools/crane
#
# $(NEEDS_GO) is empty most of the time, except when running "make vendor-go"
# or when "make vendor-go" was previously run, in which case $(NEEDS_GO) is set
# to $(bin_dir)/tools/go, since $(bin_dir)/tools/go is a prerequisite of
# any target depending on Go when "make vendor-go" was run.

# Auto-detect if Go vendoring should be enabled:
# - Set if "vendor-go" is in the make command goals, OR
# - Set if $(bin_dir)/tools/go already exists (vendoring was previously run)
detected_vendoring := $(findstring vendor-go,$(MAKECMDGOALS))$(shell [ -f $(bin_dir)/tools/go ] && echo yes)
export VENDOR_GO ?= $(detected_vendoring)

ifeq ($(VENDOR_GO),)
# Vendoring disabled: use the system "go"; fail with a hint if it is missing.
.PHONY: __require-go
ifneq ($(shell command -v go >/dev/null || echo notfound),)
__require-go:
	@:$(error "$(GO) (or run 'make vendor-go')")
endif
GO := go
NEEDS_GO = __require-go
else
# Vendoring enabled: use the downloaded toolchain and re-run make via vendor-go.
export GOROOT := $(CURDIR)/$(bin_dir)/tools/goroot
export PATH := $(CURDIR)/$(bin_dir)/tools/goroot/bin:$(PATH)
GO := $(CURDIR)/$(bin_dir)/tools/go
NEEDS_GO := $(bin_dir)/tools/go
MAKE := $(MAKE) vendor-go
endif

.PHONY: vendor-go
## By default, this Makefile uses the system's Go. You can use a "vendored"
## version of Go that will get downloaded by running this command once. To
## disable vendoring, run "make unvendor-go". When vendoring is enabled,
## you will want to set the following:
##
##     export PATH="$PWD/$(bin_dir)/tools:$PATH"
##     export GOROOT="$PWD/$(bin_dir)/tools/goroot"
## @category [shared] Tools
vendor-go: $(bin_dir)/tools/go

.PHONY: unvendor-go
unvendor-go: $(bin_dir)/tools/go
	rm -rf $(bin_dir)/tools/go $(bin_dir)/tools/goroot

.PHONY: which-go
## Print the version and path of go which will be used for building and
## testing in Makefile commands. Vendored go will have a path in ./bin
## @category [shared] Tools
which-go: | $(NEEDS_GO)
	@$(GO) version
	@echo "go binary used for above version information: $(GO)"

$(bin_dir)/tools/go: $(bin_dir)/scratch/VENDORED_GO_VERSION | $(bin_dir)/tools/goroot $(bin_dir)/tools
	@# Create symlink to the go binary inside the goroot
	@cd $(dir $@) && $(LN) ./goroot/bin/go $(notdir $@)
	@touch $@ # making sure the target of the symlink is newer than *_VERSION

# The "_" in "_bin" prevents "go mod tidy" from trying to tidy the vendored goroot.
$(bin_dir)/tools/goroot: $(bin_dir)/scratch/VENDORED_GO_VERSION | $(GOVENDOR_DIR)/go@$(VENDORED_GO_VERSION)_$(HOST_OS)_$(HOST_ARCH)/goroot $(bin_dir)/tools
	@# Create relative symlink from $(bin_dir)/tools/goroot to $(GOVENDOR_DIR)/...
	@# patsubst converts the absolute path to relative (e.g., ../../go_vendor/go@1.25.4_darwin_arm64/goroot)
	@cd $(dir $@) && $(LN) $(patsubst $(bin_dir)/%,../%,$(word 1,$|)) $(notdir $@)
	@touch $@ # making sure the target of the symlink is newer than *_VERSION

# Extract the tar to the $(GOVENDOR_DIR) directory, this directory is not cached across CI runs.
$(GOVENDOR_DIR)/go@$(VENDORED_GO_VERSION)_$(HOST_OS)_$(HOST_ARCH)/goroot: | $(DOWNLOAD_DIR)/tools/go@$(VENDORED_GO_VERSION)_$(HOST_OS)_$(HOST_ARCH).tar.gz
	@# 1. Use lock script to prevent concurrent extraction
	@# 2. Extract tar.gz to temp directory (creates "go" folder inside)
	@# 3. Rename the extracted "go" directory to final location
	@source $(lock_script) $@; \
		mkdir -p $(outfile).dir; \
		tar xzf $| -C $(outfile).dir; \
		mv $(outfile).dir/go $(outfile); \
		rm -rf $(outfile).dir

###################
# go dependencies #
###################

# Maps each tool name to the Go package path used by "go install".
go_dependencies :=
go_dependencies += ginkgo=github.com/onsi/ginkgo/v2/ginkgo
go_dependencies += controller-gen=sigs.k8s.io/controller-tools/cmd/controller-gen
go_dependencies += goimports=golang.org/x/tools/cmd/goimports
# FIXME: Switch back to github.com/google/go-licenses once
# https://github.com/google/go-licenses/pull/327 is merged.
# Remember to also update the Go package in the Renovate marker over the version (above).
go_dependencies += go-licenses=github.com/inteon/go-licenses/v2
go_dependencies += gotestsum=gotest.tools/gotestsum
go_dependencies += kustomize=sigs.k8s.io/kustomize/kustomize/v5
go_dependencies += gojq=github.com/itchyny/gojq/cmd/gojq
go_dependencies += crane=github.com/google/go-containerregistry/cmd/crane
go_dependencies += protoc-gen-go=google.golang.org/protobuf/cmd/protoc-gen-go
go_dependencies += cosign=github.com/sigstore/cosign/v2/cmd/cosign
go_dependencies += boilersuite=github.com/cert-manager/boilersuite
go_dependencies += gomarkdoc=github.com/princjef/gomarkdoc/cmd/gomarkdoc
go_dependencies += oras=oras.land/oras/cmd/oras
go_dependencies += klone=github.com/cert-manager/klone
go_dependencies += goreleaser=github.com/goreleaser/goreleaser/v2
go_dependencies += syft=github.com/anchore/syft/cmd/syft
go_dependencies += client-gen=k8s.io/code-generator/cmd/client-gen
go_dependencies += deepcopy-gen=k8s.io/code-generator/cmd/deepcopy-gen
go_dependencies += informer-gen=k8s.io/code-generator/cmd/informer-gen
go_dependencies += lister-gen=k8s.io/code-generator/cmd/lister-gen
go_dependencies += applyconfiguration-gen=k8s.io/code-generator/cmd/applyconfiguration-gen
go_dependencies += defaulter-gen=k8s.io/code-generator/cmd/defaulter-gen
go_dependencies += conversion-gen=k8s.io/code-generator/cmd/conversion-gen
go_dependencies += openapi-gen=k8s.io/kube-openapi/cmd/openapi-gen
go_dependencies += helm-tool=github.com/cert-manager/helm-tool
go_dependencies += image-tool=github.com/cert-manager/image-tool
go_dependencies += cmctl=github.com/cert-manager/cmctl/v2
go_dependencies += cmrel=github.com/cert-manager/release/cmd/cmrel
go_dependencies += golangci-lint=github.com/golangci/golangci-lint/v2/cmd/golangci-lint
go_dependencies += govulncheck=golang.org/x/vuln/cmd/govulncheck
go_dependencies += gh=github.com/cli/cli/v2/cmd/gh
go_dependencies += gci=github.com/daixiang0/gci
go_dependencies += yamlfmt=github.com/google/yamlfmt/cmd/yamlfmt
go_dependencies += kubeconform=github.com/yannh/kubeconform/cmd/kubeconform

#################
# go build tags #
#################

go_tags :=

# Additional Go dependencies can be defined to re-use the tooling in this file
ADDITIONAL_GO_DEPENDENCIES ?=
ADDITIONAL_GO_TAGS ?=
go_dependencies += $(ADDITIONAL_GO_DEPENDENCIES)
go_tags += $(ADDITIONAL_GO_TAGS)

# Initialise an (empty) go_tags_<tool> variable for every tool, then append
# any configured tags; tool_defs-generated install rules read go_tags_<tool>.
go_tags_init = go_tags_$1 :=
$(call for_each_kv,go_tags_init,$(go_dependencies))

go_tags_defs = go_tags_$1 += $2
$(call for_each_kv,go_tags_defs,$(go_tags))

go_tool_names :=

# Template for building Go-based tools from source using "go install"
define go_dependency
go_tool_names += $1

$$(DOWNLOAD_DIR)/tools/$1@$($(call uc,$1)_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $$(NEEDS_GO) $$(DOWNLOAD_DIR)/tools
	@# 1. Use lock script to prevent concurrent builds of the same tool
	@# 2. Install to temp dir using GOBIN, with GOWORK=off to ignore workspace files
	@# 3. Move the binary to final location
	@source $$(lock_script) $$@; \
		mkdir -p $$(outfile).dir; \
		GOWORK=off GOBIN=$$(outfile).dir $$(GO) install --tags "$(strip $(go_tags_$1))" $2@$($(call uc,$1)_VERSION); \
		mv $$(outfile).dir/$1 $$(outfile); \
		rm -rf $$(outfile).dir
endef

$(call for_each_kv,go_dependency,$(go_dependencies))

##################
# File downloads #
##################

go_linux_amd64_SHA256SUM=990e6b4bbba816dc3ee129eaeaf4b42f17c2800b88a2166c265ac1a200262282
go_linux_arm64_SHA256SUM=c958a1fe1b361391db163a485e21f5f228142d6f8b584f6bef89b26f66dc5b23
go_darwin_amd64_SHA256SUM=bc3f1500d9968c36d705442d90ba91addf9271665033748b82532682e90a7966
go_darwin_arm64_SHA256SUM=32af1522bf3e3ff3975864780a429cc0b41d190ec7bf90faa661d6d64566e7af

.PRECIOUS: $(DOWNLOAD_DIR)/tools/go@$(VENDORED_GO_VERSION)_$(HOST_OS)_$(HOST_ARCH).tar.gz
$(DOWNLOAD_DIR)/tools/go@$(VENDORED_GO_VERSION)_$(HOST_OS)_$(HOST_ARCH).tar.gz: | $(DOWNLOAD_DIR)/tools
	@source $(lock_script) $@; \
		$(CURL) https://go.dev/dl/go$(VENDORED_GO_VERSION).$(HOST_OS)-$(HOST_ARCH).tar.gz -o $(outfile); \
		$(checkhash_script) $(outfile) $(go_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM)

helm_linux_amd64_SHA256SUM=70b2c30a19da4db264dfd68c8a3664e05093a361cefd89572ffb36f8abfa3d09
helm_linux_arm64_SHA256SUM=13d03672be289045d2ff00e4e345d61de1c6f21c1257a45955a30e8ae036d8f1
helm_darwin_amd64_SHA256SUM=abf09c8503ad1d8ef76d3737a058c3456a998aae5f5966fce4bb3031aeb1654e
helm_darwin_arm64_SHA256SUM=7c2eca678e8001fa863cdf8cbf6ac1b3799f9404a89eb55c08260ef5732e658d

.PRECIOUS: $(DOWNLOAD_DIR)/tools/helm@$(HELM_VERSION)_$(HOST_OS)_$(HOST_ARCH)
$(DOWNLOAD_DIR)/tools/helm@$(HELM_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools
	@source $(lock_script) $@; \
		$(CURL) https://get.helm.sh/helm-$(HELM_VERSION)-$(HOST_OS)-$(HOST_ARCH).tar.gz -o $(outfile).tar.gz; \
		$(checkhash_script) $(outfile).tar.gz $(helm_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \
		tar xfO $(outfile).tar.gz $(HOST_OS)-$(HOST_ARCH)/helm > $(outfile); \
		chmod +x $(outfile); \
		rm -f $(outfile).tar.gz

helm-unittest_linux_amd64_SHA256SUM=9761f23d9509c98770c026e019e743b524b57010f4bc29175f78d2582ace0633
helm-unittest_linux_arm64_SHA256SUM=1e645d96b36582cd8b9fbd53240110267f14d80aa01137341251c60438bbe6b0
helm-unittest_darwin_amd64_SHA256SUM=46413a86ded6bfc70cd704ebac16f8d4a0f36712ae399a5d24e32bc44f96985f
helm-unittest_darwin_arm64_SHA256SUM=6a6b67b3f638f015e09c093b67c7609a07101b971a1a6d6a83d1a7f75861a4b2

# helm-unittest uses "macos" instead of "darwin" in release filenames
helm_unittest_os := $(HOST_OS)
ifeq ($(HOST_OS),darwin)
helm_unittest_os := macos
endif

.PRECIOUS: $(DOWNLOAD_DIR)/tools/helm-unittest@$(HELM-UNITTEST_VERSION)_$(HOST_OS)_$(HOST_ARCH)
$(DOWNLOAD_DIR)/tools/helm-unittest@$(HELM-UNITTEST_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools
	@source $(lock_script) $@; \
		$(CURL) https://github.com/helm-unittest/helm-unittest/releases/download/$(HELM-UNITTEST_VERSION)/helm-unittest-$(helm_unittest_os)-$(HOST_ARCH)-$(HELM-UNITTEST_VERSION:v%=%).tgz -o $(outfile).tgz; \
		$(checkhash_script) $(outfile).tgz $(helm-unittest_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \
		tar xfO $(outfile).tgz untt > $(outfile); \
		chmod +x $(outfile); \
		rm -f $(outfile).tgz

kubectl_linux_amd64_SHA256SUM=b529430df69a688fd61b64ad2299edb5fd71cb58be2a4779dba624c7d3510efd
kubectl_linux_arm64_SHA256SUM=6a5a4cc4e396d7626a7a693a3044b51c75520f81db30fe6816c2554e53be336f
kubectl_darwin_amd64_SHA256SUM=dddb01bddb96f78e48e33105ccfa2feedff585a8b2e3b812f5d0f64c7403710a
kubectl_darwin_arm64_SHA256SUM=ec644a2473b64b486987f695dfb1867963ce6d42d267b86e944585a546f92b5d

.PRECIOUS: $(DOWNLOAD_DIR)/tools/kubectl@$(KUBECTL_VERSION)_$(HOST_OS)_$(HOST_ARCH)
$(DOWNLOAD_DIR)/tools/kubectl@$(KUBECTL_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools
	@source $(lock_script) $@; \
		$(CURL) https://dl.k8s.io/release/$(KUBECTL_VERSION)/bin/$(HOST_OS)/$(HOST_ARCH)/kubectl -o $(outfile); \
		$(checkhash_script) $(outfile) $(kubectl_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \
		chmod +x $(outfile)
kind_linux_amd64_SHA256SUM=eb244cbafcc157dff60cf68693c14c9a75c4e6e6fedaf9cd71c58117cb93e3fa kind_linux_arm64_SHA256SUM=8e1014e87c34901cc422a1445866835d1e666f2a61301c27e722bdeab5a1f7e4 kind_darwin_amd64_SHA256SUM=a8b3cf77b2ad77aec5bf710d1a2589d9117576132af812885cad41e9dede4d4e kind_darwin_arm64_SHA256SUM=88bf554fe9da6311c9f8c2d082613c002911a476f6b5090e9420b35d84e70c5c .PRECIOUS: $(DOWNLOAD_DIR)/tools/kind@$(KIND_VERSION)_$(HOST_OS)_$(HOST_ARCH) $(DOWNLOAD_DIR)/tools/kind@$(KIND_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools @source $(lock_script) $@; \ $(CURL) https://github.com/kubernetes-sigs/kind/releases/download/$(KIND_VERSION)/kind-$(HOST_OS)-$(HOST_ARCH) -o $(outfile); \ $(checkhash_script) $(outfile) $(kind_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \ chmod +x $(outfile) vault_linux_amd64_SHA256SUM=889b681990fe221b884b7932fa9c9dd0ee9811b9349554f1aa287ab63c9f3dae vault_linux_arm64_SHA256SUM=1104ef701aad16e104e2e7b4d2a02a6ec993237559343f3097ac63a00b42e85d vault_darwin_amd64_SHA256SUM=a667be3cf56dd0f21a23ba26b47028d1f51b3ca61e71b0e29ceafef1c2a1dc3a vault_darwin_arm64_SHA256SUM=c79012c1c8aedd682c68b5d9c89149030611c82da57f45383aef004b39a640d2 .PRECIOUS: $(DOWNLOAD_DIR)/tools/vault@$(VAULT_VERSION)_$(HOST_OS)_$(HOST_ARCH) $(DOWNLOAD_DIR)/tools/vault@$(VAULT_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools @source $(lock_script) $@; \ $(CURL) https://releases.hashicorp.com/vault/$(VAULT_VERSION:v%=%)/vault_$(VAULT_VERSION:v%=%)_$(HOST_OS)_$(HOST_ARCH).zip -o $(outfile).zip; \ $(checkhash_script) $(outfile).zip $(vault_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \ unzip -p $(outfile).zip vault > $(outfile); \ chmod +x $(outfile); \ rm -f $(outfile).zip azwi_linux_amd64_SHA256SUM=d816d24c865d86ca101219197b493e399d3f669e8e20e0aaffc5a09f0f4c0aaf azwi_linux_arm64_SHA256SUM=f74799439ec3d33d6f69dcaa237fbdde8501390f06ee6d6fb1edfb36f64e1fa6 azwi_darwin_amd64_SHA256SUM=50dec4f29819a68827d695950a36b296aff501e81420787c16603d6394503c97 
azwi_darwin_arm64_SHA256SUM=f267f5fad691cb60d1983a3df5c9a67d83cba0ca0d87aa707a713d2ba4f47776 .PRECIOUS: $(DOWNLOAD_DIR)/tools/azwi@$(AZWI_VERSION)_$(HOST_OS)_$(HOST_ARCH) $(DOWNLOAD_DIR)/tools/azwi@$(AZWI_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools @source $(lock_script) $@; \ $(CURL) https://github.com/Azure/azure-workload-identity/releases/download/$(AZWI_VERSION)/azwi-$(AZWI_VERSION)-$(HOST_OS)-$(HOST_ARCH).tar.gz -o $(outfile).tar.gz; \ $(checkhash_script) $(outfile).tar.gz $(azwi_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \ tar xfO $(outfile).tar.gz azwi > $(outfile) && chmod 775 $(outfile); \ rm -f $(outfile).tar.gz kubebuilder_tools_linux_amd64_SHA256SUM=5716719def14a3fec3ed285e5e8c4280e6268854039b5073a96e8c0adafb1c02 kubebuilder_tools_linux_arm64_SHA256SUM=5057fb45eecf246929da768b21d32434b8c96e22a78ef6cdfe912f1a67aae45a kubebuilder_tools_darwin_amd64_SHA256SUM=e733f72effc8a8076f2c8eb892de4aeb4bb54ea02082808ce3e51f80f2ff85e2 kubebuilder_tools_darwin_arm64_SHA256SUM=3c6b1ebd745b82daed47605fb565f7c670c8a3344b57a377a914d013b6b9eef0 .PRECIOUS: $(DOWNLOAD_DIR)/tools/kubebuilder_tools_$(KUBEBUILDER_ASSETS_VERSION)_$(HOST_OS)_$(HOST_ARCH).tar.gz $(DOWNLOAD_DIR)/tools/kubebuilder_tools_$(KUBEBUILDER_ASSETS_VERSION)_$(HOST_OS)_$(HOST_ARCH).tar.gz: | $(DOWNLOAD_DIR)/tools @source $(lock_script) $@; \ $(CURL) https://github.com/kubernetes-sigs/controller-tools/releases/download/envtest-$(KUBEBUILDER_ASSETS_VERSION)/envtest-$(KUBEBUILDER_ASSETS_VERSION)-$(HOST_OS)-$(HOST_ARCH).tar.gz -o $(outfile); \ $(checkhash_script) $(outfile) $(kubebuilder_tools_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM) $(DOWNLOAD_DIR)/tools/etcd@$(KUBEBUILDER_ASSETS_VERSION)_$(HOST_OS)_$(HOST_ARCH): $(DOWNLOAD_DIR)/tools/kubebuilder_tools_$(KUBEBUILDER_ASSETS_VERSION)_$(HOST_OS)_$(HOST_ARCH).tar.gz | $(DOWNLOAD_DIR)/tools @# Extract specific file from tarball using tar's -O flag (output to stdout) @source $(lock_script) $@; \ tar xfO $< controller-tools/envtest/etcd > $(outfile) && chmod 775 
$(outfile) $(DOWNLOAD_DIR)/tools/kube-apiserver@$(KUBEBUILDER_ASSETS_VERSION)_$(HOST_OS)_$(HOST_ARCH): $(DOWNLOAD_DIR)/tools/kubebuilder_tools_$(KUBEBUILDER_ASSETS_VERSION)_$(HOST_OS)_$(HOST_ARCH).tar.gz | $(DOWNLOAD_DIR)/tools @# Extract specific file from tarball using tar's -O flag (output to stdout) @source $(lock_script) $@; \ tar xfO $< controller-tools/envtest/kube-apiserver > $(outfile) && chmod 775 $(outfile) kyverno_linux_amd64_SHA256SUM=d0c0f52e8fc8d66a3663b63942b131e5f91b63f7644b3e446546f79142d1b7a3 kyverno_linux_arm64_SHA256SUM=6f6a66711ba8fc2bd54a28aa1755a62605d053a6a3a758186201ba1f56698ced kyverno_darwin_amd64_SHA256SUM=d221d8d93c622b68a2933f4e0accd61db4f41100336f1ddad141259742f70948 kyverno_darwin_arm64_SHA256SUM=851d1fcc4427a317674cc1892af4f43dcd19983c94498a1a913b6b849f71ef8c .PRECIOUS: $(DOWNLOAD_DIR)/tools/kyverno@$(KYVERNO_VERSION)_$(HOST_OS)_$(HOST_ARCH) $(DOWNLOAD_DIR)/tools/kyverno@$(KYVERNO_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools @# Kyverno uses x86_64 instead of amd64 in download URLs, so translate the architecture $(eval ARCH := $(subst amd64,x86_64,$(HOST_ARCH))) @source $(lock_script) $@; \ $(CURL) https://github.com/kyverno/kyverno/releases/download/$(KYVERNO_VERSION)/kyverno-cli_$(KYVERNO_VERSION)_$(HOST_OS)_$(ARCH).tar.gz -o $(outfile).tar.gz; \ $(checkhash_script) $(outfile).tar.gz $(kyverno_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \ tar xfO $(outfile).tar.gz kyverno > $(outfile); \ chmod +x $(outfile); \ rm -f $(outfile).tar.gz yq_linux_amd64_SHA256SUM=d56bf5c6819e8e696340c312bd70f849dc1678a7cda9c2ad63eebd906371d56b yq_linux_arm64_SHA256SUM=03061b2a50c7a498de2bbb92d7cb078ce433011f085a4994117c2726be4106ea yq_darwin_amd64_SHA256SUM=616b0a0f6a5b79d746f05a169c2b9bb40dee00c605ef165b9a1c1681bba738ac yq_darwin_arm64_SHA256SUM=541ba2287560df70f561955e2d7f7e1cd00cf2a15a884f6b5c87a4bfa887bc07 .PRECIOUS: $(DOWNLOAD_DIR)/tools/yq@$(YQ_VERSION)_$(HOST_OS)_$(HOST_ARCH) $(DOWNLOAD_DIR)/tools/yq@$(YQ_VERSION)_$(HOST_OS)_$(HOST_ARCH): | 
$(DOWNLOAD_DIR)/tools @source $(lock_script) $@; \ $(CURL) https://github.com/mikefarah/yq/releases/download/$(YQ_VERSION)/yq_$(HOST_OS)_$(HOST_ARCH) -o $(outfile); \ $(checkhash_script) $(outfile) $(yq_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \ chmod +x $(outfile) ko_linux_amd64_SHA256SUM=048ab11818089a43b7b74bc554494a79a3fd0d9822c061142e5cd3cf8b30cb27 ko_linux_arm64_SHA256SUM=9a26698876892128952fa3d038a4e99bea961d0d225865c60474b79e3db12e99 ko_darwin_amd64_SHA256SUM=0e0dd8fddbefebb8572ece4dca8f07a7472de862fedd7e9845fd9d651e0d5dbe ko_darwin_arm64_SHA256SUM=752a639e0fbc013a35a43974b5ed87e7008bc2aee4952dfd2cc19f0013205492 .PRECIOUS: $(DOWNLOAD_DIR)/tools/ko@$(KO_VERSION)_$(HOST_OS)_$(HOST_ARCH) $(DOWNLOAD_DIR)/tools/ko@$(KO_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools @# Ko uses capitalized OS names (Linux/Darwin) and x86_64 instead of amd64 $(eval OS := $(subst linux,Linux,$(subst darwin,Darwin,$(HOST_OS)))) $(eval ARCH := $(subst amd64,x86_64,$(HOST_ARCH))) @source $(lock_script) $@; \ $(CURL) https://github.com/ko-build/ko/releases/download/v$(KO_VERSION)/ko_$(KO_VERSION)_$(OS)_$(ARCH).tar.gz -o $(outfile).tar.gz; \ $(checkhash_script) $(outfile).tar.gz $(ko_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \ tar xfO $(outfile).tar.gz ko > $(outfile); \ chmod +x $(outfile); \ rm -f $(outfile).tar.gz protoc_linux_amd64_SHA256SUM=af27ea66cd26938fe48587804ca7d4817457a08350021a1c6e23a27ccc8c6904 protoc_linux_arm64_SHA256SUM=31c5e9e3c7bf013cf41fb97765ee255c140024a6b175b6cc9b64beddd7c23ba7 protoc_darwin_amd64_SHA256SUM=ab124429c1f49951f03b6c0c0e911fec04e2c7c20de5c935e0cde7353bbd016c protoc_darwin_arm64_SHA256SUM=2c7e92b8b578916937df132b3032e2e8e6c170862ecf7a8333094a6f3d03650c .PRECIOUS: $(DOWNLOAD_DIR)/tools/protoc@$(PROTOC_VERSION)_$(HOST_OS)_$(HOST_ARCH) $(DOWNLOAD_DIR)/tools/protoc@$(PROTOC_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools @# Protoc uses different naming: darwin->osx, amd64->x86_64, arm64->aarch_64 $(eval OS := $(subst darwin,osx,$(HOST_OS))) 
$(eval ARCH := $(subst arm64,aarch_64,$(subst amd64,x86_64,$(HOST_ARCH)))) @source $(lock_script) $@; \ $(CURL) https://github.com/protocolbuffers/protobuf/releases/download/$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION:v%=%)-$(OS)-$(ARCH).zip -o $(outfile).zip; \ $(checkhash_script) $(outfile).zip $(protoc_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \ unzip -p $(outfile).zip bin/protoc > $(outfile); \ chmod +x $(outfile); \ rm -f $(outfile).zip trivy_linux_amd64_SHA256SUM=8b4376d5d6befe5c24d503f10ff136d9e0c49f9127a4279fd110b727929a5aa9 trivy_linux_arm64_SHA256SUM=2f6bb988b553a1bbac6bdd1ce890f5e412439564e17522b88a4541b4f364fc8d trivy_darwin_amd64_SHA256SUM=52d531452b19e7593da29366007d02a810e1e0080d02f9cf6a1afb46c35aaa93 trivy_darwin_arm64_SHA256SUM=68e543c51dcc96e1c344053a4fde9660cf602c25565d9f09dc17dd41e13b838a .PRECIOUS: $(DOWNLOAD_DIR)/tools/trivy@$(TRIVY_VERSION)_$(HOST_OS)_$(HOST_ARCH) $(DOWNLOAD_DIR)/tools/trivy@$(TRIVY_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools @# Trivy uses unusual naming: Linux/macOS for OS, 64bit/ARM64 for architecture $(eval OS := $(subst linux,Linux,$(subst darwin,macOS,$(HOST_OS)))) $(eval ARCH := $(subst amd64,64bit,$(subst arm64,ARM64,$(HOST_ARCH)))) @source $(lock_script) $@; \ $(CURL) https://github.com/aquasecurity/trivy/releases/download/$(TRIVY_VERSION)/trivy_$(patsubst v%,%,$(TRIVY_VERSION))_$(OS)-$(ARCH).tar.gz -o $(outfile).tar.gz; \ $(checkhash_script) $(outfile).tar.gz $(trivy_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \ tar xfO $(outfile).tar.gz trivy > $(outfile); \ chmod +x $(outfile); \ rm $(outfile).tar.gz ytt_linux_amd64_SHA256SUM=18fe794d01c2539db39acb90994db0d8e51faa7892d0e749d74c29818017247a ytt_linux_arm64_SHA256SUM=0e9e75b7a5f59161d2413e9d6163a1a13218f270daa1c525656195d1fcef28f6 ytt_darwin_amd64_SHA256SUM=cc51c3040b91bb0871967f9960cd9286bafd334ffd153a86914b883f3adad9ef ytt_darwin_arm64_SHA256SUM=4cc85a5e954d651d547cdef1e673742d995a38b0840273a5897e5318185b4e18 .PRECIOUS: 
$(DOWNLOAD_DIR)/tools/ytt@$(YTT_VERSION)_$(HOST_OS)_$(HOST_ARCH) $(DOWNLOAD_DIR)/tools/ytt@$(YTT_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools @source $(lock_script) $@; \ $(CURL) -sSfL https://github.com/vmware-tanzu/carvel-ytt/releases/download/$(YTT_VERSION)/ytt-$(HOST_OS)-$(HOST_ARCH) -o $(outfile); \ $(checkhash_script) $(outfile) $(ytt_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \ chmod +x $(outfile) rclone_linux_amd64_SHA256SUM=abc0e6e0f275a469d94645f7ef92c7c7673eed20b6558acec5ff48b74641213c rclone_linux_arm64_SHA256SUM=00c9e230f0004ab5e3b45c00edf7238ba5bff5fc7ea80f5a86a7da5568de6d1c rclone_darwin_amd64_SHA256SUM=4ef15279d857372f3ff84b967ad68fc1c3b113d631effb9c09a18e40f8a78fa7 rclone_darwin_arm64_SHA256SUM=8cfffacc3ce732b1960645a2f7d2ce97c2ac9ba4f2221c13af6378c199a078f9 .PRECIOUS: $(DOWNLOAD_DIR)/tools/rclone@$(RCLONE_VERSION)_$(HOST_OS)_$(HOST_ARCH) $(DOWNLOAD_DIR)/tools/rclone@$(RCLONE_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools @# Rclone uses "osx" instead of "darwin" in download URLs $(eval OS := $(subst darwin,osx,$(HOST_OS))) @source $(lock_script) $@; \ $(CURL) https://github.com/rclone/rclone/releases/download/$(RCLONE_VERSION)/rclone-$(RCLONE_VERSION)-$(OS)-$(HOST_ARCH).zip -o $(outfile).zip; \ $(checkhash_script) $(outfile).zip $(rclone_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \ unzip -p $(outfile).zip rclone-$(RCLONE_VERSION)-$(OS)-$(HOST_ARCH)/rclone > $(outfile); \ chmod +x $(outfile); \ rm -f $(outfile).zip istioctl_linux_amd64_SHA256SUM=904bbf1b917dd0135aa55b99cbfa34edd0a188fdeeeef09bb995d8e8e3165112 istioctl_linux_arm64_SHA256SUM=c4130d32359446fa5e4820c0543d06e2e424883c6890f0f8c59f3ac69dd4b44e istioctl_darwin_amd64_SHA256SUM=0bd51e88f8a2568892523752e12ce720793e4b9a9b25bdd4555d5932048e2bf1 istioctl_darwin_arm64_SHA256SUM=dffa0ff011774cf65fbae5d53f84d54bd12b541a35cff68be60db1c6674f03b4 .PRECIOUS: $(DOWNLOAD_DIR)/tools/istioctl@$(ISTIOCTL_VERSION)_$(HOST_OS)_$(HOST_ARCH) 
$(DOWNLOAD_DIR)/tools/istioctl@$(ISTIOCTL_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools @# Istio uses "osx" instead of "darwin" in download URLs $(eval OS := $(subst darwin,osx,$(HOST_OS))) @source $(lock_script) $@; \ $(CURL) https://github.com/istio/istio/releases/download/$(ISTIOCTL_VERSION)/istio-$(ISTIOCTL_VERSION)-$(OS)-$(HOST_ARCH).tar.gz -o $(outfile).tar.gz; \ $(checkhash_script) $(outfile).tar.gz $(istioctl_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \ tar xfO $(outfile).tar.gz istio-$(ISTIOCTL_VERSION)/bin/istioctl > $(outfile); \ chmod +x $(outfile); \ rm $(outfile).tar.gz preflight_linux_amd64_SHA256SUM=15f58d0de7212ac948706515f824d0d2f42b94c11fa85cdb1bc08ad8993226ca preflight_linux_arm64_SHA256SUM=a05103b894ce9fd63f47bd56518b8f0b52850ef11e7ef8c21146ac1273d799ad preflight_darwin_amd64_SHA256SUM=f707d9ec7f564ba35dc4a7a73f20562c1f7d11035c93d56b6ae9679649de98e3 preflight_darwin_arm64_SHA256SUM=6b9c2d3aa2b45303272ca29b7ae231d099d6a1f64142c918e01cb229aeee96a6 .PRECIOUS: $(DOWNLOAD_DIR)/tools/preflight@$(PREFLIGHT_VERSION)_$(HOST_OS)_$(HOST_ARCH) $(DOWNLOAD_DIR)/tools/preflight@$(PREFLIGHT_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools @source $(lock_script) $@; \ $(CURL) https://github.com/redhat-openshift-ecosystem/openshift-preflight/releases/download/$(PREFLIGHT_VERSION)/preflight-$(HOST_OS)-$(HOST_ARCH) -o $(outfile); \ $(checkhash_script) $(outfile) $(preflight_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \ chmod +x $(outfile) operator-sdk_linux_amd64_SHA256SUM=8847c45ea994ac62b3cd134f77934df2a16a56a39a634eb988e0d1db99d1a413 operator-sdk_linux_arm64_SHA256SUM=5fbb4c9f1eb3d8f6e9f870bfb48160842b9b541ce644d602282ef86578fedc1c operator-sdk_darwin_amd64_SHA256SUM=0293b988886b5a2a82b6c141c46293915f0c67cae43cabdb36a0ffdf8af042b6 operator-sdk_darwin_arm64_SHA256SUM=8f7c19e35ce6ad4069502fcb66ea89548d0173ff8a02b253b0be4ad4909eeaf6 .PRECIOUS: $(DOWNLOAD_DIR)/tools/operator-sdk@$(OPERATOR-SDK_VERSION)_$(HOST_OS)_$(HOST_ARCH) 
$(DOWNLOAD_DIR)/tools/operator-sdk@$(OPERATOR-SDK_VERSION)_$(HOST_OS)_$(HOST_ARCH): | $(DOWNLOAD_DIR)/tools @source $(lock_script) $@; \ $(CURL) https://github.com/operator-framework/operator-sdk/releases/download/$(OPERATOR-SDK_VERSION)/operator-sdk_$(HOST_OS)_$(HOST_ARCH) -o $(outfile); \ $(checkhash_script) $(outfile) $(operator-sdk_$(HOST_OS)_$(HOST_ARCH)_SHA256SUM); \ chmod +x $(outfile) ################# # Other Targets # ################# # Although we "vendor" most tools in $(bin_dir)/tools, we still require some binaries # to be available on the system. The vendor-go MAKECMDGOALS trick prevents the # check for the presence of Go when 'make vendor-go' is run. # Gotcha warning: MAKECMDGOALS only contains what the _top level_ make invocation used, and doesn't look at target dependencies # i.e. if we have a target "abc: vendor-go test" and run "make abc", we'll get an error # about go being missing even though abc itself depends on vendor-go! # That means we need to pass vendor-go at the top level if go is not installed (i.e. "make vendor-go abc") # Check for required system tools by testing if each command exists # If a command is missing, echo its name. The && chains mean all tests run, # and "missing" will contain a space-separated list of any missing tools. 
missing=$(shell (command -v curl >/dev/null || echo curl) \ && (command -v sha256sum >/dev/null || command -v shasum >/dev/null || echo sha256sum) \ && (command -v git >/dev/null || echo git) \ && (command -v xargs >/dev/null || echo xargs) \ && (command -v bash >/dev/null || echo bash)) ifneq ($(missing),) $(error Missing required tools: $(missing)) endif non_go_tool_names := $(filter-out $(go_tool_names),$(tool_names)) .PHONY: non-go-tools ## Download and setup all Non-Go tools ## @category [shared] Tools non-go-tools: $(non_go_tool_names:%=$(bin_dir)/tools/%) .PHONY: go-tools ## Download and setup all Go tools ## NOTE: this target is also used to learn the shas of ## these tools (see scripts/learn_tools_shas.sh in the ## Makefile modules repo) ## @category [shared] Tools go-tools: $(go_tool_names:%=$(bin_dir)/tools/%) .PHONY: tools ## Download and setup all tools ## @category [shared] Tools tools: non-go-tools go-tools ================================================ FILE: make/_shared/tools/util/checkhash.sh ================================================ #!/usr/bin/env bash # Copyright 2023 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
set -o errexit set -o nounset set -o pipefail SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" # This script takes the hash of its first argument and verifies it against the # hex hash given in its second argument function usage_and_exit() { echo "usage: $0 " echo "or: LEARN_FILE= $0 " exit 1 } HASH_TARGET=${1:-} EXPECTED_HASH=${2:-} if [[ -z $HASH_TARGET ]]; then usage_and_exit fi if [[ -z $EXPECTED_HASH ]]; then usage_and_exit fi SHASUM=$("${SCRIPT_DIR}/hash.sh" "$HASH_TARGET") if [[ "$SHASUM" == "$EXPECTED_HASH" ]]; then exit 0 fi # When running 'make learn-sha-tools', we don't want this script to fail. # Instead we log what sha values are wrong, so the make.mk file can be updated. if [ "${LEARN_FILE:-}" != "" ]; then echo "s/$EXPECTED_HASH/$SHASUM/g" >> "${LEARN_FILE:-}" exit 0 fi echo "invalid checksum for \"$HASH_TARGET\": wanted \"$EXPECTED_HASH\" but got \"$SHASUM\"" exit 1 ================================================ FILE: make/_shared/tools/util/hash.sh ================================================ #!/usr/bin/env bash # Copyright 2023 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -o errexit set -o nounset set -o pipefail # This script is a wrapper for outputting purely the sha256 hash of the input file, # ideally in a portable way. 
case "$(uname -s)" in Darwin*) shasum -a 256 "$1";; *) sha256sum "$1" esac | cut -d" " -f1 ================================================ FILE: make/_shared/tools/util/lock.sh ================================================ #!/usr/bin/env bash # Copyright 2023 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -o errexit set -o nounset set -o pipefail # This script is used to lock a file while it is being downloaded. It prevents # multiple processes from downloading the same file at the same time or from reading # a half-downloaded file. # We need this solution because we have recursive $(MAKE) calls in our makefile # which each will try to download a set of tools. To prevent them from all downloading # the same files, we re-use the same downloads folder for all $(MAKE) invocations and # use this script to deduplicate the download processes. finalfile="$1" lockfile="$finalfile.lock" # On macOS, flock is not installed, we just skip locking in that case, # this means that running verify in parallel without downloading all # tools first will not work. flock_installed=$(command -v flock >/dev/null && echo "yes" || echo "no") if [[ "$flock_installed" == "yes" ]]; then mkdir -p "$(dirname "$lockfile")" touch "$lockfile" exec {FD}<>"$lockfile" # wait for the file to be unlocked if ! 
flock -x $FD; then echo "Failed to obtain a lock for $lockfile" exit 1 fi fi # now that we have the lock, check if file is already there if [[ -e "$finalfile" ]]; then exit 0 fi # use a temporary file to prevent Make from thinking the file is ready # while in reality is is only a partial download # shellcheck disable=SC2034 outfile="$finalfile.tmp" finish() { rv=$? if [[ $rv -eq 0 ]]; then mv "$outfile" "$finalfile" echo "[info]: downloaded $finalfile" else rm -rf "$outfile" || true rm -rf "$finalfile" || true fi rm -rf "$lockfile" || true } trap finish EXIT SIGINT ================================================ FILE: make/ark/00_mod.mk ================================================ build_names += ark go_ark_main_dir := ./cmd/ark go_ark_mod_dir := . go_ark_ldflags := \ -X $(gomodule_name)/pkg/version.PreflightVersion=$(VERSION) \ -X $(gomodule_name)/pkg/version.Commit=$(GITCOMMIT) \ -X $(gomodule_name)/pkg/version.BuildDate=$(shell date "+%F-%T-%Z") oci_ark_base_image_flavor := static oci_ark_image_name := quay.io/jetstack/disco-agent oci_ark_image_tag := $(VERSION) oci_ark_image_name_development := jetstack.local/disco-agent # Annotations are the standardised set of annotations we set on every component we publish oci_ark_build_args := \ --image-annotation="org.opencontainers.image.source"="https://github.com/jetstack/jetstack-secure" \ --image-annotation="org.opencontainers.image.vendor"="CyberArk Software Ltd." \ --image-annotation="org.opencontainers.image.licenses"="EULA - https://www.cyberark.com/contract-terms/" \ --image-annotation="org.opencontainers.image.authors"="CyberArk Software Ltd." \ --image-annotation="org.opencontainers.image.title"="CyberArk Discovery and Context Agent" \ --image-annotation="org.opencontainers.image.description"="Gathers machine identity data from Kubernetes clusters." 
\ --image-annotation="org.opencontainers.image.url"="https://www.cyberark.com/products/" \ --image-annotation="org.opencontainers.image.documentation"="https://docs.cyberark.com" \ --image-annotation="org.opencontainers.image.version"="$(VERSION)" \ --image-annotation="org.opencontainers.image.revision"="$(GITCOMMIT)" define ark_helm_values_mutation_function echo "no mutations defined for this chart" endef ================================================ FILE: make/ark/02_mod.mk ================================================ # Makefile targets for CyberArk Discovery and Context # The base OCI repository for all CyberArk Discovery and Context artifacts ARK_OCI_BASE ?= quay.io/jetstack # The OCI repository (without tag) for the CyberArk Discovery and Context Agent Docker image # Can be overridden when calling `make ark-release` to push to a different repository. ARK_IMAGE ?= $(ARK_OCI_BASE)/disco-agent # The OCI repository (without tag) for the CyberArk Discovery and Context Helm chart # Can be overridden when calling `make ark-release` to push to a different repository. 
ARK_CHART ?= $(ARK_OCI_BASE)/charts/disco-agent # Used to output variables when running in GitHub Actions GITHUB_OUTPUT ?= /dev/stderr .PHONY: ark-release ## Publish all release artifacts (image + helm chart) ## @category CyberArk Discovery and Context ark-release: oci_ark_image_digest_path := $(bin_dir)/scratch/image/oci-layout-ark.digests ark-release: helm_digest_path := $(bin_dir)/scratch/helm/disco-agent-$(helm_chart_version).digests ark-release: $(MAKE) oci-push-ark helm-chart-oci-push \ oci_ark_image_name="$(ARK_IMAGE)" \ helm_image_name="$(ARK_IMAGE)" \ helm_image_tag="$(oci_ark_image_tag)" \ helm_chart_source_dir=deploy/charts/disco-agent \ helm_chart_image_name="$(ARK_CHART)" @echo "ARK_IMAGE=$(ARK_IMAGE)" >> "$(GITHUB_OUTPUT)" @echo "ARK_IMAGE_TAG=$(oci_ark_image_tag)" >> "$(GITHUB_OUTPUT)" @echo "ARK_IMAGE_DIGEST=$$(head -1 $(oci_ark_image_digest_path))" >> "$(GITHUB_OUTPUT)" @echo "ARK_CHART=$(ARK_CHART)" >> "$(GITHUB_OUTPUT)" @echo "ARK_CHART_TAG=$(helm_chart_version)" >> "$(GITHUB_OUTPUT)" @echo "ARK_CHART_DIGEST=$$(head -1 $(helm_digest_path))" >> "$(GITHUB_OUTPUT)" @echo "Release complete!" .PHONY: ark-test-e2e ## Run a basic E2E test on a Kind cluster ## See `hack/ark/e2e.sh` for the full test script. 
## @category CyberArk Discovery and Context ark-test-e2e: $(NEEDS_KIND) $(NEEDS_KUBECTL) $(NEEDS_HELM) PATH="$(bin_dir)/tools:${PATH}" ./hack/ark/test-e2e.sh .PHONY: ark-verify ## Verify the Helm chart ## @category CyberArk Discovery and Context ark-verify: INSTALL_OPTIONS="--set acceptTerms=true" $(MAKE) verify-helm-lint verify-helm-values verify-pod-security-standards verify-helm-kubeconform verify-helm-unittest \ helm_chart_source_dir=deploy/charts/disco-agent \ helm_chart_image_name=$(ARK_CHART) shared_verify_targets += ark-verify .PHONY: ark-generate ## Generate Helm chart documentation and schema ## @category CyberArk Discovery and Context ark-generate: $(MAKE) generate-helm-docs generate-helm-schema \ helm_chart_source_dir=deploy/charts/disco-agent shared_generate_targets += ark-generate ================================================ FILE: make/connection_crd/main.go ================================================ package main import ( "fmt" crd "github.com/jetstack/venafi-connection-lib/config/crd/bases" ) // With this tool, we no longer have to use something like `helm template` to // pull the CRD manifest from the venafi-connection-lib project. func main() { fmt.Print(string(crd.VenafiConnectionCrd)) } ================================================ FILE: make/extra_tools.mk ================================================ ADDITIONAL_TOOLS := ADDITIONAL_GO_DEPENDENCIES := ADDITIONAL_TOOLS += venctl=1.27.0 ADDITIONAL_TOOLS += step=0.28.2 ================================================ FILE: make/ngts/00_mod.mk ================================================ build_names += ngts go_ngts_main_dir := ./cmd/ark go_ngts_mod_dir := . 
go_ngts_ldflags := \ -X $(gomodule_name)/pkg/version.PreflightVersion=$(VERSION) \ -X $(gomodule_name)/pkg/version.Commit=$(GITCOMMIT) \ -X $(gomodule_name)/pkg/version.BuildDate=$(shell date "+%F-%T-%Z") oci_ngts_base_image_flavor := static oci_ngts_image_name := quay.io/jetstack/discovery-agent oci_ngts_image_tag := $(VERSION) oci_ngts_image_name_development := jetstack.local/discovery-agent # Annotations are the standardised set of annotations we set on every component we publish oci_ngts_build_args := \ --image-annotation="org.opencontainers.image.source"="https://github.com/jetstack/jetstack-secure" \ --image-annotation="org.opencontainers.image.vendor"="Palo Alto Networks" \ --image-annotation="org.opencontainers.image.licenses"="Apache-2.0" \ --image-annotation="org.opencontainers.image.authors"="Palo Alto Networks" \ --image-annotation="org.opencontainers.image.title"="Discovery Agent for NGTS" \ --image-annotation="org.opencontainers.image.description"="Gathers machine identity data from Kubernetes clusters for NGTS." \ --image-annotation="org.opencontainers.image.url"="https://www.paloaltonetworks.com/" \ --image-annotation="org.opencontainers.image.documentation"="https://docs.paloaltonetworks.com/" \ --image-annotation="org.opencontainers.image.version"="$(VERSION)" \ --image-annotation="org.opencontainers.image.revision"="$(GITCOMMIT)" define ngts_helm_values_mutation_function echo "no mutations defined for this chart" endef ================================================ FILE: make/ngts/02_mod.mk ================================================ # Makefile targets for NGTS Discovery Agent # The base OCI repository for all NGTS Discovery Agent artifacts NGTS_OCI_BASE ?= quay.io/jetstack # The OCI repository (without tag) for the NGTS Discovery Agent Docker image # Can be overridden when calling `make ngts-release` to push to a different repository. 
NGTS_IMAGE ?= $(NGTS_OCI_BASE)/discovery-agent # The OCI repository (without tag) for the NGTS Discovery Agent Helm chart # Can be overridden when calling `make ngts-release` to push to a different repository. NGTS_CHART ?= $(NGTS_OCI_BASE)/charts/discovery-agent # Used to output variables when running in GitHub Actions GITHUB_OUTPUT ?= /dev/stderr .PHONY: ngts-release ## Publish all release artifacts (image + helm chart) ## @category NGTS Discovery Agent ngts-release: oci_ngts_image_digest_path := $(bin_dir)/scratch/image/oci-layout-ngts.digests ngts-release: helm_digest_path := $(bin_dir)/scratch/helm/discovery-agent-$(helm_chart_version).digests ngts-release: $(MAKE) oci-push-ngts helm-chart-oci-push \ oci_ngts_image_name="$(NGTS_IMAGE)" \ helm_image_name="$(NGTS_IMAGE)" \ helm_image_tag="$(oci_ngts_image_tag)" \ helm_chart_source_dir=deploy/charts/discovery-agent \ helm_chart_image_name="$(NGTS_CHART)" @echo "NGTS_IMAGE=$(NGTS_IMAGE)" >> "$(GITHUB_OUTPUT)" @echo "NGTS_IMAGE_TAG=$(oci_ngts_image_tag)" >> "$(GITHUB_OUTPUT)" @echo "NGTS_IMAGE_DIGEST=$$(head -1 $(oci_ngts_image_digest_path))" >> "$(GITHUB_OUTPUT)" @echo "NGTS_CHART=$(NGTS_CHART)" >> "$(GITHUB_OUTPUT)" @echo "NGTS_CHART_TAG=$(helm_chart_version)" >> "$(GITHUB_OUTPUT)" @echo "NGTS_CHART_DIGEST=$$(head -1 $(helm_digest_path))" >> "$(GITHUB_OUTPUT)" @echo "Release complete!" .PHONY: ngts-test-e2e ## Run a basic E2E test on a Kind cluster ## See `hack/ngts/e2e.sh` for the full test script. 
## @category NGTS Discovery Agent ngts-test-e2e: $(NEEDS_KIND) $(NEEDS_KUBECTL) $(NEEDS_HELM) $(NEEDS_YQ) PATH="$(bin_dir)/tools:${PATH}" ./hack/ngts/test-e2e.sh .PHONY: ngts-verify ## Verify the Helm chart ## @category NGTS Discovery Agent ngts-verify: INSTALL_OPTIONS="--set-string config.tsgID=1234123412 --set config.clusterName=foo" $(MAKE) verify-helm-lint verify-helm-values verify-pod-security-standards verify-helm-kubeconform verify-helm-unittest \ helm_chart_source_dir=deploy/charts/discovery-agent \ helm_chart_image_name=$(NGTS_CHART) shared_verify_targets += ngts-verify .PHONY: ngts-generate ## Generate Helm chart documentation and schema ## @category NGTS Discovery Agent ngts-generate: $(MAKE) generate-helm-docs generate-helm-schema \ helm_chart_source_dir=deploy/charts/discovery-agent shared_generate_targets += ngts-generate ================================================ FILE: make/test-unit.mk ================================================ .PHONY: test-unit ## Unit tests ## @category Testing test-unit: | $(NEEDS_GO) $(NEEDS_GOTESTSUM) $(ARTIFACTS) $(NEEDS_ETCD) $(NEEDS_KUBE-APISERVER) KUBEBUILDER_ASSETS=$(CURDIR)/$(bin_dir)/tools \ $(GOTESTSUM) \ --junitfile=$(ARTIFACTS)/junit-go-e2e.xml \ -- \ -coverprofile=$(ARTIFACTS)/filtered.cov \ ./... 
\ -- \ -ldflags $(go_preflight_ldflags) $(GO) tool cover -func=$(ARTIFACTS)/filtered.cov $(GO) tool cover -html=$(ARTIFACTS)/filtered.cov -o=$(ARTIFACTS)/filtered.html ================================================ FILE: pkg/agent/config.go ================================================ package agent import ( "crypto/x509" "fmt" "io" "net/url" "os" "regexp" "time" "github.com/go-logr/logr" "github.com/hashicorp/go-multierror" "github.com/jetstack/venafi-connection-lib/http_client" "github.com/spf13/cobra" "gopkg.in/yaml.v3" "k8s.io/client-go/rest" "github.com/jetstack/preflight/api" "github.com/jetstack/preflight/pkg/client" "github.com/jetstack/preflight/pkg/datagatherer" "github.com/jetstack/preflight/pkg/datagatherer/k8sdiscovery" "github.com/jetstack/preflight/pkg/datagatherer/k8sdynamic" "github.com/jetstack/preflight/pkg/datagatherer/local" "github.com/jetstack/preflight/pkg/datagatherer/oidc" "github.com/jetstack/preflight/pkg/kubeconfig" "github.com/jetstack/preflight/pkg/logs" "github.com/jetstack/preflight/pkg/version" ) // Config defines the YAML configuration file that you can pass using // `--config-file` or `-c`. type Config struct { // Deprecated: Schedule doesn't do anything. Use `period` instead. Schedule string `yaml:"schedule"` Period time.Duration `yaml:"period"` // Deprecated: Use `server` instead. Endpoint Endpoint `yaml:"endpoint"` // Server is the base URL for the Preflight server. It defaults to // https://preflight.jetstack.io in Jetstack Secure OAuth and Jetstack // Secure API Token modes, and https://api.venafi.cloud in Venafi Cloud Key // Pair Service Account mode. It is ignored in Venafi Cloud VenafiConnection // mode and in MachineHub mode. Server string `yaml:"server"` // OrganizationID is only used in Jetstack Secure OAuth and Jetstack Secure // API Token modes. OrganizationID string `yaml:"organization_id"` // ClusterID is the cluster that the agent is scanning. Only used in Jetstack Secure modes. 
ClusterID string `yaml:"cluster_id"` // ClusterName is the name of the Kubernetes cluster where the agent is running. ClusterName string `yaml:"cluster_name"` // ClusterDescription is a short description of the Kubernetes cluster where the // agent is running. ClusterDescription string `yaml:"cluster_description"` // ClaimableCerts controls whether discovered certs can be claimed by other tenants. // true = certs are left unassigned, available for any tenant to claim. // false (default) = certs are owned by this cluster's tenant. ClaimableCerts bool `yaml:"claimable_certs"` DataGatherers []DataGatherer `yaml:"data-gatherers"` VenafiCloud *VenafiCloudConfig `yaml:"venafi-cloud,omitempty"` // For testing purposes. InputPath string `yaml:"input-path"` // For testing purposes. OutputPath string `yaml:"output-path"` // Skips annotation keys that match the given set of regular expressions. // Example: ".*someprivateannotation.*". ExcludeAnnotationKeysRegex []string `yaml:"exclude-annotation-keys-regex"` // Skips label keys that match the given set of regular expressions. ExcludeLabelKeysRegex []string `yaml:"exclude-label-keys-regex"` } type Endpoint struct { Protocol string `yaml:"protocol"` Host string `yaml:"host"` Path string `yaml:"path"` } type DataGatherer struct { Kind string `yaml:"kind"` Name string `yaml:"name"` DataPath string `yaml:"data_path"` Config datagatherer.Config } type VenafiCloudConfig struct { // Deprecated: UploaderID is ignored by the backend and is not needed. // UploaderID is the upload ID that will be used when creating a cluster // connection. This field is ignored by the backend and is often arbitrarily // set to "no". UploaderID string `yaml:"uploader_id,omitempty"` // UploadPath is the endpoint path for the upload API. Only used in Venafi // Cloud Key Pair Service Account mode. 
UploadPath string `yaml:"upload_path,omitempty"` } type AgentCmdFlags struct { // ConfigFilePath (--config-file, -c) is the path to the agent configuration // YAML file. ConfigFilePath string // Period (--period, -p) is the time waited between scans. It takes // precedence over the config field `period`. Period time.Duration // VenafiCloudMode (--venafi-cloud) turns on the Venafi Cloud Key Pair // Service Account mode. Must be used in conjunction with // --credentials-file. VenafiCloudMode bool // MachineHubMode configures the agent to send data to CyberArk Machine Hub. MachineHubMode bool // ClientID (--client-id) is the clientID in case of Venafi Cloud Key Pair // Service Account mode. ClientID string // PrivateKeyPath (--private-key-path) is the path for the service account // private key in case of Venafi Cloud Key Pair Service Account mode. PrivateKeyPath string // CredentialsPath (--credentials-file, -k) lets you specify the location of // the credentials file. This is used for the Jetstack Secure OAuth and // Venafi Cloud Key Pair Service Account modes. In Venafi Cloud Key Pair // Service Account mode, you also need to pass --venafi-cloud. CredentialsPath string // OneShot (--one-shot) is used for testing purposes. The agent will run // once and exit. It is often used in conjunction with --output-path and/or // --input-path. OneShot bool // OutputPath (--output-path) is used for testing purposes. In conjunction // with --one-shot, it allows you to write the data readings to a file // instead uploading them to the Venafi Cloud API. OutputPath string // InputPath (--input-path) is used for testing purposes. In conjunction // with --one-shot, it allows you to push manually crafted data readings (in // JSON format) to the Venafi Cloud API without the need to connect to a // Kubernetes cluster. 
See the jscp-testing-cli's README for more info: // https://gitlab.com/venafi/vaas/applications/tls-protect-for-k8s/cloud-services/-/tree/master/jscp-testing-cli InputPath string // BackoffMaxTime (--backoff-max-time) is the maximum time for which data // gatherers will retry after a failure. BackoffMaxTime time.Duration // StrictMode (--strict) causes the agent to fail at the first attempt. StrictMode bool // APIToken (--api-token) allows you to use the Jetstack Secure API Token // mode. Defaults to the value of the env var API_TOKEN. APIToken string // VenConnName (--venafi-connection) is the name of the VenafiConnection // resource to use. Using this flag will enable Venafi Connection mode. VenConnName string // VenConnNS (--venafi-connection-namespace) is the namespace of the // VenafiConnection resource to use. It is only useful when the // VenafiConnection isn't in the same namespace as the agent. // // May be left empty to use the same namespace as the agent. VenConnNS string // InstallNS (--install-namespace) is the namespace in which the agent is // running in. Only needed when running the agent outside of Kubernetes. // // May be left empty when running in Kubernetes. In Kubernetes, the // namespace is read from the environment variable `POD_NAMESPACE`. InstallNS string // Profiling (--enable-pprof) enables the pprof server. Profiling bool // Prometheus (--enable-metrics) enables the Prometheus metrics server. Prometheus bool // NGTSMode (--ngts) turns on the NGTS mode. The agent will authenticate // using key pair authentication and send data to NGTS endpoints. NGTSMode bool // TSGID (--tsg-id) is the TSG (Tenant Service Group) ID for NGTS mode. TSGID string // NGTSServerURL (--ngts-server-url) is a hidden flag for developers to // override the NGTS server URL for testing purposes. 
NGTSServerURL string } func InitAgentCmdFlags(c *cobra.Command, cfg *AgentCmdFlags) { c.PersistentFlags().StringVarP( &cfg.ConfigFilePath, "agent-config-file", "c", "./agent.yaml", "Config file location, default is `agent.yaml` in the current working directory.", ) c.PersistentFlags().DurationVarP( &cfg.Period, "period", "p", 0, "Override time between scans in the configuration file (given as XhYmZs).", ) c.PersistentFlags().StringVarP( &cfg.CredentialsPath, "credentials-file", "k", "", fmt.Sprintf("Location of the credentials file. For the %s and %s modes.", JetstackSecureOAuth, VenafiCloudKeypair), ) c.PersistentFlags().BoolVarP( &cfg.VenafiCloudMode, "venafi-cloud", "", false, fmt.Sprintf("Turns on the %s mode. The flag --credentials-file must also be passed.", JetstackSecureOAuth), ) if err := c.PersistentFlags().MarkHidden("venafi-cloud"); err != nil { panic(err) } c.PersistentFlags().StringVarP( &cfg.ClientID, "client-id", "", "", fmt.Sprintf("Turns on the %s mode. If you use this flag you don't need to use --venafi-cloud "+ "as it will assume you are authenticating with Venafi Cloud. Using this removes the need to use a "+ "credentials file.", VenafiCloudKeypair), ) c.PersistentFlags().StringVarP( &cfg.PrivateKeyPath, "private-key-path", "", "", "To be used in conjunction with --client-id. The path to the private key file for the service account.", ) c.PersistentFlags().BoolVarP( &cfg.OneShot, "one-shot", "", false, "For testing purposes. The agent will run once and exit. It is often used in conjunction with --output-path and/or --input-path.", ) c.PersistentFlags().StringVarP( &cfg.OutputPath, "output-path", "", "", "For testing purposes. In conjunction with --one-shot, it allows you to write the data readings to a file instead of uploading to the server.", ) c.PersistentFlags().StringVarP( &cfg.InputPath, "input-path", "", "", "For testing purposes. 
In conjunction with --one-shot, it allows you to push manually crafted data readings (in JSON format) to the Venafi Cloud API without the need to connect to a Kubernetes cluster.", ) c.PersistentFlags().DurationVarP( &cfg.BackoffMaxTime, "backoff-max-time", "", 10*time.Minute, "Max time for retrying failed data gatherers (given as XhYmZs).", ) c.PersistentFlags().BoolVarP( &cfg.StrictMode, "strict", "", false, "Runs agent in strict mode. No retry attempts will be made for a missing data gatherer's data.", ) c.PersistentFlags().StringVar( &cfg.APIToken, "api-token", os.Getenv("API_TOKEN"), "Turns on the "+string(JetstackSecureAPIToken)+" mode. Defaults to the value of the env var API_TOKEN.", ) c.PersistentFlags().StringVar( &cfg.VenConnName, "venafi-connection", "", "Turns on the "+string(VenafiCloudVenafiConnection)+" mode. "+ "This flag configures the name of the VenafiConnection to be used.", ) c.PersistentFlags().StringVar( &cfg.VenConnNS, "venafi-connection-namespace", "", "Namespace of the VenafiConnection to be used. It is only useful when the "+ "VenafiConnection isn't in the same namespace as the agent. The field `allowReferencesFrom` "+ "must be present on the cross-namespace VenafiConnection for the agent to use it.", ) c.PersistentFlags().StringVar( &cfg.InstallNS, "install-namespace", "", "For testing purposes. Namespace in which the agent is running. "+ "Only needed when running the agent outside of Kubernetes.", ) c.PersistentFlags().BoolVarP( &cfg.Profiling, "enable-pprof", "", false, "Enables the pprof profiling endpoints on the agent server (port: 8081).", ) c.PersistentFlags().BoolVarP( &cfg.Prometheus, "enable-metrics", "", false, "Enables Prometheus metrics server on the agent (port: 8081).", ) var dummy bool c.PersistentFlags().BoolVar( &dummy, "disable-compression", false, "Deprecated. 
No longer has an effect.", ) if err := c.PersistentFlags().MarkDeprecated("disable-compression", "no longer has an effect"); err != nil { panic(err) } // This is a hidden feature flag we use to build the "Machine Hub" feature // gradually without impacting customers. Once the feature is GA, we will // turn this flag "on" by default. c.PersistentFlags().BoolVar( &cfg.MachineHubMode, "machine-hub", false, "Enables the MachineHub mode. The agent will push data to CyberArk MachineHub.", ) if err := c.PersistentFlags().MarkHidden("machine-hub"); err != nil { panic(err) } c.PersistentFlags().BoolVar( &cfg.NGTSMode, "ngts", false, "Enables NGTS mode. The agent will authenticate using key pair authentication and send data to NGTS endpoints. "+ "Must be used in conjunction with --tsg-id and --private-key-path. --client-id is optional if provided in the credentials secret.", ) c.PersistentFlags().StringVar( &cfg.TSGID, "tsg-id", "", "The TSG (Tenant Service Group) ID for NGTS mode. Required when using --ngts.", ) ngtsServerURLFlag := "ngts-server-url" c.PersistentFlags().StringVar( &cfg.NGTSServerURL, ngtsServerURLFlag, "", "Override the NGTS server URL for testing purposes. This flag is intended for agent development and should not need to be set.", ) // ngts-server-url is intended only for developers, so hide it from help if err := c.PersistentFlags().MarkHidden(ngtsServerURLFlag); err != nil { panic(err) } } // OutputMode controls how the collected data is published. // Only one OutputMode may be provided. 
type OutputMode string const ( JetstackSecureOAuth OutputMode = "Jetstack Secure OAuth" JetstackSecureAPIToken OutputMode = "Jetstack Secure API Token" VenafiCloudKeypair OutputMode = "Venafi Cloud Key Pair Service Account" VenafiCloudVenafiConnection OutputMode = "Venafi Cloud VenafiConnection" LocalFile OutputMode = "Local File" MachineHub OutputMode = "MachineHub" NGTS OutputMode = "NGTS" ) // The command-line flags and the config file and some environment variables are // combined into this struct by ValidateAndCombineConfig. type CombinedConfig struct { DataGatherers []DataGatherer Period time.Duration BackoffMaxTime time.Duration InstallNS string StrictMode bool OneShot bool OutputMode OutputMode // Only used in JetstackSecure modes. ClusterID string // Used by JetstackSecureOAuth, JetstackSecureAPIToken, and // VenafiCloudKeypair. Ignored in VenafiCloudVenafiConnection mode. Server string // JetstackSecureOAuth and JetstackSecureAPIToken modes only. OrganizationID string EndpointPath string // Deprecated. // VenafiCloudKeypair mode only. UploadPath string // ClusterName is the name of the Kubernetes cluster where the agent is // running. ClusterName string // ClusterDescription is a short description of the Kubernetes cluster where // the agent is running. ClusterDescription string // ClaimableCerts controls whether discovered certs can be claimed by other tenants. // true = certs are left unassigned, available for any tenant to claim. // false (default) = certs are owned by this cluster's tenant. ClaimableCerts bool // VenafiCloudVenafiConnection mode only. VenConnName string VenConnNS string // VenafiCloudKeypair and VenafiCloudVenafiConnection modes only. ExcludeAnnotationKeysRegex []*regexp.Regexp ExcludeLabelKeysRegex []*regexp.Regexp // NGTS mode only. TSGID string NGTSServerURL string // Only used for testing purposes. 
	OutputPath string
	InputPath  string
}

// ValidateAndCombineConfig combines and validates the input configuration with
// the flags passed to the agent and returns the final configuration as well as
// the Venafi client to be used to upload data. Does not do any network call.
// The logger can be changed for testing purposes. You do not need to call
// ValidateDataGatherers as ValidateAndCombineConfig already does that.
//
// The error returned may be a multierror.Error. Use multierror.Prefix(err,
// "context:") rather than fmt.Errorf("context: %w", err) when wrapping the
// error.
func ValidateAndCombineConfig(log logr.Logger, cfg Config, flags AgentCmdFlags) (CombinedConfig, client.Client, error) {
	res := CombinedConfig{}

	// Selection of the output mode. The order of the cases defines the
	// precedence between the mode-selecting flags.
	{
		var (
			mode          OutputMode
			reason        string
			keysAndValues []any
		)
		switch {
		case flags.NGTSMode:
			mode = NGTS
			reason = "--ngts was specified"
			keysAndValues = []any{"ngts", true}
		case flags.VenafiCloudMode && flags.CredentialsPath != "":
			mode = VenafiCloudKeypair
			reason = "--venafi-cloud and --credentials-path were specified"
			keysAndValues = []any{"credentialsPath", flags.CredentialsPath}
		case flags.ClientID != "" || flags.PrivateKeyPath != "":
			// --client-id and --private-key-path must be given together.
			if flags.PrivateKeyPath == "" {
				return CombinedConfig{}, nil, fmt.Errorf("if --client-id is specified, --private-key-path must also be specified")
			}
			if flags.ClientID == "" {
				return CombinedConfig{}, nil, fmt.Errorf("--private-key-path is specified, --client-id must also be specified")
			}
			mode = VenafiCloudKeypair
			reason = "--client-id and --private-key-path were specified"
			keysAndValues = []any{"clientID", flags.ClientID, "privateKeyPath", flags.PrivateKeyPath}
		case flags.VenConnName != "":
			mode = VenafiCloudVenafiConnection
			reason = "--venafi-connection was specified"
			keysAndValues = []any{"venConnName", flags.VenConnName}
		case flags.APIToken != "":
			mode = JetstackSecureAPIToken
			reason = "--api-token was specified"
		case !flags.VenafiCloudMode && flags.CredentialsPath != "":
			mode = JetstackSecureOAuth
			reason = "--credentials-file was specified without --venafi-cloud"
		case flags.MachineHubMode:
			mode = MachineHub
			reason = "--machine-hub was specified"
		case flags.OutputPath != "":
			mode = LocalFile
			reason = "--output-path was specified"
		case cfg.OutputPath != "":
			mode = LocalFile
			reason = "output-path was specified in the config file"
		default:
			return CombinedConfig{}, nil, fmt.Errorf("no output mode specified. " +
				"To enable one of the output modes, you can:\n" +
				" - Use --ngts with --tsg-id and --private-key-path to use the " + string(NGTS) + " mode (--client-id is optional if provided in the credentials secret).\n" +
				" - Use (--venafi-cloud with --credentials-file) or (--client-id with --private-key-path) to use the " + string(VenafiCloudKeypair) + " mode.\n" +
				" - Use --venafi-connection for the " + string(VenafiCloudVenafiConnection) + " mode.\n" +
				" - Use --credentials-file alone if you want to use the " + string(JetstackSecureOAuth) + " mode.\n" +
				" - Use --api-token if you want to use the " + string(JetstackSecureAPIToken) + " mode.\n" +
				" - Use --machine-hub if you want to use the " + string(MachineHub) + " mode.\n" +
				" - Use --output-path or output-path in the config file for " + string(LocalFile) + " mode.")
		}
		keysAndValues = append(keysAndValues, "mode", mode, "reason", reason)
		log.V(logs.Debug).Info("Output mode selected", keysAndValues...)
		res.OutputMode = mode
	}

	// Errors below are accumulated so that the user sees all configuration
	// problems at once instead of one at a time.
	var errs error

	// Validation of NGTS mode requirements.
	if res.OutputMode == NGTS {
		if flags.TSGID == "" {
			errs = multierror.Append(errs, fmt.Errorf("--tsg-id is required when using --ngts"))
		}
		if flags.PrivateKeyPath == "" {
			errs = multierror.Append(errs, fmt.Errorf("--private-key-path is required when using --ngts"))
		}
		// Error if MachineHub mode is also enabled
		if flags.MachineHubMode {
			errs = multierror.Append(errs, fmt.Errorf("--machine-hub cannot be used with --ngts. These are mutually exclusive modes."))
		}
		// Error if VenafiConnection mode flags are used
		if flags.VenConnName != "" {
			errs = multierror.Append(errs, fmt.Errorf("--venafi-connection cannot be used with --ngts. Use --client-id and --private-key-path instead."))
		}
		// Error if Jetstack Secure OAuth mode flags are used
		if !flags.VenafiCloudMode && flags.CredentialsPath != "" {
			errs = multierror.Append(errs, fmt.Errorf("--credentials-file (for Jetstack Secure OAuth) cannot be used with --ngts. Use --client-id and --private-key-path instead."))
		}
		// Error if API Token mode is used
		if flags.APIToken != "" {
			errs = multierror.Append(errs, fmt.Errorf("--api-token cannot be used with --ngts. Use --client-id and --private-key-path instead."))
		}
		// Error if --venafi-cloud is used with --ngts
		if flags.VenafiCloudMode {
			errs = multierror.Append(errs, fmt.Errorf("--venafi-cloud cannot be used with --ngts. These are different deployment targets."))
		}
		// Error if organization_id or cluster_id are set in config (these are for Jetstack Secure / CM-SaaS)
		if cfg.OrganizationID != "" {
			errs = multierror.Append(errs, fmt.Errorf("organization_id in config file is not supported in NGTS mode. This field is only for Jetstack Secure."))
		}
		if cfg.ClusterID != "" {
			errs = multierror.Append(errs, fmt.Errorf("cluster_id in config file is not supported in NGTS mode. Use cluster_name instead."))
		}
		res.TSGID = flags.TSGID
		res.NGTSServerURL = flags.NGTSServerURL
	}

	// Validation and defaulting of `server` and the deprecated `endpoint.path`.
	{
		// Only relevant if using TLSPK backends
		hasEndpointField := cfg.Endpoint.Host != "" && cfg.Endpoint.Path != ""
		hasServerField := cfg.Server != ""
		var server string
		var endpointPath string // Deprecated. Only used when the `endpoint` field is set.
		switch {
		case hasServerField && !hasEndpointField:
			server = cfg.Server
		case hasServerField && hasEndpointField:
			// The `server` field takes precedence over the deprecated
			// `endpoint` field.
			log.Info("The `server` and `endpoint` fields are both set in the config; using the `server` field.")
			server = cfg.Server
		case !hasServerField && hasEndpointField:
			log.Info("Using deprecated Endpoint configuration. User Server instead.")
			if cfg.Endpoint.Protocol == "" && cfg.Server == "" {
				cfg.Endpoint.Protocol = "http"
			}
			server = fmt.Sprintf("%s://%s", cfg.Endpoint.Protocol, cfg.Endpoint.Host)
			endpointPath = cfg.Endpoint.Path
		case !hasServerField && !hasEndpointField:
			// Neither field is set: fall back to a mode-dependent default.
			server = "https://preflight.jetstack.io"
			if res.OutputMode == VenafiCloudKeypair {
				// The VenafiCloudVenafiConnection mode doesn't need a server.
				server = client.VenafiCloudProdURL
			}
			if res.OutputMode == NGTS {
				// In NGTS mode, use NGTSServerURL if provided, otherwise we'll use a default
				// (which will be determined when creating the client)
				server = res.NGTSServerURL
			}
		}

		// In NGTS mode: ignore the config-file server field entirely; use only
		// --ngts-server-url when provided (default URL is derived from TSG ID
		// at client construction time).
		if res.OutputMode == NGTS {
			if res.NGTSServerURL != "" {
				log.Info("Using custom NGTS server URL (for testing)", "url", res.NGTSServerURL)
			}
			// config-file server field has no impact in NGTS mode so warn about it
			if cfg.Server != "" {
				log.Info(fmt.Sprintf("ignoring the server field in the config file. In %s mode, use --ngts-server-url for testing.", NGTS))
			}
			server = res.NGTSServerURL
		}

		url, urlErr := url.Parse(server)
		if server != "" && (urlErr != nil || url.Hostname() == "") {
			errs = multierror.Append(errs, fmt.Errorf("server %q is not a valid URL", server))
		}
		if res.OutputMode == VenafiCloudVenafiConnection && server != "" {
			log.Info(fmt.Sprintf("ignoring the server field specified in the config file. In %s mode, this field is not needed.", VenafiCloudVenafiConnection))
			server = ""
		}
		res.Server = server
		res.EndpointPath = endpointPath
	}

	// Validation of `venafi-cloud.upload_path`.
	{
		var uploadPath string
		switch res.OutputMode { // nolint:exhaustive
		case VenafiCloudKeypair:
			if cfg.VenafiCloud == nil || cfg.VenafiCloud.UploadPath == "" {
				errs = multierror.Append(errs, fmt.Errorf("the venafi-cloud.upload_path field is required when using the %s mode", res.OutputMode))
				break // Skip to the end of the switch statement.
			}
			_, urlErr := url.Parse(cfg.VenafiCloud.UploadPath)
			if urlErr != nil {
				errs = multierror.Append(errs, fmt.Errorf("upload_path is not a valid URL"))
				break // Skip to the end of the switch statement.
			}
			uploadPath = cfg.VenafiCloud.UploadPath
		case VenafiCloudVenafiConnection:
			// The venafi-cloud.upload_path was initially meant to let users
			// configure HTTP proxies, but it has never been used since HTTP
			// proxies don't rewrite paths. Thus, we've disabled the ability to
			// change this value with the new --venafi-connection flag, and this
			// field is simply ignored.
			if cfg.VenafiCloud != nil && cfg.VenafiCloud.UploadPath != "" {
				log.Info(fmt.Sprintf(`ignoring the venafi-cloud.upload_path field in the config file. In %s mode, this field is not needed.`, res.OutputMode))
			}
			uploadPath = ""
		case NGTS:
			// NGTS mode doesn't use the upload_path field
			if cfg.VenafiCloud != nil && cfg.VenafiCloud.UploadPath != "" {
				log.Info(fmt.Sprintf(`ignoring the venafi-cloud.upload_path field in the config file. In %s mode, this field is not needed.`, res.OutputMode))
			}
			uploadPath = ""
		}
		res.UploadPath = uploadPath
	}

	// Validation of `uploader_id`.
	//
	// We found that `venafi-cloud.uploader_id` doesn't do anything in the
	// backend. Since the backend requires it for historical reasons (but cannot
	// be empty), we just ignore whatever the user has set in the config file,
	// and set it to an arbitrary value in the client since it doesn't matter.
	//
	// TODO(mael): Remove the arbitrary `/no` path parameter from the Agent once
	// https://venafi.atlassian.net/browse/VC-35385 is done.
	{
		if cfg.VenafiCloud != nil && cfg.VenafiCloud.UploaderID != "" {
			log.Info(fmt.Sprintf(`ignoring the venafi-cloud.uploader_id field in the config file. This field is not needed in %s mode.`, res.OutputMode))
		}
	}

	// Validation of `cluster_name`, `cluster_id` and `organization_id`.
	{
		var clusterName string    // Required by venafi cloud modes. Optional for MachineHub mode.
		var clusterID string      // Required by the old jetstack-secure mode deprecated for venafi cloud modes.
		var organizationID string // Only used by the old jetstack-secure mode.
		switch res.OutputMode { // nolint:exhaustive
		case NGTS:
			// NGTS mode requires cluster_name
			if cfg.ClusterName == "" {
				errs = multierror.Append(errs, fmt.Errorf("cluster_name is required in %s mode", res.OutputMode))
			}
			clusterName = cfg.ClusterName
			// cluster_id and organization_id were already validated to not be present in NGTS mode
		case VenafiCloudKeypair, VenafiCloudVenafiConnection:
			// For backwards compatibility, use the agent config's `cluster_id` as
			// ClusterName if `cluster_name` is not set.
			if cfg.ClusterName == "" && cfg.ClusterID == "" {
				errs = multierror.Append(errs, fmt.Errorf("cluster_name or cluster_id is required in %s mode", res.OutputMode))
			}
			if cfg.ClusterName != "" && cfg.ClusterID != "" {
				log.Info(fmt.Sprintf(`Ignoring the cluster_id field in the config file. This field is not needed in %s mode.`, res.OutputMode))
			}
			clusterName = cfg.ClusterName
			if clusterName == "" {
				log.Info("Using cluster_id as cluster_name for backwards compatibility", "clusterID", cfg.ClusterID)
				clusterName = cfg.ClusterID
			}
			if cfg.OrganizationID != "" {
				log.Info(fmt.Sprintf(`Ignoring the organization_id field in the config file. This field is not needed in %s mode.`, res.OutputMode))
			}
		case JetstackSecureOAuth, JetstackSecureAPIToken:
			if cfg.OrganizationID == "" {
				errs = multierror.Append(errs, fmt.Errorf("organization_id is required"))
			}
			if cfg.ClusterID == "" {
				errs = multierror.Append(errs, fmt.Errorf("cluster_id is required"))
			}
			organizationID = cfg.OrganizationID
			clusterID = cfg.ClusterID
		case MachineHub:
			clusterName = cfg.ClusterName
			if clusterName == "" {
				if arkUsername, found := os.LookupEnv("ARK_USERNAME"); found {
					log.Info("Using ARK_USERNAME environment variable as cluster name", "clusterName", arkUsername)
					clusterName = arkUsername
				}
			}
			if cfg.OrganizationID != "" {
				log.Info(fmt.Sprintf(`Ignoring the organization_id field in the config file. This field is not needed in %s mode.`, res.OutputMode))
			}
			if cfg.ClusterID != "" {
				log.Info(fmt.Sprintf(`Ignoring the cluster_id field in the config file. This field is not needed in %s mode.`, res.OutputMode))
			}
		}
		res.OrganizationID = organizationID
		res.ClusterID = clusterID
		res.ClusterName = clusterName
		res.ClusterDescription = cfg.ClusterDescription
		res.ClaimableCerts = cfg.ClaimableCerts
	}

	// Validation of `data-gatherers`.
	{
		if dgErr := ValidateDataGatherers(cfg.DataGatherers); dgErr != nil {
			errs = multierror.Append(errs, dgErr)
		}
		res.DataGatherers = cfg.DataGatherers
	}

	// Validation of --period, -p, and the `period` field, as well as
	// --backoff-max-time, --one-shot, and --strict. The flag --period/-p takes
	// precedence over the config `period`.
	{
		var period time.Duration
		switch {
		case flags.OneShot:
			// OneShot mode doesn't need a period, skipping validation.
		case flags.Period == 0 && cfg.Period == 0:
			errs = multierror.Append(errs, fmt.Errorf("period must be set using --period or -p, or using the 'period' field in the config file"))
		case flags.Period == 0 && cfg.Period > 0:
			log.Info("Using period from config", "period", cfg.Period)
			period = cfg.Period
		case flags.Period > 0 && cfg.Period == 0:
			period = flags.Period
		case flags.Period > 0 && cfg.Period > 0:
			// The flag takes precedence.
			log.Info("Both the 'period' field and --period are set. Using the value provided with --period.")
			period = flags.Period
		}
		res.Period = period
		res.OneShot = flags.OneShot
		res.BackoffMaxTime = flags.BackoffMaxTime
		res.StrictMode = flags.StrictMode
	}

	// Validation of --install-namespace. Outside of VenafiConnection mode, a
	// missing namespace is tolerated (the field stays empty).
	{
		installNS := flags.InstallNS
		if installNS == "" {
			var err error
			installNS, err = getInClusterNamespace()
			if err != nil {
				if res.OutputMode == VenafiCloudVenafiConnection {
					errs = multierror.Append(errs, fmt.Errorf("could not guess which namespace the agent is running in: %w", err))
				}
			}
		}
		res.InstallNS = installNS
	}

	// Validation of --venafi-connection and --venafi-connection-namespace.
	if res.OutputMode == VenafiCloudVenafiConnection {
		res.VenConnName = flags.VenConnName
		venConnNS := flags.VenConnNS
		if flags.VenConnNS == "" {
			venConnNS = res.InstallNS
		}
		res.VenConnNS = venConnNS
	}

	// Validation of --output-path, --input-path, `output-path`, and
	// `input-path`. The flags --output-path and --input-path take precedence.
	{
		res.InputPath = cfg.InputPath
		res.OutputPath = cfg.OutputPath
		if flags.OutputPath != "" {
			res.OutputPath = flags.OutputPath
		}
		if flags.InputPath != "" {
			res.InputPath = flags.InputPath
		}
	}

	// Validation of the config fields exclude_annotation_keys_regex and
	// exclude_label_keys_regex.
	{
		// Compile each user-supplied pattern; invalid patterns are collected
		// as errors rather than aborting, so all of them are reported.
		for i, regex := range cfg.ExcludeAnnotationKeysRegex {
			r, err := regexp.Compile(regex)
			if err != nil {
				errs = multierror.Append(errs, fmt.Errorf("invalid exclude_annotation_keys_regex[%d]: %w", i, err))
				continue
			}
			res.ExcludeAnnotationKeysRegex = append(res.ExcludeAnnotationKeysRegex, r)
		}
		for i, regex := range cfg.ExcludeLabelKeysRegex {
			r, err := regexp.Compile(regex)
			if err != nil {
				errs = multierror.Append(errs, fmt.Errorf("invalid exclude_label_keys_regex[%d]: %w", i, err))
				continue
			}
			res.ExcludeLabelKeysRegex = append(res.ExcludeLabelKeysRegex, r)
		}
	}

	if errs != nil {
		return CombinedConfig{}, nil, errs
	}

	outputClient, err := validateCredsAndCreateClient(log, flags.CredentialsPath, flags.ClientID, flags.PrivateKeyPath, flags.APIToken, res)
	if err != nil {
		return CombinedConfig{}, nil, multierror.Prefix(err, "validating creds:")
	}

	return res, outputClient, nil
}

// Validation of --credentials-file/-k, --client-id, and --private-key-path,
// --api-token, and creation of the client.
//
// The error returned may be a multierror.Error. Use multierror.Prefix(err,
// "context:") rather than fmt.Errorf("context: %w", err) when wrapping the
// error.
func validateCredsAndCreateClient(log logr.Logger, flagCredentialsPath, flagClientID, flagPrivateKeyPath, flagAPIToken string, cfg CombinedConfig) (client.Client, error) {
	var errs error

	var outputClient client.Client
	metadata := &api.AgentMetadata{Version: version.PreflightVersion, ClusterID: cfg.ClusterID}
	switch cfg.OutputMode {
	case JetstackSecureOAuth:
		// Note that there are no command line flags to configure the
		// JetstackSecureOAuth mode.
		credsBytes, err := readCredentialsFile(flagCredentialsPath)
		if err != nil {
			errs = multierror.Append(errs, multierror.Prefix(err, "credentials file:"))
			break // Don't continue with parsing if could not load the file.
		}
		creds, err := client.ParseOAuthCredentials(credsBytes)
		if err != nil {
			errs = multierror.Append(errs, multierror.Prefix(err, "credentials file:"))
			break // Don't continue with the client if credentials file invalid.
		}
		outputClient, err = client.NewOAuthClient(metadata, creds, cfg.Server)
		if err != nil {
			errs = multierror.Append(errs, err)
		}
	case VenafiCloudKeypair:
		var creds *client.VenafiSvcAccountCredentials
		// --client-id/--private-key-path and --credentials-file are mutually
		// exclusive ways of providing the service account credentials.
		if flagClientID != "" && flagCredentialsPath != "" {
			errs = multierror.Append(errs, fmt.Errorf("--client-id and --credentials-file cannot be used simultaneously"))
			break
		}
		if flagPrivateKeyPath != "" && flagCredentialsPath != "" {
			errs = multierror.Append(errs, fmt.Errorf("--private-key-path and --credentials-file cannot be used simultaneously"))
			break
		}
		if flagClientID == "" && flagPrivateKeyPath == "" && flagCredentialsPath == "" {
			errs = multierror.Append(errs, fmt.Errorf("either --client-id and --private-key-path or --credentials-file must be provided"))
			break
		}
		switch {
		case flagClientID != "" && flagPrivateKeyPath != "":
			// If --client-id and --private-key-path are passed, then
			// --credentials-file is ignored.
			creds = &client.VenafiSvcAccountCredentials{
				ClientID:       flagClientID,
				PrivateKeyFile: flagPrivateKeyPath,
			}
		case flagCredentialsPath != "":
			credsBytes, err := readCredentialsFile(flagCredentialsPath)
			if err != nil {
				errs = multierror.Append(errs, multierror.Prefix(err, "credentials file:"))
				break // Don't continue if couldn't read the creds file.
			}
			creds, err = client.ParseVenafiCredentials(credsBytes)
			if err != nil {
				errs = multierror.Append(errs, multierror.Prefix(err, "credentials file:"))
				break // Don't continue with the client since creds is invalid.
			}
		default:
			return nil, fmt.Errorf("programmer mistake: --client-id and --private-key-path or --credentials-file must have been provided")
		}

		// The uploader ID isn't actually used in the backend, let's use an
		// arbitrary value.
		uploaderID := "no"

		// We don't do this for the VenafiCloudVenafiConnection mode because
		// the upload_path field is ignored in that mode.
		log.Info("Loading upload_path from \"venafi-cloud\" configuration.")

		var err error
		outputClient, err = client.NewVenafiCloudClient(metadata, creds, cfg.Server, uploaderID, cfg.UploadPath)
		if err != nil {
			errs = multierror.Append(errs, err)
		}
	case VenafiCloudVenafiConnection:
		var restCfg *rest.Config
		restCfg, err := kubeconfig.LoadRESTConfig("")
		if err != nil {
			errs = multierror.Append(errs, fmt.Errorf("loading kubeconfig: %w", err))
			break // Don't continue with the client if kubeconfig wasn't loaded.
		}
		outputClient, err = client.NewVenConnClient(restCfg, metadata, cfg.InstallNS, cfg.VenConnName, cfg.VenConnNS, nil)
		if err != nil {
			errs = multierror.Append(errs, err)
		}
	case JetstackSecureAPIToken:
		var err error
		outputClient, err = client.NewAPITokenClient(metadata, flagAPIToken, cfg.Server)
		if err != nil {
			errs = multierror.Append(errs, err)
		}
	case LocalFile:
		outputClient = client.NewFileClient(cfg.OutputPath)
	case MachineHub:
		var (
			err     error
			rootCAs *x509.CertPool
		)
		httpClient := http_client.NewDefaultClient(version.UserAgent(), rootCAs)
		outputClient, err = client.NewCyberArk(httpClient)
		if err != nil {
			errs = multierror.Append(errs, err)
		}
	case NGTS:
		var creds *client.NGTSServiceAccountCredentials
		if flagPrivateKeyPath == "" {
			errs = multierror.Append(errs, fmt.Errorf("--private-key-path is required for NGTS mode"))
			break
		}
		creds = &client.NGTSServiceAccountCredentials{
			ClientID:       flagClientID,
			PrivateKeyFile: flagPrivateKeyPath,
		}
		// rootCAs can be used in future to support custom CA certs, but for now will remain empty
		var rootCAs *x509.CertPool
		var err error
		outputClient, err = client.NewNGTSClient(metadata, creds, cfg.Server, cfg.TSGID, rootCAs)
		if err != nil {
			errs = multierror.Append(errs, err)
		}
	default:
		panic(fmt.Errorf("programmer mistake: output mode not implemented: %s", cfg.OutputMode))
	}

	if errs != nil {
		return nil,
fmt.Errorf("failed loading config using the %s mode: %w", cfg.OutputMode, errs) } return outputClient, nil } // Same as ValidateAndCombineConfig but just for validating the data gatherers. // This is separate because the `rbac` command only needs to validate the data // gatherers, nothing else. // // The error returned may be a multierror.Error. Use multierror.Prefix(err, // "context:") rather than fmt.Errorf("context: %w", err) when wrapping the // error. func ValidateDataGatherers(dataGatherers []DataGatherer) error { var err error for i, v := range dataGatherers { if v.Kind == "" { err = multierror.Append(err, fmt.Errorf("datagatherer %d/%d is missing a kind", i+1, len(dataGatherers))) } if v.Name == "" { err = multierror.Append(err, fmt.Errorf("datagatherer %d/%d is missing a name", i+1, len(dataGatherers))) } } return err } // Inspired by the controller-runtime project. func getInClusterNamespace() (string, error) { ns := os.Getenv("POD_NAMESPACE") if ns != "" { return ns, nil } return "", fmt.Errorf("POD_NAMESPACE env var not set, meaning that you are probably not running in cluster. Please use --install-namespace or POD_NAMESPACE to specify the namespace in which the agent is running.") } func reMarshal(rawConfig any, config datagatherer.Config) error { bb, err := yaml.Marshal(rawConfig) if err != nil { return nil } err = yaml.Unmarshal(bb, config) if err != nil { return nil } return nil } // UnmarshalYAML unmarshals a dataGatherer resolving the type according to Kind. 
func (dg *DataGatherer) UnmarshalYAML(unmarshal func(any) error) error { aux := struct { Kind string `yaml:"kind"` Name string `yaml:"name"` DataPath string `yaml:"data-path,omitempty"` RawConfig any `yaml:"config"` }{} err := unmarshal(&aux) if err != nil { return err } dg.Kind = aux.Kind dg.Name = aux.Name dg.DataPath = aux.DataPath var cfg datagatherer.Config switch dg.Kind { case "k8s": cfg = &k8sdynamic.ConfigDynamic{} case "k8s-dynamic": cfg = &k8sdynamic.ConfigDynamic{} case "k8s-discovery": cfg = &k8sdiscovery.ConfigDiscovery{} case "oidc": cfg = &oidc.OIDCDiscovery{} case "local": cfg = &local.Config{} // dummy dataGatherer is just used for testing case "dummy": cfg = &dummyConfig{} default: return fmt.Errorf("cannot parse data-gatherer configuration, kind %q is not supported", dg.Kind) } // we encode aux.RawConfig, which is just a map of reflect.Values, into yaml and decode it again to the right type. err = reMarshal(aux.RawConfig, cfg) if err != nil { return err } dg.Config = cfg return nil } // Dump generates a YAML string of the Config object func (c *Config) Dump() (string, error) { d, err := yaml.Marshal(&c) if err != nil { return "", fmt.Errorf("failed to generate YAML dump of config: %w", err) } return string(d), nil } // ParseConfig only parses. It does not validate anything except for the data // gatherer types. To validate the config, use ValidateDataGatherers or // getConfiguration. 
func ParseConfig(data []byte) (Config, error) {
	var config Config
	// Note: unknown fields are silently ignored by the YAML decoder; only the
	// data-gatherer kind is checked (via DataGatherer.UnmarshalYAML).
	err := yaml.Unmarshal(data, &config)
	if err != nil {
		return config, err
	}
	return config, nil
}

// credType identifies which credentials-file format was loaded.
type credType string

const (
	// CredOldJetstackSecureOAuth is the legacy Jetstack Secure OAuth
	// credentials format.
	CredOldJetstackSecureOAuth credType = "CredOldJetstackSecureOAuth"
	// CredVenafiCloudKeypair is the Venafi Cloud key pair service account
	// credentials format.
	CredVenafiCloudKeypair credType = "CredVenafiCloudKeypair"
)

// readCredentialsFile returns the raw bytes of the credentials file at path.
func readCredentialsFile(path string) ([]byte, error) {
	file, err := os.Open(path)
	if err != nil {
		return nil, fmt.Errorf("failed to load credentials from file %s: %w", path, err)
	}
	defer file.Close()
	b, err := io.ReadAll(file)
	if err != nil {
		return nil, fmt.Errorf("failed to read credentials file: %w", err)
	}
	return b, nil
}

================================================
FILE: pkg/agent/config_test.go
================================================

package agent

import (
	"context"
	"fmt"
	"net/http"
	"os"
	"testing"
	"time"

	"github.com/go-logr/logr"
	"github.com/spf13/cobra"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"k8s.io/klog/v2"
	"k8s.io/klog/v2/ktesting"

	"github.com/jetstack/preflight/pkg/client"
	"github.com/jetstack/preflight/pkg/testutil"
)

func Test_ValidateAndCombineConfig(t *testing.T) {
	// For common things like validating `server` and `data-gatherers`, we don't
	// need to test every auth mode. We just test them using the Jetstack Secure
	// OAuth mode.
	// Shared fake Jetstack Secure OAuth credentials used by the generic
	// validation subtests below.
	fakeCredsPath := withFile(t, `{"user_id":"foo","user_secret":"bar","client_id": "baz","client_secret": "foobar","auth_server_domain":"bazbar"}`)

	t.Run("In Venafi Connection mode, --install-namespace must be provided if POD_NAMESPACE is not set", func(t *testing.T) {
		_, _, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				server: https://api.venafi.eu
				organization_id: foo
				cluster_id: bar
				period: 5m
			`)),
			withCmdLineFlags("--venafi-connection", "venafi-components"))
		assert.EqualError(t, err, "1 error occurred:\n\t* could not guess which namespace the agent is running in: POD_NAMESPACE env var not set, meaning that you are probably not running in cluster. Please use --install-namespace or POD_NAMESPACE to specify the namespace in which the agent is running.\n\n")
	})

	t.Run("period must be given with either --period/-p or period field in config", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		_, _, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				server: https://api.venafi.eu
				organization_id: foo
				cluster_id: bar
			`)),
			withCmdLineFlags("--credentials-file", fakeCredsPath))
		assert.EqualError(t, err, "1 error occurred:\n\t* period must be set using --period or -p, or using the 'period' field in the config file\n\n")
	})

	t.Run("period can be provided using --period or -p", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		given := withConfig(testutil.Undent(`
			server: https://api.venafi.eu
			organization_id: foo
			cluster_id: bar
		`))
		// Long flag form.
		got, _, err := ValidateAndCombineConfig(discardLogs(), given, withCmdLineFlags("--period", "5m", "--credentials-file", fakeCredsPath))
		require.NoError(t, err)
		assert.Equal(t, 5*time.Minute, got.Period)
		// Short flag form.
		got, _, err = ValidateAndCombineConfig(discardLogs(), given, withCmdLineFlags("-p", "3m", "--credentials-file", fakeCredsPath))
		require.NoError(t, err)
		assert.Equal(t, 3*time.Minute, got.Period)
	})

	t.Run("period can be provided using the period field in config file", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		got, _, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				server: https://api.venafi.eu
				period: 7m
				organization_id: foo
				cluster_id: bar
			`)),
			withCmdLineFlags("--credentials-file", fakeCredsPath))
		require.NoError(t, err)
		assert.Equal(t, 7*time.Minute, got.Period)
	})

	t.Run("--period flag takes precedence over period field in config, shows warning", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		log, gotLogs := recordLogs(t)
		got, _, err := ValidateAndCombineConfig(log,
			withConfig(testutil.Undent(`
				server: https://api.venafi.eu
				period: 1111m
				organization_id: foo
				cluster_id: bar
			`)),
			withCmdLineFlags("--period", "99m", "--credentials-file", fakeCredsPath))
		require.NoError(t, err)
		assert.Equal(t, testutil.Undent(`
			INFO Output mode selected mode="Jetstack Secure OAuth" reason="--credentials-file was specified without --venafi-cloud"
			INFO Both the 'period' field and --period are set. Using the value provided with --period.
		`), gotLogs.String())
		assert.Equal(t, 99*time.Minute, got.Period)
	})

	t.Run("jetstack-secure-oauth-auth: server field is not required", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		got, _, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				period: 1h
				organization_id: foo
				cluster_id: bar
			`)),
			withCmdLineFlags("--credentials-file", fakeCredsPath))
		require.NoError(t, err)
		assert.Equal(t, "https://preflight.jetstack.io", got.Server)
	})

	t.Run("venafi-cloud-keypair-auth: server field is not required", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		credsPath := withFile(t, `{"client_id": "foo","private_key_file": "`+withFile(t, fakePrivKeyPEM)+`"}`)
		got, _, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				period: 1h
				cluster_id: bar
				venafi-cloud:
				  upload_path: /foo/bar
			`)),
			withCmdLineFlags("--venafi-cloud", "--credentials-file", credsPath))
		require.NoError(t, err)
		assert.Equal(t, "https://api.venafi.cloud", got.Server)
	})

	t.Run("server URL must be valid", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		_, _, gotErr := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				server: "something not a URL"
				period: 1h
				organization_id: "my_org"
				cluster_id: "my_cluster"
				data-gatherers:
				- kind: dummy
				  name: dummy
			`)),
			withCmdLineFlags("--credentials-file", fakeCredsPath))
		assert.EqualError(t, gotErr, testutil.Undent(`
			1 error occurred:
				* server "something not a URL" is not a valid URL

		`))
	})

	t.Run("--strict is passed down", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		got, _, gotErr := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				period: 1h
				organization_id: "my_org"
				cluster_id: "my_cluster"
			`)),
			withCmdLineFlags("--strict", "--credentials-file", fakeCredsPath))
		require.NoError(t, gotErr)
		assert.Equal(t, true, got.StrictMode)
	})

	t.Run("--disable-compression is deprecated and doesn't do anything", func(t *testing.T) {
		path := withFile(t,
			`{"user_id":"fpp2624799349@affectionate-hertz6.platform.jetstack.io","user_secret":"foo","client_id": "k3TrDbfLhCgnpAbOiiT2kIE1AbovKzjo","client_secret": "f39w_3KT9Vp0VhzcPzvh-uVbudzqCFmHER3Huj0dvHgJwVrjxsoOQPIw_1SDiCfa","auth_server_domain":"auth.jetstack.io"}`)
		log, b := recordLogs(t)
		_, _, err := ValidateAndCombineConfig(log,
			withConfig(testutil.Undent(`
				server: https://api.venafi.eu
				period: 1h
				organization_id: foo
				cluster_id: bar
			`)),
			withCmdLineFlags("--disable-compression", "--credentials-file", path, "--install-namespace", "venafi"))
		require.NoError(t, err)
		// The log line printed by pflag is not captured by the log recorder.
		assert.Equal(t, testutil.Undent(`
			INFO Output mode selected mode="Jetstack Secure OAuth" reason="--credentials-file was specified without --venafi-cloud"
			INFO Using period from config period="1h0m0s"
		`), b.String())
	})

	t.Run("error when no output mode specified", func(t *testing.T) {
		_, cl, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				server: https://api.venafi.eu
				period: 1h
				organization_id: foo
				cluster_id: bar
			`)),
			withoutCmdLineFlags(),
		)
		assert.EqualError(t, err, testutil.Undent(`
			no output mode specified. To enable one of the output modes, you can:
			- Use --ngts with --tsg-id and --private-key-path to use the NGTS mode (--client-id is optional if provided in the credentials secret).
			- Use (--venafi-cloud with --credentials-file) or (--client-id with --private-key-path) to use the Venafi Cloud Key Pair Service Account mode.
			- Use --venafi-connection for the Venafi Cloud VenafiConnection mode.
			- Use --credentials-file alone if you want to use the Jetstack Secure OAuth mode.
			- Use --api-token if you want to use the Jetstack Secure API Token mode.
			- Use --machine-hub if you want to use the MachineHub mode.
			- Use --output-path or output-path in the config file for Local File mode.`))
		assert.Nil(t, cl)
	})

	t.Run("jetstack-secure-oauth-auth: sample config", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		// `client_id`, `client_secret`, and `auth_server_domain` are usually
		// injected at build time, but we can't do that in tests, so we need to
		// provide them in the credentials file.
		credsPath := withFile(t, `{"user_id":"fpp2624799349@affectionate-hertz6.platform.jetstack.io","user_secret":"foo","client_id": "k3TrDbfLhCgnpAbOiiT2kIE1AbovKzjo","client_secret": "f39w_3KT9Vp0VhzcPzvh-uVbudzqCFmHER3Huj0dvHgJwVrjxsoOQPIw_1SDiCfa","auth_server_domain":"auth.jetstack.io"}`)
		got, cl, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				period: 5m
				endpoint:
				  host: example.com
				  path: api/v1/data
				schedule: "* * * * *"
				organization_id: "example"
				cluster_id: "example-cluster"
				data-gatherers:
				  - name: d1
				    kind: dummy
				    config:
				      always-fail: false
			`)),
			withCmdLineFlags("--credentials-file", credsPath),
		)
		expect := CombinedConfig{
			OutputMode:     "Jetstack Secure OAuth",
			ClusterID:      "example-cluster",
			DataGatherers:  []DataGatherer{{Kind: "dummy", Name: "d1", Config: &dummyConfig{}}},
			Period:         5 * time.Minute,
			Server:         "http://example.com",
			OrganizationID: "example",
			EndpointPath:   "api/v1/data",
			BackoffMaxTime: 10 * time.Minute,
			InstallNS:      "venafi",
		}
		require.NoError(t, err)
		assert.Equal(t, expect, got)
		assert.IsType(t, &client.OAuthClient{}, cl)
	})

	t.Run("venafi-cloud-keypair-auth: extended config using --venafi-cloud and --credentials-file", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		privKeyPath := withFile(t, fakePrivKeyPEM)
		credsPath := withFile(t, `{"client_id": "5bc7d07c-45da-11ef-a878-523f1e1d7de1","private_key_file": "`+privKeyPath+`"}`)
		got, cl, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				server: "http://localhost:8080"
				cluster_id: "legacy cluster_id as cluster name"
				period: 1h
				data-gatherers:
				  - name: d1
				    kind: dummy
				    config:
				      always-fail: false
				input-path: "/home"
				output-path: "/nothome"
				venafi-cloud:
				  uploader_id: test-agent
				  upload_path: "/testing/path"
			`)),
			withCmdLineFlags("--venafi-cloud", "--credentials-file", credsPath, "--backoff-max-time", "99m"),
		)
		expect := CombinedConfig{
			Server: "http://localhost:8080",
			Period: time.Hour,
			DataGatherers: []DataGatherer{
				{Name: "d1", Kind: "dummy", Config: &dummyConfig{AlwaysFail: false}},
			},
			InputPath:      "/home",
			OutputPath:     "/nothome",
			UploadPath:     "/testing/path",
			OutputMode:     VenafiCloudKeypair,
			ClusterName:    "legacy cluster_id as cluster name",
			BackoffMaxTime: 99 * time.Minute,
			InstallNS:      "venafi",
		}
		require.NoError(t, err)
		assert.Equal(t, expect, got)
		assert.IsType(t, &client.VenafiCloudClient{}, cl)
	})

	t.Run("venafi-cloud-keypair-auth: using --client-id and --private-key-path", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		privKeyPath := withFile(t, fakePrivKeyPEM)
		got, cl, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				server: "http://localhost:8080"
				period: 1h
				cluster_id: "the cluster name"
				venafi-cloud:
				  upload_path: "/foo/bar"
			`)),
			withCmdLineFlags("--client-id", "5bc7d07c-45da-11ef-a878-523f1e1d7de1", "--private-key-path", privKeyPath),
		)
		require.NoError(t, err)
		assert.Equal(t, VenafiCloudKeypair, got.OutputMode)
		assert.IsType(t, &client.VenafiCloudClient{}, cl)
	})

	t.Run("jetstack-secure-oauth-auth: fail if organization_id or cluster_id is missing and --venafi-cloud not enabled", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		credsPath := withFile(t, `{"user_id":"fpp2624799349@affectionate-hertz6.platform.jetstack.io","user_secret":"foo","client_id": "k3TrDbfLhCgnpAbOiiT2kIE1AbovKzjo","client_secret": "f39w_3KT9Vp0VhzcPzvh-uVbudzqCFmHER3Huj0dvHgJwVrjxsoOQPIw_1SDiCfa","auth_server_domain":"auth.jetstack.io"}`)
		_, _, err := ValidateAndCombineConfig(discardLogs(), withConfig(""), withCmdLineFlags("--credentials-file", credsPath))
		assert.EqualError(t, err, testutil.Undent(`
			3 errors occurred:
				* organization_id is required
				* cluster_id is required
				* period must be set using --period or -p, or using the 'period' field in the config file

		`))
	})

	t.Run("venafi-cloud-keypair-auth: authenticated if --client-id set", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		path := withFile(t, fakePrivKeyPEM)
		_, cl, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				cluster_id: foo
				venafi-cloud:
				  upload_path: /foo/bar
			`)),
			withCmdLineFlags("--venafi-cloud", "--period", "1m", "--client-id", "test-client-id", "--private-key-path", path))
		require.NoError(t, err)
		assert.IsType(t, &client.VenafiCloudClient{}, cl)
	})

	t.Run("venafi-cloud-keypair-auth: valid 1: --client-id and --private-key-path", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		path := withFile(t, fakePrivKeyPEM)
		_, cl, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				cluster_id: foo
				venafi-cloud:
				  upload_path: /foo/bar
			`)),
			withCmdLineFlags("--venafi-cloud", "--period", "1m", "--private-key-path", path, "--client-id", "test-client-id"))
		require.NoError(t, err)
		assert.IsType(t, &client.VenafiCloudClient{}, cl)
	})

	t.Run("venafi-cloud-keypair-auth: valid 2: --venafi-cloud and --credentials-file", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		credsPath := withFile(t, fmt.Sprintf(`{"client_id": "foo","private_key_file": "%s"}`, withFile(t, fakePrivKeyPEM)))
		_, cl, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				cluster_id: foo
				venafi-cloud:
				  upload_path: /foo/bar
			`)),
			withCmdLineFlags("--venafi-cloud", "--credentials-file", credsPath, "--period", "1m"))
		require.NoError(t, err)
		assert.IsType(t, &client.VenafiCloudClient{}, cl)
	})

	t.Run("venafi-cloud-keypair-auth: when --venafi-cloud is used, upload_path is required", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		credsPath := withFile(t, fmt.Sprintf(`{"client_id": "foo","private_key_file": "%s"}`, withFile(t, fakePrivKeyPEM)))
		_, _, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				server: "http://localhost:8080"
				period: 1h
				venafi-cloud:
				  uploader_id: test-agent
				cluster_id: "the cluster name"
			`)),
			withCmdLineFlags("--venafi-cloud", "--credentials-file", credsPath))
		require.EqualError(t, err, "1 error occurred:\n\t* the venafi-cloud.upload_path field is required when using the Venafi Cloud Key Pair Service Account mode\n\n")
	})

	t.Run("jetstack-secure-oauth-auth: --credential-file alone means jetstack-secure oauth auth", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		// `client_id`, `client_secret`, and `auth_server_domain` are usually
		// injected at build time, but we can't do that in tests, so we need to
		// provide them in the credentials file.
		path := withFile(t, `{"user_id":"fpp2624799349@affectionate-hertz6.platform.jetstack.io","user_secret":"foo","client_id": "k3TrDbfLhCgnpAbOiiT2kIE1AbovKzjo","client_secret": "f39w_3KT9Vp0VhzcPzvh-uVbudzqCFmHER3Huj0dvHgJwVrjxsoOQPIw_1SDiCfa","auth_server_domain":"auth.jetstack.io"}`)
		got, cl, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				server: https://api.venafi.eu
				period: 1h
				organization_id: foo
				cluster_id: bar
			`)),
			withCmdLineFlags("--credentials-file", path))
		require.NoError(t, err)
		assert.Equal(t, CombinedConfig{Server: "https://api.venafi.eu", Period: time.Hour, OrganizationID: "foo", ClusterID: "bar", OutputMode: JetstackSecureOAuth, BackoffMaxTime: 10 * time.Minute, InstallNS: "venafi"}, got)
		assert.IsType(t, &client.OAuthClient{}, cl)
	})

	t.Run("jetstack-secure-oauth-auth: --credential-file used but file is missing", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		got, _, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				server: https://api.venafi.eu
				period: 1h
				organization_id: foo
				cluster_id: bar
			`)),
			withCmdLineFlags("--credentials-file", "credentials.json"))
		assert.EqualError(t, err, testutil.Undent(`
			validating creds: failed loading config using the Jetstack Secure OAuth mode: 1 error occurred:
				* credentials file: failed to load credentials from file credentials.json: open credentials.json: no such file or directory

		`))
		assert.Equal(t, CombinedConfig{}, got)
	})

	t.Run("jetstack-secure-oauth-auth: shows helpful err messages", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		credsPath := withFile(t, `{"user_id":""}`)
		_, _, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				server: https://api.venafi.eu
				period: 1h
				organization_id: foo
				cluster_id: bar
			`)),
			withCmdLineFlags("--credentials-file", credsPath))
		assert.EqualError(t, err, testutil.Undent(`
			validating creds: failed loading config using the Jetstack Secure OAuth mode: 2 errors occurred:
				* credentials file: user_id cannot be empty
				* credentials file: user_secret cannot be empty

		`))
	})

	t.Run("venafi-cloud-keypair-auth: --client-id cannot be used alone, it needs --private-key-path", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		got, _, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				server: https://api.venafi.eu
				period: 1h
			`)),
			withCmdLineFlags("--client-id", "test-client-id"))
		assert.EqualError(t, err, "if --client-id is specified, --private-key-path must also be specified")
		assert.Equal(t, CombinedConfig{}, got)
	})

	t.Run("venafi-cloud-keypair-auth: --private-key-path cannot be used alone, it needs --client-id", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		got, _, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				server: https://api.venafi.eu
				period: 1h
			`)),
			withCmdLineFlags("--private-key-path", "foo"))
		assert.EqualError(t, err, "--private-key-path is specified, --client-id must also be specified")
		assert.Equal(t, CombinedConfig{}, got)
	})

	// When --client-id is used, --venafi-cloud is implied.
	t.Run("venafi-cloud-keypair-auth: valid --client-id and --private-key-path", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		path := withFile(t, fakePrivKeyPEM)
		got, cl, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				server: https://api.venafi.eu
				period: 1h
				cluster_id: legacy cluster_id as cluster name
				venafi-cloud:
				  upload_path: /foo/bar
			`)),
			withCmdLineFlags("--client-id", "5bc7d07c-45da-11ef-a878-523f1e1d7de1", "--private-key-path", path))
		require.NoError(t, err)
		assert.Equal(t, CombinedConfig{Server: "https://api.venafi.eu", Period: time.Hour, OutputMode: VenafiCloudKeypair, ClusterName: "legacy cluster_id as cluster name", UploadPath: "/foo/bar", BackoffMaxTime: 10 * time.Minute, InstallNS: "venafi"}, got)
		assert.IsType(t, &client.VenafiCloudClient{}, cl)
	})

	// --credentials-file + --venafi-cloud can be used instead of
	// --client-id and --private-key-path. Unfortunately, --credentials-file
	// can't contain the private key material, just a path to it, so you
	// still need to have the private key file somewhere on the filesystem.
	t.Run("venafi-cloud-keypair-auth: valid --venafi-cloud + --credential-file + private key stored to disk", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		privKeyPath := withFile(t, fakePrivKeyPEM)
		credsPath := withFile(t, fmt.Sprintf(`{"client_id": "5bc7d07c-45da-11ef-a878-523f1e1d7de1","private_key_file": "%s"}`, privKeyPath))
		got, _, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				server: https://api.venafi.eu
				period: 1h
				cluster_id: legacy cluster_id as cluster name
				venafi-cloud:
				  upload_path: /foo/bar
			`)),
			withCmdLineFlags("--venafi-cloud", "--credentials-file", credsPath))
		require.NoError(t, err)
		assert.Equal(t, CombinedConfig{Server: "https://api.venafi.eu", Period: time.Hour, OutputMode: VenafiCloudKeypair, ClusterName: "legacy cluster_id as cluster name", UploadPath: "/foo/bar", BackoffMaxTime: 10 * time.Minute, InstallNS: "venafi"}, got)
	})

	t.Run("venafi-cloud-keypair-auth: venafi-cloud.upload_path field is required", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		privKeyPath := withFile(t, fakePrivKeyPEM)
		credsPath := withFile(t, fmt.Sprintf(`{"client_id": "5bc7d07c-45da-11ef-a878-523f1e1d7de1","private_key_file": "%s"}`, privKeyPath))
		_, _, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				server: https://api.venafi.eu
				period: 1h
				cluster_id: the cluster name
				venafi-cloud:
				  upload_path: "" # <-- Cannot be left empty
			`)),
			withCmdLineFlags("--venafi-cloud", "--credentials-file", credsPath))
		require.EqualError(t, err, testutil.Undent(`
			1 error occurred:
				* the venafi-cloud.upload_path field is required when using the Venafi Cloud Key Pair Service Account mode

		`))
	})

	t.Run("venafi-cloud-keypair-auth: --private-key-file can be passed with --credential-file", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		privKeyPath := withFile(t, fakePrivKeyPEM)
		credsPath := withFile(t, `{"client_id": "5bc7d07c-45da-11ef-a878-523f1e1d7de1"}`)
		got, _, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				server: https://api.venafi.eu
				period: 1h
				cluster_id: the cluster name
			`)),
			withCmdLineFlags("--venafi-cloud", "--credentials-file", credsPath, "--private-key-path", privKeyPath))
		require.EqualError(t, err, testutil.Undent(`
			1 error occurred:
				* the venafi-cloud.upload_path field is required when using the Venafi Cloud Key Pair Service Account mode

		`))
		assert.Equal(t, CombinedConfig{}, got)
	})

	t.Run("venafi-cloud-keypair-auth: config.venafi-cloud", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		privKeyPath := withFile(t, fakePrivKeyPEM)
		credsPath := withFile(t, `{"client_id": "5bc7d07c-45da-11ef-a878-523f1e1d7de1"}`)
		got, _, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				server: https://api.venafi.eu
				period: 1h
				venafi-cloud:
				  uploader_id: test-agent
				  upload_path: /testing/path
			`)),
			withCmdLineFlags("--venafi-cloud", "--credentials-file", credsPath, "--private-key-path", privKeyPath))
		require.EqualError(t, err, testutil.Undent(`
			1 error occurred:
				* cluster_name or cluster_id is required in Venafi Cloud Key Pair Service Account mode

		`))
		assert.Equal(t, CombinedConfig{}, got)
	})

	t.Run("venafi-cloud-workload-identity-auth: valid --venafi-connection", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		t.Setenv("KUBECONFIG", withFile(t, fakeKubeconfig))
		log, gotLogs := recordLogs(t)
		got, cl, err := ValidateAndCombineConfig(log,
			withConfig(testutil.Undent(`
				server: http://should-be-ignored
				period: 1h
				cluster_id: legacy cluster_id as cluster name
			`)),
			withCmdLineFlags("--venafi-connection", "venafi-components"))
		require.NoError(t, err)
		assert.Equal(t, testutil.Undent(`
			INFO Output mode selected venConnName="venafi-components" mode="Venafi Cloud VenafiConnection" reason="--venafi-connection was specified"
			INFO ignoring the server field specified in the config file. In Venafi Cloud VenafiConnection mode, this field is not needed.
			INFO Using cluster_id as cluster_name for backwards compatibility clusterID="legacy cluster_id as cluster name"
			INFO Using period from config period="1h0m0s"
		`), gotLogs.String())
		assert.Equal(t, CombinedConfig{
			Period:         1 * time.Hour,
			ClusterName:    "legacy cluster_id as cluster name",
			OutputMode:     VenafiCloudVenafiConnection,
			VenConnName:    "venafi-components",
			VenConnNS:      "venafi",
			InstallNS:      "venafi",
			BackoffMaxTime: 10 * time.Minute,
		}, got)
		assert.IsType(t, &client.VenConnClient{}, cl)
	})

	t.Run("venafi-cloud-workload-identity-auth: warning about server, venafi-cloud.uploader_id, and venafi-cloud.upload_path being skipped", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		t.Setenv("KUBECONFIG", withFile(t, fakeKubeconfig))
		log, gotLogs := recordLogs(t)
		got, gotCl, err := ValidateAndCombineConfig(log,
			withConfig(testutil.Undent(`
				server: https://api.venafi.eu
				period: 1h
				cluster_name: cluster-1
				cluster_id: should-be-ignored-and-logged-as-ignored
				venafi-cloud:
				  uploader_id: id
				  upload_path: /path
			`)),
			withCmdLineFlags("--venafi-connection", "venafi-components"),
		)
		require.NoError(t, err)
		assert.Equal(t, testutil.Undent(`
			INFO Output mode selected venConnName="venafi-components" mode="Venafi Cloud VenafiConnection" reason="--venafi-connection was specified"
			INFO ignoring the server field specified in the config file. In Venafi Cloud VenafiConnection mode, this field is not needed.
			INFO ignoring the venafi-cloud.upload_path field in the config file. In Venafi Cloud VenafiConnection mode, this field is not needed.
			INFO ignoring the venafi-cloud.uploader_id field in the config file. This field is not needed in Venafi Cloud VenafiConnection mode.
			INFO Ignoring the cluster_id field in the config file. This field is not needed in Venafi Cloud VenafiConnection mode.
			INFO Using period from config period="1h0m0s"
		`), gotLogs.String())
		assert.Equal(t, VenafiCloudVenafiConnection, got.OutputMode)
		assert.IsType(t, &client.VenConnClient{}, gotCl)
	})

	t.Run("venafi-cloud-workload-identity-auth: server field can be left empty in venconn mode", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		t.Setenv("KUBECONFIG", withFile(t, fakeKubeconfig))
		got, _, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				server: ""
				period: 1h
				cluster_id: foo
			`)),
			withCmdLineFlags("--venafi-connection", "venafi-components"))
		require.NoError(t, err)
		assert.Equal(t, VenafiCloudVenafiConnection, got.OutputMode)
	})

	// Fake CyberArk service-account username used by the MachineHub subtests.
	const arkUsername = "cluster-1-region-1-cloud-1@cyberark.cloud.123456"

	t.Run("--machine-hub selects MachineHub mode", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		t.Setenv("KUBECONFIG", withFile(t, fakeKubeconfig))
		t.Setenv("ARK_SUBDOMAIN", "tlspk")
		t.Setenv("ARK_USERNAME", arkUsername)
		t.Setenv("ARK_SECRET", "test-secret")
		got, cl, err := ValidateAndCombineConfig(discardLogs(), withConfig(""), withCmdLineFlags("--period", "1m", "--machine-hub"))
		require.NoError(t, err)
		assert.Equal(t, MachineHub, got.OutputMode)
		assert.Equal(t, arkUsername, got.ClusterName, "the ClusterName should default to the ARK_USERNAME value if the cluster_name in the config file is empty")
		assert.IsType(t, &client.CyberArkClient{}, cl)
	})

	t.Run("--machine-hub with cluster_name override", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		t.Setenv("KUBECONFIG", withFile(t, fakeKubeconfig))
		t.Setenv("ARK_SUBDOMAIN", "tlspk")
		t.Setenv("ARK_USERNAME", arkUsername)
		t.Setenv("ARK_SECRET", "test-secret")
		got, cl, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				cluster_name: override-cluster-name
			`)),
			withCmdLineFlags("--period", "1m", "--machine-hub"))
		require.NoError(t, err)
		assert.Equal(t, MachineHub, got.OutputMode)
		assert.Equal(t, "override-cluster-name", got.ClusterName, "the cluster_name in the config file should be used if not empty, even if ARK_USERNAME is set")
		assert.IsType(t, &client.CyberArkClient{}, cl)
	})

	t.Run("--machine-hub without required environment variables", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		t.Setenv("KUBECONFIG", withFile(t, fakeKubeconfig))
		t.Setenv("ARK_SUBDOMAIN", "")
		t.Setenv("ARK_USERNAME", "")
		t.Setenv("ARK_SECRET", "")
		got, cl, err := ValidateAndCombineConfig(discardLogs(), withConfig(""), withCmdLineFlags("--period", "1m", "--machine-hub"))
		assert.Equal(t, CombinedConfig{}, got)
		assert.Nil(t, cl)
		assert.EqualError(t, err, testutil.Undent(`
			validating creds: failed loading config using the MachineHub mode: 1 error occurred:
				* missing environment variables: ARK_SUBDOMAIN, ARK_USERNAME, ARK_SECRET

		`))
	})

	t.Run("argument: --output-file selects local file mode", func(t *testing.T) {
		log, gotLog := recordLogs(t)
		got, outputClient, err := ValidateAndCombineConfig(log, withConfig(""), withCmdLineFlags("--period", "1m", "--output-path", "/foo/bar/baz"))
		require.NoError(t, err)
		assert.Equal(t, LocalFile, got.OutputMode)
		assert.Equal(t, testutil.Undent(`
			INFO Output mode selected mode="Local File" reason="--output-path was specified"
		`), gotLog.String())
		assert.IsType(t, &client.FileClient{}, outputClient)
	})

	t.Run("config: output-path selects local file mode", func(t *testing.T) {
		log, gotLog := recordLogs(t)
		got, outputClient, err := ValidateAndCombineConfig(log,
			withConfig(testutil.Undent(`
				output-path: /foo/bar/baz
			`)),
			withCmdLineFlags("--period=1h"))
		require.NoError(t, err)
		assert.Equal(t, LocalFile, got.OutputMode)
		assert.Equal(t, testutil.Undent(`
			INFO Output mode selected mode="Local File" reason="output-path was specified in the config file"
		`), gotLog.String())
		assert.IsType(t, &client.FileClient{}, outputClient)
	})

	// When --input-path is supplied, the data is being read from a local file
	// and the agent is probably running outside the cluster and has no access
	// to a cluster, so the environment variables which are required for
	// generating events attached to the Agent pod should not be required:
	// POD_NAME, POD_NAMESPACE, POD_UID, KUBECONFIG, etc.
	// This test deliberately does not set those environment variables.
	//
	// TODO(wallrj): Some other config settings like cluster_id, organization_id
	// should also not be required in this situation. We'll fix those in the
	// future.
	t.Run("--input-path requires no Kubernetes config", func(t *testing.T) {
		expectedInputPath := "/foo/bar/baz"
		got, _, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				cluster_id: should-not-be-required
				organization_id: should-not-be-required
			`)),
			withCmdLineFlags(
				"--one-shot",
				"--input-path", expectedInputPath,
				"--output-path", "/dev/null",
			),
		)
		require.NoError(t, err)
		assert.Equal(t, expectedInputPath, got.InputPath)
	})
}

func Test_ValidateAndCombineConfig_VenafiCloudKeyPair(t *testing.T) {
	t.Run("server, uploader_id, and cluster name are correctly passed", func(t *testing.T) {
		t.Setenv("POD_NAMESPACE", "venafi")
		ctx, cancel := context.WithCancel(t.Context())
		defer cancel()
		log := ktesting.NewLogger(t, ktesting.NewConfig(ktesting.Verbosity(10)))
		ctx = klog.NewContext(ctx, log)

		srv, cert, setVenafiCloudAssert := testutil.FakeVenafiCloud(t)
		setVenafiCloudAssert(func(t testing.TB, gotReq *http.Request) {
			// Only care about /v1/tlspk/upload/clusterdata/:uploader_id?name=
			if gotReq.URL.Path == "/v1/oauth/token/serviceaccount" {
				return
			}
			assert.Equal(t, srv.URL, "https://"+gotReq.Host)
			assert.Equal(t, "test cluster name", gotReq.URL.Query().Get("name"))
			assert.Equal(t, "/v1/tlspk/upload/clusterdata/no", gotReq.URL.Path)
		})

		privKeyPath := withFile(t, fakePrivKeyPEM)
		got, cl, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				server: `+srv.URL+`
				period: 1h
				cluster_id: "test cluster name"
				venafi-cloud:
				  uploader_id: no
				  upload_path: /v1/tlspk/upload/clusterdata
			`)),
			withCmdLineFlags("--client-id", "5bc7d07c-45da-11ef-a878-523f1e1d7de1",
				"--private-key-path", privKeyPath),
		)
		require.NoError(t, err)
		testutil.TrustCA(t, cl, cert)
		assert.Equal(t, VenafiCloudKeypair, got.OutputMode)

		err = cl.PostDataReadingsWithOptions(ctx, nil, client.Options{ClusterName: "test cluster name"})
		require.NoError(t, err)
	})
}

// Slower test cases due to envtest. That's why they are separated from the
// other tests.
func Test_ValidateAndCombineConfig_VenafiConnection(t *testing.T) {
	_, cfg, kcl := testutil.WithEnvtest(t)
	t.Setenv("KUBECONFIG", testutil.WithKubeconfig(t, cfg))
	srv, cert, setVenafiCloudAssert := testutil.FakeVenafiCloud(t)
	// Seed the envtest cluster with the VenafiConnection, its access token
	// Secret, and the RBAC that lets the venafi-connection service account
	// read that Secret.
	for _, obj := range testutil.Parse(
		testutil.VenConnRBAC + testutil.Undent(`
			---
			apiVersion: jetstack.io/v1alpha1
			kind: VenafiConnection
			metadata:
			  name: venafi-components
			  namespace: venafi
			spec:
			  vcp:
			    url: "`+srv.URL+`"
			    accessToken:
			      - secret:
			          name: accesstoken
			          fields: [accesstoken]
			---
			apiVersion: v1
			kind: Secret
			metadata:
			  name: accesstoken
			  namespace: venafi
			stringData:
			  accesstoken: VALID_ACCESS_TOKEN
			---
			apiVersion: rbac.authorization.k8s.io/v1
			kind: Role
			metadata:
			  name: venafi-connection-accesstoken-reader
			  namespace: venafi
			rules:
			- apiGroups: [""]
			  resources: ["secrets"]
			  verbs: ["get"]
			  resourceNames: ["accesstoken"]
			---
			apiVersion: rbac.authorization.k8s.io/v1
			kind: RoleBinding
			metadata:
			  name: venafi-connection-accesstoken-reader
			  namespace: venafi
			roleRef:
			  apiGroup: rbac.authorization.k8s.io
			  kind: Role
			  name: venafi-connection-accesstoken-reader
			subjects:
			- kind: ServiceAccount
			  name: venafi-connection
			  namespace: venafi
		`)) {
		require.NoError(t, kcl.Create(t.Context(), obj))
	}

	t.Run("err when cluster_id field is empty", func(t *testing.T) {
		expected := srv.URL
		setVenafiCloudAssert(func(t testing.TB, gotReq *http.Request) {
			assert.Equal(t, expected, "https://"+gotReq.Host)
		})
		_, _, err := ValidateAndCombineConfig(discardLogs(),
			Config{Server: "http://should-be-ignored", Period: 1 * time.Hour},
			AgentCmdFlags{VenConnName: "venafi-components", InstallNS: "venafi"})
		assert.EqualError(t, err, "1 error occurred:\n\t* cluster_name or cluster_id is required in Venafi Cloud VenafiConnection mode\n\n")
	})

	t.Run("the server field is ignored when VenafiConnection is used", func(t *testing.T) {
		ctx, cancel := context.WithCancel(t.Context())
		defer cancel()
		log := ktesting.NewLogger(t, ktesting.NewConfig(ktesting.Verbosity(10)))
		ctx = klog.NewContext(ctx, log)

		expected := srv.URL
		setVenafiCloudAssert(func(t testing.TB, gotReq *http.Request) {
			assert.Equal(t, expected, "https://"+gotReq.Host)
		})
		cfg, cl, err := ValidateAndCombineConfig(discardLogs(),
			withConfig(testutil.Undent(`
				server: http://this-url-should-be-ignored
				period: 1h
				cluster_id: test cluster name
			`)),
			withCmdLineFlags("--venafi-connection", "venafi-components", "--install-namespace", "venafi"))
		require.NoError(t, err)

		testutil.VenConnStartWatching(ctx, t, cl)
		testutil.TrustCA(t, cl, cert)

		// TODO(mael): the client should keep track of the cluster name, we
		// shouldn't need to pass it as an option to
		// PostDataReadingsWithOptions.
err = cl.PostDataReadingsWithOptions(ctx, nil, client.Options{ClusterName: cfg.ClusterName}) require.NoError(t, err) }) } func Test_ParseConfig(t *testing.T) { t.Run("happy", func(t *testing.T) { cfg, err := ParseConfig([]byte(testutil.Undent(` server: https://api.venafi.eu period: 1h organization_id: foo cluster_id: bar `))) require.NoError(t, err) assert.Equal(t, Config{Server: "https://api.venafi.eu", Period: 1 * time.Hour, OrganizationID: "foo", ClusterID: "bar"}, cfg) }) t.Run("unknown data gatherer kind", func(t *testing.T) { _, err := ParseConfig([]byte(testutil.Undent(` endpoint: host: example.com path: /api/v1/data schedule: "* * * * *" data-gatherers: - kind: "foo" `))) assert.EqualError(t, err, `cannot parse data-gatherer configuration, kind "foo" is not supported`) }) t.Run("validates incorrect schema", func(t *testing.T) { _, gotErr := ParseConfig([]byte(`data-gatherers: "things"`)) assert.EqualError(t, gotErr, "yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `things` into []agent.DataGatherer") }) t.Run("does not show an error when user provides an unknown field", func(t *testing.T) { _, gotErr := ParseConfig([]byte(`some-unknown-field: foo`)) assert.NoError(t, gotErr) }) // The only validation that ParseConfig does it to check if the `kind` is // known. The rest of the validation is done in ValidateDataGatherers and // ValidateAndCombineConfig. t.Run("validates that the kind is known", func(t *testing.T) { _, gotErr := ParseConfig([]byte(testutil.Undent(` data-gatherers: - kind: unknown`, ))) assert.EqualError(t, gotErr, `cannot parse data-gatherer configuration, kind "unknown" is not supported`) }) // ParseConfig only checks the data-gatherer kind. The rest of the // validation is done in ValidateDataGatherers and ValidateAndCombineConfig. 
t.Run("does not check for missing name", func(t *testing.T) { _, gotErr := ParseConfig([]byte(testutil.Undent(` endpoint: host: example.com path: /api/v1/data schedule: "* * * * *" organization_id: "example" cluster_id: "example-cluster" data-gatherers: - kind: dummy `))) assert.NoError(t, gotErr) }) t.Run("does not check correct server URL", func(t *testing.T) { _, gotErr := ParseConfig([]byte(testutil.Undent(` server: https://api.venafi.eu `))) assert.NoError(t, gotErr) }) } func Test_ValidateDataGatherers(t *testing.T) { t.Run("happy", func(t *testing.T) { err := ValidateDataGatherers(withConfig(testutil.Undent(` data-gatherers: - kind: "k8s" name: "k8s/secrets" - kind: "k8s-discovery" name: "k8s-discovery" - kind: "k8s-dynamic" name: "k8s/secrets" - kind: "local" name: "local" - kind: "dummy" name: "dummy" `)).DataGatherers) require.NoError(t, err) }) t.Run("missing name", func(t *testing.T) { gotErr := ValidateDataGatherers(withConfig(testutil.Undent(` data-gatherers: - kind: dummy `)).DataGatherers) assert.EqualError(t, gotErr, "1 error occurred:\n\t* datagatherer 1/1 is missing a name\n\n") }) // For context, the custom UnmarshalYAML in ParseConfig already validates // the kind. That's why ValidateDataGatherers panics: because it would be a // programmer mistake. 
t.Run("missing kind triggers a panic", func(t *testing.T) { assert.PanicsWithError(t, `cannot parse data-gatherer configuration, kind "unknown" is not supported`, func() { _ = ValidateDataGatherers(withConfig(testutil.Undent(` data-gatherers: - kind: unknown `)).DataGatherers) }) }) } func withFile(t testing.TB, content string) string { t.Helper() f, err := os.CreateTemp(t.TempDir(), "file") if err != nil { t.Fatalf("failed to create temporary file: %v", err) } defer f.Close() _, err = f.WriteString(content) if err != nil { t.Fatalf("failed to write to temporary file: %v", err) } return f.Name() } func recordLogs(t *testing.T) (logr.Logger, ktesting.Buffer) { log := ktesting.NewLogger(t, ktesting.NewConfig(ktesting.BufferLogs(true))) testingLogger, ok := log.GetSink().(ktesting.Underlier) require.True(t, ok) return log, testingLogger.GetBuffer() } func discardLogs() logr.Logger { return logr.Discard() } // Shortcut for ParseConfig. func withConfig(s string) Config { cfg, err := ParseConfig([]byte(s)) if err != nil { panic(err) } return cfg } func withCmdLineFlags(flags ...string) AgentCmdFlags { parsed := withoutCmdLineFlags() agentCmd := &cobra.Command{} InitAgentCmdFlags(agentCmd, &parsed) err := agentCmd.ParseFlags(flags) if err != nil { panic(err) } return parsed } func withoutCmdLineFlags() AgentCmdFlags { return AgentCmdFlags{} } const fakeKubeconfig = ` apiVersion: v1 clusters: - cluster: certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCVENDQWUyZ0F3SUJBZ0lJVGpXZTMvWXhJbXN3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TkRBM01UVXhOREUxTVRSYUZ3MHpOREEzTVRNeE5ESXdNVFJhTUJVeApFekFSQmdOVkJBTVRDbXQxWW1WeWJtVjBaWE13Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLCkFvSUJBUUMweVhZSmIyT0JRb0NrYXYySWw1NjNRM0t3RFpGSmluNFRFSkJJbWt6MnpJVU56cHIvV09MY01jdjYKVG9IaTl1c1oyL005dktMcnhYRE1FcFNJaTR4c1psZ3BDN2Erb3hqNW80MVdqRy9rdzhmcVc2MTRUV2ZEekRkWQppRkNKOC9PdmpKdFY2elREZ04vUGtWRytKQWJIOTdnVkc5NXRzRHBIazN3Nk12WkdYK3lqdnhXblV1enlpdFIzCkNLNkhYcE82Y0xBVzJva1FWZHYrZEFUSDFrZVpZZHpMOFp0U0txcUo2QWlRTUtEMG1FbXZPWDNBRk4vUUNQdXkKTVdDUXVkQ1RaQ0t1a1gwRzllakd3NGE1RC9CZnVmYmtWd1g3Vmo3OGJjQ0NId3JJMFZNOHVzYnJzcEs5eGtsVwpodjRXOGVaQ21KZWlMajFLVUhSbTdRVlFYVHNoQWdNQkFBR2pXVEJYTUE0R0ExVWREd0VCL3dRRUF3SUNwREFQCkJnTlZIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWREZ1FXQkJTckNJaE44czZpMmRIMEpwQWU3dFdPL2p2clJqQVYKQmdOVkhSRUVEakFNZ2dwcmRXSmxjbTVsZEdWek1BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQ0pQd2x1OFVhRgo5UnIvUG5QSDNtL0w2amhlcE5Kak5vNThFSWlEMWpjc1Y3R04zZUpha0h1b3g1MGRmR2gvMFFMZEwreUluamFtCkw0Y0R6RnVYeDhCL0ZXQlMwdnYvaG5WQ1JadER4bjB1OW92WC9iblNJdHpBOHNKMHA4cU1YeEFmbkxuZDI0TksKNFZXZmFXTThjbitQeUoybnJ3MHo2YmtYYnZZMGxEV2ZRakorOUJxU3IyeUZYZWM4eXljSzZ6aHlXeHJMV1p1OAoyQngrYjJML1JETDg2T3FXSkthRmljNGlWeDBoK2xDYlBIQmNwazhQOVFvSjZodThhdXdiWjZlMkwxbmZSdWFjCjB3Z1F5OEMzNVExMTdla0dOcjZKMUlrRlE5OGorYTNBTVQ2Z05KclZGZEJOOGlMcjlhMDZJQnRBb04wV2s0bysKL2F5akJBc3hONHo5Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K server: https://127.0.0.1:58453 name: fake contexts: - context: cluster: fake user: fake name: fake current-context: fake kind: Config preferences: {} users: - name: fake user: client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURLVENDQWhHZ0F3SUJBZ0lJV1JQVy9Nblo0VnN3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TkRBM01UVXhOREUxTVRSYUZ3MHlOVEEzTVRVeE5ESXdNVFZhTUR3eApIekFkQmdOVkJBb1RGbXQxWW1WaFpHMDZZMngxYzNSbGNpMWhaRzFwYm5NeEdUQVhCZ05WQkFNVEVHdDFZbVZ5CmJtVjBaWE10WVdSdGFXNHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDcGpIRW4KY2w3QlVURlJLdTVUeU54TmxEdWxHYittalNLcHdsd2FGa0ZyYUZPMXU0MVRVOE9FalZhNDlheHp1SHZYNTZpWgpLMEJCbkJ5aFdYeGVKNE1CTzRWdXk2K09zYVBHWUgxcDZIcGpmUTBwVW5QODFndTgzMloyWmRaazhmZkJVb0pjCjI4b25Mbjd0UERVdjhHVk9WbndZRzE4RGFDWFFjVGR3VjFNYVFKZCtsNGpveHQ5S0J6aDhZUUhZanJMdnl4RncKd2dPbTNITk5GQ3J3Zno2Wis2bi95bHliaTA3amNHVi9nMTVHaVl6azJNWW5EbFBYUHVQYzY0MVp0NWdBcGFwSgpUbUdsaW95Ym85bUVtZmRFbnd0aDJDSTZTdkx6eXlveTJidlhEVktNRzhZTzE5N25kRUd6TE95T1lYT1RMYUNkCnhaWVVCdlNadkxSK1pzMGpBZ01CQUFHalZqQlVNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUsKQmdnckJnRUZCUWNEQWpBTUJnTlZIUk1CQWY4RUFqQUFNQjhHQTFVZEl3UVlNQmFBRktzSWlFM3l6cUxaMGZRbQprQjd1MVk3K08rdEdNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUUExeXpDdE55Rmp6SHlNZ0FFTVpXalR4OWxWClk2MHRpeTFvYjUvL0thR0MvWmhSbW94NmZ0Sy94dFJDRlptRVYxZ1ZzaXNLc0g2L0YwTEZHRys4V0lrNzVoZXkKVGtoRXUvRVpBdEpRMUNoSmFWMTg4QzNvMmtmSkZOOFlVRlRyS0k3K1NNb0RCTmJJU0VPV3FsZFRiVDdWdkVzNQpsWTRKcS9rU2xnNnNZcWNCRDYzY2pFOHpKU3Y4aDUra3J0d2JVRW90Y0ptN0IvNnpMZksxNWQ5WXBEb0F1anl0CjlVcTVROEhaSGRqWlZ1OWgvNmYvbVMvZkRyek9weDhNOTdPblU1T0MvY2dTNGtUNVhkdVo3SVB3TDJVMkZsTlIKVUdvZ0RndmxDQkFaMDV4WXh4Z2xjNlNYK3JrcURUK3VhWHNtR2dBU21oUjR4OXFkRzA1R2JIdXhoZkJhCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K client-key-data: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBcVl4eEozSmV3VkV4VVNydVU4amNUWlE3cFJtL3BvMGlxY0pjR2haQmEyaFR0YnVOClUxUERoSTFXdVBXc2M3aDcxK2VvbVN0QVFad2NvVmw4WGllREFUdUZic3V2anJHanhtQjlhZWg2WTMwTktWSnoKL05ZTHZOOW1kbVhXWlBIM3dWS0NYTnZLSnk1KzdUdzFML0JsVGxaOEdCdGZBMmdsMEhFM2NGZFRHa0NYZnBlSQo2TWJmU2djNGZHRUIySTZ5NzhzUmNNSURwdHh6VFJRcThIOCttZnVwLzhwY200dE80M0JsZjROZVJvbU01TmpHCkp3NVQxejdqM091TldiZVlBS1dxU1U1aHBZcU1tNlBaaEpuM1JKOExZZGdpT2tyeTg4c3FNdG03MXcxU2pCdkcKRHRmZTUzUkJzeXpzam1Gemt5MmduY1dXRkFiMG1ieTBmbWJOSXdJREFRQUJBb0lCQUY2dHkzNWdzcU0zYU5mUApwbmpwSUlTOTh6UzJGVHkzY1pUa3NUUHNHNm9UL3pMcndmYTNQdVpsV3ZrOFQ0bnJpbFM5eTN1RkdJUEszbjRICmo1aXdiY3FoWjFqQXE0OStpVnM5Qkt2QW81K3M5RTJQK3E5RkJCYjdsYWNtSlR3SGx2ZkEwSVYwUXdYd1EvYk0KZVZNRTVqMkJ0Qmh1S0hlcGovdy9UTnNTR0pqK2NlNmN2aXVVb2NXWGsxWDl2c1RDaUdtMVdnVkZGQVphVGpMTgpDcEU1dHFpdnpvbEZVbXZIbmVYNTZTOEdFWk01NFA5MFk1enJ3NHBGa0Vud1VMRlBLa1U0cUU0eWVPNVFsWUhCClQ0NklIOVNPcUU5T0pLL3JCSGVzQU45TWNrMTdKblF6Sy95bXh6eHhhcGdPMnk0bVBTcjJaaGk0SENMRHRQV2QKc0ZtRzc2RUNnWUVBeHhQTTJYVFV2bXV5ckZmUVgxblJTSW9jMGhxZFY0MnFaRFlkMzZWVWc1UUVMM0Y4S01aUwptSkNsWlJXYW9IY0NFVUdXakFTWEJaMW9hOHlOMVhSNURTV3ZJMmV5TjE1dnh3NFg1SjV5QzUvY0F4ZW00dUk3CnkzM0VWWktXZXpFQTVVeUFtNlF6ei9lR1R6QkZyNUlxYkJDUitTUldudHRXUHdJTUhkK0VoeEVDZ1lFQTJnY3QKT2h1U0xJeDZZbTFTRHVVT0pSdmtFZFlCazJPQWxRbk5kOVJoaWIxdVlVbjhPTkhYdHBsY2FHZEl3bFdkaEJlcwo4M1F4dXA4MEFydEFtM2FHMXZ6RlZ6Q05KeHA4ZGFxWlFsZk94YlJReUQ0cjdtT2Z5aENFY2VibHAxMkZKRTBQCmNhOFl2TkFuTTdkbnlTSFd0aUo2THFQWDVuMXlRSC9JY1NIaEdQTUNnWUVBa0ZDZFBzSy8rcTZ1SHR1bDFZbVIKK3FrTWpZNzNvdUd5dE9TNk1VZDBCZEtHV2pKRmxIVjRxTnFxMjZXV3ExNjZZL0lOQmNIS0RTcjM2TFduMkNhUQpIbVRFR3NGd1kwMFZjTktacFlUckhkd3NMUjIzUUdCS2dwRFFoRXc0eEdOWXgrRDJsbDJwcGNoRldDQ2hVODU4CjdFdnkxZzV1c01oR05IVHlmYkZzTEZFQ2dZRUF6QXJOVzhVenZuZFZqY25MY3Q4UXBzLzhXR2pVbnJBUFJPdWcKbTlWcDF2TXVXdVJYcElGV0JMQnYxOUZaT1czUWRTK0hEMndkb2c2ZUtUUS9HWDhLWUNhOU5JVGVoTXIzMFZMdwpEVE9KOG1KMiszK2JzNFVPcEpkaXJBb3Z3THI0QUdvUjJ3M0g4K1JGMjlOMzBMYlhieXJDOStVa0I3UTgrWG5kCkIydHljdHNDZ1lCZkxqUTNRUnpQN1Z5Y1VGNkFTYUNYVTJkcE5l
ckVUbGFpdldIb1FFWVo3NHEyMkFTeFcrMlEKWmtZTEM1RVNGMnZwUU5kZUZhZlRyRm9zR3pLQ1dwYXBUL2QwUC9qaG83TEF1TTJQZEcxSXFoNElRU3FUM3VqNwp4Sm9WUzhIbEg1Ri9sQzZzczZQSm1GWlpsanhFL1FVTDlucDNLYTVCRjFXdXZiZVp0Q2I5Mnc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= ` func Test_ValidateAndCombineConfig_NGTS(t *testing.T) { t.Run("ngts: valid configuration with all required flags", func(t *testing.T) { t.Setenv("POD_NAMESPACE", "venafi") privKeyPath := withFile(t, fakePrivKeyPEM) got, cl, err := ValidateAndCombineConfig(discardLogs(), withConfig(testutil.Undent(` period: 1h cluster_name: test-cluster cluster_description: Test NGTS cluster `)), withCmdLineFlags("--ngts", "--tsg-id", "test-tsg-123", "--client-id", "test-client-id", "--private-key-path", privKeyPath)) require.NoError(t, err) assert.Equal(t, NGTS, got.OutputMode) assert.Equal(t, "test-tsg-123", got.TSGID) assert.Equal(t, "test-cluster", got.ClusterName) assert.Equal(t, "Test NGTS cluster", got.ClusterDescription) assert.Equal(t, false, got.ClaimableCerts) assert.IsType(t, &client.NGTSClient{}, cl) }) t.Run("ngts: claimable_certs flows from config into CombinedConfig", func(t *testing.T) { t.Setenv("POD_NAMESPACE", "venafi") privKeyPath := withFile(t, fakePrivKeyPEM) got, _, err := ValidateAndCombineConfig(discardLogs(), withConfig(testutil.Undent(` period: 1h cluster_name: test-cluster claimable_certs: true `)), withCmdLineFlags("--ngts", "--tsg-id", "test-tsg-123", "--client-id", "test-client-id", "--private-key-path", privKeyPath)) require.NoError(t, err) assert.Equal(t, true, got.ClaimableCerts) }) t.Run("ngts: valid configuration with custom server URL", func(t *testing.T) { t.Setenv("POD_NAMESPACE", "venafi") privKeyPath := withFile(t, fakePrivKeyPEM) got, cl, err := ValidateAndCombineConfig(discardLogs(), withConfig(testutil.Undent(` period: 1h cluster_name: test-cluster `)), withCmdLineFlags("--ngts", "--tsg-id", "test-tsg-123", "--client-id", "test-client-id", "--private-key-path", privKeyPath, "--ngts-server-url", 
"https://ngts.test.example.com")) require.NoError(t, err) assert.Equal(t, NGTS, got.OutputMode) assert.Equal(t, "https://ngts.test.example.com", got.NGTSServerURL) assert.IsType(t, &client.NGTSClient{}, cl) }) t.Run("ngts: missing --ngts flag should not trigger NGTS mode", func(t *testing.T) { t.Setenv("POD_NAMESPACE", "venafi") privKeyPath := withFile(t, fakePrivKeyPEM) _, _, err := ValidateAndCombineConfig(discardLogs(), withConfig(testutil.Undent(` period: 1h cluster_name: test-cluster `)), withCmdLineFlags("--tsg-id", "test-tsg-123", "--client-id", "test-client-id", "--private-key-path", privKeyPath)) // Should select VenafiCloudKeypair mode instead when --ngts is not specified require.Error(t, err) assert.Contains(t, err.Error(), "venafi-cloud.upload_path") }) t.Run("ngts: missing --tsg-id should error", func(t *testing.T) { t.Setenv("POD_NAMESPACE", "venafi") privKeyPath := withFile(t, fakePrivKeyPEM) _, _, err := ValidateAndCombineConfig(discardLogs(), withConfig(testutil.Undent(` period: 1h cluster_name: test-cluster `)), withCmdLineFlags("--ngts", "--client-id", "test-client-id", "--private-key-path", privKeyPath)) require.Error(t, err) assert.Contains(t, err.Error(), "--tsg-id is required when using --ngts") }) t.Run("ngts: missing --client-id should error", func(t *testing.T) { t.Setenv("POD_NAMESPACE", "venafi") privKeyPath := withFile(t, fakePrivKeyPEM) _, _, err := ValidateAndCombineConfig(discardLogs(), withConfig(testutil.Undent(` period: 1h cluster_name: test-cluster `)), withCmdLineFlags("--ngts", "--tsg-id", "test-tsg-123", "--private-key-path", privKeyPath)) require.Error(t, err) assert.Contains(t, err.Error(), "client_id cannot be empty") }) t.Run("ngts: missing --private-key-path should error", func(t *testing.T) { t.Setenv("POD_NAMESPACE", "venafi") _, _, err := ValidateAndCombineConfig(discardLogs(), withConfig(testutil.Undent(` period: 1h cluster_name: test-cluster `)), withCmdLineFlags("--ngts", "--tsg-id", "test-tsg-123", "--client-id", 
"test-client-id")) require.Error(t, err) assert.Contains(t, err.Error(), "--private-key-path is required when using --ngts") }) t.Run("ngts: missing cluster_name should error", func(t *testing.T) { t.Setenv("POD_NAMESPACE", "venafi") privKeyPath := withFile(t, fakePrivKeyPEM) _, _, err := ValidateAndCombineConfig(discardLogs(), withConfig(testutil.Undent(` period: 1h `)), withCmdLineFlags("--ngts", "--tsg-id", "test-tsg-123", "--client-id", "test-client-id", "--private-key-path", privKeyPath)) require.Error(t, err) assert.Contains(t, err.Error(), "cluster_name is required") }) t.Run("ngts: cannot be used with --machine-hub", func(t *testing.T) { t.Setenv("POD_NAMESPACE", "venafi") privKeyPath := withFile(t, fakePrivKeyPEM) _, _, err := ValidateAndCombineConfig(discardLogs(), withConfig(testutil.Undent(` period: 1h cluster_name: test-cluster `)), withCmdLineFlags("--ngts", "--machine-hub", "--tsg-id", "test-tsg-123", "--client-id", "test-client-id", "--private-key-path", privKeyPath)) require.Error(t, err) assert.Contains(t, err.Error(), "--machine-hub cannot be used with --ngts") }) t.Run("ngts: cannot be used with --venafi-connection", func(t *testing.T) { t.Setenv("POD_NAMESPACE", "venafi") privKeyPath := withFile(t, fakePrivKeyPEM) _, _, err := ValidateAndCombineConfig(discardLogs(), withConfig(testutil.Undent(` period: 1h cluster_name: test-cluster `)), withCmdLineFlags("--ngts", "--venafi-connection", "my-conn", "--tsg-id", "test-tsg-123", "--client-id", "test-client-id", "--private-key-path", privKeyPath)) require.Error(t, err) assert.Contains(t, err.Error(), "--venafi-connection cannot be used with --ngts") }) t.Run("ngts: cannot be used with --venafi-cloud", func(t *testing.T) { t.Setenv("POD_NAMESPACE", "venafi") privKeyPath := withFile(t, fakePrivKeyPEM) _, _, err := ValidateAndCombineConfig(discardLogs(), withConfig(testutil.Undent(` period: 1h cluster_name: test-cluster `)), withCmdLineFlags("--ngts", "--venafi-cloud", "--tsg-id", "test-tsg-123", 
"--client-id", "test-client-id", "--private-key-path", privKeyPath)) require.Error(t, err) assert.Contains(t, err.Error(), "--venafi-cloud cannot be used with --ngts") }) t.Run("ngts: cannot be used with --api-token", func(t *testing.T) { t.Setenv("POD_NAMESPACE", "venafi") privKeyPath := withFile(t, fakePrivKeyPEM) _, _, err := ValidateAndCombineConfig(discardLogs(), withConfig(testutil.Undent(` period: 1h cluster_name: test-cluster `)), withCmdLineFlags("--ngts", "--api-token", "test-token", "--tsg-id", "test-tsg-123", "--client-id", "test-client-id", "--private-key-path", privKeyPath)) require.Error(t, err) assert.Contains(t, err.Error(), "--api-token cannot be used with --ngts") }) t.Run("ngts: organization_id in config should error", func(t *testing.T) { t.Setenv("POD_NAMESPACE", "venafi") privKeyPath := withFile(t, fakePrivKeyPEM) _, _, err := ValidateAndCombineConfig(discardLogs(), withConfig(testutil.Undent(` period: 1h cluster_name: test-cluster organization_id: my-org `)), withCmdLineFlags("--ngts", "--tsg-id", "test-tsg-123", "--client-id", "test-client-id", "--private-key-path", privKeyPath)) require.Error(t, err) assert.Contains(t, err.Error(), "organization_id in config file is not supported in NGTS mode") }) t.Run("ngts: cluster_id in config should error", func(t *testing.T) { t.Setenv("POD_NAMESPACE", "venafi") privKeyPath := withFile(t, fakePrivKeyPEM) _, _, err := ValidateAndCombineConfig(discardLogs(), withConfig(testutil.Undent(` period: 1h cluster_name: test-cluster cluster_id: my-cluster-id `)), withCmdLineFlags("--ngts", "--tsg-id", "test-tsg-123", "--client-id", "test-client-id", "--private-key-path", privKeyPath)) require.Error(t, err) assert.Contains(t, err.Error(), "cluster_id in config file is not supported in NGTS mode") }) } const fakePrivKeyPEM = `-----BEGIN PRIVATE KEY----- MHcCAQEEIFptpPXOvEWDrYkiMhyEH1+FB1GwtwX2tyXH4KtBO6g7oAoGCCqGSM49 AwEHoUQDQgAE/BsIwagYc4YUjSSFyqcStj2qliAkdVGlMoJbMuXupzQ9Qs4TX5Pl 
dFjz6J/j6Gu4fLPqXmM61Hj6kiuRHx5eHQ== -----END PRIVATE KEY----- ` ================================================ FILE: pkg/agent/dummy_data_gatherer.go ================================================ package agent import ( "context" "fmt" "github.com/jetstack/preflight/pkg/datagatherer" ) type dummyConfig struct { AlwaysFail bool `yaml:"always-fail"` FailedAttempts int `yaml:"failed-attempts"` wantOnCreationErr bool } func (c *dummyConfig) NewDataGatherer(ctx context.Context) (datagatherer.DataGatherer, error) { if c.wantOnCreationErr { return nil, fmt.Errorf("an error") } return &dummyDataGatherer{ AlwaysFail: c.AlwaysFail, FailedAttempts: c.FailedAttempts, }, nil } type dummyDataGatherer struct { AlwaysFail bool attemptNumber int FailedAttempts int } func (g *dummyDataGatherer) Run(ctx context.Context) error { // no async functionality, see Fetch return nil } func (g *dummyDataGatherer) WaitForCacheSync(ctx context.Context) error { // no async functionality, see Fetch return nil } func (c *dummyDataGatherer) Fetch(ctx context.Context) (any, int, error) { var err error if c.attemptNumber < c.FailedAttempts { err = fmt.Errorf("First %d attempts will fail", c.FailedAttempts) } if c.AlwaysFail { err = fmt.Errorf("This data gatherer will always fail") } c.attemptNumber++ return nil, -1, err } ================================================ FILE: pkg/agent/metrics.go ================================================ package agent import ( "github.com/prometheus/client_golang/prometheus" ) var ( metricPayloadSize = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "jscp", Subsystem: "agent", Name: "data_readings_upload_size", Help: "Data readings upload size (in bytes) sent by the jscp in-cluster agent.", }, []string{"organization", "cluster"}) ) ================================================ FILE: pkg/agent/run.go ================================================ package agent import ( "context" "encoding/json" "errors" "fmt" "io" "net" "net/http" 
"net/http/pprof" "os" "strings" "time" "github.com/cenkalti/backoff/v5" "github.com/go-logr/logr" "github.com/hashicorp/go-multierror" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/spf13/cobra" "golang.org/x/sync/errgroup" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" clientgocorev1 "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/manager" "github.com/jetstack/preflight/api" "github.com/jetstack/preflight/internal/envelope" "github.com/jetstack/preflight/internal/envelope/keyfetch" "github.com/jetstack/preflight/internal/envelope/rsa" "github.com/jetstack/preflight/pkg/client" "github.com/jetstack/preflight/pkg/datagatherer" "github.com/jetstack/preflight/pkg/datagatherer/k8sdynamic" "github.com/jetstack/preflight/pkg/kubeconfig" "github.com/jetstack/preflight/pkg/logs" "github.com/jetstack/preflight/pkg/version" ) var Flags AgentCmdFlags // schema version of the data sent by the agent. // The new default version is v2. // In v2 the agent posts data readings using api.gathereredResources // Any requests without a schema version set will be interpreted // as using v1 by the backend. 
// In v1 the agent sends
// raw resource data of unstructuredList
const schemaVersion string = "v2.0.0"

// Run starts the agent process. It loads and validates the configuration,
// starts the metrics/health HTTP server and the data gatherers, then enters
// the periodic gather-and-upload loop. It blocks until the command context is
// cancelled, a fatal error occurs, or (in one-shot mode) a single upload
// completes.
func Run(cmd *cobra.Command, args []string) (returnErr error) {
	baseCtx, cancel := context.WithCancel(cmd.Context())
	defer cancel()
	log := klog.FromContext(baseCtx).WithName("Run")
	log.Info("Starting", "version", version.PreflightVersion, "commit", version.Commit)

	file, err := os.Open(Flags.ConfigFilePath)
	if err != nil {
		return fmt.Errorf("Failed to load config file for agent from: %s", Flags.ConfigFilePath)
	}
	defer file.Close()
	b, err := io.ReadAll(file)
	if err != nil {
		return fmt.Errorf("Failed to read config file: %s", err)
	}
	cfg, err := ParseConfig(b)
	if err != nil {
		return fmt.Errorf("Failed to parse config file: %s", err)
	}

	config, preflightClient, err := ValidateAndCombineConfig(log, cfg, Flags)
	if err != nil {
		return fmt.Errorf("While evaluating configuration: %v", err)
	}

	group, gctx := errgroup.WithContext(baseCtx)
	defer func() {
		// Cancel all goroutines in the group and wait for them to stop before
		// returning, so that no goroutine outlives Run. Any error from the
		// group is appended to the error being returned.
		cancel()
		if groupErr := group.Wait(); groupErr != nil {
			returnErr = multierror.Append(
				returnErr,
				fmt.Errorf("failed to wait for controller-runtime component to stop: %v", groupErr),
			)
		}
	}()

	{
		// HTTP server exposing health, and optionally profiling and metrics
		// endpoints, on a fixed port.
		server := http.NewServeMux()
		const serverAddress = ":8081"
		log := log.WithName("APIServer").WithValues("addr", serverAddress)

		if Flags.Profiling {
			log.Info("Profiling endpoints enabled", "path", "/debug/pprof")
			server.HandleFunc("/debug/pprof/", pprof.Index)
			server.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
			server.HandleFunc("/debug/pprof/profile", pprof.Profile)
			server.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
			server.HandleFunc("/debug/pprof/trace", pprof.Trace)
		}
		if Flags.Prometheus {
			log.Info("Metrics endpoints enabled", "path", "/metrics")
			prometheus.MustRegister(metricPayloadSize)
			server.Handle("/metrics", promhttp.Handler())
		}

		// Health check endpoint. Since we haven't figured out a good way of
		// knowing what "ready" means for the agent, we just return 200 OK
		// unconditionally. The goal is to satisfy some Kubernetes
		// distributions, like OpenShift, that require a liveness and health
		// probe to be present for each pod.
		log.Info("Healthz endpoints enabled", "path", "/healthz")
		server.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
		})
		log.Info("Readyz endpoints enabled", "path", "/readyz")
		server.HandleFunc("/readyz", func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
		})

		group.Go(func() error {
			listenCtx := klog.NewContext(gctx, log)
			err := listenAndServe(
				listenCtx,
				&http.Server{
					Addr:        serverAddress,
					Handler:     server,
					BaseContext: func(_ net.Listener) context.Context { return listenCtx },
				},
			)
			if err != nil {
				return fmt.Errorf("APIServer: %s", err)
			}
			return nil
		})
	}

	// The VenafiConnection client is a controller-runtime component and must
	// be started in the background before it can be used.
	_, isVenConn := preflightClient.(*client.VenConnClient)
	if isVenConn {
		group.Go(func() error {
			err := preflightClient.(manager.Runnable).Start(gctx)
			if err != nil {
				return fmt.Errorf("failed to start a controller-runtime component: %v", err)
			}
			// The agent must stop if the controller-runtime component stops.
			cancel()
			return nil
		})
	}

	// To help users notice issues with the agent, we show the error messages in
	// the agent pod's events.
	eventf, err := newEventf(log)
	if err != nil {
		return fmt.Errorf("failed to create event recorder: %v", err)
	}

	// Check if secret encryption is enabled via environment variable.
	// When enabled, secret data will be kept for encryption instead of being
	// redacted. Failure to set up the encryptor is non-fatal: the agent falls
	// back to not sending secret data.
	encryptSecrets := strings.ToLower(os.Getenv("ARK_SEND_SECRET_VALUES")) == "true"
	var encryptor envelope.Encryptor
	if encryptSecrets {
		encryptor, err = loadEncryptor(gctx, preflightClient)
		if err != nil {
			log.Error(err, "Failed to set up encryptor for secrets, secret data will not be sent")
			encryptSecrets = false
		}
	}

	dataGatherers := map[string]datagatherer.DataGatherer{}

	// load datagatherer config and boot each one
	for _, dgConfig := range config.DataGatherers {
		kind := dgConfig.Kind
		if dgConfig.DataPath != "" {
			// NOTE(review): this assignment is dead code — the immediately
			// following return makes a data-path override a hard error, so
			// the "run as Local" behavior the message suggests never happens.
			// Confirm whether the override should be supported or the
			// assignment removed.
			kind = "local"
			return fmt.Errorf("running data gatherer %s of type %s as Local, data-path override present: %s", dgConfig.Name, dgConfig.Kind, dgConfig.DataPath)
		}

		newDg, err := dgConfig.Config.NewDataGatherer(gctx)
		if err != nil {
			return fmt.Errorf("failed to instantiate %q data gatherer %q: %v", kind, dgConfig.Name, err)
		}

		// The dynamic gatherer gets extra wiring: annotation/label exclusion
		// regexes, and — when secret encryption is on and it watches core
		// Secrets — the encryptor.
		dynDg, isDynamicGatherer := newDg.(*k8sdynamic.DataGathererDynamic)
		if isDynamicGatherer {
			dynDg.ExcludeAnnotKeys = config.ExcludeAnnotationKeysRegex
			dynDg.ExcludeLabelKeys = config.ExcludeLabelKeysRegex
			gvr := dynDg.GVR()
			if encryptSecrets && gvr.Resource == "secrets" && gvr.Group == "" {
				log.Info("Secret encryption enabled for datagatherer")
				dynDg.Encryptor = encryptor
			}
		}

		log.V(logs.Debug).Info("Starting DataGatherer", "name", dgConfig.Name)

		// start the data gatherers and wait for the cache sync
		group.Go(func() error {
			// Most implementations of `DataGatherer.Run` return immediately.
			// Only the Dynamic DataGatherer starts an informer which runs and
			// blocks until the supplied channel is closed.
			// For this reason, we must allow these errgroup Go routines to exit
			// without cancelling the other Go routines in the group.
			if err := newDg.Run(gctx); err != nil {
				return fmt.Errorf("failed to start %q data gatherer %q: %v", kind, dgConfig.Name, err)
			}
			return nil
		})

		// Regardless of success, this data gatherer has been given a
		// chance to sync its cache and we will now continue as normal. We
		// assume that the informers will either recover or the log messages
		// above will help operators correct the issue.
		dataGatherers[dgConfig.Name] = newDg
	}

	// Wait for 5 seconds for all informers to sync. If they fail to sync
	// we continue (as we have no way to know if they will recover or not).
	//
	// bootCtx is a context with a timeout to allow the informer 5
	// seconds to perform an initial sync. It may fail, and that's fine
	// too, it will backoff and retry of its own accord. Initial boot
	// will only be delayed by a max of 5 seconds.
	bootCtx, bootCancel := context.WithTimeout(gctx, 5*time.Second)
	defer bootCancel()
	var timedoutDGs []string
	for _, dgConfig := range config.DataGatherers {
		dg := dataGatherers[dgConfig.Name]
		// wait for the informer to complete an initial sync, we do this to
		// attempt to have an initial set of data for the first upload of
		// the run.
		if err := dg.WaitForCacheSync(bootCtx); err != nil {
			// log sync failure, this might recover in future
			if errors.Is(err, k8sdynamic.ErrCacheSyncTimeout) {
				timedoutDGs = append(timedoutDGs, dgConfig.Name)
			} else {
				log.V(logs.Info).Info("Failed to sync cache for datagatherer", "kind", dgConfig.Kind, "name", dgConfig.Name, "error", err)
			}
		}
	}
	if len(timedoutDGs) > 0 {
		log.V(logs.Info).Info("Skipping datagatherers for CRDs that can't be found in Kubernetes", "datagatherers", timedoutDGs)
	}

	// begin the datagathering loop, periodically sending data to the
	// configured output using data in datagatherer caches or refreshing from
	// APIs each cycle depending on datagatherer implementation.
	// If any of the go routines exit (with nil or error) the main context will
	// be cancelled, which will cause this blocking loop to exit
	// instead of waiting for the time period.
	for {
		if err := gatherAndOutputData(gctx, eventf, config, preflightClient, dataGatherers); err != nil {
			return err
		}
		if config.OneShot {
			break
		}
		select {
		case <-gctx.Done():
			return nil
		case <-time.After(config.Period):
		}
	}
	return nil
}

// loadEncryptor sets up an encryptor for encrypting secrets. For now, it just
// loads a hardcoded public key. Only supported when the preflight client is a
// CyberArk client; any other client type is an error.
func loadEncryptor(ctx context.Context, preflightClient client.Client) (envelope.Encryptor, error) {
	cyberarkClient, ok := preflightClient.(*client.CyberArkClient)
	if !ok {
		return nil, fmt.Errorf("secret encryption is only supported for CyberArk clients")
	}
	cfg, err := cyberarkClient.Config()
	if err != nil {
		return nil, fmt.Errorf("failed to get CyberArk client config: %w", err)
	}
	fetcher, err := keyfetch.NewClient(ctx, cyberarkClient.DiscoveryClient(), cfg, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create key fetcher for secret encryption: %w", err)
	}
	encryptor, err := rsa.NewEncryptor(fetcher)
	if err != nil {
		return nil, fmt.Errorf("failed to create encryptor for secret encryption: %w", err)
	}
	return encryptor, nil
}

// Creates an event recorder for the agent's Pod object. Expects the env var
// POD_NAME to contain the pod name. Note that the RBAC rule allowing sending
// events is attached to the pod's service account, not the impersonated service
// account (venafi-connection).
func newEventf(log logr.Logger) (Eventf, error) { podName := os.Getenv("POD_NAME") podNode := os.Getenv("POD_NODE") podUID := os.Getenv("POD_UID") podNamespace := os.Getenv("POD_NAMESPACE") if podName == "" || podNode == "" || podUID == "" || podNamespace == "" { log.Info( "Pod event recorder disabled", "reason", "The agent does not appear to be running in a Kubernetes cluster.", "detail", "When running in a Kubernetes cluster the following environment variables must be set: POD_NAME, POD_NODE, POD_UID, POD_NAMESPACE", ) return func(eventType, reason, msg string, args ...any) {}, nil } restcfg, err := kubeconfig.LoadRESTConfig("") if err != nil { return nil, fmt.Errorf("failed to load kubeconfig: %v", err) } scheme := runtime.NewScheme() _ = corev1.AddToScheme(scheme) var eventf Eventf eventClient, err := kubernetes.NewForConfig(restcfg) if err != nil { return eventf, fmt.Errorf("failed to create event client: %v", err) } broadcaster := record.NewBroadcaster() broadcaster.StartRecordingToSink(&clientgocorev1.EventSinkImpl{Interface: eventClient.CoreV1().Events(podNamespace)}) eventRec := broadcaster.NewRecorder(scheme, corev1.EventSource{Component: "venafi-kubernetes-agent", Host: podNode}) eventf = func(eventType, reason, msg string, args ...any) { eventRec.Eventf(&corev1.Pod{ObjectMeta: v1.ObjectMeta{Name: podName, Namespace: podNamespace, UID: types.UID(podUID)}}, eventType, reason, msg, args...) } return eventf, nil } // Like Printf but for sending events to the agent's Pod object. 
type Eventf func(eventType, reason, msg string, args ...any)

// gatherAndOutputData obtains data readings (either from a local file when
// config.InputPath is set, or by running the data gatherers) and uploads them
// to the backend, retrying with exponential backoff on upload failure.
func gatherAndOutputData(ctx context.Context, eventf Eventf, config CombinedConfig, preflightClient client.Client, dataGatherers map[string]datagatherer.DataGatherer) error {
	log := klog.FromContext(ctx).WithName("gatherAndOutputData")
	var readings []*api.DataReading

	if config.InputPath != "" {
		// Offline mode: readings are read back from a previously saved file.
		log.V(logs.Debug).Info("Reading data from local file", "inputPath", config.InputPath)
		data, err := os.ReadFile(config.InputPath)
		if err != nil {
			return fmt.Errorf("failed to read local data file: %s", err)
		}
		err = json.Unmarshal(data, &readings)
		if err != nil {
			return fmt.Errorf("failed to unmarshal local data file: %s", err)
		}
	} else {
		var err error
		readings, err = gatherData(ctx, config, dataGatherers)
		if err != nil {
			return err
		}
	}

	{
		group, ctx := errgroup.WithContext(ctx)

		// Retry uploads with exponential backoff, notifying via Pod events
		// and logs on each failed attempt.
		backOff := backoff.NewExponentialBackOff()
		backOff.InitialInterval = 30 * time.Second
		backOff.MaxInterval = 3 * time.Minute
		notificationFunc := backoff.Notify(func(err error, t time.Duration) {
			eventf("Warning", "PushingErr", "retrying in %v after error: %s", t, err)
			log.Error(err, "Warning: PushingErr: will retry", "retry_after", t)
		})
		// Each individual attempt is also bounded by BackoffMaxTime.
		post := func() (any, error) {
			postCtx, cancel := context.WithTimeout(ctx, config.BackoffMaxTime)
			defer cancel()
			return struct{}{}, postData(postCtx, config, preflightClient, readings)
		}
		group.Go(func() error {
			_, err := backoff.Retry(ctx, post, backoff.WithBackOff(backOff), backoff.WithNotify(notificationFunc), backoff.WithMaxElapsedTime(config.BackoffMaxTime))
			return err
		})

		groupErr := group.Wait()
		if groupErr != nil {
			return fmt.Errorf("got a fatal error from one or more upload actions: %s", groupErr)
		}
	}

	return nil
}

// gatherData runs every configured data gatherer and collects their results
// into DataReadings. Individual gatherer failures are accumulated; they only
// abort the whole gather when config.StrictMode is set.
func gatherData(ctx context.Context, config CombinedConfig, dataGatherers map[string]datagatherer.DataGatherer) ([]*api.DataReading, error) {
	log := klog.FromContext(ctx).WithName("gatherData")
	var readings []*api.DataReading
	var dgError *multierror.Error
	for k, dg := range dataGatherers {
		dgData, count, err := dg.Fetch(ctx)
		if err != nil {
			dgError = multierror.Append(dgError, fmt.Errorf("error in datagatherer %s: %w", k, err))
			continue
		}
		{
			// Not all datagatherers return a count.
			// If `count == -1` it means that the datagatherer does not support returning a count.
			log := log
			if count >= 0 {
				log = log.WithValues("count", count)
			}
			log.V(logs.Debug).Info("Successfully gathered", "name", k)
		}
		readings = append(readings, &api.DataReading{
			ClusterID:     config.ClusterID,
			DataGatherer:  k,
			Timestamp:     api.Time{Time: time.Now()},
			Data:          dgData,
			SchemaVersion: schemaVersion,
		})
	}
	if dgError != nil {
		// Render accumulated failures as a bulleted list.
		dgError.ErrorFormat = func(es []error) string {
			points := make([]string, len(es))
			for i, err := range es {
				points[i] = fmt.Sprintf("* %s", err)
			}
			return fmt.Sprintf(
				"The following %d data gatherer(s) have failed:\n\t%s",
				len(es), strings.Join(points, "\n\t"))
		}
	}
	// ErrorOrNil handles a nil *multierror.Error receiver safely.
	if config.StrictMode && dgError.ErrorOrNil() != nil {
		return nil, fmt.Errorf("halting datagathering in strict mode due to error: %s", dgError.ErrorOrNil())
	}
	return readings, nil
}

// postData uploads the readings through the configured client, passing along
// the cluster identity fields from the combined configuration.
func postData(ctx context.Context, config CombinedConfig, preflightClient client.Client, readings []*api.DataReading) error {
	log := klog.FromContext(ctx).WithName("postData")

	err := preflightClient.PostDataReadingsWithOptions(ctx, readings, client.Options{
		ClusterName:        config.ClusterName,
		ClusterDescription: config.ClusterDescription,
		ClaimableCerts:     config.ClaimableCerts,
		// orgID and clusterID are not required for Venafi Cloud auth
		OrgID:     config.OrganizationID,
		ClusterID: config.ClusterID,
	})
	if err != nil {
		return fmt.Errorf("post to server failed: %+v", err)
	}
	log.Info("Data sent successfully")

	return nil
}

// listenAndServe starts the supplied HTTP server and stops it gracefully when
// the supplied context is cancelled.
// It returns when the graceful server shutdown is complete or when the server
// exits with an error.
// If the server fails to start, it returns the server error.
// If the server fails to shutdown gracefully, it returns the shutdown error.
// The server is given 3 seconds to shutdown gracefully before it is stopped
// forcefully.
func listenAndServe(ctx context.Context, server *http.Server) error {
	log := klog.FromContext(ctx).WithName("ListenAndServe")
	log.V(logs.Debug).Info("Starting")

	// listenCTX outlives ctx (WithoutCancel) so that a server failure can be
	// distinguished from a caller-initiated shutdown; its cancel cause carries
	// the ListenAndServe error.
	listenCTX, listenCancelCause := context.WithCancelCause(context.WithoutCancel(ctx))
	go func() {
		err := server.ListenAndServe()
		listenCancelCause(fmt.Errorf("ListenAndServe: %s", err))
	}()

	select {
	case <-listenCTX.Done():
		// The server stopped on its own (e.g. failed to bind); no shutdown needed.
		log.V(logs.Debug).Info("Shutdown skipped", "reason", "Server already stopped")
		return context.Cause(listenCTX)
	case <-ctx.Done():
		log.V(logs.Debug).Info("Shutting down")
	}

	// Allow 3 seconds for in-flight requests to complete, then force-close.
	shutdownCTX, shutdownCancel := context.WithTimeout(context.WithoutCancel(ctx), time.Second*3)
	shutdownErr := server.Shutdown(shutdownCTX)
	shutdownCancel()
	if shutdownErr != nil {
		shutdownErr = fmt.Errorf("Shutdown: %s", shutdownErr)
	}

	closeErr := server.Close()
	if closeErr != nil {
		closeErr = fmt.Errorf("Close: %s", closeErr)
	}

	log.V(logs.Debug).Info("Shutdown complete")
	return errors.Join(shutdownErr, closeErr)
}


================================================
FILE: pkg/client/client.go
================================================
package client

import (
	"context"
	"fmt"
	"strings"

	"github.com/jetstack/preflight/api"
)

type (
	// Options is the struct describing additional information pertinent to an agent that isn't a data reading
	// These fields will then be uploaded together with data readings.
	Options struct {
		// Only used with Jetstack Secure.
		OrgID string
		// Only used with Jetstack Secure.
		ClusterID string
		// Used for Venafi Cloud and MachineHub mode.
		ClusterName string
		// Used for Venafi Cloud and MachineHub mode.
		ClusterDescription string
		// ClaimableCerts controls whether discovered certs can be claimed by other tenants.
		// true = certs are left unassigned, available for any tenant to claim.
		// false (default) = certs are owned by this cluster's tenant.
		ClaimableCerts bool
	}

	// The Client interface describes types that perform requests against the Jetstack Secure backend.
	Client interface {
		PostDataReadingsWithOptions(ctx context.Context, readings []*api.DataReading, options Options) error
	}

	// The Credentials interface describes methods for credential types to implement for verification.
	Credentials interface {
		IsClientSet() (ok bool, why string)
		Validate() error
	}
)

// fullURL joins baseURL and path with exactly one "/" between them, stripping
// any number of trailing slashes from baseURL and leading slashes from path.
func fullURL(baseURL, path string) string {
	base := baseURL
	for strings.HasSuffix(base, "/") {
		base = strings.TrimSuffix(base, "/")
	}
	for strings.HasPrefix(path, "/") {
		path = strings.TrimPrefix(path, "/")
	}
	return fmt.Sprintf("%s/%s", base, path)
}


================================================
FILE: pkg/client/client_api_token.go
================================================
package client

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"path/filepath"
	"time"

	"k8s.io/client-go/transport"
	"k8s.io/klog/v2"

	"github.com/jetstack/preflight/api"
	"github.com/jetstack/preflight/pkg/version"
)

type (
	// The APITokenClient type is a Client implementation used to upload data readings to the Jetstack Secure platform
	// using API tokens as its authentication method.
	APITokenClient struct {
		// apiToken is sent as a Bearer token on every request.
		apiToken string
		// baseURL is the root of the Jetstack Secure API.
		baseURL string
		// agentMetadata is attached to every upload payload.
		agentMetadata *api.AgentMetadata
		// client is the HTTP client used for all requests.
		client *http.Client
	}
)

// NewAPITokenClient returns a new instance of the APITokenClient type that will perform HTTP requests using
// the provided API token for authentication.
func NewAPITokenClient(agentMetadata *api.AgentMetadata, apiToken, baseURL string) (*APITokenClient, error) { if baseURL == "" { return nil, fmt.Errorf("cannot create APITokenClient: baseURL cannot be empty") } return &APITokenClient{ apiToken: apiToken, agentMetadata: agentMetadata, baseURL: baseURL, client: &http.Client{ Timeout: time.Minute, Transport: transport.DebugWrappers(http.DefaultTransport), }, }, nil } // PostDataReadingsWithOptions uploads the slice of api.DataReading to the Jetstack Secure backend to be processed for later // viewing in the user-interface. func (c *APITokenClient) PostDataReadingsWithOptions(ctx context.Context, readings []*api.DataReading, opts Options) error { return c.postDataReadings(ctx, opts.OrgID, opts.ClusterID, readings) } // PostDataReadings uploads the slice of api.DataReading to the Jetstack Secure backend to be processed for later // viewing in the user-interface. func (c *APITokenClient) postDataReadings(ctx context.Context, orgID, clusterID string, readings []*api.DataReading) error { payload := api.DataReadingsPost{ AgentMetadata: c.agentMetadata, DataGatherTime: time.Now().UTC(), DataReadings: readings, } data, err := json.Marshal(payload) if err != nil { return err } klog.FromContext(ctx).V(2).Info( "uploading data readings", "url", filepath.Join("/api/v1/org", orgID, "datareadings", clusterID), "cluster_id", clusterID, "data_readings_count", len(readings), "data_size_bytes", len(data), ) res, err := c.post(ctx, filepath.Join("/api/v1/org", orgID, "datareadings", clusterID), bytes.NewBuffer(data)) if err != nil { return err } defer res.Body.Close() if code := res.StatusCode; code < 200 || code >= 300 { errorContent := "" body, err := io.ReadAll(res.Body) if err == nil { errorContent = string(body) } return fmt.Errorf("received response with status code %d. Body: [%s]", code, errorContent) } return nil } // Post performs an HTTP POST request. 
// post performs an HTTP POST to baseURL+path with the JSON content type, the
// Bearer token and the agent User-Agent set. The caller owns the response body.
func (c *APITokenClient) post(ctx context.Context, path string, body io.Reader) (*http.Response, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fullURL(c.baseURL, path), body)
	if err != nil {
		return nil, err
	}

	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.apiToken))
	version.SetUserAgent(req)

	return c.client.Do(req)
}


================================================
FILE: pkg/client/client_cyberark.go
================================================
package client

import (
	"context"
	"crypto/x509"
	"encoding/base64"
	"encoding/pem"
	"fmt"
	"net/http"
	"slices"

	"github.com/go-logr/logr"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/klog/v2"

	"github.com/jetstack/preflight/api"
	"github.com/jetstack/preflight/internal/cyberark"
	"github.com/jetstack/preflight/internal/cyberark/dataupload"
	"github.com/jetstack/preflight/internal/cyberark/servicediscovery"
	"github.com/jetstack/preflight/pkg/logs"
	"github.com/jetstack/preflight/pkg/version"
)

// CyberArkClient is a client for publishing data readings to CyberArk's discoverycontext API.
type CyberArkClient struct {
	// configLoader re-loads credentials/config on each upload.
	configLoader cyberark.ClientConfigLoader
	// httpClient is used for both service discovery and data upload.
	httpClient *http.Client
	// discoveryClient resolves the CyberArk service endpoints for the subdomain.
	discoveryClient *servicediscovery.Client
}

// Compile-time check that CyberArkClient satisfies the Client interface.
var _ Client = &CyberArkClient{}

// NewCyberArk initializes a CyberArk client using configuration from environment variables.
// It requires an HTTP client to be provided, which will be used for making requests.
// The environment variables ARK_SUBDOMAIN, ARK_USERNAME, and ARK_SECRET must be set for authentication.
// Sending secrets is controlled by the ARK_SEND_SECRETS environment variable (defaults to "false").
// If sending secrets is enabled, the hardcoded public key will be loaded and an encryptor will be created.
// If the configuration is invalid or missing, an error is returned.
func NewCyberArk(httpClient *http.Client) (*CyberArkClient, error) { configLoader := cyberark.LoadClientConfigFromEnvironment cfg, err := configLoader() if err != nil { return nil, err } return &CyberArkClient{ configLoader: configLoader, httpClient: httpClient, discoveryClient: servicediscovery.New(httpClient, cfg.Subdomain), }, nil } // PostDataReadingsWithOptions uploads data readings to CyberArk. // It converts the supplied data readings into a snapshot format expected by CyberArk. // Deleted resources are excluded from the snapshot because they are not needed by CyberArk. // It then minimizes the snapshot to avoid uploading unnecessary data. // It initializes a data upload client with the configured HTTP client and credentials, // then uploads a snapshot. // The supplied Options are not used by this publisher. func (o *CyberArkClient) PostDataReadingsWithOptions(ctx context.Context, readings []*api.DataReading, opts Options) error { log := klog.FromContext(ctx) cfg, err := o.configLoader() if err != nil { return fmt.Errorf("failed to load config: %w", err) } serviceMap, tenantUUID, err := o.discoveryClient.DiscoverServices(ctx) if err != nil { return err } snapshot := baseSnapshotFromOptions(opts) if err := convertDataReadings(defaultExtractorFunctions, readings, &snapshot); err != nil { return fmt.Errorf("while converting data readings: %s", err) } // Minimize the snapshot to reduce size and improve privacy minimizeSnapshot(log.V(logs.Debug), &snapshot) datauploadClient, err := cyberark.NewDatauploadClient(ctx, o.httpClient, serviceMap, tenantUUID, cfg) if err != nil { return fmt.Errorf("while initializing data upload client: %s", err) } err = datauploadClient.PutSnapshot(ctx, snapshot) if err != nil { return fmt.Errorf("while uploading snapshot: %s", err) } return nil } func (o *CyberArkClient) DiscoveryClient() *servicediscovery.Client { return o.discoveryClient } func (o *CyberArkClient) Config() (cyberark.ClientConfig, error) { return o.configLoader() } 
// baseSnapshotFromOptions creates a base snapshot with common fields from the provided options. // This includes the cluster name, description, and agent version. // Other fields like ClusterID and K8SVersion need to be populated separately. func baseSnapshotFromOptions(opts Options) dataupload.Snapshot { return dataupload.Snapshot{ ClusterName: opts.ClusterName, ClusterDescription: opts.ClusterDescription, AgentVersion: version.PreflightVersion, } } // extractOIDCFromReading converts the opaque data from a OIDCDiscoveryData // data reading to allow access to the OIDC fields within. func extractOIDCFromReading(reading *api.DataReading, target *dataupload.Snapshot) error { if reading == nil { return fmt.Errorf("programmer mistake: the DataReading must not be nil") } data, ok := reading.Data.(*api.OIDCDiscoveryData) if !ok { return fmt.Errorf( "programmer mistake: the DataReading must have data type *api.OIDCDiscoveryData. "+ "This DataReading (%s) has data type %T", reading.DataGatherer, reading.Data) } target.OIDCConfig = data.OIDCConfig target.OIDCConfigError = data.OIDCConfigError target.JWKS = data.JWKS target.JWKSError = data.JWKSError return nil } // extractClusterIDAndServerVersionFromReading converts the opaque data from a DiscoveryData // data reading to allow access to the Kubernetes version fields within. func extractClusterIDAndServerVersionFromReading(reading *api.DataReading, target *dataupload.Snapshot) error { if reading == nil { return fmt.Errorf("programmer mistake: the DataReading must not be nil") } data, ok := reading.Data.(*api.DiscoveryData) if !ok { return fmt.Errorf( "programmer mistake: the DataReading must have data type *api.DiscoveryData. 
"+ "This DataReading (%s) has data type %T", reading.DataGatherer, reading.Data) } target.ClusterID = data.ClusterID if data.ServerVersion != nil { target.K8SVersion = data.ServerVersion.GitVersion } return nil } // extractResourceListFromReading converts the opaque data from a DynamicData // data reading to runtime.Object resources, to allow access to the metadata and // other kubernetes API fields. // Deleted resources are skipped because the CyberArk Discovery and Context service // does not need to see resources that no longer exist. func extractResourceListFromReading(reading *api.DataReading, target *[]runtime.Object) error { if reading == nil { return fmt.Errorf("programmer mistake: the DataReading must not be nil") } data, ok := reading.Data.(*api.DynamicData) if !ok { return fmt.Errorf( "programmer mistake: the DataReading must have data type *api.DynamicData. "+ "This DataReading (%s) has data type %T", reading.DataGatherer, reading.Data) } resources := make([]runtime.Object, 0, len(data.Items)) for i, item := range data.Items { if !item.DeletedAt.IsZero() { continue } if resource, ok := item.Resource.(runtime.Object); ok { resources = append(resources, resource) } else { return fmt.Errorf( "programmer mistake: the DynamicData items must have Resource type runtime.Object. "+ "This item (%d) has Resource type %T", i, item.Resource) } } *target = resources return nil } // defaultExtractorFunctions maps data gatherer names to functions that extract // their data from DataReadings into the appropriate fields of a Snapshot. // Each function takes a DataReading and a pointer to a Snapshot, // and populates the relevant field(s) of the Snapshot based on the DataReading's data. // Deleted resources are excluded from the snapshot because they are not needed by CyberArk. 
var defaultExtractorFunctions = map[string]func(*api.DataReading, *dataupload.Snapshot) error{
	// Scalar fields: OIDC discovery data and cluster identity/version.
	"ark/oidc":      extractOIDCFromReading,
	"ark/discovery": extractClusterIDAndServerVersionFromReading,
	// Resource lists: each gatherer maps to one Snapshot slice field.
	"ark/secrets": func(r *api.DataReading, s *dataupload.Snapshot) error {
		return extractResourceListFromReading(r, &s.Secrets)
	},
	"ark/serviceaccounts": func(r *api.DataReading, s *dataupload.Snapshot) error {
		return extractResourceListFromReading(r, &s.ServiceAccounts)
	},
	"ark/roles": func(r *api.DataReading, s *dataupload.Snapshot) error {
		return extractResourceListFromReading(r, &s.Roles)
	},
	"ark/clusterroles": func(r *api.DataReading, s *dataupload.Snapshot) error {
		return extractResourceListFromReading(r, &s.ClusterRoles)
	},
	"ark/rolebindings": func(r *api.DataReading, s *dataupload.Snapshot) error {
		return extractResourceListFromReading(r, &s.RoleBindings)
	},
	"ark/clusterrolebindings": func(r *api.DataReading, s *dataupload.Snapshot) error {
		return extractResourceListFromReading(r, &s.ClusterRoleBindings)
	},
	"ark/jobs": func(r *api.DataReading, s *dataupload.Snapshot) error {
		return extractResourceListFromReading(r, &s.Jobs)
	},
	"ark/cronjobs": func(r *api.DataReading, s *dataupload.Snapshot) error {
		return extractResourceListFromReading(r, &s.CronJobs)
	},
	"ark/deployments": func(r *api.DataReading, s *dataupload.Snapshot) error {
		return extractResourceListFromReading(r, &s.Deployments)
	},
	"ark/statefulsets": func(r *api.DataReading, s *dataupload.Snapshot) error {
		return extractResourceListFromReading(r, &s.Statefulsets)
	},
	"ark/daemonsets": func(r *api.DataReading, s *dataupload.Snapshot) error {
		return extractResourceListFromReading(r, &s.Daemonsets)
	},
	"ark/pods": func(r *api.DataReading, s *dataupload.Snapshot) error {
		return extractResourceListFromReading(r, &s.Pods)
	},
	"ark/configmaps": func(r *api.DataReading, s *dataupload.Snapshot) error {
		return extractResourceListFromReading(r, &s.ConfigMaps)
	},
	"ark/esoexternalsecrets": func(r *api.DataReading, s *dataupload.Snapshot) error {
		return extractResourceListFromReading(r, &s.ExternalSecrets)
	},
	"ark/esosecretstores": func(r *api.DataReading, s *dataupload.Snapshot) error {
		return extractResourceListFromReading(r, &s.SecretStores)
	},
	"ark/esoclusterexternalsecrets": func(r *api.DataReading, s *dataupload.Snapshot) error {
		return extractResourceListFromReading(r, &s.ClusterExternalSecrets)
	},
	"ark/esoclustersecretstores": func(r *api.DataReading, s *dataupload.Snapshot) error {
		return extractResourceListFromReading(r, &s.ClusterSecretStores)
	},
}

// convertDataReadings processes a list of DataReadings using the provided
// extractor functions to populate the fields of the target snapshot.
// It ensures that all expected data gatherers are handled and that there are
// no unhandled data gatherers. If any discrepancies are found, or if any
// extractor function returns an error, it returns an error.
// The extractorFunctions map should contain functions for each expected
// DataGatherer name, which will be called with the corresponding DataReading
// and the target snapshot to populate the relevant fields.
// Deleted resources are excluded from the snapshot because they are not needed by CyberArk.
func convertDataReadings(
	extractorFunctions map[string]func(*api.DataReading, *dataupload.Snapshot) error,
	readings []*api.DataReading,
	target *dataupload.Snapshot,
) error {
	// Track which expected gatherers were seen (missing) and which readings
	// had no extractor (unhandled); both are reported as errors below.
	expectedDataGatherers := sets.KeySet(extractorFunctions)
	unhandledDataGatherers := sets.New[string]()
	missingDataGatherers := expectedDataGatherers.Clone()
	for _, reading := range readings {
		dataGathererName := reading.DataGatherer
		extractFunc, found := extractorFunctions[dataGathererName]
		if !found {
			unhandledDataGatherers.Insert(dataGathererName)
			continue
		}
		missingDataGatherers.Delete(dataGathererName)
		// Call the extractor function to populate the relevant field in the target snapshot.
		if err := extractFunc(reading, target); err != nil {
			return fmt.Errorf("while extracting data reading %s: %s", dataGathererName, err)
		}
	}
	if missingDataGatherers.Len() > 0 || unhandledDataGatherers.Len() > 0 {
		return fmt.Errorf(
			"unexpected data gatherers, missing: %v, unhandled: %v",
			sets.List(missingDataGatherers),
			sets.List(unhandledDataGatherers),
		)
	}
	return nil
}

// minimizeSnapshot reduces the size of the snapshot by removing unnecessary data.
//
// This reduces the bandwidth used when uploading the snapshot to CyberArk,
// it reduces the storage used by CyberArk to store the snapshot, and
// it provides better privacy for the cluster being scanned; only the necessary
// data is included in the snapshot.
//
// This is a best-effort attempt to minimize the snapshot size. If an error occurs
// during analysis of a secret, the error is logged and the secret is kept in the
// snapshot (i.e., not excluded). Errors do not prevent the snapshot from being uploaded.
//
// It performs the following minimization steps:
//
// 1. Removal of non-clientauth TLS secrets: It filters out TLS secrets that do
// not contain a client certificate. This is done to avoid uploading large
// TLS secrets that are not relevant for the CyberArk Discovery and Context
// service.
//
// TODO(wallrj): Remove more from the snapshot as we learn more about what
// resources the Discovery and Context service require.
func minimizeSnapshot(log logr.Logger, snapshot *dataupload.Snapshot) { originalSecretCount := len(snapshot.Secrets) filteredSecrets := make([]runtime.Object, 0, originalSecretCount) for _, secret := range snapshot.Secrets { if isExcludableSecret(log, secret) { continue } filteredSecrets = append(filteredSecrets, secret) } snapshot.Secrets = filteredSecrets log.Info("Minimized snapshot", "originalSecretCount", originalSecretCount, "filteredSecretCount", len(snapshot.Secrets)) } // isExcludableSecret filters out TLS secrets that are definitely of no interest // to CyberArk's Discovery and Context service, specifically TLS secrets that do // not contain a client certificate. // // The Secret is kept if there is any doubt or if there is a problem decoding // its contents. // // Secrets are obtained by a DynamicClient, so they have type // *unstructured.Unstructured. func isExcludableSecret(log logr.Logger, obj runtime.Object) bool { // Fast path: type assertion and kind/type checks unstructuredObj, ok := obj.(*unstructured.Unstructured) if !ok { log.Info("Object is not a Unstructured", "type", fmt.Sprintf("%T", obj)) return false } if unstructuredObj.GetKind() != "Secret" || unstructuredObj.GetAPIVersion() != "v1" { return false } log = log.WithValues("namespace", unstructuredObj.GetNamespace(), "name", unstructuredObj.GetName()) dataMap, found, err := unstructured.NestedMap(unstructuredObj.Object, "data") if err != nil || !found { log.Info("Secret data missing or not a map") return false } secretType, found, err := unstructured.NestedString(unstructuredObj.Object, "type") if err != nil || !found { log.Info("Secret object has no type") return false } if corev1.SecretType(secretType) != corev1.SecretTypeTLS { log.Info("Secrets of this type are never excluded", "type", secretType) return false } return isExcludableTLSSecret(log, dataMap) } // isExcludableTLSSecret checks if a TLS Secret contains a client certificate. 
// It returns true if the Secret is a TLS Secret and its tls.crt does not // contain a client certificate. func isExcludableTLSSecret(log logr.Logger, dataMap map[string]any) bool { tlsCrtRaw, found := dataMap[corev1.TLSCertKey] if !found { log.Info("TLS Secret does not contain tls.crt key") return true } // Decode base64 if necessary (K8s secrets store data as base64-encoded strings) var tlsCrtBytes []byte switch v := tlsCrtRaw.(type) { case string: decoded, err := base64.StdEncoding.DecodeString(v) if err != nil { log.Info("Failed to decode tls.crt base64", "error", err.Error()) return true } tlsCrtBytes = decoded case []byte: tlsCrtBytes = v default: log.Info("tls.crt is not a string or byte slice", "type", fmt.Sprintf("%T", v)) return true } // Parse PEM certificate chain hasClientCert := searchPEM(tlsCrtBytes, func(block *pem.Block) bool { if block.Type != "CERTIFICATE" || len(block.Bytes) == 0 { return false } cert, err := x509.ParseCertificate(block.Bytes) if err != nil { log.Info("Failed to parse PEM block as X.509 certificate", "error", err.Error()) return false } // Check if the certificate has the ClientAuth EKU return isClientCertificate(cert) }) return !hasClientCert } // searchPEM parses the given PEM data and applies the visitor function to each // PEM block found. If the visitor function returns true for any block, the search // stops and searchPEM returns true. If no blocks cause the visitor to return true, // searchPEM returns false. func searchPEM(data []byte, visitor func(*pem.Block) bool) bool { if visitor == nil { return false } // Parse the PEM encoded certificate chain var block *pem.Block rest := data for { block, rest = pem.Decode(rest) if block == nil { break } if visitor(block) { return true } } return false } // isClientCertificate checks if the given certificate is a client certificate // by checking if it has the ClientAuth EKU. 
// isClientCertificate reports whether cert is a non-CA certificate carrying
// the ClientAuth extended key usage. A nil cert is never a client certificate.
func isClientCertificate(cert *x509.Certificate) bool {
	if cert == nil {
		return false
	}
	// Skip CA certificates
	if cert.IsCA {
		return false
	}
	// Check if the certificate has the ClientAuth EKU
	return slices.Contains(cert.ExtKeyUsage, x509.ExtKeyUsageClientAuth)
}


================================================
FILE: pkg/client/client_cyberark_convertdatareadings_test.go
================================================
package client

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/base64"
	"encoding/pem"
	"math/big"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/version"
	"k8s.io/klog/v2/ktesting"

	"github.com/jetstack/preflight/api"
	"github.com/jetstack/preflight/internal/cyberark/dataupload"
	preflightversion "github.com/jetstack/preflight/pkg/version"
)

// TestBaseSnapshotFromOptions tests the baseSnapshotFromOptions function.
func TestBaseSnapshotFromOptions(t *testing.T) {
	type testCase struct {
		name    string
		options Options
		want    dataupload.Snapshot
	}
	tests := []testCase{
		{
			// OrgID and ClusterID must be ignored by the base snapshot.
			name: "ClusterName and ClusterDescription are used, OrgID and ClusterID",
			options: Options{
				OrgID:              "unused-org-id",
				ClusterID:          "unused-cluster-id",
				ClusterName:        "some-cluster-name",
				ClusterDescription: "some-cluster-description",
			},
			want: dataupload.Snapshot{
				ClusterName:        "some-cluster-name",
				ClusterDescription: "some-cluster-description",
				AgentVersion:       preflightversion.PreflightVersion,
			},
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			got := baseSnapshotFromOptions(tc.options)
			require.Equal(t, tc.want, got)
		})
	}
}

// TestExtractServerVersionFromReading tests the extractServerVersionFromReading function.
func TestExtractServerVersionFromReading(t *testing.T) {
	type testCase struct {
		name             string
		reading          *api.DataReading
		expectedSnapshot dataupload.Snapshot
		expectError      string
	}
	tests := []testCase{
		{
			name:        "nil reading",
			expectError: `programmer mistake: the DataReading must not be nil`,
		},
		{
			name: "nil data",
			reading: &api.DataReading{
				DataGatherer: "ark/discovery",
				Data:         nil,
			},
			// NOTE(review): expected message may be missing the trailing "%T"
			// rendering of a nil interface — verify against the extractor.
			expectError: `programmer mistake: the DataReading must have data type *api.DiscoveryData. This DataReading (ark/discovery) has data type `,
		},
		{
			name: "wrong data type",
			reading: &api.DataReading{
				DataGatherer: "ark/discovery",
				Data:         &api.DynamicData{},
			},
			expectError: `programmer mistake: the DataReading must have data type *api.DiscoveryData. This DataReading (ark/discovery) has data type *api.DynamicData`,
		},
		{
			// A missing ServerVersion leaves K8SVersion empty, without error.
			name: "nil server version",
			reading: &api.DataReading{
				DataGatherer: "ark/discovery",
				Data:         &api.DiscoveryData{},
			},
			expectedSnapshot: dataupload.Snapshot{},
		},
		{
			name: "happy path",
			reading: &api.DataReading{
				DataGatherer: "ark/discovery",
				Data: &api.DiscoveryData{
					ClusterID: "success-cluster-id",
					ServerVersion: &version.Info{
						GitVersion: "v1.21.0",
					},
				},
			},
			expectedSnapshot: dataupload.Snapshot{
				ClusterID:  "success-cluster-id",
				K8SVersion: "v1.21.0",
			},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			var snapshot dataupload.Snapshot
			err := extractClusterIDAndServerVersionFromReading(test.reading, &snapshot)
			if test.expectError != "" {
				assert.EqualError(t, err, test.expectError)
				// On error the snapshot must be left untouched.
				assert.Equal(t, dataupload.Snapshot{}, snapshot)
				return
			}
			require.NoError(t, err)
			assert.Equal(t, test.expectedSnapshot, snapshot)
		})
	}
}

// TestExtractOIDCFromReading tests the extractOIDCFromReading function.
func TestExtractOIDCFromReading(t *testing.T) {
	type testCase struct {
		name             string
		reading          *api.DataReading
		expectedSnapshot dataupload.Snapshot
		expectError      string
	}
	tests := []testCase{
		{
			name:        "nil reading",
			expectError: `programmer mistake: the DataReading must not be nil`,
		},
		{
			name: "nil data",
			reading: &api.DataReading{
				DataGatherer: "ark/oidc",
				Data:         nil,
			},
			// NOTE(review): expected message may be missing the trailing "%T"
			// rendering of a nil interface — verify against the extractor.
			expectError: `programmer mistake: the DataReading must have data type *api.OIDCDiscoveryData. This DataReading (ark/oidc) has data type `,
		},
		{
			name: "wrong data type",
			reading: &api.DataReading{
				DataGatherer: "ark/oidc",
				Data:         &api.DiscoveryData{},
			},
			expectError: `programmer mistake: the DataReading must have data type *api.OIDCDiscoveryData. This DataReading (ark/oidc) has data type *api.DiscoveryData`,
		},
		{
			// All four OIDC fields must be copied into the snapshot.
			name: "happy path",
			reading: &api.DataReading{
				DataGatherer: "ark/oidc",
				Data: &api.OIDCDiscoveryData{
					OIDCConfig:      map[string]any{"issuer": "https://example.com"},
					OIDCConfigError: "oidc-err",
					JWKS:            map[string]any{"keys": []any{}},
					JWKSError:       "jwks-err",
				},
			},
			expectedSnapshot: dataupload.Snapshot{
				OIDCConfig:      map[string]any{"issuer": "https://example.com"},
				OIDCConfigError: "oidc-err",
				JWKS:            map[string]any{"keys": []any{}},
				JWKSError:       "jwks-err",
			},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			var snapshot dataupload.Snapshot
			err := extractOIDCFromReading(test.reading, &snapshot)
			if test.expectError != "" {
				assert.EqualError(t, err, test.expectError)
				// On error the snapshot must be left untouched.
				assert.Equal(t, dataupload.Snapshot{}, snapshot)
				return
			}
			require.NoError(t, err)
			assert.Equal(t, test.expectedSnapshot, snapshot)
		})
	}
}

// TestExtractResourceListFromReading tests the extractResourceListFromReading function.
func TestExtractResourceListFromReading(t *testing.T) {
	type testCase struct {
		name             string
		reading          *api.DataReading
		expectedNumItems int
		expectError      string
	}
	tests := []testCase{
		{
			name:        "nil reading",
			expectError: `programmer mistake: the DataReading must not be nil`,
		},
		{
			name: "nil data",
			reading: &api.DataReading{
				DataGatherer: "ark/namespaces",
				Data:         nil,
			},
			expectError: `programmer mistake: the DataReading must have data type *api.DynamicData. ` +
				`This DataReading (ark/namespaces) has data type `,
		},
		{
			name: "wrong data type",
			reading: &api.DataReading{
				DataGatherer: "ark/namespaces",
				Data:         &api.DiscoveryData{},
			},
			expectError: `programmer mistake: the DataReading must have data type *api.DynamicData. ` +
				`This DataReading (ark/namespaces) has data type *api.DiscoveryData`,
		},
		{
			// Nil Items: no error and an empty (but non-nil) result is expected.
			name: "nil items",
			reading: &api.DataReading{
				DataGatherer: "ark/namespaces",
				Data:         &api.DynamicData{},
			},
			expectedNumItems: 0,
		},
		{
			name: "empty items",
			reading: &api.DataReading{
				DataGatherer: "ark/namespaces",
				Data: &api.DynamicData{
					Items: []*api.GatheredResource{},
				},
			},
			expectedNumItems: 0,
		},
		{
			// Items whose Resource is not a runtime.Object are programmer errors.
			name: "wrong item resource type",
			reading: &api.DataReading{
				DataGatherer: "ark/namespaces",
				Data: &api.DynamicData{
					Items: []*api.GatheredResource{
						{
							Resource: &api.DiscoveryData{},
						},
					},
				},
			},
			expectError: `programmer mistake: the DynamicData items must have Resource type runtime.Object. ` +
				`This item (0) has Resource type *api.DiscoveryData`,
		},
		{
			name: "happy path",
			reading: &api.DataReading{
				DataGatherer: "ark/namespaces",
				Data: &api.DynamicData{
					Items: []*api.GatheredResource{
						{
							Resource: &unstructured.Unstructured{
								Object: map[string]any{
									"kind": "Namespace",
									"metadata": map[string]any{
										"name": "default",
										"uid":  "uid-default",
									},
								},
							},
						},
						{
							Resource: &unstructured.Unstructured{
								Object: map[string]any{
									"kind": "Namespace",
									"metadata": map[string]any{
										"name": "kube-system",
										"uid":  "uid-kube-system",
									},
								},
							},
						},
						// Deleted resource should be ignored
						{
							DeletedAt: api.Time{Time: time.Now()},
							Resource: &unstructured.Unstructured{
								Object: map[string]any{
									"kind": "Namespace",
									"metadata": map[string]any{
										"name": "kube-system",
										"uid":  "uid-kube-system",
									},
								},
							},
						},
					},
				},
			},
			// Only the two live namespaces are extracted.
			expectedNumItems: 2,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			var resources []runtime.Object
			err := extractResourceListFromReading(test.reading, &resources)
			if test.expectError != "" {
				// On error the output slice must remain nil.
				assert.EqualError(t, err, test.expectError)
				assert.Nil(t, resources)
				return
			}
			require.NoError(t, err)
			// Even with zero items the slice must be initialized.
			require.NotNil(t, resources)
			assert.Len(t, resources, test.expectedNumItems)
		})
	}
}

// TestConvertDataReadings_ConfigMaps tests that configmaps are correctly converted.
func TestConvertDataReadings_ConfigMaps(t *testing.T) {
	// Map each gatherer name to the extractor that consumes its reading.
	extractorFunctions := map[string]func(*api.DataReading, *dataupload.Snapshot) error{
		"ark/discovery": extractClusterIDAndServerVersionFromReading,
		"ark/configmaps": func(reading *api.DataReading, snapshot *dataupload.Snapshot) error {
			return extractResourceListFromReading(reading, &snapshot.ConfigMaps)
		},
	}
	readings := []*api.DataReading{
		{
			DataGatherer: "ark/discovery",
			Data: &api.DiscoveryData{
				ClusterID: "test-cluster-id",
				ServerVersion: &version.Info{
					GitVersion: "v1.21.0",
				},
			},
		},
		{
			DataGatherer: "ark/configmaps",
			Data: &api.DynamicData{
				Items: []*api.GatheredResource{
					{
						Resource: &unstructured.Unstructured{
							Object: map[string]any{
								"apiVersion": "v1",
								"kind":       "ConfigMap",
								"metadata": map[string]any{
									"name":      "conjur-connect",
									"namespace": "conjur",
									"labels": map[string]any{
										"conjur.org/name": "conjur-connect-configmap",
									},
								},
								"data": map[string]any{
									"config.yaml": "some-config-data",
								},
							},
						},
					},
					{
						Resource: &unstructured.Unstructured{
							Object: map[string]any{
								"apiVersion": "v1",
								"kind":       "ConfigMap",
								"metadata": map[string]any{
									"name":      "another-configmap",
									"namespace": "default",
									"labels": map[string]any{
										"conjur.org/name": "conjur-connect-configmap",
									},
								},
								"data": map[string]any{
									"setting": "value",
								},
							},
						},
					},
					// Deleted configmap should be ignored
					{
						DeletedAt: api.Time{Time: time.Now()},
						Resource: &unstructured.Unstructured{
							Object: map[string]any{
								"apiVersion": "v1",
								"kind":       "ConfigMap",
								"metadata": map[string]any{
									"name":      "deleted-configmap",
									"namespace": "default",
								},
							},
						},
					},
				},
			},
		},
	}

	var snapshot dataupload.Snapshot
	err := convertDataReadings(extractorFunctions, readings, &snapshot)
	require.NoError(t, err)

	// Verify the snapshot contains the expected data
	assert.Equal(t, "test-cluster-id", snapshot.ClusterID)
	assert.Equal(t, "v1.21.0", snapshot.K8SVersion)
	require.Len(t, snapshot.ConfigMaps, 2, "should have 2 configmaps (deleted one should be excluded)")

	// Verify the first configmap
	cm1, ok := snapshot.ConfigMaps[0].(*unstructured.Unstructured)
	require.True(t, ok, "configmap should be unstructured")
	assert.Equal(t, "ConfigMap", cm1.GetKind())
	assert.Equal(t, "conjur-connect", cm1.GetName())
	assert.Equal(t, "conjur", cm1.GetNamespace())

	// Verify the second configmap
	cm2, ok := snapshot.ConfigMaps[1].(*unstructured.Unstructured)
	require.True(t, ok, "configmap should be unstructured")
	assert.Equal(t, "ConfigMap", cm2.GetKind())
	assert.Equal(t, "another-configmap", cm2.GetName())
	assert.Equal(t, "default", cm2.GetNamespace())
}

// TestConvertDataReadings_ExternalSecrets tests that externalsecrets are correctly converted.
func TestConvertDataReadings_ExternalSecrets(t *testing.T) {
	// Map each gatherer name to the extractor that consumes its reading.
	extractorFunctions := map[string]func(*api.DataReading, *dataupload.Snapshot) error{
		"ark/discovery": extractClusterIDAndServerVersionFromReading,
		"ark/esoexternalsecrets": func(reading *api.DataReading, snapshot *dataupload.Snapshot) error {
			return extractResourceListFromReading(reading, &snapshot.ExternalSecrets)
		},
	}
	readings := []*api.DataReading{
		{
			DataGatherer: "ark/discovery",
			Data: &api.DiscoveryData{
				ClusterID: "test-cluster-id",
				ServerVersion: &version.Info{
					GitVersion: "v1.21.0",
				},
			},
		},
		{
			DataGatherer: "ark/esoexternalsecrets",
			Data: &api.DynamicData{
				Items: []*api.GatheredResource{
					{
						Resource: &unstructured.Unstructured{
							Object: map[string]any{
								"apiVersion": "external-secrets.io/v1",
								"kind":       "ExternalSecret",
								"metadata": map[string]any{
									"name":      "my-external-secret",
									"namespace": "default",
								},
								"spec": map[string]any{
									"refreshInterval": "1h",
									"secretStoreRef": map[string]any{
										"name": "my-secret-store",
										"kind": "SecretStore",
									},
								},
							},
						},
					},
					{
						Resource: &unstructured.Unstructured{
							Object: map[string]any{
								"apiVersion": "external-secrets.io/v1",
								"kind":       "ExternalSecret",
								"metadata": map[string]any{
									"name":      "another-external-secret",
									"namespace": "production",
								},
								"spec": map[string]any{
									"refreshInterval": "30m",
								},
							},
						},
					},
					// Deleted externalsecret should be ignored
					{
						DeletedAt: api.Time{Time: time.Now()},
						Resource: &unstructured.Unstructured{
							Object: map[string]any{
								"apiVersion": "external-secrets.io/v1",
								"kind":       "ExternalSecret",
								"metadata": map[string]any{
									"name":      "deleted-external-secret",
									"namespace": "default",
								},
							},
						},
					},
				},
			},
		},
	}

	var snapshot dataupload.Snapshot
	err := convertDataReadings(extractorFunctions, readings, &snapshot)
	require.NoError(t, err)

	// Verify the snapshot contains the expected data
	assert.Equal(t, "test-cluster-id", snapshot.ClusterID)
	assert.Equal(t, "v1.21.0", snapshot.K8SVersion)
	require.Len(t, snapshot.ExternalSecrets, 2, "should have 2 externalsecrets (deleted one should be excluded)")

	// Verify the first externalsecret
	es1, ok := snapshot.ExternalSecrets[0].(*unstructured.Unstructured)
	require.True(t, ok, "externalsecret should be unstructured")
	assert.Equal(t, "ExternalSecret", es1.GetKind())
	assert.Equal(t, "my-external-secret", es1.GetName())
	assert.Equal(t, "default", es1.GetNamespace())

	// Verify the second externalsecret
	es2, ok := snapshot.ExternalSecrets[1].(*unstructured.Unstructured)
	require.True(t, ok, "externalsecret should be unstructured")
	assert.Equal(t, "ExternalSecret", es2.GetKind())
	assert.Equal(t, "another-external-secret", es2.GetName())
	assert.Equal(t, "production", es2.GetNamespace())
}

// TestConvertDataReadings_SecretStores tests that secretstores are correctly converted.
func TestConvertDataReadings_SecretStores(t *testing.T) {
	// Map each gatherer name to the extractor that consumes its reading.
	extractorFunctions := map[string]func(*api.DataReading, *dataupload.Snapshot) error{
		"ark/discovery": extractClusterIDAndServerVersionFromReading,
		"ark/esosecretstores": func(reading *api.DataReading, snapshot *dataupload.Snapshot) error {
			return extractResourceListFromReading(reading, &snapshot.SecretStores)
		},
	}
	readings := []*api.DataReading{
		{
			DataGatherer: "ark/discovery",
			Data: &api.DiscoveryData{
				ClusterID: "test-cluster-id",
				ServerVersion: &version.Info{
					GitVersion: "v1.21.0",
				},
			},
		},
		{
			DataGatherer: "ark/esosecretstores",
			Data: &api.DynamicData{
				Items: []*api.GatheredResource{
					{
						Resource: &unstructured.Unstructured{
							Object: map[string]any{
								"apiVersion": "external-secrets.io/v1",
								"kind":       "SecretStore",
								"metadata": map[string]any{
									"name":      "my-secret-store",
									"namespace": "default",
								},
								"spec": map[string]any{
									"provider": map[string]any{
										"fake": map[string]any{},
									},
								},
							},
						},
					},
					{
						Resource: &unstructured.Unstructured{
							Object: map[string]any{
								"apiVersion": "external-secrets.io/v1",
								"kind":       "SecretStore",
								"metadata": map[string]any{
									"name":      "aws-secret-store",
									"namespace": "production",
								},
								"spec": map[string]any{
									"provider": map[string]any{
										"aws": map[string]any{},
									},
								},
							},
						},
					},
					// Deleted secretstore should be ignored
					{
						DeletedAt: api.Time{Time: time.Now()},
						Resource: &unstructured.Unstructured{
							Object: map[string]any{
								"apiVersion": "external-secrets.io/v1",
								"kind":       "SecretStore",
								"metadata": map[string]any{
									"name":      "deleted-secret-store",
									"namespace": "default",
								},
							},
						},
					},
				},
			},
		},
	}

	var snapshot dataupload.Snapshot
	err := convertDataReadings(extractorFunctions, readings, &snapshot)
	require.NoError(t, err)

	// Verify the snapshot contains the expected data
	assert.Equal(t, "test-cluster-id", snapshot.ClusterID)
	assert.Equal(t, "v1.21.0", snapshot.K8SVersion)
	require.Len(t, snapshot.SecretStores, 2, "should have 2 secretstores (deleted one should be excluded)")

	// Verify the first secretstore
	ss1, ok := snapshot.SecretStores[0].(*unstructured.Unstructured)
	require.True(t, ok, "secretstore should be unstructured")
	assert.Equal(t, "SecretStore", ss1.GetKind())
	assert.Equal(t, "my-secret-store", ss1.GetName())
	assert.Equal(t, "default", ss1.GetNamespace())

	// Verify the second secretstore
	ss2, ok := snapshot.SecretStores[1].(*unstructured.Unstructured)
	require.True(t, ok, "secretstore should be unstructured")
	assert.Equal(t, "SecretStore", ss2.GetKind())
	assert.Equal(t, "aws-secret-store", ss2.GetName())
	assert.Equal(t, "production", ss2.GetNamespace())
}

// TestConvertDataReadings_ClusterExternalSecrets tests that clusterexternalsecrets are correctly converted.
func TestConvertDataReadings_ClusterExternalSecrets(t *testing.T) {
	// Map each gatherer name to the extractor that consumes its reading.
	extractorFunctions := map[string]func(*api.DataReading, *dataupload.Snapshot) error{
		"ark/discovery": extractClusterIDAndServerVersionFromReading,
		"ark/esoclusterexternalsecrets": func(reading *api.DataReading, snapshot *dataupload.Snapshot) error {
			return extractResourceListFromReading(reading, &snapshot.ClusterExternalSecrets)
		},
	}
	readings := []*api.DataReading{
		{
			DataGatherer: "ark/discovery",
			Data: &api.DiscoveryData{
				ClusterID: "test-cluster-id",
				ServerVersion: &version.Info{
					GitVersion: "v1.21.0",
				},
			},
		},
		{
			DataGatherer: "ark/esoclusterexternalsecrets",
			Data: &api.DynamicData{
				Items: []*api.GatheredResource{
					{
						// Cluster-scoped resources carry no namespace.
						Resource: &unstructured.Unstructured{
							Object: map[string]any{
								"apiVersion": "external-secrets.io/v1",
								"kind":       "ClusterExternalSecret",
								"metadata": map[string]any{
									"name": "my-cluster-external-secret",
								},
								"spec": map[string]any{
									"externalSecretSpec": map[string]any{
										"secretStoreRef": map[string]any{
											"name": "my-cluster-secret-store",
											"kind": "ClusterSecretStore",
										},
									},
								},
							},
						},
					},
					{
						Resource: &unstructured.Unstructured{
							Object: map[string]any{
								"apiVersion": "external-secrets.io/v1",
								"kind":       "ClusterExternalSecret",
								"metadata": map[string]any{
									"name": "aws-cluster-external-secret",
								},
								"spec": map[string]any{
									"externalSecretSpec": map[string]any{
										"secretStoreRef": map[string]any{
											"name": "aws-cluster-secret-store",
											"kind": "ClusterSecretStore",
										},
									},
								},
							},
						},
					},
					// Deleted clusterexternalsecret should be ignored
					{
						DeletedAt: api.Time{Time: time.Now()},
						Resource: &unstructured.Unstructured{
							Object: map[string]any{
								"apiVersion": "external-secrets.io/v1",
								"kind":       "ClusterExternalSecret",
								"metadata": map[string]any{
									"name": "deleted-cluster-external-secret",
								},
							},
						},
					},
				},
			},
		},
	}

	var snapshot dataupload.Snapshot
	err := convertDataReadings(extractorFunctions, readings, &snapshot)
	require.NoError(t, err)

	// Verify the snapshot contains the expected data
	assert.Equal(t, "test-cluster-id", snapshot.ClusterID)
	assert.Equal(t, "v1.21.0", snapshot.K8SVersion)
	require.Len(t, snapshot.ClusterExternalSecrets, 2, "should have 2 clusterexternalsecrets (deleted one should be excluded)")

	// Verify the first clusterexternalsecret
	ces1, ok := snapshot.ClusterExternalSecrets[0].(*unstructured.Unstructured)
	require.True(t, ok, "clusterexternalsecret should be unstructured")
	assert.Equal(t, "ClusterExternalSecret", ces1.GetKind())
	assert.Equal(t, "my-cluster-external-secret", ces1.GetName())

	// Verify the second clusterexternalsecret
	ces2, ok := snapshot.ClusterExternalSecrets[1].(*unstructured.Unstructured)
	require.True(t, ok, "clusterexternalsecret should be unstructured")
	assert.Equal(t, "ClusterExternalSecret", ces2.GetKind())
	assert.Equal(t, "aws-cluster-external-secret", ces2.GetName())
}

// TestConvertDataReadings_ClusterSecretStores tests that clustersecretstores are correctly converted.
func TestConvertDataReadings_ClusterSecretStores(t *testing.T) {
	// Map each gatherer name to the extractor that consumes its reading.
	extractorFunctions := map[string]func(*api.DataReading, *dataupload.Snapshot) error{
		"ark/discovery": extractClusterIDAndServerVersionFromReading,
		"ark/esoclustersecretstores": func(reading *api.DataReading, snapshot *dataupload.Snapshot) error {
			return extractResourceListFromReading(reading, &snapshot.ClusterSecretStores)
		},
	}
	readings := []*api.DataReading{
		{
			DataGatherer: "ark/discovery",
			Data: &api.DiscoveryData{
				ClusterID: "test-cluster-id",
				ServerVersion: &version.Info{
					GitVersion: "v1.21.0",
				},
			},
		},
		{
			DataGatherer: "ark/esoclustersecretstores",
			Data: &api.DynamicData{
				Items: []*api.GatheredResource{
					{
						// Cluster-scoped resources carry no namespace.
						Resource: &unstructured.Unstructured{
							Object: map[string]any{
								"apiVersion": "external-secrets.io/v1",
								"kind":       "ClusterSecretStore",
								"metadata": map[string]any{
									"name": "my-cluster-secret-store",
								},
								"spec": map[string]any{
									"provider": map[string]any{
										"fake": map[string]any{},
									},
								},
							},
						},
					},
					{
						Resource: &unstructured.Unstructured{
							Object: map[string]any{
								"apiVersion": "external-secrets.io/v1",
								"kind":       "ClusterSecretStore",
								"metadata": map[string]any{
									"name": "aws-cluster-secret-store",
								},
								"spec": map[string]any{
									"provider": map[string]any{
										"aws": map[string]any{},
									},
								},
							},
						},
					},
					// Deleted clustersecretstore should be ignored
					{
						DeletedAt: api.Time{Time: time.Now()},
						Resource: &unstructured.Unstructured{
							Object: map[string]any{
								"apiVersion": "external-secrets.io/v1",
								"kind":       "ClusterSecretStore",
								"metadata": map[string]any{
									"name": "deleted-cluster-secret-store",
								},
							},
						},
					},
				},
			},
		},
	}

	var snapshot dataupload.Snapshot
	err := convertDataReadings(extractorFunctions, readings, &snapshot)
	require.NoError(t, err)

	// Verify the snapshot contains the expected data
	assert.Equal(t, "test-cluster-id", snapshot.ClusterID)
	assert.Equal(t, "v1.21.0", snapshot.K8SVersion)
	require.Len(t, snapshot.ClusterSecretStores, 2, "should have 2 clustersecretstores (deleted one should be excluded)")

	// Verify the first clustersecretstore
	css1, ok := snapshot.ClusterSecretStores[0].(*unstructured.Unstructured)
	require.True(t, ok, "clustersecretstore should be unstructured")
	assert.Equal(t, "ClusterSecretStore", css1.GetKind())
	assert.Equal(t, "my-cluster-secret-store", css1.GetName())

	// Verify the second clustersecretstore
	css2, ok := snapshot.ClusterSecretStores[1].(*unstructured.Unstructured)
	require.True(t, ok, "clustersecretstore should be unstructured")
	assert.Equal(t, "ClusterSecretStore", css2.GetKind())
	assert.Equal(t, "aws-cluster-secret-store", css2.GetName())
}

// TestConvertDataReadings_ServiceAccounts tests that serviceaccounts are correctly converted.
func TestConvertDataReadings_ServiceAccounts(t *testing.T) {
	extractorFunctions := map[string]func(*api.DataReading, *dataupload.Snapshot) error{
		"ark/discovery": extractClusterIDAndServerVersionFromReading,
		"ark/serviceaccounts": func(reading *api.DataReading, snapshot *dataupload.Snapshot) error {
			return extractResourceListFromReading(reading, &snapshot.ServiceAccounts)
		},
	}
	readings := []*api.DataReading{
		{
			DataGatherer: "ark/discovery",
			Data: &api.DiscoveryData{
				ClusterID: "test-cluster-id",
				ServerVersion: &version.Info{
					GitVersion: "v1.22.0",
				},
			},
		},
		{
			DataGatherer: "ark/serviceaccounts",
			Data: &api.DynamicData{
				Items: []*api.GatheredResource{
					{
						Resource: &unstructured.Unstructured{
							Object: map[string]any{
								"apiVersion": "v1",
								"kind":       "ServiceAccount",
								"metadata": map[string]any{
									"name":      "default",
									"namespace": "default",
								},
							},
						},
					},
					{
						Resource: &unstructured.Unstructured{
							Object: map[string]any{
								"apiVersion": "v1",
								"kind":       "ServiceAccount",
								"metadata": map[string]any{
									"name":      "app-sa",
									"namespace": "production",
									"labels": map[string]any{
										"app": "myapp",
									},
								},
							},
						},
					},
				},
			},
		},
	}

	var snapshot dataupload.Snapshot
	err := convertDataReadings(extractorFunctions, readings, &snapshot)
	require.NoError(t, err)

	assert.Equal(t, "test-cluster-id", snapshot.ClusterID)
	assert.Equal(t, "v1.22.0", snapshot.K8SVersion)
	require.Len(t, snapshot.ServiceAccounts, 2)

	// Spot-check the first serviceaccount only.
	sa1, ok := snapshot.ServiceAccounts[0].(*unstructured.Unstructured)
	require.True(t, ok)
	assert.Equal(t, "ServiceAccount", sa1.GetKind())
	assert.Equal(t, "default", sa1.GetName())
}

// TestConvertDataReadings_Roles tests that roles are correctly converted.
func TestConvertDataReadings_Roles(t *testing.T) {
	extractorFunctions := map[string]func(*api.DataReading, *dataupload.Snapshot) error{
		"ark/discovery": extractClusterIDAndServerVersionFromReading,
		"ark/roles": func(reading *api.DataReading, snapshot *dataupload.Snapshot) error {
			return extractResourceListFromReading(reading, &snapshot.Roles)
		},
	}
	readings := []*api.DataReading{
		{
			DataGatherer: "ark/discovery",
			Data: &api.DiscoveryData{
				ClusterID: "rbac-cluster",
				ServerVersion: &version.Info{
					GitVersion: "v1.23.0",
				},
			},
		},
		{
			DataGatherer: "ark/roles",
			Data: &api.DynamicData{
				Items: []*api.GatheredResource{
					{
						Resource: &unstructured.Unstructured{
							Object: map[string]any{
								"apiVersion": "rbac.authorization.k8s.io/v1",
								"kind":       "Role",
								"metadata": map[string]any{
									"name":      "pod-reader",
									"namespace": "default",
									"labels": map[string]any{
										"rbac.authorization.k8s.io/aggregate-to-view": "true",
									},
								},
								"rules": []any{
									map[string]any{
										"apiGroups": []any{""},
										"resources": []any{"pods"},
										"verbs":     []any{"get", "list"},
									},
								},
							},
						},
					},
					// Deleted role should be excluded
					{
						DeletedAt: api.Time{Time: time.Now()},
						Resource: &unstructured.Unstructured{
							Object: map[string]any{
								"apiVersion": "rbac.authorization.k8s.io/v1",
								"kind":       "Role",
								"metadata": map[string]any{
									"name":      "deleted-role",
									"namespace": "default",
								},
							},
						},
					},
				},
			},
		},
	}

	var snapshot dataupload.Snapshot
	err := convertDataReadings(extractorFunctions, readings, &snapshot)
	require.NoError(t, err)

	assert.Equal(t, "rbac-cluster", snapshot.ClusterID)
	require.Len(t, snapshot.Roles, 1, "deleted role should be excluded")
	role, ok := snapshot.Roles[0].(*unstructured.Unstructured)
	require.True(t, ok)
	assert.Equal(t, "Role", role.GetKind())
	assert.Equal(t, "pod-reader", role.GetName())
}

// TestConvertDataReadings_MultipleResources tests conversion with multiple resource types.
func TestConvertDataReadings_MultipleResources(t *testing.T) {
	// One extractor per gatherer; each targets a distinct snapshot field.
	extractorFunctions := map[string]func(*api.DataReading, *dataupload.Snapshot) error{
		"ark/discovery": extractClusterIDAndServerVersionFromReading,
		"ark/configmaps": func(reading *api.DataReading, snapshot *dataupload.Snapshot) error {
			return extractResourceListFromReading(reading, &snapshot.ConfigMaps)
		},
		"ark/serviceaccounts": func(reading *api.DataReading, snapshot *dataupload.Snapshot) error {
			return extractResourceListFromReading(reading, &snapshot.ServiceAccounts)
		},
		"ark/deployments": func(reading *api.DataReading, snapshot *dataupload.Snapshot) error {
			return extractResourceListFromReading(reading, &snapshot.Deployments)
		},
	}
	readings := []*api.DataReading{
		{
			DataGatherer: "ark/discovery",
			Data: &api.DiscoveryData{
				ClusterID: "multi-resource-cluster",
				ServerVersion: &version.Info{
					GitVersion: "v1.24.0",
				},
			},
		},
		{
			DataGatherer: "ark/configmaps",
			Data: &api.DynamicData{
				Items: []*api.GatheredResource{
					{
						Resource: &unstructured.Unstructured{
							Object: map[string]any{
								"apiVersion": "v1",
								"kind":       "ConfigMap",
								"metadata": map[string]any{
									"name":      "app-config",
									"namespace": "default",
								},
							},
						},
					},
				},
			},
		},
		{
			DataGatherer: "ark/serviceaccounts",
			Data: &api.DynamicData{
				Items: []*api.GatheredResource{
					{
						Resource: &unstructured.Unstructured{
							Object: map[string]any{
								"apiVersion": "v1",
								"kind":       "ServiceAccount",
								"metadata": map[string]any{
									"name":      "app-sa",
									"namespace": "default",
								},
							},
						},
					},
				},
			},
		},
		{
			DataGatherer: "ark/deployments",
			Data: &api.DynamicData{
				Items: []*api.GatheredResource{
					{
						Resource: &unstructured.Unstructured{
							Object: map[string]any{
								"apiVersion": "apps/v1",
								"kind":       "Deployment",
								"metadata": map[string]any{
									"name":      "web-app",
									"namespace": "default",
								},
							},
						},
					},
				},
			},
		},
	}

	var snapshot dataupload.Snapshot
	err := convertDataReadings(extractorFunctions, readings, &snapshot)
	require.NoError(t, err)

	// Verify all resources are present
	assert.Equal(t, "multi-resource-cluster", snapshot.ClusterID)
	assert.Equal(t, "v1.24.0", snapshot.K8SVersion)
	require.Len(t, snapshot.ConfigMaps, 1)
	require.Len(t, snapshot.ServiceAccounts, 1)
	require.Len(t, snapshot.Deployments, 1)

	// Verify each resource type
	cm, ok := snapshot.ConfigMaps[0].(*unstructured.Unstructured)
	require.True(t, ok)
	assert.Equal(t, "app-config", cm.GetName())

	sa, ok := snapshot.ServiceAccounts[0].(*unstructured.Unstructured)
	require.True(t, ok)
	assert.Equal(t, "app-sa", sa.GetName())

	deploy, ok := snapshot.Deployments[0].(*unstructured.Unstructured)
	require.True(t, ok)
	assert.Equal(t, "web-app", deploy.GetName())
}

// TestConvertDataReadings tests the convertDataReadings function.
func TestConvertDataReadings(t *testing.T) {
	// Shared fixtures: a minimal extractor map and a matching pair of readings.
	simpleExtractorFunctions := map[string]func(*api.DataReading, *dataupload.Snapshot) error{
		"ark/discovery": extractClusterIDAndServerVersionFromReading,
		"ark/secrets": func(reading *api.DataReading, snapshot *dataupload.Snapshot) error {
			return extractResourceListFromReading(reading, &snapshot.Secrets)
		},
	}
	simpleReadings := []*api.DataReading{
		{
			DataGatherer: "ark/discovery",
			Data: &api.DiscoveryData{
				ClusterID: "success-cluster-id",
				ServerVersion: &version.Info{
					GitVersion: "v1.21.0",
				},
			},
		},
		{
			DataGatherer: "ark/secrets",
			Data: &api.DynamicData{
				Items: []*api.GatheredResource{
					{
						Resource: &corev1.Secret{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "app-1",
								Namespace: "team-1",
							},
						},
					},
					// Deleted secret should be ignored
					{
						DeletedAt: api.Time{Time: time.Now()},
						Resource: &corev1.Secret{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "deleted-1",
								Namespace: "team-1",
							},
						},
					},
				},
			},
		},
	}
	type testCase struct {
		name               string
		extractorFunctions map[string]func(*api.DataReading, *dataupload.Snapshot) error
		readings           []*api.DataReading
		expectedSnapshot   dataupload.Snapshot
		expectError        string
	}
	tests := []testCase{
		{
			// Readings with no matching extractor are reported as unhandled.
			name:               "no extractor functions",
			readings:           simpleReadings,
			extractorFunctions: map[string]func(*api.DataReading, *dataupload.Snapshot) error{},
			expectError:        `unexpected data gatherers, missing: [], unhandled: [ark/discovery ark/secrets]`,
		},
		{
			name:               "nil extractor functions",
			readings:           simpleReadings,
			extractorFunctions: nil,
			expectError:        `unexpected data gatherers, missing: [], unhandled: [ark/discovery ark/secrets]`,
		},
		{
			// Extractors with no matching reading are reported as missing.
			name:               "empty readings",
			extractorFunctions: simpleExtractorFunctions,
			readings:           []*api.DataReading{},
			expectError:        `unexpected data gatherers, missing: [ark/discovery ark/secrets], unhandled: []`,
		},
		{
			name:               "nil readings",
			extractorFunctions: simpleExtractorFunctions,
			readings:           nil,
			expectError:        `unexpected data gatherers, missing: [ark/discovery ark/secrets], unhandled: []`,
		},
		{
			// An extractor error is wrapped with the gatherer name.
			name:               "extractor function error",
			extractorFunctions: simpleExtractorFunctions,
			readings: []*api.DataReading{
				{
					DataGatherer: "ark/discovery",
					Data:         &api.DynamicData{},
				},
			},
			expectError: `while extracting data reading ark/discovery: programmer mistake: the DataReading must have data type *api.DiscoveryData. This DataReading (ark/discovery) has data type *api.DynamicData`,
		},
		{
			name:               "happy path",
			extractorFunctions: simpleExtractorFunctions,
			readings:           simpleReadings,
			expectedSnapshot: dataupload.Snapshot{
				ClusterID:  "success-cluster-id",
				K8SVersion: "v1.21.0",
				Secrets: []runtime.Object{
					&corev1.Secret{
						ObjectMeta: metav1.ObjectMeta{
							Name:      "app-1",
							Namespace: "team-1",
						},
					},
				},
			},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			var snapshot dataupload.Snapshot
			err := convertDataReadings(test.extractorFunctions, test.readings, &snapshot)
			if test.expectError != "" {
				// On error the snapshot must be left untouched.
				assert.EqualError(t, err, test.expectError)
				assert.Equal(t, dataupload.Snapshot{}, snapshot)
				return
			}
			require.NoError(t, err)
			assert.Equal(t, test.expectedSnapshot, snapshot)
		})
	}
}

// TestMinimizeSnapshot tests the minimizeSnapshot function.
// It creates a snapshot with various secrets and service accounts, runs
// minimizeSnapshot on it, and checks that the resulting snapshot only contains
// the expected secrets and service accounts.
func TestMinimizeSnapshot(t *testing.T) {
	// Three representative secrets: a TLS secret whose leaf has the
	// clientAuth EKU, a TLS secret without it, and a plain Opaque secret.
	secretWithClientCert := newTLSSecret("tls-secret-with-client", sampleCertificateChain(t, x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth))
	secretWithoutClientCert := newTLSSecret("tls-secret-without-client", sampleCertificateChain(t, x509.ExtKeyUsageServerAuth))
	opaqueSecret := newOpaqueSecret("opaque-secret")
	serviceAccount := &unstructured.Unstructured{
		Object: map[string]any{
			"apiVersion": "v1",
			"kind":       "ServiceAccount",
			"metadata": map[string]any{
				"name":      "my-service-account",
				"namespace": "default",
			},
		},
	}
	type testCase struct {
		name             string
		inputSnapshot    dataupload.Snapshot
		expectedSnapshot dataupload.Snapshot
	}
	tests := []testCase{
		{
			name: "empty snapshot",
			inputSnapshot: dataupload.Snapshot{
				AgentVersion:    "v1.0.0",
				ClusterID:       "cluster-1",
				K8SVersion:      "v1.21.0",
				Secrets:         []runtime.Object{},
				ServiceAccounts: []runtime.Object{},
				Roles:           []runtime.Object{},
			},
			expectedSnapshot: dataupload.Snapshot{
				AgentVersion:    "v1.0.0",
				ClusterID:       "cluster-1",
				K8SVersion:      "v1.21.0",
				Secrets:         []runtime.Object{},
				ServiceAccounts: []runtime.Object{},
				Roles:           []runtime.Object{},
			},
		},
		{
			// Only the TLS secret lacking a client certificate is dropped.
			name: "snapshot with various secrets and service accounts",
			inputSnapshot: dataupload.Snapshot{
				AgentVersion: "v1.0.0",
				ClusterID:    "cluster-1",
				K8SVersion:   "v1.21.0",
				Secrets: []runtime.Object{
					secretWithClientCert,
					secretWithoutClientCert,
					opaqueSecret,
				},
				ServiceAccounts: []runtime.Object{
					serviceAccount,
				},
				Roles: []runtime.Object{},
			},
			expectedSnapshot: dataupload.Snapshot{
				AgentVersion: "v1.0.0",
				ClusterID:    "cluster-1",
				K8SVersion:   "v1.21.0",
				Secrets: []runtime.Object{
					secretWithClientCert,
					opaqueSecret,
				},
				ServiceAccounts: []runtime.Object{
					serviceAccount,
				},
				Roles: []runtime.Object{},
			},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			log := ktesting.NewLogger(t, ktesting.DefaultConfig)
			// minimizeSnapshot mutates the snapshot in place.
			minimizeSnapshot(log, &test.inputSnapshot)
			assert.Equal(t, test.expectedSnapshot, test.inputSnapshot)
		})
	}
}

// TestIsExcludableSecret tests the isExcludableSecret function.
func TestIsExcludableSecret(t *testing.T) {
	type testCase struct {
		name    string
		secret  runtime.Object
		exclude bool
	}
	tests := []testCase{
		{
			name:    "TLS secret with client cert in tls.crt",
			secret:  newTLSSecret("tls-secret-with-client", sampleCertificateChain(t, x509.ExtKeyUsageClientAuth)),
			exclude: false,
		},
		{
			name:    "TLS secret with non-client cert in tls.crt",
			secret:  newTLSSecret("tls-secret-without-client", sampleCertificateChain(t, x509.ExtKeyUsageServerAuth)),
			exclude: true,
		},
		{
			// Typed (non-unstructured) objects are never excluded.
			name: "Non-unstructured",
			secret: &corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "non-unstructured-secret",
					Namespace: "default",
				},
			},
			exclude: false,
		},
		{
			name: "Non-secret",
			secret: &unstructured.Unstructured{
				Object: map[string]any{
					"apiVersion": "cert-manager/v1",
					"kind":       "Certificate",
					"metadata": map[string]any{
						"name":      "non-secret",
						"namespace": "default",
					},
				},
			},
			exclude: false,
		},
		{
			name:    "Non-TLS secret",
			secret:  newOpaqueSecret("non-tls-secret"),
			exclude: false,
		},
		{
			// TLS secrets with missing/empty/undecodable certificates are
			// excluded.
			name:    "TLS secret without tls.crt",
			secret:  newTLSSecret("tls-secret-with-no-cert", nil),
			exclude: true,
		},
		{
			name:    "TLS secret with empty tls.crt",
			secret:  newTLSSecret("tls-secret-with-empty-cert", ""),
			exclude: true,
		},
		{
			name:    "TLS secret with invalid base64 in tls.crt",
			secret:  newTLSSecret("tls-secret-with-invalid-cert", "invalid-base64"),
			exclude: true,
		},
		{
			name:    "TLS secret with invalid PEM in tls.crt",
			secret:  newTLSSecret("tls-secret-with-invalid-pem", base64.StdEncoding.EncodeToString([]byte("invalid-pem"))),
			exclude: true,
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			log := ktesting.NewLogger(t, ktesting.DefaultConfig)
			excluded := isExcludableSecret(log, tc.secret)
			assert.Equal(t, tc.exclude, excluded, "case: %s", tc.name)
		})
	}
}

// newTLSSecret creates a Kubernetes TLS secret with the given name and certificate data.
// If crt is nil, the secret will not contain a "tls.crt" entry.
func newTLSSecret(name string, crt any) *unstructured.Unstructured {
	data := map[string]any{"tls.key": "dummy-key"}
	if crt != nil {
		data["tls.crt"] = crt
	}
	return &unstructured.Unstructured{
		Object: map[string]any{
			"apiVersion": "v1",
			"kind":       "Secret",
			"metadata": map[string]any{
				"name":      name,
				"namespace": "default",
			},
			"type": "kubernetes.io/tls",
			"data": data,
		},
	}
}

// newOpaqueSecret creates a Kubernetes Opaque secret with the given name.
func newOpaqueSecret(name string) *unstructured.Unstructured {
	return &unstructured.Unstructured{
		Object: map[string]any{
			"apiVersion": "v1",
			"kind":       "Secret",
			"metadata": map[string]any{
				"name":      name,
				"namespace": "default",
			},
			"type": "Opaque",
			"data": map[string]any{
				"key": "value",
			},
		},
	}
}

// sampleCertificateChain returns a PEM encoded sample certificate chain for testing purposes.
// The leaf certificate is signed by a self-signed CA certificate.
// Uses an elliptic curve key for the CA and leaf certificates for speed.
// The returned string is base64 encoded to match how TLS certificates
// are typically provided in Kubernetes secrets.
func sampleCertificateChain(t testing.TB, usages ...x509.ExtKeyUsage) string { t.Helper() caPrivKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) require.NoError(t, err) caTemplate := x509.Certificate{ SerialNumber: big.NewInt(1), Subject: pkix.Name{ Organization: []string{"Test CA"}, CommonName: "Test CA", }, NotBefore: time.Now(), NotAfter: time.Now().Add(24 * time.Hour), KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, ExtKeyUsage: []x509.ExtKeyUsage{}, BasicConstraintsValid: true, IsCA: true, } caCertDER, err := x509.CreateCertificate(rand.Reader, &caTemplate, &caTemplate, &caPrivKey.PublicKey, caPrivKey) require.NoError(t, err) caCertPEM := pem.EncodeToMemory(&pem.Block{ Type: "CERTIFICATE", Bytes: caCertDER, }) clientPrivKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) require.NoError(t, err) clientTemplate := x509.Certificate{ SerialNumber: big.NewInt(2), Subject: pkix.Name{ Organization: []string{"Test Organization"}, CommonName: "example.com", }, NotBefore: time.Now(), NotAfter: time.Now().Add(24 * time.Hour), KeyUsage: x509.KeyUsageDigitalSignature, ExtKeyUsage: usages, } clientCertDER, err := x509.CreateCertificate(rand.Reader, &clientTemplate, &caTemplate, &clientPrivKey.PublicKey, caPrivKey) require.NoError(t, err) clientCertPEM := pem.EncodeToMemory(&pem.Block{ Type: "CERTIFICATE", Bytes: clientCertDER, }) return base64.StdEncoding.EncodeToString(append(clientCertPEM, caCertPEM...)) } ================================================ FILE: pkg/client/client_cyberark_test.go ================================================ package client_test import ( "crypto/x509" "errors" "os" "strings" "testing" "github.com/jetstack/venafi-connection-lib/http_client" "github.com/stretchr/testify/require" k8sversion "k8s.io/apimachinery/pkg/version" "k8s.io/klog/v2" "k8s.io/klog/v2/ktesting" "github.com/jetstack/preflight/api" "github.com/jetstack/preflight/internal/cyberark" 
"github.com/jetstack/preflight/internal/cyberark/servicediscovery" "github.com/jetstack/preflight/pkg/client" "github.com/jetstack/preflight/pkg/testutil" "github.com/jetstack/preflight/pkg/version" _ "k8s.io/klog/v2/ktesting/init" ) // TestCyberArkClient_PostDataReadingsWithOptions_MockAPI demonstrates that the // dataupload code works with the mock CyberArk APIs. // The environment variables are chosen to match those expected by the mock // server. func TestCyberArkClient_PostDataReadingsWithOptions_MockAPI(t *testing.T) { t.Setenv("ARK_SUBDOMAIN", servicediscovery.MockDiscoverySubdomain) t.Setenv("ARK_USERNAME", "test@example.com") t.Setenv("ARK_SECRET", "somepassword") t.Run("success", func(t *testing.T) { logger := ktesting.NewLogger(t, ktesting.DefaultConfig) ctx := klog.NewContext(t.Context(), logger) httpClient := testutil.FakeCyberArk(t) c, err := client.NewCyberArk(httpClient) require.NoError(t, err) readings := fakeReadings() err = c.PostDataReadingsWithOptions(ctx, readings, client.Options{}) require.NoError(t, err) }) } // TestCyberArkClient_PostDataReadingsWithOptions_RealAPI demonstrates that the // dataupload code works with the real CyberArk APIs. // // To enable verbose request logging: // // go test ./internal/cyberark/dataupload/... 
\ // -v -count 1 -run TestCyberArkClient_PostDataReadingsWithOptions_RealAPI -args -testing.v 6 func TestCyberArkClient_PostDataReadingsWithOptions_RealAPI(t *testing.T) { if strings.ToLower(os.Getenv("ARK_LIVE_TEST")) != "true" { t.Skip("set ARK_LIVE_TEST=true to run this test against the live service") return } t.Run("success", func(t *testing.T) { logger := ktesting.NewLogger(t, ktesting.DefaultConfig) ctx := klog.NewContext(t.Context(), logger) var rootCAs *x509.CertPool httpClient := http_client.NewDefaultClient(version.UserAgent(), rootCAs) c, err := client.NewCyberArk(httpClient) if err != nil { if errors.Is(err, cyberark.ErrMissingEnvironmentVariables) { t.Skipf("Skipping: %s", err) } require.NoError(t, err) } readings := fakeReadings() err = c.PostDataReadingsWithOptions(ctx, readings, client.Options{}) require.NoError(t, err) }) } // defaultDynamicDatagathererNames is the list of dynamic datagatherers that // are included in the defaultExtractorFunctions map in client_cyberark.go. // This is used by fakeReadings to generate empty readings for all the // dynamic datagatherers. var defaultDynamicDatagathererNames = []string{ "ark/secrets", "ark/serviceaccounts", "ark/configmaps", "ark/esoexternalsecrets", "ark/esosecretstores", "ark/esoclusterexternalsecrets", "ark/esoclustersecretstores", "ark/roles", "ark/clusterroles", "ark/rolebindings", "ark/clusterrolebindings", "ark/jobs", "ark/cronjobs", "ark/deployments", "ark/statefulsets", "ark/daemonsets", "ark/pods", } // fakeReadings returns a set of fake readings that includes a discovery reading // and empty readings for all the default dynamic datagatherers. 
// fakeReadings builds the minimal reading set the CyberArk backend expects:
// one "ark/oidc" reading, one "ark/discovery" reading, and an empty dynamic
// reading for every datagatherer named in defaultDynamicDatagathererNames.
func fakeReadings() []*api.DataReading {
	// One empty DynamicData reading per default dynamic datagatherer.
	readings := make([]*api.DataReading, len(defaultDynamicDatagathererNames))
	for i, name := range defaultDynamicDatagathererNames {
		readings[i] = &api.DataReading{
			DataGatherer: name,
			Data:         &api.DynamicData{},
		}
	}
	// The discovery readings are prepended so they appear first in the upload.
	return append([]*api.DataReading{
		{
			DataGatherer: "ark/oidc",
			// Simulates a cluster whose OIDC discovery endpoints are disabled;
			// the error strings are representative, not load-bearing.
			Data: &api.OIDCDiscoveryData{
				OIDCConfigError: "Failed to fetch /.well-known/openid-configuration: 404 Not Found",
				JWKSError:       "Failed to fetch /openid/v1/jwks: 404 Not Found",
			},
		},
		{
			DataGatherer: "ark/discovery",
			Data: &api.DiscoveryData{
				ClusterID: "ffffffff-ffff-ffff-ffff-ffffffffffff",
				ServerVersion: &k8sversion.Info{
					GitVersion: "v1.21.0",
				},
			},
		},
	}, readings...)
}



================================================
FILE: pkg/client/client_file.go
================================================
package client

import (
	"context"
	"encoding/json"
	"fmt"
	"os"

	"k8s.io/klog/v2"

	"github.com/jetstack/preflight/api"
)

// FileClient writes the supplied readings to a file, in JSON format.
type FileClient struct { path string } func NewFileClient(path string) Client { return &FileClient{ path: path, } } func (o *FileClient) PostDataReadingsWithOptions(ctx context.Context, readings []*api.DataReading, _ Options) error { log := klog.FromContext(ctx) data, err := json.MarshalIndent(readings, "", " ") if err != nil { return fmt.Errorf("failed to marshal JSON: %s", err) } err = os.WriteFile(o.path, data, 0644) if err != nil { return fmt.Errorf("failed to write file: %s", err) } log.Info("Data saved to local file", "outputPath", o.path) return nil } ================================================ FILE: pkg/client/client_file_test.go ================================================ package client import ( "encoding/json" "os" "strings" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/klog/v2" "k8s.io/klog/v2/ktesting" "github.com/jetstack/preflight/api" ) func TestFileClient_PostDataReadingsWithOptions(t *testing.T) { type testCase struct { name string path string readings []*api.DataReading expectedJSON string expectedError string } tests := []testCase{ { name: "success", path: "{tmp}/data.json", readings: []*api.DataReading{}, expectedJSON: "[]", }, { name: "success-overwrite", path: "{tmp}/exists.json", readings: []*api.DataReading{}, expectedJSON: "[]", }, { name: "json-marshal-error", path: "{tmp}/data.json", readings: []*api.DataReading{ { Data: json.RawMessage("x"), }, }, expectedError: "failed to marshal JSON: json: error calling MarshalJSON for type json.RawMessage: invalid character 'x' looking for beginning of value", expectedJSON: "[]", }, { name: "no-such-file-or-directory", path: "{tmp}/no-such-folder/data.json", readings: []*api.DataReading{}, expectedError: "failed to write file: open {tmp}/no-such-folder/data.json: no such file or directory", expectedJSON: "[]", }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { log := ktesting.NewLogger(t, ktesting.DefaultConfig) ctx := 
klog.NewContext(t.Context(), log) tmpDir := t.TempDir() require.NoError(t, os.WriteFile(tmpDir+"/exists.json", []byte("existing-content"), 0644)) path := strings.ReplaceAll(tc.path, "{tmp}", tmpDir) expectedError := strings.ReplaceAll(tc.expectedError, "{tmp}", tmpDir) c := NewFileClient(path) err := c.PostDataReadingsWithOptions(ctx, tc.readings, Options{}) if expectedError != "" { assert.EqualError(t, err, expectedError) return } require.NoError(t, err) assert.FileExists(t, path) actualJSON, err := os.ReadFile(path) require.NoError(t, err) assert.JSONEq(t, tc.expectedJSON, string(actualJSON)) }) } } ================================================ FILE: pkg/client/client_ngts.go ================================================ package client import ( "bytes" "context" "crypto" "crypto/tls" "crypto/x509" "encoding/base64" "encoding/json" "fmt" "io" "net/http" "net/url" "os" "path" "strconv" "strings" "sync" "time" "github.com/golang-jwt/jwt/v4" "github.com/google/uuid" "github.com/microcosm-cc/bluemonday" "k8s.io/client-go/transport" "k8s.io/klog/v2" "github.com/jetstack/preflight/api" "github.com/jetstack/preflight/pkg/version" ) // NGTSClient is a Client implementation for uploading data readings to NGTS // using service account keypair authentication. It follows the Private Key JWT // authentication pattern (RFC 7521 + RFC 7523). type NGTSClient struct { credentials *NGTSServiceAccountCredentials accessToken *ngtsAccessToken baseURL *url.URL agentMetadata *api.AgentMetadata tsgID string privateKey crypto.PrivateKey jwtSigningAlg jwt.SigningMethod lock sync.RWMutex // Made public for testing purposes. Client *http.Client } // NGTSServiceAccountCredentials holds the service account authentication credentials for NGTS. 
type NGTSServiceAccountCredentials struct {
	// ClientID is the service account client ID
	ClientID string `json:"client_id,omitempty"`
	// PrivateKeyFile is the path to the private key file paired to
	// the public key in the service account
	PrivateKeyFile string `json:"private_key_file,omitempty"`
}

// ngtsAccessToken stores an NGTS access token and its expiration time.
// Access to it is guarded by NGTSClient.lock.
type ngtsAccessToken struct {
	accessToken    string
	expirationTime time.Time
}

// ngtsAccessTokenResponse represents the JSON response from the NGTS token endpoint.
type ngtsAccessTokenResponse struct {
	AccessToken string `json:"access_token"` // base 64 encoded token
	Type        string `json:"token_type"`   // always "bearer"
	ExpiresIn   int64  `json:"expires_in"`   // number of seconds after which the access token will expire
}

const (
	// ngtsProdURLFormat is the format used for constructing a URL for the production environment.
	// The TSG ID is part of the URL.
	ngtsProdURLFormat = "https://%s.ngts.paloaltonetworks.com"

	// ngtsUploadEndpoint matches the "new" CM-SaaS upload endpoint
	// Note that "no" is always passed to this endpoint in other paths (e.g. in the venafi-connection client and in the venafi-kubernetes-agent chart)
	// so we copy that behavior here.
	ngtsUploadEndpoint = "v1/tlspk/upload/clusterdata/no"

	// ngtsAccessTokenEndpoint matches the CM-SaaS token endpoint
	ngtsAccessTokenEndpoint = accessTokenEndpoint

	// ngtsRequiredGrantType matches the CM-SaaS required grant type for JWTs
	ngtsRequiredGrantType = requiredGrantType
)

// NewNGTSClient creates a new NGTS client that authenticates using keypair authentication
// and uploads data to NGTS endpoints. The baseURL parameter can override the default
// NGTS server URL for testing purposes.
// NewNGTSClient validates the supplied credentials (loading the client ID from
// disk if needed), parses the signing key, derives the base URL from the TSG ID
// when none is given, and returns a ready-to-use client. It returns an error if
// any credential or URL is invalid; it performs no network I/O itself.
func NewNGTSClient(agentMetadata *api.AgentMetadata, credentials *NGTSServiceAccountCredentials, baseURL string, tsgID string, rootCAs *x509.CertPool) (*NGTSClient, error) {
	// Load ClientID from file if not provided directly
	if err := credentials.LoadClientIDIfNeeded(); err != nil {
		return nil, fmt.Errorf("cannot create NGTSClient: %w", err)
	}
	if err := credentials.Validate(); err != nil {
		return nil, fmt.Errorf("cannot create NGTSClient: %w", err)
	}
	// NB: There may be more validation which can be done here, e.g. see
	// https://pan.dev/scm/api/tenancy/delete-tenancy-v-1-tenant-service-groups-tsg-id/
	// > Possible values: >= 10 characters and <= 10 characters, Value must match regular expression ^1[0-9]+$
	// For now, leaving this check simple
	if tsgID == "" {
		return nil, fmt.Errorf("cannot create NGTSClient: tsgID cannot be empty")
	}
	// The signing method (e.g. RSA vs EC) is derived from the key type so that
	// generateAndSignJwtToken can sign assertions later.
	privateKey, jwtSigningAlg, err := parsePrivateKeyAndExtractSigningMethod(credentials.PrivateKeyFile)
	if err != nil {
		return nil, fmt.Errorf("while parsing private key file: %w", err)
	}
	actualBaseURL := baseURL
	// Create prod NGTS URL if no explicit URL provided
	if actualBaseURL == "" {
		actualBaseURL = fmt.Sprintf(ngtsProdURLFormat, tsgID)
	}
	parsedBaseURL, err := url.Parse(actualBaseURL)
	if err != nil {
		extra := ""
		// A possible failure mode would be an incorrectly formatted TSG ID, so warn about that specifically
		// if we tried to create a prod URL
		if baseURL == "" {
			extra = fmt.Sprintf(" (possibly malformed TSG ID %q?)", tsgID)
		}
		return nil, fmt.Errorf("invalid NGTS base URL %q: %s%s", baseURL, err, extra)
	}
	// Create HTTP transport that honors proxy settings and custom CA certs
	tr := http.DefaultTransport.(*http.Transport).Clone()
	if rootCAs != nil {
		if tr.TLSClientConfig == nil {
			tr.TLSClientConfig = &tls.Config{}
		}
		tr.TLSClientConfig.RootCAs = rootCAs
	}
	return &NGTSClient{
		agentMetadata: agentMetadata,
		credentials:   credentials,
		baseURL:       parsedBaseURL,
		tsgID:         tsgID,
		// Zero-valued token: first use will trigger a refresh in getValidAccessToken.
		accessToken: &ngtsAccessToken{},
		Client: &http.Client{
			Timeout: time.Minute,
			// DebugWrappers adds request/response logging at high verbosity levels.
			Transport: transport.DebugWrappers(tr),
		},
		privateKey:    privateKey,
		jwtSigningAlg: jwtSigningAlg,
	}, nil
}

// LoadClientIDIfNeeded attempts to load the ClientID from a file if it is not already set.
// It looks for a "clientID" file in the same directory as the PrivateKeyFile.
// For compatibility with the venafi-kubernetes-agent chart, it also supports "clientId" (lowercase 'd').
// If both files exist, "clientID" takes precedence.
// This allows the ClientID to be provided either as a direct value or via a Kubernetes secret.
func (c *NGTSServiceAccountCredentials) LoadClientIDIfNeeded() error {
	if c == nil {
		return fmt.Errorf("credentials are nil")
	}
	// If ClientID is already set via helm values / CLI args, nothing to do
	if c.ClientID != "" {
		klog.V(2).Info("Using clientID from config.clientID helm value")
		return nil
	}
	// We'd preferably have NGTSServiceAccountCredentials.CredentialPath but we didn't want to make another change
	// to existing CLI flags; so we depend on PrivateKeyFile and assume clientID is in the same directory.
	// If PrivateKeyFile is not set, we can't determine where to look for the clientID file
	if c.PrivateKeyFile == "" {
		return nil // This is actually a fatal error but will be caught by Validate() later
	}
	// NOTE(review): path.Dir and "/" concatenation assume slash-separated
	// paths; path/filepath would be OS-correct — confirm Windows is out of scope.
	baseDir := path.Dir(c.PrivateKeyFile)
	// Try to load ClientID from a file in the same directory as the private key
	// Try "clientID" first (takes precedence), then "clientId" for backward compatibility
	clientIDPath := baseDir + "/clientID"
	clientIDBytes, err := os.ReadFile(clientIDPath)
	if err != nil {
		// Try the alternative "clientId" (lowercase 'd') for compatibility with venafi-kubernetes-agent
		clientIDPath = baseDir + "/clientId"
		clientIDBytes, err = os.ReadFile(clientIDPath)
		if err != nil {
			// If neither file exists, that's okay - we'll let Validate() catch the empty ClientID error later
			// NOTE(review): klog.V(2).Info is unstructured; these key/value args are
			// printed as plain values, not pairs — InfoS may have been intended.
			klog.V(2).Info("Could not read clientID from file", "path", clientIDPath, "error", err)
			return nil
		}
	}
	// Trim whitespace from the clientID
	c.ClientID = strings.TrimSpace(string(clientIDBytes))
	klog.V(2).Info("Loaded clientID from file", "path", clientIDPath)
	return nil
}

// Validate checks that the NGTS service account credentials are valid.
// It requires both a non-empty ClientID and a private key file location;
// it does not check that the file exists or is readable.
func (c *NGTSServiceAccountCredentials) Validate() error {
	if c == nil {
		return fmt.Errorf("credentials are nil")
	}
	if c.ClientID == "" {
		return fmt.Errorf("client_id cannot be empty")
	}
	if c.PrivateKeyFile == "" {
		return fmt.Errorf("NGTS private key file location cannot be empty")
	}
	return nil
}

// PostDataReadingsWithOptions uploads data readings to the NGTS backend.
// The TSG ID is included in the upload path to identify the tenant service group.
func (c *NGTSClient) PostDataReadingsWithOptions(ctx context.Context, readings []*api.DataReading, opts Options) error { payload := api.DataReadingsPost{ AgentMetadata: c.agentMetadata, DataGatherTime: time.Now().UTC(), DataReadings: readings, } data, err := json.Marshal(payload) if err != nil { return err } uploadURL := c.baseURL.JoinPath(ngtsUploadEndpoint) // Add cluster name and description as query parameters query := uploadURL.Query() stripHTML := bluemonday.StrictPolicy() if opts.ClusterName != "" { query.Add("name", stripHTML.Sanitize(opts.ClusterName)) } if opts.ClusterDescription != "" { query.Add("description", base64.RawURLEncoding.EncodeToString([]byte(stripHTML.Sanitize(opts.ClusterDescription)))) } if opts.ClaimableCerts { // The TLSPK backend reads "certOwnership=unassigned" — this is the backend contract. query.Add("certOwnership", "unassigned") } uploadURL.RawQuery = query.Encode() klog.FromContext(ctx).V(2).Info( "uploading data readings to NGTS", "url", uploadURL.String(), "cluster_name", opts.ClusterName, "data_readings_count", len(readings), "data_size_bytes", len(data), ) res, err := c.post(ctx, uploadURL.String(), bytes.NewBuffer(data)) if err != nil { return fmt.Errorf("failed to upload data to NGTS: %w", err) } defer res.Body.Close() if code := res.StatusCode; code < 200 || code >= 300 { errorContent := "" body, err := io.ReadAll(res.Body) if err == nil { errorContent = string(body) } return fmt.Errorf("NGTS upload failed with status code %d. Body: [%s]", code, errorContent) } return nil } // post performs an HTTP POST request to NGTS with authentication. 
// post builds an authenticated JSON POST to the given absolute URL and
// executes it with the client's HTTP client, refreshing the access token
// first if necessary. The caller is responsible for closing the response body.
func (c *NGTSClient) post(ctx context.Context, url string, body io.Reader) (*http.Response, error) {
	token, err := c.getValidAccessToken(ctx)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, body)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Accept", "application/json")
	req.Header.Set("Content-Type", "application/json")
	version.SetUserAgent(req)
	// An empty token means no Authorization header is sent; the server
	// will reject the request rather than this code failing locally.
	if len(token.accessToken) > 0 {
		req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.accessToken))
	}
	return c.Client.Do(req)
}

// getValidAccessToken returns a valid access token. It will fetch a new access
// token from the auth server if the current token does not exist or has expired.
//
// The token is refreshed one minute before its recorded expiry to avoid using
// a token that expires mid-request. The check and the refresh are performed
// under separate lock acquisitions, so two goroutines may both decide to
// refresh; the result is a redundant token fetch, not corruption, since
// updateAccessToken replaces the token under the write lock.
func (c *NGTSClient) getValidAccessToken(ctx context.Context) (*ngtsAccessToken, error) {
	c.lock.RLock()
	needsUpdate := c.accessToken == nil || time.Now().Add(time.Minute).After(c.accessToken.expirationTime)
	c.lock.RUnlock()
	if needsUpdate {
		err := c.updateAccessToken(ctx)
		if err != nil {
			return nil, err
		}
	}
	c.lock.RLock()
	token := c.accessToken
	c.lock.RUnlock()
	return token, nil
}

// updateAccessToken fetches a new access token from the NGTS auth server using JWT authentication.
// updateAccessToken fetches a new access token from the NGTS auth server using JWT authentication.
// It signs a fresh assertion JWT, exchanges it at the token endpoint using the
// RFC 7523 grant type, and stores the resulting token plus its computed expiry
// under the write lock.
func (c *NGTSClient) updateAccessToken(ctx context.Context) error {
	jwtToken, err := c.generateAndSignJwtToken()
	if err != nil {
		return fmt.Errorf("failed to generate JWT token for NGTS authentication: %w", err)
	}
	values := url.Values{}
	values.Set("grant_type", ngtsRequiredGrantType)
	values.Set("assertion", jwtToken)
	tokenURL := c.baseURL.JoinPath(ngtsAccessTokenEndpoint).String()
	encoded := values.Encode()
	request, err := http.NewRequestWithContext(ctx, http.MethodPost, tokenURL, strings.NewReader(encoded))
	if err != nil {
		return err
	}
	request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	request.Header.Add("Content-Length", strconv.Itoa(len(encoded)))
	version.SetUserAgent(request)
	// Capture "now" before the request so the expiry is computed from the
	// moment the token's lifetime started (conservative under slow responses).
	now := time.Now()
	accessToken := ngtsAccessTokenResponse{}
	err = c.sendHTTPRequest(request, &accessToken)
	if err != nil {
		return fmt.Errorf("failed to obtain NGTS access token: %w", err)
	}
	c.lock.Lock()
	c.accessToken = &ngtsAccessToken{
		accessToken:    accessToken.AccessToken,
		expirationTime: now.Add(time.Duration(accessToken.ExpiresIn) * time.Second),
	}
	c.lock.Unlock()
	return nil
}

// sendHTTPRequest executes an HTTP request and unmarshals the JSON response.
// Only 200 and 201 are treated as success; any other status is returned as an
// error that includes the (best-effort) response body for diagnosis.
func (c *NGTSClient) sendHTTPRequest(request *http.Request, responseObject any) error {
	response, err := c.Client.Do(request)
	if err != nil {
		return err
	}
	defer response.Body.Close()
	if response.StatusCode != http.StatusOK && response.StatusCode != http.StatusCreated {
		// Read error intentionally ignored: the body is informational only here.
		body, _ := io.ReadAll(response.Body)
		return fmt.Errorf("NGTS API request failed. Request %s, status code: %d, body: [%s]", request.URL, response.StatusCode, body)
	}
	body, err := io.ReadAll(response.Body)
	if err != nil {
		return err
	}
	if err = json.Unmarshal(body, responseObject); err != nil {
		return err
	}
	return nil
}

// generateAndSignJwtToken creates a JWT token signed with the service account's private key
// for authenticating to NGTS.
func (c *NGTSClient) generateAndSignJwtToken() (string, error) { // backend still expects "api.venafi.cloud/v1/oauth/token/serviceaccount" for audience, so force that for now venafiCloudProdURL, err := url.Parse(VenafiCloudProdURL) if err != nil { return "", err } claims := make(jwt.MapClaims) claims["sub"] = c.credentials.ClientID claims["iss"] = c.credentials.ClientID claims["iat"] = time.Now().Unix() claims["exp"] = time.Now().Add(time.Minute).Unix() claims["aud"] = path.Join(venafiCloudProdURL.Host, ngtsAccessTokenEndpoint) claims["jti"] = uuid.New().String() token, err := jwt.NewWithClaims(c.jwtSigningAlg, claims).SignedString(c.privateKey) if err != nil { return "", err } return token, nil } ================================================ FILE: pkg/client/client_ngts_test.go ================================================ package client import ( "encoding/json" "fmt" "net/http" "net/http/httptest" "os" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/jetstack/preflight/api" ) const fakePrivKeyPEM = `-----BEGIN PRIVATE KEY----- MHcCAQEEIFptpPXOvEWDrYkiMhyEH1+FB1GwtwX2tyXH4KtBO6g7oAoGCCqGSM49 AwEHoUQDQgAE/BsIwagYc4YUjSSFyqcStj2qliAkdVGlMoJbMuXupzQ9Qs4TX5Pl dFjz6J/j6Gu4fLPqXmM61Hj6kiuRHx5eHQ== -----END PRIVATE KEY----- ` func withFile(t testing.TB, content string) string { t.Helper() f, err := os.CreateTemp(t.TempDir(), "file") if err != nil { t.Fatalf("failed to create temporary file: %v", err) } defer f.Close() _, err = f.WriteString(content) if err != nil { t.Fatalf("failed to write to temporary file: %v", err) } return f.Name() } func TestNewNGTSClient(t *testing.T) { // Create a temporary key file keyFile := withFile(t, fakePrivKeyPEM) tests := []struct { name string credentials *NGTSServiceAccountCredentials baseURL string tsgID string wantErr bool errContains string }{ { name: "valid credentials and tsg id", credentials: &NGTSServiceAccountCredentials{ ClientID: "test-client-id", PrivateKeyFile: keyFile, 
}, baseURL: "https://test.ngts.example.com", tsgID: "test-tsg-id", wantErr: false, }, { name: "missing tsg id", credentials: &NGTSServiceAccountCredentials{ ClientID: "test-client-id", PrivateKeyFile: keyFile, }, baseURL: "https://test.ngts.example.com", tsgID: "", wantErr: true, errContains: "tsgID cannot be empty", }, { name: "missing clientID without file", credentials: &NGTSServiceAccountCredentials{ ClientID: "", PrivateKeyFile: keyFile, }, baseURL: "https://test.ngts.example.com", tsgID: "test-tsg-id", wantErr: true, errContains: "client_id cannot be empty", }, { name: "default URL when empty", credentials: &NGTSServiceAccountCredentials{ ClientID: "test-client-id", PrivateKeyFile: keyFile, }, baseURL: "", tsgID: "test-tsg-id", wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { metadata := &api.AgentMetadata{ Version: "test-version", ClusterID: "test-cluster", } client, err := NewNGTSClient(metadata, tt.credentials, tt.baseURL, tt.tsgID, nil) if tt.wantErr { require.Error(t, err) if tt.errContains != "" { assert.Contains(t, err.Error(), tt.errContains) } assert.Nil(t, client) return } require.NoError(t, err) assert.NotNil(t, client) assert.Equal(t, tt.tsgID, client.tsgID) if tt.baseURL != "" { assert.Equal(t, tt.baseURL, client.baseURL.String()) return } assert.Equal(t, fmt.Sprintf(ngtsProdURLFormat, tt.tsgID), client.baseURL.String()) }) } } func TestNGTSClient_LoadClientIDFromFile(t *testing.T) { // Create a temporary directory for the secret files tmpDir := t.TempDir() // Create the private key file keyFile := tmpDir + "/privatekey.pem" err := os.WriteFile(keyFile, []byte(fakePrivKeyPEM), 0o600) require.NoError(t, err) // Create the clientID file in the same directory clientIDFile := tmpDir + "/clientID" err = os.WriteFile(clientIDFile, []byte("test-client-from-file\n"), 0o600) require.NoError(t, err) tests := []struct { name string credentials *NGTSServiceAccountCredentials wantErr bool wantClient string }{ { name: "load 
clientID from file", credentials: &NGTSServiceAccountCredentials{ ClientID: "", // Empty - should be loaded from file PrivateKeyFile: keyFile, }, wantErr: false, wantClient: "test-client-from-file", }, { name: "explicit clientID takes precedence", credentials: &NGTSServiceAccountCredentials{ ClientID: "explicit-client-id", PrivateKeyFile: keyFile, }, wantErr: false, wantClient: "explicit-client-id", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { metadata := &api.AgentMetadata{ Version: "test-version", ClusterID: "test-cluster", } client, err := NewNGTSClient(metadata, tt.credentials, "https://test.example.com", "test-tsg", nil) if tt.wantErr { require.Error(t, err) return } require.NoError(t, err) assert.NotNil(t, client) assert.Equal(t, tt.wantClient, client.credentials.ClientID) }) } } func TestNGTSClient_LoadClientIDFromFileAlternativeNames(t *testing.T) { tests := []struct { name string setupFiles func(tmpDir string) string // returns keyFile path wantClientID string wantErr bool wantErrContain string }{ { // Note: venafi-kubernetes-agent didn't support storing the client ID in the secret, but // we don't want users moving to discovery-agent to be caught out by such a trivial mistake. 
name: "load from clientId (lowercase d) for venafi-kubernetes-agent compatibility", setupFiles: func(tmpDir string) string { keyFile := tmpDir + "/privatekey.pem" err := os.WriteFile(keyFile, []byte(fakePrivKeyPEM), 0o600) require.NoError(t, err) // Create clientId file (lowercase 'd') clientIdFile := tmpDir + "/clientId" err = os.WriteFile(clientIdFile, []byte("test-client-from-clientId\n"), 0o600) require.NoError(t, err) return keyFile }, wantClientID: "test-client-from-clientId", wantErr: false, }, { name: "load from clientID (uppercase D)", setupFiles: func(tmpDir string) string { keyFile := tmpDir + "/privatekey.pem" err := os.WriteFile(keyFile, []byte(fakePrivKeyPEM), 0o600) require.NoError(t, err) // Create only clientID file (uppercase 'D') clientIDFile := tmpDir + "/clientID" err = os.WriteFile(clientIDFile, []byte("from-clientID"), 0o600) require.NoError(t, err) return keyFile }, wantClientID: "from-clientID", wantErr: false, }, { name: "error when no clientID file exists", setupFiles: func(tmpDir string) string { keyFile := tmpDir + "/privatekey.pem" err := os.WriteFile(keyFile, []byte(fakePrivKeyPEM), 0o600) require.NoError(t, err) // Don't create any clientID file return keyFile }, wantErr: true, wantErrContain: "client_id cannot be empty", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { tmpDir := t.TempDir() keyFile := tt.setupFiles(tmpDir) credentials := &NGTSServiceAccountCredentials{ ClientID: "", // Empty - should be loaded from file PrivateKeyFile: keyFile, } metadata := &api.AgentMetadata{ Version: "test-version", ClusterID: "test-cluster", } client, err := NewNGTSClient(metadata, credentials, "https://test.example.com", "test-tsg", nil) if tt.wantErr { require.Error(t, err) if tt.wantErrContain != "" { assert.Contains(t, err.Error(), tt.wantErrContain) } return } require.NoError(t, err) assert.NotNil(t, client) assert.Equal(t, tt.wantClientID, client.credentials.ClientID) }) } } func 
TestNGTSClient_PostDataReadingsWithOptions(t *testing.T) { keyFile := withFile(t, fakePrivKeyPEM) // Create a test server that simulates NGTS backend var receivedRequest *http.Request var receivedBody []byte server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { receivedRequest = r // First request is for access token if r.URL.Path == ngtsAccessTokenEndpoint { w.WriteHeader(http.StatusOK) _ = json.NewEncoder(w).Encode(ngtsAccessTokenResponse{ AccessToken: "test-access-token", Type: "bearer", ExpiresIn: 3600, }) return } // Second request is for data upload body := make([]byte, r.ContentLength) _, _ = r.Body.Read(body) receivedBody = body w.WriteHeader(http.StatusOK) _, _ = w.Write([]byte(`{"status": "success"}`)) })) defer server.Close() credentials := &NGTSServiceAccountCredentials{ ClientID: "test-client-id", PrivateKeyFile: keyFile, } metadata := &api.AgentMetadata{ Version: "test-version", ClusterID: "test-cluster", } tsgID := "test-tsg-123" client, err := NewNGTSClient(metadata, credentials, server.URL, tsgID, nil) require.NoError(t, err) // Test data upload readings := []*api.DataReading{ { DataGatherer: "test-gatherer", Timestamp: api.Time{}, Data: &api.DynamicData{}, }, } opts := Options{ ClusterName: "test-cluster", ClusterDescription: "Test cluster description", } err = client.PostDataReadingsWithOptions(t.Context(), readings, opts) require.NoError(t, err) // Verify the upload request assert.NotNil(t, receivedRequest) assert.Equal(t, "/"+ngtsUploadEndpoint, receivedRequest.URL.Path) assert.Contains(t, receivedRequest.URL.RawQuery, "name=test-cluster") assert.Equal(t, "Bearer test-access-token", receivedRequest.Header.Get("Authorization")) // certOwnership not set — must NOT appear in query assert.NotContains(t, receivedRequest.URL.RawQuery, "certOwnership") // Verify the payload var payload api.DataReadingsPost err = json.Unmarshal(receivedBody, &payload) require.NoError(t, err) assert.Equal(t, 1, 
len(payload.DataReadings)) // Verify claimableCerts=true is included when set t.Run("claimableCerts: true sends certOwnership=unassigned to backend", func(t *testing.T) { optsUnassigned := Options{ ClusterName: "test-cluster", ClaimableCerts: true, } err = client.PostDataReadingsWithOptions(t.Context(), readings, optsUnassigned) require.NoError(t, err) assert.Contains(t, receivedRequest.URL.RawQuery, "certOwnership=unassigned") }) } func TestNGTSClient_AuthenticationFlow(t *testing.T) { keyFile := withFile(t, fakePrivKeyPEM) authCallCount := 0 server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == ngtsAccessTokenEndpoint { authCallCount++ w.WriteHeader(http.StatusOK) _ = json.NewEncoder(w).Encode(ngtsAccessTokenResponse{ AccessToken: "test-access-token", Type: "bearer", ExpiresIn: 3600, }) return } w.WriteHeader(http.StatusOK) })) defer server.Close() credentials := &NGTSServiceAccountCredentials{ ClientID: "test-client-id", PrivateKeyFile: keyFile, } metadata := &api.AgentMetadata{ Version: "test-version", ClusterID: "test-cluster", } client, err := NewNGTSClient(metadata, credentials, server.URL, "test-tsg", nil) require.NoError(t, err) // Make multiple requests - should only authenticate once readings := []*api.DataReading{{DataGatherer: "test", Data: &api.DynamicData{}}} opts := Options{ClusterName: "test"} for range 3 { err = client.PostDataReadingsWithOptions(t.Context(), readings, opts) require.NoError(t, err) } // Should only authenticate once since token is cached assert.Equal(t, 1, authCallCount) } func TestNGTSClient_ErrorHandling(t *testing.T) { keyFile := withFile(t, fakePrivKeyPEM) tests := []struct { name string serverHandler http.HandlerFunc expectedErrMsg string }{ { name: "authentication failure", serverHandler: func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == ngtsAccessTokenEndpoint { w.WriteHeader(http.StatusUnauthorized) _, _ = w.Write([]byte(`{"error": "invalid_client"}`)) 
return } w.WriteHeader(http.StatusOK) }, expectedErrMsg: "failed to obtain NGTS access token", }, { name: "upload failure", serverHandler: func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == ngtsAccessTokenEndpoint { w.WriteHeader(http.StatusOK) _ = json.NewEncoder(w).Encode(ngtsAccessTokenResponse{ AccessToken: "test-token", Type: "bearer", ExpiresIn: 3600, }) return } w.WriteHeader(http.StatusInternalServerError) _, _ = w.Write([]byte(`{"error": "internal server error"}`)) }, expectedErrMsg: "NGTS upload failed with status code 500", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { server := httptest.NewServer(tt.serverHandler) defer server.Close() credentials := &NGTSServiceAccountCredentials{ ClientID: "test-client-id", PrivateKeyFile: keyFile, } metadata := &api.AgentMetadata{Version: "test", ClusterID: "test"} client, err := NewNGTSClient(metadata, credentials, server.URL, "test-tsg", nil) require.NoError(t, err) readings := []*api.DataReading{{DataGatherer: "test", Data: &api.DynamicData{}}} opts := Options{ClusterName: "test"} err = client.PostDataReadingsWithOptions(t.Context(), readings, opts) require.Error(t, err) assert.Contains(t, err.Error(), tt.expectedErrMsg) }) } } ================================================ FILE: pkg/client/client_oauth.go ================================================ package client import ( "bytes" "context" "encoding/json" "fmt" "io" "net/http" "net/url" "path/filepath" "strings" "time" "github.com/hashicorp/go-multierror" "k8s.io/client-go/transport" "k8s.io/klog/v2" "github.com/jetstack/preflight/api" "github.com/jetstack/preflight/pkg/version" ) type ( // The OAuthClient type is a Client implementation used to upload data readings to the Jetstack Secure platform // using OAuth as its authentication method. 
OAuthClient struct { credentials *OAuthCredentials accessToken *accessToken baseURL string agentMetadata *api.AgentMetadata client *http.Client } accessToken struct { bearer string expirationDate time.Time } // OAuthCredentials defines the format of the credentials.json file. OAuthCredentials struct { // UserID is the ID or email for the user or service account. UserID string `json:"user_id"` // UserSecret is the secret for the user or service account. UserSecret string `json:"user_secret"` // The following fields are optional as the default behaviour // is to use the equivalent variables defined at package level // and injected at build time. // ClientID is the oauth2 client ID. ClientID string `json:"client_id,omitempty"` // ClientSecret is the oauth2 client secret. ClientSecret string `json:"client_secret,omitempty"` // AuthServerDomain is the domain for the auth server. AuthServerDomain string `json:"auth_server_domain,omitempty"` } ) var ( // ClientID is the auth0 client identifier (injected at build time) ClientID string // ClientSecret is the auth0 client secret (injected at build time) ClientSecret string // AuthServerDomain is the auth0 domain (injected at build time) AuthServerDomain string ) func (t *accessToken) needsRenew() bool { return t.bearer == "" || time.Now().After(t.expirationDate) } // NewOAuthClient returns a new instance of the OAuthClient type that will perform HTTP requests using OAuth to provide // authentication tokens to the backend API. 
func NewOAuthClient(agentMetadata *api.AgentMetadata, credentials *OAuthCredentials, baseURL string) (*OAuthClient, error) { if err := credentials.Validate(); err != nil { return nil, fmt.Errorf("cannot create OAuthClient: %v", err) } if baseURL == "" { return nil, fmt.Errorf("programmer mistake: cannot create APITokenClient: baseURL cannot be empty, should have been checked by the caller") } ok, _ := credentials.IsClientSet() if !ok { credentials.ClientID = ClientID credentials.ClientSecret = ClientSecret credentials.AuthServerDomain = AuthServerDomain } ok, why := credentials.IsClientSet() if !ok { return nil, fmt.Errorf("%s", why) } return &OAuthClient{ agentMetadata: agentMetadata, credentials: credentials, baseURL: baseURL, accessToken: &accessToken{}, client: &http.Client{ Timeout: time.Minute, Transport: transport.DebugWrappers(http.DefaultTransport), }, }, nil } func (c *OAuthClient) PostDataReadingsWithOptions(ctx context.Context, readings []*api.DataReading, opts Options) error { return c.postDataReadings(ctx, opts.OrgID, opts.ClusterID, readings) } // PostDataReadings uploads the slice of api.DataReading to the Jetstack Secure backend to be processed for later // viewing in the user-interface. 
func (c *OAuthClient) postDataReadings(ctx context.Context, orgID, clusterID string, readings []*api.DataReading) error { payload := api.DataReadingsPost{ AgentMetadata: c.agentMetadata, DataGatherTime: time.Now().UTC(), DataReadings: readings, } data, err := json.Marshal(payload) if err != nil { return err } klog.FromContext(ctx).V(2).Info( "uploading data readings", "url", filepath.Join("/api/v1/org", orgID, "datareadings", clusterID), "cluster_id", clusterID, "data_readings_count", len(readings), "data_size_bytes", len(data), ) res, err := c.post(ctx, filepath.Join("/api/v1/org", orgID, "datareadings", clusterID), bytes.NewBuffer(data)) if err != nil { return err } defer res.Body.Close() if code := res.StatusCode; code < 200 || code >= 300 { errorContent := "" body, err := io.ReadAll(res.Body) if err == nil { errorContent = string(body) } return fmt.Errorf("received response with status code %d. Body: [%s]", code, errorContent) } return nil } // Post performs an HTTP POST request. func (c *OAuthClient) post(ctx context.Context, path string, body io.Reader) (*http.Response, error) { token, err := c.getValidAccessToken(ctx) if err != nil { return nil, err } req, err := http.NewRequestWithContext(ctx, http.MethodPost, fullURL(c.baseURL, path), body) if err != nil { return nil, err } req.Header.Set("Content-Type", "application/json") version.SetUserAgent(req) if len(token.bearer) > 0 { req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.bearer)) } return c.client.Do(req) } // getValidAccessToken returns a valid access token. It will fetch a new access // token from the auth server in case the current access token does not exist // or it is expired. 
func (c *OAuthClient) getValidAccessToken(ctx context.Context) (*accessToken, error) { if c.accessToken.needsRenew() { err := c.renewAccessToken(ctx) if err != nil { return nil, err } } return c.accessToken, nil } func (c *OAuthClient) renewAccessToken(ctx context.Context) error { tokenURL := fmt.Sprintf("https://%s/oauth/token", c.credentials.AuthServerDomain) audience := "https://preflight.jetstack.io/api/v1" payload := url.Values{} payload.Set("grant_type", "password") payload.Set("client_id", c.credentials.ClientID) payload.Set("client_secret", c.credentials.ClientSecret) payload.Set("audience", audience) payload.Set("username", c.credentials.UserID) payload.Set("password", c.credentials.UserSecret) req, err := http.NewRequestWithContext(ctx, http.MethodPost, tokenURL, strings.NewReader(payload.Encode())) if err != nil { return err } req.Header.Add("Content-Type", "application/x-www-form-urlencoded") version.SetUserAgent(req) res, err := http.DefaultClient.Do(req) if err != nil { return err } body, err := io.ReadAll(res.Body) if err != nil { return err } defer res.Body.Close() if status := res.StatusCode; status < 200 || status >= 300 { return fmt.Errorf("auth server did not provide an access token: (status %d) %s.", status, string(body)) } response := struct { Bearer string `json:"access_token"` ExpiresIn uint `json:"expires_in"` }{} err = json.Unmarshal(body, &response) if err != nil { return err } if response.ExpiresIn == 0 { return fmt.Errorf("got wrong expiration for access token") } c.accessToken.bearer = response.Bearer c.accessToken.expirationDate = time.Now().Add(time.Duration(response.ExpiresIn) * time.Second) return nil } // Performs validations. Since it may return a multierror.Error, remember to use // multierror.Prefix(err, "context: ") rather than fmt.Errorf("context: %w", // err) when wrapping the error. 
func ParseOAuthCredentials(data []byte) (*OAuthCredentials, error) { var credentials OAuthCredentials err := json.Unmarshal(data, &credentials) if err != nil { return nil, err } if err = credentials.Validate(); err != nil { return nil, err } return &credentials, nil } // IsClientSet returns whether the client credentials are set or not. `why` is // only returned when `ok` is false. func (c *OAuthCredentials) IsClientSet() (ok bool, why string) { if c.ClientID == "" { return false, "ClientID is empty" } if c.ClientSecret == "" { return false, "ClientSecret is empty" } if c.AuthServerDomain == "" { return false, "AuthServerDomain is empty" } return true, "" } func (c *OAuthCredentials) Validate() error { var result *multierror.Error if c == nil { return fmt.Errorf("credentials are nil") } if c.UserID == "" { result = multierror.Append(result, fmt.Errorf("user_id cannot be empty")) } if c.UserSecret == "" { result = multierror.Append(result, fmt.Errorf("user_secret cannot be empty")) } return result.ErrorOrNil() } ================================================ FILE: pkg/client/client_venafi_cloud.go ================================================ package client import ( "bytes" "context" "crypto" "encoding/base64" "encoding/json" "fmt" "io" "net/http" "net/url" "path" "path/filepath" "strconv" "strings" "sync" "time" "github.com/golang-jwt/jwt/v4" "github.com/google/uuid" "github.com/hashicorp/go-multierror" "github.com/microcosm-cc/bluemonday" "k8s.io/client-go/transport" "k8s.io/klog/v2" "github.com/jetstack/preflight/api" "github.com/jetstack/preflight/pkg/version" ) type ( // The VenafiCloudClient type is a Client implementation used to upload data readings to the Venafi Cloud platform // using service account authentication as its authentication method. 
// // This form of authentication follows the Private Key JWT standard found at https://oauth.net/private-key-jwt, // which is a combination of two RFCs: // * RFC 7521 (Assertion Framework) // * RFC 7523 (JWT Profile for Client Authentication) VenafiCloudClient struct { credentials *VenafiSvcAccountCredentials accessToken *venafiCloudAccessToken baseURL string agentMetadata *api.AgentMetadata uploaderID string uploadPath string privateKey crypto.PrivateKey jwtSigningAlg jwt.SigningMethod lock sync.RWMutex // Made public for testing purposes. Client *http.Client } VenafiSvcAccountCredentials struct { // ClientID is the service account client ID ClientID string `json:"client_id,omitempty"` // PrivateKeyFile is the path to the private key file paired to // the public key in the service account PrivateKeyFile string `json:"private_key_file,omitempty"` } venafiCloudAccessToken struct { accessToken string expirationTime time.Time } accessTokenInformation struct { AccessToken string `json:"access_token"` // base 64 encoded token Type string `json:"token_type"` // always be “bearer” for now ExpiresIn int64 `json:"expires_in"` // number of seconds after which the access token will expire } ) const ( // URL for the venafi-cloud backend services VenafiCloudProdURL = "https://api.venafi.cloud" defaultVenafiCloudUploadEndpoint = "v1/tlspk/uploads" accessTokenEndpoint = "/v1/oauth/token/serviceaccount" requiredGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" ) // NewVenafiCloudClient returns a new instance of the VenafiCloudClient type that will perform HTTP requests using a bearer token // to authenticate to the backend API. 
func NewVenafiCloudClient(agentMetadata *api.AgentMetadata, credentials *VenafiSvcAccountCredentials, baseURL string, uploaderID string, uploadPath string) (*VenafiCloudClient, error) {
	if err := credentials.Validate(); err != nil {
		return nil, fmt.Errorf("cannot create VenafiCloudClient: %w", err)
	}
	// Parse the private key up front so a bad key file fails fast, and so the
	// matching JWT signing method is known for later token requests.
	privateKey, jwtSigningAlg, err := parsePrivateKeyAndExtractSigningMethod(credentials.PrivateKeyFile)
	if err != nil {
		return nil, fmt.Errorf("while parsing private key file: %w", err)
	}
	if baseURL == "" {
		return nil, fmt.Errorf("cannot create VenafiCloudClient: baseURL cannot be empty")
	}
	ok, why := credentials.IsClientSet()
	if !ok {
		return nil, fmt.Errorf("%s", why)
	}
	if uploadPath == "" {
		// if the uploadPath is not given, use default upload path
		uploadPath = defaultVenafiCloudUploadEndpoint
	}
	return &VenafiCloudClient{
		agentMetadata: agentMetadata,
		credentials:   credentials,
		baseURL:       baseURL,
		accessToken:   &venafiCloudAccessToken{},
		Client: &http.Client{
			Timeout:   time.Minute,
			Transport: transport.DebugWrappers(http.DefaultTransport),
		},
		uploaderID:    uploaderID,
		uploadPath:    uploadPath,
		privateKey:    privateKey,
		jwtSigningAlg: jwtSigningAlg,
	}, nil
}

// ParseVenafiCredentials reads credentials into a VenafiSvcAccountCredentials struct. Performs validations.
func ParseVenafiCredentials(data []byte) (*VenafiSvcAccountCredentials, error) {
	var credentials VenafiSvcAccountCredentials

	err := json.Unmarshal(data, &credentials)
	if err != nil {
		return nil, err
	}

	if err = credentials.Validate(); err != nil {
		return nil, err
	}

	return &credentials, nil
}

// Validate checks the mandatory fields. It may return a *multierror.Error
// combining several missing-field errors.
func (c *VenafiSvcAccountCredentials) Validate() error {
	var result *multierror.Error

	if c == nil {
		return fmt.Errorf("credentials are nil")
	}

	if c.ClientID == "" {
		result = multierror.Append(result, fmt.Errorf("client_id cannot be empty"))
	}

	if c.PrivateKeyFile == "" {
		result = multierror.Append(result, fmt.Errorf("private_key_file cannot be empty"))
	}

	return result.ErrorOrNil()
}

// IsClientSet returns whether the client credentials are set or not. `why` is
// only returned when `ok` is false.
func (c *VenafiSvcAccountCredentials) IsClientSet() (ok bool, why string) {
	if c.ClientID == "" {
		return false, "ClientID is empty"
	}
	if c.PrivateKeyFile == "" {
		return false, "PrivateKeyFile is empty"
	}
	return true, ""
}

// PostDataReadingsWithOptions uploads the slice of api.DataReading to the Venafi Cloud backend to be processed.
// The Options are then passed as URL params in the request func (c *VenafiCloudClient) PostDataReadingsWithOptions(ctx context.Context, readings []*api.DataReading, opts Options) error { payload := api.DataReadingsPost{ AgentMetadata: c.agentMetadata, DataGatherTime: time.Now().UTC(), DataReadings: readings, } data, err := json.Marshal(payload) if err != nil { return err } if !strings.HasSuffix(c.uploadPath, "/") { c.uploadPath = fmt.Sprintf("%s/", c.uploadPath) } venafiCloudUploadURL, err := url.Parse(filepath.Join(c.uploadPath, c.uploaderID)) if err != nil { return err } // validate options and send them as URL params query := venafiCloudUploadURL.Query() stripHTML := bluemonday.StrictPolicy() if opts.ClusterName != "" { query.Add("name", stripHTML.Sanitize(opts.ClusterName)) } if opts.ClusterDescription != "" { query.Add("description", base64.RawURLEncoding.EncodeToString([]byte(stripHTML.Sanitize(opts.ClusterDescription)))) } venafiCloudUploadURL.RawQuery = query.Encode() klog.FromContext(ctx).V(2).Info( "uploading data readings", "url", venafiCloudUploadURL.String(), "cluster_name", opts.ClusterName, "data_readings_count", len(readings), "data_size_bytes", len(data), ) res, err := c.post(ctx, venafiCloudUploadURL.String(), bytes.NewBuffer(data)) if err != nil { return err } defer res.Body.Close() if code := res.StatusCode; code < 200 || code >= 300 { errorContent := "" body, err := io.ReadAll(res.Body) if err == nil { errorContent = string(body) } return fmt.Errorf("received response with status code %d. Body: [%s]", code, errorContent) } return nil } // Post performs an HTTP POST request. 
func (c *VenafiCloudClient) post(ctx context.Context, path string, body io.Reader) (*http.Response, error) { token, err := c.getValidAccessToken(ctx) if err != nil { return nil, err } req, err := http.NewRequestWithContext(ctx, http.MethodPost, fullURL(c.baseURL, path), body) if err != nil { return nil, err } req.Header.Set("Accept", "application/json") req.Header.Set("Content-Type", "application/json") version.SetUserAgent(req) if len(token.accessToken) > 0 { req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.accessToken)) } return c.Client.Do(req) } // getValidAccessToken returns a valid access token. It will fetch a new access // token from the auth server in case the current access token does not exist // or it is expired. func (c *VenafiCloudClient) getValidAccessToken(ctx context.Context) (*venafiCloudAccessToken, error) { c.lock.RLock() needsUpdate := c.accessToken == nil || time.Now().Add(time.Minute).After(c.accessToken.expirationTime) c.lock.RUnlock() if needsUpdate { err := c.updateAccessToken(ctx) if err != nil { return nil, err } } c.lock.RLock() token := c.accessToken c.lock.RUnlock() return token, nil } func (c *VenafiCloudClient) updateAccessToken(ctx context.Context) error { jwtToken, err := c.generateAndSignJwtToken() if err != nil { return err } values := url.Values{} values.Set("grant_type", requiredGrantType) values.Set("assertion", jwtToken) tokenURL := fullURL(c.baseURL, accessTokenEndpoint) encoded := values.Encode() request, err := http.NewRequestWithContext(ctx, http.MethodPost, tokenURL, strings.NewReader(encoded)) if err != nil { return err } request.Header.Add("Content-Type", "application/x-www-form-urlencoded") request.Header.Add("Content-Length", strconv.Itoa(len(encoded))) version.SetUserAgent(request) now := time.Now() accessToken := accessTokenInformation{} err = c.sendHTTPRequest(request, &accessToken) if err != nil { return err } c.lock.Lock() c.accessToken = &venafiCloudAccessToken{ accessToken: 
accessToken.AccessToken, expirationTime: now.Add(time.Duration(accessToken.ExpiresIn) * time.Second), } c.lock.Unlock() return nil } func (c *VenafiCloudClient) sendHTTPRequest(request *http.Request, responseObject any) error { response, err := c.Client.Do(request) if err != nil { return err } defer response.Body.Close() if response.StatusCode != http.StatusOK && response.StatusCode != http.StatusCreated { body, _ := io.ReadAll(response.Body) return fmt.Errorf("failed to execute http request to the Control Plane. Request %s, status code: %d, body: [%s]", request.URL, response.StatusCode, body) } body, err := io.ReadAll(response.Body) if err != nil { return err } if err = json.Unmarshal(body, responseObject); err != nil { return err } return nil } func (c *VenafiCloudClient) generateAndSignJwtToken() (string, error) { prodURL, err := url.Parse(VenafiCloudProdURL) if err != nil { return "", err } claims := make(jwt.MapClaims) claims["sub"] = c.credentials.ClientID claims["iss"] = c.credentials.ClientID claims["iat"] = time.Now().Unix() claims["exp"] = time.Now().Add(time.Minute).Unix() claims["aud"] = path.Join(prodURL.Host, accessTokenEndpoint) claims["jti"] = uuid.New().String() token, err := jwt.NewWithClaims(c.jwtSigningAlg, claims).SignedString(c.privateKey) if err != nil { return "", err } return token, nil } ================================================ FILE: pkg/client/client_venconn.go ================================================ package client import ( "bytes" "context" "crypto/x509" "encoding/base64" "encoding/json" "errors" "fmt" "io" "net/http" "time" venapi "github.com/jetstack/venafi-connection-lib/api/v1alpha1" "github.com/jetstack/venafi-connection-lib/chain/sources/venafi" "github.com/jetstack/venafi-connection-lib/venafi_client" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/rest" "k8s.io/client-go/transport" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" 
"github.com/jetstack/preflight/api" "github.com/jetstack/preflight/pkg/version" ) type VenConnClient struct { agentMetadata *api.AgentMetadata connHandler venafi_client.ConnectionHandler installNS string // Namespace in which the agent is running in. venConnName string // Name of the VenafiConnection resource to use. venConnNS string // Namespace of the VenafiConnection resource to use. // Used to make HTTP requests to Venafi Cloud. This field is public for // testing purposes so that we can configure trusted CAs; there should be a // way to do that without messing with the client directly (e.g., a flag to // pass a custom CA?), but it's not there yet. Client *http.Client } // NewVenConnClient lets you make requests to the Venafi Cloud backend using the // given VenafiConnection resource. // // You need to call Start to start watching the VenafiConnection resource. If // you don't, the client will be unable to find the VenafiConnection that you // are referring to as its client-go cache will remain empty. // // The http.Client is used for Venafi and Vault, not for Kubernetes. The // `installNS` is the namespace in which the agent is running in and cannot be // empty. `venConnName` and `venConnNS` must not be empty either. The passed // `restcfg` is not mutated. `trustedCAs` is only used for connecting to Venafi // Cloud and Vault and can be left nil. 
func NewVenConnClient(restcfg *rest.Config, agentMetadata *api.AgentMetadata, installNS, venConnName, venConnNS string, trustedCAs *x509.CertPool) (*VenConnClient, error) { if installNS == "" { return nil, errors.New("programmer mistake: installNS must be provided") } if venConnName == "" { return nil, errors.New("programmer mistake: venConnName must be provided") } if venConnNS == "" { return nil, errors.New("programmer mistake: venConnNS must be provided") } restcfg = rest.CopyConfig(restcfg) restcfg.Impersonate = rest.ImpersonationConfig{ UserName: fmt.Sprintf("system:serviceaccount:%s:venafi-connection", installNS), } // TLS-related configuration such as root CAs and client certs are contained // in the restcfg; let's create an http.Client that uses them. httpCl, err := rest.HTTPClientFor(restcfg) if err != nil { return nil, fmt.Errorf("while turning the REST config into an HTTP client: %w", err) } restMapper, err := apiutil.NewDynamicRESTMapper(restcfg, httpCl) if err != nil { return nil, fmt.Errorf("while creating the REST mapper: %w", err) } // This Kubernetes client only needs to be able to read and write the // VenafiConnection resources and read Secret resources. scheme := runtime.NewScheme() _ = venapi.AddToScheme(scheme) _ = corev1.AddToScheme(scheme) var unusedTPPDefaultClientID string handler, err := venafi_client.NewConnectionHandler( version.UserAgent(), "venafi-kubernetes-agent.jetstack.io", "VenafiKubernetesAgent", unusedTPPDefaultClientID, restcfg, scheme, restMapper, trustedCAs, ) if err != nil { return nil, err } vcpClient := &http.Client{} tr := http.DefaultTransport.(*http.Transport).Clone() if trustedCAs != nil { tr.TLSClientConfig.RootCAs = trustedCAs } vcpClient.Transport = transport.DebugWrappers(tr) return &VenConnClient{ agentMetadata: agentMetadata, connHandler: handler, installNS: installNS, venConnName: venConnName, venConnNS: venConnNS, Client: vcpClient, }, nil } // Start starts watching VenafiConnections. 
This function will return soon after // the context is closed, or if an error occurs. func (c *VenConnClient) Start(ctx context.Context) error { return c.connHandler.CacheRunnable().Start(ctx) } // `opts.ClusterName` and `opts.ClusterDescription` are the only values used // from the Options struct. OrgID and ClusterID are not used in Venafi Cloud. func (c *VenConnClient) PostDataReadingsWithOptions(ctx context.Context, readings []*api.DataReading, opts Options) error { if opts.ClusterName == "" { return fmt.Errorf("programmer mistake: the cluster name (aka `cluster_id` in the config file) cannot be left empty") } _, details, err := c.connHandler.Get(ctx, c.installNS, venafi.Scope{}, types.NamespacedName{Name: c.venConnName, Namespace: c.venConnNS}) if err != nil { return fmt.Errorf("while loading the VenafiConnection %s/%s: %w", c.venConnNS, c.venConnName, err) } if details.TPP != nil { return fmt.Errorf(`VenafiConnection %s/%s: the agent cannot be used with TPP`, c.venConnNS, c.venConnName) } if details.VCP != nil && details.VCP.APIKey != "" { // Although it is technically possible to use an API key, we have // decided to not allow it as it isn't recommended and will eventually // be phased out. 
return fmt.Errorf(`VenafiConnection %s/%s: the agent cannot be used with an API key`, c.venConnNS, c.venConnName) } if details.VCP == nil || details.VCP.AccessToken == "" { return fmt.Errorf(`programmer mistake: VenafiConnection %s/%s: TPPAccessToken is empty in the token returned by connHandler.Get: %v`, c.venConnNS, c.venConnName, details) } payload := api.DataReadingsPost{ AgentMetadata: c.agentMetadata, DataGatherTime: time.Now().UTC(), DataReadings: readings, } data, err := json.Marshal(payload) if err != nil { return err } klog.FromContext(ctx).V(2).Info( "uploading data readings", "url", fullURL(details.VCP.URL, "/v1/tlspk/upload/clusterdata/no"), "cluster_name", opts.ClusterName, "data_readings_count", len(readings), "data_size_bytes", len(data), ) // The path parameter "no" is a dummy parameter to make the Venafi Cloud // backend happy. This parameter, named `uploaderID` in the backend, is not // actually used by the backend. req, err := http.NewRequestWithContext(ctx, http.MethodPost, fullURL(details.VCP.URL, "/v1/tlspk/upload/clusterdata/no"), bytes.NewReader(data)) if err != nil { return err } req.Header.Set("Content-Type", "application/json") req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", details.VCP.AccessToken)) version.SetUserAgent(req) q := req.URL.Query() q.Set("name", opts.ClusterName) if opts.ClusterDescription != "" { q.Set("description", base64.RawURLEncoding.EncodeToString([]byte(opts.ClusterDescription))) } req.URL.RawQuery = q.Encode() res, err := c.Client.Do(req) if err != nil { return err } defer res.Body.Close() if code := res.StatusCode; code < 200 || code >= 300 { errorContent := "" body, err := io.ReadAll(res.Body) if err == nil { errorContent = string(body) } return fmt.Errorf("received response with status code %d. 
Body: [%s]", code, errorContent) } return nil } ================================================ FILE: pkg/client/client_venconn_test.go ================================================ package client_test import ( "context" "crypto/x509" "net/http" "regexp" "strings" "testing" "github.com/jetstack/venafi-connection-lib/api/v1alpha1" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/rest" "k8s.io/klog/v2" "k8s.io/klog/v2/ktesting" ctrlruntime "sigs.k8s.io/controller-runtime/pkg/client" "github.com/jetstack/preflight/api" "github.com/jetstack/preflight/pkg/client" "github.com/jetstack/preflight/pkg/testutil" ) // These are using envtest (slow) rather than a fake clientset (fast) because // controller-runtime's fake clientset doesn't support server-side apply [1] and // also because we want to create serviceaccount tokens, which isn't supported // by the fake clientset either. // // The goal is to test the following behaviors: // // - VenafiConnection's `accessToken` works as expected with a fake Venafi // Cloud server. // - VenafiConnection's `apiKey` and `tpp` can't be used by the user. // - NewVenConnClient's `trustedCAs` works as expected. 
// // [1] https://github.com/kubernetes-sigs/controller-runtime/issues/2341 func TestVenConnClient_PostDataReadingsWithOptions(t *testing.T) { ctx, cancel := context.WithCancel(t.Context()) defer cancel() log := ktesting.NewLogger(t, ktesting.NewConfig(ktesting.Verbosity(10))) ctx = klog.NewContext(ctx, log) _, restconf, kclient := testutil.WithEnvtest(t) for _, obj := range testutil.Parse(testutil.VenConnRBAC) { require.NoError(t, kclient.Create(ctx, obj)) } t.Parallel() t.Run("valid accessToken", run_TestVenConnClient_PostDataReadingsWithOptions(ctx, restconf, kclient, testcase{ given: testutil.Undent(` apiVersion: jetstack.io/v1alpha1 kind: VenafiConnection metadata: name: venafi-components namespace: TEST_NAMESPACE spec: vcp: url: FAKE_VENAFI_CLOUD_URL accessToken: - secret: name: accesstoken fields: [accesstoken] allowReferencesFrom: matchExpressions: - {key: kubernetes.io/metadata.name, operator: In, values: [venafi]} --- apiVersion: v1 kind: Secret metadata: name: accesstoken namespace: TEST_NAMESPACE stringData: accesstoken: VALID_ACCESS_TOKEN --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: venafi-connection-accesstoken-reader namespace: TEST_NAMESPACE rules: - apiGroups: [""] resources: ["secrets"] verbs: ["get"] resourceNames: ["accesstoken"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: venafi-connection-accesstoken-reader namespace: TEST_NAMESPACE roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: venafi-connection-accesstoken-reader subjects: - kind: ServiceAccount name: venafi-connection namespace: venafi `), expectReadyCondMsg: "Generated a new token", })) t.Run("error when the apiKey field is used", run_TestVenConnClient_PostDataReadingsWithOptions(ctx, restconf, kclient, testcase{ // Why isn't it possible to use the 'apiKey' field? Although the // Kubernetes Discovery endpoint works with an API key, we have decided // to not support it because it isn't recommended. 
given: testutil.Undent(` apiVersion: jetstack.io/v1alpha1 kind: VenafiConnection metadata: name: venafi-components namespace: TEST_NAMESPACE spec: vcp: url: FAKE_VENAFI_CLOUD_URL apiKey: - secret: name: apikey fields: [apikey] allowReferencesFrom: matchExpressions: - {key: kubernetes.io/metadata.name, operator: In, values: [venafi]} --- apiVersion: v1 kind: Secret metadata: name: apikey namespace: TEST_NAMESPACE stringData: apikey: VALID_API_KEY --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: venafi-connection-apikey-reader namespace: TEST_NAMESPACE rules: - apiGroups: [""] resources: ["secrets"] verbs: ["get"] resourceNames: ["apikey"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: venafi-connection-apikey-reader namespace: TEST_NAMESPACE roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: venafi-connection-apikey-reader subjects: - kind: ServiceAccount name: venafi-connection namespace: venafi `), // PostDataReadingsWithOptions failed, but Get succeeded; that's why the // condition says the VenafiConnection is ready. expectReadyCondMsg: "Generated a new token", expectErr: "VenafiConnection error-when-the-apikey-field-is-used/venafi-components: the agent cannot be used with an API key", })) t.Run("error when the tpp field is used", run_TestVenConnClient_PostDataReadingsWithOptions(ctx, restconf, kclient, testcase{ // IMPORTANT: The user may think they can use 'tpp', spend time // debugging and making the venafi connection work, and then find out // that it doesn't work. The reason is because as of now, we don't first // check if the user has used the 'tpp' field before running Get. 
given: testutil.Undent(` apiVersion: jetstack.io/v1alpha1 kind: VenafiConnection metadata: name: venafi-components namespace: TEST_NAMESPACE spec: tpp: url: FAKE_TPP_URL accessToken: - secret: name: accesstoken fields: [accesstoken] allowReferencesFrom: matchExpressions: - {key: kubernetes.io/metadata.name, operator: In, values: [venafi]} --- apiVersion: v1 kind: Secret metadata: name: accesstoken namespace: TEST_NAMESPACE stringData: accesstoken: VALID_ACCESS_TOKEN --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: venafi-connection-accesstoken-reader namespace: TEST_NAMESPACE rules: - apiGroups: [""] resources: ["secrets"] verbs: ["get"] resourceNames: ["accesstoken"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: venafi-connection-accesstoken-reader namespace: TEST_NAMESPACE roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: venafi-connection-accesstoken-reader subjects: - kind: ServiceAccount name: venafi-connection namespace: venafi `), expectReadyCondMsg: "Generated a new token", expectErr: "VenafiConnection error-when-the-tpp-field-is-used/venafi-components: the agent cannot be used with TPP", })) } type testcase struct { given string expectErr string expectReadyCondMsg string } // All tests share the same envtest (i.e., the same apiserver and etcd process), // so each test needs to be contained in its own Kubernetes namespace. func run_TestVenConnClient_PostDataReadingsWithOptions(ctx context.Context, restcfg *rest.Config, kclient ctrlruntime.WithWatch, test testcase) func(t *testing.T) { return func(t *testing.T) { t.Helper() log := ktesting.NewLogger(t, ktesting.NewConfig(ktesting.Verbosity(10))) ctx := klog.NewContext(ctx, log) fakeVenafiCloud, certCloud, fakeVenafiAssert := testutil.FakeVenafiCloud(t) fakeTPP, certTPP := testutil.FakeTPP(t) fakeVenafiAssert(func(t testing.TB, r *http.Request) { if r.URL.Path == "/v1/useraccounts" { return // We only care about /v1/tlspk/upload/clusterdata. 
} // Let's make sure we didn't forget to add the arbitrary "/no" // (uploader_id) path segment to /v1/tlspk/upload/clusterdata. assert.Equal(t, "/v1/tlspk/upload/clusterdata/no", r.URL.Path) }) certPool := x509.NewCertPool() certPool.AddCert(certCloud) certPool.AddCert(certTPP) cl, err := client.NewVenConnClient( restcfg, &api.AgentMetadata{ClusterID: "no"}, "venafi", // Namespace in which the Agent is running. "venafi-components", // Name of the VenafiConnection. testNameToNamespace(t), // Namespace of the VenafiConnection. certPool, ) require.NoError(t, err) testutil.VenConnStartWatching(ctx, t, cl) test.given = strings.ReplaceAll(test.given, "FAKE_VENAFI_CLOUD_URL", fakeVenafiCloud.URL) test.given = strings.ReplaceAll(test.given, "FAKE_TPP_URL", fakeTPP.URL) test.given = strings.ReplaceAll(test.given, "TEST_NAMESPACE", testNameToNamespace(t)) var givenObjs []ctrlruntime.Object givenObjs = append(givenObjs, testutil.Parse(testutil.Undent(` apiVersion: v1 kind: Namespace metadata: name: `+testNameToNamespace(t)))...) givenObjs = append(givenObjs, testutil.Parse(test.given)...) for _, obj := range givenObjs { require.NoError(t, kclient.Create(ctx, obj)) } err = cl.PostDataReadingsWithOptions(ctx, []*api.DataReading{}, client.Options{ClusterName: "test cluster name"}) if test.expectErr != "" { assert.EqualError(t, err, test.expectErr) } else { require.NoError(t, err) } got := v1alpha1.VenafiConnection{} err = kclient.Get(ctx, types.NamespacedName{Name: "venafi-components", Namespace: testNameToNamespace(t)}, &got) require.NoError(t, err) require.Len(t, got.Status.Conditions, 1) assert.Equal(t, test.expectReadyCondMsg, got.Status.Conditions[0].Message) } } // Because we want valid namespaces for each of the tests, this func converts a // test name into a valid Kubernetes namespace (i.e., a DNS label as per RFC // 1123, including trimming to 63 chars). 
// // For example, the test name: // // Test/sub test has special chars ':"-;@# and is also super super super super long! // // will be converted to: // // sub-test-has-special-chars-and-is-also-super-super-super-super- // // Only the last part of the test name is used. // // nolint:dupword func testNameToNamespace(t testing.TB) string { regex := regexp.MustCompile("[^a-zA-Z0-9-]") // Only keep the part after the last slash. parts := strings.Split(t.Name(), "/") if len(parts) == 0 { return "" } s := parts[len(parts)-1] s = strings.ToLower(s) s = strings.ReplaceAll(s, "_", "-") s = regex.ReplaceAllString(s, "") s = strings.TrimLeft(s, "-") s = strings.TrimRight(s, "-") return s } ================================================ FILE: pkg/client/util.go ================================================ package client import ( "crypto" "crypto/ecdsa" "crypto/ed25519" "crypto/rsa" "crypto/x509" "encoding/pem" "fmt" "os" "github.com/golang-jwt/jwt/v4" ) // parsePrivateKeyFromPEMFile reads and parses a PEM-encoded private key file. 
func parsePrivateKeyFromPEMFile(privateKeyFilePath string) (crypto.PrivateKey, error) { pkBytes, err := os.ReadFile(privateKeyFilePath) if err != nil { return nil, fmt.Errorf("failed to fetch private key %q: %s", privateKeyFilePath, err) } der, _ := pem.Decode(pkBytes) if der == nil { return nil, fmt.Errorf("while decoding the PEM-encoded private key %v, its content were: %s", privateKeyFilePath, string(pkBytes)) } if key, err := x509.ParsePKCS1PrivateKey(der.Bytes); err == nil { return key, nil } if key, err := x509.ParsePKCS8PrivateKey(der.Bytes); err == nil { switch key := key.(type) { case *rsa.PrivateKey, *ecdsa.PrivateKey, ed25519.PrivateKey: return key, nil default: return nil, fmt.Errorf("found unknown private key type in PKCS#8 wrapping: %T", key) } } if key, err := x509.ParseECPrivateKey(der.Bytes); err == nil { return key, nil } return nil, fmt.Errorf("while parsing EC private: %w", err) } // parsePrivateKeyAndExtractSigningMethod parses a private key file and determines // the appropriate JWT signing method based on the key type and size. 
func parsePrivateKeyAndExtractSigningMethod(privateKeyFile string) (crypto.PrivateKey, jwt.SigningMethod, error) {
	privateKey, err := parsePrivateKeyFromPEMFile(privateKeyFile)
	if err != nil {
		return nil, nil, err
	}

	var signingMethod jwt.SigningMethod
	switch key := privateKey.(type) {
	case *rsa.PrivateKey:
		// Pick the RS* variant matching the RSA modulus size; non-standard
		// sizes fall back to RS256.
		bitLen := key.N.BitLen()
		switch bitLen {
		case 2048:
			signingMethod = jwt.SigningMethodRS256
		case 3072:
			signingMethod = jwt.SigningMethodRS384
		case 4096:
			signingMethod = jwt.SigningMethodRS512
		default:
			signingMethod = jwt.SigningMethodRS256
		}
	case *ecdsa.PrivateKey:
		// Pick the ES* variant matching the curve size (P-256 / P-384 /
		// P-521, hence 521 rather than 512); other curves fall back to ES256.
		bitLen := key.Curve.Params().BitSize
		switch bitLen {
		case 256:
			signingMethod = jwt.SigningMethodES256
		case 384:
			signingMethod = jwt.SigningMethodES384
		case 521:
			signingMethod = jwt.SigningMethodES512
		default:
			signingMethod = jwt.SigningMethodES256
		}
	case ed25519.PrivateKey:
		signingMethod = jwt.SigningMethodEdDSA
	default:
		err = fmt.Errorf("unsupported private key type")
	}
	return privateKey, signingMethod, err
}

================================================ FILE: pkg/datagatherer/datagatherer.go ================================================

// Package datagatherer provides the DataGatherer interface.
package datagatherer

import "context"

// Config is the configuration of a DataGatherer.
type Config interface {
	// NewDataGatherer constructs a DataGatherer with a specific configuration.
	NewDataGatherer(ctx context.Context) (DataGatherer, error)
}

// DataGatherer is the interface for Data Gatherers. Data Gatherers are in charge of fetching data from a certain cloud provider API or Kubernetes component.
type DataGatherer interface {
	// Fetch retrieves data.
	// count is the number of items that were discovered. A negative count means the number
	// of items was indeterminate.
	Fetch(ctx context.Context) (data any, count int, err error)
	// Run starts the data gatherer's informers for resource collection.
	// Returns error if the data gatherer informer wasn't initialized
	Run(ctx context.Context) error
	// WaitForCacheSync waits for the data gatherer's informers cache to sync.
	WaitForCacheSync(ctx context.Context) error
}

================================================ FILE: pkg/datagatherer/k8sdiscovery/discovery.go ================================================

package k8sdiscovery

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/discovery"

	"github.com/jetstack/preflight/api"
	"github.com/jetstack/preflight/pkg/datagatherer"
	"github.com/jetstack/preflight/pkg/kubeconfig"
)

// ConfigDiscovery contains the configuration for the k8s-discovery data-gatherer
type ConfigDiscovery struct {
	// KubeConfigPath is the path to the kubeconfig file. If empty, will assume it runs in-cluster.
	KubeConfigPath string `yaml:"kubeconfig"`
}

// UnmarshalYAML unmarshals the Config resolving GroupVersionResource.
func (c *ConfigDiscovery) UnmarshalYAML(unmarshal func(any) error) error {
	// Decode into an anonymous struct first so that only the known fields are
	// copied across.
	aux := struct {
		KubeConfigPath string `yaml:"kubeconfig"`
	}{}
	err := unmarshal(&aux)
	if err != nil {
		return err
	}

	c.KubeConfigPath = aux.KubeConfigPath
	return nil
}

// NewDataGatherer constructs a new instance of the generic K8s data-gatherer for the provided
// GroupVersionResource.
// It gets the UID of the 'kube-system' namespace to use as the cluster ID, once at startup.
// The UID is assumed to be stable for the lifetime of the cluster.
// - https://github.com/kubernetes/kubernetes/issues/77487#issuecomment-489786023
func (c *ConfigDiscovery) NewDataGatherer(ctx context.Context) (datagatherer.DataGatherer, error) {
	cl, err := kubeconfig.NewDiscoveryClient(c.KubeConfigPath)
	if err != nil {
		return nil, err
	}
	// A separate typed clientset is needed only to read the kube-system
	// namespace UID below; the discovery client cannot do that.
	cs, err := kubeconfig.NewClientSet(c.KubeConfigPath)
	if err != nil {
		return nil, fmt.Errorf("while creating new clientset: %s", err)
	}
	kubesystemNS, err := cs.CoreV1().Namespaces().Get(ctx, "kube-system", metav1.GetOptions{})
	if err != nil {
		return nil, fmt.Errorf("while getting the kube-system namespace: %s", err)
	}
	return &DataGathererDiscovery{
		cl:        cl,
		clusterID: string(kubesystemNS.UID),
	}, nil
}

// DataGathererDiscovery stores the config for a k8s-discovery datagatherer
type DataGathererDiscovery struct {
	// The 'discovery' client used for fetching data.
	cl *discovery.DiscoveryClient
	// The cluster ID, derived from the UID of the 'kube-system' namespace.
	clusterID string
}

func (g *DataGathererDiscovery) Run(ctx context.Context) error {
	// no async functionality, see Fetch
	return nil
}

func (g *DataGathererDiscovery) WaitForCacheSync(ctx context.Context) error {
	// no async functionality, see Fetch
	return nil
}

// Fetch will fetch discovery data from the apiserver, or return an error
func (g *DataGathererDiscovery) Fetch(ctx context.Context) (any, int, error) {
	data, err := g.cl.ServerVersion()
	if err != nil {
		return nil, -1, fmt.Errorf("failed to get server version: %v", err)
	}

	response := &api.DiscoveryData{
		ClusterID:     g.clusterID,
		ServerVersion: data,
	}

	// Count is always 1: a single DiscoveryData document.
	return response, 1, nil
}

================================================ FILE: pkg/datagatherer/k8sdynamic/cache.go ================================================

package k8sdynamic

import (
	"fmt"
	"time"

	"github.com/go-logr/logr"
	"github.com/pmylund/go-cache"
	"k8s.io/apimachinery/pkg/types"

	"github.com/jetstack/preflight/api"
)

// time interface, this is used to fetch the current time
// whenever a k8s resource is deleted
type timeInterface interface {
	now() time.Time
}

// clock is a package-level seam so tests can substitute a fake time source.
var clock timeInterface = &realTime{}

type realTime struct {
}

func (*realTime) now() time.Time {
	return time.Now()
}

// cacheResource is the minimal metadata surface (UID + namespace) that the
// cache handlers need from a Kubernetes object.
type cacheResource interface {
	GetUID() types.UID
	GetNamespace() string
}

func logCacheUpdateFailure(log logr.Logger, obj any, operation string) {
	// We use WithCallStackHelper to ensure the correct caller line numbers in the log messages
	helper, log := log.WithCallStackHelper()
	helper()
	err := fmt.Errorf("not a cacheResource type: %T missing metadata/uid field", obj)
	log.Error(err, "Cache update failure", "operation", operation)
}

// onAdd handles the informer creation events, adding the created runtime.Object
// to the data gatherer's cache. The cache key is the uid of the object
func onAdd(log logr.Logger, obj any, dgCache *cache.Cache) {
	item, ok := obj.(cacheResource)
	if ok {
		cacheObject := &api.GatheredResource{
			Resource: obj,
		}
		dgCache.Set(string(item.GetUID()), cacheObject, cache.DefaultExpiration)
		return
	}
	logCacheUpdateFailure(log, obj, "add")
}

// onUpdate handles the informer update events, replacing the old object with the new one
// if it's present in the data gatherer's cache, (if the object isn't present, it gets added).
// The cache key is the uid of the object
func onUpdate(log logr.Logger, oldObj, newObj any, dgCache *cache.Cache) {
	item, ok := oldObj.(cacheResource)
	if ok {
		// The UID comes from the old object; the stored resource is the new
		// object.
		cacheObject := updateCacheGatheredResource(string(item.GetUID()), newObj, dgCache)
		dgCache.Set(string(item.GetUID()), cacheObject, cache.DefaultExpiration)
		return
	}
	logCacheUpdateFailure(log, oldObj, "update")
}

// onDelete handles the informer deletion events, updating the object's properties with the deletion
// time of the object (but not removing the object from the cache).
// The cache key is the uid of the object func onDelete(log logr.Logger, obj any, dgCache *cache.Cache) { item, ok := obj.(cacheResource) if ok { cacheObject := updateCacheGatheredResource(string(item.GetUID()), obj, dgCache) cacheObject.DeletedAt = api.Time{Time: clock.now()} dgCache.Set(string(item.GetUID()), cacheObject, cache.DefaultExpiration) return } logCacheUpdateFailure(log, obj, "delete") } // creates a new updated instance of a cache object, with the resource // argument. If the object is present in the cache it fetches the object's // properties. func updateCacheGatheredResource(cacheKey string, resource any, dgCache *cache.Cache) *api.GatheredResource { // updated cache object cacheObject := &api.GatheredResource{ Resource: resource, } // update the object's properties, if it's already in the cache if o, ok := dgCache.Get(cacheKey); ok { deletedAt := o.(*api.GatheredResource).DeletedAt if deletedAt.IsZero() && !deletedAt.IsZero() { cacheObject.DeletedAt = deletedAt } } return cacheObject } ================================================ FILE: pkg/datagatherer/k8sdynamic/cache_test.go ================================================ package k8sdynamic import ( "testing" "time" "github.com/go-logr/logr" "github.com/pmylund/go-cache" "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/v2/ktesting" "github.com/jetstack/preflight/api" ) func makeGatheredResource(obj runtime.Object, deletedAt api.Time) *api.GatheredResource { return &api.GatheredResource{ Resource: obj, DeletedAt: deletedAt, } } func TestOnAddCache(t *testing.T) { tcs := map[string]struct { inputObjects []runtime.Object eventObjects []runtime.Object eventFunc func(log logr.Logger, old, obj any, dgCache *cache.Cache) expected []*api.GatheredResource }{ "add all objects": { inputObjects: []runtime.Object{ getObject("foobar/v1", "Foo", "testfoo", "testns", false), getObject("v1", "Service", "testservice", "testns", false), getObject("foobar/v1", "NotFoo", 
"notfoo", "testns", false), }, expected: []*api.GatheredResource{ makeGatheredResource(getObject("foobar/v1", "Foo", "testfoo", "testns", false), api.Time{}), makeGatheredResource(getObject("v1", "Service", "testservice", "testns", false), api.Time{}), makeGatheredResource(getObject("foobar/v1", "NotFoo", "notfoo", "testns", false), api.Time{}), }, }, "delete all objects. All objects should have the deletedAt flag": { inputObjects: []runtime.Object{ getObject("foobar/v1", "Foo", "testfoo", "testns", false), getObject("v1", "Service", "testservice", "testns", false), getObject("foobar/v1", "NotFoo", "notfoo", "testns", false), }, // objects to delete eventObjects: []runtime.Object{ getObject("foobar/v1", "Foo", "testfoo", "testns", false), getObject("v1", "Service", "testservice", "testns", false), getObject("foobar/v1", "NotFoo", "notfoo", "testns", false), }, eventFunc: func(log logr.Logger, oldObj, newObj any, dgCache *cache.Cache) { onDelete(log, oldObj, dgCache) }, expected: []*api.GatheredResource{ makeGatheredResource( getObject("foobar/v1", "Foo", "testfoo", "testns", false), api.Time{Time: clock.now()}, ), makeGatheredResource( getObject("v1", "Service", "testservice", "testns", false), api.Time{Time: clock.now()}, ), makeGatheredResource( getObject("foobar/v1", "NotFoo", "notfoo", "testns", false), api.Time{Time: clock.now()}, ), }, }, "update all objects' namespace": { inputObjects: []runtime.Object{ getObject("foobar/v1", "Foo", "testfoo", "testns", false), getObject("v1", "Service", "testservice", "testns", false), getObject("foobar/v1", "NotFoo", "notfoo", "testns", false), }, // objects to update eventObjects: []runtime.Object{ getObject("foobar/v1", "Foo", "testfoo", "testns1", false), getObject("v1", "Service", "testservice", "testns1", false), getObject("foobar/v1", "NotFoo", "notfoo", "testns1", false), }, eventFunc: onUpdate, expected: []*api.GatheredResource{ makeGatheredResource( getObject("foobar/v1", "Foo", "testfoo", "testns1", false), 
api.Time{}, ), makeGatheredResource( getObject("v1", "Service", "testservice", "testns1", false), api.Time{}, ), makeGatheredResource( getObject("foobar/v1", "NotFoo", "notfoo", "testns1", false), api.Time{}, ), }, }, } for name, tc := range tcs { t.Run(name, func(t *testing.T) { log := ktesting.NewLogger(t, ktesting.NewConfig(ktesting.Verbosity(10))) dgCache := cache.New(5*time.Minute, 30*time.Second) // adding initial objetcs to the cache for _, obj := range tc.inputObjects { onAdd(log, obj, dgCache) } // Testing event founction on set of objects for _, obj := range tc.eventObjects { if tc.eventFunc != nil { tc.eventFunc(log, obj, obj, dgCache) } } // items back from the cache list := []*api.GatheredResource{} for _, item := range dgCache.Items() { cacheObject := item.Object.(*api.GatheredResource) list = append(list, cacheObject) } // sorting list of results by name sortGatheredResources(list) // sorting list of expected results by name sortGatheredResources(tc.expected) if len(list) != len(tc.expected) { t.Errorf("unexpected number of return items found. exp:%+v act:%+v", tc.expected, list) } require.Equal(t, tc.expected, list) }) } } // TestNoneCache demonstrates that the cache helpers do not crash if passed a // non-cachable object, but log an error with a reference to the object type. func TestNoneCache(t *testing.T) { log := ktesting.NewLogger(t, ktesting.NewConfig(ktesting.Verbosity(10))) type notCachable struct{} onAdd(log, ¬Cachable{}, nil) onUpdate(log, ¬Cachable{}, nil, nil) onDelete(log, ¬Cachable{}, nil) } ================================================ FILE: pkg/datagatherer/k8sdynamic/dynamic.go ================================================ package k8sdynamic // The venafi-kubernetes-agent has a requirement that **all** resources should // be uploaded, even short-lived secrets, which are created and deleted // in-between data uploads. A cache was added to the datagatherer code, to // satisfy this requirement. 
// The cache stores all resources for 5 minutes. And
// the informer event handlers (onAdd, onUpdate, onDelete) update the cache
// accordingly. The onDelete handler does not remove the object from the cache,
// but instead marks the object as deleted by setting the DeletedAt field on the
// GatheredResource. This ensures that deleted resources are still present in
// the cache for the duration of the cache expiry time.
//
// The cache expiry is hard coded to 5 minutes, which is longer than the
// venafi-kubernetes-agent default upload interval of 1 minute. This means that
// even if a resource is created and deleted in-between data gatherer runs, it
// will still be present in the cache when the data gatherer runs.
//
// TODO(wallrj): When the agent is deployed as CyberArk disco-agent, the deleted
// items are currently discarded before upload. If this remains the case, then the cache is unnecessary
// and should be disabled to save memory.
// If, in the future, the CyberArk Discovery and Context service does want to
// see deleted items, the "deleted resource reporting mechanism" will need to be
// redesigned, so that deleted items are retained for the duration of the upload
// interval.
//
// TODO(wallrj): When the agent is deployed as CyberArk disco-agent, the upload
// interval is 12 hours by default, so the 5 minute cache expiry is not
// sufficient.
//
// TODO(wallrj): The shared informer is configured to relist all
// resources every 1 minute, which will cause unnecessary load on the apiserver.
// We need to look back at the Git history and understand whether this was done
// for good reason or due to some misunderstanding.
import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"regexp"
	"slices"
	"strings"
	"time"

	"github.com/pmylund/go-cache"
	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	appsv1 "k8s.io/api/apps/v1"
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/dynamic/dynamicinformer"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	k8scache "k8s.io/client-go/tools/cache"
	"k8s.io/klog/v2"

	"github.com/jetstack/preflight/api"
	"github.com/jetstack/preflight/internal/envelope"
	"github.com/jetstack/preflight/pkg/datagatherer"
	"github.com/jetstack/preflight/pkg/kubeconfig"
	"github.com/jetstack/preflight/pkg/logs"
)

// ConfigDynamic contains the configuration for the data-gatherer.
type ConfigDynamic struct {
	// KubeConfigPath is the path to the kubeconfig file. If empty, will assume it runs in-cluster.
	KubeConfigPath string `yaml:"kubeconfig"`
	// GroupVersionResource identifies the resource type to gather.
	GroupVersionResource schema.GroupVersionResource
	// ExcludeNamespaces is a list of namespaces to exclude.
	// Mutually exclusive with IncludeNamespaces (enforced by validate).
	ExcludeNamespaces []string `yaml:"exclude-namespaces"`
	// IncludeNamespaces is a list of namespaces to include.
	IncludeNamespaces []string `yaml:"include-namespaces"`
	// FieldSelectors is a list of field selectors to use when listing this resource
	FieldSelectors []string `yaml:"field-selectors"`
	// LabelSelectors is a list of label selectors to use when listing this resource
	LabelSelectors []string `yaml:"label-selectors"`
}

// UnmarshalYAML unmarshals the ConfigDynamic resolving GroupVersionResource.
func (c *ConfigDynamic) UnmarshalYAML(unmarshal func(any) error) error {
	// Decode into an anonymous struct so that the nested `resource-type`
	// mapping can be flattened into the GroupVersionResource field.
	aux := struct {
		KubeConfigPath string `yaml:"kubeconfig"`
		ResourceType   struct {
			Group    string `yaml:"group"`
			Version  string `yaml:"version"`
			Resource string `yaml:"resource"`
		} `yaml:"resource-type"`
		ExcludeNamespaces []string `yaml:"exclude-namespaces"`
		IncludeNamespaces []string `yaml:"include-namespaces"`
		FieldSelectors    []string `yaml:"field-selectors"`
		LabelSelectors    []string `yaml:"label-selectors"`
	}{}
	err := unmarshal(&aux)
	if err != nil {
		return err
	}

	c.KubeConfigPath = aux.KubeConfigPath
	c.GroupVersionResource.Group = aux.ResourceType.Group
	c.GroupVersionResource.Version = aux.ResourceType.Version
	c.GroupVersionResource.Resource = aux.ResourceType.Resource
	c.ExcludeNamespaces = aux.ExcludeNamespaces
	c.IncludeNamespaces = aux.IncludeNamespaces
	c.FieldSelectors = aux.FieldSelectors
	c.LabelSelectors = aux.LabelSelectors
	return nil
}

// validate validates the configuration.
// All problems are collected and reported in a single combined error.
func (c *ConfigDynamic) validate() error {
	var errs []string
	if len(c.ExcludeNamespaces) > 0 && len(c.IncludeNamespaces) > 0 {
		errs = append(errs, "cannot set excluded and included namespaces")
	}

	if c.GroupVersionResource.Resource == "" {
		errs = append(errs, "invalid configuration: GroupVersionResource.Resource cannot be empty")
	}

	for i, fieldSelectorString := range c.FieldSelectors {
		if fieldSelectorString == "" {
			errs = append(errs, fmt.Sprintf("invalid field selector %d: must not be empty", i))
		}
		_, err := fields.ParseSelector(fieldSelectorString)
		if err != nil {
			errs = append(errs, fmt.Sprintf("invalid field selector %d: %s", i, err))
		}
	}

	for i, labelSelectorString := range c.LabelSelectors {
		if labelSelectorString == "" {
			errs = append(errs, fmt.Sprintf("invalid label selector %d: must not be empty", i))
		}
		_, err := labels.Parse(labelSelectorString)
		if err != nil {
			errs = append(errs, fmt.Sprintf("invalid label selector %d: %s", i, err))
		}
	}

	if len(errs) > 0 {
		return errors.New(strings.Join(errs, ", "))
	}
	return nil
}

// sharedInformerFunc creates a SharedIndexInformer given a SharedInformerFactory
type sharedInformerFunc func(informers.SharedInformerFactory) k8scache.SharedIndexInformer

// kubernetesNativeResources map of the native kubernetes resources, linking each resource to a sharedInformerFunc for that resource.
// secrets are still treated as unstructured rather than corev1.Secret, for a faster unmarshaling
var kubernetesNativeResources = map[schema.GroupVersionResource]sharedInformerFunc{
	corev1.SchemeGroupVersion.WithResource("pods"): func(sharedFactory informers.SharedInformerFactory) k8scache.SharedIndexInformer {
		return sharedFactory.Core().V1().Pods().Informer()
	},
	corev1.SchemeGroupVersion.WithResource("nodes"): func(sharedFactory informers.SharedInformerFactory) k8scache.SharedIndexInformer {
		return sharedFactory.Core().V1().Nodes().Informer()
	},
	corev1.SchemeGroupVersion.WithResource("services"): func(sharedFactory informers.SharedInformerFactory) k8scache.SharedIndexInformer {
		return sharedFactory.Core().V1().Services().Informer()
	},
	corev1.SchemeGroupVersion.WithResource("configmaps"): func(sharedFactory informers.SharedInformerFactory) k8scache.SharedIndexInformer {
		return sharedFactory.Core().V1().ConfigMaps().Informer()
	},
	appsv1.SchemeGroupVersion.WithResource("deployments"): func(sharedFactory informers.SharedInformerFactory) k8scache.SharedIndexInformer {
		return sharedFactory.Apps().V1().Deployments().Informer()
	},
	appsv1.SchemeGroupVersion.WithResource("daemonsets"): func(sharedFactory informers.SharedInformerFactory) k8scache.SharedIndexInformer {
		return sharedFactory.Apps().V1().DaemonSets().Informer()
	},
	appsv1.SchemeGroupVersion.WithResource("statefulsets"): func(sharedFactory informers.SharedInformerFactory) k8scache.SharedIndexInformer {
		return sharedFactory.Apps().V1().StatefulSets().Informer()
	},
	appsv1.SchemeGroupVersion.WithResource("replicasets"): func(sharedFactory informers.SharedInformerFactory) k8scache.SharedIndexInformer {
		return
sharedFactory.Apps().V1().ReplicaSets().Informer() }, appsv1.SchemeGroupVersion.WithResource("replicasets"): func(sharedFactory informers.SharedInformerFactory) k8scache.SharedIndexInformer { return sharedFactory.Apps().V1().ReplicaSets().Informer() }, admissionregistrationv1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations"): func(sharedFactory informers.SharedInformerFactory) k8scache.SharedIndexInformer { return sharedFactory.Admissionregistration().V1().ValidatingWebhookConfigurations().Informer() }, admissionregistrationv1.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations"): func(sharedFactory informers.SharedInformerFactory) k8scache.SharedIndexInformer { return sharedFactory.Admissionregistration().V1().MutatingWebhookConfigurations().Informer() }, batchv1.SchemeGroupVersion.WithResource("jobs"): func(sharedFactory informers.SharedInformerFactory) k8scache.SharedIndexInformer { return sharedFactory.Batch().V1().Jobs().Informer() }, } // NewDataGatherer constructs a new instance of the generic K8s data-gatherer for the provided func (c *ConfigDynamic) NewDataGatherer(ctx context.Context) (datagatherer.DataGatherer, error) { if isNativeResource(c.GroupVersionResource) { clientset, err := kubeconfig.NewClientSet(c.KubeConfigPath) if err != nil { return nil, err } return c.newDataGathererWithClient(ctx, nil, clientset) } else { cl, err := kubeconfig.NewDynamicClient(c.KubeConfigPath) if err != nil { return nil, err } return c.newDataGathererWithClient(ctx, cl, nil) } } func (c *ConfigDynamic) newDataGathererWithClient(ctx context.Context, cl dynamic.Interface, clientset kubernetes.Interface) (datagatherer.DataGatherer, error) { log := klog.FromContext(ctx) if err := c.validate(); err != nil { return nil, err } // init shared informer for selected namespaces fieldSelector := generateExcludedNamespacesFieldSelector(c.ExcludeNamespaces) // Add any custom field selectors to the excluded namespaces selector // The selectors have already 
	// been validated, so it is safe to use
	// ParseSelectorOrDie here.
	for _, fieldSelectorString := range c.FieldSelectors {
		fieldSelector = fields.AndSelectors(fieldSelector, fields.ParseSelectorOrDie(fieldSelectorString))
	}

	// Add any custom label selectors
	// The selectors have already been validated, so Parse is expected to
	// succeed; any parse error is treated as a programming error.
	labelSelector := labels.Everything()
	for _, labelSelectorString := range c.LabelSelectors {
		selector, err := labels.Parse(labelSelectorString)
		if err != nil {
			panic(fmt.Sprintf("PROGRAMMING ERROR: should have been caught in validation: "+
				"failed to parse validated label selector %q: %v", labelSelectorString, err))
		}
		// The second return value reports whether the selector is selectable;
		// it is deliberately ignored here since the selector was validated.
		reqs, _ := selector.Requirements()
		labelSelector = labelSelector.Add(reqs...)
	}

	// init cache to store gathered resources
	dgCache := cache.New(5*time.Minute, 30*time.Second)
	newDataGatherer := &DataGathererDynamic{
		groupVersionResource: c.GroupVersionResource,
		fieldSelector:        fieldSelector.String(),
		labelSelector:        labelSelector.String(),
		namespaces:           c.IncludeNamespaces,
		cache:                dgCache,
	}

	// In order to reduce memory usage that might come from using Dynamic Informers
	// * https://github.com/kyverno/kyverno/issues/1832#issuecomment-968782166
	// * https://github.com/kubernetes/client-go/issues/832
	// * https://github.com/kubernetes/client-go/issues/871
	// we use SharedIndexInformer for known resources, these informers have less of an impact on the
	// memory usage. Dynamic datagatheres will use them for some of the native resources instead of
	// dynamic informers.
	if informerFunc, ok := kubernetesNativeResources[c.GroupVersionResource]; ok {
		factory := informers.NewSharedInformerFactoryWithOptions(clientset,
			// TODO(wallrj): This causes all resources to be relisted every 1
			// minute which will cause unnecessary load on the apiserver.
			60*time.Second,
			informers.WithNamespace(metav1.NamespaceAll),
			informers.WithTweakListOptions(func(options *metav1.ListOptions) {
				options.FieldSelector = fieldSelector.String()
				options.LabelSelector = labelSelector.String()
			}),
		)
		newDataGatherer.informer = informerFunc(factory)
	} else {
		factory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(
			cl,
			// TODO(wallrj): This causes all resources to be relisted every 1
			// minute which will cause unnecessary load on the apiserver.
			60*time.Second,
			metav1.NamespaceAll,
			func(options *metav1.ListOptions) {
				options.FieldSelector = fieldSelector.String()
				options.LabelSelector = labelSelector.String()
			},
		)
		newDataGatherer.informer = factory.ForResource(c.GroupVersionResource).Informer()
	}

	registration, err := newDataGatherer.informer.AddEventHandlerWithOptions(k8scache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj any) { onAdd(log, obj, dgCache) },
		UpdateFunc: func(oldObj, newObj any) { onUpdate(log, oldObj, newObj, dgCache) },
		DeleteFunc: func(obj any) { onDelete(log, obj, dgCache) },
	}, k8scache.HandlerOptions{
		Logger: &log,
	})
	if err != nil {
		return nil, err
	}
	newDataGatherer.registration = registration
	return newDataGatherer, nil
}

// DataGathererDynamic is a generic gatherer for Kubernetes. It knows how to request
// a list of generic resources from the Kubernetes apiserver.
// It does not deserialize the objects into structured data, instead utilising
// the Kubernetes `Unstructured` type for data handling.
// This is to allow us to support arbitrary CRDs and resources that Preflight
// does not have registered as part of its `runtime.Scheme`.
type DataGathererDynamic struct {
	// groupVersionResource is the name of the API group, version and resource
	// that should be fetched by this data gatherer.
	groupVersionResource schema.GroupVersionResource
	// namespace, if specified, limits the namespace of the resources returned.
	// This field *must* be omitted when the groupVersionResource refers to a
	// non-namespaced resource.
	namespaces []string
	// fieldSelector is a field selector string used to filter resources
	// returned by the Kubernetes API.
	// https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/
	fieldSelector string
	// labelSelector is a label selector string used to filter resources
	// returned by the Kubernetes API.
	labelSelector string

	// cache holds all resources watched by the data gatherer, default object expiry time 5 minutes
	// 30 seconds purge time https://pkg.go.dev/github.com/patrickmn/go-cache
	cache *cache.Cache
	// informer watches the events around the targeted resource and updates the cache
	informer     k8scache.SharedIndexInformer
	registration k8scache.ResourceEventHandlerRegistration

	// ExcludeAnnotKeys is a list of regexes matching annotation keys to drop
	// during redaction.
	ExcludeAnnotKeys []*regexp.Regexp
	// ExcludeLabelKeys is a list of regexes matching label keys to drop during
	// redaction.
	ExcludeLabelKeys []*regexp.Regexp

	// Encryptor, if non-nil, will be used to envelope encrypt Secret data.
	// If nil, Secret data will be redacted.
	Encryptor envelope.Encryptor
}

func (g *DataGathererDynamic) GVR() schema.GroupVersionResource {
	return g.groupVersionResource
}

// Run starts the dynamic data gatherer's informers for resource collection.
// Returns error if the data gatherer informer wasn't initialized, Run blocks
// until the stopCh is closed.
func (g *DataGathererDynamic) Run(ctx context.Context) error { log := klog.FromContext(ctx) if g.informer == nil { return fmt.Errorf("informer was not initialized, impossible to start") } // attach WatchErrorHandler, it needs to be set before starting an informer err := g.informer.SetWatchErrorHandler(func(r *k8scache.Reflector, err error) { if strings.Contains(fmt.Sprintf("%s", err), "the server could not find the requested resource") { log.V(logs.Debug).Info("Server missing resource for datagatherer", "groupVersionResource", g.groupVersionResource) } else { log.Info("datagatherer informer has failed and is backing off", "groupVersionResource", g.groupVersionResource, "reason", err) } }) if err != nil { return fmt.Errorf("failed to SetWatchErrorHandler on informer: %s", err) } // start shared informer g.informer.RunWithContext(ctx) return nil } var ErrCacheSyncTimeout = fmt.Errorf("timed out waiting for Kubernetes cache to sync") // WaitForCacheSync waits for the data gatherer's informers cache to sync before // collecting the resources. Use errors.Is(err, ErrCacheSyncTimeout) to check if // the cache sync failed. func (g *DataGathererDynamic) WaitForCacheSync(ctx context.Context) error { // Don't use WaitForNamedCacheSync, since we don't want to log extra messages. if !k8scache.WaitForCacheSync(ctx.Done(), g.registration.HasSynced) { return ErrCacheSyncTimeout } return nil } // Fetch will fetch the requested data from the apiserver, or return an error // if fetching the data fails. 
func (g *DataGathererDynamic) Fetch(ctx context.Context) (any, int, error) { if g.groupVersionResource.String() == "" { return nil, -1, fmt.Errorf("resource type must be specified") } var items = []*api.GatheredResource{} fetchNamespaces := g.namespaces if len(fetchNamespaces) == 0 { // then they must have been looking for all namespaces fetchNamespaces = []string{metav1.NamespaceAll} } // delete expired items from the cache g.cache.DeleteExpired() for _, item := range g.cache.Items() { // filter cache items by namespace cacheObject := item.Object.(*api.GatheredResource) if resource, ok := cacheObject.Resource.(cacheResource); ok { namespace := resource.GetNamespace() if isIncludedNamespace(namespace, fetchNamespaces) { items = append(items, cacheObject) } continue } return nil, -1, fmt.Errorf("failed to parse cached resource") } // Redact Secret data (which may include encrypting it if enabled) err := g.redactList(ctx, items) if err != nil { return nil, -1, err } return &api.DynamicData{ Items: items, }, len(items), nil } // redactList removes sensitive and superfluous data from the supplied resource list. // All resources have superfluous managed-data fields removed. // All resources have sensitive labels and annotations removed. // Secret and Route are processed as special cases. For these // resources there is an allow-list of fields that should be retained. // For Secret resources, the `data` is redacted, to prevent private keys or sensitive // data being collected; only the tls.crt and ca.crt data keys are retained. // However, if keepSecretData is true (i.e., encryption is enabled), secret data is NOT redacted // so it can be encrypted later in the upload pipeline. // For Route resources, only the fields related to CA certificate and policy are retained. // TODO(wallrj): A short coming of the current allow-list implementation is that // you have to specify absolute fields paths. It is not currently possible to // select all metadata with: `{metadata}`. 
This means that the metadata for // Secret and Route has fewer fields than the metadata for all other resources. func (g *DataGathererDynamic) redactList(ctx context.Context, list []*api.GatheredResource) error { secretSelectedFields := slices.Clone(SecretSelectedFields) if g.Encryptor != nil { secretSelectedFields = append(secretSelectedFields, FieldPath{"_encryptedData"}) } for i := range list { if item, ok := list[i].Resource.(*unstructured.Unstructured); ok { // Determine the kind of items in case this is a generic 'mixed' list. gvks, _, err := scheme.Scheme.ObjectKinds(item) if err != nil { return err } resource := item // Redact item if it is a Secret or a Route. for _, gvk := range gvks { // secret object if gvk.Kind == "Secret" && (gvk.Group == "core" || gvk.Group == "") { // Note: We must redact data field in all cases! // If encryption is enabled, we encrypt the data and preserve it, but we still need to redact later. // If encryption is enabled and _fails_, we MUST still redact the data field to avoid leaking sensitive information. 
if g.Encryptor != nil { err := g.encryptDataField(ctx, resource) if err != nil { // WARNING: We CAN NOT return an error here, as that would leak the secret data log := klog.FromContext(ctx).WithName("encryptDataField") log.Error(err, "failed to encrypt secret data field; no encrypted secret data will be sent for object", "secretName", resource.GetName()) } } // Redact to only selected fields if err := Select(secretSelectedFields, resource); err != nil { return err } } else if gvk.Kind == "Route" && gvk.Group == "route.openshift.io" { // route object if err := Select(RouteSelectedFields, resource); err != nil { return err } } } // remove managedFields from all resources Redact(RedactFields, resource) RemoveUnstructuredKeys(g.ExcludeAnnotKeys, resource, "metadata", "annotations") RemoveUnstructuredKeys(g.ExcludeLabelKeys, resource, "metadata", "labels") continue } // objectMeta interface is used to give resources from sharedIndexInformers, (core.Pod|apps.Deployment), a common interface // with access to the metav1.Object type objectMeta interface{ GetObjectMeta() metav1.Object } // all objects fetched from sharedIndexInformers is now redacted // removing the managedFields and `kubectl.kubernetes.io/last-applied-configuration` annotation if item, ok := list[i].Resource.(objectMeta); ok { item.GetObjectMeta().SetManagedFields(nil) delete(item.GetObjectMeta().GetAnnotations(), "kubectl.kubernetes.io/last-applied-configuration") RemoveTypedKeys(g.ExcludeAnnotKeys, item.GetObjectMeta().GetAnnotations()) RemoveTypedKeys(g.ExcludeLabelKeys, item.GetObjectMeta().GetLabels()) resource := item.(runtime.Object) gvks, _, err := scheme.Scheme.ObjectKinds(resource) if err != nil { return err } // During the internal marshal/unmarshal the runtime.Object the metav1.TypeMeta seems to be lost // this section reassigns the TypeMeta to the resource for _, gvk := range gvks { if len(gvk.Kind) == 0 { continue } if len(gvk.Version) == 0 || gvk.Version == runtime.APIVersionInternal { 
continue } resource.GetObjectKind().SetGroupVersionKind(gvk) break } continue } } return nil } const encryptedDataFieldName = "_encryptedData" var encryptedDataField = FieldPath{encryptedDataFieldName} // encryptDataField encrypts the `data` field of the given secret and stores the encrypted data // in a new field with the name of [encryptedDataFieldName]. The original `data` field is left unchanged, on the // assumption that it will be redacted after the encryption step. // This function does not check that the given resource is actually a Secret; that is the caller's responsibility. func (g *DataGathererDynamic) encryptDataField(ctx context.Context, secret *unstructured.Unstructured) error { if g.Encryptor == nil { return nil } plaintextDataRaw, found, err := unstructured.NestedFieldNoCopy(secret.Object, "data") if err != nil { return fmt.Errorf("error retrieving secret data field during redaction for encryption: %w", err) } if !found { return fmt.Errorf("no data field found on secret") } plaintextDataTyped, ok := plaintextDataRaw.(map[string]any) if !ok { return fmt.Errorf("secret data field is not of expected map type for encryption") } // we want to encrypt the JSON representation of the data field plaintextData, err := json.Marshal(plaintextDataTyped) if err != nil { return fmt.Errorf("failed to marshal secret data field for encryption: %w", err) } encryptedData, err := g.Encryptor.Encrypt(ctx, plaintextData) if err != nil { return fmt.Errorf("failed to encrypt secret data during redaction: %w", err) } err = unstructured.SetNestedField(secret.Object, encryptedData.ToMap(), encryptedDataField...) if err != nil { return fmt.Errorf("failed to set %s field on secret resource during redaction: %w", encryptedDataFieldName, err) } return nil } // Meant for typed clientset objects. 
func RemoveTypedKeys(excludeAnnotKeys []*regexp.Regexp, m map[string]string) { for key := range m { for _, excludeAnnotKey := range excludeAnnotKeys { if excludeAnnotKey.MatchString(key) { delete(m, key) } } } } // Meant for unstructured clientset objects. Removes the keys from the field // given as input. For example, let's say we have the following object: // // { // "metadata": { // "annotations": { // "key1": "value1", // "key2": "value2" // } // } // } // // Then, the following call: // // RemoveUnstructuredKeys("^key1$", obj, "metadata", "annotations") // // Will result in: // // { // "metadata": { // "annotations": {"key2": "value2"} // } // } // // If the given path doesn't exist or leads to a non-map object, nothing // happens. The leaf object must either be a map[string]interface{} (that's // what's returned by the unstructured clientset) or a map[string]string (that's // what's returned by the typed clientset). func RemoveUnstructuredKeys(excludeKeys []*regexp.Regexp, obj *unstructured.Unstructured, path ...string) { annotsRaw, ok, err := unstructured.NestedFieldNoCopy(obj.Object, path...) if err != nil { return } if !ok { return } // The field may be nil since yaml.Unmarshal's omitempty might not be set // on this struct field. if annotsRaw == nil { return } // The only possible type in an unstructured.Unstructured object is // map[string]interface{}. That's because the yaml.Unmarshal func is used // with an empty map[string]interface{} object, which means all nested // objects will be unmarshalled to a map[string]interface{}. annots, ok := annotsRaw.(map[string]any) if !ok { return } for key := range annots { for _, excludeAnnotKey := range excludeKeys { if excludeAnnotKey.MatchString(key) { delete(annots, key) } } } } // generateExcludedNamespacesFieldSelector creates a field selector string from // a list of namespaces to exclude. 
func generateExcludedNamespacesFieldSelector(excludeNamespaces []string) fields.Selector { var selectors []fields.Selector for _, excludeNamespace := range excludeNamespaces { if excludeNamespace == "" { continue } selectors = append(selectors, fields.OneTermNotEqualSelector("metadata.namespace", excludeNamespace)) } return fields.AndSelectors(selectors...) } func isIncludedNamespace(namespace string, namespaces []string) bool { if namespaces[0] == metav1.NamespaceAll { return true } return slices.Contains(namespaces, namespace) } func isNativeResource(gvr schema.GroupVersionResource) bool { _, ok := kubernetesNativeResources[gvr] return ok } ================================================ FILE: pkg/datagatherer/k8sdynamic/dynamic_test.go ================================================ package k8sdynamic import ( "context" "crypto/rand" stdrsa "crypto/rsa" "encoding/base64" "encoding/json" "fmt" "reflect" "regexp" "slices" "strings" "sync" "testing" "time" "github.com/lestrrat-go/jwx/v3/jwa" "github.com/lestrrat-go/jwx/v3/jwe" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic/dynamicinformer" "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/informers" fakeclientset "k8s.io/client-go/kubernetes/fake" k8scache "k8s.io/client-go/tools/cache" "github.com/jetstack/preflight/api" "github.com/jetstack/preflight/internal/envelope" "github.com/jetstack/preflight/internal/envelope/keyfetch" "github.com/jetstack/preflight/internal/envelope/rsa" ) func getObject(version, kind, name, namespace string, withManagedFields bool) *unstructured.Unstructured { metadata := map[string]any{ "name": name, "namespace": namespace, "uid": fmt.Sprintf("%s1", name), } if withManagedFields { // []metav1.FieldsV1{} can't be 
deep copied by fake client so using // string as example value metadata["managedFields"] = "set" } object := map[string]any{ "apiVersion": version, "kind": kind, "metadata": metadata, } return &unstructured.Unstructured{ Object: object, } } func getObjectAnnot(version, kind, name, namespace string, annotations, labels map[string]any) *unstructured.Unstructured { obj := getObject(version, kind, name, namespace, false) metadata, _ := obj.Object["metadata"].(map[string]any) if annotations == nil { annotations = make(map[string]any) } metadata["annotations"] = annotations metadata["labels"] = labels return obj } func getSecret(name, namespace string, data map[string]any, isTLS bool, withLastApplied bool) *unstructured.Unstructured { object := getObject("v1", "Secret", name, namespace, false) if data != nil { object.Object["data"] = data } object.Object["type"] = "Opaque" if isTLS { object.Object["type"] = "kubernetes.io/tls" } metadata, _ := object.Object["metadata"].(map[string]any) annotations := make(map[string]any) // if we're creating a 'raw' secret as scraped that was applied by kubectl if withLastApplied { jsonData, _ := json.Marshal(data) annotations["kubectl.kubernetes.io/last-applied-configuration"] = string(jsonData) } metadata["annotations"] = annotations return object } func sortResourcesByName(list []*unstructured.Unstructured) { slices.SortStableFunc(list, func(a, b *unstructured.Unstructured) int { return strings.Compare(a.GetName(), b.GetName()) }) } func sortGatheredResources(list []*api.GatheredResource) { type namer interface { GetName() string } slices.SortStableFunc(list, func(a, b *api.GatheredResource) int { aNamer, ok := a.Resource.(namer) if !ok { panic("got unexpected resource type") } bNamer, ok := b.Resource.(namer) if !ok { panic("got unexpected resource type") } return strings.Compare(aNamer.GetName(), bNamer.GetName()) }) } func TestNewDataGathererWithClientAndDynamicInformer(t *testing.T) { ctx := t.Context() config := ConfigDynamic{ 
ExcludeNamespaces: []string{"kube-system"}, GroupVersionResource: schema.GroupVersionResource{Group: "foobar", Version: "v1", Resource: "foos"}, FieldSelectors: []string{ "type!=kubernetes.io/service-account-token", "type!=kubernetes.io/dockercfg", }, LabelSelectors: []string{ "conjur.org/name=conjur-connect-configmap", "app=my-app", }, } cl := fake.NewSimpleDynamicClient(runtime.NewScheme()) dg, err := config.newDataGathererWithClient(ctx, cl, nil) if err != nil { t.Errorf("expected no error but got: %v", err) } expected := &DataGathererDynamic{ groupVersionResource: config.GroupVersionResource, // it's important that the namespaces are set as the IncludeNamespaces // during initialization namespaces: config.IncludeNamespaces, fieldSelector: "metadata.namespace!=kube-system,type!=kubernetes.io/service-account-token,type!=kubernetes.io/dockercfg", labelSelector: "app=my-app,conjur.org/name=conjur-connect-configmap", } gatherer := dg.(*DataGathererDynamic) // test gatherer's fields if !reflect.DeepEqual(gatherer.groupVersionResource, expected.groupVersionResource) { t.Errorf("expected %v, got %v", expected, dg) } if !reflect.DeepEqual(gatherer.namespaces, expected.namespaces) { t.Errorf("expected %v, got %v", expected, dg) } if gatherer.cache == nil { t.Errorf("unexpected cache value: %v", nil) } if gatherer.informer == nil { t.Errorf("unexpected resource informer value: %v", nil) } if gatherer.registration == nil { t.Errorf("unexpected resource event handler registration value: %v", nil) } if !reflect.DeepEqual(gatherer.fieldSelector, expected.fieldSelector) { t.Errorf("expected %v, got %v", expected.fieldSelector, gatherer.fieldSelector) } if !reflect.DeepEqual(gatherer.labelSelector, expected.labelSelector) { t.Errorf("expected %v, got %v", expected.labelSelector, gatherer.labelSelector) } } func TestNewDataGathererWithClientAndSharedIndexInformer(t *testing.T) { ctx := t.Context() config := ConfigDynamic{ IncludeNamespaces: []string{"a"}, GroupVersionResource: 
schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}, LabelSelectors: []string{ "app=my-app", "version=v1", }, } clientset := fakeclientset.NewSimpleClientset() dg, err := config.newDataGathererWithClient(ctx, nil, clientset) if err != nil { t.Errorf("expected no error but got: %v", err) } expected := &DataGathererDynamic{ groupVersionResource: config.GroupVersionResource, // it's important that the namespaces are set as the IncludeNamespaces // during initialization namespaces: config.IncludeNamespaces, labelSelector: "app=my-app,version=v1", } gatherer := dg.(*DataGathererDynamic) // test gatherer's fields if !reflect.DeepEqual(gatherer.groupVersionResource, expected.groupVersionResource) { t.Errorf("expected %v, got %v", expected, dg) } if !reflect.DeepEqual(gatherer.namespaces, expected.namespaces) { t.Errorf("expected %v, got %v", expected, dg) } if gatherer.cache == nil { t.Errorf("unexpected cache value: %v", nil) } if gatherer.informer == nil { t.Errorf("unexpected resource informer value: %v", nil) } if gatherer.registration == nil { t.Errorf("unexpected event handler registration value: %v", nil) } if !reflect.DeepEqual(gatherer.labelSelector, expected.labelSelector) { t.Errorf("expected %v, got %v", expected.labelSelector, gatherer.labelSelector) } } func TestUnmarshalDynamicConfig(t *testing.T) { textCfg := ` kubeconfig: "/home/someone/.kube/config" resource-type: group: "g" version: "v" resource: "r" exclude-namespaces: - kube-system - my-namespace # this config is invalid, but the validation is tested elsewhere # include-namespaces is here just to ensure that they are loaded # from the config file include-namespaces: - default field-selectors: - type!=kubernetes.io/service-account-token label-selectors: - conjur.org/name=conjur-connect-configmap - app=my-app ` expectedGVR := schema.GroupVersionResource{ Group: "g", Version: "v", Resource: "r", } expectedExcludeNamespaces := []string{ "kube-system", "my-namespace", } 
expectedIncludeNamespaces := []string{"default"} expectedFieldSelectors := []string{ "type!=kubernetes.io/service-account-token", } expectedLabelSelectors := []string{ "conjur.org/name=conjur-connect-configmap", "app=my-app", } cfg := ConfigDynamic{} err := yaml.Unmarshal([]byte(textCfg), &cfg) if err != nil { t.Fatalf("unexpected error: %+v", err) } if got, want := cfg.KubeConfigPath, "/home/someone/.kube/config"; got != want { t.Errorf("KubeConfigPath does not match: got=%q; want=%q", got, want) } if got, want := cfg.GroupVersionResource, expectedGVR; !reflect.DeepEqual(got, want) { t.Errorf("GroupVersionResource does not match: got=%+v want=%+v", got, want) } if got, want := cfg.ExcludeNamespaces, expectedExcludeNamespaces; !reflect.DeepEqual(got, want) { t.Errorf("ExcludeNamespaces does not match: got=%+v want=%+v", got, want) } if got, want := cfg.IncludeNamespaces, expectedIncludeNamespaces; !reflect.DeepEqual(got, want) { t.Errorf("IncludeNamespaces does not match: got=%+v want=%+v", got, want) } if got, want := cfg.FieldSelectors, expectedFieldSelectors; !reflect.DeepEqual(got, want) { t.Errorf("FieldSelectors does not match: got=%+v want=%+v", got, want) } if got, want := cfg.LabelSelectors, expectedLabelSelectors; !reflect.DeepEqual(got, want) { t.Errorf("LabelSelectors does not match: got=%+v want=%+v", got, want) } } func TestConfigDynamicValidate(t *testing.T) { tests := []struct { Config ConfigDynamic ExpectedError string }{ { Config: ConfigDynamic{ GroupVersionResource: schema.GroupVersionResource{ Group: "", Version: "", Resource: "", }, }, ExpectedError: "invalid configuration: GroupVersionResource.Resource cannot be empty", }, { Config: ConfigDynamic{ IncludeNamespaces: []string{"a"}, ExcludeNamespaces: []string{"b"}, }, ExpectedError: "cannot set excluded and included namespaces", }, { Config: ConfigDynamic{ GroupVersionResource: schema.GroupVersionResource{ Group: "", Version: "v1", Resource: "secrets", }, FieldSelectors: []string{""}, }, 
ExpectedError: "invalid field selector 0: must not be empty", }, { Config: ConfigDynamic{ GroupVersionResource: schema.GroupVersionResource{ Group: "", Version: "v1", Resource: "secrets", }, FieldSelectors: []string{"foo"}, }, ExpectedError: "invalid field selector 0: invalid selector: 'foo'; can't understand 'foo'", }, } for _, test := range tests { err := test.Config.validate() if err == nil && test.ExpectedError != "" { t.Errorf("expected error: %q, got: nil", test.ExpectedError) } if err != nil && !strings.Contains(err.Error(), test.ExpectedError) { t.Errorf("expected %s, got %s", test.ExpectedError, err.Error()) } } } func TestGenerateExcludedNamespacesFieldSelector(t *testing.T) { tests := []struct { ExcludeNamespaces []string ExpectedFieldSelector string }{ { ExcludeNamespaces: []string{ "", }, ExpectedFieldSelector: "", }, { ExcludeNamespaces: []string{ "kube-system", }, ExpectedFieldSelector: "metadata.namespace!=kube-system", }, { ExcludeNamespaces: []string{ "kube-system", "my-namespace", }, ExpectedFieldSelector: "metadata.namespace!=kube-system,metadata.namespace!=my-namespace", }, } for _, test := range tests { fieldSelector := generateExcludedNamespacesFieldSelector(test.ExcludeNamespaces).String() if fieldSelector != test.ExpectedFieldSelector { t.Errorf("ExpectedFieldSelector does not match: got=%+v want=%+v", fieldSelector, test.ExpectedFieldSelector) } } } // fake time for testing type fakeTime struct { } func (f *fakeTime) now() time.Time { //2021-03-16T18:22:15+00:00 return time.Unix(1615918935, 0) } func init() { clock = &fakeTime{} } type failEncryptor struct{} func (fe *failEncryptor) Encrypt(_ context.Context, plaintext []byte) (*envelope.EncryptedData, error) { return nil, fmt.Errorf("encryption failed") } func TestDynamicGatherer_Fetch(t *testing.T) { privKey, err := stdrsa.GenerateKey(rand.Reader, 2048) require.NoError(t, err) keyID := "test-key-id" fetcher := keyfetch.NewFakeClientWithKey(keyID, privKey.Public().(*stdrsa.PublicKey)) 
encryptor, err := rsa.NewEncryptor(fetcher) if err != nil { t.Fatalf("failed to create encryptor: %v", err) } // start a k8s client // init the datagatherer's informer with the client // add/delete resources watched by the data gatherer // check the expected result tests := map[string]struct { config ConfigDynamic excludeAnnotsKeys []string excludeLabelKeys []string addObjects []*unstructured.Unstructured deleteObjects map[string]string updateObjects map[string]runtime.Object expected []*api.GatheredResource encryptor envelope.Encryptor expectEncryptionFailure bool err bool }{ "fetches the default namespace": { addObjects: []*unstructured.Unstructured{ getObject("v1", "Namespace", "default", "", false), }, config: ConfigDynamic{ IncludeNamespaces: []string{""}, GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"}, }, expected: []*api.GatheredResource{ { Resource: &unstructured.Unstructured{ Object: map[string]any{ "apiVersion": "v1", "kind": "Namespace", "metadata": map[string]any{ "name": "default", "uid": "default1", }, }, }, }, }, }, "only a Foo should be returned if GVR selects foos": { addObjects: []*unstructured.Unstructured{ getObject("foobar/v1", "Foo", "testfoo", "testns", false), getObject("v1", "Service", "testservice", "testns", false), getObject("foobar/v1", "NotFoo", "notfoo", "testns", false), }, config: ConfigDynamic{ IncludeNamespaces: []string{"testns"}, GroupVersionResource: schema.GroupVersionResource{Group: "foobar", Version: "v1", Resource: "foos"}, }, expected: []*api.GatheredResource{ { Resource: getObject("foobar/v1", "Foo", "testfoo", "testns", false), }, }, }, "delete a Foo resource from the testns, the cache should have a Foo with deletedAt set to now()": { addObjects: []*unstructured.Unstructured{ getObject("foobar/v1", "Foo", "testfoo", "testns", false), getObject("v1", "Service", "testservice", "testns", false), getObject("foobar/v1", "NotFoo", "notfoo", "testns", false), }, 
deleteObjects: map[string]string{ "testns": "testfoo", }, config: ConfigDynamic{ IncludeNamespaces: []string{"testns"}, GroupVersionResource: schema.GroupVersionResource{Group: "foobar", Version: "v1", Resource: "foos"}, }, expected: []*api.GatheredResource{ { Resource: getObject("foobar/v1", "Foo", "testfoo", "testns", false), DeletedAt: api.Time{Time: clock.now()}, }, }, }, "only Foos in the specified namespace should be returned": { config: ConfigDynamic{ IncludeNamespaces: []string{"testns"}, GroupVersionResource: schema.GroupVersionResource{Group: "foobar", Version: "v1", Resource: "foos"}, }, addObjects: []*unstructured.Unstructured{ getObject("foobar/v1", "Foo", "testfoo", "testns", false), getObject("foobar/v1", "Foo", "testfoo", "nottestns", false), }, expected: []*api.GatheredResource{ { Resource: getObject("foobar/v1", "Foo", "testfoo", "testns", false), }, }, }, "Foos in different namespaces should be returned if no namespace field is set": { config: ConfigDynamic{ IncludeNamespaces: []string{""}, GroupVersionResource: schema.GroupVersionResource{Group: "foobar", Version: "v1", Resource: "foos"}, }, addObjects: []*unstructured.Unstructured{ getObject("foobar/v1", "Foo", "testfoo1", "testns1", false), getObject("foobar/v1", "Foo", "testfoo2", "testns2", false), }, expected: []*api.GatheredResource{ { Resource: getObject("foobar/v1", "Foo", "testfoo1", "testns1", false), }, { Resource: getObject("foobar/v1", "Foo", "testfoo2", "testns2", false), }, }, }, "DeleteFoos in different namespaces should be returned if no namespace field is set": { config: ConfigDynamic{ IncludeNamespaces: []string{""}, GroupVersionResource: schema.GroupVersionResource{Group: "foobar", Version: "v1", Resource: "foos"}, }, addObjects: []*unstructured.Unstructured{ getObject("foobar/v1", "Foo", "testfoo1", "testns1", false), getObject("foobar/v1", "Foo", "testfoo2", "testns2", false), }, expected: []*api.GatheredResource{ { Resource: getObject("foobar/v1", "Foo", "testfoo1", 
"testns1", false), }, { Resource: getObject("foobar/v1", "Foo", "testfoo2", "testns2", false), }, }, }, "Delete all Foo resources, all the fetched resources should have a deletedAt field set to now()": { config: ConfigDynamic{ IncludeNamespaces: []string{""}, GroupVersionResource: schema.GroupVersionResource{Group: "foobar", Version: "v1", Resource: "foos"}, }, deleteObjects: map[string]string{ "testns1": "testfoo1", "testns2": "testfoo2", }, addObjects: []*unstructured.Unstructured{ getObject("foobar/v1", "Foo", "testfoo1", "testns1", false), getObject("foobar/v1", "Foo", "testfoo2", "testns2", false), }, expected: []*api.GatheredResource{ { Resource: getObject("foobar/v1", "Foo", "testfoo1", "testns1", false), DeletedAt: api.Time{Time: clock.now()}, }, { Resource: getObject("foobar/v1", "Foo", "testfoo2", "testns2", false), DeletedAt: api.Time{Time: clock.now()}, }, }, }, "Update all Foo resources, all the fetched resources should have been updated": { config: ConfigDynamic{ IncludeNamespaces: []string{""}, GroupVersionResource: schema.GroupVersionResource{Group: "foobar", Version: "v1", Resource: "foos"}, }, updateObjects: map[string]runtime.Object{ "testns1": getObject("foobar/v1", "Foo", "testfoo1", "testns1", false), "testns2": getObject("foobar/v1", "Foo", "testfoo2", "testns2", false), }, addObjects: []*unstructured.Unstructured{ getObject("foobar/v1", "Foo", "testfoo1", "testns1", false), getObject("foobar/v1", "Foo", "testfoo2", "testns2", false), }, expected: []*api.GatheredResource{ { Resource: getObject("foobar/v1", "Foo", "testfoo1", "testns1", false), }, { Resource: getObject("foobar/v1", "Foo", "testfoo2", "testns2", false), }, }, }, "Secret resources should have data removed": { config: ConfigDynamic{ IncludeNamespaces: []string{""}, GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}, }, addObjects: []*unstructured.Unstructured{ getSecret("testsecret", "testns1", map[string]any{ "secretKey": 
"secretValue", }, false, true), getSecret("anothertestsecret", "testns2", map[string]any{ "secretNumber": "12345", }, false, true), }, expected: []*api.GatheredResource{ { Resource: getSecret("testsecret", "testns1", nil, false, false), }, { Resource: getSecret("anothertestsecret", "testns2", nil, false, false), }, }, }, "Secret of type kubernetes.io/tls should have crts and not keys": { config: ConfigDynamic{ IncludeNamespaces: []string{""}, GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}, }, addObjects: []*unstructured.Unstructured{ getSecret("testsecret", "testns1", map[string]any{ "tls.key": "secretValue", "tls.crt": "value", "ca.crt": "value", }, true, true), getSecret("anothertestsecret", "testns2", map[string]any{ "example.key": "secretValue", "example.crt": "value", }, true, true), }, expected: []*api.GatheredResource{ { // only tls.crt and ca.cert remain Resource: getSecret("testsecret", "testns1", map[string]any{ "tls.crt": "value", "ca.crt": "value", }, true, false), }, { // all other keys removed Resource: getSecret("anothertestsecret", "testns2", nil, true, false), }, }, }, "Secret resources should have encrypted data when encryption is enabled": { config: ConfigDynamic{ IncludeNamespaces: []string{""}, GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}, }, addObjects: []*unstructured.Unstructured{ getSecret("testsecret", "testns1", map[string]any{ "secretKey": "secretValue", }, false, true), getSecret("anothertestsecret", "testns2", map[string]any{ "secretNumber": "12345", }, false, true), }, encryptor: encryptor, expected: []*api.GatheredResource{ { Resource: getSecret("testsecret", "testns1", nil, false, false), }, { Resource: getSecret("anothertestsecret", "testns2", nil, false, false), }, }, }, "Secret resources should have encrypted data when encryption is enabled with some data fields preserved": { config: ConfigDynamic{ IncludeNamespaces: 
[]string{""}, GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}, }, addObjects: []*unstructured.Unstructured{ getSecret("testsecret-notpreserved", "testns1", map[string]any{ "secretKey": "secretValue", }, false, true), getSecret("testsecret-preserved", "testns1", map[string]any{ "tls.key": "secretValue", "tls.crt": "value", "ca.crt": "value", }, true, true), }, encryptor: encryptor, expected: []*api.GatheredResource{ { // only tls.crt and ca.cert remain, although tls.key will be present in encrypted data Resource: getSecret("testsecret-preserved", "testns1", map[string]any{ "tls.crt": "value", "ca.crt": "value", }, true, false), }, { Resource: getSecret("testsecret-notpreserved", "testns1", nil, false, false), }, }, }, "Secret resources should still be redacted if encryption fails": { config: ConfigDynamic{ IncludeNamespaces: []string{""}, GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}, }, addObjects: []*unstructured.Unstructured{ getSecret("testsecret", "testns1", map[string]any{ "secretKey": "secretValue", }, false, true), }, encryptor: &failEncryptor{}, expectEncryptionFailure: true, expected: []*api.GatheredResource{ { Resource: getSecret("testsecret", "testns1", nil, false, false), }, }, }, "excluded annotations are removed for unstructured-based gatherers such as secrets": { config: ConfigDynamic{GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}}, // To give a realistic regex in this test case, let's use the // example of the Kapp project that uses four annotations that all // start with `kapp.k14s.io/original*`. These annotations are // similar to `kubectl.kubernetes.io/last-applied-configuration` in // that they may contain sensitive information. 
From [1], they may // look like this: // // kapp.k14s.io/original: | // {"apiVersion":"v1","kind":"Secret","spec":{"data": {"password": "cGFzc3dvcmQ=","username": "bXl1c2VybmFtZQ=="}}} // kapp.k14s.io/original-diff: | // - type: test // path: /data // value: // password: cygpcGVyUzNjcmV0UEBhc3N3b3JkIQ== // username: bXl1c2VybmFtZQ== // // [1]: https://github.com/carvel-dev/kapp/issues/90#issuecomment-602074356 // // The regular expression could be: excludeAnnotsKeys: []string{`^kapp\.k14s\.io/original.*`}, // A somewhat realistic example of labels that would need to be // excluded would be when a company declares ownership using // sensitive identifiers (e.g., employee IDs), and the company // doesn't want these IDs to be exposed. Let's imagine these // employee IDs look like this: // // company.com/employee-id: 12345 // // The regular expression would then be: excludeLabelKeys: []string{`^company\.com/employee-id$`}, addObjects: []*unstructured.Unstructured{getObjectAnnot("v1", "Secret", "s0", "n1", map[string]any{"kapp.k14s.io/original": "foo", "kapp.k14s.io/original-diff": "bar", "normal": "true"}, map[string]any{`company.com/employee-id`: "12345", "prod": "true"}, )}, expected: []*api.GatheredResource{{Resource: getObjectAnnot("v1", "Secret", "s0", "n1", map[string]any{"normal": "true"}, map[string]any{"prod": "true"}, )}}, }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { var wg sync.WaitGroup ctx := t.Context() gvrToListKind := map[schema.GroupVersionResource]string{ {Group: "foobar", Version: "v1", Resource: "foos"}: "UnstructuredList", {Group: "apps", Version: "v1", Resource: "deployments"}: "UnstructuredList", {Group: "", Version: "v1", Resource: "secrets"}: "UnstructuredList", {Group: "", Version: "v1", Resource: "namespaces"}: "UnstructuredList", } addObjs := make([]runtime.Object, len(tc.addObjects)) for i, obj := range tc.addObjects { addObjs[i] = obj } cl := fake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), 
gvrToListKind, addObjs...) // init the datagatherer's informer with the client dg, err := tc.config.newDataGathererWithClient(ctx, cl, nil) if err != nil { t.Fatalf("unexpected error: %+v", err) } // initializing test informer, this informer will update the waitGroup making sure all the // update and delete events have all been capture by the informers, the 100 mills sleep is // just to make sure dg informer is caught up. This allows us to wait until the waitGroup is // done before doing the dg.Fetch. factory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(cl, 10*time.Minute, metav1.NamespaceAll, nil) resourceInformer := factory.ForResource(tc.config.GroupVersionResource) testInformer := resourceInformer.Informer() _, err = testInformer.AddEventHandler(k8scache.ResourceEventHandlerFuncs{ DeleteFunc: func(obj any) { defer wg.Done() time.Sleep(100 * time.Millisecond) }, UpdateFunc: func(oldObj, newObj any) { defer wg.Done() time.Sleep(100 * time.Millisecond) }, }) require.NoError(t, err) // start test Informer factory.Start(ctx.Done()) k8scache.WaitForCacheSync(ctx.Done(), testInformer.HasSynced) dgd := dg.(*DataGathererDynamic) for _, key := range tc.excludeAnnotsKeys { dgd.ExcludeAnnotKeys = append(dgd.ExcludeAnnotKeys, regexp.MustCompile(key)) } for _, key := range tc.excludeLabelKeys { dgd.ExcludeLabelKeys = append(dgd.ExcludeLabelKeys, regexp.MustCompile(key)) } if tc.encryptor != nil { dgd.Encryptor = tc.encryptor } // start data gatherer informer dynamiDg := dg go func() { if err = dynamiDg.Run(ctx); err != nil { t.Errorf("unexpected client error: %+v", err) } }() err = dynamiDg.WaitForCacheSync(ctx) if err != nil { t.Fatalf("unexpected client error: %+v", err) } // deletes all the objects set to be deleted, to trigger // a delete event in the informers. 
Add 1 to wg making "sure" (https://github.com/kubernetes/kubernetes/issues/95372) // the informers cache are sync for ns, delete := range tc.deleteObjects { wg.Add(1) deletePolicy := metav1.DeletePropagationForeground deleteOptions := metav1.DeleteOptions{ PropagationPolicy: &deletePolicy, } err := cl.Resource(tc.config.GroupVersionResource).Namespace(ns).Delete(ctx, delete, deleteOptions) if err != nil { t.Fatalf("unexpected client delete error: %+v", err) } } for ns, update := range tc.updateObjects { wg.Add(1) newObj := update.(*unstructured.Unstructured) _, err := cl.Resource(tc.config.GroupVersionResource).Namespace(ns).Update(ctx, newObj, metav1.UpdateOptions{}) if err != nil { t.Fatalf("unexpected client update error: %+v", err) } } // wait for all the events to occur, else timeut in 30 seconds if waitTimeout(&wg, 30*time.Second) { t.Fatalf("unexpected timeout") } res, expectCount, err := dynamiDg.Fetch(ctx) if err != nil && !tc.err { t.Errorf("expected no error but got: %v", err) } if err == nil && tc.err { t.Errorf("expected to get an error but didn't get one") } if tc.expected != nil { data, ok := res.(*api.DynamicData) if !ok { t.Errorf("expected result be *api.DynamicData but wasn't") } list := data.Items // sorting list of results by name sortGatheredResources(list) // sorting list of expected results by name sortGatheredResources(tc.expected) // check lengths of lists first before we iterate to compare items assert.Len(t, list, expectCount, "unexpected number of resources returned") for i, item := range list { got, ok := item.Resource.(*unstructured.Unstructured) if !ok { t.Errorf("expected resource to be of type unstructured.Unstructured but got %T", item.Resource) } expected, ok := tc.expected[i].Resource.(*unstructured.Unstructured) if !ok { t.Errorf("expected resource to be of type unstructured.Unstructured but got %T", tc.expected[i].Resource) } // If encryption is enabled, validate the encrypted data if tc.encryptor != nil { if 
tc.expectEncryptionFailure {
						// When the encryptor fails, the gatherer must not attach a
						// partial encrypted payload: the field must be absent entirely.
						_, found, err := unstructured.NestedFieldNoCopy(got.Object, encryptedDataFieldName)
						require.NoError(t, err, "error checking %s field", encryptedDataFieldName)
						require.False(t, found, "expected %s field to not exist when encryption fails", encryptedDataFieldName)
					} else {
						// Sort the original objects by name so index i lines up
						// with the (sorted) fetched results iterated here.
						sortResourcesByName(tc.addObjects)
						compareEncryptedData(t, privKey, got, tc.addObjects[i])
					}
				}
				assert.Equal(t, expected, got)
			}
			}
		})
	}
}

// compareEncryptedData asserts that the gathered resource carries a valid
// encrypted-data field: the expected encryption type, and a base64-encoded
// JWE payload that decrypts (with privKey) back to the original resource's
// `data` map. As a final step it removes the encrypted-data field from `got`
// so callers can compare the remaining fields with a plain equality check.
func compareEncryptedData(t *testing.T, privKey *stdrsa.PrivateKey, got *unstructured.Unstructured, original *unstructured.Unstructured) {
	t.Helper()
	// Check that encrypted data field exists
	encryptedDataRaw, found, err := unstructured.NestedFieldNoCopy(got.Object, encryptedDataFieldName)
	require.NoError(t, err, "error retrieving %s field", encryptedDataFieldName)
	require.True(t, found, "expected %s field to exist when encryption is enabled", encryptedDataFieldName)

	// Convert to map and validate structure
	encryptedDataMap, ok := encryptedDataRaw.(map[string]any)
	require.True(t, ok, "expected %s to be a map[string]any", encryptedDataFieldName)

	// Check type field
	typeField, ok := encryptedDataMap["type"].(string)
	require.True(t, ok, "expected type field to be a string")
	assert.Equal(t, rsa.EncryptionType, typeField, "expected type to be %s", rsa.EncryptionType)

	// Check data field exists and is valid
	dataFieldRaw, ok := encryptedDataMap["data"]
	require.True(t, ok, "expected data field to exist")
	dataField, ok := dataFieldRaw.(string)
	require.True(t, ok, "expected data field to be a JSON string")
	jweBytes, err := base64.StdEncoding.DecodeString(dataField)
	require.NoError(t, err, "data field should be valid base64 string")
	require.NotEmpty(t, jweBytes, "expected data field to be non-empty")

	// Verify JWE can be parsed
	_, err = jwe.Parse(jweBytes)
	require.NoError(t, err, "data should be a valid JWE")

	// Decrypt with the matching private key to prove the payload round-trips.
	plaintext, err := jwe.Decrypt(jweBytes, jwe.WithKey(jwa.RSA_OAEP_256(), privKey), jwe.WithContext(t.Context()))
	require.NoError(t, err,
"failed to decrypt JWE") // Verify decrypted plaintext matches expected resource data expectedData, found, err := unstructured.NestedMap(original.Object, "data") require.True(t, found, "expected data field to exist in original resource") require.NoError(t, err, "error retrieving data field from original resource") var decryptedDataMap map[string]any err = json.Unmarshal(plaintext, &decryptedDataMap) require.NoError(t, err, "failed to unmarshal decrypted plaintext") assert.Equal(t, expectedData, decryptedDataMap, "decrypted data does not match original data") // Remove encrypted data so that simple comparison works for other fields unstructured.RemoveNestedField(got.Object, encryptedDataFieldName) } func TestDynamicGathererNativeResources_Fetch(t *testing.T) { // start a k8s client // init the datagatherer's informer with the client // add/delete resources watched by the data gatherer // check the expected result podGVR := schema.GroupVersionResource{Group: corev1.SchemeGroupVersion.Group, Version: corev1.SchemeGroupVersion.Version, Resource: "pods"} tests := map[string]struct { config ConfigDynamic excludeAnnotsKeys []string excludeLabelKeys []string addObjects []runtime.Object deleteObjects map[string]string updateObjects map[string]runtime.Object expected []*api.GatheredResource err bool }{ "only a Pod should be returned if GVR selects pods": { addObjects: []runtime.Object{ getObject("foobar/v1", "Foo", "testfoo", "testns", false), getObject("v1", "Service", "testservice", "testns", false), getObject("foobar/v1", "NotFoo", "notfoo", "testns", false), &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: "testpod1", Namespace: "testns", UID: "uid-testpod1"}}, }, config: ConfigDynamic{ IncludeNamespaces: []string{"testns"}, GroupVersionResource: podGVR, }, expected: []*api.GatheredResource{ { Resource: &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: 
"testpod1", Namespace: "testns", UID: "uid-testpod1"}}, }, }, }, "delete a Pod resource from the testns, the cache should have a Pod with deletedAt set to now()": { addObjects: []runtime.Object{ &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: "testfoo", Namespace: "testns", UID: "uid-testfoo1"}}, getObject("v1", "Service", "testservice", "testns", false), getObject("foobar/v1", "NotFoo", "notfoo", "testns", false), }, deleteObjects: map[string]string{ "testns": "testfoo", }, config: ConfigDynamic{ IncludeNamespaces: []string{"testns"}, GroupVersionResource: podGVR, }, expected: []*api.GatheredResource{ { Resource: &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: "testfoo", Namespace: "testns", UID: "uid-testfoo1"}}, DeletedAt: api.Time{Time: clock.now()}, }, }, }, "Pods in different namespaces should be returned if no namespace field is set": { config: ConfigDynamic{ IncludeNamespaces: []string{""}, GroupVersionResource: podGVR, }, addObjects: []runtime.Object{ &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: "testpod1", Namespace: "testns", UID: "uid-testpod1"}}, &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: "testpod2", Namespace: "testns2", UID: "uid-testpod2"}}, }, expected: []*api.GatheredResource{ { Resource: &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: "testpod1", Namespace: "testns", UID: "uid-testpod1"}}, }, { Resource: &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: "testpod2", Namespace: "testns2", UID: "uid-testpod2"}}, }, }, }, "Delete Pods in different namespaces should be returned if no namespace field is set": { config: ConfigDynamic{ IncludeNamespaces: []string{""}, GroupVersionResource: podGVR, }, 
addObjects: []runtime.Object{ &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: "testpod1", Namespace: "testns", UID: "uid-testpod1"}}, &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: "testpod2", Namespace: "testns2", UID: "uid-testpod2"}}, }, expected: []*api.GatheredResource{ { Resource: &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: "testpod1", Namespace: "testns", UID: "uid-testpod1"}}, }, { Resource: &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: "testpod2", Namespace: "testns2", UID: "uid-testpod2"}}, }, }, }, "Delete all Pod resources, all the fetched resources should have a deletedAt field set to now()": { config: ConfigDynamic{ IncludeNamespaces: []string{""}, GroupVersionResource: podGVR, }, deleteObjects: map[string]string{ "testns1": "testpod1", "testns2": "testpod2", }, addObjects: []runtime.Object{ &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: "testpod1", Namespace: "testns1", UID: "uid-testpod1"}}, &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: "testpod2", Namespace: "testns2", UID: "uid-testpod2"}}, }, expected: []*api.GatheredResource{ { Resource: &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: "testpod1", Namespace: "testns1", UID: "uid-testpod1"}}, DeletedAt: api.Time{Time: clock.now()}, }, { Resource: &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: "testpod2", Namespace: "testns2", UID: "uid-testpod2"}}, DeletedAt: api.Time{Time: clock.now()}, }, }, }, "Update all Pods resources, all the fetched resources should have been updated": { config: ConfigDynamic{ IncludeNamespaces: 
[]string{""}, GroupVersionResource: podGVR, }, updateObjects: map[string]runtime.Object{ "testns1": &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: "testpod1", Namespace: "testns1", UID: "uid-testpod1", Labels: map[string]string{"foo": "newlabel"}}}, "testns2": &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: "testpod2", Namespace: "testns2", UID: "uid-testpod2", Labels: map[string]string{"foo": "newlabel"}}}, }, addObjects: []runtime.Object{ &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: "testpod1", Namespace: "testns1", UID: "uid-testpod1"}}, &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: "testpod2", Namespace: "testns2", UID: "uid-testpod2"}}, }, expected: []*api.GatheredResource{ { Resource: &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: "testpod1", Namespace: "testns1", UID: "uid-testpod1", Labels: map[string]string{"foo": "newlabel"}}}, }, { Resource: &corev1.Pod{TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: "testpod2", Namespace: "testns2", UID: "uid-testpod2", Labels: map[string]string{"foo": "newlabel"}}}, }, }, }, "only Pods in the specified namespace should be returned": { config: ConfigDynamic{ IncludeNamespaces: []string{"testns"}, GroupVersionResource: podGVR, }, addObjects: []runtime.Object{ &corev1.Pod{ TypeMeta: metav1.TypeMeta{ Kind: "Pod", APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ Name: "testfoo1", Namespace: "testns", UID: "uid-testfoo1", }, }, &corev1.Pod{ TypeMeta: metav1.TypeMeta{ Kind: "Pod", APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ Name: "testfoo1", Namespace: "nottestns", UID: "uid-testfoo2", }, }, }, expected: []*api.GatheredResource{ { Resource: &corev1.Pod{ TypeMeta: 
metav1.TypeMeta{ Kind: "Pod", APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ Name: "testfoo1", Namespace: "testns", UID: "uid-testfoo1", }, }, }, }, }, // Pod is the only native resource that we test out of lack of time // (would require a lot of changes to the testing func). Ideally we // should test all native resources such as Service, Deployment, // Ingress, Namespace, and so on. "excluded annotations are removed for typed resources gatherers such as pods": { config: ConfigDynamic{GroupVersionResource: podGVR}, excludeAnnotsKeys: []string{"secret"}, excludeLabelKeys: []string{"secret"}, addObjects: []runtime.Object{ &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p0", UID: "p0", Namespace: "n1", Annotations: map[string]string{"normal-annot": "bar"}}}, &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", UID: "p1", Namespace: "n1", Labels: map[string]string{"normal-label": "bar"}}}, &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p2", UID: "p2", Namespace: "n1", Annotations: map[string]string{"super-secret-annot": "bar"}}}, &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p3", UID: "p3", Namespace: "n1", Labels: map[string]string{"super-secret-label": "bar"}}}, }, expected: []*api.GatheredResource{ {Resource: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p0", UID: "p0", Namespace: "n1", Annotations: map[string]string{"normal-annot": "bar"}}, TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}}}, {Resource: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", UID: "p1", Namespace: "n1", Labels: map[string]string{"normal-label": "bar"}}, TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}}}, {Resource: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p2", UID: "p2", Namespace: "n1", Annotations: map[string]string{}}, TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}}}, {Resource: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p3", UID: "p3", Namespace: "n1", Labels: map[string]string{}}, TypeMeta: metav1.TypeMeta{Kind: "Pod", 
APIVersion: "v1"}}},
			},
		},
	}

	for name, tc := range tests {
		t.Run(name, func(t *testing.T) {
			var wg sync.WaitGroup
			ctx := t.Context()
			clientset := fakeclientset.NewSimpleClientset(tc.addObjects...)

			// init the datagatherer's informer with the client
			dg, err := tc.config.newDataGathererWithClient(ctx, nil, clientset)
			if err != nil {
				t.Fatalf("unexpected error: %+v", err)
			}

			// initializing test informer, this informer will capture all the events
			// that occur in the test case and only allow the dg.Fetch to be performed
			// after all the events have been triggered
			factory := informers.NewSharedInformerFactoryWithOptions(clientset, 10*time.Minute, informers.WithNamespace(metav1.NamespaceAll), informers.WithTweakListOptions(func(options *metav1.ListOptions) {}))
			testInformer := factory.Core().V1().Pods().Informer()
			_, err = testInformer.AddEventHandler(k8scache.ResourceEventHandlerFuncs{
				// The short sleep gives the data gatherer's own informer time
				// to observe the same event before the WaitGroup is released.
				DeleteFunc: func(obj any) {
					defer wg.Done()
					time.Sleep(100 * time.Millisecond)
				},
				UpdateFunc: func(oldObj, newObj any) {
					defer wg.Done()
					time.Sleep(100 * time.Millisecond)
				},
			})
			require.NoError(t, err)
			// start test Informer
			factory.Start(ctx.Done())
			k8scache.WaitForCacheSync(ctx.Done(), testInformer.HasSynced)
			dgd := dg.(*DataGathererDynamic)
			for _, key := range tc.excludeAnnotsKeys {
				dgd.ExcludeAnnotKeys = append(dgd.ExcludeAnnotKeys, regexp.MustCompile(key))
			}
			for _, key := range tc.excludeLabelKeys {
				dgd.ExcludeLabelKeys = append(dgd.ExcludeLabelKeys, regexp.MustCompile(key))
			}
			// start data gatherer informer
			dynamiDg := dg
			go func() {
				if err = dynamiDg.Run(ctx); err != nil {
					t.Errorf("unexpected client error: %+v", err)
				}
			}()
			err = dynamiDg.WaitForCacheSync(ctx)
			if err != nil {
				t.Fatalf("unexpected client error: %+v", err)
			}

			// deletes all the objects set to be deleted, to trigger
			// a delete event in the informers. Add 1 to wg
			for ns, delete := range tc.deleteObjects {
				wg.Add(1)
				deletePolicy := metav1.DeletePropagationForeground
				deleteOptions := metav1.DeleteOptions{
					PropagationPolicy: &deletePolicy,
				}
				err := clientset.CoreV1().Pods(ns).Delete(ctx, delete, deleteOptions)
				if err != nil {
					t.Fatalf("unexpected client delete error: %+v", err)
				}
			}
			// updates objects to trigger an update event in the informers
			for ns, update := range tc.updateObjects {
				wg.Add(1)
				newObj := update.(*corev1.Pod)
				_, err := clientset.CoreV1().Pods(ns).Update(ctx, newObj, metav1.UpdateOptions{})
				if err != nil {
					t.Fatalf("unexpected client update error: %+v", err)
				}
			}

			// wait for all the events to occur, else time out after 5 seconds
			if waitTimeout(&wg, 5*time.Second) {
				t.Fatalf("unexpected timeout")
			}
			rawRes, count, err := dynamiDg.Fetch(ctx)
			if tc.err {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}

			if tc.expected != nil {
				res, ok := rawRes.(*api.DynamicData)
				require.Truef(t, ok, "expected result be an *api.DynamicData but wasn't")
				actual := res.Items
				// sorting list of results by name
				sortGatheredResources(actual)
				// sorting list of expected results by name
				sortGatheredResources(tc.expected)
				assert.Equal(t, tc.expected, actual)
				assert.Len(t, actual, count)
			}
		})
	}
}

// waitTimeout waits for the waitgroup for the specified max timeout.
// Returns true if waiting timed out.
func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
	c := make(chan struct{})
	go func() {
		defer close(c)
		wg.Wait()
	}()
	select {
	case <-c:
		return false
	case <-time.After(timeout):
		// NOTE: on timeout the goroutine above keeps blocking on wg.Wait()
		// until the test process exits; acceptable for a test-only helper.
		return true
	}
}

func TestRemoveUnstructuredKeys(t *testing.T) {
	t.Run("remove single key", run_TestRemoveUnstructuredKeys(tc_RemoveUnstructuredKeys{
		givenPath:    []string{"metadata", "annotations"},
		givenExclude: []string{"^toexclude$"},
		givenObj: map[string]any{
			"metadata": map[string]any{
				"annotations": map[string]any{
					"toexclude": "foo",
					"tokeep":    "bar",
				},
			},
		},
		expectObj: map[string]any{
			"metadata": map[string]any{
				"annotations": map[string]any{
					"tokeep": "bar",
				},
			},
		},
	}))
	t.Run("remove keys using multiple regexes", run_TestRemoveUnstructuredKeys(tc_RemoveUnstructuredKeys{
		givenPath:    []string{"metadata", "annotations"},
		givenExclude: []string{"^toexclude1$", "^toexclude2$"},
		givenObj: map[string]any{
			"metadata": map[string]any{
				"annotations": map[string]any{
					"toexclude1": "foo",
					"toexclude2": "bar",
				},
			},
		},
		expectObj: map[string]any{
			"metadata": map[string]any{"annotations": map[string]any{}},
		},
	}))
	t.Run("remove multiple keys with a single regex", run_TestRemoveUnstructuredKeys(tc_RemoveUnstructuredKeys{
		givenPath:    []string{"metadata", "annotations"},
		givenExclude: []string{"toexclude.*"},
		givenObj: map[string]any{
			"metadata": map[string]any{
				"annotations": map[string]any{
					"toexclude1": "foo",
					"toexclude2": "bar",
					"tokeep":     "baz",
				},
			},
		},
		expectObj: map[string]any{
			"metadata": map[string]any{
				"annotations": map[string]any{
					"tokeep": "baz",
				},
			},
		},
	}))
	t.Run("with no regex, the object is untouched", run_TestRemoveUnstructuredKeys(tc_RemoveUnstructuredKeys{
		givenPath:    []string{"metadata", "annotations"},
		givenExclude: []string{},
		givenObj: map[string]any{
			"metadata": map[string]any{
				"annotations": map[string]any{
					"tokeep1": "foo",
				},
			},
		},
		expectObj: map[string]any{
			"metadata": map[string]any{
				"annotations": map[string]any{
					"tokeep1": "foo",
				},
			},
		},
	}))

	// The "leaf" field is the field that is at the end of the path. For
	// example, "annotations" is the leaf field in metadata.annotations.
	t.Run("works when the leaf field is not found", run_TestRemoveUnstructuredKeys(tc_RemoveUnstructuredKeys{
		givenPath:    []string{"metadata", "annotations"},
		givenExclude: []string{},
		givenObj:     map[string]any{"metadata": map[string]any{}},
		expectObj:    map[string]any{"metadata": map[string]any{}},
	}))
	t.Run("works when the leaf field is nil", run_TestRemoveUnstructuredKeys(tc_RemoveUnstructuredKeys{
		givenPath:    []string{"metadata", "annotations"},
		givenExclude: []string{},
		givenObj:     map[string]any{"metadata": map[string]any{"annotations": nil}},
		expectObj:    map[string]any{"metadata": map[string]any{"annotations": nil}},
	}))
	t.Run("works when leaf field is unexpectedly not nil and not a known map", run_TestRemoveUnstructuredKeys(tc_RemoveUnstructuredKeys{
		givenPath: []string{"metadata", "annotations"},
		givenObj:  map[string]any{"metadata": map[string]any{"annotations": 42}},
		expectObj: map[string]any{"metadata": map[string]any{"annotations": 42}},
	}))

	// The "intermediate" field is the field that is not at the end of the path.
	// For example, "metadata" is the intermediate field in
	// metadata.annotations.
	t.Run("works when the intermediate field doesn't exist", run_TestRemoveUnstructuredKeys(tc_RemoveUnstructuredKeys{
		givenPath: []string{"metadata", "annotations"},
		givenObj:  map[string]any{},
		expectObj: map[string]any{},
	}))
	t.Run("works when the intermediate field is nil", run_TestRemoveUnstructuredKeys(tc_RemoveUnstructuredKeys{
		givenPath: []string{"metadata", "annotations"},
		givenObj:  map[string]any{"metadata": nil},
		expectObj: map[string]any{"metadata": nil},
	}))
	t.Run("works when the intermediate field is unexpectedly not nil and not a map", run_TestRemoveUnstructuredKeys(tc_RemoveUnstructuredKeys{
		givenPath: []string{"metadata", "annotations"},
		givenObj:  map[string]any{"metadata": 42},
		expectObj: map[string]any{"metadata": 42},
	}))
}

// tc_RemoveUnstructuredKeys is the fixture consumed by
// run_TestRemoveUnstructuredKeys.
type tc_RemoveUnstructuredKeys struct {
	givenExclude []string       // regexes of keys to remove at the path
	givenObj     map[string]any // object mutated in place by the call
	givenPath    []string       // path to the map whose keys are filtered
	expectObj    map[string]any // expected object after removal
}

// run_TestRemoveUnstructuredKeys returns a subtest that applies
// RemoveUnstructuredKeys to the fixture and asserts the resulting object.
func run_TestRemoveUnstructuredKeys(tc tc_RemoveUnstructuredKeys) func(*testing.T) {
	return func(t *testing.T) {
		t.Helper()
		RemoveUnstructuredKeys(toRegexps(tc.givenExclude), &unstructured.Unstructured{Object: tc.givenObj}, tc.givenPath...)
assert.Equal(t, tc.expectObj, tc.givenObj)
	}
}

func TestRemoveTypedKeys(t *testing.T) {
	t.Run("remove single key", run_TestRemoveTypedKeys(tc_TestRemoveTypedKeys{
		givenExclude: []string{"^toexclude$"},
		given:        map[string]string{"toexclude": "foo", "tokeep": "bar"},
		expected:     map[string]string{"tokeep": "bar"},
	}))
	t.Run("remove keys using multiple regexes", run_TestRemoveTypedKeys(tc_TestRemoveTypedKeys{
		givenExclude: []string{"^toexclude1$", "^toexclude2$"},
		given:        map[string]string{"toexclude1": "foo", "toexclude2": "bar", "tokeep": "baz"},
		expected:     map[string]string{"tokeep": "baz"},
	}))
	t.Run("remove multiple keys with a single regex", run_TestRemoveTypedKeys(tc_TestRemoveTypedKeys{
		givenExclude: []string{"^toexclude.*"},
		given:        map[string]string{"toexclude1": "foo", "toexclude2": "bar", "tokeep": "baz"},
		expected:     map[string]string{"tokeep": "baz"},
	}))
	t.Run("with no regex, the object is untouched", run_TestRemoveTypedKeys(tc_TestRemoveTypedKeys{
		givenExclude: []string{},
		given:        map[string]string{"tokeep1": "foo", "tokeep2": "bar"},
		expected:     map[string]string{"tokeep1": "foo", "tokeep2": "bar"},
	}))
	t.Run("works when the map is nil", run_TestRemoveTypedKeys(tc_TestRemoveTypedKeys{
		givenExclude: []string{"^toexclude$"},
		given:        nil,
		expected:     nil,
	}))
}

// tc_TestRemoveTypedKeys is the fixture consumed by run_TestRemoveTypedKeys.
type tc_TestRemoveTypedKeys struct {
	givenExclude []string          // regexes of keys to remove
	given        map[string]string // map mutated in place by the call
	expected     map[string]string // expected map after removal
}

// run_TestRemoveTypedKeys returns a subtest that applies RemoveTypedKeys to
// the fixture map and asserts the result.
func run_TestRemoveTypedKeys(tc tc_TestRemoveTypedKeys) func(t *testing.T) {
	return func(t *testing.T) {
		t.Helper()
		RemoveTypedKeys(toRegexps(tc.givenExclude), tc.given)
		assert.Equal(t, tc.expected, tc.given)
	}
}

// toRegexps compiles each key into a regular expression; it panics on an
// invalid pattern, which is acceptable for a test-only helper.
func toRegexps(keys []string) []*regexp.Regexp {
	var regexps []*regexp.Regexp
	for _, key := range keys {
		regexps = append(regexps, regexp.MustCompile(key))
	}
	return regexps
}

// TestValidate_LabelSelectors tests validation of label selectors
func TestValidate_LabelSelectors(t *testing.T) {
	tests := []struct {
		name           string
		labelSelectors []string
		expectError    bool
		errorContains  string
	}{
		{
			name:           "valid simple label selector",
			labelSelectors: []string{"app=myapp"},
			expectError:    false,
		},
		{
			name:           "valid label selector with dot notation",
			labelSelectors: []string{"conjur.org/name=conjur-connect-configmap"},
			expectError:    false,
		},
		{
			name:           "valid negative label selector",
			labelSelectors: []string{"app!=test"},
			expectError:    false,
		},
		{
			name:           "valid multiple label selectors",
			labelSelectors: []string{"app=myapp", "environment=production"},
			expectError:    false,
		},
		{
			name:           "valid label existence check",
			labelSelectors: []string{"app"},
			expectError:    false,
		},
		{
			name:           "valid label non-existence check",
			labelSelectors: []string{"!app"},
			expectError:    false,
		},
		{
			name:           "valid set-based selector",
			labelSelectors: []string{"environment in (production, staging)"},
			expectError:    false,
		},
		{
			name:           "valid negative set-based selector",
			labelSelectors: []string{"environment notin (dev, test)"},
			expectError:    false,
		},
		{
			name:           "empty label selector",
			labelSelectors: []string{""},
			expectError:    true,
			errorContains:  "must not be empty",
		},
		{
			name:           "invalid label selector syntax",
			labelSelectors: []string{"invalid===syntax"},
			expectError:    true,
			errorContains:  "invalid label selector",
		},
		{
			// The error message is expected to name the index (1) of the
			// offending selector.
			name:           "multiple selectors with one invalid",
			labelSelectors: []string{"app=valid", "invalid==="},
			expectError:    true,
			errorContains:  "invalid label selector 1",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			config := &ConfigDynamic{
				GroupVersionResource: schema.GroupVersionResource{
					Version:  "v1",
					Resource: "configmaps",
				},
				LabelSelectors: tt.labelSelectors,
			}
			err := config.validate()
			if tt.expectError {
				require.Error(t, err)
				if tt.errorContains != "" {
					assert.Contains(t, err.Error(), tt.errorContains)
				}
			} else {
				require.NoError(t, err)
			}
		})
	}
}

// TestValidate_FieldSelectors tests validation of field selectors.
func TestValidate_FieldSelectors(t *testing.T) { tests := []struct { name string fieldSelectors []string expectError bool errorContains string }{ { name: "valid field selector", fieldSelectors: []string{"metadata.name=test"}, expectError: false, }, { name: "valid negative field selector", fieldSelectors: []string{"type!=kubernetes.io/dockercfg"}, expectError: false, }, { name: "multiple valid field selectors", fieldSelectors: []string{"metadata.namespace=default", "type!=Opaque"}, expectError: false, }, { name: "empty field selector", fieldSelectors: []string{""}, expectError: true, errorContains: "must not be empty", }, { name: "invalid field selector syntax", fieldSelectors: []string{"invalid===field"}, expectError: true, errorContains: "invalid field selector", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { config := &ConfigDynamic{ GroupVersionResource: schema.GroupVersionResource{ Version: "v1", Resource: "secrets", }, FieldSelectors: tt.fieldSelectors, } err := config.validate() if tt.expectError { require.Error(t, err) if tt.errorContains != "" { assert.Contains(t, err.Error(), tt.errorContains) } } else { require.NoError(t, err) } }) } } // TestValidate_CombinedSelectors tests validation with both field and label selectors. 
func TestValidate_CombinedSelectors(t *testing.T) {
	tests := []struct {
		name           string
		fieldSelectors []string
		labelSelectors []string
		expectError    bool
		errorContains  string
	}{
		{
			name:           "valid field and label selectors",
			fieldSelectors: []string{"type!=kubernetes.io/dockercfg"},
			labelSelectors: []string{"app=myapp"},
			expectError:    false,
		},
		{
			name:           "invalid field selector with valid label selector",
			fieldSelectors: []string{"invalid==="},
			labelSelectors: []string{"app=myapp"},
			expectError:    true,
			errorContains:  "invalid field selector",
		},
		{
			name:           "valid field selector with invalid label selector",
			fieldSelectors: []string{"type!=Opaque"},
			labelSelectors: []string{"invalid==="},
			expectError:    true,
			errorContains:  "invalid label selector",
		},
		{
			// No errorContains here: only the presence of an error is
			// asserted when both selectors are invalid.
			name:           "both selectors invalid",
			fieldSelectors: []string{"bad===field"},
			labelSelectors: []string{"bad===label"},
			expectError:    true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			config := &ConfigDynamic{
				GroupVersionResource: schema.GroupVersionResource{
					Version:  "v1",
					Resource: "configmaps",
				},
				FieldSelectors: tt.fieldSelectors,
				LabelSelectors: tt.labelSelectors,
			}
			err := config.validate()
			if tt.expectError {
				require.Error(t, err)
				if tt.errorContains != "" {
					assert.Contains(t, err.Error(), tt.errorContains)
				}
			} else {
				require.NoError(t, err)
			}
		})
	}
}

================================================
FILE: pkg/datagatherer/k8sdynamic/fieldfilter.go
================================================
package k8sdynamic

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// SecretSelectedFields is the list of fields sent from Secret objects to the
// backend.
// The `data` is redacted, to prevent private keys or sensitive data being
// collected. Only the following non-sensitive keys are retained: tls.crt,
// ca.crt. These keys are assumed to always contain public TLS certificates.
// The `conjur-map` key is also retained, as it is used to map Secrets to
// Conjur variables, and is not considered sensitive.
// See https://docs.cyberark.com/conjur-open-source/latest/en/content/integrations/k8s-ocp/cjr-secrets-provider-lp.htm
var SecretSelectedFields = []FieldPath{
	{"kind"},
	{"apiVersion"},
	{"metadata", "annotations"},
	{"metadata", "labels"},
	{"metadata", "name"},
	{"metadata", "namespace"},
	{"metadata", "ownerReferences"},
	{"metadata", "selfLink"},
	{"metadata", "uid"},
	{"metadata", "creationTimestamp"},
	{"metadata", "deletionTimestamp"},
	{"metadata", "resourceVersion"},
	{"immutable"},
	{"type"},
	{"data", "tls.crt"},
	{"data", "ca.crt"},
	{"data", "conjur-map"},
}

// RouteSelectedFields is the list of fields sent from OpenShift Route objects to the
// backend.
// The Route resource is redacted because it may contain private keys for TLS.
//
// TODO(wallrj): Find out if the `.tls.key` field is the only one that may
// contain sensitive data and if so, that field could be redacted instead of
// selecting everything else, for consistency with Ingress or any of the other
// resources that are collected. Or alternatively add a comment to explain why
// for Route, the set of fields is allow-listed while for Ingress, all fields
// are collected.
// https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/network_apis/route-route-openshift-io-v1#spec-tls-3 var RouteSelectedFields = []FieldPath{ {"kind"}, {"apiVersion"}, {"metadata", "annotations"}, {"metadata", "name"}, {"metadata", "namespace"}, {"metadata", "ownerReferences"}, {"metadata", "selfLink"}, {"metadata", "uid"}, {"metadata", "creationTimestamp"}, {"metadata", "deletionTimestamp"}, {"metadata", "resourceVersion"}, {"spec", "host"}, {"spec", "to", "kind"}, {"spec", "to", "name"}, {"spec", "to", "weight"}, {"spec", "tls", "termination"}, {"spec", "tls", "certificate"}, {"spec", "tls", "caCertificate"}, {"spec", "tls", "destinationCACertificate"}, {"spec", "tls", "insecureEdgeTerminationPolicy"}, {"spec", "wildcardPolicy"}, {"status"}, } // RedactFields are removed from all objects var RedactFields = []FieldPath{ {"metadata", "managedFields"}, {"metadata", "annotations", "kubectl.kubernetes.io/last-applied-configuration"}, } type FieldPath []string // Select removes all but the supplied fields from the resource func Select(fields []FieldPath, resource *unstructured.Unstructured) error { newResource := unstructured.Unstructured{ Object: map[string]any{}, } for _, field := range fields { value, found, err := unstructured.NestedFieldNoCopy(resource.Object, field...) if err != nil { return err } if !found { continue } if err := unstructured.SetNestedField(newResource.Object, value, field...); err != nil { return err } } resource.Object = newResource.Object return nil } // Redact removes the supplied fields from the resource func Redact(fields []FieldPath, resource *unstructured.Unstructured) { for _, field := range fields { unstructured.RemoveNestedField(resource.Object, field...) 
} } ================================================ FILE: pkg/datagatherer/k8sdynamic/fieldfilter_test.go ================================================ package k8sdynamic import ( "encoding/json" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "github.com/jetstack/preflight/pkg/testutil" ) func TestSelect(t *testing.T) { t.Run("secret", run_TestSelect( map[string]any{ "apiVersion": "v1", "kind": "Secret", "metadata": map[string]any{ "name": "example", "namespace": "example", "annotations": map[string]any{ "kubectl.kubernetes.io/last-applied-configuration": "secret", }, "labels": map[string]any{ "foo": "bar", }, "resourceVersion": "fake-resource-version", "creationTimestamp": "2025-08-15T00:00:01Z", "deletionTimestamp": "2025-08-15T00:00:02Z", // Examples of fields which are dropped "deletionGracePeriodSeconds": 10, "finalizers": []string{"example.com/fake-finalizer"}, "generation": 11, }, "type": "kubernetes.io/tls", "data": map[string]any{ "tls.crt": "cert data", "tls.key": "secret", "extra": "should be removed", "conjur-map": "should be kept", }, }, SecretSelectedFields, map[string]any{ "apiVersion": "v1", "kind": "Secret", "metadata": map[string]any{ "name": "example", "namespace": "example", "annotations": map[string]any{ // The "last-applied-configuration" isn't ignored in // "Select". "Redact" removes it. "kubectl.kubernetes.io/last-applied-configuration": "secret", }, "labels": map[string]any{ "foo": "bar", }, "resourceVersion": "fake-resource-version", "creationTimestamp": "2025-08-15T00:00:01Z", "deletionTimestamp": "2025-08-15T00:00:02Z", }, "type": "kubernetes.io/tls", "data": map[string]any{ // The "tls.key" is ignored. 
"tls.crt": "cert data", "conjur-map": "should be kept", }, }, )) // Confirm select function preserves immutability t.Run("secret-immutable", run_TestSelect( map[string]any{ "apiVersion": "v1", "kind": "Secret", "immutable": true, "metadata": map[string]any{ "name": "with-immutable", "namespace": "example", }, "type": "Opaque", }, SecretSelectedFields, map[string]any{ "apiVersion": "v1", "kind": "Secret", "immutable": true, "metadata": map[string]any{ "name": "with-immutable", "namespace": "example", }, "type": "Opaque", }, )) t.Run("secret-immutable-false", run_TestSelect( map[string]any{ "apiVersion": "v1", "kind": "Secret", "immutable": false, "metadata": map[string]any{ "name": "with-immutable-false", "namespace": "example", }, "type": "Opaque", }, SecretSelectedFields, map[string]any{ "apiVersion": "v1", "kind": "Secret", "immutable": false, "metadata": map[string]any{ "name": "with-immutable-false", "namespace": "example", }, "type": "Opaque", }, )) t.Run("secret-immutable-absent", run_TestSelect( map[string]any{ "apiVersion": "v1", "kind": "Secret", "metadata": map[string]any{ "name": "immutable-absent", "namespace": "example", }, "type": "Opaque", }, SecretSelectedFields, map[string]any{ "apiVersion": "v1", "kind": "Secret", "metadata": map[string]any{ "name": "immutable-absent", "namespace": "example", }, "type": "Opaque", }, )) t.Run("route", run_TestSelect( map[string]any{ "apiVersion": "v1", "kind": "Route", "metadata": map[string]any{ "name": "example", "annotations": map[string]any{ "kubectl.kubernetes.io/last-applied-configuration": "secret", }, "labels": map[string]any{ "foo": "bar", }, "resourceVersion": "fake-resource-version", "creationTimestamp": "2025-08-15T00:00:01Z", "deletionTimestamp": "2025-08-15T00:00:02Z", // Examples of fields which are dropped "deletionGracePeriodSeconds": 10, "finalizers": []string{"example.com/fake-finalizer"}, "generation": 11, }, "spec": map[string]any{ "host": "www.example.com", "to": map[string]any{ "kind": 
"Service", "name": "frontend", }, "tls": map[string]any{ "termination": "reencrypt", "key": "secret", "certificate": "cert data", "caCertificate": "caCert data", "destinationCACertificate": "destinationCaCert data", }, }, }, RouteSelectedFields, map[string]any{ "apiVersion": "v1", "kind": "Route", "metadata": map[string]any{ "name": "example", "annotations": map[string]any{ // The "last-applied-configuration" isn't ignored in // "Select". "Redact" removes it. "kubectl.kubernetes.io/last-applied-configuration": "secret", }, "resourceVersion": "fake-resource-version", "creationTimestamp": "2025-08-15T00:00:01Z", "deletionTimestamp": "2025-08-15T00:00:02Z", }, "spec": map[string]any{ "host": "www.example.com", "to": map[string]any{ "kind": "Service", "name": "frontend", }, "tls": map[string]any{ "termination": "reencrypt", // The "key" field is ignored. "certificate": "cert data", "caCertificate": "caCert data", "destinationCACertificate": "destinationCaCert data", }, }, }, )) } func run_TestSelect(given map[string]any, givenSelect []FieldPath, expect map[string]any) func(*testing.T) { return func(t *testing.T) { t.Helper() givenPtr := unstructured.Unstructured{Object: given} err := Select(givenSelect, &givenPtr) require.NoError(t, err) assert.Equal(t, expect, givenPtr.Object) } } func TestSelectMissingSelectedField(t *testing.T) { resource := &unstructured.Unstructured{ Object: map[string]any{ "kind": "Secret", }, } fieldsToSelect := []FieldPath{ {"kind"}, // required for unstructured unmarshal {"missing"}, } err := Select(fieldsToSelect, resource) require.NoError(t, err) bytes, err := json.MarshalIndent(resource, "", " ") require.NoError(t, err) expectedJSON := testutil.Undent(` { "kind": "Secret" }`) assert.Equal(t, expectedJSON, string(bytes)) } func TestRedactSecret(t *testing.T) { resource := &unstructured.Unstructured{ Object: map[string]any{ "apiVersion": "v1", "kind": "Secret", "metadata": map[string]any{ "name": "example", "namespace": "example", 
"annotations": map[string]any{ "kubectl.kubernetes.io/last-applied-configuration": "secret", }, "managedFields": nil, }, "type": "kubernetes.io/tls", "data": map[string]any{ "tls.crt": "cert data", "tls.key": "secret", }, }, } fieldsToRedact := []FieldPath{ {"metadata", "managedFields"}, {"metadata", "annotations", "kubectl.kubernetes.io/last-applied-configuration"}, {"data", "tls.key"}, } Redact(fieldsToRedact, resource) bytes, err := json.MarshalIndent(resource, "", " ") require.NoError(t, err) expectedJSON := testutil.Undent(` { "apiVersion": "v1", "data": { "tls.crt": "cert data" }, "kind": "Secret", "metadata": { "annotations": {}, "name": "example", "namespace": "example" }, "type": "kubernetes.io/tls" }`) assert.Equal(t, expectedJSON, string(bytes)) } func TestRedactPod(t *testing.T) { resource := &unstructured.Unstructured{ Object: map[string]any{ "apiVersion": "v1", "kind": "Pod", "metadata": map[string]any{ "name": "example", "namespace": "example", "managedFields": []any{}, }, "spec": map[string]any{ "serviceAccountName": "example", }, }, } fieldsToRedact := []FieldPath{ {"metadata", "managedFields"}, } Redact(fieldsToRedact, resource) bytes, err := json.MarshalIndent(resource, "", " ") require.NoError(t, err) expectedJSON := testutil.Undent(` { "apiVersion": "v1", "kind": "Pod", "metadata": { "name": "example", "namespace": "example" }, "spec": { "serviceAccountName": "example" } }`) assert.Equal(t, expectedJSON, string(bytes)) } func TestRedactMissingField(t *testing.T) { resource := &unstructured.Unstructured{ Object: map[string]any{ "kind": "Secret", }, } fieldsToRedact := []FieldPath{ {"missing"}, } Redact(fieldsToRedact, resource) bytes, err := json.MarshalIndent(resource, "", " ") require.NoError(t, err) expectedJSON := testutil.Undent(` { "kind": "Secret" }`) assert.Equal(t, expectedJSON, string(bytes)) } ================================================ FILE: pkg/datagatherer/local/local.go ================================================ package 
local import ( "context" "fmt" "os" "github.com/jetstack/preflight/pkg/datagatherer" ) // Config is the configuration for a local DataGatherer. type Config struct { // DataPath is the path to file containing the data to load. DataPath string `yaml:"data-path"` } // validate validates the configuration. func (c *Config) validate() error { if c.DataPath == "" { return fmt.Errorf("invalid configuration: DataPath cannot be empty") } return nil } // DataGatherer is a data-gatherer that loads data from a local file. type DataGatherer struct { dataPath string } // NewDataGatherer returns a new DataGatherer. func (c *Config) NewDataGatherer(ctx context.Context) (datagatherer.DataGatherer, error) { if err := c.validate(); err != nil { return nil, err } return &DataGatherer{ dataPath: c.DataPath, }, nil } func (g *DataGatherer) Run(ctx context.Context) error { // no async functionality, see Fetch return nil } func (g *DataGatherer) WaitForCacheSync(ctx context.Context) error { // no async functionality, see Fetch return nil } // Fetch loads and returns the data from the LocalDatagatherer's dataPath func (g *DataGatherer) Fetch(ctx context.Context) (any, int, error) { dataBytes, err := os.ReadFile(g.dataPath) if err != nil { return nil, -1, err } return dataBytes, -1, nil } ================================================ FILE: pkg/datagatherer/oidc/oidc.go ================================================ package oidc import ( "context" "encoding/json" "fmt" "net/url" "strings" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/rest" "k8s.io/klog/v2" "github.com/jetstack/preflight/api" "github.com/jetstack/preflight/pkg/datagatherer" "github.com/jetstack/preflight/pkg/kubeconfig" ) // OIDCDiscovery contains the configuration for the oidc data-gatherer. type OIDCDiscovery struct { // KubeConfigPath is the path to the kubeconfig file. If empty, will assume it runs in-cluster. 
KubeConfigPath string `yaml:"kubeconfig"` } // UnmarshalYAML unmarshals the Config resolving GroupVersionResource. func (c *OIDCDiscovery) UnmarshalYAML(unmarshal func(any) error) error { aux := struct { KubeConfigPath string `yaml:"kubeconfig"` }{} err := unmarshal(&aux) if err != nil { return err } c.KubeConfigPath = aux.KubeConfigPath return nil } func (c *OIDCDiscovery) NewDataGatherer(ctx context.Context) (datagatherer.DataGatherer, error) { cl, err := kubeconfig.NewDiscoveryClient(c.KubeConfigPath) if err != nil { return nil, err } return &DataGathererOIDC{ cl: cl.RESTClient(), }, nil } // DataGathererOIDC stores the config for an oidc datagatherer. type DataGathererOIDC struct { cl rest.Interface } var _ datagatherer.DataGatherer = &DataGathererOIDC{} func (g *DataGathererOIDC) Run(ctx context.Context) error { return nil } func (g *DataGathererOIDC) WaitForCacheSync(ctx context.Context) error { // no async functionality, see Fetch return nil } // Fetch will fetch the OIDC discovery document and JWKS from the cluster API server. func (g *DataGathererOIDC) Fetch(ctx context.Context) (any, int, error) { oidcResponse, oidcErr := g.fetchOIDCConfig(ctx) jwksResponse, jwksErr := g.fetchJWKS(ctx) errToString := func(err error) string { if err != nil { return err.Error() } return "" } if oidcErr != nil { klog.FromContext(ctx).V(4).Error(oidcErr, "Failed to fetch OIDC configuration") } if jwksErr != nil { klog.FromContext(ctx).V(4).Error(jwksErr, "Failed to fetch JWKS") } return &api.OIDCDiscoveryData{ OIDCConfig: oidcResponse, OIDCConfigError: errToString(oidcErr), JWKS: jwksResponse, JWKSError: errToString(jwksErr), }, 1 /* we have 1 result, so return 1 as count */, nil } func (g *DataGathererOIDC) fetchOIDCConfig(ctx context.Context) (map[string]any, error) { // Fetch the OIDC discovery document from the well-known endpoint. 
result := g.cl.Get().AbsPath("/.well-known/openid-configuration").Do(ctx) if err := result.Error(); err != nil { return nil, fmt.Errorf("failed to get /.well-known/openid-configuration: %s", k8sErrorMessage(err)) } bytes, _ := result.Raw() // we already checked result.Error(), so there is no error here var oidcResponse map[string]any if err := json.Unmarshal(bytes, &oidcResponse); err != nil { return nil, fmt.Errorf("failed to unmarshal OIDC discovery document: %v (raw: %q)", err, stringFirstN(string(bytes), 80)) } return oidcResponse, nil } func (g *DataGathererOIDC) fetchJWKS(ctx context.Context) (map[string]any, error) { // Fetch the JWKS from the default /openid/v1/jwks endpoint. // We are not using the jwks_uri from the OIDC config because: // - on hybrid OpenShift clusters, we saw it pointed to a non-existent URL // - on fully private AWS EKS clusters, the URL is still public and might not // be reachable from within the cluster (https://github.com/aws/containers-roadmap/issues/2038) // So we are using the default path instead, which we think should work in most cases. 
result := g.cl.Get().AbsPath("/openid/v1/jwks").Do(ctx) if err := result.Error(); err != nil { return nil, fmt.Errorf("failed to get /openid/v1/jwks: %s", k8sErrorMessage(err)) } bytes, _ := result.Raw() // we already checked result.Error(), so there is no error here var jwksResponse map[string]any if err := json.Unmarshal(bytes, &jwksResponse); err != nil { return nil, fmt.Errorf("failed to unmarshal JWKS response: %v (raw: %q)", err, stringFirstN(string(bytes), 80)) } return jwksResponse, nil } func stringFirstN(s string, n int) string { if len(s) <= n { return s } return s[:n] } // based on https://github.com/kubernetes/kubectl/blob/a64ceaeab69eed1f11a9e1bd91cf2c1446de811c/pkg/cmd/util/helpers.go#L244 func k8sErrorMessage(err error) string { if status, isStatus := err.(apierrors.APIStatus); isStatus { switch s := status.Status(); { case s.Reason == metav1.StatusReasonUnauthorized: return fmt.Sprintf("error: You must be logged in to the server (%s)", s.Message) case len(s.Reason) > 0: return fmt.Sprintf("Error from server (%s): %s", s.Reason, err.Error()) default: return fmt.Sprintf("Error from server: %s", err.Error()) } } if apierrors.IsUnexpectedObjectError(err) { return fmt.Sprintf("Server returned an unexpected response: %s", err.Error()) } if t, isURL := err.(*url.Error); isURL { if strings.Contains(t.Err.Error(), "connection refused") { host := t.URL if server, err := url.Parse(t.URL); err == nil { host = server.Host } return fmt.Sprintf("The connection to the server %s was refused - did you specify the right host or port?", host) } return fmt.Sprintf("Unable to connect to the server: %v", t.Err) } return fmt.Sprintf("error: %v", err) } ================================================ FILE: pkg/datagatherer/oidc/oidc_test.go ================================================ package oidc import ( "bytes" "net/http" "net/http/httptest" "net/url" "testing" "github.com/stretchr/testify/require" "k8s.io/client-go/discovery" "k8s.io/client-go/rest" 
"github.com/jetstack/preflight/api" ) func makeRESTClient(t *testing.T, ts *httptest.Server) rest.Interface { t.Helper() u, err := url.Parse(ts.URL) if err != nil { t.Fatalf("parse server url: %v", err) } cfg := &rest.Config{ Host: u.Host, } discoveryClient, err := discovery.NewDiscoveryClientForConfigAndClient(cfg, ts.Client()) if err != nil { t.Fatalf("new discovery client: %v", err) } return discoveryClient.RESTClient() } func TestFetch_Success(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case "/.well-known/openid-configuration": w.Header().Set("Content-Type", "application/json") _, _ = w.Write([]byte(`{"issuer":"https://example"}`)) case "/openid/v1/jwks": w.Header().Set("Content-Type", "application/json") _, _ = w.Write([]byte(`{"keys":[]}`)) default: http.NotFound(w, r) } })) defer ts.Close() rc := makeRESTClient(t, ts) g := &DataGathererOIDC{cl: rc} anyRes, count, err := g.Fetch(t.Context()) require.NoError(t, err) require.Equal(t, 1, count) res, ok := anyRes.(*api.OIDCDiscoveryData) require.True(t, ok, "unexpected result type") require.NotNil(t, res.OIDCConfig) require.Equal(t, "https://example", res.OIDCConfig["issuer"].(string)) require.Empty(t, res.OIDCConfigError) require.NotNil(t, res.JWKS) _, ok = res.JWKS["keys"].([]any) require.True(t, ok, "unexpected result type") require.Empty(t, res.JWKSError) } func TestFetch_Errors(t *testing.T) { tests := []struct { name string openidConfigurationResponse func(w http.ResponseWriter, r *http.Request) jwksResponse func(w http.ResponseWriter, r *http.Request) expOIDCConfigError string expJWKSError string }{ { name: "5xx errors", openidConfigurationResponse: func(w http.ResponseWriter, r *http.Request) { http.Error(w, "boom", http.StatusInternalServerError) }, jwksResponse: func(w http.ResponseWriter, r *http.Request) { http.Error(w, "boom", http.StatusInternalServerError) }, expOIDCConfigError: `failed to get 
/.well-known/openid-configuration: Error from server (InternalError): an error on the server ("boom") has prevented the request from succeeding`, expJWKSError: `failed to get /openid/v1/jwks: Error from server (InternalError): an error on the server ("boom") has prevented the request from succeeding`, }, { name: "malformed JSON", openidConfigurationResponse: func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") _, _ = w.Write([]byte(`}{`)) }, jwksResponse: func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") _, _ = w.Write([]byte(`}`)) _, _ = w.Write(bytes.Repeat([]byte{'0'}, 5000)) }, expOIDCConfigError: `failed to unmarshal OIDC discovery document: invalid character '}' looking for beginning of value (raw: "}{")`, expJWKSError: `failed to unmarshal JWKS response: invalid character '}' looking for beginning of value (raw: "}0000000000000000000000000000000000000000000000000000000000000000000000000000000")`, }, { name: "Forbidden error (no body)", openidConfigurationResponse: func(w http.ResponseWriter, r *http.Request) { http.Error(w, "forbidden", http.StatusForbidden) }, jwksResponse: func(w http.ResponseWriter, r *http.Request) { http.Error(w, "forbidden", http.StatusForbidden) }, expOIDCConfigError: "failed to get /.well-known/openid-configuration: Error from server (Forbidden): forbidden", expJWKSError: "failed to get /openid/v1/jwks: Error from server (Forbidden): forbidden", }, { name: "Forbidden error (*metav1.Status body)", openidConfigurationResponse: func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusForbidden) _, _ = w.Write([]byte(`{ "kind":"Status", "apiVersion":"v1", "metadata":{}, "status":"Failure", "message":"forbidden: User \"system:serviceaccount:default:test\" cannot get path \"/.well-known/openid-configuration\"", "reason":"Forbidden", "details":{}, "code":403 }`)) }, jwksResponse: 
func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusForbidden) _, _ = w.Write([]byte(`{ "kind":"Status", "apiVersion":"v1", "metadata":{}, "status":"Failure", "message":"forbidden: User \"system:serviceaccount:default:test\" cannot get path \"/openid/v1/jwks\"", "reason":"Forbidden", "details":{}, "code":403 }`)) }, expOIDCConfigError: `failed to get /.well-known/openid-configuration: Error from server (Forbidden): forbidden: User "system:serviceaccount:default:test" cannot get path "/.well-known/openid-configuration"`, expJWKSError: `failed to get /openid/v1/jwks: Error from server (Forbidden): forbidden: User "system:serviceaccount:default:test" cannot get path "/openid/v1/jwks"`, }, { name: "Unauthorized error (*metav1.Status body)", openidConfigurationResponse: func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusForbidden) _, _ = w.Write([]byte(`{ "kind": "Status", "apiVersion": "v1", "metadata": {}, "status": "Failure", "message": "Unauthorized", "reason": "Unauthorized", "code": 401 }`)) }, jwksResponse: func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusForbidden) _, _ = w.Write([]byte(`{ "kind": "Status", "apiVersion": "v1", "metadata": {}, "status": "Failure", "message": "Unauthorized", "reason": "Unauthorized", "code": 401 }`)) }, expOIDCConfigError: `failed to get /.well-known/openid-configuration: error: You must be logged in to the server (Unauthorized)`, expJWKSError: `failed to get /openid/v1/jwks: error: You must be logged in to the server (Unauthorized)`, }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case "/.well-known/openid-configuration": tc.openidConfigurationResponse(w, r) return case "/openid/v1/jwks": 
tc.jwksResponse(w, r) return default: t.Fatalf("unexpected request path: %s", r.URL.Path) } })) defer ts.Close() rc := makeRESTClient(t, ts) g := &DataGathererOIDC{cl: rc} anyRes, count, err := g.Fetch(t.Context()) require.NoError(t, err) require.Equal(t, 1, count) res, ok := anyRes.(*api.OIDCDiscoveryData) require.True(t, ok, "unexpected result type") require.Nil(t, res.OIDCConfig) require.NotEmpty(t, res.OIDCConfigError) require.Equal(t, tc.expOIDCConfigError, res.OIDCConfigError) require.Nil(t, res.JWKS) require.NotEmpty(t, res.JWKSError) require.Equal(t, tc.expJWKSError, res.JWKSError) }) } } ================================================ FILE: pkg/echo/echo.go ================================================ package echo import ( "encoding/json" "fmt" "net/http" "github.com/fatih/color" "github.com/spf13/cobra" "github.com/jetstack/preflight/api" ) var EchoListen string var Compact bool func Echo(cmd *cobra.Command, args []string) error { http.HandleFunc("/", echoHandler) fmt.Println("Listening to requests at ", EchoListen) return http.ListenAndServe(EchoListen, nil) } func echoHandler(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { writeError(w, fmt.Sprintf("invalid method. 
Expected POST, received %s", r.Method), http.StatusBadRequest) return } // decode all data, however only datareadings are printed below var payload api.DataReadingsPost err := json.NewDecoder(r.Body).Decode(&payload) if err != nil { writeError(w, fmt.Sprintf("decoding body: %+v", err), http.StatusBadRequest) return } // print the data sent to the echo server to the console if Compact { fmt.Printf("-- %s %s -> created %d\n", r.Method, r.URL.Path, http.StatusCreated) fmt.Printf("received %d readings:\n", len(payload.DataReadings)) for _, r := range payload.DataReadings { fmt.Printf("%+v\n", r) } } else { color.Green("-- %s %s -> created %d\n", r.Method, r.URL.Path, http.StatusCreated) fmt.Printf("received %d readings:\n", len(payload.DataReadings)) for i, r := range payload.DataReadings { c := color.New(color.FgYellow) if i%2 == 0 { c = color.New(color.FgCyan) } c.Printf("%v:\n%s\n", i, prettyPrint(r)) } color.Green("-----") } // return successful response to the agent fmt.Fprintf(w, `{ "status": "ok" }`) w.Header().Set("Content-Type", "application/json") } func writeError(w http.ResponseWriter, err string, code int) { fmt.Printf("-- error %d -> %s\n", code, err) w.Header().Set("Content-Type", "application/json") http.Error(w, fmt.Sprintf(`{ "error": "%s", "code": %d }`, err, code), code) } func prettyPrint(reading *api.DataReading) string { return fmt.Sprintf(`ClusterID: %s Data gatherer: %s Timestamp: %s SchemaVersion: %s Data: %+v`, reading.ClusterID, reading.DataGatherer, reading.Timestamp, reading.SchemaVersion, reading.Data) } ================================================ FILE: pkg/echo/echo_test.go ================================================ package echo import ( "bytes" "encoding/json" "net/http" "net/http/httptest" "testing" "time" "k8s.io/apimachinery/pkg/version" "github.com/jetstack/preflight/api" ) type testInput struct { description string data *api.DataReadingsPost exp int method string } func TestEchoServerRequestResponse(t *testing.T) { // 
create sample data in same format that would be generated by the agent sampleUploadCases := []testInput{ { description: "correct request input should return status code 200", data: &api.DataReadingsPost{ AgentMetadata: &api.AgentMetadata{ Version: "test suite", ClusterID: "test_suite_cluster", }, DataGatherTime: time.Now(), DataReadings: []*api.DataReading{ { ClusterID: "test_suite_cluster", DataGatherer: "dummy", Timestamp: api.Time{Time: time.Now()}, Data: &api.DiscoveryData{ ServerVersion: &version.Info{ GitVersion: "v1.20.0", }, }, SchemaVersion: "2.0.0", }, }, }, exp: http.StatusOK, method: "POST", }, { description: "sending GET request should return status code 400", method: "GET", data: nil, exp: http.StatusBadRequest, }, } for _, sampleUpload := range sampleUploadCases { // generate the JSON representation of the data to be sent to the echo server requestBodyJSON, err := json.Marshal(sampleUpload.data) if err != nil { t.Fatalf("[%s]\nfailed to generate JSON request body to post: %s", sampleUpload.description, err) } // generate a request to test the handler containing the JSON data as a body req, err := http.NewRequestWithContext(t.Context(), sampleUpload.method, "http://example.com/api/v1/datareadings", bytes.NewBuffer(requestBodyJSON)) if err != nil { t.Fatalf("[%s]\nfailed to generate request to test echo server: %s", sampleUpload.description, err) } // create recorder to save the response rr := httptest.NewRecorder() // perform the request with the handler echoHandler(rr, req) // Check the response from the echo handler is the expected one response := rr.Result() if response.StatusCode != sampleUpload.exp { t.Fatalf("[%s]\necho server responded with an unexpected code: %d", sampleUpload.description, response.StatusCode) } } } ================================================ FILE: pkg/kubeconfig/client.go ================================================ package kubeconfig import ( "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" 
"k8s.io/client-go/kubernetes" ) // NewDynamicClient creates a new 'dynamic' clientset using the provided kubeconfig. // If kubeconfigPath is not set/empty, it will attempt to load configuration using // the default loading rules. func NewDynamicClient(kubeconfigPath string) (dynamic.Interface, error) { cfg, err := LoadRESTConfig(kubeconfigPath) if err != nil { return nil, err } cl, err := dynamic.NewForConfig(cfg) if err != nil { return nil, err } return cl, nil } // NewDiscoveryClient creates a new 'discovery' client using the provided // kubeconfig. If kubeconfigPath is not set/empty, it will attempt to load // configuration using the default loading rules. func NewDiscoveryClient(kubeconfigPath string) (*discovery.DiscoveryClient, error) { cfg, err := LoadRESTConfig(kubeconfigPath) if err != nil { return nil, err } discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg) if err != nil { return nil, err } return discoveryClient, nil } // NewClientSet creates a new kubernetes clientset using the provided kubeconfig. // If kubeconfigPath is not set/empty, it will attempt to load configuration using // the default loading rules. func NewClientSet(kubeconfigPath string) (kubernetes.Interface, error) { cfg, err := LoadRESTConfig(kubeconfigPath) if err != nil { return nil, err } clientset, err := kubernetes.NewForConfig(cfg) if err != nil { return nil, err } return clientset, nil } ================================================ FILE: pkg/kubeconfig/client_test.go ================================================ package kubeconfig import ( "os" "testing" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest" ) // These tests do not currently validate the created dynamic client uses the // KUBECONFIG file that we create, however it _does_ exercise enough of the // code path to show that the function is correctly selecting which file to // load and returning it. 
func TestNewDynamicClient_ExplicitKubeconfig(t *testing.T) { kc := createValidTestConfig() path := writeConfigToFile(t, kc) _, err := NewDynamicClient(path) if err != nil { t.Error("failed to create client: ", err) } } func TestNewDynamicClient_InferredKubeconfig(t *testing.T) { kc := createValidTestConfig() path := writeConfigToFile(t, kc) cleanupFn := temporarilySetEnv("KUBECONFIG", path) defer cleanupFn() _, err := NewDynamicClient("") if err != nil { t.Error("failed to create client: ", err) } } func TestNewDiscoveryClient_ExplicitKubeconfig(t *testing.T) { kc := createValidTestConfig() path := writeConfigToFile(t, kc) _, err := NewDiscoveryClient(path) if err != nil { t.Error("failed to create client: ", err) } } func TestNewDiscoveryClient_InferredKubeconfig(t *testing.T) { kc := createValidTestConfig() path := writeConfigToFile(t, kc) cleanupFn := temporarilySetEnv("KUBECONFIG", path) defer cleanupFn() _, err := NewDiscoveryClient("") if err != nil { t.Error("failed to create client: ", err) } } func writeConfigToFile(t *testing.T, cfg clientcmdapi.Config) string { f, err := os.CreateTemp(t.TempDir(), "testcase-*") if err != nil { t.Fatal(err) } defer f.Close() if err := clientcmdlatest.Codec.Encode(&cfg, f); err != nil { t.Fatal(err) } return f.Name() } func createValidTestConfig() clientcmdapi.Config { const ( server = "https://example.com:8080" token = "the-token" ) config := clientcmdapi.NewConfig() config.Clusters["clean"] = &clientcmdapi.Cluster{ Server: server, } config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{ Token: token, } config.Contexts["clean"] = &clientcmdapi.Context{ Cluster: "clean", AuthInfo: "clean", } config.CurrentContext = "clean" return *config } func temporarilySetEnv(key, value string) func() { old := os.Getenv(key) os.Setenv(key, value) return func() { os.Setenv(key, old) } } ================================================ FILE: pkg/kubeconfig/kubeconfig.go ================================================ package kubeconfig 
import (
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// LoadRESTConfig loads the kube config from the provided path. If the path is
// empty, the kube config will be loaded from KUBECONFIG, and if KUBECONFIG
// isn't set, the in-cluster config will be used.
func LoadRESTConfig(path string) (*rest.Config, error) {
	loadingrules := clientcmd.NewDefaultClientConfigLoadingRules()
	// If the kubeconfig path is provided, use that file and fail if it does
	// not exist.
	// If the kubeconfig path is not provided, use the default loading rules
	// so we read the regular KUBECONFIG variable or create a non-interactive
	// client for agents running in cluster
	loadingrules.ExplicitPath = path
	cfg, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		loadingrules, &clientcmd.ConfigOverrides{},
	).ClientConfig()
	if err != nil {
		return nil, err
	}
	return cfg, nil
}



================================================
FILE: pkg/logs/logs.go
================================================
package logs

import (
	"bytes"
	"fmt"
	"log"
	"log/slog"
	"strings"

	"github.com/spf13/pflag"
	"k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/component-base/featuregate"
	"k8s.io/component-base/logs"
	logsapi "k8s.io/component-base/logs/api/v1"
	"k8s.io/klog/v2"
	ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"

	_ "k8s.io/component-base/logs/json/register"
)

// venafi-kubernetes-agent follows [Kubernetes Logging Conventions] and writes
// logs in [Kubernetes text logging format] by default. It does not support
// named levels (aka. severity), instead it uses arbitrary levels. Errors and
// warnings are logged to stderr and Info messages to stdout, because that is
// how some cloud logging systems (notably Google Cloud Logs Explorer) assign a
// severity (INFO or ERROR) in the UI. The agent's and vcert's logs are
// written as Info messages with level=0.
// // Further reading: // - [Kubernetes logging conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md) // - [Kubernetes text logging format](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#text-logging-format) // - [Why not named levels, like Info/Warning/Error?](https://github.com/go-logr/logr?tab=readme-ov-file#why-not-named-levels-like-infowarningerror) // - [GKE logs best practices](https://cloud.google.com/kubernetes-engine/docs/concepts/about-logs#best_practices) // - [Structured Logging KEP](https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/1602-structured-logging/README.md) // - [Examples of using k8s.io/component-base/logs](https://github.com/kubernetes/kubernetes/tree/master/staging/src/k8s.io/component-base/logs/example), // upon which this code was based. var ( // All but the essential logging flags will be hidden to avoid overwhelming // the user. The hidden flags can still be used. For example if a user does // not like the split-stream behavior and a Venafi field engineer can // instruct them to patch --log-json-split-stream=false on to the Deployment // arguments. visibleFlagNames = sets.New[string]("v", "vmodule", "logging-format") // This default logging configuration will be updated with values from the // logging flags, even those that are hidden. configuration = logsapi.NewLoggingConfiguration() // Logging features will be added to this feature gate, but the // feature-gates flag will be hidden from the user. features = featuregate.NewFeatureGate() ) const ( // Standard log verbosity levels. // Use these instead of integers in venafi-kubernetes-agent code. Info = 0 Debug = 1 Trace = 2 ) func init() { runtime.Must(logsapi.AddFeatureGates(features)) // Turn on ALPHA options to enable the split-stream logging options. 
runtime.Must(features.OverrideDefault(logsapi.LoggingAlphaOptions, true)) } // AddFlags adds log related flags to the supplied flag set. // // The split-stream options are enabled by default, so that errors are logged to // stderr and info to stdout, allowing cloud logging systems to assign a // severity INFO or ERROR to the messages. func AddFlags(fs *pflag.FlagSet) { var tfs pflag.FlagSet logsapi.AddFlags(configuration, &tfs) features.AddFlag(&tfs) tfs.VisitAll(func(f *pflag.Flag) { if !visibleFlagNames.Has(f.Name) { _ = tfs.MarkHidden(f.Name) } // The original usage string includes details about how // JSON logging is only available when BETA logging features are // enabled, but that's not relevant here because the feature is enabled // by default. if f.Name == "logging-format" { f.Usage = `Sets the log format. Permitted formats: "json", "text".` } if f.Name == "log-text-split-stream" { f.DefValue = "true" runtime.Must(f.Value.Set("true")) } if f.Name == "log-json-split-stream" { f.DefValue = "true" runtime.Must(f.Value.Set("true")) } // Since `--v` (which is the long form of `-v`) isn't the standard in // our projects (it only exists in cert-manager, webhook, and such), // let's rename it to the more common `--log-level`, which appears in // openshift-routes, csi-driver, trust-manager, and approver-policy. // More details at: // https://github.com/jetstack/jetstack-secure/pull/596#issuecomment-2421708181 if f.Name == "v" { f.Name = "log-level" f.Shorthand = "v" f.Usage = fmt.Sprintf("%s. 0=Info, 1=Debug, 2=Trace. Use 6-9 for increasingly verbose HTTP request logging. (default: 0)", f.Usage) } }) fs.AddFlagSet(&tfs) } // Initialize uses k8s.io/component-base/logs, to configure the following global // loggers: log, slog, and klog. All are configured to write in the same format. func Initialize() error { // This configures the global logger in klog *and* slog, if compiled with Go // >= 1.21. 
logs.InitLogs() if err := logsapi.ValidateAndApply(configuration, features); err != nil { return fmt.Errorf("Error in logging configuration: %s", err) } // Thanks to logs.InitLogs, slog.Default now uses klog as its backend. Thus, // the client-go library, which relies on klog.Info, has the same logger as // the agent, which still uses log.Printf. slog := slog.Default() // Let's make sure the VCert library, which is the only library we import to // be using the global log.Default, also uses the common slog logger. vcertLog := log.Default() vcertLog.SetOutput(LogToSlogWriter{Slog: slog, Source: "vcert"}) // The venafi-connection-lib client uses various controller-runtime packages // which emit log messages. Make sure those log messages are not discarded. ctrlruntimelog.SetLogger(klog.Background().WithValues("source", "controller-runtime")) return nil } type LogToSlogWriter struct { Slog *slog.Logger Source string } func (w LogToSlogWriter) Write(p []byte) (n int, err error) { // log.Printf writes a newline at the end of the message, so we need to trim // it. p = bytes.TrimSuffix(p, []byte("\n")) message := string(p) if strings.Contains(message, "error") || strings.Contains(message, "failed") { w.Slog.With("source", w.Source).Error(message) } else { w.Slog.With("source", w.Source).Info(message) } return len(p), nil } ================================================ FILE: pkg/logs/logs_test.go ================================================ package logs_test import ( "bytes" "errors" "fmt" "io" "log" "log/slog" "os" "os/exec" "regexp" "strings" "testing" "time" "github.com/spf13/pflag" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/klog/v2" "github.com/jetstack/preflight/pkg/logs" _ "github.com/Venafi/vcert/v5" ) // TestLogs demonstrates how the logging flags affect the logging output. // // The test executes itself with as a sub-process to avoid mutating the global // logging configuration. 
//
// Inspired by:
// - https://stackoverflow.com/a/67945462
// - https://go.dev/src/flag/flag_test.go (TestExitCode)
func TestLogs(t *testing.T) {
	// Child-process mode: when GO_CHILD_FLAG is set, this process parses the
	// provided flags, (unless GO_CHILD_SKIP_INITIALIZE is set) initializes the
	// logging stack, emits one message through every supported logging API,
	// then exits. The parent process asserts on the captured output.
	if flags, found := os.LookupEnv("GO_CHILD_FLAG"); found {
		if _, found := os.LookupEnv("GO_CHILD_SKIP_INITIALIZE"); !found {
			fs := pflag.NewFlagSet("test-logs", pflag.ContinueOnError)
			fs.SetOutput(io.Discard)
			logs.AddFlags(fs)
			if err := fs.Parse(strings.Split(flags, " ")); err != nil {
				exitCode := 0
				if errors.Is(err, pflag.ErrHelp) {
					// -h/--help: print usage and exit 0.
					fmt.Fprint(os.Stdout, fs.FlagUsages())
					os.Exit(exitCode)
				} else {
					// Any other flag error: exit 1 via klog.
					exitCode := 1
					klog.ErrorS(err, "Exiting due to error", "exit-code", exitCode)
					klog.FlushAndExit(time.Second, exitCode)
				}
			}
			if err := logs.Initialize(); err != nil {
				exitCode := 1
				klog.ErrorS(err, "Exiting due to error", "exit-code", exitCode)
				klog.FlushAndExit(time.Second, exitCode)
			}
		}
		// Emit one message through every logging API the agent uses.
		log.Print("log Print")
		slog.Info("slog Info")
		slog.Warn("slog Warn")
		slog.Error("slog Error")
		klog.Info("klog Info")
		klog.Warning("klog Warning")
		klog.ErrorS(errors.New("fake-error"), "klog Error")
		klog.InfoS("klog InfoS", "key", "value")
		logger := klog.FromContext(t.Context()).WithName("foo")
		logger.V(3).Info("Contextual Info Level 3", "key", "value")
		logger.Error(errors.New("fake-error"), "Contextual error", "key", "value")
		klog.FlushAndExit(time.Second, 0)
	}

	tests := []struct {
		name string
		// flags is passed to the child process via GO_CHILD_FLAG.
		flags string
		// skipIntialize skips flag parsing and logs.Initialize in the child.
		// NOTE(review): field name has a typo ("Intialize"); kept as-is.
		skipIntialize bool
		expectError   bool
		expectStdout  string
		expectStderr  string
	}{
		{
			name:  "help",
			flags: "-h",
			expectStdout: ` -v, --log-level Level number for the log level verbosity. 0=Info, 1=Debug, 2=Trace. Use 6-9 for increasingly verbose HTTP request logging. (default: 0) --logging-format string Sets the log format. Permitted formats: "json", "text". (default "text") --vmodule pattern=N,...
comma-separated list of pattern=N settings for file-filtered logging (only works for text log format) `,
		},
		{
			name:         "unrecognized-flag",
			flags:        "--foo",
			expectError:  true,
			expectStderr: ` E0000 00:00:00.000000 00000 logs_test.go:000] "Exiting due to error" err="unknown flag: --foo" exit-code=1 `,
		},
		{
			name:         "v-long-form-not-available",
			flags:        "--v=3",
			expectError:  true,
			expectStderr: ` E0000 00:00:00.000000 00000 logs_test.go:000] "Exiting due to error" err="unknown flag: --v" exit-code=1 `,
		},
		{
			name:         "logging-format-unrecognized",
			flags:        "--logging-format=foo",
			expectError:  true,
			expectStderr: ` E0000 00:00:00.000000 00000 logs_test.go:000] "Exiting due to error" err="Error in logging configuration: format: Invalid value: \"foo\": Unsupported log format" exit-code=1 `,
		},
		{
			name:          "original-defaults",
			flags:         "",
			skipIntialize: true,
			expectStderr:  ` 0000/00/00 00:00:00 log Print 0000/00/00 00:00:00 INFO slog Info 0000/00/00 00:00:00 WARN slog Warn 0000/00/00 00:00:00 ERROR slog Error I0000 00:00:00.000000 00000 logs_test.go:000] klog Info W0000 00:00:00.000000 00000 logs_test.go:000] klog Warning E0000 00:00:00.000000 00000 logs_test.go:000] "klog Error" err="fake-error" I0000 00:00:00.000000 00000 logs_test.go:000] "klog InfoS" key="value" E0000 00:00:00.000000 00000 logs_test.go:000] "Contextual error" err="fake-error" logger="foo" key="value" `,
		},
		{
			name:         "modified-defaults",
			flags:        "",
			expectStdout: ` I0000 00:00:00.000000 00000 logs.go:000] "log Print" source="vcert" I0000 00:00:00.000000 00000 logs_test.go:000] "slog Info" I0000 00:00:00.000000 00000 logs_test.go:000] klog Info I0000 00:00:00.000000 00000 logs_test.go:000] "klog InfoS" key="value" `,
			expectStderr: ` W0000 00:00:00.000000 00000 logs_test.go:000] "slog Warn" E0000 00:00:00.000000 00000 logs_test.go:000] "slog Error" W0000 00:00:00.000000 00000 logs_test.go:000] klog Warning E0000 00:00:00.000000 00000 logs_test.go:000] "klog Error" err="fake-error" E0000 00:00:00.000000 00000 logs_test.go:000]
"Contextual error" err="fake-error" logger="foo" key="value" `,
		},
		{
			name:         "logging-format-json",
			flags:        "--logging-format=json",
			expectStdout: ` {"ts":0000000000000.000,"caller":"logs/logs.go:000","msg":"log Print","source":"vcert","v":0} {"ts":0000000000000.000,"caller":"logs/logs_test.go:000","msg":"slog Info","v":0} {"ts":0000000000000.000,"caller":"logs/logs_test.go:000","msg":"slog Warn","v":0} {"ts":0000000000000.000,"caller":"logs/logs_test.go:000","msg":"klog Info","v":0} {"ts":0000000000000.000,"caller":"logs/logs_test.go:000","msg":"klog Warning","v":0} {"ts":0000000000000.000,"caller":"logs/logs_test.go:000","msg":"klog InfoS","v":0,"key":"value"} `,
			expectStderr: ` {"ts":0000000000000.000,"caller":"logs/logs_test.go:000","msg":"slog Error"} {"ts":0000000000000.000,"caller":"logs/logs_test.go:000","msg":"klog Error","err":"fake-error"} {"ts":0000000000000.000,"logger":"foo","caller":"logs/logs_test.go:000","msg":"Contextual error","key":"value","err":"fake-error"} `,
		},
		{
			name:         "log-json-split-stream-false",
			flags:        "--logging-format=json --log-json-split-stream=false",
			expectStderr: ` {"ts":0000000000000.000,"caller":"logs/logs.go:000","msg":"log Print","source":"vcert","v":0} {"ts":0000000000000.000,"caller":"logs/logs_test.go:000","msg":"slog Info","v":0} {"ts":0000000000000.000,"caller":"logs/logs_test.go:000","msg":"slog Warn","v":0} {"ts":0000000000000.000,"caller":"logs/logs_test.go:000","msg":"slog Error"} {"ts":0000000000000.000,"caller":"logs/logs_test.go:000","msg":"klog Info","v":0} {"ts":0000000000000.000,"caller":"logs/logs_test.go:000","msg":"klog Warning","v":0} {"ts":0000000000000.000,"caller":"logs/logs_test.go:000","msg":"klog Error","err":"fake-error"} {"ts":0000000000000.000,"caller":"logs/logs_test.go:000","msg":"klog InfoS","v":0,"key":"value"} {"ts":0000000000000.000,"logger":"foo","caller":"logs/logs_test.go:000","msg":"Contextual error","key":"value","err":"fake-error"} `,
		},
		{
			name: "logging-format-text",
			flags:
			"--logging-format=text",
			expectStdout: ` I0000 00:00:00.000000 00000 logs.go:000] "log Print" source="vcert" I0000 00:00:00.000000 00000 logs_test.go:000] "slog Info" I0000 00:00:00.000000 00000 logs_test.go:000] klog Info I0000 00:00:00.000000 00000 logs_test.go:000] "klog InfoS" key="value" `,
			expectStderr: ` W0000 00:00:00.000000 00000 logs_test.go:000] "slog Warn" E0000 00:00:00.000000 00000 logs_test.go:000] "slog Error" W0000 00:00:00.000000 00000 logs_test.go:000] klog Warning E0000 00:00:00.000000 00000 logs_test.go:000] "klog Error" err="fake-error" E0000 00:00:00.000000 00000 logs_test.go:000] "Contextual error" err="fake-error" logger="foo" key="value" `,
		},
		{
			name:         "log-text-split-stream-false",
			flags:        "--logging-format=text --log-text-split-stream=false",
			expectStderr: ` I0000 00:00:00.000000 00000 logs.go:000] "log Print" source="vcert" I0000 00:00:00.000000 00000 logs_test.go:000] "slog Info" W0000 00:00:00.000000 00000 logs_test.go:000] "slog Warn" E0000 00:00:00.000000 00000 logs_test.go:000] "slog Error" I0000 00:00:00.000000 00000 logs_test.go:000] klog Info W0000 00:00:00.000000 00000 logs_test.go:000] klog Warning E0000 00:00:00.000000 00000 logs_test.go:000] "klog Error" err="fake-error" I0000 00:00:00.000000 00000 logs_test.go:000] "klog InfoS" key="value" E0000 00:00:00.000000 00000 logs_test.go:000] "Contextual error" err="fake-error" logger="foo" key="value" `,
		},
		{
			name:         "v-level-3",
			flags:        "-v=3",
			expectStdout: ` I0000 00:00:00.000000 00000 logs.go:000] "log Print" source="vcert" I0000 00:00:00.000000 00000 logs_test.go:000] "slog Info" I0000 00:00:00.000000 00000 logs_test.go:000] klog Info I0000 00:00:00.000000 00000 logs_test.go:000] "klog InfoS" key="value" I0000 00:00:00.000000 00000 logs_test.go:000] "Contextual Info Level 3" logger="foo" key="value" `,
			expectStderr: ` W0000 00:00:00.000000 00000 logs_test.go:000] "slog Warn" E0000 00:00:00.000000 00000 logs_test.go:000] "slog Error" W0000 00:00:00.000000 00000 logs_test.go:000]
klog Warning E0000 00:00:00.000000 00000 logs_test.go:000] "klog Error" err="fake-error" E0000 00:00:00.000000 00000 logs_test.go:000] "Contextual error" err="fake-error" logger="foo" key="value" `,
		},
		{
			name:         "log-level-3",
			flags:        "--log-level=3",
			expectStdout: ` I0000 00:00:00.000000 00000 logs.go:000] "log Print" source="vcert" I0000 00:00:00.000000 00000 logs_test.go:000] "slog Info" I0000 00:00:00.000000 00000 logs_test.go:000] klog Info I0000 00:00:00.000000 00000 logs_test.go:000] "klog InfoS" key="value" I0000 00:00:00.000000 00000 logs_test.go:000] "Contextual Info Level 3" logger="foo" key="value" `,
			expectStderr: ` W0000 00:00:00.000000 00000 logs_test.go:000] "slog Warn" E0000 00:00:00.000000 00000 logs_test.go:000] "slog Error" W0000 00:00:00.000000 00000 logs_test.go:000] klog Warning E0000 00:00:00.000000 00000 logs_test.go:000] "klog Error" err="fake-error" E0000 00:00:00.000000 00000 logs_test.go:000] "Contextual error" err="fake-error" logger="foo" key="value" `,
		},
		{
			name:         "vmodule-level-3",
			flags:        "--vmodule=logs_test=3",
			expectStdout: ` I0000 00:00:00.000000 00000 logs.go:000] "log Print" source="vcert" I0000 00:00:00.000000 00000 logs_test.go:000] "slog Info" I0000 00:00:00.000000 00000 logs_test.go:000] klog Info I0000 00:00:00.000000 00000 logs_test.go:000] "klog InfoS" key="value" I0000 00:00:00.000000 00000 logs_test.go:000] "Contextual Info Level 3" logger="foo" key="value" `,
			expectStderr: ` W0000 00:00:00.000000 00000 logs_test.go:000] "slog Warn" E0000 00:00:00.000000 00000 logs_test.go:000] "slog Error" W0000 00:00:00.000000 00000 logs_test.go:000] klog Warning E0000 00:00:00.000000 00000 logs_test.go:000] "klog Error" err="fake-error" E0000 00:00:00.000000 00000 logs_test.go:000] "Contextual error" err="fake-error" logger="foo" key="value" `,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			ctx := t.Context()
			// Re-execute this test binary, restricted to TestLogs; the child
			// branch above runs because GO_CHILD_FLAG is set in its env.
			cmd := exec.CommandContext(ctx, os.Args[0], "-test.run=^TestLogs$", "-test.v")
			var (
				stdout
bytes.Buffer
				stderr bytes.Buffer
			)
			cmd.Stdout = &stdout
			cmd.Stderr = &stderr
			cmd.Env = append(
				os.Environ(),
				"GO_CHILD_FLAG="+test.flags,
			)
			if test.skipIntialize {
				cmd.Env = append(
					cmd.Env,
					"GO_CHILD_SKIP_INITIALIZE=true",
				)
			}
			err := cmd.Run()
			t.Logf("FLAGS\n%s\n", test.flags)
			// Remove the standard output generated by `-test.v`
			stdoutStr := strings.TrimPrefix(stdout.String(), "=== RUN TestLogs\n")
			stderrStr := stderr.String()
			t.Logf("STDOUT\n%s\n", stdoutStr)
			t.Logf("STDERR\n%s\n", stderrStr)
			if test.expectError {
				var target *exec.ExitError
				require.ErrorAs(t, err, &target)
				require.Equal(t, 1, target.ExitCode(), "Flag parsing failures should always result in exit code 1")
				t.Logf("ERROR: %v", err)
			} else {
				require.NoError(t, err)
			}
			// This trick helps with the readability of the table test: we can
			// have the first "expected" log line at the same level as the other
			// lines.
			test.expectStdout = strings.TrimPrefix(test.expectStdout, "\n")
			test.expectStderr = strings.TrimPrefix(test.expectStderr, "\n")
			require.Equal(t, test.expectStdout, replaceWithStaticTimestamps(stdoutStr), "stdout doesn't match")
			require.Equal(t, test.expectStderr, replaceWithStaticTimestamps(stderrStr), "stderr doesn't match")
		})
	}
}

// Regexps matching the variable parts (timestamps, PIDs, line numbers) of the
// three log formats under test: std log, klog text, and klog JSON.
var (
	timestampRegexpStdLog = regexp.MustCompile(`\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2}`)
	timestampRegexpKlog   = regexp.MustCompile(`\d{4} \d{2}:\d{2}:\d{2}\.\d{6} +\d+`)
	timestampRegexpJSON   = regexp.MustCompile(`"ts":\d+\.?\d*`)
	fileAndLineRegexpJSON = regexp.MustCompile(`"caller":"([^"]+).go:\d+"`)
	fileAndLineRegexpKlog = regexp.MustCompile(` ([^:]+).go:\d+`)
)

// Replaces the klog and JSON timestamps with a static timestamp to make it
// easier to assert the logs. It also replaces the line number with 000 as it
// often changes.
//
// I1018 15:12:57.953433 22183 logs.go:000] log
// {"ts":1729258473588.828,"caller":"log/log.go:000","msg":"log Print","v":0}
// 2024/10/18 15:40:50 log Print
//
// to the fixed:
//
// I0000 00:00:00.000000 00000 logs.go:000] log
// {"ts":0000000000000.000,"caller":"log/log.go:000","msg":"log Print","v":0}
// 0000/00/00 00:00:00 log Print
func replaceWithStaticTimestamps(input string) string {
	input = timestampRegexpKlog.ReplaceAllString(input, "0000 00:00:00.000000 00000")
	input = timestampRegexpJSON.ReplaceAllString(input, `"ts":0000000000000.000`)
	input = timestampRegexpStdLog.ReplaceAllString(input, "0000/00/00 00:00:00")
	input = fileAndLineRegexpJSON.ReplaceAllString(input, `"caller":"$1.go:000"`)
	input = fileAndLineRegexpKlog.ReplaceAllString(input, " $1.go:000")
	return input
}

// Test_replaceWithStaticTimestamps pins the normalization behavior for the
// three log formats (klog text, JSON with and without fractional seconds).
func Test_replaceWithStaticTimestamps(t *testing.T) {
	tests := []struct {
		name     string
		input    string
		expected string
	}{
		{
			name:     "klog",
			input:    `I1018 15:20:42.861239 2386 logs_test.go:13] "Contextual Info Level 3" logger="foo" key="value"`,
			expected: `I0000 00:00:00.000000 00000 logs_test.go:000] "Contextual Info Level 3" logger="foo" key="value"`,
		},
		{
			name:     "json-with-nanoseconds",
			input:    `{"ts":1729270111728.125,"caller":"logs/logs_test.go:000","msg":"slog Warn","v":0}`,
			expected: `{"ts":0000000000000.000,"caller":"logs/logs_test.go:000","msg":"slog Warn","v":0}`,
		},
		{
			name:     "json-might-not-have-nanoseconds",
			input:    `{"ts":1729270111728,"caller":"logs/logs_test.go:000","msg":"slog Info","v":0}`,
			expected: `{"ts":0000000000000.000,"caller":"logs/logs_test.go:000","msg":"slog Info","v":0}`,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			assert.Equal(t, test.expected, replaceWithStaticTimestamps(test.input))
		})
	}
}

================================================
FILE: pkg/permissions/generate.go
================================================
package permissions

import (
	"fmt"
	"strings"

	rbac "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/yaml"

	"github.com/jetstack/preflight/pkg/agent"
	"github.com/jetstack/preflight/pkg/datagatherer/k8sdynamic"
)

// AgentRBACManifests is a wrapper around the various RBAC structs needed to grant the agent fine-grained permissions as per its dg configs
type AgentRBACManifests struct {
	// ClusterRoles is a list of roles for resources the agent will collect
	ClusterRoles []rbac.ClusterRole
	// ClusterRoleBindings is a list of crbs for resources which have no include/exclude ns configured
	ClusterRoleBindings []rbac.ClusterRoleBinding
	// RoleBindings is a list of namespaced bindings to grant permissions when include/exclude ns set
	RoleBindings []rbac.RoleBinding
}

// Namespace and ServiceAccount name the generated bindings grant access to.
const agentNamespace = "jetstack-secure"
const agentSubjectName = "agent"

// GenerateAgentRBACManifests builds, for every "k8s-dynamic" data gatherer, a
// read-only (get/list/watch) ClusterRole plus either per-namespace
// RoleBindings (when IncludeNamespaces is set) or a single ClusterRoleBinding.
// Gatherers of any other Kind are skipped.
func GenerateAgentRBACManifests(dataGatherers []agent.DataGatherer) AgentRBACManifests {
	// create a new AgentRBACManifest struct
	// NOTE(review): this local variable shadows the type of the same name;
	// kept as-is to preserve the code byte-for-byte.
	var AgentRBACManifests AgentRBACManifests
	for _, dg := range dataGatherers {
		if dg.Kind != "k8s-dynamic" {
			continue
		}
		// NOTE(review): unchecked type assertion; panics if Config is not a
		// *k8sdynamic.ConfigDynamic — presumably guaranteed by config parsing.
		dyConfig := dg.Config.(*k8sdynamic.ConfigDynamic)
		metadataName := fmt.Sprintf("%s-agent-%s-reader", agentNamespace, dyConfig.GroupVersionResource.Resource)
		AgentRBACManifests.ClusterRoles = append(AgentRBACManifests.ClusterRoles, rbac.ClusterRole{
			TypeMeta: metav1.TypeMeta{
				Kind:       "ClusterRole",
				APIVersion: "rbac.authorization.k8s.io/v1",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: metadataName,
			},
			Rules: []rbac.PolicyRule{
				{
					Verbs:     []string{"get", "list", "watch"},
					APIGroups: []string{dyConfig.GroupVersionResource.Group},
					Resources: []string{dyConfig.GroupVersionResource.Resource},
				},
			},
		})
		// if dyConfig.IncludeNamespaces has more than 0 items in it
		// then, for each namespace create a rbac.RoleBinding in that namespace
		if len(dyConfig.IncludeNamespaces) != 0 {
			for _, ns := range dyConfig.IncludeNamespaces {
				AgentRBACManifests.RoleBindings = append(AgentRBACManifests.RoleBindings, rbac.RoleBinding{
					TypeMeta: metav1.TypeMeta{
						Kind:       "RoleBinding",
						APIVersion: "rbac.authorization.k8s.io/v1",
					},
					ObjectMeta: metav1.ObjectMeta{
						Name:      metadataName,
						Namespace: ns,
					},
					Subjects: []rbac.Subject{
						{
							Kind:      "ServiceAccount",
							Name:      agentSubjectName,
							Namespace: agentNamespace,
						},
					},
					RoleRef: rbac.RoleRef{
						Kind:     "ClusterRole",
						Name:     metadataName,
						APIGroup: "rbac.authorization.k8s.io",
					},
				})
			}
		} else {
			// only do this if the dg does not have IncludeNamespaces set
			AgentRBACManifests.ClusterRoleBindings = append(AgentRBACManifests.ClusterRoleBindings, rbac.ClusterRoleBinding{
				TypeMeta: metav1.TypeMeta{
					Kind:       "ClusterRoleBinding",
					APIVersion: "rbac.authorization.k8s.io/v1",
				},
				ObjectMeta: metav1.ObjectMeta{
					Name: metadataName,
				},
				Subjects: []rbac.Subject{
					{
						Kind:      "ServiceAccount",
						Name:      agentSubjectName,
						Namespace: agentNamespace,
					},
				},
				RoleRef: rbac.RoleRef{
					Kind:     "ClusterRole",
					Name:     metadataName,
					APIGroup: "rbac.authorization.k8s.io",
				},
			})
		}
	}
	return AgentRBACManifests
}

// createClusterRoleString renders each ClusterRole as YAML, each document
// preceded by a newline and followed by "---".
// NOTE(review): marshal errors are only printed to stdout (no newline) and
// the loop continues; the error is not propagated.
func createClusterRoleString(clusterRoles []rbac.ClusterRole) string {
	var builder strings.Builder
	for _, cb := range clusterRoles {
		data, err := yaml.Marshal(cb)
		if err != nil {
			fmt.Print("Cluster Role fails to marshal")
		}
		builder.WriteString("\n")
		builder.Write(data)
		builder.WriteString("---")
	}
	return builder.String()
}

// createRoleBindingString renders each RoleBinding as YAML; same error
// handling caveat as createClusterRoleString.
func createRoleBindingString(roleBindings []rbac.RoleBinding) string {
	var builder strings.Builder
	for _, cb := range roleBindings {
		data, err := yaml.Marshal(cb)
		if err != nil {
			fmt.Print("Role Binding fails to marshal")
		}
		builder.WriteString("\n")
		builder.Write(data)
		builder.WriteString("---")
	}
	return builder.String()
}

// createClusterRoleBindingString renders each ClusterRoleBinding as YAML; same
// error handling caveat as createClusterRoleString.
func createClusterRoleBindingString(clusterRoleBindings []rbac.ClusterRoleBinding) string {
	var builder strings.Builder
	for _, cb := range clusterRoleBindings {
		data, err := yaml.Marshal(cb)
		if err != nil {
			fmt.Print("Cluster Role Binding fails to marshal")
		}
		builder.WriteString("\n")
		builder.Write(data)
		builder.WriteString("---")
	}
	return builder.String()
}

// GenerateFullManifest renders all generated RBAC objects as one multi-document
// YAML string (ClusterRoles, then ClusterRoleBindings, then RoleBindings),
// trimmed of leading/trailing whitespace and with the serialized
// "creationTimestamp: null" lines removed.
func GenerateFullManifest(dataGatherers []agent.DataGatherer) string {
	agentRBACManifestsStruct := GenerateAgentRBACManifests(dataGatherers)
	agentCLR := createClusterRoleString(agentRBACManifestsStruct.ClusterRoles)
	agentCLRB := createClusterRoleBindingString(agentRBACManifestsStruct.ClusterRoleBindings)
	agentRB := createRoleBindingString(agentRBACManifestsStruct.RoleBindings)
	out := fmt.Sprintf(`%s%s%s`, agentCLR, agentCLRB, agentRB)
	out = strings.TrimPrefix(out, "\n")
	out = strings.TrimSpace(out)
	out = strings.ReplaceAll(out, "\n creationTimestamp: null", "")
	return out
}

================================================
FILE: pkg/permissions/generate_test.go
================================================
package permissions

import (
	"testing"

	"github.com/stretchr/testify/require"
	rbac "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"

	"github.com/jetstack/preflight/pkg/agent"
	"github.com/jetstack/preflight/pkg/datagatherer/k8sdynamic"
)

// TestGenerateAgentRBACManifestsString asserts the rendered YAML output of
// GenerateFullManifest for representative data-gatherer configurations.
func TestGenerateAgentRBACManifestsString(t *testing.T) {
	testCases := []struct {
		description           string
		dataGatherers         []agent.DataGatherer
		expectedRBACManifests string
	}{
		{
			description: "Generate ClusterRole and ClusterRoleBinding for simple pod dg use case",
			dataGatherers: []agent.DataGatherer{
				{
					Name: "k8s/pods",
					Kind: "k8s-dynamic",
					Config: &k8sdynamic.ConfigDynamic{
						GroupVersionResource: schema.GroupVersionResource{
							Version:  "v1",
							Resource: "pods",
						},
					},
				},
			},
			expectedRBACManifests: `apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: jetstack-secure-agent-pods-reader rules: - apiGroups: - "" resources: - pods verbs: - get - list - watch --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: jetstack-secure-agent-pods-reader roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: jetstack-secure-agent-pods-reader subjects: - kind: ServiceAccount name: agent namespace: jetstack-secure ---`,
		},
		{
			description: "Generate ClusterRole and RoleBinding for simple pod dg with include namespace \"foobar\"",
			dataGatherers: []agent.DataGatherer{
				{
					Name: "k8s/pods",
					Kind: "k8s-dynamic",
					Config: &k8sdynamic.ConfigDynamic{
						IncludeNamespaces: []string{"foobar"},
						GroupVersionResource: schema.GroupVersionResource{
							Version:  "v1",
							Resource: "pods",
						},
					},
				},
			},
			expectedRBACManifests: `apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: jetstack-secure-agent-pods-reader rules: - apiGroups: - "" resources: - pods verbs: - get - list - watch --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: jetstack-secure-agent-pods-reader namespace: foobar roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: jetstack-secure-agent-pods-reader subjects: - kind: ServiceAccount name: agent namespace: jetstack-secure ---`,
		},
		{
			description: "Generate multiple ClusterRoles and ClusterRoleBindings for simple pod and nodes dg use case",
			dataGatherers: []agent.DataGatherer{
				{
					Name: "k8s/pods",
					Kind: "k8s-dynamic",
					Config: &k8sdynamic.ConfigDynamic{
						GroupVersionResource: schema.GroupVersionResource{
							Version:  "v1",
							Resource: "pods",
						},
					},
				},
				{
					Name: "k8s/nodes",
					Kind: "k8s-dynamic",
					Config: &k8sdynamic.ConfigDynamic{
						GroupVersionResource: schema.GroupVersionResource{
							Version:  "v1",
							Resource: "nodes",
						},
					},
				},
			},
			expectedRBACManifests: `apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: jetstack-secure-agent-pods-reader rules: - apiGroups: - "" resources: - pods verbs: - get - list - watch --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: jetstack-secure-agent-nodes-reader rules: - apiGroups: - "" resources: - nodes verbs: - get - list - watch --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: jetstack-secure-agent-pods-reader roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: jetstack-secure-agent-pods-reader subjects: - kind: ServiceAccount name: agent namespace: jetstack-secure --- apiVersion: rbac.authorization.k8s.io/v1 kind:
ClusterRoleBinding metadata: name: jetstack-secure-agent-nodes-reader roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: jetstack-secure-agent-nodes-reader subjects: - kind: ServiceAccount name: agent namespace: jetstack-secure ---`,
		},
	}
	for _, input := range testCases {
		got := GenerateFullManifest(input.dataGatherers)
		if input.expectedRBACManifests != got {
			t.Errorf("value mismatch, \n**********expected:******************************\n%s\n**********got:******************************\n%s", input.expectedRBACManifests, got)
		}
	}
}

// TestGenerateAgentRBACManifests asserts the structured (pre-render) output of
// GenerateAgentRBACManifests for cluster-wide and namespaced configurations.
func TestGenerateAgentRBACManifests(t *testing.T) {
	testCases := []struct {
		description                string
		dataGatherers              []agent.DataGatherer
		expectedAgentRBACManifests AgentRBACManifests
	}{
		{
			description: "Generate ClusterRole and ClusterRoleBinding for simple pod dg use case",
			dataGatherers: []agent.DataGatherer{
				{
					Name: "k8s/pods",
					Kind: "k8s-dynamic",
					Config: &k8sdynamic.ConfigDynamic{
						GroupVersionResource: schema.GroupVersionResource{
							Version:  "v1",
							Resource: "pods",
						},
					},
				},
			},
			expectedAgentRBACManifests: AgentRBACManifests{
				ClusterRoles: []rbac.ClusterRole{
					{
						TypeMeta: metav1.TypeMeta{
							Kind:       "ClusterRole",
							APIVersion: "rbac.authorization.k8s.io/v1",
						},
						ObjectMeta: metav1.ObjectMeta{
							Name: "jetstack-secure-agent-pods-reader",
						},
						Rules: []rbac.PolicyRule{
							{
								Verbs:     []string{"get", "list", "watch"},
								APIGroups: []string{""},
								Resources: []string{"pods"},
							},
						},
					},
				},
				ClusterRoleBindings: []rbac.ClusterRoleBinding{
					{
						TypeMeta: metav1.TypeMeta{
							Kind:       "ClusterRoleBinding",
							APIVersion: "rbac.authorization.k8s.io/v1",
						},
						ObjectMeta: metav1.ObjectMeta{
							Name: "jetstack-secure-agent-pods-reader",
						},
						Subjects: []rbac.Subject{
							{
								Kind:      "ServiceAccount",
								Name:      "agent",
								Namespace: "jetstack-secure",
							},
						},
						RoleRef: rbac.RoleRef{
							Kind:     "ClusterRole",
							Name:     "jetstack-secure-agent-pods-reader",
							APIGroup: "rbac.authorization.k8s.io",
						},
					},
				},
			},
		},
		{
			description: "Generate RBAC config for simple pod dg use case where only two namespace are included",
			dataGatherers: []agent.DataGatherer{
				{
					Name: "k8s/pods",
					Kind: "k8s-dynamic",
					Config: &k8sdynamic.ConfigDynamic{
						GroupVersionResource: schema.GroupVersionResource{
							Version:  "v1",
							Resource: "pods",
						},
						IncludeNamespaces: []string{"example", "foobar"},
					},
				},
			},
			expectedAgentRBACManifests: AgentRBACManifests{
				ClusterRoles: []rbac.ClusterRole{
					{
						TypeMeta: metav1.TypeMeta{
							Kind:       "ClusterRole",
							APIVersion: "rbac.authorization.k8s.io/v1",
						},
						ObjectMeta: metav1.ObjectMeta{
							Name: "jetstack-secure-agent-pods-reader",
						},
						Rules: []rbac.PolicyRule{
							{
								Verbs:     []string{"get", "list", "watch"},
								APIGroups: []string{""},
								Resources: []string{"pods"},
							},
						},
					},
				},
				RoleBindings: []rbac.RoleBinding{
					{
						TypeMeta: metav1.TypeMeta{
							Kind:       "RoleBinding",
							APIVersion: "rbac.authorization.k8s.io/v1",
						},
						ObjectMeta: metav1.ObjectMeta{
							Name:      "jetstack-secure-agent-pods-reader",
							Namespace: "example",
						},
						Subjects: []rbac.Subject{
							{
								Kind:      "ServiceAccount",
								Name:      "agent",
								Namespace: "jetstack-secure",
							},
						},
						RoleRef: rbac.RoleRef{
							Kind:     "ClusterRole",
							Name:     "jetstack-secure-agent-pods-reader",
							APIGroup: "rbac.authorization.k8s.io",
						},
					},
					{
						TypeMeta: metav1.TypeMeta{
							Kind:       "RoleBinding",
							APIVersion: "rbac.authorization.k8s.io/v1",
						},
						ObjectMeta: metav1.ObjectMeta{
							Name:      "jetstack-secure-agent-pods-reader",
							Namespace: "foobar",
						},
						Subjects: []rbac.Subject{
							{
								Kind:      "ServiceAccount",
								Name:      "agent",
								Namespace: "jetstack-secure",
							},
						},
						RoleRef: rbac.RoleRef{
							Kind:     "ClusterRole",
							Name:     "jetstack-secure-agent-pods-reader",
							APIGroup: "rbac.authorization.k8s.io",
						},
					},
				},
			},
		},
	}
	for _, input := range testCases {
		got := GenerateAgentRBACManifests(input.dataGatherers)
		require.Equal(t, input.expectedAgentRBACManifests, got)
	}
}

================================================
FILE: pkg/testutil/envtest.go
================================================
package testutil

import (
	"context"
	"crypto/tls"
	"crypto/x509"
	"io"
	"net/http"
	"net/http/httptest"
	"os"
	"strings"
	"sync"
	"testing"
	"github.com/jetstack/venafi-connection-lib/api/v1alpha1"
	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/yaml"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	ctrlruntime "sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/envtest"

	"github.com/jetstack/preflight/internal/cyberark/dataupload"
	"github.com/jetstack/preflight/internal/cyberark/identity"
	"github.com/jetstack/preflight/internal/cyberark/servicediscovery"
	"github.com/jetstack/preflight/pkg/client"
)

// To see the API server logs, set:
//
//	export KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT=true

// WithEnvtest starts a local envtest control plane (kube-apiserver + etcd)
// with the VenafiConnection CRD installed, registers a Cleanup that stops it,
// and returns the environment, its REST config, and a controller-runtime
// client with the corev1, rbacv1, and v1alpha1 schemes registered.
func WithEnvtest(t testing.TB) (_ *envtest.Environment, _ *rest.Config, kclient ctrlruntime.WithWatch) {
	t.Helper()
	// If KUBEBUILDER_ASSETS isn't set, show a warning to the user.
	if os.Getenv("KUBEBUILDER_ASSETS") == "" {
		t.Fatalf("KUBEBUILDER_ASSETS isn't set. You can run this test using `make test`.\n" +
			"But if you prefer not to use `make`, run these two commands first:\n" +
			" make _bin/tools/{kube-apiserver,etcd}\n" +
			" export KUBEBUILDER_ASSETS=$PWD/_bin/tools")
	}
	envtest := &envtest.Environment{
		ErrorIfCRDPathMissing: true,
		CRDDirectoryPaths:     []string{"../../deploy/charts/venafi-kubernetes-agent/crd_bases/jetstack.io_venaficonnections.yaml"},
	}
	restconf, err := envtest.Start()
	t.Cleanup(func() {
		t.Log("Waiting for envtest to exit")
		e := envtest.Stop()
		require.NoError(t, e)
	})
	require.NoError(t, err)
	sch := runtime.NewScheme()
	_ = v1alpha1.AddToScheme(sch)
	_ = corev1.AddToScheme(sch)
	_ = rbacv1.AddToScheme(sch)
	kclient, err = ctrlruntime.NewWithWatch(restconf, ctrlruntime.Options{Scheme: sch})
	require.NoError(t, err)
	return envtest, restconf, kclient
}

// Copied from https://github.com/kubernetes/client-go/issues/711#issuecomment-1666075787.
//
// WithKubeconfig writes a kubeconfig file (client-cert auth) for the given
// REST config into a test temp dir and returns its path.
func WithKubeconfig(t testing.TB, restCfg *rest.Config) string {
	t.Helper()
	clusters := make(map[string]*clientcmdapi.Cluster)
	clusters["default-cluster"] = &clientcmdapi.Cluster{
		Server:                   restCfg.Host,
		CertificateAuthorityData: restCfg.CAData,
	}
	contexts := make(map[string]*clientcmdapi.Context)
	contexts["default-context"] = &clientcmdapi.Context{
		Cluster:  "default-cluster",
		AuthInfo: "default-user",
	}
	authinfos := make(map[string]*clientcmdapi.AuthInfo)
	authinfos["default-user"] = &clientcmdapi.AuthInfo{
		ClientCertificateData: restCfg.CertData,
		ClientKeyData:         restCfg.KeyData,
	}
	clientConfig := clientcmdapi.Config{
		Kind:           "Config",
		APIVersion:     "v1",
		Clusters:       clusters,
		Contexts:       contexts,
		CurrentContext: "default-context",
		AuthInfos:      authinfos,
	}
	d := t.TempDir()
	// NOTE(review): CreateTemp error is ignored here; a failure would surface
	// as a WriteToFile error just below.
	kubeconfig, _ := os.CreateTemp(d, "kubeconfig")
	defer kubeconfig.Close()
	err := clientcmd.WriteToFile(clientConfig, kubeconfig.Name())
	require.NoError(t, err)
	return kubeconfig.Name()
}

// Tests calling to VenConnClient.PostDataReadingsWithOptions must call this
// function to start the VenafiConnection watcher. If you don't call this, the
// test will stall.
func VenConnStartWatching(ctx context.Context, t *testing.T, cl client.Client) {
	t.Helper()
	require.IsType(t, &client.VenConnClient{}, cl)
	// This `cancel` is important because the below func `Start(ctx)` needs to
	// be stopped before the apiserver is stopped. Otherwise, the test fail with
	// the message "timeout waiting for process kube-apiserver to stop". See:
	// https://github.com/jetstack/venafi-connection-lib/pull/158#issuecomment-1949002322
	// https://github.com/kubernetes-sigs/controller-runtime/issues/1571#issuecomment-945535598
	ctx, cancel := context.WithCancel(ctx)
	go func() {
		err := cl.(*client.VenConnClient).Start(ctx)
		require.NoError(t, err)
	}()
	t.Cleanup(cancel)
}

// Works with VenafiCloudClient and VenConnClient. Allows you to trust a given
// CA.
//
// TrustCA installs cert as the sole root CA on the client's HTTP transport.
// NOTE(review): assumes the transport is nil or an *http.Transport; any other
// RoundTripper would panic on the type assertion.
func TrustCA(t *testing.T, cl client.Client, cert *x509.Certificate) {
	t.Helper()
	var httpClient *http.Client
	switch c := cl.(type) {
	case *client.VenafiCloudClient:
		httpClient = c.Client
	case *client.VenConnClient:
		httpClient = c.Client
	default:
		t.Fatalf("unsupported client type: %T", cl)
	}
	pool := x509.NewCertPool()
	pool.AddCert(cert)
	if httpClient.Transport == nil {
		httpClient.Transport = http.DefaultTransport
	}
	if httpClient.Transport.(*http.Transport).TLSClientConfig == nil {
		httpClient.Transport.(*http.Transport).TLSClientConfig = &tls.Config{}
	}
	httpClient.Transport.(*http.Transport).TLSClientConfig.RootCAs = pool
}

// Parses the YAML manifest. Useful for inlining YAML manifests in Go test
// files, to be used in conjunction with `undent`.
// NOTE(review): panics on any decode error other than EOF — acceptable for
// test helpers with literal input.
func Parse(yamlmanifest string) []ctrlruntime.Object {
	dec := yaml.NewYAMLOrJSONDecoder(strings.NewReader(yamlmanifest), 4096)
	var objs []ctrlruntime.Object
	for {
		obj := &unstructured.Unstructured{}
		err := dec.Decode(obj)
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		objs = append(objs, obj)
	}
	return objs
}

// AssertRequest is a hook invoked for every request the fake Venafi Cloud
// server receives; tests use it to assert on method/path/headers.
type AssertRequest func(t testing.TB, r *http.Request)

// FakeVenafiCloud starts a TLS test server mimicking api.venafi.cloud. The
// returned setAssert swaps (under a mutex) the per-request assertion hook.
// (Definition continues beyond this chunk; truncated tail kept verbatim.)
func FakeVenafiCloud(t *testing.T) (_ *httptest.Server, _ *x509.Certificate, setAssert func(AssertRequest)) {
	t.Helper()
	assertFn := func(_ testing.TB, _ *http.Request) {}
	assertFnMu := sync.Mutex{}
	setAssert = func(setAssert AssertRequest) {
		assertFnMu.Lock()
		defer assertFnMu.Unlock()
		assertFn = setAssert
	}
	server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		t.Logf("fake api.venafi.cloud received request: %s %s", r.Method, r.URL.Path)
		assertFnMu.Lock()
		defer assertFnMu.Unlock()
		assertFn(t, r)
		if r.URL.Path == "/v1/oauth2/v2.0/756db001-280e-11ee-84fb-991f3177e2d0/token" {
			_, _ = w.Write([]byte(`{"access_token":"VALID_ACCESS_TOKEN","expires_in":900,"token_type":"bearer"}`))
			return
		} else if r.URL.Path == "/v1/oauth/token/serviceaccount" {
			_, _ =
w.Write([]byte(`{"access_token":"VALID_ACCESS_TOKEN","expires_in":900,"token_type":"bearer"}`)) return } accessToken := strings.TrimPrefix(r.Header.Get("Authorization"), "Bearer ") apiKey := r.Header.Get("Tppl-Api-Key") if accessToken != "VALID_ACCESS_TOKEN" && apiKey != "VALID_API_KEY" { w.WriteHeader(http.StatusUnauthorized) _, _ = w.Write([]byte(`{"error":"expected header 'Authorization: Bearer VALID_ACCESS_TOKEN' or 'tppl-api-key: VALID_API_KEY', but got Authorization=` + r.Header.Get("Authorization") + ` and tppl-api-key=` + r.Header.Get("Tppl-Api-Key"))) return } switch r.URL.Path { case "/v1/tlspk/upload/clusterdata/no": if r.URL.Query().Get("name") != "test cluster name" { w.WriteHeader(http.StatusBadRequest) _, _ = w.Write([]byte(`{"error":"unexpected name query param in the test server: ` + r.URL.Query().Get("name") + `, expected: 'test cluster name'"}`)) return } _, _ = w.Write([]byte(`{"status":"ok","organization":"756db001-280e-11ee-84fb-991f3177e2d0"}`)) case "/v1/useraccounts": _, _ = w.Write([]byte(`{"user": {"username": "user","id": "76a126f0-280e-11ee-84fb-991f3177e2d0"}}`)) default: w.WriteHeader(http.StatusInternalServerError) _, _ = w.Write([]byte(`{"error":"unexpected path in the test server","path":"` + r.URL.Path + `"}`)) } })) t.Cleanup(server.Close) cert, err := x509.ParseCertificate(server.TLS.Certificates[0].Certificate[0]) require.NoError(t, err) return server, cert, setAssert } func FakeTPP(t testing.TB) (*httptest.Server, *x509.Certificate) { t.Helper() server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { t.Logf("fake tpp.example.com received request: %s %s", r.Method, r.URL.Path) accessToken := strings.TrimPrefix(r.Header.Get("Authorization"), "Bearer ") switch r.URL.Path { case "/vedsdk/Identity/Self": if accessToken != "VALID_ACCESS_TOKEN" { w.WriteHeader(http.StatusUnauthorized) return } _, _ = w.Write([]byte(`{"Identities":[{"Name":"TEST"}]}`)) case "/vedsdk/certificates/checkpolicy": _, 
_ = w.Write([]byte(`{"Policy":{"Subject":{"Organization":{"Value": "test-org"}}}}`)) default: w.WriteHeader(http.StatusInternalServerError) _, _ = w.Write([]byte(`{"error":"unexpected path in the test server","path":"` + r.URL.Path + `"}`)) } })) t.Cleanup(server.Close) cert, err := x509.ParseCertificate(server.TLS.Certificates[0].Certificate[0]) require.NoError(t, err) return server, cert } // FakeCyberArk returns an HTTP client that will route requests to mock CyberArk // Service Discovery, Identity and Discovery and Context APIs. This is useful // for testing code that uses all those APIs, such as // `cyberark.NewDatauploadClient`. // // The environment variable `ARK_DISCOVERY_API` is set to the URL of the mock // Service Discovery API, for the supplied `testing.TB` so that the client under // test will use the mock Service Discovery API. // // The returned HTTP client has a transport which logs requests and responses // depending on log level of the logger supplied in the context. func FakeCyberArk(t testing.TB) *http.Client { t.Helper() identityAPI, _ := identity.MockIdentityServer(t) discoveryContextAPI, _ := dataupload.MockDataUploadServer(t) httpClient := servicediscovery.MockDiscoveryServer(t, servicediscovery.Services{ Identity: servicediscovery.ServiceEndpoint{ API: identityAPI, }, DiscoveryContext: servicediscovery.ServiceEndpoint{ API: discoveryContextAPI, }, }) return httpClient } // Generated using: // // helm template ./deploy/charts/venafi-kubernetes-agent -n venafi --set crds.venafiConnection.include=true --show-only templates/venafi-connection-rbac.yaml | grep -ivE '(helm|\/version)' // // TODO(mael): Once we get the Makefile modules setup, we should generate this // based on the Helm chart rather than having it hardcoded here. 
// Ticket:
// https://venafi.atlassian.net/browse/VC-36331
//
// VenConnRBAC contains the Namespace, ServiceAccount, ClusterRole, and
// ClusterRoleBinding manifests needed by the VenafiConnection machinery in
// tests. Apply it with Parse + a Kubernetes client.
const VenConnRBAC = `
apiVersion: v1
kind: Namespace
metadata:
  name: venafi
---
# Source: venafi-kubernetes-agent/templates/venafi-connection-rbac.yaml
# The 'venafi-connection' service account is used by multiple
# controllers. When configuring which resources a VenafiConnection
# can access, the RBAC rules you create manually must point to this SA.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: venafi-connection
  namespace: "venafi"
  labels:
    app.kubernetes.io/name: "venafi-connection"
    app.kubernetes.io/instance: release-name
---
# Source: venafi-kubernetes-agent/templates/venafi-connection-rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: venafi-connection-role
  labels:
    app.kubernetes.io/name: "venafi-connection"
    app.kubernetes.io/instance: release-name
rules:
- apiGroups: [ "" ]
  resources: [ "namespaces" ]
  verbs: [ "get", "list", "watch" ]
- apiGroups: [ "jetstack.io" ]
  resources: [ "venaficonnections" ]
  verbs: [ "get", "list", "watch" ]
- apiGroups: [ "jetstack.io" ]
  resources: [ "venaficonnections/status" ]
  verbs: [ "get", "patch" ]
---
# Source: venafi-kubernetes-agent/templates/venafi-connection-rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: venafi-connection-rolebinding
  labels:
    app.kubernetes.io/name: "venafi-connection"
    app.kubernetes.io/instance: release-name
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: venafi-connection-role
subjects:
- kind: ServiceAccount
  name: venafi-connection
  namespace: "venafi"
`

================================================
FILE: pkg/testutil/undent.go
================================================
package testutil

import (
	"fmt"
)

// Undent removes leading indentation/white-space from given string and returns
// it as a string. Useful for inlining YAML manifests in Go code.
// Inline YAML
// manifests in the Go test files makes it easier to read the test case as
// opposed to reading verbose-y Go structs.
//
// This was copied from https://github.com/jimeh/Undent/blob/main/Undent.go, all
// credit goes to the author, Jim Myhrberg.
//
// For code readability purposes, it is possible to start the literal string
// with "\n", in which case, the first line is ignored. For example, in the
// following example, name and labels have the same indentation level but aren't
// aligned due to the leading '`':
//
//	Undent(
//		` name: foo
//	labels:
//	  foo: bar`)
//
// Instead, you can write a well-aligned text like this:
//
//	Undent(`
//		name: foo
//		labels:
//		  foo: bar`)
//
// For code readability purposes, it is also possible to not have the correct
// number of indentations in the last line. For example:
//
//	Undent(`
//		foo
//	`)
//
// For code readability purposes, you can also omit the indentations for empty
// lines. For example:
//
//	Undent(`
//	    foo    <---- 4 spaces
//	           <---- no indentation here
//	    bar    <---- 4 spaces
//	`)
func Undent(s string) string {
	if len(s) == 0 {
		return ""
	}

	// indentsPerLine is the minimal indent level that we have found up to now.
	// For example, "\t\t" corresponds to an indentation of 2, and "   " an
	// indentation of 3.
	indentsPerLine := 99999999999
	// indentedLinesCnt counts the indented lines seen so far; it is only used
	// to pre-size the output buffer below.
	indentedLinesCnt := 0

	// lineOffsets tells you where the beginning of each line is in terms of
	// offset. Example:
	//	"\tfoo\n\tbar\n" -> [0, 5]
	var lineOffsets []int

	// For code readability purposes, users can leave the first line empty.
	if s[0] != '\n' {
		lineOffsets = append(lineOffsets, 0)
	}

	// First pass: record line start offsets and find the minimal indent.
	curLineIndent := 0 // Number of tabs or spaces in the current line.
	for pos := range s {
		if s[pos] == '\n' {
			if pos+1 < len(s) {
				lineOffsets = append(lineOffsets, pos+1)
			}
			curLineIndent = 0
			continue
		}
		// Skip to the next line if we are already beyond the minimal indent
		// level that we have found so far. The rest of this line will be kept
		// as-is.
		if curLineIndent >= indentsPerLine {
			continue
		}
		// The minimal indent level that we have found so far in previous lines
		// might not be the smallest indent level. Once we hit the first
		// non-indent char, let's check whether it is the new minimal indent
		// level.
		if s[pos] != ' ' && s[pos] != '\t' {
			if curLineIndent != 0 {
				indentedLinesCnt++
			}
			indentsPerLine = curLineIndent
			continue
		}
		curLineIndent++
	}

	// Second pass: extract each line without indentation.
	out := make([]byte, 0, len(s)-(indentsPerLine*indentedLinesCnt))
	for line := range lineOffsets {
		first := lineOffsets[line]
		// Index of the last character of the line. It is often the '\n'
		// character, except for the last line.
		var last int
		if line == len(lineOffsets)-1 {
			last = len(s) - 1
		} else {
			last = lineOffsets[line+1] - 1
		}
		var lineStr string
		switch {
		// Case 0: if the first line is empty, let's skip it.
		case line == 0 && first == last:
			lineStr = ""
		// Case 1: we want the user to be able to omit some tabs or spaces in
		// the last line for readability purposes.
		case line == len(lineOffsets)-1 && s[last] != '\n' && isIndent(s[first:last+1]):
			lineStr = ""
		// Case 2: we want the user to be able to omit the indentations for
		// empty lines for readability purposes.
		case first == last:
			lineStr = "\n"
		// Case 3: error when a line doesn't contain the correct indentation
		// level.
		case first+indentsPerLine > last:
			panic(fmt.Sprintf("line %d has an incorrect indent level: %q", line, s[first:last]))
		// Case 4: at this point, the indent level is correct, so let's remove
		// the indentation and keep the rest.
		case first+indentsPerLine <= last:
			lineStr = s[first+indentsPerLine : last+1]
		default:
			panic(fmt.Sprintf("unexpected case: first: %d, last: %d, indentsPerLine: %d, line: %q", first, last, indentsPerLine, s[first:last]))
		}
		out = append(out, lineStr...)
	}
	return string(out)
}

// isIndent returns true if the given string is only made of spaces or
// tabs.
func isIndent(s string) bool {
	for _, r := range s {
		if r != ' ' && r != '\t' {
			return false
		}
	}
	return true
}

================================================
FILE: pkg/testutil/undent_test.go
================================================
package testutil

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// This is a test for the testing func "Undent". I wasn't confident with
// Undent's behavior, so I wrote this test to verify it.
func Test_Undent(t *testing.T) {
	t.Run("empty string", runTest_Undent(``, ``))
	t.Run("if last line has the same indent as other lines and, it is ignored", runTest_Undent(`
		foo
		bar
		`, "foo\nbar\n"))
	t.Run("you can un-indent the last line to make the Go code more readable", runTest_Undent(`
		foo
		bar
	`, "foo\nbar\n"))
	t.Run("last line may not be an empty line", runTest_Undent(`
		foo
		bar`, "foo\nbar"))
	t.Run("1 empty line is preserved", runTest_Undent("\t\tfoo\n\t\t\n\t\tbar\n", "foo\n\nbar\n"))
	t.Run("2 empty lines are preserved", runTest_Undent("\t\tfoo\n\t\t\n\t\t\n\t\tbar\n", "foo\n\n\nbar\n"))
	t.Run("you can also omit the tabs or spaces for empty lines", runTest_Undent(`
		foo

		bar
		`, "foo\n\nbar\n"))
	t.Run("bug fix: last char is not omitted", runTest_Undent("\t\t{\n\t\t \"kind\": \"Secret\"\n\t\t}", "{\n \"kind\": \"Secret\"\n}"))
}

// runTest_Undent returns a subtest func asserting Undent(given) == expected.
func runTest_Undent(given, expected string) func(t *testing.T) {
	return func(t *testing.T) {
		t.Helper()
		got := Undent(given)
		assert.Equal(t, expected, got)
	}
}

================================================
FILE: pkg/version/version.go
================================================
package version

import (
	"fmt"
	"net/http"
)

// These variables are injected at build time.

// PreflightVersion hosts the version of the app.
var PreflightVersion = "development"

// Commit is the commit hash of the build
var Commit string

// BuildDate is the date it was built
var BuildDate string

// GoVersion is the go version that was used to compile this
var GoVersion string

// UserAgent returns the User-Agent string used by every HTTP request made by
// the Kubernetes Agent. It is implemented in one place so the value stays
// uniform across the codebase.
//
// TODO(wallrj): The prefix "Mozilla/5.0" is currently required by the CyberArk inventory API. Remove the prefix when CyberArk relax the API security settings.
func UserAgent() string {
	const product = "venafi-kubernetes-agent"
	return fmt.Sprintf("Mozilla/5.0 %s/%s", product, PreflightVersion)
}

// SetUserAgent stamps the standard user agent onto the given request.
func SetUserAgent(req *http.Request) {
	ua := UserAgent()
	req.Header.Set("User-Agent", ua)
}