Showing preview only (1,486K chars total). Download the full file or copy to clipboard to get everything.
Repository: jetstack/preflight
Branch: master
Commit: 9db49f9f73b5
Files: 256
Total size: 1.4 MB
Directory structure:
gitextract_2mttmn_y/
├── .envrc.template
├── .github/
│ ├── ISSUE_TEMPLATE/
│ │ └── bug_report.md
│ ├── actions/
│ │ └── repo_access/
│ │ └── action.yaml
│ ├── chainguard/
│ │ └── make-self-upgrade.sts.yaml
│ ├── renovate.json5
│ └── workflows/
│ ├── govulncheck.yaml
│ ├── make-self-upgrade.yaml
│ ├── release.yml
│ └── tests.yaml
├── .gitignore
├── .golangci.yaml
├── CONTRIBUTING.md
├── LICENSE
├── LICENSES
├── Makefile
├── OWNERS
├── OWNERS_ALIASES
├── README.md
├── RELEASE.md
├── agent.yaml
├── api/
│ ├── agent.go
│ ├── common.go
│ ├── datareading.go
│ └── datareading_test.go
├── cmd/
│ ├── agent.go
│ ├── agent_test.go
│ ├── ark/
│ │ └── main.go
│ ├── echo.go
│ ├── helpers.go
│ ├── root.go
│ └── version.go
├── deploy/
│ └── charts/
│ ├── disco-agent/
│ │ ├── .helmignore
│ │ ├── Chart.yaml
│ │ ├── README.md
│ │ ├── templates/
│ │ │ ├── NOTES.txt
│ │ │ ├── _helpers.tpl
│ │ │ ├── configmap.yaml
│ │ │ ├── deployment.yaml
│ │ │ ├── poddisruptionbudget.yaml
│ │ │ ├── podmonitor.yaml
│ │ │ ├── rbac.yaml
│ │ │ └── serviceaccount.yaml
│ │ ├── tests/
│ │ │ ├── README.md
│ │ │ ├── __snapshot__/
│ │ │ │ └── configmap_test.yaml.snap
│ │ │ └── configmap_test.yaml
│ │ ├── values.linter.exceptions
│ │ ├── values.schema.json
│ │ └── values.yaml
│ ├── discovery-agent/
│ │ ├── .helmignore
│ │ ├── Chart.yaml
│ │ ├── README.md
│ │ ├── templates/
│ │ │ ├── NOTES.txt
│ │ │ ├── _helpers.tpl
│ │ │ ├── configmap.yaml
│ │ │ ├── deployment.yaml
│ │ │ ├── poddisruptionbudget.yaml
│ │ │ ├── podmonitor.yaml
│ │ │ ├── rbac.yaml
│ │ │ └── serviceaccount.yaml
│ │ ├── tests/
│ │ │ ├── configmap_test.yaml
│ │ │ ├── deployment_test.yaml
│ │ │ ├── poddisruptionbudget_test.yaml
│ │ │ ├── podmonitor_test.yaml
│ │ │ ├── rbac_test.yaml
│ │ │ └── serviceaccount_test.yaml
│ │ ├── values.linter.exceptions
│ │ ├── values.schema.json
│ │ └── values.yaml
│ └── venafi-kubernetes-agent/
│ ├── .helmignore
│ ├── Chart.yaml
│ ├── README.md
│ ├── crd_bases/
│ │ ├── crd.footer.yaml
│ │ ├── crd.header-without-validations.yaml
│ │ ├── crd.header.yaml
│ │ └── jetstack.io_venaficonnections.yaml
│ ├── templates/
│ │ ├── NOTES.txt
│ │ ├── _helpers.tpl
│ │ ├── _venafi-connection.tpl
│ │ ├── configmap.yaml
│ │ ├── deployment.yaml
│ │ ├── poddisruptionbudget.yaml
│ │ ├── podmonitor.yaml
│ │ ├── rbac.yaml
│ │ ├── serviceaccount.yaml
│ │ ├── venafi-connection-crd.without-validations.yaml
│ │ ├── venafi-connection-crd.yaml
│ │ ├── venafi-connection-rbac.yaml
│ │ └── venafi-rbac.yaml
│ ├── tests/
│ │ ├── __snapshot__/
│ │ │ └── configmap_test.yaml.snap
│ │ ├── configmap_test.yaml
│ │ ├── deployment_test.yaml
│ │ └── values/
│ │ └── custom-volumes.yaml
│ ├── values.linter.exceptions
│ ├── values.schema.json
│ └── values.yaml
├── docs/
│ └── datagatherers/
│ ├── k8s-discovery.md
│ ├── k8s-dynamic.md
│ └── local.md
├── examples/
│ ├── cert-manager-agent.yaml
│ ├── echo/
│ │ ├── example.json
│ │ └── example2.json
│ ├── localfile/
│ │ ├── config.yaml
│ │ └── input.json
│ ├── machinehub/
│ │ ├── config.yaml
│ │ └── input.json
│ ├── machinehub.yaml
│ ├── one-shot-oidc.yaml
│ └── one-shot-secret.yaml
├── go.mod
├── go.sum
├── hack/
│ ├── ark/
│ │ ├── cluster-external-secret.yaml
│ │ ├── cluster-secret-store.yaml
│ │ ├── conjur-connect-configmap.yaml
│ │ ├── external-secret.yaml
│ │ ├── secret-store.yaml
│ │ └── test-e2e.sh
│ ├── e2e/
│ │ ├── application-team-1.yaml
│ │ ├── test.sh
│ │ ├── values.venafi-kubernetes-agent.yaml
│ │ └── venafi-components.yaml
│ └── ngts/
│ ├── custom_ca.yaml
│ └── test-e2e.sh
├── internal/
│ ├── cyberark/
│ │ ├── api/
│ │ │ ├── telemetry.go
│ │ │ └── telemetry_test.go
│ │ ├── client.go
│ │ ├── client_test.go
│ │ ├── dataupload/
│ │ │ ├── dataupload.go
│ │ │ ├── dataupload_test.go
│ │ │ └── mock.go
│ │ ├── identity/
│ │ │ ├── advance_authentication_test.go
│ │ │ ├── authenticated_http_client.go
│ │ │ ├── cmd/
│ │ │ │ └── testidentity/
│ │ │ │ └── main.go
│ │ │ ├── identity.go
│ │ │ ├── identity_test.go
│ │ │ ├── mock.go
│ │ │ ├── start_authentication_test.go
│ │ │ └── testdata/
│ │ │ ├── advance_authentication_failure.json
│ │ │ ├── advance_authentication_success.json
│ │ │ ├── start_authentication_bad_user_session_id.json
│ │ │ ├── start_authentication_failure.json
│ │ │ ├── start_authentication_success.json
│ │ │ ├── start_authentication_success_multiple_challenges.json
│ │ │ ├── start_authentication_success_multiple_mechanisms.json
│ │ │ └── start_authentication_success_no_up_mechanism.json
│ │ ├── servicediscovery/
│ │ │ ├── discovery.go
│ │ │ ├── discovery_test.go
│ │ │ ├── mock.go
│ │ │ └── testdata/
│ │ │ ├── README.md
│ │ │ └── discovery_success.json.template
│ │ └── testing/
│ │ └── testing.go
│ └── envelope/
│ ├── doc.go
│ ├── keyfetch/
│ │ ├── client.go
│ │ ├── client_test.go
│ │ ├── doc.go
│ │ ├── fake.go
│ │ └── fake_test.go
│ ├── rsa/
│ │ ├── doc.go
│ │ ├── encryptor.go
│ │ ├── encryptor_test.go
│ │ ├── keys.go
│ │ └── keys_test.go
│ └── types.go
├── klone.yaml
├── main.go
├── make/
│ ├── 00_mod.mk
│ ├── 02_mod.mk
│ ├── _shared/
│ │ ├── generate-verify/
│ │ │ ├── 00_mod.mk
│ │ │ ├── 02_mod.mk
│ │ │ └── util/
│ │ │ └── verify.sh
│ │ ├── go/
│ │ │ ├── .golangci.override.yaml
│ │ │ ├── 01_mod.mk
│ │ │ ├── README.md
│ │ │ └── base/
│ │ │ └── .github/
│ │ │ └── workflows/
│ │ │ └── govulncheck.yaml
│ │ ├── helm/
│ │ │ ├── 01_mod.mk
│ │ │ ├── crd.template.footer.yaml
│ │ │ ├── crd.template.header.yaml
│ │ │ ├── crds.mk
│ │ │ ├── crds_dir.README.md
│ │ │ ├── deploy.mk
│ │ │ └── helm.mk
│ │ ├── help/
│ │ │ ├── 01_mod.mk
│ │ │ └── help.sh
│ │ ├── kind/
│ │ │ ├── 00_kind_image_versions.mk
│ │ │ ├── 00_mod.mk
│ │ │ ├── 01_mod.mk
│ │ │ ├── kind-image-preload.mk
│ │ │ └── kind.mk
│ │ ├── klone/
│ │ │ └── 01_mod.mk
│ │ ├── licenses/
│ │ │ ├── 00_mod.mk
│ │ │ ├── 01_mod.mk
│ │ │ └── licenses.tmpl
│ │ ├── oci-build/
│ │ │ ├── 00_mod.mk
│ │ │ └── 01_mod.mk
│ │ ├── oci-publish/
│ │ │ ├── 00_mod.mk
│ │ │ ├── 01_mod.mk
│ │ │ └── image-exists.sh
│ │ ├── repository-base/
│ │ │ ├── 01_mod.mk
│ │ │ ├── base/
│ │ │ │ ├── .github/
│ │ │ │ │ ├── chainguard/
│ │ │ │ │ │ └── make-self-upgrade.sts.yaml
│ │ │ │ │ └── workflows/
│ │ │ │ │ └── make-self-upgrade.yaml
│ │ │ │ ├── Makefile
│ │ │ │ └── OWNERS_ALIASES
│ │ │ └── renovate-bootstrap-config.json5
│ │ └── tools/
│ │ ├── 00_mod.mk
│ │ └── util/
│ │ ├── checkhash.sh
│ │ ├── hash.sh
│ │ └── lock.sh
│ ├── ark/
│ │ ├── 00_mod.mk
│ │ └── 02_mod.mk
│ ├── connection_crd/
│ │ └── main.go
│ ├── extra_tools.mk
│ ├── ngts/
│ │ ├── 00_mod.mk
│ │ └── 02_mod.mk
│ └── test-unit.mk
└── pkg/
├── agent/
│ ├── config.go
│ ├── config_test.go
│ ├── dummy_data_gatherer.go
│ ├── metrics.go
│ └── run.go
├── client/
│ ├── client.go
│ ├── client_api_token.go
│ ├── client_cyberark.go
│ ├── client_cyberark_convertdatareadings_test.go
│ ├── client_cyberark_test.go
│ ├── client_file.go
│ ├── client_file_test.go
│ ├── client_ngts.go
│ ├── client_ngts_test.go
│ ├── client_oauth.go
│ ├── client_venafi_cloud.go
│ ├── client_venconn.go
│ ├── client_venconn_test.go
│ └── util.go
├── datagatherer/
│ ├── datagatherer.go
│ ├── k8sdiscovery/
│ │ └── discovery.go
│ ├── k8sdynamic/
│ │ ├── cache.go
│ │ ├── cache_test.go
│ │ ├── dynamic.go
│ │ ├── dynamic_test.go
│ │ ├── fieldfilter.go
│ │ └── fieldfilter_test.go
│ ├── local/
│ │ └── local.go
│ └── oidc/
│ ├── oidc.go
│ └── oidc_test.go
├── echo/
│ ├── echo.go
│ └── echo_test.go
├── kubeconfig/
│ ├── client.go
│ ├── client_test.go
│ └── kubeconfig.go
├── logs/
│ ├── logs.go
│ └── logs_test.go
├── permissions/
│ ├── generate.go
│ └── generate_test.go
├── testutil/
│ ├── envtest.go
│ ├── undent.go
│ └── undent_test.go
└── version/
└── version.go
================================================
FILE CONTENTS
================================================
================================================
FILE: .envrc.template
================================================
# Example .envrc file for use with direnv.
# Copy this file to .envrc and edit the values as required.
# Do not check in your .envrc file to source control as it may contain secrets.
# The following variables are required by the E2E test script: ./hack/e2e/test.sh.
export VEN_API_KEY= # your Venafi Cloud API key with full permissions
export VEN_API_KEY_PULL= # your Venafi Cloud API key with pull-only permissions
export VEN_ZONE= # the Venafi Cloud zone to use for certificate requests
export VEN_VCP_REGION= # the Venafi Cloud region to use (us or eu)
export VEN_API_HOST= # the Venafi Cloud API host (usually api.venafi.cloud or api.venafi.eu)
export OCI_BASE= # the base URL for the OCI registry where the Agent chart and image will be pushed
export CLOUDSDK_CORE_PROJECT= # the GCP project ID where a GKE cluster will be created.
export CLOUDSDK_COMPUTE_ZONE= # the GCP zone where a GKE cluster will be created. E.g. europe-west2-b
export CLUSTER_NAME= # the name of the GKE cluster which will be created. E.g. cluster-1
# The following variables are required for CyberArk / MachineHub integration tests.
export ARK_SUBDOMAIN= # your CyberArk tenant subdomain e.g. tlskp-test
export ARK_USERNAME= # your CyberArk username
export ARK_SECRET= # your CyberArk password
# OPTIONAL: the URL for the CyberArk Discovery API if not using the production environment
export ARK_DISCOVERY_API=https://platform-discovery.integration-cyberark.cloud/
================================================
FILE: .github/ISSUE_TEMPLATE/bug_report.md
================================================
---
name: Bug report
about: Issue for something that isn't working as expected
title: ''
labels: ''
assignees: ''
---
<Summary of the bug that you've encountered>
**What happened?**
What is the current bug behavior?
Give all the context you can, provide relevant logs and/or screenshots.
**What should have happened?**
Describe what you expected to happen.
**Possible fixes**
This section is optional and should include possible solutions to explore and discuss further.
================================================
FILE: .github/actions/repo_access/action.yaml
================================================
name: 'Setup repo access'
description: 'Sets up authentication to GitHub repos'
inputs:
DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB:
required: true
description: "DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB secret"
outputs: {}
runs:
using: "composite"
steps:
- name: Configure jetstack/venafi-connection-lib repo pull access
shell: bash
run: |
mkdir ~/.ssh
chmod 700 ~/.ssh
echo "${{ inputs.DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB }}" > ~/.ssh/venafi_connection_lib_id
chmod 600 ~/.ssh/venafi_connection_lib_id
cat <<EOT >> ~/.ssh/config
Host venafi-connection-lib.github.com
HostName github.com
IdentityFile ~/.ssh/venafi_connection_lib_id
IdentitiesOnly yes
EOT
cat <<EOT >> ~/.gitconfig
[url "git@venafi-connection-lib.github.com:jetstack/venafi-connection-lib"]
insteadOf = https://github.com/jetstack/venafi-connection-lib
EOT
echo "GOPRIVATE=github.com/jetstack/venafi-connection-lib" >> $GITHUB_ENV
================================================
FILE: .github/chainguard/make-self-upgrade.sts.yaml
================================================
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
# Edit https://github.com/cert-manager/makefile-modules/blob/main/modules/repository-base/base/.github/chainguard/make-self-upgrade.sts.yaml instead.
issuer: https://token.actions.githubusercontent.com
subject_pattern: ^repo:jetstack/jetstack-secure:ref:refs/heads/(main|master)$
permissions:
contents: write
pull_requests: write
workflows: write
================================================
FILE: .github/renovate.json5
================================================
{
$schema: 'https://docs.renovatebot.com/renovate-schema.json',
extends: [
'github>cert-manager/makefile-modules:renovate-config.json5',
],
}
================================================
FILE: .github/workflows/govulncheck.yaml
================================================
# This file is MANUALLY maintained, but was originally based on the makefile-modules govulncheck workflow. See the original:
# https://github.com/cert-manager/makefile-modules/blob/main/modules/go/base/.github/workflows/govulncheck.yaml
# This file is separated from the upstream file so we can add additional auth for pulling
# private dependencies. Govulncheck doesn't seem to be able to support skipping private
# dependencies.
# Run govulncheck at midnight every night on the main branch,
# to alert us to recent vulnerabilities which affect the Go code in this
# project.
name: govulncheck
on:
workflow_dispatch: {}
schedule:
- cron: '0 0 * * *'
permissions:
contents: read
jobs:
govulncheck:
runs-on: ubuntu-latest
if: github.repository == 'jetstack/jetstack-secure'
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
# Adding `fetch-depth: 0` makes sure tags are also fetched. We need
# the tags so `git describe` returns a valid version.
# see https://github.com/actions/checkout/issues/701 for extra info about this option
with: { fetch-depth: 0 }
# NOTE: This step is the change from the upstream workflow.
# We need credentials to pull the private dependency.
- uses: ./.github/actions/repo_access
with:
DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB: ${{ secrets.DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB }}
- id: go-version
run: |
make print-go-version >> "$GITHUB_OUTPUT"
- uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
with:
go-version: ${{ steps.go-version.outputs.result }}
- run: make verify-govulncheck
================================================
FILE: .github/workflows/make-self-upgrade.yaml
================================================
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
# Edit https://github.com/cert-manager/makefile-modules/blob/main/modules/repository-base/base/.github/workflows/make-self-upgrade.yaml instead.
name: make-self-upgrade
concurrency: make-self-upgrade
on:
workflow_dispatch: {}
schedule:
- cron: '0 0 * * *'
permissions:
contents: read
jobs:
self_upgrade:
runs-on: ubuntu-latest
if: github.repository == 'jetstack/jetstack-secure'
permissions:
id-token: write
env:
SOURCE_BRANCH: "${{ github.ref_name }}"
SELF_UPGRADE_BRANCH: "self-upgrade-${{ github.ref_name }}"
steps:
- name: Fail if branch is not head of branch.
if: ${{ !startsWith(github.ref, 'refs/heads/') && env.SOURCE_BRANCH != '' && env.SELF_UPGRADE_BRANCH != '' }}
run: |
echo "This workflow should not be run on a non-branch-head."
exit 1
- name: Octo STS Token Exchange
uses: octo-sts/action@f603d3be9d8dd9871a265776e625a27b00effe05 # v1.1.1
id: octo-sts
with:
scope: 'jetstack/jetstack-secure'
identity: make-self-upgrade
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
# Adding `fetch-depth: 0` makes sure tags are also fetched. We need
# the tags so `git describe` returns a valid version.
# see https://github.com/actions/checkout/issues/701 for extra info about this option
with:
fetch-depth: 0
token: ${{ steps.octo-sts.outputs.token }}
- id: go-version
run: |
make print-go-version >> "$GITHUB_OUTPUT"
- uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0
with:
go-version: ${{ steps.go-version.outputs.result }}
- run: |
git checkout -B "$SELF_UPGRADE_BRANCH"
- run: |
make -j upgrade-klone
make -j generate
- id: is-up-to-date
shell: bash
run: |
git_status=$(git status -s)
is_up_to_date="true"
if [ -n "$git_status" ]; then
is_up_to_date="false"
echo "The following changes will be committed:"
echo "$git_status"
fi
echo "result=$is_up_to_date" >> "$GITHUB_OUTPUT"
- if: ${{ steps.is-up-to-date.outputs.result != 'true' }}
run: |
git config --global user.name "cert-manager-bot"
git config --global user.email "cert-manager-bot@users.noreply.github.com"
git add -A && git commit -m "BOT: run 'make upgrade-klone' and 'make generate'" --signoff
git push -f origin "$SELF_UPGRADE_BRANCH"
- if: ${{ steps.is-up-to-date.outputs.result != 'true' }}
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
with:
github-token: ${{ steps.octo-sts.outputs.token }}
script: |
const { repo, owner } = context.repo;
const pulls = await github.rest.pulls.list({
owner: owner,
repo: repo,
head: owner + ':' + process.env.SELF_UPGRADE_BRANCH,
base: process.env.SOURCE_BRANCH,
state: 'open',
});
if (pulls.data.length < 1) {
const result = await github.rest.pulls.create({
title: '[CI] Merge ' + process.env.SELF_UPGRADE_BRANCH + ' into ' + process.env.SOURCE_BRANCH,
owner: owner,
repo: repo,
head: process.env.SELF_UPGRADE_BRANCH,
base: process.env.SOURCE_BRANCH,
body: [
'This PR is auto-generated to bump the Makefile modules.',
].join('\n'),
});
await github.rest.issues.addLabels({
owner,
repo,
issue_number: result.data.number,
labels: ['ok-to-test', 'skip-review', 'release-note-none', 'kind/cleanup']
});
}
================================================
FILE: .github/workflows/release.yml
================================================
name: release
on:
push:
tags:
- "v*"
env:
VERSION: ${{ github.ref_name }}
jobs:
build_and_push:
runs-on: ubuntu-latest
permissions:
contents: read # needed for checkout
id-token: write # needed for keyless signing & google auth
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: ./.github/actions/repo_access
with:
DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB: ${{ secrets.DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB }}
- id: go-version
run: |
make print-go-version >> "$GITHUB_OUTPUT"
- uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: quay.io
username: ${{ secrets.QUAY_USERNAME }}
password: ${{ secrets.QUAY_PASSWORD }}
- uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
with:
go-version: ${{ steps.go-version.outputs.result }}
- id: release
run: make release ark-release ngts-release
outputs:
RELEASE_OCI_PREFLIGHT_IMAGE: ${{ steps.release.outputs.RELEASE_OCI_PREFLIGHT_IMAGE }}
RELEASE_OCI_PREFLIGHT_TAG: ${{ steps.release.outputs.RELEASE_OCI_PREFLIGHT_TAG }}
RELEASE_HELM_CHART_IMAGE: ${{ steps.release.outputs.RELEASE_HELM_CHART_IMAGE }}
RELEASE_HELM_CHART_VERSION: ${{ steps.release.outputs.RELEASE_HELM_CHART_VERSION }}
ARK_IMAGE: ${{ steps.release.outputs.ARK_IMAGE }}
ARK_IMAGE_TAG: ${{ steps.release.outputs.ARK_IMAGE_TAG }}
ARK_IMAGE_DIGEST: ${{ steps.release.outputs.ARK_IMAGE_DIGEST }}
ARK_CHART: ${{ steps.release.outputs.ARK_CHART }}
ARK_CHART_TAG: ${{ steps.release.outputs.ARK_CHART_TAG }}
ARK_CHART_DIGEST: ${{ steps.release.outputs.ARK_CHART_DIGEST }}
NGTS_IMAGE: ${{ steps.release.outputs.NGTS_IMAGE }}
NGTS_IMAGE_TAG: ${{ steps.release.outputs.NGTS_IMAGE_TAG }}
NGTS_IMAGE_DIGEST: ${{ steps.release.outputs.NGTS_IMAGE_DIGEST }}
NGTS_CHART: ${{ steps.release.outputs.NGTS_CHART }}
NGTS_CHART_TAG: ${{ steps.release.outputs.NGTS_CHART_TAG }}
NGTS_CHART_DIGEST: ${{ steps.release.outputs.NGTS_CHART_DIGEST }}
github_release:
runs-on: ubuntu-latest
needs: build_and_push
permissions:
contents: write # needed for creating a PR
pull-requests: write # needed for creating a PR
steps:
- run: |
touch .notes-file
echo "OCI_PREFLIGHT_IMAGE: ${{ needs.build_and_push.outputs.RELEASE_OCI_PREFLIGHT_IMAGE }}" >> .notes-file
echo "OCI_PREFLIGHT_TAG: ${{ needs.build_and_push.outputs.RELEASE_OCI_PREFLIGHT_TAG }}" >> .notes-file
echo "HELM_CHART_IMAGE: ${{ needs.build_and_push.outputs.RELEASE_HELM_CHART_IMAGE }}" >> .notes-file
echo "HELM_CHART_VERSION: ${{ needs.build_and_push.outputs.RELEASE_HELM_CHART_VERSION }}" >> .notes-file
echo "ARK_IMAGE: ${{ needs.build_and_push.outputs.ARK_IMAGE }}" >> .notes-file
echo "ARK_IMAGE_TAG: ${{ needs.build_and_push.outputs.ARK_IMAGE_TAG }}" >> .notes-file
echo "ARK_IMAGE_DIGEST: ${{ needs.build_and_push.outputs.ARK_IMAGE_DIGEST }}" >> .notes-file
echo "ARK_CHART: ${{ needs.build_and_push.outputs.ARK_CHART }}" >> .notes-file
echo "ARK_CHART_TAG: ${{ needs.build_and_push.outputs.ARK_CHART_TAG }}" >> .notes-file
echo "ARK_CHART_DIGEST: ${{ needs.build_and_push.outputs.ARK_CHART_DIGEST }}" >> .notes-file
echo "NGTS_IMAGE: ${{ needs.build_and_push.outputs.NGTS_IMAGE }}" >> .notes-file
echo "NGTS_IMAGE_TAG: ${{ needs.build_and_push.outputs.NGTS_IMAGE_TAG }}" >> .notes-file
echo "NGTS_IMAGE_DIGEST: ${{ needs.build_and_push.outputs.NGTS_IMAGE_DIGEST }}" >> .notes-file
echo "NGTS_CHART: ${{ needs.build_and_push.outputs.NGTS_CHART }}" >> .notes-file
echo "NGTS_CHART_TAG: ${{ needs.build_and_push.outputs.NGTS_CHART_TAG }}" >> .notes-file
echo "NGTS_CHART_DIGEST: ${{ needs.build_and_push.outputs.NGTS_CHART_DIGEST }}" >> .notes-file
- env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
gh release create "$VERSION" \
--repo="$GITHUB_REPOSITORY" \
--title="${VERSION}" \
--draft \
--verify-tag \
--notes-file .notes-file
================================================
FILE: .github/workflows/tests.yaml
================================================
name: tests
on:
push:
branches: [master]
pull_request: {}
jobs:
verify:
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
# Adding `fetch-depth: 0` makes sure tags are also fetched. We need
# the tags so `git describe` returns a valid version.
# see https://github.com/actions/checkout/issues/701 for extra info about this option
with: { fetch-depth: 0 }
- uses: ./.github/actions/repo_access
with:
DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB: ${{ secrets.DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB }}
- id: go-version
run: |
make print-go-version >> "$GITHUB_OUTPUT"
- uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
with:
go-version: ${{ steps.go-version.outputs.result }}
- uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: _bin/downloaded
key: downloaded-${{ runner.os }}-${{ hashFiles('klone.yaml') }}-verify
- run: make -j verify
test:
runs-on: ubuntu-latest
timeout-minutes: 15
permissions:
contents: read # needed for checkout
id-token: write # needed for google auth
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
# Adding `fetch-depth: 0` makes sure tags are also fetched. We need
# the tags so `git describe` returns a valid version.
# see https://github.com/actions/checkout/issues/701 for extra info about this option
with: { fetch-depth: 0 }
- uses: ./.github/actions/repo_access
with:
DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB: ${{ secrets.DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB }}
- id: go-version
run: |
make print-go-version >> "$GITHUB_OUTPUT"
- uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
with:
go-version: ${{ steps.go-version.outputs.result }}
- uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: _bin/downloaded
key: downloaded-${{ runner.os }}-${{ hashFiles('klone.yaml') }}-test-unit
# NB: helm unit tests will be run by "make verify", so we don't run it here
- run: make -j test-unit
env:
# These environment variables are required to run the CyberArk client integration tests
ARK_DISCOVERY_API: https://platform-discovery.integration-cyberark.cloud/
ARK_SUBDOMAIN: ${{ secrets.ARK_SUBDOMAIN }}
ARK_USERNAME: ${{ secrets.ARK_USERNAME }}
ARK_SECRET: ${{ secrets.ARK_SECRET }}
ark-test-e2e:
# TEMPORARY: require an explicit label to test disco-agent until the test environment fixes a recurring issue
# where the e2e fails with a 400 error relating to "conflicting tagging values"
# The test is flaky, not broken and re-running eventually makes it pass - but that delays progress on
# other unrelated work.
if: contains(github.event.pull_request.labels.*.name, 'test-ark')
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
# Adding `fetch-depth: 0` makes sure tags are also fetched. We need
# the tags so `git describe` returns a valid version.
# see https://github.com/actions/checkout/issues/701 for extra info about this option
with: { fetch-depth: 0 }
- uses: ./.github/actions/repo_access
with:
DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB: ${{ secrets.DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB }}
- id: go-version
run: |
make print-go-version >> "$GITHUB_OUTPUT"
- uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
with:
go-version: ${{ steps.go-version.outputs.result }}
- uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: _bin/downloaded
key: downloaded-${{ runner.os }}-${{ hashFiles('klone.yaml') }}-test-unit
- run: make -j ark-test-e2e
env:
OCI_BASE: ${{ secrets.ARK_OCI_BASE }}
# These environment variables are required to connect to CyberArk Disco APIs
ARK_DISCOVERY_API: https://platform-discovery.integration-cyberark.cloud/
ARK_SUBDOMAIN: ${{ secrets.ARK_SUBDOMAIN }}
ARK_USERNAME: ${{ secrets.ARK_USERNAME }}
ARK_SECRET: ${{ secrets.ARK_SECRET }}
ngts-test-e2e:
# TEMPORARY: require an explicit label to test NGTS until we have a stable test environment
if: contains(github.event.pull_request.labels.*.name, 'test-ngts')
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
# Adding `fetch-depth: 0` makes sure tags are also fetched. We need
# the tags so `git describe` returns a valid version.
# see https://github.com/actions/checkout/issues/701 for extra info about this option
with: { fetch-depth: 0 }
- uses: ./.github/actions/repo_access
with:
DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB: ${{ secrets.DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB }}
- id: go-version
run: |
make print-go-version >> "$GITHUB_OUTPUT"
- uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
with:
go-version: ${{ steps.go-version.outputs.result }}
- uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: _bin/downloaded
key: downloaded-${{ runner.os }}-${{ hashFiles('klone.yaml') }}-test-unit
- run: make -j ngts-test-e2e
env:
OCI_BASE: ${{ secrets.NGTS_OCI_BASE }}
NGTS_CLIENT_ID: ${{ secrets.NGTS_CLIENT_ID }}
NGTS_PRIVATE_KEY: ${{ secrets.NGTS_PRIVATE_KEY }}
NGTS_TSG_ID: ${{ secrets.NGTS_TSG_ID }}
test-e2e:
if: contains(github.event.pull_request.labels.*.name, 'test-e2e')
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
# Adding `fetch-depth: 0` makes sure tags are also fetched. We need
# the tags so `git describe` returns a valid version.
# see https://github.com/actions/checkout/issues/701 for extra info about this option
with: { fetch-depth: 0 }
- uses: ./.github/actions/repo_access
with:
DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB: ${{ secrets.DEPLOY_KEY_READ_VENAFI_CONNECTION_LIB }}
- name: Authenticate to Google Cloud
uses: google-github-actions/auth@7c6bc770dae815cd3e89ee6cdf493a5fab2cc093 # v3.0.0
with:
credentials_json: '${{ secrets.GCP_SA_KEY }}'
- name: Set up gcloud
uses: google-github-actions/setup-gcloud@aa5489c8933f4cc7a4f7d45035b3b1440c9c10db # v3.0.1
with:
install_components: "gke-gcloud-auth-plugin"
project_id: machineidentitysecurity-jsci-e
- name: Configure Docker for Google Artifact Registry
run: gcloud auth configure-docker europe-west1-docker.pkg.dev
- id: go-version
run: |
make print-go-version >> "$GITHUB_OUTPUT"
- uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
with:
go-version: ${{ steps.go-version.outputs.result }}
- name: Generate timestamp for cluster name
id: timestamp # Give the step an ID to reference its output
run: |
# Generate a timestamp in the format YYMMDD-HHMMSS.
# Extracting from PR name would require sanitization due to GKE cluster naming constraints
TIMESTAMP=$(date +'%y%m%d-%H%M%S')
CLUSTER_NAME="test-secretless-${TIMESTAMP}"
echo "Generated cluster name: ${CLUSTER_NAME}"
echo "cluster_name=${CLUSTER_NAME}" >> $GITHUB_OUTPUT
- run: |
make helm-plugins
make -j test-e2e-gke
# The VEN_API_KEY_PULL secret is set to my API key (Mladen) for glow.in.the.dark tenant.
env:
VEN_API_KEY: ${{ secrets.VEN_API_KEY_PULL }}
VEN_API_KEY_PULL: ${{ secrets.VEN_API_KEY_PULL }}
OCI_BASE: europe-west1-docker.pkg.dev/machineidentitysecurity-jsci-e/js-agent-ci-repo
VEN_API_HOST: api.venafi.cloud
VEN_ZONE: k8s-agent-CI\Default
VEN_VCP_REGION: us
CLOUDSDK_CORE_PROJECT: machineidentitysecurity-jsci-e
CLOUDSDK_COMPUTE_ZONE: europe-west1-b
CLUSTER_NAME: ${{ steps.timestamp.outputs.cluster_name }}
- name: Delete GKE Cluster
# 'always()' - Run this step regardless of success or failure.
# '!contains(...)' - AND only run if the list of PR labels DOES NOT contain 'keep-e2e-cluster'.
# NOTE: You will have to delete the test cluster manually when finished with debugging or incur costs.
if: always() && !contains(github.event.pull_request.labels.*.name, 'keep-e2e-cluster')
run: |
echo "Label 'keep-e2e-cluster' not found. Cleaning up GKE cluster ${{ steps.timestamp.outputs.cluster_name }}"
gcloud container clusters delete ${{ steps.timestamp.outputs.cluster_name }} \
--project=machineidentitysecurity-jsci-e \
--zone=europe-west1-b \
--quiet
================================================
FILE: .gitignore
================================================
/preflight
/preflight.yaml
/builds
/bundles
/output
credentials.json
.terraform
terraform.tfstate
terraform.tfstate.backup
bom.xml
predicate.json
*.pem
*.pub
*.tgz
_bin
.envrc
================================================
FILE: .golangci.yaml
================================================
version: "2"
linters:
default: none
exclusions:
generated: lax
presets: [comments, common-false-positives, legacy, std-error-handling]
rules:
- linters:
- errchkjson
- forbidigo
- gosec
- musttag
- nilerr
- unparam
text: .*
paths: [third_party, builtin$, examples$]
warn-unused: true
settings:
staticcheck:
checks: ["all", "-ST1000", "-ST1001", "-ST1003", "-ST1005", "-ST1012", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-QF1001", "-QF1003", "-QF1008"]
enable:
- asasalint
- asciicheck
- bidichk
- bodyclose
- canonicalheader
- contextcheck
- copyloopvar
- decorder
- dogsled
- dupword
- durationcheck
- errcheck
- errchkjson
- errname
- exhaustive
- exptostd
- forbidigo
- ginkgolinter
- gocheckcompilerdirectives
- gochecksumtype
- gocritic
- goheader
- goprintffuncname
- gosec
- gosmopolitan
- govet
- grouper
- importas
- ineffassign
- interfacebloat
- intrange
- loggercheck
- makezero
- mirror
- misspell
- modernize
- musttag
- nakedret
- nilerr
- nilnil
- noctx
- nosprintfhostport
- predeclared
- promlinter
- protogetter
- reassign
- sloglint
- staticcheck
- tagalign
- testableexamples
- unconvert
- unparam
- unused
- usestdlibvars
- usetesting
- wastedassign
formatters:
enable: [gci, gofmt]
settings:
gci:
sections:
- standard # Standard section: captures all standard packages.
- default # Default section: contains all imports that could not be matched to another section type.
- localmodule # Local module section: contains all local packages. This section is not present unless explicitly enabled.
- blank # Blank section: contains all blank imports. This section is not present unless explicitly enabled.
- dot # Dot section: contains all dot imports. This section is not present unless explicitly enabled.
custom-order: true
exclusions:
generated: lax
paths: [third_party, builtin$, examples$]
================================================
FILE: CONTRIBUTING.md
================================================
# Contributing to Discovery Agent
Thank you for your interest in contributing! This document provides guidelines and instructions for contributing.
Note that this repository holds two separate components:
- disco-agent: For CyberArk DisCo
- venafi-kubernetes-agent: For TLSPK / Certificate Manager SaaS
## Table of Contents
- [Getting Started](#getting-started)
- [Development Environment](#development-environment)
- [Making Changes](#making-changes)
- [Testing](#testing)
- [Submitting a Pull Request](#submitting-a-pull-request)
- [Code Review Process](#code-review-process)
- [Additional Resources](#additional-resources)
### Prerequisites
Before you begin, ensure you have the following installed:
- [Go](https://golang.org/doc/install) (version specified in `go.mod`)
- [Make](https://www.gnu.org/software/make/)
- [Git](https://git-scm.com/)
- [Docker](https://docs.docker.com/get-docker/) (for building container images)
To check which Go version will be used:
```bash
make which-go
```
It's also possible to use a vendored version of Go, via `make vendor-go`.
### Repository Tooling
Most of the setup logic for provisioning tooling and for handling builds and testing
is defined in Makefile logic.
Specifically, the `make/_shared` directory contains shared Makefile logic derived from
the cert-manager [makefile-modules](https://github.com/cert-manager/makefile-modules/) project.
### Setting Up Your Development Environment
1. **Fork the repository** on GitHub
2. **Clone your fork:**
```bash
git clone git@github.com:YOUR-USERNAME/jetstack-secure.git
cd jetstack-secure
```
3. **Add the upstream remote:**
```bash
git remote add upstream git@github.com:jetstack/jetstack-secure.git
```
4. **Run initial verification:**
```bash
make verify
```
This ensures your environment is set up correctly.
## Development Environment
### Local Execution
To build and run the agent locally:
```bash
go run main.go agent --agent-config-file ./path/to/agent/config/file.yaml -p 0h1m0s
```
Example configuration files are available:
- [agent.yaml](./agent.yaml)
- [examples/one-shot-secret.yaml](./examples/one-shot-secret.yaml)
- [examples/cert-manager-agent.yaml](./examples/cert-manager-agent.yaml)
You can also run a local echo server to monitor agent requests:
```bash
go run main.go echo
```
### Useful Make Targets
- `make help` - Show all available make targets
- `make verify` - Run all verification checks (linting, formatting, etc.)
- `make test-unit` - Run unit tests
- `make test-helm` - Run Helm chart tests
- `make generate` - Generate code, documentation, and other artifacts
- `make oci-build-preflight` - Build container image
- `make clean` - Clean all temporary files
## Making Changes
### Creating a Branch
Always create a new branch for your changes:
```bash
git checkout -b feature/your-feature-name
```
Use descriptive branch names:
- `feature/` for new features
- `fix/` for bug fixes
- `docs/` for documentation changes
- `refactor/` for refactoring
### Code Style
This project follows standard Go conventions:
- Run `make verify-golangci-lint` to check your code
- Run `make fix-golangci-lint` to automatically fix some issues
- Ensure all code is formatted with `gofmt`
- Follow the [Effective Go](https://golang.org/doc/effective_go) guidelines
- Most of the conventions are enforced by linters, and violations will prevent code being merged
### Committing Changes
1. **Stage your changes:**
```bash
git add .
```
2. **Run verification before committing:**
```bash
make verify
```
3. **Commit with a descriptive message:**
```bash
git commit -m "Brief description of your changes"
```
Write clear commit messages:
- Use the imperative mood ("Add feature" not "Added feature")
- Keep the first line under 72 characters
- Add additional context in the body if needed
## Testing
### Running Tests Locally
Before submitting a PR, ensure all tests pass:
```bash
# Run unit tests
make test-unit
# Run Helm tests
make test-helm
# Run all verification checks
make verify
```
### End-to-End Tests
E2E tests run automatically in CI when you add specific labels to your PR:
- Add the `test-e2e` label to trigger GKE-based E2E tests
- Add the `keep-e2e-cluster` label if you need to keep the cluster for debugging (remember to delete it manually afterward to avoid costs)
The E2E test script is located at [hack/e2e/test.sh](./hack/e2e/test.sh).
### Writing Tests
- Add unit tests for all new functionality
- Place tests in `*_test.go` files alongside the code they test
- Use the [testify](https://github.com/stretchr/testify) library for assertions
- Aim for meaningful test coverage, not just high percentages
## Submitting a Pull Request
1. **Push your branch to your fork:**
```bash
git push origin feature/your-feature-name
```
2. **Create a Pull Request** on GitHub from your fork to the `master` branch of `jetstack/jetstack-secure`
3. **Fill out the PR description** with:
- Clear description of the changes
- Related issue numbers (if applicable)
- Testing instructions
- Any breaking changes or special considerations
4. **Ensure CI passes:**
- All tests must pass
- Code must pass verification / linting checks
- No merge conflicts
## Code Review Process
### For All Contributors
- PRs require approval before merging
- Keep PRs focused and reasonably sized
- Update your branch if `master` has moved forward:
```bash
git fetch upstream
git rebase upstream/master
git push --force-with-lease origin feature/your-feature-name
```
### For CyberArk Contributors
**Contributors from inside CyberArk should reach out to the cert-manager team for reviews for PRs which are passing CI.**
The cert-manager team maintains this project and will provide code reviews and guidance for merging changes.
## Additional Resources
- [Project Documentation](https://docs.cyberark.com/mis-saas/vaas/k8s-components/c-tlspk-agent-overview/)
- [Issue Tracker](https://github.com/jetstack/jetstack-secure/issues)
- [Release Process](./RELEASE.md)
- [cert-manager Community](https://cert-manager.io/docs/contributing/)
## Getting Help
If you need help or have questions:
1. Check existing [issues](https://github.com/jetstack/jetstack-secure/issues) and [documentation](https://docs.cyberark.com/mis-saas/vaas/k8s-components/c-tlspk-agent-overview/)
2. Open a new issue with the `question` label
3. For CyberArk contributors, reach out to the cert-manager team
## License
By contributing, you agree that your contributions will be licensed under the license in the LICENSE file in the root directory of this repository.
================================================
FILE: LICENSE
================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
FILE: LICENSES
================================================
This LICENSES file is generated by the `licenses` module in makefile-modules[0].
The licenses below the "---" are determined by the go-licenses tool[1].
The aim of this file is to collect the licenses of all dependencies, and provide
a single source of truth for licenses used by this project.
## For Developers
If CI reports that this file is out of date, you should be careful to check that the
new licenses are acceptable for this project before running `make generate-go-licenses`
to update this file.
Acceptable licenses are those allowlisted by the CNCF[2].
You MUST NOT add any new dependencies whose licenses are not allowlisted by the CNCF,
or which do not have an explicit license exception[3].
## For Users
If this file was included in a release artifact, it is a snapshot of the licenses of all dependencies at the time of the release.
You can retrieve the actual license text by following these steps:
1. Find the dependency name in this file
2. Go to the source code repository of this project, and go to the tag corresponding to this release.
3. Find the exact version of the dependency in the `go.mod` file
4. Search for the dependency at the correct version in the [Go package index](https://pkg.go.dev/).
## Links
[0]: https://github.com/cert-manager/makefile-modules/
[1]: https://github.com/google/go-licenses
[2]: https://github.com/cncf/foundation/blob/db4179134ebe7fa00b140a050c19147db808b6fa/policies-guidance/allowed-third-party-license-policy.md#cncf-allowlist-license-policy
[3]: https://github.com/cncf/foundation/blob/db4179134ebe7fa00b140a050c19147db808b6fa/license-exceptions/README.md
---
cel.dev/expr,Apache-2.0
github.com/Khan/genqlient/graphql,MIT
github.com/Venafi/vcert/v5,Apache-2.0
github.com/antlr4-go/antlr/v4,BSD-3-Clause
github.com/aymerick/douceur,MIT
github.com/beorn7/perks/quantile,MIT
github.com/blang/semver/v4,MIT
github.com/cenkalti/backoff/v5,MIT
github.com/cespare/xxhash/v2,MIT
github.com/davecgh/go-spew/spew,ISC
github.com/emicklei/go-restful/v3,MIT
github.com/evanphx/json-patch/v5,BSD-3-Clause
github.com/fatih/color,MIT
github.com/fsnotify/fsnotify,BSD-3-Clause
github.com/fxamacker/cbor/v2,MIT
github.com/go-http-utils/headers,MIT
github.com/go-logr/logr,Apache-2.0
github.com/go-logr/zapr,Apache-2.0
github.com/go-openapi/jsonpointer,Apache-2.0
github.com/go-openapi/jsonreference,Apache-2.0
github.com/go-openapi/swag,Apache-2.0
github.com/go418/concurrentcache,Apache-2.0
github.com/go418/concurrentcache/logger,Apache-2.0
github.com/gogo/protobuf,BSD-3-Clause
github.com/golang-jwt/jwt/v4,MIT
github.com/golang-jwt/jwt/v5,MIT
github.com/google/btree,Apache-2.0
github.com/google/cel-go,Apache-2.0
github.com/google/cel-go,BSD-3-Clause
github.com/google/gnostic-models,Apache-2.0
github.com/google/uuid,BSD-3-Clause
github.com/gorilla/css/scanner,BSD-3-Clause
github.com/gorilla/websocket,BSD-2-Clause
github.com/hashicorp/errwrap,MPL-2.0
github.com/hashicorp/go-multierror,MPL-2.0
github.com/josharian/intern,MIT
github.com/json-iterator/go,MIT
github.com/lestrrat-go/blackmagic,MIT
github.com/lestrrat-go/httpcc,MIT
github.com/lestrrat-go/httprc/v3,MIT
github.com/lestrrat-go/jwx/v3,MIT
github.com/lestrrat-go/option/v2,MIT
github.com/mailru/easyjson,MIT
github.com/mattn/go-colorable,MIT
github.com/mattn/go-isatty,MIT
github.com/microcosm-cc/bluemonday,BSD-3-Clause
github.com/modern-go/concurrent,Apache-2.0
github.com/modern-go/reflect2,Apache-2.0
github.com/munnerz/goautoneg,BSD-3-Clause
github.com/pkg/errors,BSD-2-Clause
github.com/pmezard/go-difflib/difflib,BSD-3-Clause
github.com/pmylund/go-cache,MIT
github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil,BSD-3-Clause
github.com/prometheus/client_golang/prometheus,Apache-2.0
github.com/prometheus/client_model/go,Apache-2.0
github.com/prometheus/common,Apache-2.0
github.com/prometheus/procfs,Apache-2.0
github.com/sosodev/duration,MIT
github.com/spf13/cobra,Apache-2.0
github.com/spf13/pflag,BSD-3-Clause
github.com/stoewer/go-strcase,MIT
github.com/stretchr/testify,MIT
github.com/vektah/gqlparser/v2,MIT
github.com/x448/float16,MIT
github.com/youmark/pkcs8,MIT
go.opentelemetry.io/otel,Apache-2.0
go.opentelemetry.io/otel/trace,Apache-2.0
go.uber.org/multierr,MIT
go.uber.org/zap,MIT
go.yaml.in/yaml/v2,Apache-2.0
go.yaml.in/yaml/v3,MIT
golang.org/x/crypto,BSD-3-Clause
golang.org/x/exp,BSD-3-Clause
golang.org/x/net,BSD-3-Clause
golang.org/x/oauth2,BSD-3-Clause
golang.org/x/sync,BSD-3-Clause
golang.org/x/sys,BSD-3-Clause
golang.org/x/term,BSD-3-Clause
golang.org/x/text,BSD-3-Clause
golang.org/x/time/rate,BSD-3-Clause
gomodules.xyz/jsonpatch/v2,Apache-2.0
google.golang.org/genproto/googleapis/api/expr/v1alpha1,Apache-2.0
google.golang.org/genproto/googleapis/rpc/status,Apache-2.0
google.golang.org/protobuf,BSD-3-Clause
gopkg.in/evanphx/json-patch.v4,BSD-3-Clause
gopkg.in/inf.v0,BSD-3-Clause
gopkg.in/ini.v1,Apache-2.0
gopkg.in/yaml.v2,Apache-2.0
gopkg.in/yaml.v3,MIT
k8s.io/api,Apache-2.0
k8s.io/apiextensions-apiserver/pkg,Apache-2.0
k8s.io/apimachinery/pkg,Apache-2.0
k8s.io/apimachinery/third_party/forked/golang,BSD-3-Clause
k8s.io/apiserver/pkg,Apache-2.0
k8s.io/client-go,Apache-2.0
k8s.io/component-base,Apache-2.0
k8s.io/klog/v2,Apache-2.0
k8s.io/kube-openapi/pkg,Apache-2.0
k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json,BSD-3-Clause
k8s.io/kube-openapi/pkg/internal/third_party/govalidator,MIT
k8s.io/kube-openapi/pkg/validation/errors,Apache-2.0
k8s.io/kube-openapi/pkg/validation/spec,Apache-2.0
k8s.io/kube-openapi/pkg/validation/strfmt,Apache-2.0
k8s.io/kube-openapi/pkg/validation/validate,Apache-2.0
k8s.io/utils,Apache-2.0
k8s.io/utils/internal/third_party/forked/golang,BSD-3-Clause
sigs.k8s.io/controller-runtime/pkg,Apache-2.0
sigs.k8s.io/json,Apache-2.0
sigs.k8s.io/json,BSD-3-Clause
sigs.k8s.io/randfill,Apache-2.0
sigs.k8s.io/structured-merge-diff/v6,Apache-2.0
sigs.k8s.io/yaml,MIT
sigs.k8s.io/yaml,Apache-2.0
sigs.k8s.io/yaml,BSD-3-Clause
================================================
FILE: Makefile
================================================
# Copyright 2023 The cert-manager Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
# Edit https://github.com/cert-manager/makefile-modules/blob/main/modules/repository-base/base/Makefile instead.
# NOTE FOR DEVELOPERS: "How do the Makefiles work and how can I extend them?"
#
# Shared Makefile logic lives in the make/_shared/ directory. The source of truth for these files
# lies outside of this repository, eg. in the cert-manager/makefile-modules repository.
#
# Logic specific to this repository must be defined in the make/00_mod.mk and make/02_mod.mk files:
# - The make/00_mod.mk file is included first and contains variable definitions needed by
# the shared Makefile logic.
# - The make/02_mod.mk file is included later, it can make use of most of the shared targets
# defined in the make/_shared/ directory (all targets defined in 00_mod.mk and 01_mod.mk).
# This file should be used to define targets specific to this repository.
##################################
# Some modules build their dependencies from variables, we want these to be
# evaluated at the last possible moment. For this we use second expansion to
# re-evaluate the generate and verify targets a second time.
#
# See https://www.gnu.org/software/make/manual/html_node/Secondary-Expansion.html
.SECONDEXPANSION:
# For details on some of these "prelude" settings, see:
# https://clarkgrubb.com/makefile-style-guide
MAKEFLAGS += --warn-undefined-variables --no-builtin-rules
SHELL := /usr/bin/env bash
# The `--norc` option prevents "PS1: unbound" errors.
# If Bash thinks it is being run with its standard input connected to a network
# connection (such as via SSH or via Docker), it reads and executes commands
# from ~/.bashrc, regardless of whether it thinks it is in interactive mode.
# Bash does not set PS1 in non-interactive environments. But on Ubuntu 24.04 the
# default /etc/bash.bashrc file assumes that PS1 is set.
#
# See https://www.gnu.org/software/bash/manual/bash.html#Invoked-by-remote-shell-daemon
.SHELLFLAGS := --norc -uo pipefail -c
# Running "make" with no arguments prints the help text.
.DEFAULT_GOAL := help
# Delete a rule's target file if its recipe fails part-way, so a truncated
# output file cannot be mistaken for up-to-date on the next run.
.DELETE_ON_ERROR:
# Disable all built-in suffix rules; only rules defined in these Makefiles apply.
.SUFFIXES:
# Phony prerequisite: depending on FORCE makes a target rebuild unconditionally.
FORCE:
noop: # do nothing
# Set empty value for MAKECMDGOALS to prevent the "warning: undefined variable 'MAKECMDGOALS'"
# warning from happening when running make without arguments
MAKECMDGOALS ?=
##################################
# Host OS and architecture setup #
##################################
# The reason we don't use "go env GOOS" or "go env GOARCH" is that the "go"
# binary may not be available in the PATH yet when the Makefiles are
# evaluated. HOST_OS and HOST_ARCH only support Linux, *BSD and macOS (M1
# and Intel).
host_os := $(shell uname -s | tr A-Z a-z)
host_arch := $(shell uname -m)
HOST_OS ?= $(host_os)
HOST_ARCH ?= $(host_arch)
# Normalize "uname -m" output to the GOARCH naming convention.
ifeq (x86_64, $(HOST_ARCH))
HOST_ARCH = amd64
else ifeq (aarch64, $(HOST_ARCH))
# linux reports the arm64 arch as aarch64
HOST_ARCH = arm64
endif
##################################
# Git and versioning information #
##################################
git_version := $(shell git describe --tags --always --match='v*' --abbrev=14 --dirty)
VERSION ?= $(git_version)
# A "-" in the most recent v* tag (e.g. v1.2.3-alpha.0) marks a pre-release.
IS_PRERELEASE := $(shell git describe --tags --always --match='v*' --abbrev=0 | grep -q '-' && echo true || echo false)
GITCOMMIT := $(shell git rev-parse HEAD)
# Commit timestamp (seconds since epoch), used for reproducible build metadata.
GITEPOCH := $(shell git show -s --format=%ct HEAD)
##################################
# Global variables and dirs #
##################################
bin_dir := _bin
# The ARTIFACTS environment variable is set by the CI system to a directory
# where artifacts should be placed. These artifacts are then uploaded to a
# storage bucket by the CI system (https://docs.prow.k8s.io/docs/components/pod-utilities/).
# An example of such an artifact is a jUnit XML file containing test results.
# If the ARTIFACTS environment variable is not set, we default to a local
# directory in the _bin directory.
ARTIFACTS ?= $(bin_dir)/artifacts
# Order-only-style directory rule: creates any of the listed directories on demand.
$(bin_dir) $(ARTIFACTS) $(bin_dir)/scratch:
mkdir -p $@
.PHONY: clean
## Clean all temporary files
## @category [shared] Tools
clean:
rm -rf $(bin_dir)
##################################
# Include all the Makefiles #
##################################
# The leading "-" makes missing include files non-fatal, so a fresh checkout
# without the shared modules vendored still parses. 00_mod.mk files define
# variables, 01/02_mod.mk files define targets that build on them.
-include make/00_mod.mk
-include make/_shared/*/00_mod.mk
-include make/_shared/*/01_mod.mk
-include make/02_mod.mk
-include make/_shared/*/02_mod.mk
================================================
FILE: OWNERS
================================================
approvers:
- j-fuentes
- wwwil
- charlieegan3
- akvilemar
- james-w
- tfadeyi
reviewers:
- j-fuentes
- wwwil
- charlieegan3
================================================
FILE: OWNERS_ALIASES
================================================
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
# Edit https://github.com/cert-manager/makefile-modules/blob/main/modules/repository-base/base/OWNERS_ALIASES instead.
aliases:
cm-maintainers:
- munnerz
- joshvanl
- wallrj
- jakexks
- maelvls
- sgtcodfish
- inteon
- thatsmrtalbot
- erikgb
- hjoshi123
================================================
FILE: README.md
================================================
# Discovery Agent
[![Tests](https://github.com/jetstack/jetstack-secure/actions/workflows/tests.yaml/badge.svg)](https://github.com/jetstack/jetstack-secure/actions/workflows/tests.yaml)
[![Go Reference](https://pkg.go.dev/badge/github.com/jetstack/jetstack-secure.svg)](https://pkg.go.dev/github.com/jetstack/jetstack-secure)
[![Go Report Card](https://goreportcard.com/badge/github.com/jetstack/jetstack-secure)](https://goreportcard.com/report/github.com/jetstack/jetstack-secure)
"The agent" manages your machine identities across Cloud Native Kubernetes and OpenShift environments and builds a detailed view of the enterprise security posture.
## Installation
Please [review the documentation](https://docs.cyberark.com/mis-saas/vaas/k8s-components/c-tlspk-agent-overview/) for the agent.
Detailed installation instructions are available for a variety of methods.
## Local Execution
To build and run a version from master:
```bash
go run main.go agent --agent-config-file ./path/to/agent/config/file.yaml -p 0h1m0s
```
You can configure the agent to perform one data gathering loop and output the data to a local file:
```bash
go run . agent \
--agent-config-file examples/one-shot-secret.yaml \
--one-shot \
--output-path output.json
```
> Some examples of agent configuration files:
>
> - [./agent.yaml](./agent.yaml).
> - [./examples/one-shot-secret.yaml](./examples/one-shot-secret.yaml).
> - [./examples/cert-manager-agent.yaml](./examples/cert-manager-agent.yaml).
You might also want to run a local echo server to monitor requests sent by the agent:
```bash
go run main.go echo
```
## Metrics
The agent exposes its metrics through a Prometheus server, on port 8081.
The Prometheus server is disabled by default but can be enabled by passing the `--enable-metrics` flag to the agent binary.
If you deploy the agent using the venafi-kubernetes-agent Helm chart, the metrics server will be enabled by default, on port 8081.
If you use the Prometheus Operator, you can use `--set metrics.podmonitor.enabled=true` to deploy a `PodMonitor` resource,
which will add the venafi-kubernetes-agent metrics to your Prometheus server.
The following metrics are collected:
- Go collector: via the [default registry](https://github.com/prometheus/client_golang/blob/34e02e282dc4a3cb55ca6441b489ec182e654d59/prometheus/registry.go#L60-L63) in Prometheus `client_golang`.
- Process collector: via the [default registry](https://github.com/prometheus/client_golang/blob/34e02e282dc4a3cb55ca6441b489ec182e654d59/prometheus/registry.go#L60-L63) in Prometheus `client_golang`.
- Agent metrics: `data_readings_upload_size`: Data readings upload size (in bytes) sent by the in-cluster agent.
## End to end testing
An end to end test script is available at [./hack/e2e/test.sh](./hack/e2e/test.sh). It is configured to run in CI
in the tests.yaml GitHub Actions workflow. To run the script you will need to add the `test-e2e` label to the PR.
The script creates a cluster in GKE and cleans up after itself unless the `keep-e2e-cluster` label is set on the PR. Adding that
label will leave the cluster running for further debugging but it will incur costs so manually delete the cluster when done.
================================================
FILE: RELEASE.md
================================================
# Release Process
> [!NOTE]
> Before starting a release let the docs team know that a release is about to be created so that documentation can be prepared in advance.
> This is not necessary for pre-releases.
The release process is semi-automated.
### Step 1: Git Tag and GitHub Release
> [!NOTE]
>
> Upon pushing the tag, a GitHub Action will do the following:
>
> - Build and publish the container image: `quay.io/jetstack/venafi-agent`,
> - Build and publish the Helm chart: `oci://quay.io/jetstack/charts/venafi-kubernetes-agent`,
> - Build and publish the container image: `quay.io/jetstack/disco-agent`,
> - Build and publish the Helm chart: `oci://quay.io/jetstack/charts/disco-agent`,
> - Build and publish the container image: `quay.io/jetstack/discovery-agent`,
> - Build and publish the Helm chart: `oci://quay.io/jetstack/charts/discovery-agent`,
> - Create a draft GitHub release,
1. Run govulncheck; it's the best indicator that a dependency needs to be upgraded.
```bash
make verify-govulncheck
```
Any failures should be treated extremely seriously and patched before release unless you can be absolutely
confident it's a false positive.
2. Consider upgrading Go dependencies using `go-mod-upgrade`:
```bash
go install github.com/oligot/go-mod-upgrade@latest
go-mod-upgrade
make generate
```
Once complete, you'll need to create a PR to merge the changes.
3. Open the [tests GitHub Actions workflow][tests-workflow]
and verify that it succeeds on the master branch.
4. Create a tag for the new release:
```sh
export VERSION=v1.1.0
git tag --annotate --message="Release ${VERSION}" "${VERSION}"
git push origin "${VERSION}"
```
This triggers a [release action](https://github.com/jetstack/jetstack-secure/actions/workflows/release.yml).
5. Wait until the release action finishes.
6. Navigate to the [GitHub Releases](https://github.com/jetstack/jetstack-secure/releases) page and select the draft release to edit.
1. Click on “Generate release notes” to automatically compile the changelog.
2. Review and refine the generated notes to ensure they’re clear and useful
for end users.
3. Remove any irrelevant entries, such as “update deps,” “update CI,” “update
docs,” or similar internal changes that do not impact user functionality.
7. Publish the release.
8. Inform the `#venafi-kubernetes-agent` channel on Slack that a new version of the Discovery Agent has been released!
Consider also messaging the DisCo team at CyberArk (ask in the cert-manager team Slack channel if you don't know who to message)
9. Inform the docs team of the new release so they can update the
documentation at <https://docs.cyberark.com/>.
[tests-workflow]: https://github.com/jetstack/jetstack-secure/actions/workflows/tests.yaml?query=branch%3Amaster
## Release Artifact Information
For context, the new tag will create the following images:
| Image | Automation |
| -------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- |
| `quay.io/jetstack/venafi-agent` | Automatically built by the [release action](.github/workflows/release.yml) on Git tag pushes |
| `quay.io/jetstack/disco-agent` | Automatically built by the [release action](.github/workflows/release.yml) on Git tag pushes |
| `quay.io/jetstack/discovery-agent` | Automatically built by the [release action](.github/workflows/release.yml) on Git tag pushes |
| `registry.venafi.cloud/venafi-agent/venafi-agent` | Automatically mirrored by Harbor Replication rule |
| `private-registry.venafi.cloud/venafi-agent/venafi-agent` | Automatically mirrored by Harbor Replication rule |
| `private-registry.venafi.eu/venafi-agent/venafi-agent` | Automatically mirrored by Harbor Replication rule |
| `registry.ngts.paloaltonetworks.com/disco-agent/disco-agent` | Automatically mirrored by Harbor Replication rule |
| `registry.ngts.paloaltonetworks.com/discovery-agent/discovery-agent` | Automatically mirrored by Harbor Replication rule |
and the following OCI Helm charts:
| Helm Chart | Automation |
| -------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- |
| `oci://quay.io/jetstack/charts/venafi-kubernetes-agent` | Automatically built by the [release action](.github/workflows/release.yml) on Git tag pushes |
| `oci://quay.io/jetstack/charts/disco-agent` | Automatically built by the [release action](.github/workflows/release.yml) on Git tag pushes |
| `oci://quay.io/jetstack/charts/discovery-agent` | Automatically built by the [release action](.github/workflows/release.yml) on Git tag pushes |
| `oci://registry.venafi.cloud/charts/venafi-kubernetes-agent` | Automatically mirrored by Harbor Replication rule |
| `oci://private-registry.venafi.cloud/charts/venafi-kubernetes-agent` | Automatically mirrored by Harbor Replication rule |
| `oci://private-registry.venafi.eu/charts/venafi-kubernetes-agent` | Automatically mirrored by Harbor Replication rule |
| `oci://registry.ngts.paloaltonetworks.com/charts/disco-agent` | Automatically mirrored by Harbor Replication rule |
| `oci://registry.ngts.paloaltonetworks.com/charts/discovery-agent` | Automatically mirrored by Harbor Replication rule |
### Replication Flows
TODO: These flows are helpful illustrations but describe a process whose source of truth is defined elsewhere. Instead, we should document the replication process where it's defined, in enterprise-builds.
Replication flow for the venafi-kubernetes-agent Helm chart:
```text
v1.1.0 (Git tag in the jetstack-secure repo)
└── oci://quay.io/jetstack/charts/venafi-kubernetes-agent --version 1.1.0 (GitHub Actions in the jetstack-secure repo)
└── oci://eu.gcr.io/jetstack-secure-enterprise/charts/venafi-kubernetes-agent (Enterprise Builds's GitHub Actions)
├── oci://registry.venafi.cloud/charts/venafi-kubernetes-agent --version 1.1.0 (Harbor Replication)
└── oci://private-registry.venafi.cloud/charts/venafi-kubernetes-agent --version 1.1.0 (Harbor Replication)
└── oci://private-registry.venafi.eu/charts/venafi-kubernetes-agent --version 1.1.0 (Harbor Replication)
```
Replication flow for the venafi-kubernetes-agent container image:
```text
v1.1.0 (Git tag in the jetstack-secure repo)
└── quay.io/jetstack/venafi-agent:v1.1.0 (GitHub Actions in the jetstack-secure repo)
└── eu.gcr.io/jetstack-secure-enterprise/venafi-agent:v1.1.0 (Enterprise Builds's GitHub Actions)
├── registry.venafi.cloud/venafi-agent/venafi-agent:v1.1.0 (Harbor Replication)
├── private-registry.venafi.cloud/venafi-agent/venafi-agent:v1.1.0 (Harbor Replication)
└── private-registry.venafi.eu/venafi-agent/venafi-agent:v1.1.0 (Harbor Replication)
```
[public-img-and-chart-replication.tf]: https://gitlab.com/venafi/vaas/delivery/harbor/-/blob/3d114f54092eb44a1deb0edc7c4e8a2d4f855aa2/public-registry/module/subsystems/tlspk/replication.tf
[private-img-and-chart-replication.tf]: https://gitlab.com/venafi/vaas/delivery/harbor/-/blob/3d114f54092eb44a1deb0edc7c4e8a2d4f855aa2/private-registry/module/subsystems/tlspk/replication.tf
[release_enterprise_builds.yaml]: https://github.com/jetstack/enterprise-builds/actions/workflows/release_enterprise_builds.yaml
## Step 2: Testing
When a release is complete, consider installing it into a cluster and testing it. TODO: provide guidance on doing those tests.
================================================
FILE: agent.yaml
================================================
# Example agent configuration (see pkg/agent for the full schema).
# Backend that receives the gathered data readings.
server: "https://platform.jetstack.io"
organization_id: "my-organization"
cluster_id: "my_cluster"
# How often data is gathered and uploaded (Go duration syntax).
period: "0h1m0s"
# Data gatherers to run; the "dummy" kind is for testing only.
data-gatherers:
  - kind: "dummy"
    name: "dummy"
    config:
      # presumably the number of attempts that fail before succeeding — TODO confirm
      failed-attempts: 5
  - kind: "dummy"
    name: "dummy-fail"
    config:
      # this gatherer is configured to always fail, exercising error paths
      always-fail: true
# Venafi Cloud upload settings.
venafi-cloud:
  uploader_id: "example-id"
  upload_path: "/example/endpoint/path"
================================================
FILE: api/agent.go
================================================
package api
// AgentMetadata is metadata about the agent.
type AgentMetadata struct {
	// Version is the version of the agent build that produced the upload.
	Version string `json:"version"`
	// ClusterID is the name of the cluster or host where the agent is running.
	// It may send data for other clusters in its datareadings.
	ClusterID string `json:"cluster_id"`
}
================================================
FILE: api/common.go
================================================
// Package api provides types for Preflight reports and some common helpers.
package api
import (
"encoding/json"
"time"
)
// TimeFormat defines the format used for timestamps across all this API.
const TimeFormat = time.RFC3339
// Time is a wrapper around time.Time that overrides how it is marshaled into JSON
type Time struct {
time.Time
}
// String returns a string representation of the timestamp
func (t Time) String() string {
return t.Format(TimeFormat)
}
// MarshalJSON marshals the timestamp with RFC3339 format
func (t Time) MarshalJSON() ([]byte, error) {
str := t.String()
jsonStr, err := json.Marshal(str)
if err != nil {
return nil, err
}
return jsonStr, nil
}
================================================
FILE: api/datareading.go
================================================
package api
import (
"bytes"
"encoding/json"
"fmt"
"time"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/version"
)
// DataReadingsPost is the payload in the upload request.
type DataReadingsPost struct {
	// AgentMetadata identifies the agent build and cluster that produced this payload.
	AgentMetadata *AgentMetadata `json:"agent_metadata"`
	// DataGatherTime represents the time that the data readings were gathered
	DataGatherTime time.Time `json:"data_gather_time"`
	// DataReadings is the list of gatherer outputs collected in this cycle.
	DataReadings []*DataReading `json:"data_readings"`
}
// DataReading is the output of a DataGatherer.
type DataReading struct {
	// ClusterID is optional as it can be inferred from the agent
	// token when using basic authentication.
	ClusterID string `json:"cluster_id,omitempty"`
	// DataGatherer is the name of the gatherer that produced this reading.
	DataGatherer string `json:"data-gatherer"`
	// Timestamp records when the reading was taken (RFC3339 in JSON).
	Timestamp Time `json:"timestamp"`
	// Data holds the gatherer-specific payload; see UnmarshalJSON for the
	// concrete types it is decoded into.
	Data any `json:"data"`
	// SchemaVersion identifies the schema of Data.
	SchemaVersion string `json:"schema_version"`
}
// UnmarshalJSON implements the json.Unmarshaler interface for DataReading.
// The function attempts to decode the Data field into known types in a prioritized order.
// Empty data is considered an error, because there is no way to discriminate between data types.
// TODO(wallrj): Add a discriminator field to DataReading to avoid this complex logic.
// E.g. "data_type": "discovery"|"dynamic"
func (o *DataReading) UnmarshalJSON(data []byte) error {
	var envelope struct {
		ClusterID     string          `json:"cluster_id,omitempty"`
		DataGatherer  string          `json:"data-gatherer"`
		Timestamp     Time            `json:"timestamp"`
		Data          json.RawMessage `json:"data"`
		SchemaVersion string          `json:"schema_version"`
	}

	// Strictly decode the top-level fields, keeping Data raw for now.
	if err := jsonUnmarshalStrict(data, &envelope); err != nil {
		return fmt.Errorf("failed to parse DataReading: %s", err)
	}
	o.ClusterID = envelope.ClusterID
	o.DataGatherer = envelope.DataGatherer
	o.Timestamp = envelope.Timestamp
	o.SchemaVersion = envelope.SchemaVersion

	// Without a discriminator field there is no way to tell which type
	// empty data should decode into, so reject it outright.
	raw := envelope.Data
	if len(raw) == 0 || bytes.Equal(raw, []byte("null")) || bytes.Equal(raw, []byte("{}")) {
		return fmt.Errorf("failed to parse DataReading.Data for gatherer %q: empty data", o.DataGatherer)
	}

	// Try each candidate type in priority order; the first strict decode
	// that succeeds wins. The order matters because the types overlap.
	if target := new(OIDCDiscoveryData); jsonUnmarshalStrict(raw, target) == nil {
		o.Data = target
		return nil
	}
	if target := new(DiscoveryData); jsonUnmarshalStrict(raw, target) == nil {
		o.Data = target
		return nil
	}
	if target := new(DynamicData); jsonUnmarshalStrict(raw, target) == nil {
		o.Data = target
		return nil
	}

	// No candidate type matched the payload.
	return fmt.Errorf("failed to parse DataReading.Data for gatherer %q: unknown type", o.DataGatherer)
}
// jsonUnmarshalStrict unmarshals JSON data into the provided interface,
// disallowing unknown fields to ensure strict adherence to the expected structure.
func jsonUnmarshalStrict(data []byte, v any) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	dec.DisallowUnknownFields()
	if err := dec.Decode(v); err != nil {
		return err
	}
	return nil
}
// GatheredResource wraps the raw k8s resource that is sent to the jetstack secure backend
type GatheredResource struct {
	// Resource is a reference to a k8s object that was found by the informer
	// should be of type unstructured.Unstructured, raw Object
	Resource any
	// DeletedAt is the time the resource was observed deleted; a zero value
	// means the resource still exists and is omitted from the JSON output.
	DeletedAt Time
}
// MarshalJSON serializes the wrapped resource plus, when set, the deletion
// timestamp; a zero DeletedAt is omitted from the output entirely.
func (v GatheredResource) MarshalJSON() ([]byte, error) {
	type wire struct {
		Resource  any    `json:"resource"`
		DeletedAt string `json:"deleted_at,omitempty"`
	}
	out := wire{Resource: v.Resource}
	if !v.DeletedAt.IsZero() {
		out.DeletedAt = v.DeletedAt.Format(TimeFormat)
	}
	return json.Marshal(out)
}
// UnmarshalJSON strictly decodes a GatheredResource, rejecting unknown
// fields. The resource is decoded as an unstructured.Unstructured.
func (v *GatheredResource) UnmarshalJSON(data []byte) error {
	var wire struct {
		Resource  *unstructured.Unstructured `json:"resource"`
		DeletedAt Time                       `json:"deleted_at"`
	}
	dec := json.NewDecoder(bytes.NewReader(data))
	dec.DisallowUnknownFields()
	if err := dec.Decode(&wire); err != nil {
		return err
	}
	v.Resource = wire.Resource
	v.DeletedAt = wire.DeletedAt
	return nil
}
// DynamicData is the DataReading.Data returned by the k8sdynamic.DataGathererDynamic
// gatherer
type DynamicData struct {
	// Items is a list of GatheredResource
	Items []*GatheredResource `json:"items"`
}
// DiscoveryData is the DataReading.Data returned by the k8sdiscovery.DataGathererDiscovery
// gatherer
type DiscoveryData struct {
	// ClusterID is the unique ID of the Kubernetes cluster which this snapshot was taken from.
	// This is sourced from the kube-system namespace UID,
	// which is assumed to be stable for the lifetime of the cluster.
	// - https://github.com/kubernetes/kubernetes/issues/77487#issuecomment-489786023
	ClusterID string `json:"cluster_id"`
	// ServerVersion is the version information of the k8s apiserver
	// See https://godoc.org/k8s.io/apimachinery/pkg/version#Info
	ServerVersion *version.Info `json:"server_version"`
}
// OIDCDiscoveryData is the DataReading.Data returned by the oidc.OIDCDiscovery
// gatherer
type OIDCDiscoveryData struct {
	// OIDCConfig contains OIDC configuration data from the API server's
	// `/.well-known/openid-configuration` endpoint
	OIDCConfig map[string]any `json:"openid_configuration,omitempty"`
	// OIDCConfigError contains any error encountered while fetching the OIDC configuration
	OIDCConfigError string `json:"openid_configuration_error,omitempty"`
	// JWKS contains JWKS data from the API server's `/openid/v1/jwks` endpoint
	JWKS map[string]any `json:"jwks,omitempty"`
	// JWKSError contains any error encountered while fetching the JWKS
	JWKSError string `json:"jwks_error,omitempty"`
}
================================================
FILE: api/datareading_test.go
================================================
package api
import (
"encoding/json"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// A zero-valued GatheredResource must serialize with no deleted_at key.
func TestJSONGatheredResourceDropsEmptyTime(t *testing.T) {
	got, err := json.Marshal(GatheredResource{})
	if err != nil {
		t.Fatalf("failed to marshal %s", err)
	}
	const expected = `{"resource":null}`
	if string(got) != expected {
		t.Fatalf("unexpected json \ngot %s\nwant %s", string(got), expected)
	}
}
// A non-zero DeletedAt must appear in the output as an RFC3339 string.
func TestJSONGatheredResourceSetsTimeWhenPresent(t *testing.T) {
	resource := GatheredResource{
		DeletedAt: Time{time.Date(2021, 3, 29, 0, 0, 0, 0, time.UTC)},
	}
	got, err := json.Marshal(resource)
	if err != nil {
		t.Fatalf("failed to marshal %s", err)
	}
	const expected = `{"resource":null,"deleted_at":"2021-03-29T00:00:00Z"}`
	if string(got) != expected {
		t.Fatalf("unexpected json \ngot %s\nwant %s", string(got), expected)
	}
}
// TestDataReading_UnmarshalJSON tests the UnmarshalJSON method of DataReading
// with various scenarios including valid and invalid JSON inputs.
func TestDataReading_UnmarshalJSON(t *testing.T) {
	tests := []struct {
		name         string
		input        string
		wantDataType any    // expected concrete type of DataReading.Data on success
		expectError  string // exact error string expected; empty for success cases
	}{
		// Happy paths: each known Data type must decode to its concrete type.
		{
			name: "DiscoveryData type",
			input: `{
				"cluster_id": "61b2db64-fd70-49a6-a257-08397b9b4bae",
				"data-gatherer": "discovery",
				"timestamp": "2024-06-01T12:00:00Z",
				"data": {
					"cluster_id": "60868ebf-6e47-4184-9bc0-20bb6824e210",
					"server_version": {
						"major": "1",
						"minor": "20",
						"gitVersion": "v1.20.0"
					}
				},
				"schema_version": "v1"
			}`,
			wantDataType: &DiscoveryData{},
		},
		{
			name: "DynamicData type",
			input: `{
				"cluster_id": "69050b54-c61a-4384-95c3-35f890377a67",
				"data-gatherer": "dynamic",
				"timestamp": "2024-06-01T12:00:00Z",
				"data": {"items": []},
				"schema_version": "v1"
			}`,
			wantDataType: &DynamicData{},
		},
		{
			name: "OIDCDiscoveryData type",
			input: `{
				"cluster_id": "11111111-2222-3333-4444-555555555555",
				"data-gatherer": "oidc",
				"timestamp": "2024-06-01T12:00:00Z",
				"data": {
					"openid_configuration": {"issuer": "https://example.com"},
					"jwks": {"keys": []}
				},
				"schema_version": "v1"
			}`,
			wantDataType: &OIDCDiscoveryData{},
		},
		// Error paths: malformed envelopes and undecodable Data payloads.
		{
			name:        "Invalid JSON",
			input:       `not a json`,
			expectError: "failed to parse DataReading: invalid character 'o' in literal null (expecting 'u')",
		},
		{
			name: "Missing data field",
			input: `{
				"cluster_id": "cc5a0429-8dc4-42c8-8e3a-eece9bca15c3",
				"data-gatherer": "missing-data-field",
				"timestamp": "2024-06-01T12:00:00Z",
				"schema_version": "v1"
			}`,
			expectError: `failed to parse DataReading.Data for gatherer "missing-data-field": empty data`,
		},
		{
			name: "Mismatched data type",
			input: `{
				"cluster_id": "c272b13e-b19e-4782-833f-d55a305f3c9e",
				"data-gatherer": "unknown-data-type",
				"timestamp": "2024-06-01T12:00:00Z",
				"data": "this should be an object",
				"schema_version": "v1"
			}`,
			expectError: `failed to parse DataReading.Data for gatherer "unknown-data-type": unknown type`,
		},
		{
			name: "Empty data field",
			input: `{
				"cluster_id": "07909675-113f-4b59-ba5e-529571a191e6",
				"data-gatherer": "empty-data",
				"timestamp": "2024-06-01T12:00:00Z",
				"data": {},
				"schema_version": "v1"
			}`,
			expectError: `failed to parse DataReading.Data for gatherer "empty-data": empty data`,
		},
		// Strict decoding: unknown fields are rejected at both levels.
		{
			name: "Additional field",
			input: `{
				"cluster_id": "11df7332-4b32-4f5a-903b-0cbbef381850",
				"data-gatherer": "additional-field",
				"timestamp": "2024-06-01T12:00:00Z",
				"data": {
					"cluster_id": "60868ebf-6e47-4184-9bc0-20bb6824e210"
				},
				"extra_field": "should cause error",
				"schema_version": "v1"
			}`,
			expectError: `failed to parse DataReading: json: unknown field "extra_field"`,
		},
		{
			name: "Additional data field",
			input: `{
				"cluster_id": "ca44c338-987e-4d57-8320-63f538db4292",
				"data-gatherer": "additional-data-field",
				"timestamp": "2024-06-01T12:00:00Z",
				"data": {
					"cluster_id": "60868ebf-6e47-4184-9bc0-20bb6824e210",
					"server_version": {
						"major": "1",
						"minor": "20",
						"gitVersion": "v1.20.0"
					},
					"extra_field": "should cause error"
				},
				"schema_version": "v1"
			}`,
			expectError: `failed to parse DataReading.Data for gatherer "additional-data-field": unknown type`,
		},
		{
			name:        "Empty JSON object",
			input:       `{}`,
			expectError: `failed to parse DataReading.Data for gatherer "": empty data`,
		},
		{
			name: "Null data field",
			input: `{
				"cluster_id": "36281cb3-7f3a-4efa-9879-7c988a9715b0",
				"data-gatherer": "null-data",
				"timestamp": "2024-06-01T12:00:00Z",
				"data": null,
				"schema_version": "v1"
			}`,
			expectError: `failed to parse DataReading.Data for gatherer "null-data": empty data`,
		},
		{
			name: "Empty string data field",
			input: `{
				"cluster_id": "7b7aa8ee-58ac-4818-9b29-c0a76296ea1d",
				"data-gatherer": "empty-string-data",
				"timestamp": "2024-06-01T12:00:00Z",
				"data": "",
				"schema_version": "v1"
			}`,
			expectError: `failed to parse DataReading.Data for gatherer "empty-string-data": unknown type`,
		},
		{
			name: "Array instead of object in data field",
			input: `{
				"cluster_id": "94d7757f-d084-4ccb-963b-f60fece0df2d",
				"data-gatherer": "array-data",
				"timestamp": "2024-06-01T12:00:00Z",
				"data": [],
				"schema_version": "v1"
			}`,
			expectError: `failed to parse DataReading.Data for gatherer "array-data": unknown type`,
		},
		{
			name: "Incorrect timestamp format",
			input: `{
				"cluster_id": "d58f298d-b8c1-4d99-aa85-c27d9aec6f97",
				"data-gatherer": "bad-timestamp",
				"timestamp": "not-a-timestamp",
				"data": {
					"items": []
				},
				"schema_version": "v1"
			}`,
			expectError: `failed to parse DataReading: parsing time "not-a-timestamp" as "2006-01-02T15:04:05Z07:00": cannot parse "not-a-timestamp" as "2006"`,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var dr DataReading
			err := dr.UnmarshalJSON([]byte(tt.input))
			if tt.expectError != "" {
				assert.EqualError(t, err, tt.expectError)
				return
			}
			assert.NoError(t, err)
			assert.IsType(t, tt.wantDataType, dr.Data)
		})
	}
}
================================================
FILE: cmd/agent.go
================================================
package cmd
import (
"fmt"
"os"
"github.com/spf13/cobra"
"github.com/jetstack/preflight/pkg/agent"
"github.com/jetstack/preflight/pkg/permissions"
)
// agentCmd is the top-level `agent` sub-command; its work is delegated to
// agent.Run.
var agentCmd = &cobra.Command{
	Use:   "agent",
	Short: "start the preflight agent",
	Long: `The agent will periodically gather data for the configured data
gatherers and send it to a remote backend for evaluation`,
	RunE: agent.Run,
}
// agentInfoCmd is `agent info`: prints the verbose version information
// followed by the built-in OAuth2 client configuration.
var agentInfoCmd = &cobra.Command{
	Use:   "info",
	Short: "print several internal parameters of the agent",
	Long:  `Print several internal parameters of the agent, as the built-in OAuth2 client ID.`,
	Run: func(cmd *cobra.Command, args []string) {
		printVersion(true)
		fmt.Println()
		printOAuth2Config()
	},
}
// agentRBACCmd is `agent rbac`: reads the agent config file, validates the
// configured data gatherers, and prints the minimal RBAC manifest they need.
var agentRBACCmd = &cobra.Command{
	Use:   "rbac",
	Short: "print the agent's minimal RBAC manifest",
	Long:  `Print RBAC string by reading GVRs`,
	RunE: func(cmd *cobra.Command, args []string) error {
		b, err := os.ReadFile(agent.Flags.ConfigFilePath)
		if err != nil {
			// Error strings are lowercase and wrap the cause with %w so
			// callers can use errors.Is/errors.As (Go conventions, ST1005).
			return fmt.Errorf("failed to read config file: %w", err)
		}
		cfg, err := agent.ParseConfig(b)
		if err != nil {
			return fmt.Errorf("failed to parse config file: %w", err)
		}
		if err := agent.ValidateDataGatherers(cfg.DataGatherers); err != nil {
			return fmt.Errorf("failed to validate data gatherers: %w", err)
		}
		out := permissions.GenerateFullManifest(cfg.DataGatherers)
		fmt.Print(out)
		return nil
	},
}
// init wires the agent command tree into the root command and registers the
// agent's flags on it.
func init() {
	rootCmd.AddCommand(agentCmd)
	agentCmd.AddCommand(agentInfoCmd)
	agentCmd.AddCommand(agentRBACCmd)
	agent.InitAgentCmdFlags(agentCmd, &agent.Flags)
}
================================================
FILE: cmd/agent_test.go
================================================
package cmd
import (
"bytes"
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"time"
"github.com/stretchr/testify/require"
arktesting "github.com/jetstack/preflight/internal/cyberark/testing"
)
// TestOutputModes tests the different output modes of the agent command.
// It does this by running the agent command in a subprocess with the
// appropriate flags and configuration files.
// It assumes that the test is being run from the "cmd" directory and that
// the repository root is the parent directory of the current working directory.
func TestOutputModes(t *testing.T) {
	repoRoot := findRepoRoot(t)
	// Offline mode: read readings from a local file and discard the output.
	t.Run("localfile", func(t *testing.T) {
		runSubprocess(t, repoRoot, []string{
			"--agent-config-file", filepath.Join(repoRoot, "examples/localfile/config.yaml"),
			"--input-path", filepath.Join(repoRoot, "examples/localfile/input.json"),
			"--output-path", "/dev/null",
		})
	})
	// Live mode: talks to the real MachineHub service, so it is gated
	// behind ARK_LIVE_TEST=true and the arktesting env check.
	t.Run("machinehub", func(t *testing.T) {
		if strings.ToLower(os.Getenv("ARK_LIVE_TEST")) != "true" {
			t.Skip("set ARK_LIVE_TEST=true to run this test against the live service")
			return
		}
		arktesting.SkipIfNoEnv(t)
		t.Log("This test runs against a live service and has been known to flake. If you see timeout issues it's possible that the test is flaking and it could be unrelated to your changes.")
		runSubprocess(t, repoRoot, []string{
			"--agent-config-file", filepath.Join(repoRoot, "examples/machinehub/config.yaml"),
			"--input-path", filepath.Join(repoRoot, "examples/machinehub/input.json"),
			"--machine-hub",
		})
	})
}
// findRepoRoot returns the absolute path to the repository root.
// It assumes that the test is being run from the "cmd" directory.
func findRepoRoot(t *testing.T) string {
	workingDir, err := os.Getwd()
	require.NoError(t, err)
	root, err := filepath.Abs(filepath.Join(workingDir, ".."))
	require.NoError(t, err)
	return root
}
// runSubprocess runs the current test in a subprocess with the given args.
// It sets the GO_CHILD environment variable to indicate to the subprocess
// that it should run the main function instead of the test function.
// It captures and logs the stdout and stderr of the subprocess.
// It fails the test if the subprocess exits with a non-zero status.
// It uses a timeout to avoid hanging indefinitely.
// NOTE(review): repoRoot is not used inside this function; it appears to be
// kept for call-site symmetry — confirm before removing.
func runSubprocess(t *testing.T, repoRoot string, args []string) {
	// Child branch: when GO_CHILD is set we are inside the re-executed test
	// binary, so rewrite os.Args and run the real agent entry point.
	if _, found := os.LookupEnv("GO_CHILD"); found {
		os.Args = append([]string{
			"preflight",
			"agent",
			"--log-level", "6",
			"--one-shot",
		}, args...)
		Execute()
		return
	}
	// Parent branch: re-execute this same test binary restricted to the
	// current test, with GO_CHILD=true so the child takes the branch above.
	t.Log("Running child process", os.Args[0], "-test.run=^"+t.Name()+"$")
	ctx, cancel := context.WithTimeout(t.Context(), time.Second*10)
	defer cancel()
	cmd := exec.CommandContext(ctx, os.Args[0], "-test.run=^"+t.Name()+"$")
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	cmd.Env = append(os.Environ(), "GO_CHILD=true")
	err := cmd.Run()
	// Always log captured output, pass or fail, to aid debugging.
	t.Logf("STDOUT\n%s\n", stdout.String())
	t.Logf("STDERR\n%s\n", stderr.String())
	require.NoError(t, err, fmt.Sprintf("Error: %v\nSTDERR: %s", err, stderr.String()))
}
================================================
FILE: cmd/ark/main.go
================================================
package main
import "github.com/jetstack/preflight/cmd"
// main is the entry point of the ark binary; it delegates to the shared
// preflight command tree.
func main() {
	cmd.Execute()
}
================================================
FILE: cmd/echo.go
================================================
package cmd
import (
"github.com/spf13/cobra"
"github.com/jetstack/preflight/pkg/echo"
)
// echoCmd is the `echo` sub-command; its work is delegated to echo.Echo.
var echoCmd = &cobra.Command{
	Use:   "echo",
	Short: "starts an echo server to test the agent",
	Long: `The agent sends data to a server. This echo server
can be used to act as the server part and echo the data received by the agent.`,
	RunE: echo.Echo,
}
// init registers the echo command on the root command and declares its
// listen address and output-format flags.
func init() {
	rootCmd.AddCommand(echoCmd)
	flags := echoCmd.PersistentFlags()
	flags.StringVarP(&echo.EchoListen, "listen", "l", ":8080", "Address where to listen.")
	flags.BoolVarP(&echo.Compact, "compact", "", false, "Prints compact output.")
}
================================================
FILE: cmd/helpers.go
================================================
package cmd
import (
"fmt"
"runtime"
"github.com/jetstack/preflight/pkg/client"
"github.com/jetstack/preflight/pkg/version"
)
// printVersion writes the Preflight version and platform to stdout; when
// verbose is true it also prints the commit, build date, and Go runtime
// version.
func printVersion(verbose bool) {
	platform := runtime.GOOS + "/" + runtime.GOARCH
	fmt.Println("Preflight version: ", version.PreflightVersion, platform)
	if !verbose {
		return
	}
	fmt.Println("  Commit: ", version.Commit)
	fmt.Println("  Built: ", version.BuildDate)
	fmt.Println("  Go: ", runtime.Version())
}
// printOAuth2Config writes the built-in OAuth2 client configuration
// (client ID and auth server domain) to stdout.
func printOAuth2Config() {
	fmt.Println("OAuth2: ")
	fmt.Println("  ClientID: ", client.ClientID)
	fmt.Println("  AuthServerDomain: ", client.AuthServerDomain)
}
================================================
FILE: cmd/root.go
================================================
package cmd
import (
"context"
"fmt"
"os"
"strings"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/klog/v2"
"github.com/jetstack/preflight/pkg/logs"
)
// rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{
	Use:   "preflight",
	Short: "Kubernetes cluster configuration checker 🚀",
	Long: `Preflight is a tool to automatically perform Kubernetes cluster
configuration checks using Open Policy Agent (OPA).
Preflight checks are bundled into Packages`,
	// Logging is initialized before any sub-command runs.
	PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
		return logs.Initialize()
	},
	// SilenceErrors and SilenceUsage prevents this command or any sub-command
	// from printing arbitrary text to stderr.
	// Why? To ensure that each line of output can be parsed as a single message
	// for consumption by logging agents such as fluentd.
	// Usage information is still available on stdout with the `-h` and `--help`
	// flags.
	SilenceErrors: true,
	SilenceUsage:  true,
}
// init copies PREFLIGHT_-prefixed environment variables onto the persistent
// flags of every sub-command already registered on rootCmd.
// NOTE(review): this relies on the init functions in the other files of this
// package having already registered their commands; Go runs a package's init
// functions in file order — confirm all commands are registered by this point.
func init() {
	for _, command := range rootCmd.Commands() {
		setFlagsFromEnv("PREFLIGHT_", command.PersistentFlags())
	}
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
// If the root command or sub-command returns an error, the error message will
// be logged and the process will exit with status 1.
func Execute() {
	logs.AddFlags(rootCmd.PersistentFlags())
	// Attach a klog-backed logger to the context so sub-commands can log.
	ctx := klog.NewContext(context.Background(), klog.Background())
	var exitCode int
	if err := rootCmd.ExecuteContext(ctx); err != nil {
		exitCode = 1
		klog.ErrorS(err, "Exiting due to error", "exit-code", exitCode)
	}
	// Flush any buffered log lines before exiting with the computed code.
	klog.FlushAndExit(klog.ExitFlushTimeout, exitCode)
}
// setFlagsFromEnv sets every flag in fs that was not given on the command
// line from an environment variable named <PREFIX>_<FLAG_NAME>, where the
// flag name is upper-cased and dashes become underscores.
func setFlagsFromEnv(prefix string, fs *pflag.FlagSet) {
	explicitlySet := map[string]bool{}
	fs.Visit(func(f *pflag.Flag) {
		explicitlySet[f.Name] = true
	})
	// remove trailing _ to reduce common errors with the prefix, i.e. people setting it to MY_PROG_
	cleanPrefix := strings.TrimSuffix(prefix, "_")
	fs.VisitAll(func(f *pflag.Flag) {
		// ignore flags set from the commandline
		if explicitlySet[f.Name] {
			return
		}
		envKey := cleanPrefix + "_" + strings.ReplaceAll(strings.ToUpper(f.Name), "-", "_")
		if value, ok := os.LookupEnv(envKey); ok {
			_ = f.Value.Set(value)
		}
	})
}
================================================
FILE: cmd/version.go
================================================
package cmd
import (
"github.com/spf13/cobra"
)
var verbose bool
// versionCmd is the `version` sub-command; it prints the build version,
// optionally with commit/build-date details when --verbose is set.
var versionCmd = &cobra.Command{
	Use:   "version",
	Short: "Display the version",
	Long: `Display preflight version.
`,
	Run: func(cmd *cobra.Command, args []string) {
		printVersion(verbose)
	},
}
// init registers the version command on the root command and declares its
// --verbose flag.
func init() {
	rootCmd.AddCommand(versionCmd)
	flags := versionCmd.PersistentFlags()
	flags.BoolVar(&verbose, "verbose", false, "If enabled, displays the additional information about this build.")
}
================================================
FILE: deploy/charts/disco-agent/.helmignore
================================================
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
================================================
FILE: deploy/charts/disco-agent/Chart.yaml
================================================
apiVersion: v2
name: disco-agent
description: |-
The disco-agent connects your Kubernetes or OpenShift cluster to CyberArk Discovery and Context.
maintainers:
- name: CyberArk
email: support@cyberark.com
url: https://cyberark.com
sources:
- https://github.com/jetstack/jetstack-secure
# These versions are meant to be overridden by `make helm-chart`. No `v` prefix
# for the `version` because Helm doesn't support auto-determining the latest
# version for OCI Helm charts that use a `v` prefix.
version: 0.0.0
appVersion: "v0.0.0"
================================================
FILE: deploy/charts/disco-agent/README.md
================================================
# disco-agent
The CyberArk Discovery and Context Agent connects your Kubernetes or OpenShift
cluster to the Discovery and Context service of the CyberArk Identity Security Platform.
## Quick Start
### Create a Namespace
Create a namespace for the agent:
```sh
export NAMESPACE=cyberark
kubectl create ns "$NAMESPACE" || true
```
### Add credentials to a Secret
You will require tenant details and credentials for the CyberArk Identity Security Platform.
Put them in the following environment variables:
```sh
export ARK_SUBDOMAIN= # your CyberArk tenant subdomain e.g. tlskp-test
export ARK_USERNAME= # your CyberArk username
export ARK_SECRET= # your CyberArk password
# OPTIONAL: the URL for the CyberArk Discovery API if not using the production environment
export ARK_DISCOVERY_API=https://platform-discovery.integration-cyberark.cloud/
```
Create a Secret containing the tenant details and credentials:
```sh
kubectl create secret generic agent-credentials \
  --namespace "$NAMESPACE" \
  --from-literal=ARK_USERNAME="$ARK_USERNAME" \
  --from-literal=ARK_SECRET="$ARK_SECRET" \
  --from-literal=ARK_SUBDOMAIN="$ARK_SUBDOMAIN" \
  --from-literal=ARK_DISCOVERY_API="$ARK_DISCOVERY_API"
```
Alternatively, use the following Secret as a template:
```yaml
# agent-credentials.yaml
apiVersion: v1
kind: Secret
metadata:
name: agent-credentials
namespace: cyberark
type: Opaque
stringData:
ARK_SUBDOMAIN: $ARK_SUBDOMAIN # your CyberArk tenant subdomain e.g. tlskp-test
ARK_SECRET: $ARK_SECRET # your CyberArk password
ARK_USERNAME: $ARK_USERNAME # your CyberArk username
# OPTIONAL: the URL for the CyberArk Discovery API if not using the production environment
# ARK_DISCOVERY_API: https://platform-discovery.integration-cyberark.cloud/
```
### Deploy the agent
Deploy the agent:
```sh
helm upgrade agent "oci://${OCI_BASE}/charts/disco-agent" \
--install \
--create-namespace \
--namespace "$NAMESPACE" \
--set fullnameOverride=disco-agent
```
### Troubleshooting
Check the Pod and its events:
```sh
kubectl describe -n "$NAMESPACE" pods -l app.kubernetes.io/name=disco-agent
```
Check the logs:
```sh
kubectl logs deployments/disco-agent --namespace "${NAMESPACE}" --follow
```
## Values
<!-- AUTO-GENERATED -->
#### **replicaCount** ~ `number`
> Default value:
> ```yaml
> 1
> ```
This will set the replicaset count more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
#### **acceptTerms** ~ `bool`
> Default value:
> ```yaml
> false
> ```
Must be set to indicate that you have read and accepted the CyberArk Terms of Service. If false, the helm chart will fail to install and will print a message with instructions on how to accept the TOS.
#### **imageRegistry** ~ `string`
> Default value:
> ```yaml
> quay.io
> ```
The container registry used for disco-agent images by default. This can include path prefixes (e.g. "artifactory.example.com/docker").
#### **imageNamespace** ~ `string`
> Default value:
> ```yaml
> jetstack
> ```
The repository namespace used for disco-agent images by default.
Examples:
- jetstack
- custom-namespace
#### **image.registry** ~ `string`
Deprecated: per-component registry prefix.
If set, this value is *prepended* to the image repository that the chart would otherwise render. This applies both when `image.repository` is set and when the repository is computed from
`imageRegistry` + `imageNamespace` + `image.name`.
This can produce "double registry" style references such as
`legacy.example.io/quay.io/jetstack/...`. Prefer using the global
`imageRegistry`/`imageNamespace` values.
#### **image.repository** ~ `string`
> Default value:
> ```yaml
> ""
> ```
Full repository override (takes precedence over `imageRegistry`, `imageNamespace`, and `image.name`).
Example: quay.io/jetstack/disco-agent
#### **image.name** ~ `string`
> Default value:
> ```yaml
> disco-agent
> ```
The image name for the Discovery Agent.
This is used (together with `imageRegistry` and `imageNamespace`) to construct the full image reference.
#### **image.pullPolicy** ~ `string`
> Default value:
> ```yaml
> IfNotPresent
> ```
This sets the pull policy for images.
#### **image.tag** ~ `string`
> Default value:
> ```yaml
> ""
> ```
Override the image tag to deploy by setting this variable. If no value is set, the chart's appVersion is used.
#### **image.digest** ~ `string`
> Default value:
> ```yaml
> ""
> ```
Override the image digest to deploy by setting this variable. If set together with `image.tag`, the rendered image will include both tag and digest.
#### **imagePullSecrets** ~ `array`
> Default value:
> ```yaml
> []
> ```
This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
#### **nameOverride** ~ `string`
> Default value:
> ```yaml
> ""
> ```
This is to override the chart name.
#### **fullnameOverride** ~ `string`
> Default value:
> ```yaml
> ""
> ```
#### **serviceAccount.create** ~ `bool`
> Default value:
> ```yaml
> true
> ```
Specifies whether a service account should be created
#### **serviceAccount.automount** ~ `bool`
> Default value:
> ```yaml
> true
> ```
Automatically mount a ServiceAccount's API credentials?
#### **serviceAccount.annotations** ~ `object`
> Default value:
> ```yaml
> {}
> ```
Annotations to add to the service account
#### **serviceAccount.name** ~ `string`
> Default value:
> ```yaml
> ""
> ```
The name of the service account to use.
If not set and create is true, a name is generated using the fullname template
#### **podAnnotations** ~ `object`
> Default value:
> ```yaml
> {}
> ```
This is for setting Kubernetes Annotations to a Pod. For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
#### **podLabels** ~ `object`
> Default value:
> ```yaml
> {}
> ```
This is for setting Kubernetes Labels to a Pod.
For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
#### **podSecurityContext** ~ `object`
> Default value:
> ```yaml
> {}
> ```
#### **securityContext** ~ `object`
> Default value:
> ```yaml
> allowPrivilegeEscalation: false
> capabilities:
> drop:
> - ALL
> readOnlyRootFilesystem: true
> runAsNonRoot: true
> seccompProfile:
> type: RuntimeDefault
> ```
Add Container specific SecurityContext settings to the container. Takes precedence over `podSecurityContext` when set. See https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container
#### **resources** ~ `object`
> Default value:
> ```yaml
> {}
> ```
#### **volumes** ~ `array`
> Default value:
> ```yaml
> []
> ```
Additional volumes on the output Deployment definition.
#### **volumeMounts** ~ `array`
> Default value:
> ```yaml
> []
> ```
Additional volumeMounts on the output Deployment definition.
#### **nodeSelector** ~ `object`
> Default value:
> ```yaml
> {}
> ```
#### **tolerations** ~ `array`
> Default value:
> ```yaml
> []
> ```
#### **affinity** ~ `object`
> Default value:
> ```yaml
> {}
> ```
#### **http_proxy** ~ `string`
Configures the HTTP_PROXY environment variable where a HTTP proxy is required.
#### **https_proxy** ~ `string`
Configures the HTTPS_PROXY environment variable where a HTTP proxy is required.
#### **no_proxy** ~ `string`
Configures the NO_PROXY environment variable where a HTTP proxy is required, but certain domains should be excluded.
#### **podDisruptionBudget** ~ `object`
> Default value:
> ```yaml
> enabled: false
> ```
Configure a PodDisruptionBudget for the agent's Deployment. If running with multiple replicas, consider setting podDisruptionBudget.enabled to true.
#### **config.period** ~ `string`
> Default value:
> ```yaml
> 12h0m0s
> ```
Push data every 12 hours unless changed.
#### **config.excludeAnnotationKeysRegex** ~ `array`
> Default value:
> ```yaml
> []
> ```
You can configure the agent to exclude some annotations or labels from being pushed. All Kubernetes objects are affected. The objects are still pushed, but the specified annotations and labels are removed before being pushed.
The dot is the only character that needs to be escaped in the regex. Use either double quotes with escaped single quotes or unquoted strings for the regex to avoid YAML parsing issues with `\.`.
Example: excludeAnnotationKeysRegex: ['^kapp\.k14s\.io/original.*']
#### **config.excludeLabelKeysRegex** ~ `array`
> Default value:
> ```yaml
> []
> ```
#### **config.clusterName** ~ `string`
> Default value:
> ```yaml
> ""
> ```
A human readable name for the cluster where the agent is deployed (optional).
This cluster name will be associated with the data that the agent uploads to the Discovery and Context service. If empty (the default), the service account name will be used instead.
#### **config.clusterDescription** ~ `string`
> Default value:
> ```yaml
> ""
> ```
A short description of the cluster where the agent is deployed (optional).
This description will be associated with the data that the agent uploads to the Discovery and Context service. The description may include contact information such as the email address of the cluster administrator, so that any problems and risks identified by the Discovery and Context service can be communicated to the people responsible for the affected secrets.
#### **config.sendSecretValues** ~ `bool`
> Default value:
> ```yaml
> true
> ```
Enable sending of Secret values to CyberArk in addition to metadata. Metadata is always sent, but the actual values of Secrets are not sent by default. When enabled, Secret data is encrypted using envelope encryption using a key managed by CyberArk, fetched from the Discovery and Context service.
#### **authentication.secretName** ~ `string`
> Default value:
> ```yaml
> agent-credentials
> ```
#### **extraArgs** ~ `array`
> Default value:
> ```yaml
> []
> ```
```yaml
extraArgs:
- --logging-format=json
- --log-level=6 # To enable HTTP request logging
```
#### **pprof.enabled** ~ `bool`
> Default value:
> ```yaml
> false
> ```
Enable profiling with the pprof endpoint
#### **metrics.enabled** ~ `bool`
> Default value:
> ```yaml
> true
> ```
Enable the metrics server.
If false, the metrics server will be disabled and the other metrics fields below will be ignored.
#### **metrics.podmonitor.enabled** ~ `bool`
> Default value:
> ```yaml
> false
> ```
Create a PodMonitor to add the metrics to Prometheus, if you are using Prometheus Operator. See https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor
#### **metrics.podmonitor.namespace** ~ `string`
The namespace that the pod monitor should live in.
Defaults to the disco-agent namespace.
#### **metrics.podmonitor.prometheusInstance** ~ `string`
> Default value:
> ```yaml
> default
> ```
Specifies the `prometheus` label on the created PodMonitor. This is used when different Prometheus instances have label selectors matching different PodMonitors.
#### **metrics.podmonitor.interval** ~ `string`
> Default value:
> ```yaml
> 60s
> ```
The interval to scrape metrics.
#### **metrics.podmonitor.scrapeTimeout** ~ `string`
> Default value:
> ```yaml
> 30s
> ```
The timeout before a metrics scrape fails.
#### **metrics.podmonitor.labels** ~ `object`
> Default value:
> ```yaml
> {}
> ```
Additional labels to add to the PodMonitor.
#### **metrics.podmonitor.annotations** ~ `object`
> Default value:
> ```yaml
> {}
> ```
Additional annotations to add to the PodMonitor.
#### **metrics.podmonitor.honorLabels** ~ `bool`
> Default value:
> ```yaml
> false
> ```
Keep labels from scraped data, overriding server-side labels.
#### **metrics.podmonitor.endpointAdditionalProperties** ~ `object`
> Default value:
> ```yaml
> {}
> ```
EndpointAdditionalProperties allows setting additional properties on the endpoint such as relabelings, metricRelabelings etc.
For example:
```yaml
endpointAdditionalProperties:
relabelings:
- action: replace
sourceLabels:
- __meta_kubernetes_pod_node_name
targetLabel: instance
```
<!-- /AUTO-GENERATED -->
================================================
FILE: deploy/charts/disco-agent/templates/NOTES.txt
================================================
{{- /*
Post-install notes shown to the user after `helm install`/`helm upgrade`.
The if/end actions are whitespace-chomped ({{- ... }}) so that no stray
blank lines are printed when sendSecretValues is false.
*/ -}}
CHART NAME: {{ .Chart.Name }}
CHART VERSION: {{ .Chart.Version }}
APP VERSION: {{ .Chart.AppVersion }}
- Check the application is running:
  > kubectl get pods -n {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
- Check the application logs for successful connection to the platform:
  > kubectl logs -n {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
{{- if .Values.config.sendSecretValues }}
NB: sendSecretValues is set to "true". Encrypted secret data will be sent to the CyberArk Discovery and Context service
{{- end }}
================================================
FILE: deploy/charts/disco-agent/templates/_helpers.tpl
================================================
{{/*
Expand the name of the chart.
Returns .Values.nameOverride when set, otherwise the chart name, truncated to
63 characters (the DNS label length limit) with any trailing "-" removed.
*/}}
{{- define "disco-agent.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
Precedence: fullnameOverride > release name (when it already contains the
chart name) > "<release>-<chart>".
*/}}
{{- define "disco-agent.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
"+" is replaced with "_" because "+" is not a valid character in label values.
*/}}
{{- define "disco-agent.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels applied to every object rendered by this chart.
Includes the selector labels plus chart/version/managed-by metadata.
*/}}
{{- define "disco-agent.labels" -}}
helm.sh/chart: {{ include "disco-agent.chart" . }}
{{ include "disco-agent.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels.
Used in the Deployment's spec.selector and the PodMonitor/PDB selectors;
these must stay stable across upgrades because Deployment selectors are
immutable.
*/}}
{{- define "disco-agent.selectorLabels" -}}
app.kubernetes.io/name: {{ include "disco-agent.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use.
When serviceAccount.create is true: serviceAccount.name, falling back to the
chart fullname. When false: serviceAccount.name, falling back to "default"
(the namespace's default ServiceAccount).
*/}}
{{- define "disco-agent.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "disco-agent.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Util function for generating an image reference based on the provided options.
This function is derived from similar functions used in the cert-manager GitHub organization
*/}}
{{- define "disco-agent.image" -}}
{{- /*
Calling convention:
- (tuple <imageValues> <imageRegistry> <imageNamespace> <defaultReference>)
We intentionally pass imageRegistry/imageNamespace as explicit arguments rather than reading
from `.Values` inside this helper, because `helm-tool lint` does not reliably track `.Values.*`
usage through tuple/variable indirection.
*/ -}}
{{- if ne (len .) 4 -}}
{{- fail (printf "ERROR: template \"disco-agent.image\" expects (tuple <imageValues> <imageRegistry> <imageNamespace> <defaultReference>), got %d arguments" (len .)) -}}
{{- end -}}
{{- $image := index . 0 -}}
{{- $imageRegistry := index . 1 | default "" -}}
{{- $imageNamespace := index . 2 | default "" -}}
{{- $defaultReference := index . 3 -}}
{{- /* Resolve the repository part of the reference. */ -}}
{{- $repository := "" -}}
{{- if $image.repository -}}
{{- /* Full repository override: takes precedence over registry/namespace/name composition. */ -}}
{{- $repository = $image.repository -}}
{{- /*
Backwards compatibility: if image.registry is set, additionally prefix the repository with this registry.
*/ -}}
{{- if $image.registry -}}
{{- $repository = printf "%s/%s" $image.registry $repository -}}
{{- end -}}
{{- else -}}
{{- /* Compose the repository from registry + namespace + name. */ -}}
{{- $name := required "ERROR: image.name must be set when image.repository is empty" $image.name -}}
{{- $repository = $name -}}
{{- if $imageNamespace -}}
{{- $repository = printf "%s/%s" $imageNamespace $repository -}}
{{- end -}}
{{- if $imageRegistry -}}
{{- $repository = printf "%s/%s" $imageRegistry $repository -}}
{{- end -}}
{{- /*
Backwards compatibility: if image.registry is set, additionally prefix the repository with this registry.
*/ -}}
{{- if $image.registry -}}
{{- $repository = printf "%s/%s" $image.registry $repository -}}
{{- end -}}
{{- end -}}
{{- $repository -}}
{{- /* Append tag and/or digest; fall back to the caller-supplied default reference (e.g. ":<appVersion>") when neither is set. */ -}}
{{- if and $image.tag $image.digest -}}
{{- printf ":%s@%s" $image.tag $image.digest -}}
{{- else if $image.tag -}}
{{- printf ":%s" $image.tag -}}
{{- else if $image.digest -}}
{{- printf "@%s" $image.digest -}}
{{- else -}}
{{- printf "%s" $defaultReference -}}
{{- end -}}
{{- end }}
================================================
FILE: deploy/charts/disco-agent/templates/configmap.yaml
================================================
{{- /*
ConfigMap holding the agent configuration file (config.yaml), mounted into
the Deployment at /etc/disco-agent. The data-gatherers list below defines
which cluster resources the agent collects; changing it changes what is
uploaded to the Discovery and Context service.
NOTE: do not add YAML "#" comments inside the config.yaml literal block —
they would become part of the rendered agent configuration and break the
chart's snapshot tests.
*/ -}}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "disco-agent.fullname" . }}-config
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "disco-agent.labels" . | nindent 4 }}
data:
  config.yaml: |-
    cluster_name: {{ .Values.config.clusterName | quote }}
    cluster_description: {{ .Values.config.clusterDescription | quote }}
    period: {{ .Values.config.period | quote }}
    {{- with .Values.config.excludeAnnotationKeysRegex }}
    exclude-annotation-keys-regex:
      {{- . | toYaml | nindent 6 }}
    {{- end }}
    {{- with .Values.config.excludeLabelKeysRegex }}
    exclude-label-keys-regex:
      {{- . | toYaml | nindent 6 }}
    {{- end }}
    data-gatherers:
    - kind: oidc
      name: ark/oidc
    - kind: k8s-discovery
      name: ark/discovery
    - kind: k8s-dynamic
      name: ark/secrets
      config:
        resource-type:
          version: v1
          resource: secrets
        field-selectors:
        - type!=kubernetes.io/dockercfg
        - type!=kubernetes.io/dockerconfigjson
        - type!=bootstrap.kubernetes.io/token
        - type!=helm.sh/release.v1
    - kind: k8s-dynamic
      name: ark/serviceaccounts
      config:
        resource-type:
          resource: serviceaccounts
          version: v1
    - kind: k8s-dynamic
      name: ark/roles
      config:
        resource-type:
          version: v1
          group: rbac.authorization.k8s.io
          resource: roles
    - kind: k8s-dynamic
      name: ark/clusterroles
      config:
        resource-type:
          version: v1
          group: rbac.authorization.k8s.io
          resource: clusterroles
    - kind: k8s-dynamic
      name: ark/rolebindings
      config:
        resource-type:
          version: v1
          group: rbac.authorization.k8s.io
          resource: rolebindings
    - kind: k8s-dynamic
      name: ark/clusterrolebindings
      config:
        resource-type:
          version: v1
          group: rbac.authorization.k8s.io
          resource: clusterrolebindings
    - kind: k8s-dynamic
      name: ark/jobs
      config:
        resource-type:
          version: v1
          group: batch
          resource: jobs
    - kind: k8s-dynamic
      name: ark/cronjobs
      config:
        resource-type:
          version: v1
          group: batch
          resource: cronjobs
    - kind: k8s-dynamic
      name: ark/deployments
      config:
        resource-type:
          version: v1
          group: apps
          resource: deployments
    - kind: k8s-dynamic
      name: ark/statefulsets
      config:
        resource-type:
          version: v1
          group: apps
          resource: statefulsets
    - kind: k8s-dynamic
      name: ark/daemonsets
      config:
        resource-type:
          version: v1
          group: apps
          resource: daemonsets
    - kind: k8s-dynamic
      name: ark/pods
      config:
        resource-type:
          version: v1
          resource: pods
    - kind: k8s-dynamic
      name: ark/configmaps
      config:
        resource-type:
          resource: configmaps
          version: v1
        label-selectors:
        - conjur.org/name=conjur-connect-configmap
    - kind: k8s-dynamic
      name: ark/esoexternalsecrets
      config:
        resource-type:
          group: external-secrets.io
          version: v1
          resource: externalsecrets
    - kind: k8s-dynamic
      name: ark/esosecretstores
      config:
        resource-type:
          group: external-secrets.io
          version: v1
          resource: secretstores
    - kind: k8s-dynamic
      name: ark/esoclusterexternalsecrets
      config:
        resource-type:
          group: external-secrets.io
          version: v1
          resource: clusterexternalsecrets
    - kind: k8s-dynamic
      name: ark/esoclustersecretstores
      config:
        resource-type:
          group: external-secrets.io
          version: v1
          resource: clustersecretstores
================================================
FILE: deploy/charts/disco-agent/templates/deployment.yaml
================================================
{{- /*
Deployment for the disco-agent.
Rendering fails unless the user has accepted the CyberArk Terms of Service
(acceptTerms=true), so `helm install` cannot proceed silently.
Changes from previous revision:
- HTTP_PROXY/HTTPS_PROXY/NO_PROXY values are now rendered with `| quote`:
  unquoted URLs (e.g. containing "{" or "#") can produce invalid YAML.
- metadata.namespace is set explicitly, for consistency with the other
  namespaced manifests in this chart (configmap.yaml, poddisruptionbudget.yaml).
*/ -}}
{{- if not .Values.acceptTerms }}
{{- fail "\n\n=================================================================\n Terms & Conditions Notice\n=================================================================\n\nBefore installing this application, you must review and accept\nthe terms and conditions available at:\nhttps://www.cyberark.com/contract-terms/\n\nTo proceed with installation, you must indicate acceptance by\nsetting:\n\n - In your values file: acceptTerms: true\n or\n - Via the Helm flag: --set acceptTerms=true\n\nBy continuing with the next command, you confirm that you have\nreviewed and accepted these terms and conditions.\n\n=================================================================\n" }}
{{- end }}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "disco-agent.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "disco-agent.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "disco-agent.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "disco-agent.labels" . | nindent 8 }}
        {{- with .Values.podLabels }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "disco-agent.serviceAccountName" . }}
      {{- with .Values.podSecurityContext }}
      securityContext:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      containers:
        - name: agent
          {{- with .Values.securityContext }}
          securityContext:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          {{- /* Default reference is ":<appVersion>" when neither image.tag nor image.digest is set. */}}
          image: "{{ template "disco-agent.image" (tuple .Values.image .Values.imageRegistry .Values.imageNamespace (printf ":%s" .Chart.AppVersion)) }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          env:
            # Downward API metadata identifying this pod to the agent.
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_UID
              valueFrom:
                fieldRef:
                  fieldPath: metadata.uid
            - name: POD_NODE
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # CyberArk credentials, read from the user-provided Secret.
            - name: ARK_USERNAME
              valueFrom:
                secretKeyRef:
                  name: {{ .Values.authentication.secretName }}
                  key: ARK_USERNAME
            - name: ARK_SECRET
              valueFrom:
                secretKeyRef:
                  name: {{ .Values.authentication.secretName }}
                  key: ARK_SECRET
            - name: ARK_SUBDOMAIN
              valueFrom:
                secretKeyRef:
                  name: {{ .Values.authentication.secretName }}
                  key: ARK_SUBDOMAIN
            # Optional override of the Discovery API endpoint.
            - name: ARK_DISCOVERY_API
              valueFrom:
                secretKeyRef:
                  name: {{ .Values.authentication.secretName }}
                  key: ARK_DISCOVERY_API
                  optional: true
            - name: ARK_SEND_SECRET_VALUES
              value: {{ .Values.config.sendSecretValues | default "false" | quote }}
            {{- with .Values.http_proxy }}
            - name: HTTP_PROXY
              value: {{ . | quote }}
            {{- end }}
            {{- with .Values.https_proxy }}
            - name: HTTPS_PROXY
              value: {{ . | quote }}
            {{- end }}
            {{- with .Values.no_proxy }}
            - name: NO_PROXY
              value: {{ . | quote }}
            {{- end }}
          args:
            - "agent"
            - "-c"
            - "/etc/disco-agent/config.yaml"
            - --machine-hub
            - --logging-format=json
            {{- if .Values.metrics.enabled }}
            - --enable-metrics
            {{- end }}
            {{- if .Values.pprof.enabled }}
            - --enable-pprof
            {{- end }}
            {{- range .Values.extraArgs }}
            - {{ . | quote }}
            {{- end }}
          {{- with .Values.resources }}
          resources:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          volumeMounts:
            # Agent configuration rendered by configmap.yaml.
            - name: config
              mountPath: "/etc/disco-agent"
              readOnly: true
            {{- with .Values.volumeMounts }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
          ports:
            # Serves metrics and (optionally) pprof; scraped by the PodMonitor.
            - name: agent-api
              containerPort: 8081
      volumes:
        - name: config
          configMap:
            name: {{ include "disco-agent.fullname" . }}-config
            optional: false
        {{- with .Values.volumes }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
================================================
FILE: deploy/charts/disco-agent/templates/poddisruptionbudget.yaml
================================================
{{- /*
PodDisruptionBudget for the agent Deployment; rendered only when
podDisruptionBudget.enabled is true.
The user may set podDisruptionBudget.minAvailable or .maxUnavailable
(hasKey is used so that explicit 0 is honoured); when neither key is
present the chart defaults to minAvailable: 1.
*/ -}}
{{- if .Values.podDisruptionBudget.enabled }}
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: {{ include "disco-agent.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "disco-agent.labels" . | nindent 4 }}
spec:
  selector:
    matchLabels:
      {{- include "disco-agent.selectorLabels" . | nindent 6 }}
  {{- if not (or (hasKey .Values.podDisruptionBudget "minAvailable") (hasKey .Values.podDisruptionBudget "maxUnavailable")) }}
  minAvailable: 1 # Default value because minAvailable and maxUnavailable are not set
  {{- end }}
  {{- if hasKey .Values.podDisruptionBudget "minAvailable" }}
  minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}
  {{- end }}
  {{- if hasKey .Values.podDisruptionBudget "maxUnavailable" }}
  maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }}
  {{- end }}
{{- end }}
================================================
FILE: deploy/charts/disco-agent/templates/podmonitor.yaml
================================================
{{- /*
Prometheus Operator PodMonitor for the agent's metrics endpoint; rendered
only when both metrics.enabled and metrics.podmonitor.enabled are true.
When metrics.podmonitor.namespace places the PodMonitor in a different
namespace, a namespaceSelector pointing back at the release namespace is
required so Prometheus still discovers the agent pods.
*/ -}}
{{- if and .Values.metrics.enabled .Values.metrics.podmonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
  name: {{ include "disco-agent.fullname" . }}
  {{- if .Values.metrics.podmonitor.namespace }}
  namespace: {{ .Values.metrics.podmonitor.namespace }}
  {{- else }}
  namespace: {{ .Release.Namespace | quote }}
  {{- end }}
  labels:
    {{- include "disco-agent.labels" . | nindent 4 }}
    prometheus: {{ .Values.metrics.podmonitor.prometheusInstance }}
    {{- with .Values.metrics.podmonitor.labels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
  {{- with .Values.metrics.podmonitor.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  jobLabel: {{ include "disco-agent.fullname" . }}
  selector:
    matchLabels:
      {{- include "disco-agent.selectorLabels" . | nindent 6 }}
  {{- if .Values.metrics.podmonitor.namespace }}
  namespaceSelector:
    matchNames:
      - {{ .Release.Namespace | quote }}
  {{- end }}
  podMetricsEndpoints:
    # "agent-api" is the container port declared in deployment.yaml.
    - port: agent-api
      path: /metrics
      interval: {{ .Values.metrics.podmonitor.interval }}
      scrapeTimeout: {{ .Values.metrics.podmonitor.scrapeTimeout }}
      honorLabels: {{ .Values.metrics.podmonitor.honorLabels }}
      {{- with .Values.metrics.podmonitor.endpointAdditionalProperties }}
      {{- toYaml . | nindent 4 }}
      {{- end }}
{{- end }}
================================================
FILE: deploy/charts/disco-agent/templates/rbac.yaml
================================================
{{- /*
RBAC for the agent's ServiceAccount:
- Role/RoleBinding (namespaced): allow the agent to emit Events in its own namespace.
- ClusterRoleBindings: built-in "view" role, OIDC issuer discovery, plus
  chart-defined ClusterRoles for reading Secrets, RBAC objects, and
  External Secrets Operator resources (matching the data-gatherers in
  configmap.yaml).
Change from previous revision: the namespaced Role and RoleBinding now set
metadata.namespace explicitly, for consistency with the other namespaced
manifests in this chart (configmap.yaml, poddisruptionbudget.yaml).
*/ -}}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ include "disco-agent.fullname" . }}-event-emitted
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "disco-agent.labels" . | nindent 4 }}
rules:
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ include "disco-agent.fullname" . }}-event-emitted
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "disco-agent.labels" . | nindent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ include "disco-agent.fullname" . }}-event-emitted
subjects:
  - kind: ServiceAccount
    name: {{ include "disco-agent.serviceAccountName" . }}
    namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ include "disco-agent.fullname" . }}-cluster-viewer
  labels:
    {{- include "disco-agent.labels" . | nindent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  # Built-in aggregate read-only role.
  name: view
subjects:
  - kind: ServiceAccount
    name: {{ include "disco-agent.serviceAccountName" . }}
    namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "disco-agent.fullname" . }}-secret-reader
  labels:
    {{- include "disco-agent.labels" . | nindent 4 }}
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ include "disco-agent.fullname" . }}-secret-reader
  labels:
    {{- include "disco-agent.labels" . | nindent 4 }}
roleRef:
  kind: ClusterRole
  name: {{ include "disco-agent.fullname" . }}-secret-reader
  apiGroup: rbac.authorization.k8s.io
subjects:
  - kind: ServiceAccount
    name: {{ include "disco-agent.serviceAccountName" . }}
    namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "disco-agent.fullname" . }}-rbac-reader
  labels:
    {{- include "disco-agent.labels" . | nindent 4 }}
rules:
  - apiGroups: ["rbac.authorization.k8s.io"]
    resources:
      - roles
      - clusterroles
      - rolebindings
      - clusterrolebindings
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ include "disco-agent.fullname" . }}-rbac-reader
  labels:
    {{- include "disco-agent.labels" . | nindent 4 }}
roleRef:
  kind: ClusterRole
  name: {{ include "disco-agent.fullname" . }}-rbac-reader
  apiGroup: rbac.authorization.k8s.io
subjects:
  - kind: ServiceAccount
    name: {{ include "disco-agent.serviceAccountName" . }}
    namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ include "disco-agent.fullname" . }}-oidc-discovery
  labels:
    {{- include "disco-agent.labels" . | nindent 4 }}
roleRef:
  kind: ClusterRole
  # Built-in role exposing the service account issuer discovery endpoints.
  name: system:service-account-issuer-discovery
  apiGroup: rbac.authorization.k8s.io
subjects:
  - kind: ServiceAccount
    name: {{ include "disco-agent.serviceAccountName" . }}
    namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "disco-agent.fullname" . }}-eso-reader
  labels:
    {{- include "disco-agent.labels" . | nindent 4 }}
rules:
  - apiGroups: ["external-secrets.io"]
    resources:
      - externalsecrets
      - clusterexternalsecrets
      - secretstores
      - clustersecretstores
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ include "disco-agent.fullname" . }}-eso-reader
  labels:
    {{- include "disco-agent.labels" . | nindent 4 }}
roleRef:
  kind: ClusterRole
  name: {{ include "disco-agent.fullname" . }}-eso-reader
  apiGroup: rbac.authorization.k8s.io
subjects:
  - kind: ServiceAccount
    name: {{ include "disco-agent.serviceAccountName" . }}
    namespace: {{ .Release.Namespace }}
================================================
FILE: deploy/charts/disco-agent/templates/serviceaccount.yaml
================================================
{{- /*
ServiceAccount for the agent; rendered only when serviceAccount.create is
true (otherwise "disco-agent.serviceAccountName" resolves to an existing
account, "default" unless overridden).
Change from previous revision: metadata.namespace is set explicitly, for
consistency with the other namespaced manifests in this chart
(configmap.yaml, poddisruptionbudget.yaml).
*/ -}}
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "disco-agent.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "disco-agent.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
{{- end }}
================================================
FILE: deploy/charts/disco-agent/tests/README.md
================================================
# `helm unittest`
We use `helm unittest` to test the YAML output coming out of the Helm chart.
In order to update the snapshots, run the following command:
```bash
make test-helm-snapshot
```
================================================
FILE: deploy/charts/disco-agent/tests/__snapshot__/configmap_test.yaml.snap
================================================
custom-cluster-description:
1: |
apiVersion: v1
data:
config.yaml: |-
cluster_name: ""
cluster_description: "A cloud hosted Kubernetes cluster hosting production workloads.\n\nteam: team-1\nemail: team-1@example.com\npurpose: Production workloads\n"
period: "12h0m0s"
data-gatherers:
- kind: oidc
name: ark/oidc
- kind: k8s-discovery
name: ark/discovery
- kind: k8s-dynamic
name: ark/secrets
config:
resource-type:
version: v1
resource: secrets
field-selectors:
- type!=kubernetes.io/dockercfg
- type!=kubernetes.io/dockerconfigjson
- type!=bootstrap.kubernetes.io/token
- type!=helm.sh/release.v1
- kind: k8s-dynamic
name: ark/serviceaccounts
config:
resource-type:
resource: serviceaccounts
version: v1
- kind: k8s-dynamic
name: ark/roles
config:
resource-type:
version: v1
group: rbac.authorization.k8s.io
resource: roles
- kind: k8s-dynamic
name: ark/clusterroles
config:
resource-type:
version: v1
group: rbac.authorization.k8s.io
resource: clusterroles
- kind: k8s-dynamic
name: ark/rolebindings
config:
resource-type:
version: v1
group: rbac.authorization.k8s.io
resource: rolebindings
- kind: k8s-dynamic
name: ark/clusterrolebindings
config:
resource-type:
version: v1
group: rbac.authorization.k8s.io
resource: clusterrolebindings
- kind: k8s-dynamic
name: ark/jobs
config:
resource-type:
version: v1
group: batch
resource: jobs
- kind: k8s-dynamic
name: ark/cronjobs
config:
resource-type:
version: v1
group: batch
resource: cronjobs
- kind: k8s-dynamic
name: ark/deployments
config:
resource-type:
version: v1
group: apps
resource: deployments
- kind: k8s-dynamic
name: ark/statefulsets
config:
resource-type:
version: v1
group: apps
resource: statefulsets
- kind: k8s-dynamic
name: ark/daemonsets
config:
resource-type:
version: v1
group: apps
resource: daemonsets
- kind: k8s-dynamic
name: ark/pods
config:
resource-type:
version: v1
resource: pods
- kind: k8s-dynamic
name: ark/configmaps
config:
resource-type:
resource: configmaps
version: v1
label-selectors:
- conjur.org/name=conjur-connect-configmap
- kind: k8s-dynamic
name: ark/esoexternalsecrets
config:
resource-type:
group: external-secrets.io
version: v1
resource: externalsecrets
- kind: k8s-dynamic
name: ark/esosecretstores
config:
resource-type:
group: external-secrets.io
version: v1
resource: secretstores
- kind: k8s-dynamic
name: ark/esoclusterexternalsecrets
config:
resource-type:
group: external-secrets.io
version: v1
resource: clusterexternalsecrets
- kind: k8s-dynamic
name: ark/esoclustersecretstores
config:
resource-type:
group: external-secrets.io
version: v1
resource: clustersecretstores
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/instance: test
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: disco-agent
app.kubernetes.io/version: v0.0.0
helm.sh/chart: disco-agent-0.0.0
name: test-disco-agent-config
namespace: test-ns
custom-cluster-name:
1: |
apiVersion: v1
data:
config.yaml: |-
cluster_name: "cluster-1 region-1 cloud-1 "
cluster_description: ""
period: "12h0m0s"
data-gatherers:
- kind: oidc
name: ark/oidc
- kind: k8s-discovery
name: ark/discovery
- kind: k8s-dynamic
name: ark/secrets
config:
resource-type:
version: v1
resource: secrets
field-selectors:
- type!=kubernetes.io/dockercfg
- type!=kubernetes.io/dockerconfigjson
- type!=bootstrap.kubernetes.io/token
- type!=helm.sh/release.v1
- kind: k8s-dynamic
name: ark/serviceaccounts
config:
resource-type:
resource: serviceaccounts
version: v1
- kind: k8s-dynamic
name: ark/roles
config:
resource-type:
version: v1
group: rbac.authorization.k8s.io
resource: roles
- kind: k8s-dynamic
name: ark/clusterroles
config:
resource-type:
version: v1
group: rbac.authorization.k8s.io
resource: clusterroles
- kind: k8s-dynamic
name: ark/rolebindings
config:
resource-type:
version: v1
group: rbac.authorization.k8s.io
resource: rolebindings
- kind: k8s-dynamic
name: ark/clusterrolebindings
config:
resource-type:
version: v1
group: rbac.authorization.k8s.io
resource: clusterrolebindings
- kind: k8s-dynamic
name: ark/jobs
config:
resource-type:
version: v1
group: batch
resource: jobs
- kind: k8s-dynamic
name: ark/cronjobs
config:
resource-type:
version: v1
group: batch
resource: cronjobs
- kind: k8s-dynamic
name: ark/deployments
config:
resource-type:
version: v1
group: apps
resource: deployments
- kind: k8s-dynamic
name: ark/statefulsets
config:
resource-type:
version: v1
group: apps
resource: statefulsets
- kind: k8s-dynamic
name: ark/daemonsets
config:
resource-type:
version: v1
group: apps
resource: daemonsets
- kind: k8s-dynamic
name: ark/pods
config:
resource-type:
version: v1
resource: pods
- kind: k8s-dynamic
name: ark/configmaps
config:
resource-type:
resource: configmaps
version: v1
label-selectors:
- conjur.org/name=conjur-connect-configmap
- kind: k8s-dynamic
name: ark/esoexternalsecrets
config:
resource-type:
group: external-secrets.io
version: v1
resource: externalsecrets
- kind: k8s-dynamic
name: ark/esosecretstores
config:
resource-type:
group: external-secrets.io
version: v1
resource: secretstores
- kind: k8s-dynamic
name: ark/esoclusterexternalsecrets
config:
resource-type:
group: external-secrets.io
version: v1
resource: clusterexternalsecrets
- kind: k8s-dynamic
name: ark/esoclustersecretstores
config:
resource-type:
group: external-secrets.io
version: v1
resource: clustersecretstores
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/instance: test
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: disco-agent
app.kubernetes.io/version: v0.0.0
helm.sh/chart: disco-agent-0.0.0
name: test-disco-agent-config
namespace: test-ns
custom-period:
1: |
apiVersion: v1
data:
config.yaml: |-
cluster_name: ""
cluster_description: ""
period: "1m"
data-gatherers:
- kind: oidc
name: ark/oidc
- kind: k8s-discovery
name: ark/discovery
- kind: k8s-dynamic
name: ark/secrets
config:
resource-type:
version: v1
resource: secrets
field-selectors:
- type!=kubernetes.io/dockercfg
- type!=kubernetes.io/dockerconfigjson
- type!=bootstrap.kubernetes.io/token
- type!=helm.sh/release.v1
- kind: k8s-dynamic
name: ark/serviceaccounts
config:
resource-type:
resource: serviceaccounts
version: v1
- kind: k8s-dynamic
name: ark/roles
config:
resource-type:
version: v1
group: rbac.authorization.k8s.io
resource: roles
- kind: k8s-dynamic
name: ark/clusterroles
config:
resource-type:
version: v1
group: rbac.authorization.k8s.io
resource: clusterroles
- kind: k8s-dynamic
name: ark/rolebindings
config:
resource-type:
version: v1
group: rbac.authorization.k8s.io
resource: rolebindings
- kind: k8s-dynamic
name: ark/clusterrolebindings
config:
resource-type:
version: v1
group: rbac.authorization.k8s.io
resource: clusterrolebindings
- kind: k8s-dynamic
name: ark/jobs
config:
resource-type:
version: v1
group: batch
resource: jobs
- kind: k8s-dynamic
name: ark/cronjobs
config:
resource-type:
version: v1
group: batch
resource: cronjobs
- kind: k8s-dynamic
name: ark/deployments
config:
resource-type:
version: v1
group: apps
resource: deployments
- kind: k8s-dynamic
name: ark/statefulsets
config:
resource-type:
version: v1
group: apps
resource: statefulsets
- kind: k8s-dynamic
name: ark/daemonsets
config:
resource-type:
version: v1
group: apps
resource: daemonsets
- kind: k8s-dynamic
name: ark/pods
config:
resource-type:
version: v1
resource: pods
- kind: k8s-dynamic
name: ark/configmaps
config:
resource-type:
resource: configmaps
version: v1
label-selectors:
- conjur.org/name=conjur-connect-configmap
- kind: k8s-dynamic
name: ark/esoexternalsecrets
config:
resource-type:
group: external-secrets.io
version: v1
resource: externalsecrets
- kind: k8s-dynamic
name: ark/esosecretstores
config:
resource-type:
group: external-secrets.io
version: v1
resource: secretstores
- kind: k8s-dynamic
name: ark/esoclusterexternalsecrets
config:
resource-type:
group: external-secrets.io
version: v1
resource: clusterexternalsecrets
- kind: k8s-dynamic
name: ark/esoclustersecretstores
config:
resource-type:
group: external-secrets.io
version: v1
resource: clustersecretstores
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/instance: test
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: disco-agent
app.kubernetes.io/version: v0.0.0
helm.sh/chart: disco-agent-0.0.0
name: test-disco-agent-config
namespace: test-ns
defaults:
1: |
apiVersion: v1
data:
config.yaml: |-
cluster_name: ""
cluster_description: ""
period: "12h0m0s"
data-gatherers:
- kind: oidc
name: ark/oidc
- kind: k8s-discovery
name: ark/discovery
- kind: k8s-dynamic
name: ark/secrets
config:
resource-type:
version: v1
resource: secrets
field-selectors:
- type!=kubernetes.io/dockercfg
- type!=kubernetes.io/dockerconfigjson
- type!=bootstrap.kubernetes.io/token
- type!=helm.sh/release.v1
- kind: k8s-dynamic
name: ark/serviceaccounts
config:
resource-type:
resource: serviceaccounts
version: v1
- kind: k8s-dynamic
name: ark/roles
config:
resource-type:
version: v1
group: rbac.authorization.k8s.io
resource: roles
- kind: k8s-dynamic
name: ark/clusterroles
config:
resource-type:
version: v1
group: rbac.authorization.k8s.io
resource: clusterroles
- kind: k8s-dynamic
name: ark/rolebindings
config:
resource-type:
version: v1
group: rbac.authorization.k8s.io
resource: rolebindings
- kind: k8s-dynamic
name: ark/clusterrolebindings
config:
resource-type:
version: v1
group: rbac.authorization.k8s.io
resource: clusterrolebindings
- kind: k8s-dynamic
name: ark/jobs
config:
resource-type:
version: v1
group: batch
resource: jobs
- kind: k8s-dynamic
name: ark/cronjobs
config:
resource-type:
version: v1
group: batch
resource: cronjobs
- kind: k8s-dynamic
name: ark/deployments
config:
resource-type:
version: v1
group: apps
resource: deployments
- kind: k8s-dynamic
name: ark/statefulsets
config:
resource-type:
version: v1
group: apps
resource: statefulsets
- kind: k8s-dynamic
name: ark/daemonsets
config:
resource-type:
version: v1
group: apps
resource: daemonsets
- kind: k8s-dynamic
name: ark/pods
config:
resource-type:
version: v1
resource: pods
- kind: k8s-dynamic
name: ark/configmaps
config:
resource-type:
resource: configmaps
version: v1
label-selectors:
- conjur.org/name=conjur-connect-configmap
- kind: k8s-dynamic
name: ark/esoexternalsecrets
config:
resource-type:
group: external-secrets.io
version: v1
resource: externalsecrets
- kind: k8s-dynamic
name: ark/esosecretstores
config:
resource-type:
group: external-secrets.io
version: v1
resource: secretstores
- kind: k8s-dynamic
name: ark/esoclusterexternalsecrets
config:
resource-type:
group: external-secrets.io
version: v1
resource: clusterexternalsecrets
- kind: k8s-dynamic
name: ark/esoclustersecretstores
config:
resource-type:
group: external-secrets.io
version: v1
resource: clustersecretstores
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/instance: test
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: disco-agent
app.kubernetes.io/version: v0.0.0
helm.sh/chart: disco-agent-0.0.0
name: test-disco-agent-config
namespace: test-ns
================================================
FILE: deploy/charts/disco-agent/tests/configmap_test.yaml
================================================
suite: test the contents of the config.yaml
templates:
- configmap.yaml
release:
name: test
namespace: test-ns
tests:
- it: defaults
asserts:
- matchSnapshot: {}
- it: custom-period
set:
config.period: 1m
asserts:
- matchSnapshot: {}
- it: custom-cluster-name
set:
config.clusterName: "cluster-1 region-1 cloud-1 "
asserts:
- matchSnapshot: {}
- it: custom-cluster-description
set:
config.clusterDescription: |
A cloud hosted Kubernetes cluster hosting production workloads.
team: team-1
email: team-1@example.com
purpose: Production workloads
asserts:
- matchSnapshot: {}
================================================
FILE: deploy/charts/disco-agent/values.linter.exceptions
================================================
================================================
FILE: deploy/charts/disco-agent/values.schema.json
================================================
{
"$defs": {
"helm-values": {
"additionalProperties": false,
"properties": {
"acceptTerms": {
"$ref": "#/$defs/helm-values.acceptTerms"
},
"affinity": {
"$ref": "#/$defs/helm-values.affinity"
},
"authentication": {
"$ref": "#/$defs/helm-values.authentication"
},
"config": {
"$ref": "#/$defs/helm-values.config"
},
"extraArgs": {
"$ref": "#/$defs/helm-values.extraArgs"
},
"fullnameOverride": {
"$ref": "#/$defs/helm-values.fullnameOverride"
},
"global": {
"$ref": "#/$defs/helm-values.global"
},
"http_proxy": {
"$ref": "#/$defs/helm-values.http_proxy"
},
"https_proxy": {
"$ref": "#/$defs/helm-values.https_proxy"
},
"image": {
"$ref": "#/$defs/helm-values.image"
},
"imageNamespace": {
"$ref": "#/$defs/helm-values.imageNamespace"
},
"imagePullSecrets": {
"$ref": "#/$defs/helm-values.imagePullSecrets"
},
"imageRegistry": {
"$ref": "#/$defs/helm-values.imageRegistry"
},
"metrics": {
"$ref": "#/$defs/helm-values.metrics"
},
"nameOverride": {
"$ref": "#/$defs/helm-values.nameOverride"
},
"no_proxy": {
"$ref": "#/$defs/helm-values.no_proxy"
},
"nodeSelector": {
"$ref": "#/$defs/helm-values.nodeSelector"
},
"podAnnotations": {
"$ref": "#/$defs/helm-values.podAnnotations"
},
"podDisruptionBudget": {
"$ref": "#/$defs/helm-values.podDisruptionBudget"
},
"podLabels": {
"$ref": "#/$defs/helm-values.podLabels"
},
"podSecurityContext": {
"$ref": "#/$defs/helm-values.podSecurityContext"
},
"pprof": {
"$ref": "#/$defs/helm-values.pprof"
},
"replicaCount": {
"$ref": "#/$defs/helm-values.replicaCount"
},
"resources": {
"$ref": "#/$defs/helm-values.resources"
},
"securityContext": {
"$ref": "#/$defs/helm-values.securityContext"
},
"serviceAccount": {
"$ref": "#/$defs/helm-values.serviceAccount"
},
"tolerations": {
"$ref": "#/$defs/helm-values.tolerations"
},
"volumeMounts": {
"$ref": "#/$defs/helm-values.volumeMounts"
},
"volumes": {
"$ref": "#/$defs/helm-values.volumes"
}
},
"type": "object"
},
"helm-values.acceptTerms": {
"default": false,
"description": "Must be set to indicate that you have read and accepted the CyberArk Terms of Service. If false, the helm chart will fail to install and will print a message with instructions on how to accept the TOS.",
"type": "boolean"
},
"helm-values.affinity": {
"default": {},
"type": "object"
},
"helm-values.authentication": {
"additionalProperties": false,
"properties": {
"secretName": {
"$ref": "#/$defs/helm-values.authentication.secretName"
}
},
"type": "object"
},
"helm-values.authentication.secretName": {
"default": "agent-credentials",
"type": "string"
},
"helm-values.config": {
"additionalProperties": false,
"properties": {
"clusterDescription": {
"$ref": "#/$defs/helm-values.config.clusterDescription"
},
"clusterName": {
"$ref": "#/$defs/helm-values.config.clusterName"
},
"excludeAnnotationKeysRegex": {
"$ref": "#/$defs/helm-values.config.excludeAnnotationKeysRegex"
},
"excludeLabelKeysRegex": {
"$ref": "#/$defs/helm-values.config.excludeLabelKeysRegex"
},
"period": {
"$ref": "#/$defs/helm-values.config.period"
},
"sendSecretValues": {
"$ref": "#/$defs/helm-values.config.sendSecretValues"
}
},
"type": "object"
},
"helm-values.config.clusterDescription": {
"default": "",
"description": "A short description of the cluster where the agent is deployed (optional).\n\nThis description will be associated with the data that the agent uploads to the Discovery and Context service. The description may include contact information such as the email address of the cluster administrator, so that any problems and risks identified by the Discovery and Context service can be communicated to the people responsible for the affected secrets.",
"type": "string"
},
"helm-values.config.clusterName": {
"default": "",
"description": "A human readable name for the cluster where the agent is deployed (optional).\n\nThis cluster name will be associated with the data that the agent uploads to the Discovery and Context service. If empty (the default), the service account name will be used instead.",
"type": "string"
},
"helm-values.config.excludeAnnotationKeysRegex": {
"default": [],
"description": "You can configure the agent to exclude some annotations or labels from being pushed . All Kubernetes objects are affected. The objects are still pushed, but the specified annotations and labels are removed before being pushed.\n\nDots is the only character that needs to be escaped in the regex. Use either double quotes with escaped single quotes or unquoted strings for the regex to avoid YAML parsing issues with `\\.`.\n\nExample: excludeAnnotationKeysRegex: ['^kapp\\.k14s\\.io/original.*']",
"items": {},
"type": "array"
},
"helm-values.config.excludeLabelKeysRegex": {
"default": [],
"items": {},
"type": "array"
},
"helm-values.config.period": {
"default": "12h0m0s",
"description": "Push data every 12 hours unless changed.",
"type": "string"
},
"helm-values.config.sendSecretValues": {
"default": true,
"description": "Enable sending of Secret values to CyberArk in addition to metadata. Metadata is always sent, but the actual values of Secrets are not sent by default. When enabled, Secret data is encrypted using envelope encryption using a key managed by CyberArk, fetched from the Discovery and Context service.",
"type": "boolean"
},
"helm-values.extraArgs": {
"default": [],
"description": "extraArgs:\n- --logging-format=json\n- --log-level=6 # To enable HTTP request logging",
"items": {},
"type": "array"
},
"helm-values.fullnameOverride": {
"default": "",
"type": "string"
},
"helm-values.global": {
"description": "Global values shared across all (sub)charts"
},
"helm-values.http_proxy": {
"description": "Configures the HTTP_PROXY environment variable where a HTTP proxy is required.",
"type": "string"
},
"helm-values.https_proxy": {
"description": "Configures the HTTPS_PROXY environment variable where a HTTP proxy is required.",
"type": "string"
},
"helm-values.image": {
"additionalProperties": false,
"properties": {
"digest": {
"$ref": "#/$defs/helm-values.image.digest"
},
"name": {
"$ref": "#/$defs/helm-values.image.name"
},
"pullPolicy": {
"$ref": "#/$defs/helm-values.image.pullPolicy"
},
"registry": {
"$ref": "#/$defs/helm-values.image.registry"
},
"repository": {
"$ref": "#/$defs/helm-values.image.repository"
},
"tag": {
"$ref": "#/$defs/helm-values.image.tag"
}
},
"type": "object"
},
"helm-values.image.digest": {
"default": "",
"description": "Override the image digest to deploy by setting this variable. If set together with `image.tag`, the rendered image will include both tag and digest.",
"type": "string"
},
"helm-values.image.name": {
"default": "disco-agent",
"description": "The image name for the Discovery Agent.\nThis is used (together with `imageRegistry` and `imageNamespace`) to construct the full image reference.",
"type": "string"
},
"helm-values.image.pullPolicy": {
"default": "IfNotPresent",
"description": "This sets the pull policy for images.",
"type": "string"
},
"helm-values.image.registry": {
"description": "Deprecated: per-component registry prefix.\n\nIf set, this value is *prepended* to the image repository that the chart would otherwise render. This applies both when `image.repository` is set and when the repository is computed from\n`imageRegistry` + `imageNamespace` + `image.name`.\n\nThis can produce \"double registry\" style references such as\n`legacy.example.io/quay.io/jetstack/...`. Prefer using the global\n`imageRegistry`/`imageNamespace` values.",
"type": "string"
},
"helm-values.image.repository": {
"default": "",
"description": "Full repository override (takes precedence over `imageRegistry`, `imageNamespace`, and `image.name`).\nExample: quay.io/jetstack/disco-agent",
"type": "string"
},
"helm-values.image.tag": {
"default": "",
"description": "Override the image tag to deploy by setting this variable. If no value is set, the chart's appVersion is used.",
"type": "string"
},
"helm-values.imageNamespace": {
"default": "jetstack",
"description": "The repository namespace used for disco-agent images by default.\nExamples:\n- jetstack\n- custom-namespace",
"type": "string"
},
"helm-values.imagePullSecrets": {
"default": [],
"description": "This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/",
"items": {},
"type": "array"
},
"helm-values.imageRegistry": {
"default": "quay.io",
"description": "The container registry used for disco-agent images by default. This can include path prefixes (e.g. \"artifactory.example.com/docker\").",
"type": "string"
},
"helm-values.metrics": {
"additionalProperties": false,
"properties": {
"enabled": {
"$ref": "#/$defs/helm-values.metrics.enabled"
},
"podmonitor": {
"$ref": "#/$defs/helm-values.metrics.podmonitor"
}
},
"type": "object"
},
"helm-values.metrics.enabled": {
"default": true,
"description": "Enable the metrics server.\nIf false, the metrics server will be disabled and the other metrics fields below will be ignored.",
"type": "boolean"
},
"helm-values.metrics.podmonitor": {
"additionalProperties": false,
"properties": {
"annotations": {
"$ref": "#/$defs/helm-values.metrics.podmonitor.annotations"
},
"enabled": {
"$ref": "#/$defs/helm-values.metrics.podmonitor.enabled"
},
"endpointAdditionalProperties": {
"$ref": "#/$defs/helm-values.metrics.podmonitor.endpointAdditionalProperties"
},
"honorLabels": {
"$ref": "#/$defs/helm-values.metrics.podmonitor.honorLabels"
},
"interval": {
"$ref": "#/$defs/helm-values.metrics.podmonitor.interval"
},
"labels": {
"$ref": "#/$defs/helm-values.metrics.podmonitor.labels"
},
"namespace": {
"$ref": "#/$defs/helm-values.metrics.podmonitor.namespace"
},
"prometheusInstance": {
"$ref": "#/$defs/helm-values.metrics.podmonitor.prometheusInstance"
},
"scrapeTimeout": {
"$ref": "#/$defs/helm-values.metrics.podmonitor.scrapeTimeout"
}
},
"type": "object"
},
"helm-values.metrics.podmonitor.annotations": {
"default": {},
"description": "Additional annotations to add to the PodMonitor.",
"type": "object"
},
"helm-values.metrics.podmonitor.enabled": {
"default": false,
"description": "Create a PodMonitor to add the metrics to Prometheus, if you are using Prometheus Operator. See https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor",
"type": "boolean"
},
"helm-values.metrics.podmonitor.endpointAdditionalProperties": {
"default": {},
"description": "EndpointAdditionalProperties allows setting additional properties on the endpoint such as relabelings, metricRelabelings etc.\n\nFor example:\nendpointAdditionalProperties:\n relabelings:\n - action: replace\n sourceLabels:\n - __meta_kubernetes_pod_node_name\n targetLabel: instance",
"type": "object"
},
"helm-values.metrics.podmonitor.honorLabels": {
"default": false,
"description": "Keep labels from scraped data, overriding server-side labels.",
"type": "boolean"
},
"helm-values.metrics.podmonitor.interval": {
"default": "60s",
"description": "The interval to scrape metrics.",
"type": "string"
},
"helm-values.metrics.podmonitor.labels": {
"default": {},
"description": "Additional labels to add to the PodMonitor.",
"type": "object"
},
"helm-values.metrics.podmonitor.namespace": {
"description": "The namespace that the pod monitor should live in.\nDefaults to the disco-agent namespace.",
"type": "string"
},
"helm-values.metrics.podmonitor.prometheusInstance": {
"default": "default",
"description": "Specifies the `prometheus` label on the created PodMonitor. This is used when different Prometheus instances have label selectors matching different PodMonitors.",
"type": "string"
},
"helm-values.metrics.podmonitor.scrapeTimeout": {
"default": "30s",
"description": "The timeout before a metrics scrape fails.",
"type": "string"
},
"helm-values.nameOverride": {
"default": "",
"description": "This is to override the chart name.",
"type": "string"
},
"helm-values.no_proxy": {
"description": "Configures the NO_PROXY environment variable where a HTTP proxy is required, but certain domains should be excluded.",
"type": "string"
},
"helm-values.nodeSelector": {
"default": {},
"type": "object"
},
"helm-values.podAnnotations": {
"default": {},
"description": "This is for setting Kubernetes Annotations to a Pod. For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/",
"type": "object"
},
"helm-values.podDisruptionBudget": {
"default": {
"enabled": false
},
"description": "Configure a PodDisruptionBudget for the agent's Deployment. If running with multiple replicas, consider setting podDisruptionBudget.enabled to true.",
"type": "object"
},
"helm-values.podLabels": {
"default": {},
"description": "This is for setting Kubernetes Labels to a Pod.\nFor more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/",
"type": "object"
},
"helm-values.podSecurityContext": {
"default": {},
"type": "object"
},
"helm-values.pprof": {
"additionalProperties": false,
"properties": {
"enabled": {
"$ref": "#/$defs/helm-values.pprof.enabled"
}
},
"type": "object"
},
"helm-values.pprof.enabled": {
"default": false,
"description": "Enable profiling with the pprof endpoint",
"type": "boolean"
},
"helm-values.replicaCount": {
"default": 1,
"description": "This will set the replicaset count more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/",
"type": "number"
},
"helm-values.resources": {
"default": {},
"type": "object"
},
"helm-values.securityContext": {
"default": {
"allowPrivilegeEscalation": false,
"capabilities": {
"drop": [
"ALL"
]
},
"readOnlyRootFilesystem": true,
"runAsNonRoot": true,
"seccompProfile": {
"type": "RuntimeDefault"
}
},
"description": "Add Container specific SecurityContext settings to the container. Takes precedence over `podSecurityContext` when set. See https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container",
"type": "object"
},
"helm-values.serviceAccount": {
"additionalProperties": false,
"properties": {
"annotations": {
"$ref": "#/$defs/helm-values.serviceAccount.annotations"
},
"automount": {
"$ref": "#/$defs/helm-values.serviceAccount.automount"
},
"create": {
"$ref": "#/$defs/helm-values.serviceAccount.create"
},
"name": {
"$ref": "#/$defs/helm-values.serviceAccount.name"
}
},
"type": "object"
},
"helm-values.serviceAccount.annotations": {
"default": {},
"description": "Annotations to add to the service account",
"type": "object"
},
"helm-values.serviceAccount.automount": {
"default": true,
"description": "Automatically mount a ServiceAccount's API credentials?",
"type": "boolean"
},
"helm-values.serviceAccount.create": {
"default": true,
"description": "Specifies whether a service account should be created",
"type": "boolean"
},
"helm-values.serviceAccount.name": {
"default": "",
"description": "The name of the service account to use.\nIf not set and create is true, a name is generated using the fullname template",
"type": "string"
},
"helm-values.tolerations": {
"default": [],
"items": {},
"type": "array"
},
"helm-values.volumeMounts": {
"default": [],
"description": "Additional volumeMounts on the output Deployment definition.",
"items": {},
"type": "array"
},
"helm-values.volumes": {
"default": [],
"description": "Additional volumes on the output Deployment definition.",
"items": {},
"type": "array"
}
},
"$ref": "#/$defs/helm-values",
"$schema": "http://json-schema.org/draft-07/schema#"
}
================================================
FILE: deploy/charts/disco-agent/values.yaml
================================================
# Default values for disco-agent.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# This will set the replicaset count. More information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
replicaCount: 1
# Must be set to indicate that you have read and accepted the CyberArk Terms of Service. If false, the helm chart will fail to install and will print a message with instructions on how to accept the TOS.
acceptTerms: false
# The container registry used for disco-agent images by default.
# This can include path prefixes (e.g. "artifactory.example.com/docker").
# +docs:property
imageRegistry: "quay.io"
# The repository namespace used for disco-agent images by default.
# Examples:
# - jetstack
# - custom-namespace
# +docs:property
imageNamespace: "jetstack"
# This sets the container image. More information can be found here: https://kubernetes.io/docs/concepts/containers/images/
image:
# Deprecated: per-component registry prefix.
#
# If set, this value is *prepended* to the image repository that the chart would otherwise render.
# This applies both when `image.repository` is set and when the repository is computed from
# `imageRegistry` + `imageNamespace` + `image.name`.
#
# This can produce "double registry" style references such as
# `legacy.example.io/quay.io/jetstack/...`. Prefer using the global
# `imageRegistry`/`imageNamespace` values.
# +docs:property
# registry: quay.io
# Full repository override (takes precedence over `imageRegistry`, `imageNamespace`,
# and `image.name`).
# Example: quay.io/jetstack/disco-agent
# +docs:property
repository: ""
# The image name for the Discovery Agent.
# This is used (together with `imageRegistry` and `imageNamespace`) to construct the full
# image reference.
# +docs:property
name: disco-agent
# This sets the pull policy for images.
pullPolicy: IfNotPresent
# Override the image tag to deploy by setting this variable.
# If no value is set, the chart's appVersion is used.
tag: ""
# Override the image digest to deploy by setting this variable.
# If set together with `image.tag`, the rendered image will include both tag and digest.
digest: ""
# This is for the secrets for pulling an image from a private repository. More information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# This is to override the chart name.
nameOverride: ""
fullnameOverride: ""
# This section builds out the service account more information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/
serviceAccount:
# Specifies whether a service account should be created
create: true
# Automatically mount a ServiceAccount's API credentials?
automount: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
# This is for setting Kubernetes Annotations to a Pod.
# For more information, check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
podAnnotations: {}
# This is for setting Kubernetes Labels to a Pod.
# For more information, check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
# Add Container specific SecurityContext settings to the container. Takes
# precedence over `podSecurityContext` when set. See
# https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container
# +docs:property
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
allowPrivilegeEscalation: false
seccompProfile: { type: RuntimeDefault }
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# Additional volumes on the output Deployment definition.
volumes: []
# - name: foo
# secret:
# secretName: mysecret
# optional: false
# Additional volumeMounts on the output Deployment definition.
volumeMounts: []
# - name: foo
# mountPath: "/etc/foo"
# readOnly: true
nodeSelector: {}
tolerations: []
affinity: {}
# Configures the HTTP_PROXY environment variable where an HTTP proxy is required.
# +docs:property
# http_proxy: "http://proxy:8080"
# Configures the HTTPS_PROXY environment variable where an HTTP proxy is required.
# +docs:property
# https_proxy: "https://proxy:8080"
# Configures the NO_PROXY environment variable where an HTTP proxy is required,
# but certain domains should be excluded.
# +docs:property
# no_proxy: 127.0.0.1,localhost
# Configure a PodDisruptionBudget for the agent's Deployment. If running with multiple
# replicas, consider setting podDisruptionBudget.enabled to true.
# +docs:property
podDisruptionBudget:
# Enable or disable the PodDisruptionBudget resource, which helps prevent downtime
# during voluntary disruptions such as during a Node upgrade.
enabled: false
# Configure the minimum available pods for disruptions. Can either be set to
# an integer (e.g. 1) or a percentage value (e.g. 25%).
# Cannot be used if `maxUnavailable` is set.
# +docs:property
# minAvailable: 1
# Configure the maximum unavailable pods for disruptions. Can either be set to
# an integer (e.g. 1) or a percentage value (e.g. 25%).
# Cannot be used if `minAvailable` is set.
# +docs:property
# maxUnavailable: 1
# Configuration for the agent
config:
# Push data every 12 hours unless changed.
period: "12h0m0s"
# You can configure the agent to exclude some annotations or
# labels from being pushed. All Kubernetes objects
# are affected. The objects are still pushed, but the specified annotations
# and labels are removed before being pushed.
#
# The dot is the only character that needs to be escaped in the regex. Use either
# double quotes with escaped single quotes or unquoted strings for the regex
# to avoid YAML parsing issues with `\.`.
#
# Example: excludeAnnotationKeysRegex: ['^kapp\.k14s\.io/original.*']
excludeAnnotationKeysRegex: []
excludeLabelKeysRegex: []
# A human readable name for the cluster where the agent is deployed (optional).
#
# This cluster name will be associated with the data that the agent uploads to
# the Discovery and Context service. If empty (the default), the service
# account name will be used instead.
clusterName: ""
# A short description of the cluster where the agent is deployed (optional).
#
# This description will be associated with the data that the agent uploads to
# the Discovery and Context service. The description may include contact
# information such as the email address of the cluster administrator, so that
# any problems and risks identified by the Discovery and Context service can
# be communicated to the people responsible for the affected secrets.
clusterDescription: ""
# Enable sending of Secret values to CyberArk in addition to metadata.
# Metadata is always sent, but the actual values of Secrets are not sent by default.
# When enabled, Secret data is encrypted using envelope encryption using
# a key managed by CyberArk, fetched from the Discovery and Context service.
sendSecretValues: true
authentication:
secretName: agent-credentials
# extraArgs:
# - --logging-format=json
# - --log-level=6 # To enable HTTP request logging
extraArgs: []
pprof:
# Enable profiling with the pprof endpoint
enabled: false
metrics:
# Enable the metrics server.
# If false, the metrics server will be disabled and the other metrics fields below will be ignored.
enabled: true
podmonitor:
# Create a PodMonitor to add the metrics to Prometheus, if you are using Prometheus Operator.
# See https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor
enabled: false
# The namespace that the pod monitor should live in.
# Defaults to the disco-agent namespace.
# +docs:property
# namespace: cyberark
# Specifies the `prometheus` label on the created PodMonitor.
# This is used when different Prometheus instances have label selectors
# matching different PodMonitors.
prometheusInstance: default
# The interval to scrape metrics.
interval: 60s
# The timeout before a metrics scrape fails.
scrapeTimeout: 30s
# Additional labels to add to the PodMonitor.
labels: {}
# Additional annotations to add to the PodMonitor.
annotations: {}
# Keep labels from scraped data, overriding server-side labels.
honorLabels: false
# EndpointAdditionalProperties allows setting additional properties on the endpoint such as relabelings, metricRelabelings etc.
#
# For example:
# endpointAdditionalProperties:
# relabelings:
# - action: replace
# sourceLabels:
# - __meta_kubernetes_pod_node_name
# targetLabel: instance
endpointAdditionalProperties: {}
================================================
FILE: deploy/charts/discovery-agent/.helmignore
================================================
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
================================================
FILE: deploy/charts/discovery-agent/Chart.yaml
================================================
apiVersion: v2
name: discovery-agent
description: |-
The discovery-agent connects your Kubernetes or Openshift cluster to NGTS for discovery and monitoring.
maintainers:
- name: Palo Alto Networks
url: https://www.paloaltonetworks.com
sources:
- https://github.com/jetstack/jetstack-secure
# These versions are meant to be overridden by `make helm-chart`. No `v` prefix
# for the `version` because Helm doesn't support auto-determining the latest
# version for OCI Helm charts that use a `v` prefix.
version: 0.0.0
appVersion: "v0.0.0"
================================================
FILE: deploy/charts/discovery-agent/README.md
================================================
# discovery-agent
The Discovery Agent connects your Kubernetes or OpenShift cluster to Palo Alto NGTS.
## Values
<!-- AUTO-GENERATED -->
#### **config.tsgID** ~ `string`
> Default value:
> ```yaml
> ""
> ```
Required: The TSG (Tenant Service Group) ID to use when connecting to SCM. NB: TSG IDs are numeric, but must be treated as strings to avoid issues with YAML data types. With the Helm CLI use `--set-string`; with YAML always pass TSG IDs in double quotes.
#### **config.clusterName** ~ `string`
> Default value:
> ```yaml
> ""
> ```
Required: A human readable name for the cluster into which the agent is being deployed.
This cluster name will be associated with the data that the agent uploads to the backend.
#### **config.clusterDescription** ~ `string`
> Default value:
> ```yaml
> ""
> ```
A short description of the cluster where the agent is deployed (optional).
This description will be associated with the data that the agent uploads to the backend.
#### **config.claimableCerts** ~ `bool`
> Default value:
> ```yaml
> false
> ```
Whether discovered certs can be claimed by other tenants (optional). true = certs are left unassigned, available for any tenant to claim. false (default) = certs are owned by this cluster's tenant.
#### **config.period** ~ `string`
> Default value:
> ```yaml
> 0h1m0s
> ```
How often to push data to the remote server
#### **config.excludeAnnotationKeysRegex** ~ `array`
> Default value:
> ```yaml
> []
> ```
You can configure the agent to exclude some annotations or labels from being pushed. All Kubernetes objects are affected. The objects are still pushed, but the specified annotations and labels are removed before being pushed.
The dot is the only character that needs to be escaped in the regex. Use either double quotes with escaped single quotes or unquoted strings for the regex to avoid YAML parsing issues with `\.`.
Example: excludeAnnotationKeysRegex: ['^kapp\.k14s\.io/original.*']
#### **config.excludeLabelKeysRegex** ~ `array`
> Default value:
> ```yaml
> []
> ```
#### **config.clientID** ~ `string`
> Default value:
> ```yaml
> ""
> ```
Deprecated: Client ID for the configured service account. The client ID should be provided in the "clientID" field of the authentication secret (see config.secretName). This field is provided for compatibility for users migrating from the "venafi-kubernetes-agent" chart.
#### **config.secretName** ~ `string`
> Default value:
> ```yaml
> discovery-agent-credentials
> ```
The name of the Secret containing the NGTS built-in service account credentials.
The Secret must contain the following key:
- privatekey.pem: PEM-encoded private key for the service account
The Secret should also contain the following key:
- clientID: Service account client ID (config.clientID must be set if not present)
#### **replicaCount** ~ `number`
> Default value:
> ```yaml
> 1
> ```
This will set the replicaset count. More information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
#### **imageRegistry** ~ `string`
> Default value:
> ```yaml
> quay.io
> ```
The container registry used for discovery-agent images by default. This can include path prefixes (e.g. "artifactory.example.com/docker").
#### **imageNamespace** ~ `string`
> Default value:
> ```yaml
> jetstack
> ```
The repository namespace used for discovery-agent images by default.
Examples:
- jetstack
- custom-namespace
#### **image.repository** ~ `string`
> Default value:
> ```yaml
> ""
> ```
Full repository override (takes precedence over `imageRegistry`, `imageNamespace`, and `image.name`).
Example: quay.io/jetstack/discovery-agent
#### **image.name** ~ `string`
> Default value:
> ```yaml
> discovery-agent
> ```
The image name for the Discovery Agent.
This is used (together with `imageRegistry` and `imageNamespace`) to construct the full image reference.
#### **image.pullPolicy** ~ `string`
> Default value:
> ```yaml
> IfNotPresent
> ```
This sets the pull policy for images.
#### **image.tag** ~ `string`
> Default value:
> ```yaml
> ""
> ```
Override the image tag to deploy by setting this variable. If no value is set, the chart's appVersion is used.
#### **image.digest** ~ `string`
> Default value:
> ```yaml
> ""
> ```
Override the image digest to deploy by setting this variable. If set together with `image.tag`, the rendered image will include both tag and digest.
#### **imagePullSecrets** ~ `array`
> Default value:
> ```yaml
> []
> ```
This is for the secrets for pulling an image from a private repository. More information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
#### **nameOverride** ~ `string`
> Default value:
> ```yaml
> ""
> ```
This is to override the chart name.
#### **fullnameOverride** ~ `string`
> Default value:
> ```yaml
> ""
> ```
#### **serviceAccount.create** ~ `bool`
> Default value:
> ```yaml
> true
> ```
Specifies whether a service account should be created
#### **serviceAccount.automount** ~ `bool`
> Default value:
> ```yaml
> true
> ```
Automatically mount a ServiceAccount's API credentials?
#### **serviceAccount.annotations** ~ `object`
> Default value:
> ```yaml
> {}
> ```
Annotations to add to the service account
#### **serviceAccount.name** ~ `string`
> Default value:
> ```yaml
> ""
> ```
The name of the service account to use.
If not set and create is true, a name is generated using the fullname template
#### **podAnnotations** ~ `object`
> Default value:
> ```yaml
> {}
> ```
This is for setting Kubernetes Annotations to a Pod. For more information, check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
#### **podLabels** ~ `object`
> Default value:
> ```yaml
> {}
> ```
This is for setting Kubernetes Labels to a Pod.
For more information, check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
#### **podSecurityContext** ~ `object`
> Default value:
> ```yaml
> {}
> ```
#### **securityContext** ~ `object`
> Default value:
> ```yaml
> allowPrivilegeEscalation: false
> capabilities:
> drop:
> - ALL
> readOnlyRootFilesystem: true
> runAsNonRoot: true
> seccompProfile:
> type: RuntimeDefault
> ```
Add Container specific SecurityContext settings to the container. Takes precedence over `podSecurityContext` when set. See https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container
#### **resources** ~ `object`
> Default value:
> ```yaml
> {}
> ```
#### **volumes** ~ `array`
> Default value:
> ```yaml
> []
> ```
Additional volumes on the output Deployment definition.
#### **volumeMounts** ~ `array`
> Default value:
> ```yaml
> []
> ```
Additional volumeMounts on the output Deployment definition.
#### **nodeSelector** ~ `object`
> Default value:
> ```yaml
> {}
> ```
#### **tolerations** ~ `array`
> Default value:
> ```yaml
> []
> ```
#### **affinity** ~ `object`
> Default value:
> ```yaml
> {}
> ```
#### **http_proxy** ~ `string`
Configures the HTTP_PROXY environment variable where a HTTP proxy is required.
#### **https_proxy** ~ `string`
Configures the HTTPS_PROXY environment variable where a HTTP proxy is required.
#### **no_proxy** ~ `string`
Configures the NO_PROXY environment variable where a HTTP proxy is required, but certain domains should be excluded.
#### **podDisruptionBudget** ~ `object`
> Default value:
> ```yaml
> enabled: false
> ```
Configure a PodDisruptionBudget for the agent's Deployment. If running with multiple replicas, consider setting podDisruptionBudget.enabled to true.
#### **extraArgs** ~ `array`
> Default value:
> ```yaml
> []
> ```
```yaml
extraArgs:
- --logging-format=json
- --log-level=6 # To enable HTTP request logging
```
#### **pprof.enabled** ~ `bool`
> Default value:
> ```yaml
> false
> ```
Enable profiling with the pprof endpoint
#### **metrics.enabled** ~ `bool`
> Default value:
> ```yaml
> true
> ```
Enable the metrics server.
If false, the metrics server will be disabled and the other metrics fields below will be ignored.
#### **metrics.podmonitor.enabled** ~ `bool`
> Default value:
> ```yaml
> false
> ```
Create a PodMonitor to add the metrics to Prometheus, if you are using Prometheus Operator. See https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor
#### **metrics.podmonitor.namespace** ~ `string`
The namespace that the pod monitor should live in.
Defaults to the discovery-agent namespace.
#### **metrics.podmonitor.prometheusInstance** ~ `string`
> Default value:
> ```yaml
> default
> ```
Specifies the `prometheus` label on the created PodMonitor. This is used when different Prometheus instances have label selectors matching different PodMonitors.
#### **metrics.podmonitor.interval** ~ `string`
> Default value:
> ```yaml
> 60s
> ```
The interval to scrape metrics.
#### **metrics.podmonitor.scrapeTimeout** ~ `string`
> Default value:
> ```yaml
> 30s
> ```
The timeout before a metrics scrape fails.
#### **metrics.podmonitor.labels** ~ `object`
> Default value:
> ```yaml
> {}
> ```
Additional labels to add to the PodMonitor.
#### **metrics.podmonitor.annotations** ~ `object`
> Default value:
> ```yaml
> {}
> ```
Additional annotations to add to the PodMonitor.
#### **metrics.podmonitor.honorLabels** ~ `bool`
> Default value:
> ```yaml
> false
> ```
Keep labels from scraped data, overriding server-side labels.
#### **metrics.podmonitor.endpointAdditionalProperties** ~ `object`
> Default value:
> ```yaml
> {}
> ```
EndpointAdditionalProperties allows setting additional properties on the endpoint such as relabelings, metricRelabelings etc.
For example:
```yaml
endpointAdditionalProperties:
relabelings:
- action: replace
sourceLabels:
- __meta_kubernetes_pod_node_name
targetLabel: instance
```
<!-- /AUTO-GENERATED -->
================================================
FILE: deploy/charts/discovery-agent/templates/NOTES.txt
================================================
CHART NAME: {{ .Chart.Name }}
CHART VERSION: {{ .Chart.Version }}
APP VERSION: {{ .Chart.AppVersion }}
- Check the application is running:
> kubectl get pods -n {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
- Check the application logs for successful connection to NGTS:
> kubectl logs -n {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
================================================
FILE: deploy/charts/discovery-agent/templates/_helpers.tpl
================================================
{{/*
Expand the name of the chart.
Uses .Values.nameOverride when set, falling back to the chart name. The
result is truncated to 63 characters (the Kubernetes DNS label limit), and
any trailing "-" left behind by truncation is removed so the result remains
a valid name fragment.
*/}}
{{- define "discovery-agent.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
Precedence: .Values.fullnameOverride first; otherwise the release name alone
(when it already contains the chart/name-override); otherwise
"<release>-<name>". All branches truncate to 63 chars and strip a trailing "-".
*/}}
{{- define "discovery-agent.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
"+" (the semver build-metadata separator) is not a valid character in label
values, so it is replaced with "_" before truncating to the 63-character
label-value limit.
*/}}
{{- define "discovery-agent.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels applied to every resource rendered by this chart: the
helm.sh/chart label, the selector labels (see
"discovery-agent.selectorLabels"), the app version when the chart declares
one, and the managed-by label.
*/}}
{{- define "discovery-agent.labels" -}}
helm.sh/chart: {{ include "discovery-agent.chart" . }}
{{ include "discovery-agent.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels: the stable subset of labels used for the Deployment's
matchLabels and the PodMonitor selector. These must stay constant across
chart upgrades because Deployment selectors are immutable.
*/}}
{{- define "discovery-agent.selectorLabels" -}}
app.kubernetes.io/name: {{ include "discovery-agent.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use.
An explicit .Values.serviceAccount.name always wins. Otherwise, when the
chart creates the ServiceAccount the fullname is used; when it does not,
the namespace's "default" ServiceAccount is used.
*/}}
{{- define "discovery-agent.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "discovery-agent.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Util function for generating an image reference based on the provided options.
This function is derived from similar functions used in the cert-manager GitHub organization.
Output shape: <repository>[:<tag>][@<digest>]. When neither tag nor digest
is set, the caller-supplied default reference (e.g. ":<appVersion>") is
appended verbatim instead. An explicit image.repository takes precedence
over the registry/namespace/name composition.
*/}}
{{- define "discovery-agent.image" -}}
{{- /*
Calling convention:
- (tuple <imageValues> <imageRegistry> <imageNamespace> <defaultReference>)
We intentionally pass imageRegistry/imageNamespace as explicit arguments rather than reading
from `.Values` inside this helper, because `helm-tool lint` does not reliably track `.Values.*`
usage through tuple/variable indirection.
*/ -}}
{{- /* Fail fast on a miscalled helper rather than rendering a bad image ref. */ -}}
{{- if ne (len .) 4 -}}
{{- fail (printf "ERROR: template \"discovery-agent.image\" expects (tuple <imageValues> <imageRegistry> <imageNamespace> <defaultReference>), got %d arguments" (len .)) -}}
{{- end -}}
{{- $image := index . 0 -}}
{{- $imageRegistry := index . 1 | default "" -}}
{{- $imageNamespace := index . 2 | default "" -}}
{{- $defaultReference := index . 3 -}}
{{- $repository := "" -}}
{{- if $image.repository -}}
{{- $repository = $image.repository -}}
{{- else -}}
{{- $name := required "ERROR: image.name must be set when image.repository is empty" $image.name -}}
{{- $repository = $name -}}
{{- if $imageNamespace -}}
{{- $repository = printf "%s/%s" $imageNamespace $repository -}}
{{- end -}}
{{- if $imageRegistry -}}
{{- $repository = printf "%s/%s" $imageRegistry $repository -}}
{{- end -}}
{{- end -}}
{{- $repository -}}
{{- /* Tag/digest suffix: both, one, or the default reference. */ -}}
{{- if and $image.tag $image.digest -}}
{{- printf ":%s@%s" $image.tag $image.digest -}}
{{- else if $image.tag -}}
{{- printf ":%s" $image.tag -}}
{{- else if $image.digest -}}
{{- printf "@%s" $image.digest -}}
{{- else -}}
{{- printf "%s" $defaultReference -}}
{{- end -}}
{{- end }}
================================================
FILE: deploy/charts/discovery-agent/templates/configmap.yaml
================================================
{{- /*
ConfigMap rendered as the agent configuration file and mounted into the Pod
at /etc/discovery-agent/config.yaml (see deployment.yaml). Contains the
cluster identity, the push period, optional annotation/label exclusion
regexes, and the fixed list of data-gatherers (cluster discovery plus
secrets, jobs, cronjobs, deployments, statefulsets, daemonsets and pods).
The surrounding comment is whitespace-trimmed so rendered output is
unchanged.
*/ -}}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "discovery-agent.fullname" . }}-config
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "discovery-agent.labels" . | nindent 4 }}
data:
  config.yaml: |-
    cluster_name: {{ required "config.clusterName is required" .Values.config.clusterName | quote }}
    cluster_description: {{ .Values.config.clusterDescription | quote }}
    {{- if .Values.config.claimableCerts }}
    claimable_certs: true
    {{- end }}
    period: {{ .Values.config.period | quote }}
    {{- with .Values.config.excludeAnnotationKeysRegex }}
    exclude-annotation-keys-regex:
      {{- . | toYaml | nindent 6 }}
    {{- end }}
    {{- with .Values.config.excludeLabelKeysRegex }}
    exclude-label-keys-regex:
      {{- . | toYaml | nindent 6 }}
    {{- end }}
    data-gatherers:
      - kind: k8s-discovery
        name: k8s/discovery
      - kind: k8s-dynamic
        name: k8s/secrets
        config:
          resource-type:
            version: v1
            resource: secrets
          field-selectors:
            - type!=kubernetes.io/dockercfg
            - type!=kubernetes.io/dockerconfigjson
            - type!=bootstrap.kubernetes.io/token
            - type!=helm.sh/release.v1
      - kind: k8s-dynamic
        name: k8s/jobs
        config:
          resource-type:
            version: v1
            group: batch
            resource: jobs
      - kind: k8s-dynamic
        name: k8s/cronjobs
        config:
          resource-type:
            version: v1
            group: batch
            resource: cronjobs
      - kind: k8s-dynamic
        name: k8s/deployments
        config:
          resource-type:
            version: v1
            group: apps
            resource: deployments
      - kind: k8s-dynamic
        name: k8s/statefulsets
        config:
          resource-type:
            version: v1
            group: apps
            resource: statefulsets
      - kind: k8s-dynamic
        name: k8s/daemonsets
        config:
          resource-type:
            version: v1
            group: apps
            resource: daemonsets
      - kind: k8s-dynamic
        name: k8s/pods
        config:
          resource-type:
            version: v1
            resource: pods
================================================
FILE: deploy/charts/discovery-agent/templates/deployment.yaml
================================================
{{- /*
Deployment running the discovery-agent container. The agent reads its
configuration from the chart ConfigMap (mounted at /etc/discovery-agent)
and its NGTS service-account credentials from the Secret named by
.Values.config.secretName (mounted at /etc/discovery-agent/credentials).
*/ -}}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "discovery-agent.fullname" . }}
  {{- /* Set explicitly for consistency with configmap.yaml and poddisruptionbudget.yaml. */}}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "discovery-agent.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "discovery-agent.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "discovery-agent.labels" . | nindent 8 }}
        {{- with .Values.podLabels }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "discovery-agent.serviceAccountName" . }}
      {{- with .Values.podSecurityContext }}
      securityContext:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      containers:
        - name: agent
          {{- with .Values.securityContext }}
          securityContext:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          image: "{{ template "discovery-agent.image" (tuple .Values.image .Values.imageRegistry .Values.imageNamespace (printf ":%s" .Chart.AppVersion)) }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          env:
            {{- /* Downward API fields identifying this Pod to the agent. */}}
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_UID
              valueFrom:
                fieldRef:
                  fieldPath: metadata.uid
            - name: POD_NODE
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            {{- /* Proxy values are quoted so URLs and comma-separated lists
                   always render as valid YAML strings. */}}
            {{- with .Values.http_proxy }}
            - name: HTTP_PROXY
              value: {{ . | quote }}
            {{- end }}
            {{- with .Values.https_proxy }}
            - name: HTTPS_PROXY
              value: {{ . | quote }}
            {{- end }}
            {{- with .Values.no_proxy }}
            - name: NO_PROXY
              value: {{ . | quote }}
            {{- end }}
          args:
            - "agent"
            - "-c"
            - "/etc/discovery-agent/config.yaml"
            - --ngts
            - --tsg-id
            {{- /* TSG IDs are numeric but must be passed as strings; see README. */}}
            - {{ required "config.tsgID is required" .Values.config.tsgID | toString | quote }}
            {{- with .Values.config.serverURL }}
            - --ngts-server-url
            - {{ . | quote }}
            {{- end }}
            {{- /* config.clientId is accepted as a deprecated alias of config.clientID. */}}
            {{- if or .Values.config.clientID .Values.config.clientId }}
            - --client-id
            - {{ .Values.config.clientID | default .Values.config.clientId }}
            {{- end }}
            - --private-key-path
            - /etc/discovery-agent/credentials/privatekey.pem
            - --logging-format=json
            {{- if .Values.metrics.enabled }}
            - --enable-metrics
            {{- end }}
            {{- if .Values.pprof.enabled }}
            - --enable-pprof
            {{- end }}
            {{- range .Values.extraArgs }}
            - {{ . | quote }}
            {{- end }}
          {{- with .Values.resources }}
          resources:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          volumeMounts:
            - name: config
              mountPath: "/etc/discovery-agent"
              readOnly: true
            - name: credentials
              mountPath: "/etc/discovery-agent/credentials"
              readOnly: true
            {{- with .Values.volumeMounts }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
          ports:
            - name: agent-api
              containerPort: 8081
      volumes:
        - name: config
          configMap:
            name: {{ include "discovery-agent.fullname" . }}-config
            optional: false
        - name: credentials
          secret:
            secretName: {{ .Values.config.secretName }}
            optional: false
        {{- with .Values.volumes }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
================================================
FILE: deploy/charts/discovery-agent/templates/poddisruptionbudget.yaml
================================================
{{/*
PodDisruptionBudget for the agent Deployment, rendered only when
.Values.podDisruptionBudget.enabled is true. Exactly one of minAvailable /
maxUnavailable is emitted: user-supplied keys win (hasKey is used so that
explicit values, including 0, are honoured) and when neither key is set the
chart defaults to minAvailable: 1.
*/}}
{{- if .Values.podDisruptionBudget.enabled }}
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: {{ include "discovery-agent.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "discovery-agent.labels" . | nindent 4 }}
spec:
  selector:
    matchLabels:
      {{- include "discovery-agent.selectorLabels" . | nindent 6 }}
  {{- if not (or (hasKey .Values.podDisruptionBudget "minAvailable") (hasKey .Values.podDisruptionBudget "maxUnavailable")) }}
  minAvailable: 1 # Default value because minAvailable and maxUnavailable are not set
  {{- end }}
  {{- if hasKey .Values.podDisruptionBudget "minAvailable" }}
  minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}
  {{- end }}
  {{- if hasKey .Values.podDisruptionBudget "maxUnavailable" }}
  maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }}
  {{- end }}
{{- end }}
================================================
FILE: deploy/charts/discovery-agent/templates/podmonitor.yaml
================================================
{{- if and .Values.metrics.enabled .Values.metrics.podmonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: {{ include "discovery-agent.fullname" . }}
{{- if .Values.metrics.podmonitor.namespace }}
namespace: {{ .Values.metrics.podmonitor.namespace }}
{{- else }}
namespace: {{ .Release.Namespace | quote }}
{{- end }}
labels:
{{- include "discovery-agent.labels" . | nindent 4 }}
prometheus: {{ .Values.metrics.podmonitor.prometheusInstance }}
{{- with .Values.metrics.podmonitor.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.metrics.podm
gitextract_2mttmn_y/
├── .envrc.template
├── .github/
│ ├── ISSUE_TEMPLATE/
│ │ └── bug_report.md
│ ├── actions/
│ │ └── repo_access/
│ │ └── action.yaml
│ ├── chainguard/
│ │ └── make-self-upgrade.sts.yaml
│ ├── renovate.json5
│ └── workflows/
│ ├── govulncheck.yaml
│ ├── make-self-upgrade.yaml
│ ├── release.yml
│ └── tests.yaml
├── .gitignore
├── .golangci.yaml
├── CONTRIBUTING.md
├── LICENSE
├── LICENSES
├── Makefile
├── OWNERS
├── OWNERS_ALIASES
├── README.md
├── RELEASE.md
├── agent.yaml
├── api/
│ ├── agent.go
│ ├── common.go
│ ├── datareading.go
│ └── datareading_test.go
├── cmd/
│ ├── agent.go
│ ├── agent_test.go
│ ├── ark/
│ │ └── main.go
│ ├── echo.go
│ ├── helpers.go
│ ├── root.go
│ └── version.go
├── deploy/
│ └── charts/
│ ├── disco-agent/
│ │ ├── .helmignore
│ │ ├── Chart.yaml
│ │ ├── README.md
│ │ ├── templates/
│ │ │ ├── NOTES.txt
│ │ │ ├── _helpers.tpl
│ │ │ ├── configmap.yaml
│ │ │ ├── deployment.yaml
│ │ │ ├── poddisruptionbudget.yaml
│ │ │ ├── podmonitor.yaml
│ │ │ ├── rbac.yaml
│ │ │ └── serviceaccount.yaml
│ │ ├── tests/
│ │ │ ├── README.md
│ │ │ ├── __snapshot__/
│ │ │ │ └── configmap_test.yaml.snap
│ │ │ └── configmap_test.yaml
│ │ ├── values.linter.exceptions
│ │ ├── values.schema.json
│ │ └── values.yaml
│ ├── discovery-agent/
│ │ ├── .helmignore
│ │ ├── Chart.yaml
│ │ ├── README.md
│ │ ├── templates/
│ │ │ ├── NOTES.txt
│ │ │ ├── _helpers.tpl
│ │ │ ├── configmap.yaml
│ │ │ ├── deployment.yaml
│ │ │ ├── poddisruptionbudget.yaml
│ │ │ ├── podmonitor.yaml
│ │ │ ├── rbac.yaml
│ │ │ └── serviceaccount.yaml
│ │ ├── tests/
│ │ │ ├── configmap_test.yaml
│ │ │ ├── deployment_test.yaml
│ │ │ ├── poddisruptionbudget_test.yaml
│ │ │ ├── podmonitor_test.yaml
│ │ │ ├── rbac_test.yaml
│ │ │ └── serviceaccount_test.yaml
│ │ ├── values.linter.exceptions
│ │ ├── values.schema.json
│ │ └── values.yaml
│ └── venafi-kubernetes-agent/
│ ├── .helmignore
│ ├── Chart.yaml
│ ├── README.md
│ ├── crd_bases/
│ │ ├── crd.footer.yaml
│ │ ├── crd.header-without-validations.yaml
│ │ ├── crd.header.yaml
│ │ └── jetstack.io_venaficonnections.yaml
│ ├── templates/
│ │ ├── NOTES.txt
│ │ ├── _helpers.tpl
│ │ ├── _venafi-connection.tpl
│ │ ├── configmap.yaml
│ │ ├── deployment.yaml
│ │ ├── poddisruptionbudget.yaml
│ │ ├── podmonitor.yaml
│ │ ├── rbac.yaml
│ │ ├── serviceaccount.yaml
│ │ ├── venafi-connection-crd.without-validations.yaml
│ │ ├── venafi-connection-crd.yaml
│ │ ├── venafi-connection-rbac.yaml
│ │ └── venafi-rbac.yaml
│ ├── tests/
│ │ ├── __snapshot__/
│ │ │ └── configmap_test.yaml.snap
│ │ ├── configmap_test.yaml
│ │ ├── deployment_test.yaml
│ │ └── values/
│ │ └── custom-volumes.yaml
│ ├── values.linter.exceptions
│ ├── values.schema.json
│ └── values.yaml
├── docs/
│ └── datagatherers/
│ ├── k8s-discovery.md
│ ├── k8s-dynamic.md
│ └── local.md
├── examples/
│ ├── cert-manager-agent.yaml
│ ├── echo/
│ │ ├── example.json
│ │ └── example2.json
│ ├── localfile/
│ │ ├── config.yaml
│ │ └── input.json
│ ├── machinehub/
│ │ ├── config.yaml
│ │ └── input.json
│ ├── machinehub.yaml
│ ├── one-shot-oidc.yaml
│ └── one-shot-secret.yaml
├── go.mod
├── go.sum
├── hack/
│ ├── ark/
│ │ ├── cluster-external-secret.yaml
│ │ ├── cluster-secret-store.yaml
│ │ ├── conjur-connect-configmap.yaml
│ │ ├── external-secret.yaml
│ │ ├── secret-store.yaml
│ │ └── test-e2e.sh
│ ├── e2e/
│ │ ├── application-team-1.yaml
│ │ ├── test.sh
│ │ ├── values.venafi-kubernetes-agent.yaml
│ │ └── venafi-components.yaml
│ └── ngts/
│ ├── custom_ca.yaml
│ └── test-e2e.sh
├── internal/
│ ├── cyberark/
│ │ ├── api/
│ │ │ ├── telemetry.go
│ │ │ └── telemetry_test.go
│ │ ├── client.go
│ │ ├── client_test.go
│ │ ├── dataupload/
│ │ │ ├── dataupload.go
│ │ │ ├── dataupload_test.go
│ │ │ └── mock.go
│ │ ├── identity/
│ │ │ ├── advance_authentication_test.go
│ │ │ ├── authenticated_http_client.go
│ │ │ ├── cmd/
│ │ │ │ └── testidentity/
│ │ │ │ └── main.go
│ │ │ ├── identity.go
│ │ │ ├── identity_test.go
│ │ │ ├── mock.go
│ │ │ ├── start_authentication_test.go
│ │ │ └── testdata/
│ │ │ ├── advance_authentication_failure.json
│ │ │ ├── advance_authentication_success.json
│ │ │ ├── start_authentication_bad_user_session_id.json
│ │ │ ├── start_authentication_failure.json
│ │ │ ├── start_authentication_success.json
│ │ │ ├── start_authentication_success_multiple_challenges.json
│ │ │ ├── start_authentication_success_multiple_mechanisms.json
│ │ │ └── start_authentication_success_no_up_mechanism.json
│ │ ├── servicediscovery/
│ │ │ ├── discovery.go
│ │ │ ├── discovery_test.go
│ │ │ ├── mock.go
│ │ │ └── testdata/
│ │ │ ├── README.md
│ │ │ └── discovery_success.json.template
│ │ └── testing/
│ │ └── testing.go
│ └── envelope/
│ ├── doc.go
│ ├── keyfetch/
│ │ ├── client.go
│ │ ├── client_test.go
│ │ ├── doc.go
│ │ ├── fake.go
│ │ └── fake_test.go
│ ├── rsa/
│ │ ├── doc.go
│ │ ├── encryptor.go
│ │ ├── encryptor_test.go
│ │ ├── keys.go
│ │ └── keys_test.go
│ └── types.go
├── klone.yaml
├── main.go
├── make/
│ ├── 00_mod.mk
│ ├── 02_mod.mk
│ ├── _shared/
│ │ ├── generate-verify/
│ │ │ ├── 00_mod.mk
│ │ │ ├── 02_mod.mk
│ │ │ └── util/
│ │ │ └── verify.sh
│ │ ├── go/
│ │ │ ├── .golangci.override.yaml
│ │ │ ├── 01_mod.mk
│ │ │ ├── README.md
│ │ │ └── base/
│ │ │ └── .github/
│ │ │ └── workflows/
│ │ │ └── govulncheck.yaml
│ │ ├── helm/
│ │ │ ├── 01_mod.mk
│ │ │ ├── crd.template.footer.yaml
│ │ │ ├── crd.template.header.yaml
│ │ │ ├── crds.mk
│ │ │ ├── crds_dir.README.md
│ │ │ ├── deploy.mk
│ │ │ └── helm.mk
│ │ ├── help/
│ │ │ ├── 01_mod.mk
│ │ │ └── help.sh
│ │ ├── kind/
│ │ │ ├── 00_kind_image_versions.mk
│ │ │ ├── 00_mod.mk
│ │ │ ├── 01_mod.mk
│ │ │ ├── kind-image-preload.mk
│ │ │ └── kind.mk
│ │ ├── klone/
│ │ │ └── 01_mod.mk
│ │ ├── licenses/
│ │ │ ├── 00_mod.mk
│ │ │ ├── 01_mod.mk
│ │ │ └── licenses.tmpl
│ │ ├── oci-build/
│ │ │ ├── 00_mod.mk
│ │ │ └── 01_mod.mk
│ │ ├── oci-publish/
│ │ │ ├── 00_mod.mk
│ │ │ ├── 01_mod.mk
│ │ │ └── image-exists.sh
│ │ ├── repository-base/
│ │ │ ├── 01_mod.mk
│ │ │ ├── base/
│ │ │ │ ├── .github/
│ │ │ │ │ ├── chainguard/
│ │ │ │ │ │ └── make-self-upgrade.sts.yaml
│ │ │ │ │ └── workflows/
│ │ │ │ │ └── make-self-upgrade.yaml
│ │ │ │ ├── Makefile
│ │ │ │ └── OWNERS_ALIASES
│ │ │ └── renovate-bootstrap-config.json5
│ │ └── tools/
│ │ ├── 00_mod.mk
│ │ └── util/
│ │ ├── checkhash.sh
│ │ ├── hash.sh
│ │ └── lock.sh
│ ├── ark/
│ │ ├── 00_mod.mk
│ │ └── 02_mod.mk
│ ├── connection_crd/
│ │ └── main.go
│ ├── extra_tools.mk
│ ├── ngts/
│ │ ├── 00_mod.mk
│ │ └── 02_mod.mk
│ └── test-unit.mk
└── pkg/
├── agent/
│ ├── config.go
│ ├── config_test.go
│ ├── dummy_data_gatherer.go
│ ├── metrics.go
│ └── run.go
├── client/
│ ├── client.go
│ ├── client_api_token.go
│ ├── client_cyberark.go
│ ├── client_cyberark_convertdatareadings_test.go
│ ├── client_cyberark_test.go
│ ├── client_file.go
│ ├── client_file_test.go
│ ├── client_ngts.go
│ ├── client_ngts_test.go
│ ├── client_oauth.go
│ ├── client_venafi_cloud.go
│ ├── client_venconn.go
│ ├── client_venconn_test.go
│ └── util.go
├── datagatherer/
│ ├── datagatherer.go
│ ├── k8sdiscovery/
│ │ └── discovery.go
│ ├── k8sdynamic/
│ │ ├── cache.go
│ │ ├── cache_test.go
│ │ ├── dynamic.go
│ │ ├── dynamic_test.go
│ │ ├── fieldfilter.go
│ │ └── fieldfilter_test.go
│ ├── local/
│ │ └── local.go
│ └── oidc/
│ ├── oidc.go
│ └── oidc_test.go
├── echo/
│ ├── echo.go
│ └── echo_test.go
├── kubeconfig/
│ ├── client.go
│ ├── client_test.go
│ └── kubeconfig.go
├── logs/
│ ├── logs.go
│ └── logs_test.go
├── permissions/
│ ├── generate.go
│ └── generate_test.go
├── testutil/
│ ├── envtest.go
│ ├── undent.go
│ └── undent_test.go
└── version/
└── version.go
SYMBOL INDEX (501 symbols across 82 files)
FILE: api/agent.go
type AgentMetadata (line 4) | type AgentMetadata struct
FILE: api/common.go
constant TimeFormat (line 10) | TimeFormat = time.RFC3339
type Time (line 13) | type Time struct
method String (line 18) | func (t Time) String() string {
method MarshalJSON (line 23) | func (t Time) MarshalJSON() ([]byte, error) {
FILE: api/datareading.go
type DataReadingsPost (line 14) | type DataReadingsPost struct
type DataReading (line 22) | type DataReading struct
method UnmarshalJSON (line 37) | func (o *DataReading) UnmarshalJSON(data []byte) error {
function jsonUnmarshalStrict (line 86) | func jsonUnmarshalStrict(data []byte, v any) error {
type GatheredResource (line 93) | type GatheredResource struct
method MarshalJSON (line 100) | func (v GatheredResource) MarshalJSON() ([]byte, error) {
method UnmarshalJSON (line 117) | func (v *GatheredResource) UnmarshalJSON(data []byte) error {
type DynamicData (line 136) | type DynamicData struct
type DiscoveryData (line 143) | type DiscoveryData struct
type OIDCDiscoveryData (line 156) | type OIDCDiscoveryData struct
FILE: api/datareading_test.go
function TestJSONGatheredResourceDropsEmptyTime (line 11) | func TestJSONGatheredResourceDropsEmptyTime(t *testing.T) {
function TestJSONGatheredResourceSetsTimeWhenPresent (line 25) | func TestJSONGatheredResourceSetsTimeWhenPresent(t *testing.T) {
function TestDataReading_UnmarshalJSON (line 42) | func TestDataReading_UnmarshalJSON(t *testing.T) {
FILE: cmd/agent.go
function init (line 58) | func init() {
FILE: cmd/agent_test.go
function TestOutputModes (line 24) | func TestOutputModes(t *testing.T) {
function findRepoRoot (line 54) | func findRepoRoot(t *testing.T) string {
function runSubprocess (line 68) | func runSubprocess(t *testing.T, repoRoot string, args []string) {
FILE: cmd/ark/main.go
function main (line 5) | func main() {
FILE: cmd/echo.go
function init (line 17) | func init() {
FILE: cmd/helpers.go
function printVersion (line 11) | func printVersion(verbose bool) {
function printOAuth2Config (line 20) | func printOAuth2Config() {
FILE: cmd/root.go
function init (line 37) | func init() {
function Execute (line 47) | func Execute() {
function setFlagsFromEnv (line 58) | func setFlagsFromEnv(prefix string, fs *pflag.FlagSet) {
FILE: cmd/version.go
function init (line 19) | func init() {
FILE: internal/cyberark/api/telemetry.go
constant TelemetryHeaderKey (line 22) | TelemetryHeaderKey = "X-Cybr-Telemetry"
function init (line 30) | func init() {
function SetTelemetryRequestHeader (line 41) | func SetTelemetryRequestHeader(req *http.Request) {
FILE: internal/cyberark/api/telemetry_test.go
function TestSetTelemetryRequestHeader (line 13) | func TestSetTelemetryRequestHeader(t *testing.T) {
FILE: internal/cyberark/client.go
type ClientConfig (line 15) | type ClientConfig struct
type ClientConfigLoader (line 22) | type ClientConfigLoader
function LoadClientConfigFromEnvironment (line 32) | func LoadClientConfigFromEnvironment() (ClientConfig, error) {
function NewDatauploadClient (line 52) | func NewDatauploadClient(ctx context.Context, httpClient *http.Client, s...
FILE: internal/cyberark/client_test.go
function TestCyberArkClient_PutSnapshot_MockAPI (line 25) | func TestCyberArkClient_PutSnapshot_MockAPI(t *testing.T) {
function TestCyberArkClient_PutSnapshot_RealAPI (line 67) | func TestCyberArkClient_PutSnapshot_RealAPI(t *testing.T) {
FILE: internal/cyberark/dataupload/dataupload.go
constant maxRetrievePresignedUploadURLBodySize (line 25) | maxRetrievePresignedUploadURLBodySize = 10 * 1024
constant apiPathSnapshotLinks (line 30) | apiPathSnapshotLinks = "/ingestions/kubernetes/snapshot-links"
type CyberArkClient (line 33) | type CyberArkClient struct
method PutSnapshot (line 120) | func (c *CyberArkClient) PutSnapshot(ctx context.Context, snapshot Sna...
method retrievePresignedUploadURL (line 200) | func (c *CyberArkClient) retrievePresignedUploadURL(ctx context.Contex...
function New (line 43) | func New(httpClient *http.Client, baseURL string, tenantUUID string, aut...
type Snapshot (line 56) | type Snapshot struct
constant SigV4Support (line 179) | SigV4Support = "sigv4"
type RetrievePresignedUploadURLRequest (line 182) | type RetrievePresignedUploadURLRequest struct
FILE: internal/cyberark/dataupload/dataupload_test.go
function TestCyberArkClient_PutSnapshot_MockAPI (line 22) | func TestCyberArkClient_PutSnapshot_MockAPI(t *testing.T) {
FILE: internal/cyberark/dataupload/mock.go
constant successBearerToken (line 27) | successBearerToken = "success-token"
constant successClusterID (line 29) | successClusterID = "ffffffff-ffff-ffff-ffff-ffffffffffff"
type uploadValues (line 32) | type uploadValues struct
type mockDataUploadServer (line 37) | type mockDataUploadServer struct
method ServeHTTP (line 85) | func (mds *mockDataUploadServer) ServeHTTP(w http.ResponseWriter, r *h...
method handleSnapshotLinks (line 103) | func (mds *mockDataUploadServer) handleSnapshotLinks(w http.ResponseWr...
method handlePresignedUpload (line 206) | func (mds *mockDataUploadServer) handlePresignedUpload(w http.Response...
function MockDataUploadServer (line 60) | func MockDataUploadServer(t testing.TB) (string, *http.Client) {
function randHex (line 93) | func randHex() string {
constant amzExampleChecksumError (line 198) | amzExampleChecksumError = `<?xml version="1.0" encoding="UTF-8"?>
FILE: internal/cyberark/identity/advance_authentication_test.go
function Test_IdentityAdvanceAuthentication (line 15) | func Test_IdentityAdvanceAuthentication(t *testing.T) {
FILE: internal/cyberark/identity/authenticated_http_client.go
type RequestAuthenticator (line 8) | type RequestAuthenticator
method AuthenticateRequest (line 12) | func (c *Client) AuthenticateRequest(req *http.Request) (string, error) {
FILE: internal/cyberark/identity/cmd/testidentity/main.go
constant subdomainFlag (line 26) | subdomainFlag = "subdomain"
constant usernameFlag (line 27) | usernameFlag = "username"
constant passwordEnv (line 28) | passwordEnv = "ARK_SECRET"
function run (line 36) | func run(ctx context.Context) error {
function main (line 69) | func main() {
FILE: internal/cyberark/identity/identity.go
constant MechanismUsernamePassword (line 24) | MechanismUsernamePassword = "UP"
constant ActionAnswer (line 28) | ActionAnswer = "Answer"
constant SummaryLoginSuccess (line 33) | SummaryLoginSuccess = "LoginSuccess"
constant SummaryNewPackage (line 37) | SummaryNewPackage = "NewPackage"
constant maxStartAuthenticationBodySize (line 42) | maxStartAuthenticationBodySize = 10 * 1024
constant maxAdvanceAuthenticationBodySize (line 47) | maxAdvanceAuthenticationBodySize = 30 * 1024
type startAuthenticationRequestBody (line 56) | type startAuthenticationRequestBody struct
type identityResponseBody (line 71) | type identityResponseBody struct
type startAuthenticationResponseBody (line 91) | type startAuthenticationResponseBody
type advanceAuthenticationResponseBody (line 94) | type advanceAuthenticationResponseBody
type startAuthenticationResponseResult (line 97) | type startAuthenticationResponseResult struct
type startAuthenticationChallenge (line 116) | type startAuthenticationChallenge struct
type startAuthenticationMechanism (line 122) | type startAuthenticationMechanism struct
type advanceAuthenticationRequestBody (line 140) | type advanceAuthenticationRequestBody struct
type advanceAuthenticationResponseResult (line 165) | type advanceAuthenticationResponseResult struct
type Client (line 178) | type Client struct
method LoginUsernamePassword (line 210) | func (c *Client) LoginUsernamePassword(ctx context.Context, username s...
method doStartAuthentication (line 248) | func (c *Client) doStartAuthentication(ctx context.Context, username s...
method doAdvanceAuthentication (line 363) | func (c *Client) doAdvanceAuthentication(ctx context.Context, username...
type token (line 189) | type token struct
function New (line 195) | func New(httpClient *http.Client, baseURL string, subdomain string) *Cli...
function setIdentityHeaders (line 439) | func setIdentityHeaders(r *http.Request) {
FILE: internal/cyberark/identity/identity_test.go
type inputs (line 25) | type inputs struct
function TestLoginUsernamePassword_MockAPI (line 36) | func TestLoginUsernamePassword_MockAPI(t *testing.T) {
function TestLoginUsernamePassword_RealAPI (line 52) | func TestLoginUsernamePassword_RealAPI(t *testing.T) {
function loginUsernamePasswordTests (line 72) | func loginUsernamePasswordTests(t *testing.T, inputsGenerator func(t tes...
FILE: internal/cyberark/identity/mock.go
constant successUser (line 21) | successUser = "test@example.com"
constant failureUser (line 22) | failureUser = "test-fail@example.com"
constant successUserMultipleChallenges (line 23) | successUserMultipleChallenges = "test-multiple-challenges@example.com"
constant successUserMultipleMechanisms (line 24) | successUserMultipleMechanisms = "test-multiple-mechanisms@example.com"
constant noUPMechanism (line 25) | noUPMechanism = "noup@example.com"
constant successMechanismID (line 27) | successMechanismID = "aaaaaaa_AAAAAAAAAAAAAAAAAAAAAAAAAAAA-1111111"
constant successSessionID (line 28) | successSessionID = "mysessionid101"
constant successPassword (line 29) | successPassword = "somepassword"
constant mockSuccessfulStartAuthenticationToken (line 34) | mockSuccessfulStartAuthenticationToken = "success-token"
type mockIdentityServer (line 63) | type mockIdentityServer struct
method ServeHTTP (line 80) | func (mis *mockIdentityServer) ServeHTTP(w http.ResponseWriter, r *htt...
method handleStartAuthentication (line 120) | func (mis *mockIdentityServer) handleStartAuthentication(w http.Respon...
method handleAdvanceAuthentication (line 180) | func (mis *mockIdentityServer) handleAdvanceAuthentication(w http.Resp...
function MockIdentityServer (line 69) | func MockIdentityServer(t testing.TB) (string, *http.Client) {
function checkRequestHeaders (line 98) | func checkRequestHeaders(r *http.Request) error {
FILE: internal/cyberark/identity/start_authentication_test.go
function Test_IdentityStartAuthentication (line 10) | func Test_IdentityStartAuthentication(t *testing.T) {
FILE: internal/cyberark/servicediscovery/discovery.go
constant ProdDiscoveryAPIBaseURL (line 21) | ProdDiscoveryAPIBaseURL = "https://platform-discovery.cyberark.cloud/"
constant IdentityServiceName (line 25) | IdentityServiceName = "identity_administration"
constant DiscoveryContextServiceName (line 29) | DiscoveryContextServiceName = "discoverycontext"
constant maxDiscoverBodySize (line 33) | maxDiscoverBodySize = 2 * 1024 * 1024
type Client (line 39) | type Client struct
method DiscoverServices (line 114) | func (c *Client) DiscoverServices(ctx context.Context) (*Services, str...
function New (line 53) | func New(httpClient *http.Client, subdomain string) *Client {
type DiscoveryResponse (line 75) | type DiscoveryResponse struct
type Service (line 87) | type Service struct
type ServiceEndpoint (line 97) | type ServiceEndpoint struct
type Services (line 106) | type Services struct
FILE: internal/cyberark/servicediscovery/discovery_test.go
function Test_DiscoverIdentityAPIURL (line 15) | func Test_DiscoverIdentityAPIURL(t *testing.T) {
FILE: internal/cyberark/servicediscovery/mock.go
constant MockDiscoverySubdomain (line 24) | MockDiscoverySubdomain = "tlskp-test"
constant mockIdentityAPIURL (line 26) | mockIdentityAPIURL = "https://ajp5871.id.integration-cyberark.cl...
constant mockDiscoveryContextAPIURL (line 27) | mockDiscoveryContextAPIURL = "https://venafi-test.inventory.integration-...
constant prefix (line 28) | prefix = "/api/public/tenant-discovery?bySubdomain="
type mockDiscoveryServer (line 34) | type mockDiscoveryServer struct
method ServeHTTP (line 72) | func (mds *mockDiscoveryServer) ServeHTTP(w http.ResponseWriter, r *ht...
function MockDiscoveryServer (line 53) | func MockDiscoveryServer(t testing.TB, services Services) *http.Client {
FILE: internal/cyberark/testing/testing.go
function SkipIfNoEnv (line 9) | func SkipIfNoEnv(t testing.TB) {
FILE: internal/envelope/keyfetch/client.go
constant minRSAKeySize (line 27) | minRSAKeySize = 2048
type KeyFetcher (line 31) | type KeyFetcher interface
type PublicKey (line 40) | type PublicKey struct
type Client (line 51) | type Client struct
method FetchKey (line 90) | func (c *Client) FetchKey(ctx context.Context) (PublicKey, error) {
function NewClient (line 69) | func NewClient(ctx context.Context, discoveryClient *servicediscovery.Cl...
FILE: internal/envelope/keyfetch/client_test.go
function testClientSetup (line 19) | func testClientSetup(t *testing.T, jwksServerURL string) (*Client, cyber...
function mockJWKSServer (line 59) | func mockJWKSServer(t *testing.T, statusCode int, jwksResponse string) *...
function TestClient_FetchKey (line 80) | func TestClient_FetchKey(t *testing.T) {
FILE: internal/envelope/keyfetch/fake.go
type FakeClient (line 15) | type FakeClient struct
method FetchKey (line 52) | func (f *FakeClient) FetchKey(ctx context.Context) (PublicKey, error) {
function NewFakeClient (line 29) | func NewFakeClient() *FakeClient {
function NewFakeClientWithKey (line 34) | func NewFakeClientWithKey(keyID string, key *rsa.PublicKey) *FakeClient {
function NewFakeClientWithError (line 44) | func NewFakeClientWithError(err error) *FakeClient {
FILE: internal/envelope/keyfetch/fake_test.go
function TestFakeClient (line 14) | func TestFakeClient(t *testing.T) {
FILE: internal/envelope/rsa/encryptor.go
constant EncryptionType (line 16) | EncryptionType = "JWE-RSA"
type Encryptor (line 24) | type Encryptor struct
method Encrypt (line 39) | func (e *Encryptor) Encrypt(ctx context.Context, data []byte) (*envelo...
function NewEncryptor (line 30) | func NewEncryptor(fetcher keyfetch.KeyFetcher) (*Encryptor, error) {
FILE: internal/envelope/rsa/encryptor_test.go
constant testKeyID (line 19) | testKeyID = "test-key-id"
constant minRSAKeySize (line 21) | minRSAKeySize = 2048
function testKey (line 31) | func testKey() *rsa.PrivateKey {
function TestEncrypt_VariousDataSizes (line 44) | func TestEncrypt_VariousDataSizes(t *testing.T) {
function TestEncrypt_EmptyData (line 89) | func TestEncrypt_EmptyData(t *testing.T) {
function TestEncrypt_NonDeterministic (line 101) | func TestEncrypt_NonDeterministic(t *testing.T) {
function TestEncrypt_JWEFormat (line 122) | func TestEncrypt_JWEFormat(t *testing.T) {
function TestEncrypt_DecryptRoundtrip (line 140) | func TestEncrypt_DecryptRoundtrip(t *testing.T) {
FILE: internal/envelope/rsa/keys.go
constant HardcodedPublicKeyPEM (line 17) | HardcodedPublicKeyPEM = `-----BEGIN PUBLIC KEY-----
constant hardcodedUID (line 29) | hardcodedUID = "A39798E6-8CE7-4E6E-9CF6-24A3C923B3A7"
function LoadPublicKeyFromPEM (line 34) | func LoadPublicKeyFromPEM(pemBytes []byte) (*rsa.PublicKey, error) {
function LoadPublicKeyFromPEMFile (line 69) | func LoadPublicKeyFromPEMFile(path string) (*rsa.PublicKey, error) {
function LoadHardcodedPublicKey (line 82) | func LoadHardcodedPublicKey() (*rsa.PublicKey, string, error) {
FILE: internal/envelope/rsa/keys_test.go
function generateTestKeyPEM (line 20) | func generateTestKeyPEM(t *testing.T, keySize int, pemType string) []byte {
function TestLoadPublicKeyFromPEM_PKIX (line 50) | func TestLoadPublicKeyFromPEM_PKIX(t *testing.T) {
function TestLoadPublicKeyFromPEM_PKCS1 (line 59) | func TestLoadPublicKeyFromPEM_PKCS1(t *testing.T) {
function TestLoadPublicKeyFromPEM_InvalidPEM (line 68) | func TestLoadPublicKeyFromPEM_InvalidPEM(t *testing.T) {
function TestLoadPublicKeyFromPEM_WrongPEMType (line 77) | func TestLoadPublicKeyFromPEM_WrongPEMType(t *testing.T) {
function TestLoadPublicKeyFromPEM_NonRSAKey (line 94) | func TestLoadPublicKeyFromPEM_NonRSAKey(t *testing.T) {
function TestLoadPublicKeyFromPEMFile_ValidFile (line 114) | func TestLoadPublicKeyFromPEMFile_ValidFile(t *testing.T) {
function TestLoadPublicKeyFromPEMFile_MissingFile (line 128) | func TestLoadPublicKeyFromPEMFile_MissingFile(t *testing.T) {
function TestLoadPublicKeyFromPEMFile_InvalidContent (line 135) | func TestLoadPublicKeyFromPEMFile_InvalidContent(t *testing.T) {
function TestLoadHardcodedPublicKey_CanBeUsedWithEncryptor (line 147) | func TestLoadHardcodedPublicKey_CanBeUsedWithEncryptor(t *testing.T) {
FILE: internal/envelope/types.go
type EncryptedData (line 9) | type EncryptedData struct
method ToMap (line 20) | func (ed *EncryptedData) ToMap() map[string]any {
type Encryptor (line 37) | type Encryptor interface
FILE: main.go
function main (line 5) | func main() {
FILE: make/connection_crd/main.go
function main (line 11) | func main() {
FILE: pkg/agent/config.go
type Config (line 33) | type Config struct
method Dump (line 1092) | func (c *Config) Dump() (string, error) {
type Endpoint (line 78) | type Endpoint struct
type DataGatherer (line 84) | type DataGatherer struct
method UnmarshalYAML (line 1044) | func (dg *DataGatherer) UnmarshalYAML(unmarshal func(any) error) error {
type VenafiCloudConfig (line 91) | type VenafiCloudConfig struct
type AgentCmdFlags (line 103) | type AgentCmdFlags struct
function InitAgentCmdFlags (line 198) | func InitAgentCmdFlags(c *cobra.Command, cfg *AgentCmdFlags) {
type OutputMode (line 380) | type OutputMode
constant JetstackSecureOAuth (line 383) | JetstackSecureOAuth OutputMode = "Jetstack Secure OAuth"
constant JetstackSecureAPIToken (line 384) | JetstackSecureAPIToken OutputMode = "Jetstack Secure API Token"
constant VenafiCloudKeypair (line 385) | VenafiCloudKeypair OutputMode = "Venafi Cloud Key Pair Service ...
constant VenafiCloudVenafiConnection (line 386) | VenafiCloudVenafiConnection OutputMode = "Venafi Cloud VenafiConnection"
constant LocalFile (line 387) | LocalFile OutputMode = "Local File"
constant MachineHub (line 388) | MachineHub OutputMode = "MachineHub"
constant NGTS (line 389) | NGTS OutputMode = "NGTS"
type CombinedConfig (line 394) | type CombinedConfig struct
function ValidateAndCombineConfig (line 457) | func ValidateAndCombineConfig(log logr.Logger, cfg Config, flags AgentCm...
function validateCredsAndCreateClient (line 859) | func validateCredsAndCreateClient(log logr.Logger, flagCredentialsPath, ...
function ValidateDataGatherers (line 1005) | func ValidateDataGatherers(dataGatherers []DataGatherer) error {
function getInClusterNamespace (line 1020) | func getInClusterNamespace() (string, error) {
function reMarshal (line 1029) | func reMarshal(rawConfig any, config datagatherer.Config) error {
function ParseConfig (line 1105) | func ParseConfig(data []byte) (Config, error) {
type credType (line 1116) | type credType
constant CredOldJetstackSecureOAuth (line 1119) | CredOldJetstackSecureOAuth credType = "CredOldJetstackSecureOAuth"
constant CredVenafiCloudKeypair (line 1120) | CredVenafiCloudKeypair credType = "CredVenafiCloudKeypair"
function readCredentialsFile (line 1123) | func readCredentialsFile(path string) ([]byte, error) {
FILE: pkg/agent/config_test.go
function Test_ValidateAndCombineConfig (line 22) | func Test_ValidateAndCombineConfig(t *testing.T) {
function Test_ValidateAndCombineConfig_VenafiCloudKeyPair (line 741) | func Test_ValidateAndCombineConfig_VenafiCloudKeyPair(t *testing.T) {
function Test_ValidateAndCombineConfig_VenafiConnection (line 785) | func Test_ValidateAndCombineConfig_VenafiConnection(t *testing.T) {
function Test_ParseConfig (line 884) | func Test_ParseConfig(t *testing.T) {
function Test_ValidateDataGatherers (line 954) | func Test_ValidateDataGatherers(t *testing.T) {
function withFile (line 993) | func withFile(t testing.TB, content string) string {
function recordLogs (line 1010) | func recordLogs(t *testing.T) (logr.Logger, ktesting.Buffer) {
function discardLogs (line 1017) | func discardLogs() logr.Logger {
function withConfig (line 1022) | func withConfig(s string) Config {
function withCmdLineFlags (line 1030) | func withCmdLineFlags(flags ...string) AgentCmdFlags {
function withoutCmdLineFlags (line 1042) | func withoutCmdLineFlags() AgentCmdFlags {
constant fakeKubeconfig (line 1046) | fakeKubeconfig = `
function Test_ValidateAndCombineConfig_NGTS (line 1068) | func Test_ValidateAndCombineConfig_NGTS(t *testing.T) {
constant fakePrivKeyPEM (line 1262) | fakePrivKeyPEM = `-----BEGIN PRIVATE KEY-----
FILE: pkg/agent/dummy_data_gatherer.go
type dummyConfig (line 10) | type dummyConfig struct
method NewDataGatherer (line 16) | func (c *dummyConfig) NewDataGatherer(ctx context.Context) (datagather...
type dummyDataGatherer (line 26) | type dummyDataGatherer struct
method Run (line 32) | func (g *dummyDataGatherer) Run(ctx context.Context) error {
method WaitForCacheSync (line 37) | func (g *dummyDataGatherer) WaitForCacheSync(ctx context.Context) error {
method Fetch (line 42) | func (c *dummyDataGatherer) Fetch(ctx context.Context) (any, int, erro...
FILE: pkg/agent/run.go
constant schemaVersion (line 53) | schemaVersion string = "v2.0.0"
function Run (line 56) | func Run(cmd *cobra.Command, args []string) (returnErr error) {
function loadEncryptor (line 285) | func loadEncryptor(ctx context.Context, preflightClient client.Client) (...
function newEventf (line 313) | func newEventf(log logr.Logger) (Eventf, error) {
type Eventf (line 351) | type Eventf
function gatherAndOutputData (line 353) | func gatherAndOutputData(ctx context.Context, eventf Eventf, config Comb...
function gatherData (line 407) | func gatherData(ctx context.Context, config CombinedConfig, dataGatherer...
function postData (line 457) | func postData(ctx context.Context, config CombinedConfig, preflightClien...
function listenAndServe (line 482) | func listenAndServe(ctx context.Context, server *http.Server) error {
FILE: pkg/client/client.go
type Options (line 14) | type Options struct
type Client (line 34) | type Client interface
type Credentials (line 39) | type Credentials interface
function fullURL (line 45) | func fullURL(baseURL, path string) string {
FILE: pkg/client/client_api_token.go
type APITokenClient (line 23) | type APITokenClient struct
method PostDataReadingsWithOptions (line 51) | func (c *APITokenClient) PostDataReadingsWithOptions(ctx context.Conte...
method postDataReadings (line 57) | func (c *APITokenClient) postDataReadings(ctx context.Context, orgID, ...
method post (line 96) | func (c *APITokenClient) post(ctx context.Context, path string, body i...
function NewAPITokenClient (line 33) | func NewAPITokenClient(agentMetadata *api.AgentMetadata, apiToken, baseU...
FILE: pkg/client/client_cyberark.go
type CyberArkClient (line 28) | type CyberArkClient struct
method PostDataReadingsWithOptions (line 65) | func (o *CyberArkClient) PostDataReadingsWithOptions(ctx context.Conte...
method DiscoveryClient (line 99) | func (o *CyberArkClient) DiscoveryClient() *servicediscovery.Client {
method Config (line 103) | func (o *CyberArkClient) Config() (cyberark.ClientConfig, error) {
function NewCyberArk (line 43) | func NewCyberArk(httpClient *http.Client) (*CyberArkClient, error) {
function baseSnapshotFromOptions (line 110) | func baseSnapshotFromOptions(opts Options) dataupload.Snapshot {
function extractOIDCFromReading (line 120) | func extractOIDCFromReading(reading *api.DataReading, target *dataupload...
function extractClusterIDAndServerVersionFromReading (line 139) | func extractClusterIDAndServerVersionFromReading(reading *api.DataReadin...
function extractResourceListFromReading (line 161) | func extractResourceListFromReading(reading *api.DataReading, target *[]...
function convertDataReadings (line 258) | func convertDataReadings(
function minimizeSnapshot (line 309) | func minimizeSnapshot(log logr.Logger, snapshot *dataupload.Snapshot) {
function isExcludableSecret (line 331) | func isExcludableSecret(log logr.Logger, obj runtime.Object) bool {
function isExcludableTLSSecret (line 366) | func isExcludableTLSSecret(log logr.Logger, dataMap map[string]any) bool {
function searchPEM (line 410) | func searchPEM(data []byte, visitor func(*pem.Block) bool) bool {
function isClientCertificate (line 431) | func isClientCertificate(cert *x509.Certificate) bool {
FILE: pkg/client/client_cyberark_convertdatareadings_test.go
function TestBaseSnapshotFromOptions (line 30) | func TestBaseSnapshotFromOptions(t *testing.T) {
function TestExtractServerVersionFromReading (line 61) | func TestExtractServerVersionFromReading(t *testing.T) {
function TestExtractOIDCFromReading (line 130) | func TestExtractOIDCFromReading(t *testing.T) {
function TestExtractResourceListFromReading (line 194) | func TestExtractResourceListFromReading(t *testing.T) {
function TestConvertDataReadings_ConfigMaps (line 321) | func TestConvertDataReadings_ConfigMaps(t *testing.T) {
function TestConvertDataReadings_ExternalSecrets (line 423) | func TestConvertDataReadings_ExternalSecrets(t *testing.T) {
function TestConvertDataReadings_SecretStores (line 523) | func TestConvertDataReadings_SecretStores(t *testing.T) {
function TestConvertDataReadings_ClusterExternalSecrets (line 623) | func TestConvertDataReadings_ClusterExternalSecrets(t *testing.T) {
function TestConvertDataReadings_ClusterSecretStores (line 724) | func TestConvertDataReadings_ClusterSecretStores(t *testing.T) {
function TestConvertDataReadings_ServiceAccounts (line 819) | func TestConvertDataReadings_ServiceAccounts(t *testing.T) {
function TestConvertDataReadings_Roles (line 888) | func TestConvertDataReadings_Roles(t *testing.T) {
function TestConvertDataReadings_MultipleResources (line 965) | func TestConvertDataReadings_MultipleResources(t *testing.T) {
function TestConvertDataReadings (line 1074) | func TestConvertDataReadings(t *testing.T) {
function TestMinimizeSnapshot (line 1199) | func TestMinimizeSnapshot(t *testing.T) {
function TestIsExcludableSecret (line 1280) | func TestIsExcludableSecret(t *testing.T) {
function newTLSSecret (line 1361) | func newTLSSecret(name string, crt any) *unstructured.Unstructured {
function newOpaqueSecret (line 1381) | func newOpaqueSecret(name string) *unstructured.Unstructured {
function sampleCertificateChain (line 1403) | func sampleCertificateChain(t testing.TB, usages ...x509.ExtKeyUsage) st...
FILE: pkg/client/client_cyberark_test.go
function TestCyberArkClient_PostDataReadingsWithOptions_MockAPI (line 30) | func TestCyberArkClient_PostDataReadingsWithOptions_MockAPI(t *testing.T) {
function TestCyberArkClient_PostDataReadingsWithOptions_RealAPI (line 56) | func TestCyberArkClient_PostDataReadingsWithOptions_RealAPI(t *testing.T) {
function fakeReadings (line 108) | func fakeReadings() []*api.DataReading {
FILE: pkg/client/client_file.go
type FileClient (line 15) | type FileClient struct
method PostDataReadingsWithOptions (line 25) | func (o *FileClient) PostDataReadingsWithOptions(ctx context.Context, ...
function NewFileClient (line 19) | func NewFileClient(path string) Client {
FILE: pkg/client/client_file_test.go
function TestFileClient_PostDataReadingsWithOptions (line 17) | func TestFileClient_PostDataReadingsWithOptions(t *testing.T) {
FILE: pkg/client/client_ngts.go
type NGTSClient (line 35) | type NGTSClient struct
method PostDataReadingsWithOptions (line 226) | func (c *NGTSClient) PostDataReadingsWithOptions(ctx context.Context, ...
method post (line 284) | func (c *NGTSClient) post(ctx context.Context, url string, body io.Rea...
method getValidAccessToken (line 308) | func (c *NGTSClient) getValidAccessToken(ctx context.Context) (*ngtsAc...
method updateAccessToken (line 328) | func (c *NGTSClient) updateAccessToken(ctx context.Context) error {
method sendHTTPRequest (line 367) | func (c *NGTSClient) sendHTTPRequest(request *http.Request, responseOb...
method generateAndSignJwtToken (line 394) | func (c *NGTSClient) generateAndSignJwtToken() (string, error) {
type NGTSServiceAccountCredentials (line 51) | type NGTSServiceAccountCredentials struct
method LoadClientIDIfNeeded (line 164) | func (c *NGTSServiceAccountCredentials) LoadClientIDIfNeeded() error {
method Validate (line 208) | func (c *NGTSServiceAccountCredentials) Validate() error {
type ngtsAccessToken (line 60) | type ngtsAccessToken struct
type ngtsAccessTokenResponse (line 66) | type ngtsAccessTokenResponse struct
constant ngtsProdURLFormat (line 75) | ngtsProdURLFormat = "https://%s.ngts.paloaltonetworks.com"
constant ngtsUploadEndpoint (line 80) | ngtsUploadEndpoint = "v1/tlspk/upload/clusterdata/no"
constant ngtsAccessTokenEndpoint (line 83) | ngtsAccessTokenEndpoint = accessTokenEndpoint
constant ngtsRequiredGrantType (line 86) | ngtsRequiredGrantType = requiredGrantType
function NewNGTSClient (line 92) | func NewNGTSClient(agentMetadata *api.AgentMetadata, credentials *NGTSSe...
FILE: pkg/client/client_ngts_test.go
constant fakePrivKeyPEM (line 17) | fakePrivKeyPEM = `-----BEGIN PRIVATE KEY-----
function withFile (line 24) | func withFile(t testing.TB, content string) string {
function TestNewNGTSClient (line 41) | func TestNewNGTSClient(t *testing.T) {
function TestNGTSClient_LoadClientIDFromFile (line 128) | func TestNGTSClient_LoadClientIDFromFile(t *testing.T) {
function TestNGTSClient_LoadClientIDFromFileAlternativeNames (line 189) | func TestNGTSClient_LoadClientIDFromFileAlternativeNames(t *testing.T) {
function TestNGTSClient_PostDataReadingsWithOptions (line 275) | func TestNGTSClient_PostDataReadingsWithOptions(t *testing.T) {
function TestNGTSClient_AuthenticationFlow (line 362) | func TestNGTSClient_AuthenticationFlow(t *testing.T) {
function TestNGTSClient_ErrorHandling (line 407) | func TestNGTSClient_ErrorHandling(t *testing.T) {
FILE: pkg/client/client_oauth.go
type OAuthClient (line 26) | type OAuthClient struct
method PostDataReadingsWithOptions (line 106) | func (c *OAuthClient) PostDataReadingsWithOptions(ctx context.Context,...
method postDataReadings (line 112) | func (c *OAuthClient) postDataReadings(ctx context.Context, orgID, clu...
method post (line 151) | func (c *OAuthClient) post(ctx context.Context, path string, body io.R...
method getValidAccessToken (line 176) | func (c *OAuthClient) getValidAccessToken(ctx context.Context) (*acces...
method renewAccessToken (line 187) | func (c *OAuthClient) renewAccessToken(ctx context.Context) error {
type accessToken (line 34) | type accessToken struct
method needsRenew (line 68) | func (t *accessToken) needsRenew() bool {
type OAuthCredentials (line 40) | type OAuthCredentials struct
method IsClientSet (line 260) | func (c *OAuthCredentials) IsClientSet() (ok bool, why string) {
method Validate (line 274) | func (c *OAuthCredentials) Validate() error {
function NewOAuthClient (line 74) | func NewOAuthClient(agentMetadata *api.AgentMetadata, credentials *OAuth...
function ParseOAuthCredentials (line 243) | func ParseOAuthCredentials(data []byte) (*OAuthCredentials, error) {
FILE: pkg/client/client_venafi_cloud.go
type VenafiCloudClient (line 39) | type VenafiCloudClient struct
method PostDataReadingsWithOptions (line 172) | func (c *VenafiCloudClient) PostDataReadingsWithOptions(ctx context.Co...
method post (line 230) | func (c *VenafiCloudClient) post(ctx context.Context, path string, bod...
method getValidAccessToken (line 255) | func (c *VenafiCloudClient) getValidAccessToken(ctx context.Context) (...
method updateAccessToken (line 274) | func (c *VenafiCloudClient) updateAccessToken(ctx context.Context) err...
method sendHTTPRequest (line 312) | func (c *VenafiCloudClient) sendHTTPRequest(request *http.Request, res...
method generateAndSignJwtToken (line 337) | func (c *VenafiCloudClient) generateAndSignJwtToken() (string, error) {
type VenafiSvcAccountCredentials (line 55) | type VenafiSvcAccountCredentials struct
method Validate (line 139) | func (c *VenafiSvcAccountCredentials) Validate() error {
method IsClientSet (line 159) | func (c *VenafiSvcAccountCredentials) IsClientSet() (ok bool, why stri...
type venafiCloudAccessToken (line 63) | type venafiCloudAccessToken struct
type accessTokenInformation (line 68) | type accessTokenInformation struct
constant VenafiCloudProdURL (line 77) | VenafiCloudProdURL = "https://api.venafi.cloud"
constant defaultVenafiCloudUploadEndpoint (line 78) | defaultVenafiCloudUploadEndpoint = "v1/tlspk/uploads"
constant accessTokenEndpoint (line 79) | accessTokenEndpoint = "/v1/oauth/token/serviceaccount"
constant requiredGrantType (line 80) | requiredGrantType = "urn:ietf:params:oauth:grant-type:jwt...
function NewVenafiCloudClient (line 85) | func NewVenafiCloudClient(agentMetadata *api.AgentMetadata, credentials ...
function ParseVenafiCredentials (line 124) | func ParseVenafiCredentials(data []byte) (*VenafiSvcAccountCredentials, ...
FILE: pkg/client/client_venconn.go
type VenConnClient (line 30) | type VenConnClient struct
method Start (line 125) | func (c *VenConnClient) Start(ctx context.Context) error {
method PostDataReadingsWithOptions (line 131) | func (c *VenConnClient) PostDataReadingsWithOptions(ctx context.Contex...
function NewVenConnClient (line 56) | func NewVenConnClient(restcfg *rest.Config, agentMetadata *api.AgentMeta...
FILE: pkg/client/client_venconn_test.go
function TestVenConnClient_PostDataReadingsWithOptions (line 38) | func TestVenConnClient_PostDataReadingsWithOptions(t *testing.T) {
type testcase (line 221) | type testcase struct
function run_TestVenConnClient_PostDataReadingsWithOptions (line 229) | func run_TestVenConnClient_PostDataReadingsWithOptions(ctx context.Conte...
function testNameToNamespace (line 305) | func testNameToNamespace(t testing.TB) string {
FILE: pkg/client/util.go
function parsePrivateKeyFromPEMFile (line 17) | func parsePrivateKeyFromPEMFile(privateKeyFilePath string) (crypto.Priva...
function parsePrivateKeyAndExtractSigningMethod (line 48) | func parsePrivateKeyAndExtractSigningMethod(privateKeyFile string) (cryp...
FILE: pkg/datagatherer/datagatherer.go
type Config (line 7) | type Config interface
type DataGatherer (line 13) | type DataGatherer interface
FILE: pkg/datagatherer/k8sdiscovery/discovery.go
type ConfigDiscovery (line 16) | type ConfigDiscovery struct
method UnmarshalYAML (line 22) | func (c *ConfigDiscovery) UnmarshalYAML(unmarshal func(any) error) err...
method NewDataGatherer (line 41) | func (c *ConfigDiscovery) NewDataGatherer(ctx context.Context) (dataga...
type DataGathererDiscovery (line 61) | type DataGathererDiscovery struct
method Run (line 68) | func (g *DataGathererDiscovery) Run(ctx context.Context) error {
method WaitForCacheSync (line 73) | func (g *DataGathererDiscovery) WaitForCacheSync(ctx context.Context) ...
method Fetch (line 79) | func (g *DataGathererDiscovery) Fetch(ctx context.Context) (any, int, ...
FILE: pkg/datagatherer/k8sdynamic/cache.go
type timeInterface (line 16) | type timeInterface interface
type realTime (line 22) | type realTime struct
method now (line 25) | func (*realTime) now() time.Time {
type cacheResource (line 29) | type cacheResource interface
function logCacheUpdateFailure (line 34) | func logCacheUpdateFailure(log logr.Logger, obj any, operation string) {
function onAdd (line 44) | func onAdd(log logr.Logger, obj any, dgCache *cache.Cache) {
function onUpdate (line 59) | func onUpdate(log logr.Logger, oldObj, newObj any, dgCache *cache.Cache) {
function onDelete (line 72) | func onDelete(log logr.Logger, obj any, dgCache *cache.Cache) {
function updateCacheGatheredResource (line 86) | func updateCacheGatheredResource(cacheKey string, resource any, dgCache ...
FILE: pkg/datagatherer/k8sdynamic/cache_test.go
function makeGatheredResource (line 16) | func makeGatheredResource(obj runtime.Object, deletedAt api.Time) *api.G...
function TestOnAddCache (line 23) | func TestOnAddCache(t *testing.T) {
function TestNoneCache (line 141) | func TestNoneCache(t *testing.T) {
FILE: pkg/datagatherer/k8sdynamic/dynamic.go
type ConfigDynamic (line 72) | type ConfigDynamic struct
method UnmarshalYAML (line 88) | func (c *ConfigDynamic) UnmarshalYAML(unmarshal func(any) error) error {
method validate (line 119) | func (c *ConfigDynamic) validate() error {
method NewDataGatherer (line 201) | func (c *ConfigDynamic) NewDataGatherer(ctx context.Context) (datagath...
method newDataGathererWithClient (line 219) | func (c *ConfigDynamic) newDataGathererWithClient(ctx context.Context,...
type sharedInformerFunc (line 157) | type sharedInformerFunc
type DataGathererDynamic (line 321) | type DataGathererDynamic struct
method GVR (line 351) | func (g *DataGathererDynamic) GVR() schema.GroupVersionResource {
method Run (line 358) | func (g *DataGathererDynamic) Run(ctx context.Context) error {
method WaitForCacheSync (line 387) | func (g *DataGathererDynamic) WaitForCacheSync(ctx context.Context) er...
method Fetch (line 398) | func (g *DataGathererDynamic) Fetch(ctx context.Context) (any, int, er...
method redactList (line 451) | func (g *DataGathererDynamic) redactList(ctx context.Context, list []*...
method encryptDataField (line 551) | func (g *DataGathererDynamic) encryptDataField(ctx context.Context, se...
constant encryptedDataFieldName (line 543) | encryptedDataFieldName = "_encryptedData"
function RemoveTypedKeys (line 590) | func RemoveTypedKeys(excludeAnnotKeys []*regexp.Regexp, m map[string]str...
function RemoveUnstructuredKeys (line 628) | func RemoveUnstructuredKeys(excludeKeys []*regexp.Regexp, obj *unstructu...
function generateExcludedNamespacesFieldSelector (line 663) | func generateExcludedNamespacesFieldSelector(excludeNamespaces []string)...
function isIncludedNamespace (line 674) | func isIncludedNamespace(namespace string, namespaces []string) bool {
function isNativeResource (line 681) | func isNativeResource(gvr schema.GroupVersionResource) bool {
FILE: pkg/datagatherer/k8sdynamic/dynamic_test.go
function getObject (line 40) | func getObject(version, kind, name, namespace string, withManagedFields ...
function getObjectAnnot (line 64) | func getObjectAnnot(version, kind, name, namespace string, annotations, ...
function getSecret (line 77) | func getSecret(name, namespace string, data map[string]any, isTLS bool, ...
function sortResourcesByName (line 103) | func sortResourcesByName(list []*unstructured.Unstructured) {
function sortGatheredResources (line 109) | func sortGatheredResources(list []*api.GatheredResource) {
function TestNewDataGathererWithClientAndDynamicInformer (line 130) | func TestNewDataGathererWithClientAndDynamicInformer(t *testing.T) {
function TestNewDataGathererWithClientAndSharedIndexInformer (line 186) | func TestNewDataGathererWithClientAndSharedIndexInformer(t *testing.T) {
function TestUnmarshalDynamicConfig (line 232) | func TestUnmarshalDynamicConfig(t *testing.T) {
function TestConfigDynamicValidate (line 304) | func TestConfigDynamicValidate(t *testing.T) {
function TestGenerateExcludedNamespacesFieldSelector (line 361) | func TestGenerateExcludedNamespacesFieldSelector(t *testing.T) {
type fakeTime (line 396) | type fakeTime struct
method now (line 399) | func (f *fakeTime) now() time.Time {
function init (line 404) | func init() {
type failEncryptor (line 408) | type failEncryptor struct
method Encrypt (line 410) | func (fe *failEncryptor) Encrypt(_ context.Context, plaintext []byte) ...
function TestDynamicGatherer_Fetch (line 414) | func TestDynamicGatherer_Fetch(t *testing.T) {
function compareEncryptedData (line 919) | func compareEncryptedData(t *testing.T, privKey *stdrsa.PrivateKey, got ...
function TestDynamicGathererNativeResources_Fetch (line 970) | func TestDynamicGathererNativeResources_Fetch(t *testing.T) {
function waitTimeout (line 1282) | func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
function TestRemoveUnstructuredKeys (line 1296) | func TestRemoveUnstructuredKeys(t *testing.T) {
type tc_RemoveUnstructuredKeys (line 1418) | type tc_RemoveUnstructuredKeys struct
function run_TestRemoveUnstructuredKeys (line 1425) | func run_TestRemoveUnstructuredKeys(tc tc_RemoveUnstructuredKeys) func(*...
function TestRemoveTypedKeys (line 1433) | func TestRemoveTypedKeys(t *testing.T) {
type tc_TestRemoveTypedKeys (line 1465) | type tc_TestRemoveTypedKeys struct
function run_TestRemoveTypedKeys (line 1471) | func run_TestRemoveTypedKeys(tc tc_TestRemoveTypedKeys) func(t *testing....
function toRegexps (line 1479) | func toRegexps(keys []string) []*regexp.Regexp {
function TestValidate_LabelSelectors (line 1488) | func TestValidate_LabelSelectors(t *testing.T) {
function TestValidate_FieldSelectors (line 1579) | func TestValidate_FieldSelectors(t *testing.T) {
function TestValidate_CombinedSelectors (line 1639) | func TestValidate_CombinedSelectors(t *testing.T) {
FILE: pkg/datagatherer/k8sdynamic/fieldfilter.go
type FieldPath (line 78) | type FieldPath
function Select (line 81) | func Select(fields []FieldPath, resource *unstructured.Unstructured) err...
function Redact (line 105) | func Redact(fields []FieldPath, resource *unstructured.Unstructured) {
FILE: pkg/datagatherer/k8sdynamic/fieldfilter_test.go
function TestSelect (line 14) | func TestSelect(t *testing.T) {
function run_TestSelect (line 210) | func run_TestSelect(given map[string]any, givenSelect []FieldPath, expec...
function TestSelectMissingSelectedField (line 221) | func TestSelectMissingSelectedField(t *testing.T) {
function TestRedactSecret (line 245) | func TestRedactSecret(t *testing.T) {
function TestRedactPod (line 293) | func TestRedactPod(t *testing.T) {
function TestRedactMissingField (line 332) | func TestRedactMissingField(t *testing.T) {
FILE: pkg/datagatherer/local/local.go
type Config (line 12) | type Config struct
method validate (line 18) | func (c *Config) validate() error {
method NewDataGatherer (line 31) | func (c *Config) NewDataGatherer(ctx context.Context) (datagatherer.Da...
type DataGatherer (line 26) | type DataGatherer struct
method Run (line 41) | func (g *DataGatherer) Run(ctx context.Context) error {
method WaitForCacheSync (line 46) | func (g *DataGatherer) WaitForCacheSync(ctx context.Context) error {
method Fetch (line 52) | func (g *DataGatherer) Fetch(ctx context.Context) (any, int, error) {
FILE: pkg/datagatherer/oidc/oidc.go
type OIDCDiscovery (line 21) | type OIDCDiscovery struct
method UnmarshalYAML (line 27) | func (c *OIDCDiscovery) UnmarshalYAML(unmarshal func(any) error) error {
method NewDataGatherer (line 41) | func (c *OIDCDiscovery) NewDataGatherer(ctx context.Context) (datagath...
type DataGathererOIDC (line 53) | type DataGathererOIDC struct
method Run (line 59) | func (g *DataGathererOIDC) Run(ctx context.Context) error {
method WaitForCacheSync (line 63) | func (g *DataGathererOIDC) WaitForCacheSync(ctx context.Context) error {
method Fetch (line 69) | func (g *DataGathererOIDC) Fetch(ctx context.Context) (any, int, error) {
method fetchOIDCConfig (line 95) | func (g *DataGathererOIDC) fetchOIDCConfig(ctx context.Context) (map[s...
method fetchJWKS (line 111) | func (g *DataGathererOIDC) fetchJWKS(ctx context.Context) (map[string]...
function stringFirstN (line 132) | func stringFirstN(s string, n int) string {
function k8sErrorMessage (line 140) | func k8sErrorMessage(err error) string {
FILE: pkg/datagatherer/oidc/oidc_test.go
function makeRESTClient (line 17) | func makeRESTClient(t *testing.T, ts *httptest.Server) rest.Interface {
function TestFetch_Success (line 36) | func TestFetch_Success(t *testing.T) {
function TestFetch_Errors (line 71) | func TestFetch_Errors(t *testing.T) {
FILE: pkg/echo/echo.go
function Echo (line 18) | func Echo(cmd *cobra.Command, args []string) error {
function echoHandler (line 24) | func echoHandler(w http.ResponseWriter, r *http.Request) {
function writeError (line 67) | func writeError(w http.ResponseWriter, err string, code int) {
function prettyPrint (line 73) | func prettyPrint(reading *api.DataReading) string {
FILE: pkg/echo/echo_test.go
type testInput (line 16) | type testInput struct
function TestEchoServerRequestResponse (line 23) | func TestEchoServerRequestResponse(t *testing.T) {
FILE: pkg/kubeconfig/client.go
function NewDynamicClient (line 12) | func NewDynamicClient(kubeconfigPath string) (dynamic.Interface, error) {
function NewDiscoveryClient (line 29) | func NewDiscoveryClient(kubeconfigPath string) (*discovery.DiscoveryClie...
function NewClientSet (line 46) | func NewClientSet(kubeconfigPath string) (kubernetes.Interface, error) {
FILE: pkg/kubeconfig/client_test.go
function TestNewDynamicClient_ExplicitKubeconfig (line 16) | func TestNewDynamicClient_ExplicitKubeconfig(t *testing.T) {
function TestNewDynamicClient_InferredKubeconfig (line 25) | func TestNewDynamicClient_InferredKubeconfig(t *testing.T) {
function TestNewDiscoveryClient_ExplicitKubeconfig (line 36) | func TestNewDiscoveryClient_ExplicitKubeconfig(t *testing.T) {
function TestNewDiscoveryClient_InferredKubeconfig (line 45) | func TestNewDiscoveryClient_InferredKubeconfig(t *testing.T) {
function writeConfigToFile (line 56) | func writeConfigToFile(t *testing.T, cfg clientcmdapi.Config) string {
function createValidTestConfig (line 68) | func createValidTestConfig() clientcmdapi.Config {
function temporarilySetEnv (line 90) | func temporarilySetEnv(key, value string) func() {
FILE: pkg/kubeconfig/kubeconfig.go
function LoadRESTConfig (line 11) | func LoadRESTConfig(path string) (*rest.Config, error) {
FILE: pkg/logs/logs.go
constant Info (line 58) | Info = 0
constant Debug (line 59) | Debug = 1
constant Trace (line 60) | Trace = 2
function init (line 63) | func init() {
function AddFlags (line 74) | func AddFlags(fs *pflag.FlagSet) {
function Initialize (line 116) | func Initialize() error {
type LogToSlogWriter (line 141) | type LogToSlogWriter struct
method Write (line 146) | func (w LogToSlogWriter) Write(p []byte) (n int, err error) {
FILE: pkg/logs/logs_test.go
function TestLogs (line 35) | func TestLogs(t *testing.T) {
function replaceWithStaticTimestamps (line 338) | func replaceWithStaticTimestamps(input string) string {
function Test_replaceWithStaticTimestamps (line 347) | func Test_replaceWithStaticTimestamps(t *testing.T) {
FILE: pkg/permissions/generate.go
type AgentRBACManifests (line 16) | type AgentRBACManifests struct
constant agentNamespace (line 25) | agentNamespace = "jetstack-secure"
constant agentSubjectName (line 26) | agentSubjectName = "agent"
function GenerateAgentRBACManifests (line 28) | func GenerateAgentRBACManifests(dataGatherers []agent.DataGatherer) Agen...
function createClusterRoleString (line 120) | func createClusterRoleString(clusterRoles []rbac.ClusterRole) string {
function createRoleBindingString (line 135) | func createRoleBindingString(roleBindings []rbac.RoleBinding) string {
function createClusterRoleBindingString (line 150) | func createClusterRoleBindingString(clusterRoleBindings []rbac.ClusterRo...
function GenerateFullManifest (line 166) | func GenerateFullManifest(dataGatherers []agent.DataGatherer) string {
FILE: pkg/permissions/generate_test.go
function TestGenerateAgentRBACManifestsString (line 15) | func TestGenerateAgentRBACManifestsString(t *testing.T) {
function TestGenerateAgentRBACManifests (line 196) | func TestGenerateAgentRBACManifests(t *testing.T) {
FILE: pkg/testutil/envtest.go
function WithEnvtest (line 37) | func WithEnvtest(t testing.TB) (_ *envtest.Environment, _ *rest.Config, ...
function WithKubeconfig (line 72) | func WithKubeconfig(t testing.TB, restCfg *rest.Config) string {
function VenConnStartWatching (line 112) | func VenConnStartWatching(ctx context.Context, t *testing.T, cl client.C...
function TrustCA (line 132) | func TrustCA(t *testing.T, cl client.Client, cert *x509.Certificate) {
function Parse (line 159) | func Parse(yamlmanifest string) []ctrlruntime.Object {
type AssertRequest (line 177) | type AssertRequest
function FakeVenafiCloud (line 179) | func FakeVenafiCloud(t *testing.T) (_ *httptest.Server, _ *x509.Certific...
function FakeTPP (line 235) | func FakeTPP(t testing.TB) (*httptest.Server, *x509.Certificate) {
function FakeCyberArk (line 276) | func FakeCyberArk(t testing.TB) *http.Client {
constant VenConnRBAC (line 299) | VenConnRBAC = `
FILE: pkg/testutil/undent.go
function Undent (line 48) | func Undent(s string) string {
function isIndent (line 154) | func isIndent(s string) bool {
FILE: pkg/testutil/undent_test.go
function Test_Undent (line 11) | func Test_Undent(t *testing.T) {
function runTest_Undent (line 40) | func runTest_Undent(given, expected string) func(t *testing.T) {
FILE: pkg/version/version.go
function UserAgent (line 26) | func UserAgent() string {
function SetUserAgent (line 31) | func SetUserAgent(req *http.Request) {
Condensed preview — 256 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (1,545K chars).
[
{
"path": ".envrc.template",
"chars": 1507,
"preview": "# Example .envrc file for use with direnv.\n# Copy this file to .envrc and edit the values as required.\n# Do not check in"
},
{
"path": ".github/ISSUE_TEMPLATE/bug_report.md",
"chars": 478,
"preview": "---\nname: Bug report\nabout: Issue for something that isn't working as expected\ntitle: ''\nlabels: ''\nassignees: ''\n\n---\n\n"
},
{
"path": ".github/actions/repo_access/action.yaml",
"chars": 1070,
"preview": "name: 'Setup repo access'\ndescription: 'Setups authenticate to GitHub repos'\ninputs:\n DEPLOY_KEY_READ_VENAFI_CONNECTION"
},
{
"path": ".github/chainguard/make-self-upgrade.sts.yaml",
"chars": 408,
"preview": "# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.\n# Edit https://github.com/cert-manager/makefile-modules/blob/main/m"
},
{
"path": ".github/renovate.json5",
"chars": 152,
"preview": "{\n $schema: 'https://docs.renovatebot.com/renovate-schema.json',\n extends: [\n 'github>cert-manager/makefile-modules"
},
{
"path": ".github/workflows/govulncheck.yaml",
"chars": 1729,
"preview": "# This file is MANUALLY maintained, but was originally based on the makefile-modules govulncheck workflow. See the origi"
},
{
"path": ".github/workflows/make-self-upgrade.yaml",
"chars": 4058,
"preview": "# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.\n# Edit https://github.com/cert-manager/makefile-modules/blob/main/m"
},
{
"path": ".github/workflows/release.yml",
"chars": 4366,
"preview": "name: release\non:\n push:\n tags:\n - \"v*\"\n\nenv:\n VERSION: ${{ github.ref_name }}\n\njobs:\n build_and_push:\n ru"
},
{
"path": ".github/workflows/tests.yaml",
"chars": 9452,
"preview": "name: tests\non:\n push:\n branches: [master]\n pull_request: {}\njobs:\n verify:\n runs-on: ubuntu-latest\n timeout"
},
{
"path": ".gitignore",
"chars": 177,
"preview": "/preflight\n/preflight.yaml\n/builds\n/bundles\n/output\ncredentials.json\n.terraform\nterraform.tfstate\nterraform.tfstate.back"
},
{
"path": ".golangci.yaml",
"chars": 2223,
"preview": "version: \"2\"\nlinters:\n default: none\n exclusions:\n generated: lax\n presets: [comments, common-false-positives, l"
},
{
"path": "CONTRIBUTING.md",
"chars": 6711,
"preview": "# Contributing to Discovery Agent\n\nThank you for your interest in contributing! This document provides guidelines and in"
},
{
"path": "LICENSE",
"chars": 11357,
"preview": " Apache License\n Version 2.0, January 2004\n "
},
{
"path": "LICENSES",
"chars": 5962,
"preview": "This LICENSES file is generated by the `licenses` module in makefile-modules[0].\n\nThe licenses below the \"---\" are deter"
},
{
"path": "Makefile",
"chars": 4911,
"preview": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may"
},
{
"path": "OWNERS",
"chars": 124,
"preview": "approvers:\n- j-fuentes\n- wwwil\n- charlieegan3\n- akvilemar\n- james-w\n- tfadeyi\nreviewers:\n- j-fuentes\n- wwwil\n- charlieeg"
},
{
"path": "OWNERS_ALIASES",
"chars": 349,
"preview": "# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.\n# Edit https://github.com/cert-manager/makefile-modules/blob/main/m"
},
{
"path": "README.md",
"chars": 3254,
"preview": "# Discovery Agent\n\n[\n\nfunc TestJSONGathere"
},
{
"path": "cmd/agent.go",
"chars": 1576,
"preview": "package cmd\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/spf13/cobra\"\n\n\t\"github.com/jetstack/preflight/pkg/agent\"\n\t\"github.com/j"
},
{
"path": "cmd/agent_test.go",
"chars": 3094,
"preview": "package cmd\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"gith"
},
{
"path": "cmd/ark/main.go",
"chars": 89,
"preview": "package main\n\nimport \"github.com/jetstack/preflight/cmd\"\n\nfunc main() {\n\tcmd.Execute()\n}\n"
},
{
"path": "cmd/echo.go",
"chars": 633,
"preview": "package cmd\n\nimport (\n\t\"github.com/spf13/cobra\"\n\n\t\"github.com/jetstack/preflight/pkg/echo\"\n)\n\nvar echoCmd = &cobra.Comma"
},
{
"path": "cmd/helpers.go",
"chars": 590,
"preview": "package cmd\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\n\t\"github.com/jetstack/preflight/pkg/client\"\n\t\"github.com/jetstack/preflight/pkg"
},
{
"path": "cmd/root.go",
"chars": 2337,
"preview": "package cmd\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com/spf13/cobra\"\n\t\"github.com/spf13/pflag\"\n\t\"k8s.io/k"
},
{
"path": "cmd/version.go",
"chars": 464,
"preview": "package cmd\n\nimport (\n\t\"github.com/spf13/cobra\"\n)\n\nvar verbose bool\n\nvar versionCmd = &cobra.Command{\n\tUse: \"version\","
},
{
"path": "deploy/charts/disco-agent/.helmignore",
"chars": 349,
"preview": "# Patterns to ignore when building packages.\n# This supports shell glob matching, relative path matching, and\n# negation"
},
{
"path": "deploy/charts/disco-agent/Chart.yaml",
"chars": 548,
"preview": "apiVersion: v2\nname: disco-agent\ndescription: |-\n The disco-agent connects your Kubernetes or Openshift cluster to Cybe"
},
{
"path": "deploy/charts/disco-agent/README.md",
"chars": 12301,
"preview": "# disco-agent\n\nThe Cyberark Discovery and Context Agent connects your Kubernetes or OpenShift\ncluster to the Discovery a"
},
{
"path": "deploy/charts/disco-agent/templates/NOTES.txt",
"chars": 575,
"preview": "CHART NAME: {{ .Chart.Name }}\nCHART VERSION: {{ .Chart.Version }}\nAPP VERSION: {{ .Chart.AppVersion }}\n\n- Check the appl"
},
{
"path": "deploy/charts/disco-agent/templates/_helpers.tpl",
"chars": 3995,
"preview": "{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"disco-agent.name\" -}}\n{{- default .Chart.Name .Values.nameOverride |"
},
{
"path": "deploy/charts/disco-agent/templates/configmap.yaml",
"chars": 3953,
"preview": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ include \"disco-agent.fullname\" . }}-config\n namespace: {{ .Release."
},
{
"path": "deploy/charts/disco-agent/templates/deployment.yaml",
"chars": 5237,
"preview": "{{- if not .Values.acceptTerms }}\n {{- fail \"\\n\\n=================================================================\\n "
},
{
"path": "deploy/charts/disco-agent/templates/poddisruptionbudget.yaml",
"chars": 872,
"preview": "{{- if .Values.podDisruptionBudget.enabled }}\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n name: {{ inclu"
},
{
"path": "deploy/charts/disco-agent/templates/podmonitor.yaml",
"chars": 1372,
"preview": "{{- if and .Values.metrics.enabled .Values.metrics.podmonitor.enabled }}\napiVersion: monitoring.coreos.com/v1\nkind: PodM"
},
{
"path": "deploy/charts/disco-agent/templates/rbac.yaml",
"chars": 4036,
"preview": "---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n name: {{ include \"disco-agent.fullname\" . }}-event-e"
},
{
"path": "deploy/charts/disco-agent/templates/serviceaccount.yaml",
"chars": 397,
"preview": "{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ include \"disco-agent.s"
},
{
"path": "deploy/charts/disco-agent/tests/README.md",
"chars": 195,
"preview": "# `helm unittest`\n\nWe use `helm unittest` to test the YAML output coming out of the Helm chart.\n\nIn order to update the "
},
{
"path": "deploy/charts/disco-agent/tests/__snapshot__/configmap_test.yaml.snap",
"chars": 17207,
"preview": "custom-cluster-description:\n 1: |\n apiVersion: v1\n data:\n config.yaml: |-\n cluster_name: \"\"\n c"
},
{
"path": "deploy/charts/disco-agent/tests/configmap_test.yaml",
"chars": 693,
"preview": "suite: test the contents of the config.yaml\ntemplates:\n - configmap.yaml\nrelease:\n name: test\n namespace: test-ns\ntes"
},
{
"path": "deploy/charts/disco-agent/values.linter.exceptions",
"chars": 0,
"preview": ""
},
{
"path": "deploy/charts/disco-agent/values.schema.json",
"chars": 18655,
"preview": "{\n \"$defs\": {\n \"helm-values\": {\n \"additionalProperties\": false,\n \"properties\": {\n \"acceptTerms\": {\n"
},
{
"path": "deploy/charts/disco-agent/values.yaml",
"chars": 9512,
"preview": "# Default values for disco-agent.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates."
},
{
"path": "deploy/charts/discovery-agent/.helmignore",
"chars": 349,
"preview": "# Patterns to ignore when building packages.\n# This supports shell glob matching, relative path matching, and\n# negation"
},
{
"path": "deploy/charts/discovery-agent/Chart.yaml",
"chars": 549,
"preview": "apiVersion: v2\nname: discovery-agent\ndescription: |-\n The discovery-agent connects your Kubernetes or Openshift cluster"
},
{
"path": "deploy/charts/discovery-agent/README.md",
"chars": 9932,
"preview": "# discovery-agent\n\nThe Discovery Agent connects your Kubernetes or OpenShift cluster to Palo Alto NGTS.\n\n## Values\n\n<!--"
},
{
"path": "deploy/charts/discovery-agent/templates/NOTES.txt",
"chars": 395,
"preview": "CHART NAME: {{ .Chart.Name }}\nCHART VERSION: {{ .Chart.Version }}\nAPP VERSION: {{ .Chart.AppVersion }}\n\n- Check the appl"
},
{
"path": "deploy/charts/discovery-agent/templates/_helpers.tpl",
"chars": 3579,
"preview": "{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"discovery-agent.name\" -}}\n{{- default .Chart.Name .Values.nameOverri"
},
{
"path": "deploy/charts/discovery-agent/templates/configmap.yaml",
"chars": 2150,
"preview": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ include \"discovery-agent.fullname\" . }}-config\n namespace: {{ .Rele"
},
{
"path": "deploy/charts/discovery-agent/templates/deployment.yaml",
"chars": 4439,
"preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ include \"discovery-agent.fullname\" . }}\n labels:\n {{- incl"
},
{
"path": "deploy/charts/discovery-agent/templates/poddisruptionbudget.yaml",
"chars": 884,
"preview": "{{- if .Values.podDisruptionBudget.enabled }}\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n name: {{ inclu"
},
{
"path": "deploy/charts/discovery-agent/templates/podmonitor.yaml",
"chars": 1388,
"preview": "{{- if and .Values.metrics.enabled .Values.metrics.podmonitor.enabled }}\napiVersion: monitoring.coreos.com/v1\nkind: PodM"
},
{
"path": "deploy/charts/discovery-agent/templates/rbac.yaml",
"chars": 3283,
"preview": "---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n name: {{ include \"discovery-agent.fullname\" . }}-eve"
},
{
"path": "deploy/charts/discovery-agent/templates/serviceaccount.yaml",
"chars": 405,
"preview": "{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ include \"discovery-age"
},
{
"path": "deploy/charts/discovery-agent/tests/configmap_test.yaml",
"chars": 2642,
"preview": "suite: test configmap\ntemplates:\n - configmap.yaml\n\ntests:\n # Test basic ConfigMap rendering\n - it: should create Con"
},
{
"path": "deploy/charts/discovery-agent/tests/deployment_test.yaml",
"chars": 10193,
"preview": "suite: test deployment\ntemplates:\n - deployment.yaml\n\ntests:\n # Test that tsgID is rendered correctly as a string\n - "
},
{
"path": "deploy/charts/discovery-agent/tests/poddisruptionbudget_test.yaml",
"chars": 2813,
"preview": "suite: test poddisruptionbudget\ntemplates:\n - poddisruptionbudget.yaml\n\ntests:\n # Test PodDisruptionBudget is not crea"
},
{
"path": "deploy/charts/discovery-agent/tests/podmonitor_test.yaml",
"chars": 4653,
"preview": "suite: test podmonitor\ntemplates:\n - podmonitor.yaml\n\ntests:\n # Test PodMonitor is not created by default\n - it: shou"
},
{
"path": "deploy/charts/discovery-agent/tests/rbac_test.yaml",
"chars": 4970,
"preview": "suite: test rbac\ntemplates:\n - rbac.yaml\n\ntests:\n # Test that all RBAC resources are created\n - it: should create all"
},
{
"path": "deploy/charts/discovery-agent/tests/serviceaccount_test.yaml",
"chars": 2306,
"preview": "suite: test serviceaccount\ntemplates:\n - serviceaccount.yaml\n\ntests:\n # Test ServiceAccount is created by default\n - "
},
{
"path": "deploy/charts/discovery-agent/values.linter.exceptions",
"chars": 0,
"preview": ""
},
{
"path": "deploy/charts/discovery-agent/values.schema.json",
"chars": 19197,
"preview": "{\n \"$defs\": {\n \"helm-values\": {\n \"additionalProperties\": false,\n \"properties\": {\n \"affinity\": {\n "
},
{
"path": "deploy/charts/discovery-agent/values.yaml",
"chars": 9898,
"preview": "# Configuration for the Discovery Agent\nconfig:\n # Required: The TSG (Tenant Service Group) ID to use when connecting t"
},
{
"path": "deploy/charts/venafi-kubernetes-agent/.helmignore",
"chars": 349,
"preview": "# Patterns to ignore when building packages.\n# This supports shell glob matching, relative path matching, and\n# negation"
},
{
"path": "deploy/charts/venafi-kubernetes-agent/Chart.yaml",
"chars": 593,
"preview": "apiVersion: v2\nname: venafi-kubernetes-agent\ntype: application\n\ndescription: |-\n The Discovery Agent connects your Kube"
},
{
"path": "deploy/charts/venafi-kubernetes-agent/README.md",
"chars": 15582,
"preview": "# venafi-kubernetes-agent\n\nThe Discovery Agent connects your Kubernetes or OpenShift cluster to the CyberArk Certificate"
},
{
"path": "deploy/charts/venafi-kubernetes-agent/crd_bases/crd.footer.yaml",
"chars": 20,
"preview": "{{ end }}\n{{ end }}\n"
},
{
"path": "deploy/charts/venafi-kubernetes-agent/crd_bases/crd.header-without-validations.yaml",
"chars": 617,
"preview": "{{/* DO NOT EDIT. Use 'make generate-crds-venconn' to regenerate. */}}\n{{- if .Values.crds.venafiConnection.include }}\n{"
},
{
"path": "deploy/charts/venafi-kubernetes-agent/crd_bases/crd.header.yaml",
"chars": 621,
"preview": "{{/* DO NOT EDIT. Use 'make generate-crds-venconn' to regenerate. */}}\n{{- if .Values.crds.venafiConnection.include }}\n{"
},
{
"path": "deploy/charts/venafi-kubernetes-agent/crd_bases/jetstack.io_venaficonnections.yaml",
"chars": 78089,
"preview": "# DO NOT EDIT: Use 'make generate-crds-venconn' to regenerate.\n---\napiVersion: apiextensions.k8s.io/v1\nkind: CustomResou"
},
{
"path": "deploy/charts/venafi-kubernetes-agent/templates/NOTES.txt",
"chars": 1207,
"preview": "{{- if .Values.config.configmap.name }}\nYou are using a custom configuration in the following ConfigMap: {{ .Values.conf"
},
{
"path": "deploy/charts/venafi-kubernetes-agent/templates/_helpers.tpl",
"chars": 4140,
"preview": "{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"venafi-kubernetes-agent.name\" -}}\n{{- default .Chart.Name .Values.na"
},
{
"path": "deploy/charts/venafi-kubernetes-agent/templates/_venafi-connection.tpl",
"chars": 732,
"preview": "{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"venafi-connection.chart\" -}}\n{{- printf "
},
{
"path": "deploy/charts/venafi-kubernetes-agent/templates/configmap.yaml",
"chars": 7857,
"preview": "{{ if not .Values.config.configmap.name }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: agent-config\n namespace"
},
{
"path": "deploy/charts/venafi-kubernetes-agent/templates/deployment.yaml",
"chars": 5013,
"preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ include \"venafi-kubernetes-agent.fullname\" . }}\n namespace: {"
},
{
"path": "deploy/charts/venafi-kubernetes-agent/templates/poddisruptionbudget.yaml",
"chars": 908,
"preview": "{{- if .Values.podDisruptionBudget.enabled }}\napiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n name: {{ inclu"
},
{
"path": "deploy/charts/venafi-kubernetes-agent/templates/podmonitor.yaml",
"chars": 1423,
"preview": "{{- if and .Values.metrics.enabled .Values.metrics.podmonitor.enabled }}\napiVersion: monitoring.coreos.com/v1\nkind: PodM"
},
{
"path": "deploy/charts/venafi-kubernetes-agent/templates/rbac.yaml",
"chars": 14468,
"preview": "---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n name: {{ include \"venafi-kubernetes-agent.fullname\" "
},
{
"path": "deploy/charts/venafi-kubernetes-agent/templates/serviceaccount.yaml",
"chars": 390,
"preview": "{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ include \"venafi-kubern"
},
{
"path": "deploy/charts/venafi-kubernetes-agent/templates/venafi-connection-crd.without-validations.yaml",
"chars": 76127,
"preview": "{{/* DO NOT EDIT. Use 'make generate-crds-venconn' to regenerate. */}}\n{{- if .Values.crds.venafiConnection.include }}\n{"
},
{
"path": "deploy/charts/venafi-kubernetes-agent/templates/venafi-connection-crd.yaml",
"chars": 79406,
"preview": "{{/* DO NOT EDIT. Use 'make generate-crds-venconn' to regenerate. */}}\n{{- if .Values.crds.venafiConnection.include }}\n{"
},
{
"path": "deploy/charts/venafi-kubernetes-agent/templates/venafi-connection-rbac.yaml",
"chars": 1311,
"preview": "{{- if .Values.crds.venafiConnection.include }}\n# The 'venafi-connection' service account is used by multiple\n# controll"
},
{
"path": "deploy/charts/venafi-kubernetes-agent/templates/venafi-rbac.yaml",
"chars": 939,
"preview": "{{- if .Values.authentication.venafiConnection.enabled }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n"
},
{
"path": "deploy/charts/venafi-kubernetes-agent/tests/__snapshot__/configmap_test.yaml.snap",
"chars": 37787,
"preview": "custom-cluster-description:\n 1: |\n raw: |\n - Check the credentials Secret exists: \"agent-credentials\"\n > k"
},
{
"path": "deploy/charts/venafi-kubernetes-agent/tests/configmap_test.yaml",
"chars": 853,
"preview": "suite: test the contents of the config.yaml\ntemplates:\n - configmap.yaml\n - NOTES.txt\nrelease:\n name: test\n namespac"
},
{
"path": "deploy/charts/venafi-kubernetes-agent/tests/deployment_test.yaml",
"chars": 4571,
"preview": "suite: test deployment\ntemplates:\n - deployment.yaml\n\ntests:\n # Basic checks on deployment\n - it: templates as expect"
},
{
"path": "deploy/charts/venafi-kubernetes-agent/tests/values/custom-volumes.yaml",
"chars": 319,
"preview": "volumes:\n - name: cabundle\n configMap:\n name: cabundle\n optional: false\n defaultMode: 0644\n item"
},
{
"path": "deploy/charts/venafi-kubernetes-agent/values.linter.exceptions",
"chars": 0,
"preview": ""
},
{
"path": "deploy/charts/venafi-kubernetes-agent/values.schema.json",
"chars": 26036,
"preview": "{\n \"$defs\": {\n \"helm-values\": {\n \"additionalProperties\": false,\n \"properties\": {\n \"affinity\": {\n "
},
{
"path": "deploy/charts/venafi-kubernetes-agent/values.yaml",
"chars": 13256,
"preview": "# Default values for jetstack-agent.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templat"
},
{
"path": "docs/datagatherers/k8s-discovery.md",
"chars": 434,
"preview": "# k8s-discovery\n\nThis datagatherer uses the [DiscoveryClient](https://godoc.org/k8s.io/client-go/discovery#DiscoveryClie"
},
{
"path": "docs/datagatherers/k8s-dynamic.md",
"chars": 3286,
"preview": "# Kubernetes Data Gatherer\n\nThe Kubernetes dynamic data gatherer collects information about resources stored\nin the Kube"
},
{
"path": "docs/datagatherers/local.md",
"chars": 732,
"preview": "# Local Data Gatherer\n\nThe Local data gatherer is intended to be used for reading data for evaluation\nfrom the local fil"
},
{
"path": "examples/cert-manager-agent.yaml",
"chars": 848,
"preview": "organization_id: \"my-organization\"\ncluster_id: \"my_cluster\"\nschedule: \"* * * *\"\ntoken: xxxx\nendpoint:\n protocol: https\n"
},
{
"path": "examples/echo/example.json",
"chars": 22,
"preview": "{\n \"sampledata\": 1\n}\n"
},
{
"path": "examples/echo/example2.json",
"chars": 20,
"preview": "{\n\t\"sampledata\": 1\n}"
},
{
"path": "examples/localfile/config.yaml",
"chars": 80,
"preview": "# No config is required to run the agent with an input file and an output file.\n"
},
{
"path": "examples/localfile/input.json",
"chars": 3,
"preview": "[]\n"
},
{
"path": "examples/machinehub/config.yaml",
"chars": 11,
"preview": "# Not used\n"
},
{
"path": "examples/machinehub/input.json",
"chars": 4930,
"preview": "[\n {\n \"data-gatherer\": \"ark/oidc\",\n \"data\": {\n \"openid_configuration\": {\n \"id"
},
{
"path": "examples/machinehub.yaml",
"chars": 4117,
"preview": "# An example agent config for MachineHub output mode.\n#\n# For example:\n#\n# export ARK_SUBDOMAIN= # your CyberArk t"
},
{
"path": "examples/one-shot-oidc.yaml",
"chars": 359,
"preview": "# one-shot-oidc.yaml\n#\n# An example configuration file which can be used for local testing.\n# For example:\n#\n# go run ."
},
{
"path": "examples/one-shot-secret.yaml",
"chars": 809,
"preview": "# one-shot-secret.yaml\n#\n# An example configuration file which can be used for local testing.\n# It gathers only secrets "
},
{
"path": "go.mod",
"chars": 5307,
"preview": "// TODO(wallrj): Rename the Go module to match the repository name\nmodule github.com/jetstack/preflight\n\ngo 1.24.4\n\nrequ"
},
{
"path": "go.sum",
"chars": 32109,
"preview": "cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=\ncel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX"
},
{
"path": "hack/ark/cluster-external-secret.yaml",
"chars": 839,
"preview": "# Sample ClusterExternalSecret for e2e testing\n# This is a minimal ClusterExternalSecret CR that will be discovered by t"
},
{
"path": "hack/ark/cluster-secret-store.yaml",
"chars": 644,
"preview": "# Sample ClusterSecretStore for e2e testing\n# This is a minimal ClusterSecretStore CR that will be discovered by the age"
},
{
"path": "hack/ark/conjur-connect-configmap.yaml",
"chars": 2029,
"preview": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: conjur-connect-configmap\n namespace: default\n labels:\n conjur.org/"
},
{
"path": "hack/ark/external-secret.yaml",
"chars": 732,
"preview": "# Sample ExternalSecret for e2e testing\n# This is a minimal ExternalSecret CR that will be discovered by the agent.\n# No"
},
{
"path": "hack/ark/secret-store.yaml",
"chars": 656,
"preview": "# Sample SecretStore for e2e testing\n# This is a minimal SecretStore CR that will be discovered by the agent.\n# Note: Th"
},
{
"path": "hack/ark/test-e2e.sh",
"chars": 5455,
"preview": "#!/usr/bin/env bash\n#\n# Build and deploy the disco-agent Helm chart.\n# Wait for the agent to log a message indicating su"
},
{
"path": "hack/e2e/application-team-1.yaml",
"chars": 2213,
"preview": "apiVersion: v1\nkind: Namespace\nmetadata:\n name: team-1\n---\napiVersion: policy.cert-manager.io/v1alpha1\nkind: Certificat"
},
{
"path": "hack/e2e/test.sh",
"chars": 9135,
"preview": "#!/usr/bin/env bash\n#\n# Build and install venafi-kubernetes-agent for VenafiConnection based authentication.\n# Wait for "
},
{
"path": "hack/e2e/values.venafi-kubernetes-agent.yaml",
"chars": 305,
"preview": "config:\n clusterName: venafi-kubernetes-agent-e2e\n clusterDescription: |\n A cluster used for testing the venafi-k"
},
{
"path": "hack/e2e/venafi-components.yaml",
"chars": 598,
"preview": "apiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: venafi-components\n---\napiVersion: rbac.authorization.k8s.io/v1\nkin"
},
{
"path": "hack/ngts/custom_ca.yaml",
"chars": 282,
"preview": "# These values are used to set a custom CA bundle during the NGTS test.\n# Only used when developing locally, as detected"
},
{
"path": "hack/ngts/test-e2e.sh",
"chars": 5048,
"preview": "#!/usr/bin/env bash\n#\n# Build and deploy the discovery-agent Helm chart for NGTS.\n# Wait for the agent to log a message "
},
{
"path": "internal/cyberark/api/telemetry.go",
"chars": 1382,
"preview": "package api\n\nimport (\n\t\"encoding/base64\"\n\t\"net/http\"\n\t\"net/url\"\n\n\t\"github.com/jetstack/preflight/pkg/version\"\n)\n\n// Inte"
},
{
"path": "internal/cyberark/api/telemetry_test.go",
"chars": 991,
"preview": "package api\n\nimport (\n\t\"encoding/base64\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\n//"
},
{
"path": "internal/cyberark/client.go",
"chars": 2487,
"preview": "package cyberark\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"net/http\"\n\t\"os\"\n\n\t\"github.com/jetstack/preflight/internal/cyberark/dat"
},
{
"path": "internal/cyberark/client_test.go",
"chars": 3372,
"preview": "package cyberark_test\n\nimport (\n\t\"crypto/x509\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/jetstack/venafi-connection-lib/"
},
{
"path": "internal/cyberark/dataupload/dataupload.go",
"chars": 10125,
"preview": "package dataupload\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/sha256\"\n\t\"encoding/base64\"\n\t\"encoding/hex\"\n\t\"encoding/json\"\n\t\""
},
{
"path": "internal/cyberark/dataupload/dataupload_test.go",
"chars": 3513,
"preview": "package dataupload_test\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n\t\"k8s.io/klog/v2"
},
{
"path": "internal/cyberark/dataupload/mock.go",
"chars": 10448,
"preview": "package dataupload\n\nimport (\n\t\"bytes\"\n\t\"crypto/rand\"\n\t\"crypto/sha256\"\n\t\"encoding/base64\"\n\t\"encoding/hex\"\n\t\"encoding/json"
},
{
"path": "internal/cyberark/identity/advance_authentication_test.go",
"chars": 5144,
"preview": "package identity\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"k8s.io/klog/v2\"\n\t\"k8s.io/klog/v2/ktesting\"\n\n\t\"github.com/jetstack/prefli"
},
{
"path": "internal/cyberark/identity/authenticated_http_client.go",
"chars": 674,
"preview": "package identity\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n)\n\ntype RequestAuthenticator func(req *http.Request) (string, error)\n\n// A"
},
{
"path": "internal/cyberark/identity/cmd/testidentity/main.go",
"chars": 2442,
"preview": "package main\n\nimport (\n\t\"context\"\n\t\"crypto/x509\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"os/signal\"\n\n\t\"github.com/jetstack/venafi-connect"
},
{
"path": "internal/cyberark/identity/identity.go",
"chars": 19066,
"preview": "package identity\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"sync\"\n\t\"time\"\n\n\t\"k"
},
{
"path": "internal/cyberark/identity/identity_test.go",
"chars": 4005,
"preview": "package identity\n\n// This file contains tests for the LoginUsernamePassword function in the\n// identity package. The tes"
},
{
"path": "internal/cyberark/identity/mock.go",
"chars": 7343,
"preview": "package identity\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n\n\t\"github.com/s"
},
{
"path": "internal/cyberark/identity/start_authentication_test.go",
"chars": 2902,
"preview": "package identity\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/jetstack/preflight/internal/cyberark/servicediscovery\"\n)\n\nfun"
},
{
"path": "internal/cyberark/identity/testdata/advance_authentication_failure.json",
"chars": 386,
"preview": "{\n \"success\": false,\n \"Result\": {\n \"Summary\": \"Failure\"\n },\n \"Message\": \"Authentication (login or challenge) has "
},
{
"path": "internal/cyberark/identity/testdata/advance_authentication_success.json",
"chars": 644,
"preview": "{\n \"success\": true,\n \"Result\": {\n \"AuthLevel\": \"Normal\",\n \"DisplayName\": \"Namey McNamerson\",\n \"Token\": \"succe"
},
{
"path": "internal/cyberark/identity/testdata/start_authentication_bad_user_session_id.json",
"chars": 958,
"preview": "{\n \"success\": true,\n \"Result\": {\n \"ClientHints\": {\n \"PersistDefault\": false,\n \"AllowPersist\": true,\n "
},
{
"path": "internal/cyberark/identity/testdata/start_authentication_failure.json",
"chars": 373,
"preview": "{\n \"success\": false,\n \"Result\": {\n \"Summary\": \"Undefined\"\n },\n \"Message\": \"Authentication (login or challenge) ha"
},
{
"path": "internal/cyberark/identity/testdata/start_authentication_success.json",
"chars": 953,
"preview": "{\n \"success\": true,\n \"Result\": {\n \"ClientHints\": {\n \"PersistDefault\": false,\n \"AllowPersist\": true,\n "
},
{
"path": "internal/cyberark/identity/testdata/start_authentication_success_multiple_challenges.json",
"chars": 1441,
"preview": "{\n \"success\": true,\n \"Result\": {\n \"ClientHints\": {\n \"PersistDefault\": false,\n \"AllowPersist\": true,\n "
},
{
"path": "internal/cyberark/identity/testdata/start_authentication_success_multiple_mechanisms.json",
"chars": 1391,
"preview": "{\n \"success\": true,\n \"Result\": {\n \"ClientHints\": {\n \"PersistDefault\": false,\n \"AllowPersist\": true,\n "
},
{
"path": "internal/cyberark/identity/testdata/start_authentication_success_no_up_mechanism.json",
"chars": 1108,
"preview": "{\n \"success\": true,\n \"Result\": {\n \"ClientHints\": {\n \"PersistDefault\": false,\n \"AllowPersist\": true,\n "
},
{
"path": "internal/cyberark/servicediscovery/discovery.go",
"chars": 7112,
"preview": "package servicediscovery\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"path\"\n\t\"sync\""
},
{
"path": "internal/cyberark/servicediscovery/discovery_test.go",
"chars": 2560,
"preview": "package servicediscovery\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testif"
},
{
"path": "internal/cyberark/servicediscovery/mock.go",
"chars": 4983,
"preview": "package servicediscovery\n\nimport (\n\t\"bytes\"\n\t\"crypto/rand\"\n\t\"encoding/hex\"\n\t\"encoding/json\"\n\t\"net/http\"\n\t\"net/http/httpt"
},
{
"path": "internal/cyberark/servicediscovery/testdata/README.md",
"chars": 549,
"preview": "# Test data for CyberArk Discovery\n\nAll data in this folder is derived from an unauthenticated endpoint accessible from "
},
{
"path": "internal/cyberark/servicediscovery/testdata/discovery_success.json.template",
"chars": 5675,
"preview": "{\n \"region\": \"us-east-1\",\n \"dr_region\": \"us-east-2\",\n \"subdomain\": \"venafi-test\",\n \"platform_id\": \"platform-123\",\n "
},
{
"path": "internal/cyberark/testing/testing.go",
"chars": 392,
"preview": "package testing\n\nimport (\n\t\"os\"\n\t\"testing\"\n)\n\n// SkipIfNoEnv skips the test if the required CyberArk environment variabl"
},
{
"path": "internal/envelope/doc.go",
"chars": 513,
"preview": "// Package envelope provides types and interfaces for envelope encryption.\n//\n// Envelope encryption combines asymmetric"
},
{
"path": "internal/envelope/keyfetch/client.go",
"chars": 6058,
"preview": "package keyfetch\n\nimport (\n\t\"context\"\n\t\"crypto/rsa\"\n\t\"crypto/x509\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"sync\"\n\t\"time\"\n\n"
},
{
"path": "internal/envelope/keyfetch/client_test.go",
"chars": 14371,
"preview": "package keyfetch\n\nimport (\n\t\"context\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\""
},
{
"path": "internal/envelope/keyfetch/doc.go",
"chars": 440,
"preview": "// Package keyfetch provides a client for fetching encryption keys from an HTTP endpoint.\n//\n// The client retrieves pub"
},
{
"path": "internal/envelope/keyfetch/fake.go",
"chars": 2136,
"preview": "package keyfetch\n\nimport (\n\t\"context\"\n\t\"crypto/rand\"\n\t\"crypto/rsa\"\n\t\"fmt\"\n)\n\n// Compile-time check that FakeClient imple"
},
{
"path": "internal/envelope/keyfetch/fake_test.go",
"chars": 2150,
"preview": "package keyfetch\n\nimport (\n\t\"context\"\n\t\"crypto/rand\"\n\t\"crypto/rsa\"\n\t\"errors\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/a"
},
{
"path": "internal/envelope/rsa/doc.go",
"chars": 492,
"preview": "// Package rsa implements RSA envelope encryption using JWE (JSON Web Encryption) format.\n// It conforms to the interfac"
},
{
"path": "internal/envelope/rsa/encryptor.go",
"chars": 2286,
"preview": "package rsa\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com/lestrrat-go/jwx/v3/jwa\"\n\t\"github.com/lestrrat-go/jwx/v3/jwe\"\n\n\t\"gi"
},
{
"path": "internal/envelope/rsa/encryptor_test.go",
"chars": 5007,
"preview": "package rsa\n\nimport (\n\t\"crypto/rand\"\n\t\"crypto/rsa\"\n\t\"encoding/base64\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com/lestrr"
},
{
"path": "internal/envelope/rsa/keys.go",
"chars": 3026,
"preview": "package rsa\n\nimport (\n\t\"crypto/rsa\"\n\t\"crypto/x509\"\n\t\"encoding/pem\"\n\t\"fmt\"\n\t\"os\"\n)\n\n// This file contains helpers for loa"
},
{
"path": "internal/envelope/rsa/keys_test.go",
"chars": 4706,
"preview": "package rsa_test\n\nimport (\n\t\"crypto/ecdsa\"\n\t\"crypto/elliptic\"\n\t\"crypto/rand\"\n\t\"crypto/rsa\"\n\t\"crypto/x509\"\n\t\"encoding/pem"
},
{
"path": "internal/envelope/types.go",
"chars": 1239,
"preview": "package envelope\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n)\n\n// EncryptedData represents encrypted data along with metadata"
},
{
"path": "klone.yaml",
"chars": 2718,
"preview": "# This klone.yaml file describes the Makefile modules and versions that are\n# cloned into the \"make/_shared\" folder. The"
},
{
"path": "main.go",
"chars": 89,
"preview": "package main\n\nimport \"github.com/jetstack/preflight/cmd\"\n\nfunc main() {\n\tcmd.Execute()\n}\n"
},
{
"path": "make/00_mod.mk",
"chars": 3420,
"preview": "repo_name := github.com/jetstack/jetstack-secure\n# This is a work around for the mismatch between the repo name and the "
},
{
"path": "make/02_mod.mk",
"chars": 5573,
"preview": "include make/test-unit.mk\ninclude make/ark/02_mod.mk\ninclude make/ngts/02_mod.mk\n\nGITHUB_OUTPUT ?= /dev/stderr\n.PHONY: r"
},
{
"path": "make/_shared/generate-verify/00_mod.mk",
"chars": 706,
"preview": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may"
},
{
"path": "make/_shared/generate-verify/02_mod.mk",
"chars": 1712,
"preview": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may"
},
{
"path": "make/_shared/generate-verify/util/verify.sh",
"chars": 2447,
"preview": "#!/usr/bin/env bash\n\n# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the "
},
{
"path": "make/_shared/go/.golangci.override.yaml",
"chars": 2066,
"preview": "version: \"2\"\nlinters:\n default: none\n exclusions:\n generated: lax\n presets: [ comments, common-false-positives, "
},
{
"path": "make/_shared/go/01_mod.mk",
"chars": 6196,
"preview": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may"
},
{
"path": "make/_shared/go/README.md",
"chars": 49,
"preview": "# README\n\nA module for various Go static checks.\n"
},
{
"path": "make/_shared/go/base/.github/workflows/govulncheck.yaml",
"chars": 1190,
"preview": "# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.\n# Edit https://github.com/cert-manager/makefile-modules/blob/main/m"
},
{
"path": "make/_shared/helm/01_mod.mk",
"chars": 785,
"preview": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may"
},
{
"path": "make/_shared/helm/crd.template.footer.yaml",
"chars": 11,
"preview": "{{- end }}\n"
},
{
"path": "make/_shared/helm/crd.template.header.yaml",
"chars": 298,
"preview": "{{- if REPLACE_CRD_EXPRESSION }}\napiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n name: \"R"
},
{
"path": "make/_shared/helm/crds.mk",
"chars": 3227,
"preview": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may"
},
{
"path": "make/_shared/helm/crds_dir.README.md",
"chars": 477,
"preview": "# CRDs source directory\n\n> **WARNING**: if you are an end-user, you probably should NOT need to use the\n> files in this "
},
{
"path": "make/_shared/helm/deploy.mk",
"chars": 1666,
"preview": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may"
},
{
"path": "make/_shared/helm/helm.mk",
"chars": 7688,
"preview": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may"
},
{
"path": "make/_shared/help/01_mod.mk",
"chars": 735,
"preview": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may"
},
{
"path": "make/_shared/help/help.sh",
"chars": 4647,
"preview": "#!/usr/bin/env bash\n\n# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the "
},
{
"path": "make/_shared/kind/00_kind_image_versions.mk",
"chars": 2194,
"preview": "# Copyright 2024 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may"
},
{
"path": "make/_shared/kind/00_mod.mk",
"chars": 1244,
"preview": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may"
},
{
"path": "make/_shared/kind/01_mod.mk",
"chars": 708,
"preview": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may"
},
{
"path": "make/_shared/kind/kind-image-preload.mk",
"chars": 2829,
"preview": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may"
},
{
"path": "make/_shared/kind/kind.mk",
"chars": 3277,
"preview": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may"
},
{
"path": "make/_shared/klone/01_mod.mk",
"chars": 938,
"preview": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may"
},
{
"path": "make/_shared/licenses/00_mod.mk",
"chars": 656,
"preview": "# Copyright 2024 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may"
},
{
"path": "make/_shared/licenses/01_mod.mk",
"chars": 3026,
"preview": "# Copyright 2024 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may"
},
{
"path": "make/_shared/licenses/licenses.tmpl",
"chars": 1692,
"preview": "This LICENSES file is generated by the `licenses` module in makefile-modules[0].\n\nThe licenses below the \"---\" are deter"
},
{
"path": "make/_shared/oci-build/00_mod.mk",
"chars": 6128,
"preview": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may"
},
{
"path": "make/_shared/oci-build/01_mod.mk",
"chars": 3486,
"preview": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may"
},
{
"path": "make/_shared/oci-publish/00_mod.mk",
"chars": 2748,
"preview": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may"
},
{
"path": "make/_shared/oci-publish/01_mod.mk",
"chars": 5076,
"preview": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may"
},
{
"path": "make/_shared/oci-publish/image-exists.sh",
"chars": 1894,
"preview": "#!/usr/bin/env bash\n\n# Copyright 2022 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the "
},
{
"path": "make/_shared/repository-base/01_mod.mk",
"chars": 1315,
"preview": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may"
},
{
"path": "make/_shared/repository-base/base/.github/chainguard/make-self-upgrade.sts.yaml",
"chars": 409,
"preview": "# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.\n# Edit https://github.com/cert-manager/makefile-modules/blob/main/m"
},
{
"path": "make/_shared/repository-base/base/.github/workflows/make-self-upgrade.yaml",
"chars": 4060,
"preview": "# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.\n# Edit https://github.com/cert-manager/makefile-modules/blob/main/m"
},
{
"path": "make/_shared/repository-base/base/Makefile",
"chars": 4911,
"preview": "# Copyright 2023 The cert-manager Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may"
}
]
// ... and 56 more files (download for full content)
About this extraction
This page contains the full source code of the jetstack/preflight GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 256 files (1.4 MB), approximately 373.8k tokens, and a symbol index with 501 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.